diff --git a/arch/x86/configs/tencent.config b/arch/x86/configs/tencent.config
index c0f5462d18daea280c88ec69300bfb9fe5317f8b..d42bff8f1274ed170554cc05f273203fb4d53bc0 100644
--- a/arch/x86/configs/tencent.config
+++ b/arch/x86/configs/tencent.config
@@ -915,6 +915,9 @@ CONFIG_I40E=m
 CONFIG_I40E_DCB=y
 CONFIG_I40EVF=m
 CONFIG_ICE=m
+CONFIG_SXE2=m
+CONFIG_SXE2_VF=m
+CONFIG_INFINIBAND_SXE2RDMA=m
 CONFIG_FM10K=m
 CONFIG_IGC=m
 CONFIG_JME=m
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b6b89ac9c7df32ce378f206cb6f475de3151ccd6..9edfb273f3412179e2e1fc5508159b4043b9a2b1 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -96,6 +96,7 @@ source "drivers/infiniband/hw/qib/Kconfig"
 source "drivers/infiniband/hw/usnic/Kconfig"
 source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
 source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/hw/sxe2rdma/Kconfig"
 endif # !UML
 source "drivers/infiniband/sw/rxe/Kconfig"
 source "drivers/infiniband/sw/siw/Kconfig"
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 9b6ea9f7564a78dc502ad59ec4bc4ea031d7b930..4216c93b75c6d85a859e741a5035da432af7f989 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
 obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
 obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
 obj-m += zrdma/
+obj-$(CONFIG_INFINIBAND_SXE2RDMA) += sxe2rdma/
+
diff --git a/drivers/infiniband/hw/sxe2rdma/Kconfig b/drivers/infiniband/hw/sxe2rdma/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..3745d82473f06540bb545a960807a061e8c8e8ed
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# sxe2 RDMA device configuration
+#
+
+config INFINIBAND_SXE2RDMA
+	tristate "sxe2 RDMA adapter support"
+	depends on X86 && NETDEVICES && ETHERNET && PCI
+	default m
+	help
+	  This driver supports RDMA on the sxe2 PCI Express family of adapters.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/infiniband/hw/sxe2rdma/Makefile b/drivers/infiniband/hw/sxe2rdma/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..6d4ed43d33b4c3de35898c8db6002ef6a421299f
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/Makefile
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+CONFIG_MODULE_SIG=n
+
+# Absolute path of this Makefile
+MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+# Directory containing this Makefile
+CURDIR := $(shell dirname $(MAKEPATH))
+KDIR := /lib/modules/$(shell uname -r)/build
+ifneq ($(KERNELRELEASE),)
+# Kbuild: module objects
+obj-m += sxe2rdma.o
+sxe2rdma-objs += $(patsubst %.c, rdma/%.o, $(notdir $(wildcard $(CURDIR)/rdma/*.c)))
+# Decide whether the standalone auxiliary bus driver needs to be built
+ifneq ($(wildcard $(CURDIR)/vercode_build.sh),)
+KER_DIR=$(srctree)
+SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR))
+ifneq ($(SPECIFIC_LINUX),)
+ccflags-y += -DSPECIFIC_LINUX
+ccflags-y += -D$(SPECIFIC_LINUX)
+endif
+endif
+ifeq ($(SPECIFIC_LINUX),BCLINUX_21_10U4)
+obj-m += sxe2_auxiliary.o
+sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c)))
+else
+CHECK_AUX_BUS ?= $(CURDIR)/check_aux_support.sh
+NEED_AUX_BUS := $(shell bash ${CHECK_AUX_BUS} --ksrc="$(KDIR)" >/dev/null 2>&1; echo $$?)
+$(info "NEED_AUX_BUS = $(NEED_AUX_BUS)")
+ifeq ($(NEED_AUX_BUS), 2)
+obj-m += sxe2_auxiliary.o
+sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c)))
+endif
+endif
+# Compiler options and macros
+ccflags-y += -g -ggdb # emit debug information
+ccflags-y += -Wall # enable the common GCC warnings; add -Werror to treat them as errors
+ccflags-y += -Wextra # enable additional GCC warnings
+ccflags-y += -frecord-gcc-switches # the recorded options can later be viewed with: readelf -p .GCC.command.line
+# Kernel headers do not strictly follow these warning rules; the build fails unless the checks below are disabled
+ccflags-y += -Wno-unused-parameter -Wno-missing-field-initializers -Wno-implicit-fallthrough -Wno-type-limits
+ccflags-y += -Wmaybe-uninitialized
+ccflags-y += -I$(CURDIR)/baselib/include
+ccflags-y += -I$(CURDIR)/common/mbx
+ccflags-y += -I$(CURDIR)/common/sxe2flow
+ccflags-y += -I$(CURDIR)/common/sxe2pf
+ccflags-y += -I$(CURDIR)/include/drv
+ccflags-y += -I$(CURDIR)/rdma/compat
+ccflags-y += -I$(CURDIR)/base/auxiliary
+ccflags-y += -DSXE2_SUPPORT_CONFIGFS
+ifeq ($(MD_MAKEMODE), release)
+	ccflags-y += -DSXE2_CFG_RELEASE
+else ifeq ($(MD_MAKEMODE), trace)
+	ccflags-y += -DSXE2_CFG_RELEASE
+	ccflags-y += -DSXE2_DRIVER_TRACE
+else
+	ccflags-y += -DSXE2_CFG_DEBUG
+endif
+
+# Hardware platform selection: ASIC when requested, FPGA emulation by default
+ifeq ($(MT_BUILDMODE),asic)
+	ccflags-y += -DSXE2_HARDWARE_ASIC
+else #ifeq ($(MT_BUILDMODE),emu)
+	ccflags-y += -DSXE2_HARDWARE_EMU
+endif
+# Invoked from the command line: defer to the kernel build system
+else # KERNELRELEASE
+# Path to the kernel source tree
+KDIR := /lib/modules/$(shell uname -r)/build
+
+all:
+	@$(MAKE) -C $(KDIR) M=$(CURDIR) modules
+
+clean:
+	@rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt
+	@rm -rf ./rdma/*.o ./rdma/.*.cmd
+	@rm -rf ./base/log/*.o ./common/mbx/*.o ./common/sxe2pf/*.o
+endif # KERNELRELEASE
+
diff --git a/drivers/infiniband/hw/sxe2rdma/base/auxiliary/Makefile b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..1e9263d4d7ad10c7faa590f188221f62941c0cc5
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/Makefile
@@ -0,0 +1,13 @@
+################################################################################
+# For building the standalone auxiliary bus driver
+################################################################################
+obj-m := sxe2_auxiliary.o
+
+KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+all:
+	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules
+
+clean:
+	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean
diff --git a/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary.c b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4a22e826fbe2b1a7ce8231a608d512399a5932b
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: auxiliary.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "auxiliary_bus.h"
+
+static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+							    const struct auxiliary_device *auxdev)
+{
+	for (; id->name[0]; id++) {
+		const char *p = strrchr(dev_name(&auxdev->dev), '.');
+		size_t match_size;
+
+		if (!p)
+			continue;
+		match_size = p - dev_name(&auxdev->dev);
+
+		if (strlen(id->name) == match_size &&
+		    !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+			return id;
+	}
+	return NULL;
+}
+
+static int auxiliary_match(struct device *dev, struct device_driver *drv)
+{
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+
+	return !!auxiliary_match_id(auxdrv->id_table, auxdev);
+}
+
+static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	const char *name, *p;
+
+	name = dev_name(dev);
+	p = strrchr(name, '.');
+
+	return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX,
+			      (int)(p - name), name);
+}
+
+static const struct dev_pm_ops auxiliary_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+};
+
+static int auxiliary_bus_probe(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+	int ret;
+
+	ret = dev_pm_domain_attach(dev, true);
+	if (ret != -EPROBE_DEFER) {
+		if (auxdrv->probe) {
+			ret = auxdrv->probe(auxdev,
+					    auxiliary_match_id(auxdrv->id_table,
+							       auxdev));
+			if (ret)
+				dev_pm_domain_detach(dev, true);
+		} else {
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int auxiliary_bus_remove(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+	if (auxdrv->remove)
+		auxdrv->remove(auxdev);
+	dev_pm_domain_detach(dev, true);
+
+	return 0;
+}
+
+static void auxiliary_bus_shutdown(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = NULL;
+	struct auxiliary_device *auxdev;
+
+	if (dev->driver) {
+		auxdrv = to_auxiliary_drv(dev->driver);
+		auxdev = to_auxiliary_dev(dev);
+	}
+
+	if (auxdrv && auxdrv->shutdown)
+		auxdrv->shutdown(auxdev);
+}
+
+static struct bus_type auxiliary_bus_type = {
+	.name = "sxe2_auxiliary",
+	.probe = auxiliary_bus_probe,
+	.remove = auxiliary_bus_remove,
+	.shutdown = auxiliary_bus_shutdown,
+	.match = auxiliary_match,
+	.uevent = auxiliary_uevent,
+	.pm = &auxiliary_dev_pm_ops,
+};
+
+int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev)
+{
+	struct device *dev = &auxdev->dev;
+
+	if (!dev->parent) {
+		pr_err("auxiliary_device has a NULL dev->parent\n");
+		return -EINVAL;
+	}
+
+	if (!auxdev->name) {
+		pr_err("auxiliary_device has a NULL name\n");
+		return -EINVAL;
+	}
+
+	dev->bus = &auxiliary_bus_type;
+	device_initialize(&auxdev->dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sxe2_auxiliary_device_init);
+
+int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
+{
+	struct device *dev = &auxdev->dev;
+	int ret;
+
+	if (!modname) {
+		dev_err(dev, "auxiliary device modname is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
+	if (ret) {
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret) + dev_err(dev, "adding auxiliary device failed!: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_device_add); + +#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, + const void *data)) +#else +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + void *data, + int (*match)(struct device *dev, + void *data)) +#endif +{ + struct device *dev; + + dev = bus_find_device(&auxiliary_bus_type, start, data, match); + if (!dev) + return NULL; + + return to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_find_device); + +int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + int ret; + + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + if (!auxdrv->driver.name) + return -ENOMEM; + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + ret = driver_register(&auxdrv->driver); + if (ret) + kfree(auxdrv->driver.name); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_driver_register); + +void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_driver_unregister); + +static int __init auxiliary_bus_init(void) +{ + return bus_register(&auxiliary_bus_type); +} + +static void __exit auxiliary_bus_exit(void) +{ + bus_unregister(&auxiliary_bus_type); +} + +module_init(auxiliary_bus_init); +module_exit(auxiliary_bus_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Auxiliary Bus Standalone"); +MODULE_AUTHOR("linux.tucana@Stars Micro System.com"); diff --git a/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_bus.h b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_bus.h new file mode 100644 index 0000000000000000000000000000000000000000..f0b41e121b3fe4bc16bef959a744d3cc09a2f793 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_bus.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: auxiliary_bus.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "auxiliary_compat.h"
+
+#ifndef HAVE_AUXILIARY_DEVICE_ID
+#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_MODULE_PREFIX "sxe2_auxiliary:"
+struct auxiliary_device_id {
+	char name[AUXILIARY_NAME_SIZE];
+	kernel_ulong_t driver_data;
+};
+#endif
+
+struct auxiliary_device {
+	struct device dev;
+	const char *name;
+	u32 id;
+};
+
+struct auxiliary_driver {
+	int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+	void (*remove)(struct auxiliary_device *auxdev);
+	void (*shutdown)(struct auxiliary_device *auxdev);
+	int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+	int (*resume)(struct auxiliary_device *auxdev);
+	const char *name;
+	struct device_driver driver;
+	const struct auxiliary_device_id *id_table;
+};
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+	return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+{
+	return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev);
+#define auxiliary_device_init(auxdev) sxe2_auxiliary_device_init(auxdev)
+
+int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+
+#define auxiliary_device_add(auxdev) __sxe2_auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+	put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+	device_del(&auxdev->dev);
+}
+
+int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+				     const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+	__sxe2_auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+#define auxiliary_driver_unregister(auxdrv) sxe2_auxiliary_driver_unregister(auxdrv)
+
+#define module_auxiliary_driver(__auxiliary_driver) \
+	module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA
+struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start,
+						    const void *data,
+						    int (*match)(struct device *dev,
+								 const void *data));
+#else
+struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start,
+						    void *data,
+						    int (*match)(struct device *dev,
+								 void *data));
+#endif
+
+#define auxiliary_find_device sxe2_auxiliary_find_device
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_compat.h b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb588676c19b21e3d4acbe4f750cbf5435906520
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/base/auxiliary/auxiliary_compat.h
@@ -0,0 +1,44 @@
+#ifndef __AUXILIARY_COMPAT_H__
+#define __AUXILIARY_COMPAT_H__
+
+#include <linux/device.h>
+#include <linux/version.h>
+
+#include "sxe2_compat_gcc.h"
+
+#include "sxe2_compat_inc.h"
+
+#include "sxe2_compat.h"
+
+#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA
+struct _kc_bus_find_device_custom_data {
+	const void *real_data;
+	int (*real_match)(struct device *dev, const void *data);
+};
+
+static inline int
_kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/common/mbx/sxe2_mbx_public.h b/drivers/infiniband/hw/sxe2rdma/common/mbx/sxe2_mbx_public.h new file mode 100644 index 0000000000000000000000000000000000000000..965887b770ed8d35712936cb597c501771865c2c --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/common/mbx/sxe2_mbx_public.h @@ -0,0 +1,837 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_mbx_public.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_VF_PUBLIC_H__ +#define __SXE2_VF_PUBLIC_H__ + +#include "sxe2_spec.h" +#ifdef __KERNEL__ +#include "sxe2_compat.h" +#endif + +#include "sxe2_host_regs.h" +#include "sxe2_flow_public.h" + +#pragma pack(1) + +#define SXE2_VF_VERSION_MAJOR 1 +#define SXE2_VF_VERSION_MINOR 1 + +#define SXE2_VF_MAX_VSI_CNT 4 + +#define SXE2_VF_VLAN_STATUS_INVALID (0xFF) + +#define SXE2_VF_ETH_Q_NUM 16 +#define SXE2_VF_DPDK_Q_NUM 16 + +#define SXE2_VF_DRV_TO_HW (0x3) +#define SXE2_VF_VF_TO_PF (0x0801) +#define SXE2_VF_PF_TO_VF (0x0802) + +#define SXE2_VF_MBX_MAGIC (0xFEFEEFEF) + +#define SXE2_VF_PROMISC BIT_ULL(0) +#define SXE2_VF_PROMISC_MULTICAST BIT_ULL(1) +#define SXE2_VF_VLAN_FILTER BIT_ULL(2) + +#define SXE2_VF_OFFLOAD_L2 BIT(0) +#define SXE2_VF_OFFLOAD_VLAN BIT(1) +#define SXE2_VF_OFFLOAD_RSS BIT(2) +#define SXE2_VF_OFFLOAD_IPSEC BIT(3) +#define SXE2_VF_OFFLOAD_FNAV BIT(4) +#define SXE2_VF_OFFLOAD_TM BIT(5) +#define SXE2_VF_OFFLOAD_PTP BIT(6) + +#define SXE2_IPSEC_DIR_TX (0) +#define SXE2_IPSEC_DIR_RX (1) +#define SXE2_IPSEC_INVAILID_SA_IDX (0xFFFF) + +enum sxe2vf_vsi_type { + SXE2VF_VSI_TYPE_ETH = 0, + SXE2VF_VSI_TYPE_DPDK, + SXE2VF_VSI_TYPE_NR, +}; + +#define SXE2_VF_VSI_CNT_USED SXE2VF_VSI_TYPE_NR + +enum sxe2_vf_opcode { + SXE2_VF_UNKOWN = 0, + SXE2_VF_RESET_REQUEST = 0x1, + SXE2_VF_VERSION_MATCH = 0x2, + SXE2_VF_HW_RES_GET = 0x3, + SXE2_VF_IRQ_MAP = 0x4, + SXE2_VF_QUEUES_DISABLE = 0x5, + SXE2_VF_RXQ_CFG_AND_ENABLE = 0x6, + SXE2_VF_TXQ_CFG_AND_ENABLE = 0x7, + SXE2_VF_MAC_ADDR_ADD = 0x8, + SXE2_VF_MAC_ADDR_DEL = 0x9, + SXE2_VF_VLAN_ADD = 0xa, + SXE2_VF_VLAN_DEL = 0xb, + SXE2_VF_STATS_GET = 0xc, + SXE2_VF_LINK_UPDATE_NOTIFY = 0xd, + SXE2_VF_PROMISC_CFG = 0xe, + SXE2_VF_VLAN_CAPS_GET = 0xf, + SXE2_VF_VLAN_OFFLOAD_CFG = 0x10, + SXE2_VF_VLAN_FILTER_CFG = 0x11, + SXE2_VF_LINK_STATUS_GET = 0x12, + SXE2_VF_RESET_NOTIFY = 0x13, + SXE2_VF_RDMA = 0x14, + SXE2_VF_QV_MAP = 0x15, + SXE2_VF_QV_UNMAP = 0x16, + SXE2_VF_RDMA_MGR_CMD = 0x17, + + SXE2_VF_GET_RSS_KEY = 0x18, + SXE2_VF_GET_RSS_LUT = 0x19, + SXE2_VF_SET_RSS_KEY = 0x1a, + SXE2_VF_SET_RSS_LUT = 0x1b, + SXE2_VF_ADD_RSS_CFG = 0x1c, + + SXE2_VF_DEL_RSS_CFG = 0x1d, + SXE2_VF_CLEAR_RSS_CFG = 0x1e, + SXE2_VF_SET_RSS_HASH_CTRL = 0X1f, + + SXE2_VF_FNAV_FILTER_ADD = 0x20, + SXE2_VF_FNAV_FILTER_DEL = 0x21, + 
SXE2_VF_FNAV_FILTER_CLEAR = 0X22, + SXE2_VF_FNAV_ALLOC_STAT = 0X23, + SXE2_VF_FNAV_FREE_STAT = 0X24, + SXE2_VF_FNAV_QUERY_STAT = 0x25, + + SXE2_VF_STATS_CLEAR = 0x26, + SXE2_VF_RXQ_DISABLE = 0x27, + SXE2_VF_TXQ_DISABLE = 0x28, + + SXE2_VF_GET_PTP_CLOCK = 0x29, + SXE2_VF_IPSEC_SA_ADD = 0x2a, + SXE2_VF_IPSEC_SA_CLEAR = 0x2b, + SXE2_VF_IPSEC_GET_CAPA = 0x2c, + + SXE2_VF_RDMA_DUMP_PCAP = 0x2d, + + SXE2_VF_IRQ_UNMAP = 0x2e, + + SXE2_VF_ADD_DEFAULT_RSS_CFG = 0x2f, + SXE2_VF_REPLAY_RSS_CFG = 0x30, + SXE2_VF_STATS_PUSH = 0x31, + SXE2_VF_GET_ETHTOOL_INFO = 0x32, + SXE2_VF_FNAV_MATCH_CLEAR = 0x33, + SXE2_VF_VSI_CFG = 0x34, + SXE2_VF_USER_DRIVER_RELEASE = 0x35, + + SXE2_VF_MAC_ADDR_UPDATE = 0x36, + SXE2_VF_PROMISC_UPDATE = 0x37, + SXE2_VF_USER_VLAN_PROCESS = 0x38, + + SXE2_VF_ACL_FILTER_ADD = 0x39, + SXE2_VF_ACL_FILTER_DEL = 0x3a, + SXE2_VF_ACL_FILTER_CLEAR = 0x3b, + + SXE2_VF_PASSTHROUGH_USER_VF_DATA = 0x3c, + + SXE2_VF_DRV_MODE_SET = 0x3d, + SXE2_VF_DRV_MODE_GET = 0x3e, + + SXE2_VF_OPCODE_NR, + + SXE2_VF_MBX_DISABLE = 0xFFFF, +}; + +enum sxe2_vf_err_code { + SXE2_VF_ERR_SUCCESS = 0, + SXE2_VF_ERR_PARAM = 1024, + SXE2_VF_ERR_NO_MEMORY, + SXE2_VF_ERR_HANDLE_ERROR, + SXE2_VF_ERR_CQP_COMPL_ERROR, + SXE2_VF_ERR_INVALID_VF_ID, + SXE2_VF_ERR_ADMIN_QUEUE_ERROR, + SXE2_VF_ERR_NOT_SUPPORTED, + SXE2_VF_ERR_PF_STATUS_ABNORMAL, + SXE2_VF_ERR_VF_STATUS_ABNORMAL, +}; + +enum sxe2_vf_msg_type { + SXE2VF_MSG_TYPE_PF_TO_VF = 3, + SXE2VF_MSG_TYPE_VF_TO_PF, + SXE2VF_MSG_TYPE_DRV_TO_HW, + SXE2VF_MSG_TYPE_PF_REPLY_VF, +}; + +#define SXE2VF_CMD_HDR_SIZE sizeof(struct sxe2vf_cmd_hdr) + +#define SXE2VF_MBX_MSG_HDR_SIZE \ + sizeof(struct sxe2vf_mbx_msg_hdr) + +#define SXE2VF_MBX_RAW_MSG_MAX_SPEC (4096) + +#define SXE2VF_MBX_RAW_MSG_OFFSET (SXE2VF_CMD_HDR_SIZE + SXE2VF_MBX_MSG_HDR_SIZE) + +#define SXE2VF_MBX_FULL_HDR_SIZE SXE2VF_MBX_RAW_MSG_OFFSET + +#define SXE2VF_MBX_RAW_MSG_MAX_SIZE \ + (SXE2VF_MBX_RAW_MSG_MAX_SPEC - SXE2VF_MBX_RAW_MSG_OFFSET) + +#define SXE2VF_MBX_DATA_OFFSET(buf) \ + ((((struct sxe2_cmd_hdr *)(buf))->hdr_len) + \ + (SXE2_MBX_MSG_HDR_PTR((struct sxe2_cmd_hdr *)buf)->data_offset)) + +#define SXE2_FNAV_MAX_NUM_PROTO_HDRS (9) +#define SXE2_FNAV_MAX_NUM_ACTIONS (3) +#define SXE2_FNAV_IPV6_ADDR_LEN_TO_U32 (4) +#define SXE2_FNAV_ETH_ADDR_LEN (6) +#define SXE2_VF_FNAV_INVALID_LOC (0xFFFF) +#define SXE2_VF_FNAV_INVALID_FLOW_ID (0xFFFF) +#define SXE2_VF_FNAV_INVALID_STAT_IDX (0xFFFF) + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F + +struct sxe2vf_cmd_hdr { + __le32 magic_code; + __le16 in_len; + __le16 out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 resv[28]; + u8 body[]; +}; + +struct sxe2vf_mbx_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 recv[14]; + u8 body[]; +}; + +enum sxe2_driver_type { + SXE2_DRIVER_TYPE_VF = 0, +}; + +struct sxe2_vf_vfres_msg_req { + u8 driver_type; + u8 support_sw_stats; + u8 reserve[2]; +}; + +struct sxe2_vf_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_ver_msg { + __le16 major; + __le16 minor; +}; + +struct sxe2_vf_rxq_ctxt { + u8 lro_status; + u8 keep_crc_en; + __le16 queue_id; + __le16 depth; + __le16 buf_len; + __le64 dma_addr; +}; + +struct sxe2_vf_rxq_msg { + __le16 q_cnt; + __le16 vsi_id; + __le16 max_frame_size; + struct sxe2_vf_rxq_ctxt ctxt[]; +}; + 
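
The layout above stacks struct sxe2vf_cmd_hdr, then struct sxe2vf_mbx_msg_hdr, then the opcode-specific payload; SXE2VF_MBX_DATA_OFFSET() finds the payload again by adding hdr_len and data_offset. A minimal framing sketch, not part of the patch itself (the helper name and the choice of SXE2VF_MSG_TYPE_VF_TO_PF for cmd_type are assumptions read off the definitions above):

static void sxe2vf_frame_msg(u8 *buf, u16 vf_id, u32 opcode,
			     const void *payload, u16 payload_len)
{
	struct sxe2vf_cmd_hdr *cmd = (struct sxe2vf_cmd_hdr *)buf;
	struct sxe2vf_mbx_msg_hdr *msg = (struct sxe2vf_mbx_msg_hdr *)cmd->body;

	cmd->magic_code = cpu_to_le32(SXE2_VF_MBX_MAGIC);
	cmd->hdr_len = cpu_to_le16(SXE2VF_CMD_HDR_SIZE);
	cmd->in_len = cpu_to_le16(SXE2VF_MBX_FULL_HDR_SIZE + payload_len);
	cmd->cmd_type = SXE2VF_MSG_TYPE_VF_TO_PF;

	msg->op_code = cpu_to_le32(opcode);
	msg->vf_id = cpu_to_le16(vf_id);
	msg->data_offset = cpu_to_le32(SXE2VF_MBX_MSG_HDR_SIZE);	/* payload starts right after this header */
	msg->data_len = cpu_to_le32(payload_len);
	memcpy(msg->body, payload, payload_len);			/* opcode-specific body */
}

A SXE2_VF_VERSION_MATCH request framed this way would carry a struct sxe2_vf_ver_msg with major/minor set from SXE2_VF_VERSION_MAJOR/MINOR, and the PF's status would come back in err_code.
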
+struct sxe2_vf_vsi_sw_stats { + __le64 rx_packets; + __le64 rx_bytes; + __le64 tx_packets; + __le64 tx_bytes; +}; + +struct sxe2_vf_sw_stats { + __le16 vsi_id; + struct sxe2_vf_vsi_sw_stats sw_stats; + __le16 fnav_stats_idx; +}; + +struct sxe2_vf_vsi_res { + __le16 vsi_id; +}; + +struct sxe2_vf_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; +}; + +struct sxe2_vf_hw_stats_rsp { + struct sxe2_vf_vsi_hw_stats hw_stats; + __le64 fnav_match; +}; + +struct sxe2_fw_ver_msg { + u8 main_version_id; + u8 sub_version_id; + u8 fix_version_id; + u8 build_id; +}; + +struct sxe2_vf_txsch_caps { + u8 layer_cap; + u8 tm_mid_node_num; + u8 prio_num; +}; + +struct sxe2_vf_rxft_caps { + __le16 rss_lut_type; + __le16 rss_key_size; + __le16 rss_lut_size; + __le16 fnav_space_gsize; + __le16 fnav_space_bsize; +}; + +struct sxe2_vf_vfres_msg { + __le16 num_vsis; + __le16 max_vectors; + __le16 q_cnt; + __le16 itr_gran; + u8 addr[ETH_ALEN]; + __le16 max_vlan_cnt; + u8 port_vlan_exsit; + u8 is_switchdev; + u8 pf_cnt; + u8 parent_pfid; + __le16 vf_id_in_dev; + struct sxe2_vf_rxft_caps rxft_cap; + struct sxe2_vf_vsi_res vsi_res[SXE2_VF_MAX_VSI_CNT]; + struct sxe2_vf_txsch_caps vf_txsch_cap; + struct sxe2_fw_ver_msg fw_ver; + __le32 cap_flags; + u8 tm_layers; + u8 parent_portid; + u8 mode; +}; + +struct sxe2_vf_irq_map { + __le16 irq_id; + __le16 txq_map; + __le16 rxq_map; + __le16 rxitr_idx; + __le16 txitr_idx; +}; + +struct sxe2_vf_irq_map_msg { + __le16 num_irqs; + __le16 vsi_id; + struct sxe2_vf_irq_map irq_maps[]; +}; + +struct sxe2_vf_irq_unmap_msg { + __le16 vsi_id; +}; + +enum { + SXE2_VF_MAC_TYPE_P = 0, + SXE2_VF_MAC_TYPE_C, +}; + +struct sxe2_vf_addr { + u8 addr[ETH_ALEN]; + u8 type; +}; + +struct sxe2_vf_addr_msg { + bool is_user; + __le16 vsi_id; + __le16 addr_cnt; + struct sxe2_vf_addr elem[]; +}; + +struct sxe2_vf_addr_update_msg { + bool to_user; + __le16 vsi_id; + u8 addr[ETH_ALEN]; +}; + +struct sxe2_vf_promisc_update_msg { + bool to_user; + bool is_promisc; + __le16 vsi_id; +}; + +struct sxe2_vf_link_msg { + __le32 speed; + u8 status; +}; + +struct sxe2_vf_txq_stop_msg { + __le16 q_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_txq_ctxt { + __le16 vsi_id; + __le16 queue_id; + __le16 depth; + __le64 dma_addr; + __le32 sched_mode; +}; + +struct sxe2_vf_txq_ctxt_msg { + __le16 q_cnt; + __le16 vsi_id; + struct sxe2_vf_txq_ctxt ctxs[]; +}; + +struct sxe2_vf_qps_dis_msg { + __le16 qps_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_q_stop_msg { + __le16 vsi_id; + __le16 q_idx; +}; + +struct sxe2_vf_promisc_msg { + bool is_user; + __le16 vsi_id; + u8 resv[2]; + __le32 flags; +}; + +struct sxe2_vf_vlan_caps { + u8 port_vlan_exsit; + __le16 max_cnt; +}; + +struct sxe2_vf_vlan_offload_cfg { + u8 stag_strip_enable; + u8 ctag_strip_enable; + u8 stag_insert_enable; + u8 ctag_insert_enable; +}; + +struct sxe2_vf_vlan_filter_cfg { + bool is_user; + u8 ctag_filter_enable; + u8 stag_filter_enable; +}; + +struct sxe2_vf_vlan { + __le16 vid; + __le16 tpid; +}; + +struct sxe2_vf_vlan_filter_msg { + __le16 vsi_id; + __le16 vlan_cnt; + struct sxe2_vf_vlan elem[]; +}; + +struct sxe2_vf_user_vlan_msg { + bool is_add; + __le16 vsi_id; + struct sxe2_vf_vlan vlan; +}; + +struct sxe2_vf_user_vlan_fltr_msg { + bool is_en; + __le16 vsi_id; +}; + +struct sxe2_vf_rss_hash_ctrl { + u8 hash_func; +}; + +struct 
sxe2_vf_rss_hash_msg { + __le32 headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + __le32 hash_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + __le32 hdr_type; + u8 symm; +}; + +enum sxe2_fnav_flow_type { + SXE2_FNAV_FLOW_TYPE_NONE = SXE2_FLOW_TYPE_NONE, + SXE2_FNAV_FLOW_TYPE_FRAG_IPV4 = SXE2_FLOW_MAC_IPV4_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_ETH = SXE2_FLOW_MAC_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_OTHER = SXE2_FLOW_MAC_IPV4_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_UDP = SXE2_FLOW_MAC_IPV4_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_TCP = SXE2_FLOW_MAC_IPV4_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_SCTP = SXE2_FLOW_MAC_IPV4_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_FRAG_IPV6 = SXE2_FLOW_MAC_IPV6_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER = SXE2_FLOW_MAC_IPV6_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_UDP = SXE2_FLOW_MAC_IPV6_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_TCP = SXE2_FLOW_MAC_IPV6_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_SCTP = SXE2_FLOW_MAC_IPV6_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP = SXE2_FLOW_TYPE_MAX, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP, + SXE2_FNAV_FLOW_TYPE_MAX, +}; + +enum sxe2_fnav_act_type { + SXE2_FNAV_ACTION_DROP = 0, + SXE2_FNAV_ACTION_TC_REDIRECT, + SXE2_FNAV_ACTION_PASSTHRU, + SXE2_FNAV_ACTION_QUEUE, + SXE2_FNAV_ACTION_Q_REGION, + SXE2_FNAV_ACTION_MARK, + SXE2_FNAV_ACTION_COUNT, +}; + +enum sxe2_fnav_tunnel_level { + SXE2_FNAV_TUNNEL_OUTER, + SXE2_FNAV_TUNNEL_INNER, + SXE2_FNAV_TUNNEL_ANY, +}; + +enum sxe2_fnav_tunnel_flag_type { + SXE2_FNAV_TUN_FLAG_NO_TUNNEL, + SXE2_FNAV_TUN_FLAG_TUNNEL, + SXE2_FNAV_TUN_FLAG_ANY, +}; + +struct sxe2_fnav_comm_eth { + u8 dst[SXE2_FNAV_ETH_ADDR_LEN]; + u8 src[SXE2_FNAV_ETH_ADDR_LEN]; + __be16 etype; +}; + +struct sxe2_fnav_comm_vlan { + __be16 vlan_vid; + __be16 vlan_tci; + __be16 vlan_type; +}; + +struct sxe2_fnav_comm_ipv4 { + __be32 saddr; + __be32 daddr; + u8 tos; + u8 ttl; + u8 proto; +}; + +struct sxe2_fnav_comm_ipv6 { + __be32 dst_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + __be32 src_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + u8 tc; + u8 proto; + u8 hlim; +}; + +struct sxe2_fnav_comm_l4 { + __be16 dst_port; + __be16 src_port; +}; + +struct sxe2_fnav_comm_vxlan { + __be32 vni; +}; + +struct sxe2_fnav_comm_geneve { + __be32 vni; +}; + +struct sxe2_fnav_comm_gtpu { + __be32 teid; +}; + +struct sxe2_fnav_comm_gre { + __be32 tni; +}; + +struct sxe2_fnav_comm_proto_hdr { + u8 tunnel_level; + u8 type; + __le32 flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + union { + struct sxe2_fnav_comm_eth eth; + struct sxe2_fnav_comm_vlan vlan; + struct sxe2_fnav_comm_ipv4 ipv4; + struct sxe2_fnav_comm_ipv6 ipv6; + struct sxe2_fnav_comm_l4 l4; + struct sxe2_fnav_comm_vxlan vxlan; + struct sxe2_fnav_comm_geneve geneve; + struct sxe2_fnav_comm_gtpu gtpu; + struct sxe2_fnav_comm_gre gre; + }; +}; + +struct sxe2_fnav_comm_action_queue { + __le16 q_index; +}; + +struct sxe2_fnav_comm_action_queue_region { + __le16 q_index; + u8 region; +}; + +struct sxe2_fnav_comm_action_mark { + __le32 mark_id; +}; + +struct sxe2_fnav_comm_action_count { + __le32 stat_index; + __le32 stat_ctrl; +}; + +struct sxe2_fnav_comm_action { + __le32 type; + union { + struct sxe2_fnav_comm_action_queue act_queue; + struct sxe2_fnav_comm_action_queue_region act_q_region; + struct sxe2_fnav_comm_action_mark act_mark; + struct sxe2_fnav_comm_action_count act_count; + }; +}; + +struct sxe2_fnav_comm_user_data { + u8 has_flex_filed; + u8 resv[3]; + __le16 flex_offset; + __be16 flex_word; +}; + +struct sxe2_fnav_comm_full_msg { + __le32 filter_loc; + __le32 flow_type; + __le32 tunn_flag; + 
u8 action_cnt; + u8 proto_cnt; + u8 rsv[2]; + struct sxe2_fnav_comm_action action[SXE2_FNAV_MAX_NUM_ACTIONS]; + struct sxe2_fnav_comm_proto_hdr proto_hdr[SXE2_FNAV_MAX_NUM_PROTO_HDRS]; + struct sxe2_fnav_comm_user_data usr_data; +}; + +struct sxe2_vf_fnav_filter_del_msg { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_add_filter_resp { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_stat_alloc_req_msg { + u8 need_update; +}; + +struct sxe2_vf_fnav_stat_msg { + __le16 stat_index; +}; + +struct sxe2_vf_fnav_stat_query_req_msg { + __le16 stat_index; + __le32 stat_ctrl; + __le32 is_clear; +}; + +struct sxe2_vf_fnav_stat_query_resp_msg { + __le16 stat_index; + __le64 stat_hits; + __le64 stat_bytes; +}; + +struct sxe2_vf_qv_info { + __le32 v_idx; + __le16 ceq_idx; + __le16 aeq_idx; + u8 itr_idx; + u8 pad[3]; +}; + +struct sxe2_vf_qv_map_msg { + __le32 num_vectors; + struct sxe2_vf_qv_info qv_info[]; +}; + +struct sxe2_vf_rdma_mgr_cmd_msg { + __le32 opcode; + __le32 msg_len; + __le32 resv_len; + u8 msg[]; +}; + +struct sxe2_vf_tm_res { + __le16 teid; +}; + +struct sxe2_vf_tm_info { + __le32 committed; + __le32 peak; + u8 priority; + u8 reserve; + __le16 weight; +}; + +struct sxe2_vf_tm_add_root_msg { + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_node_msg { + __le16 parent_teid; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_queue_msg { + __le16 parent_teid; + __le16 queue_id; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_ptp_clock_res { + __le32 clock_ns; + __le64 clock_s; +}; + +struct sxe2_mbx_obj { + __le32 func_type : 2; + __le32 resv : 2; + __le32 pf_id : 4; + __le32 vf_id : 12; + __le32 resv1 : 4; + __le32 drv_type : 2; + __le32 drv_id : 6; +}; + +struct sxe2_com_user_data_passthrough_req { + struct sxe2_mbx_obj obj; + u32 opcode; + u16 func_id; + u16 vsi_id; + u32 req_len; + u32 resp_len; + u32 buff_len; + u8 cmd_buff[]; +}; + +struct sxe2_com_user_data_passthrough_resp { + u32 buff_len; + u8 cmd_buff[]; +}; + +#define SXE2_MBX_IPSEC_IPV6 BIT(0) +#define SXE2_MBX_IPSEC_SM4 BIT(1) +#define SXE2_MBX_IPSEC_AUTH BIT(2) +#define SXE2_MBX_IPSEC_KEY_LEN (32) +#define SCBGE_MBX_IPSEC_IPV4_LEN (4) +#define SCBGE_MBX_IPSEC_IPV6_LEN (16) + +struct sxe2_vf_ipsec_sa_add_msg { + __le32 spi; + u8 dir; + u8 mode; + u8 rsvd[2]; + __le32 addr[SCBGE_MBX_IPSEC_IPV6_LEN / 4]; + u8 enc_key[SXE2_MBX_IPSEC_KEY_LEN]; + u8 auth_key[SXE2_MBX_IPSEC_KEY_LEN]; + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_add_resp { + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_del_msg { + u8 dir; + u8 rsvd[3]; + __le32 sa_idx; +}; + +struct sxe2vf_get_capa_response { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; +}; + +struct sxe2vf_acl_filter_del_req { + __le32 filter_id; +}; + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD SXE2_VSI_L2TAGSTXVALID_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_VLAN SXE2_VSI_L2TAGSTXVALID_ID_VLAN + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q SXE2_VSI_TSR_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD SXE2_VSI_TSR_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1 SXE2_VSI_TSR_ID_OUT_VLAN2 + +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_VLAN +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID + +#define SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1 SXE2_VSI_TSR_ID_VLAN + 
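
For SXE2_VF_IPSEC_SA_ADD the VF describes the security association and the PF answers with the slot in struct sxe2_vf_ipsec_sa_add_resp. A hedged sketch of building the request for an IPv4 receive-side SA (the helper is hypothetical; the byte-order convention for addr[] and the use of SXE2_IPSEC_INVAILID_SA_IDX to ask the PF to allocate are assumptions, not confirmed by this patch):

static void sxe2_vf_ipsec_rx_sa_req(struct sxe2_vf_ipsec_sa_add_msg *req,
				    u32 spi, u32 dst_ip,
				    const u8 *enc_key, const u8 *auth_key)
{
	memset(req, 0, sizeof(*req));			/* mode and rsvd[] stay zero */
	req->spi = cpu_to_le32(spi);
	req->dir = SXE2_IPSEC_DIR_RX;
	req->addr[0] = cpu_to_le32(dst_ip);		/* IPv4 uses 4 of the 16 address bytes */
	memcpy(req->enc_key, enc_key, SXE2_MBX_IPSEC_KEY_LEN);
	memcpy(req->auth_key, auth_key, SXE2_MBX_IPSEC_KEY_LEN);
	req->sa_idx = cpu_to_le32(SXE2_IPSEC_INVAILID_SA_IDX);
}
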
+#define SXE2_DPDK_OFFLOAD_FIELD (0X0F) +#define SXE2_DPDK_OFFLOAD_TAGID_FIELD (0X07) + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK (SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1) +#define SXE2_DPDK_OFFLOAD_STRIP_OFFSET SXE2_VSI_TSR_SHOW_TAG_S + +#define SXE2_DPDK_OFFLOAD_INSERT_ENABLE (BIT(3)) + +struct sxe2_dpdk_portvlan_cfg { + u16 vf_idx; + u16 tpid; + u16 vid; + u8 prio; + u8 rsv; +}; + +struct sxe2vf_rdma_dump_pcap_msg { + u8 mac[ETH_ALEN]; + u8 rsvd[2]; + bool is_add; + u8 rsvd1[3]; +}; + +struct sxe2_vf_vsi_cfg { + bool is_clear; + __le16 txq_base_idx; + __le16 txq_cnt; + __le16 rxq_base_idx; + __le16 rxq_cnt; + __le16 irq_base_idx; + __le16 irq_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_user_driver_release { + u8 func_id; + u8 drv_id; +}; + +#pragma pack() +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2flow/sxe2_flow_public.h b/drivers/infiniband/hw/sxe2rdma/common/sxe2flow/sxe2_flow_public.h new file mode 100644 index 0000000000000000000000000000000000000000..077c8d0cd1a2ae444d7877760fda10deb86b111d --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/common/sxe2flow/sxe2_flow_public.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_flow_public.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_FLOW_PUBLIC_H__ +#define __SXE2_FLOW_PUBLIC_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_osal.h" +#endif +enum sxe2_flow_type { + SXE2_FLOW_TYPE_NONE = 0, + SXE2_FLOW_MAC_PAY = 1, + SXE2_FLOW_MAC_IPV4_FRAG_PAY = 22, + SXE2_FLOW_MAC_IPV4_PAY = 23, + SXE2_FLOW_MAC_IPV4_UDP_PAY = 24, + SXE2_FLOW_MAC_IPV4_TCP_PAY = 26, + SXE2_FLOW_MAC_IPV4_SCTP_PAY = 27, + SXE2_FLOW_MAC_IPV4_IPV4_FRAG_PAY = 29, + SXE2_FLOW_MAC_IPV4_IPV4_PAY = 30, + SXE2_FLOW_MAC_IPV4_IPV4_UDP_PAY = 31, + SXE2_FLOW_MAC_IPV4_IPV4_TCP_PAY = 33, + SXE2_FLOW_MAC_IPV4_IPV4_SCTP_PAY = 34, + SXE2_FLOW_MAC_IPV4_IPV6_FRAG_PAY = 36, + SXE2_FLOW_MAC_IPV4_IPV6_PAY = 37, + SXE2_FLOW_MAC_IPV4_IPV6_UDP_PAY = 38, + SXE2_FLOW_MAC_IPV4_IPV6_TCP_PAY = 40, + SXE2_FLOW_MAC_IPV4_IPV6_SCTP_PAY = 41, + SXE2_FLOW_MAC_IPV4_GRE_PAY = 43, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_FRAG_PAY = 44, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_PAY = 45, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_UDP_PAY = 46, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_TCP_PAY = 48, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_SCTP_PAY = 49, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_FRAG_PAY = 51, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_PAY = 52, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_UDP_PAY = 53, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_TCP_PAY = 55, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_SCTP_PAY = 56, + SXE2_FLOW_MAC_IPV4_GRE_MAC_PAY = 58, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_FRAG_PAY = 59, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_PAY = 60, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_UDP_PAY = 61, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_TCP_PAY = 63, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_SCTP_PAY = 64, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_FRAG_PAY = 66, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_PAY = 67, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_UDP_PAY = 68, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_TCP_PAY = 70, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_SCTP_PAY = 71, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_PAY = 73, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_FRAG_PAY = 74, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_PAY = 75, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_UDP_PAY = 76, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_TCP_PAY = 78, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_SCTP_PAY = 79, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_FRAG_PAY = 81, + 
SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_PAY = 82, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_UDP_PAY = 83, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_TCP_PAY = 85, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_SCTP_PAY = 86, + SXE2_FLOW_MAC_IPV6_FRAG_PAY = 88, + SXE2_FLOW_MAC_IPV6_PAY = 89, + SXE2_FLOW_MAC_IPV6_UDP_PAY = 90, + SXE2_FLOW_MAC_IPV6_TCP_PAY = 92, + SXE2_FLOW_MAC_IPV6_SCTP_PAY = 93, + SXE2_FLOW_MAC_IPV6_IPV4_FRAG_PAY = 95, + SXE2_FLOW_MAC_IPV6_IPV4_PAY = 96, + SXE2_FLOW_MAC_IPV6_IPV4_UDP_PAY = 97, + SXE2_FLOW_MAC_IPV6_IPV4_TCP_PAY = 99, + SXE2_FLOW_MAC_IPV6_IPV4_SCTP_PAY = 100, + SXE2_FLOW_MAC_IPV6_IPV6_FRAG_PAY = 102, + SXE2_FLOW_MAC_IPV6_IPV6_PAY = 103, + SXE2_FLOW_MAC_IPV6_IPV6_UDP_PAY = 104, + SXE2_FLOW_MAC_IPV6_IPV6_TCP_PAY = 106, + SXE2_FLOW_MAC_IPV6_IPV6_SCTP_PAY = 107, + SXE2_FLOW_MAC_IPV6_GRE_PAY = 109, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_FRAG_PAY = 110, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_PAY = 111, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_UDP_PAY = 112, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_TCP_PAY = 114, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_SCTP_PAY = 115, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_FRAG_PAY = 117, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_PAY = 118, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_UDP_PAY = 119, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_TCP_PAY = 121, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_SCTP_PAY = 122, + SXE2_FLOW_MAC_IPV6_GRE_MAC_PAY = 124, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_FRAG_PAY = 125, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_PAY = 126, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_UDP_PAY = 127, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_TCP_PAY = 129, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_SCTP_PAY = 130, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_FRAG_PAY = 132, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_PAY = 133, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_UDP_PAY = 134, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_TCP_PAY = 136, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_SCTP_PAY = 137, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_PAY = 139, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_FRAG_PAY = 140, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_PAY = 141, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_UDP_PAY = 142, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_TCP_PAY = 144, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_SCTP_PAY = 145, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_FRAG_PAY = 147, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_PAY = 148, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_UDP_PAY = 149, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_TCP_PAY = 151, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_SCTP_PAY = 152, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_PAY = 329, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_PAY = 330, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_FRAG_PAY = 331, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_PAY = 332, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_UDP_PAY = 333, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_TCP_PAY = 334, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_SCTP_PAY = 335, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_FRAG_PAY = 336, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_PAY = 337, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_UDP_PAY = 338, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_TCP_PAY = 339, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_SCTP_PAY = 340, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_FRAG_PAY = 341, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_PAY = 342, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_UDP_PAY = 343, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_TCP_PAY = 344, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_SCTP_PAY = 345, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_FRAG_PAY = 346, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_PAY = 347, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_UDP_PAY = 348, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_TCP_PAY = 349, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_SCTP_PAY = 350, + SXE2_FLOW_MAC_IPV6_MAC_PAY = 820, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_FRAG_PAY = 821, + 
SXE2_FLOW_MAC_IPV6_MAC_IPV4_PAY = 822, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_UDP_PAY = 823, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_TCP_PAY = 824, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_SCTP_PAY = 825, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_FRAG_PAY = 827, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_PAY = 828, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_UDP_PAY = 829, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_TCP_PAY = 830, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_SCTP_PAY = 831, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_PAY = 835, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_FRAG_PAY = 836, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_PAY = 837, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_UDP_PAY = 838, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_TCP_PAY = 839, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_SCTP_PAY = 840, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_FRAG_PAY = 842, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_PAY = 843, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_UDP_PAY = 844, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_TCP_PAY = 845, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_SCTP_PAY = 846, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_PAY = 878, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_FRAG_PAY = 877, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_PAY = 876, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_UDP_PAY = 879, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_TCP_PAY = 880, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_SCTP_PAY = 875, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_FRAG_PAY = 871, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_PAY = 870, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_UDP_PAY = 872, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_TCP_PAY = 873, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_SCTP_PAY = 869, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_PAY = 891, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_FRAG_PAY = 890, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_PAY = 889, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_UDP_PAY = 892, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_TCP_PAY = 893, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_SCTP_PAY = 888, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_FRAG_PAY = 884, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_PAY = 883, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_UDP_PAY = 885, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_TCP_PAY = 886, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_SCTP_PAY = 882, + SXE2_FLOW_MAC_IPV6_UDP_GRE_PAY = 904, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_FRAG_PAY = 903, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_PAY = 902, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_UDP_PAY = 905, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_TCP_PAY = 906, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_SCTP_PAY = 901, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_FRAG_PAY = 897, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_PAY = 896, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_UDP_PAY = 898, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_TCP_PAY = 899, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_SCTP_PAY = 895, + SXE2_FLOW_MAC_IPV4_UDP_GRE_PAY = 917, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_FRAG_PAY = 916, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_PAY = 915, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_UDP_PAY = 918, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_TCP_PAY = 919, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_SCTP_PAY = 914, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_FRAG_PAY = 910, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_PAY = 909, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_UDP_PAY = 911, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_TCP_PAY = 912, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_SCTP_PAY = 908, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_PAY = 930, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 929, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 928, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 931, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 932, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 927, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 923, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 922, + 
SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 924, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 925, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 921, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_PAY = 943, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 942, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 941, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 944, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 945, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 940, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 936, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 935, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 937, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 938, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 934, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_PAY = 956, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 955, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_PAY = 954, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 957, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 958, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 953, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 949, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_PAY = 948, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 950, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 951, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 947, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_PAY = 969, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 968, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_PAY = 967, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 970, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 971, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 966, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 962, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_PAY = 961, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 963, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 964, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 960, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_PAY = 982, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 981, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_PAY = 980, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_UDP_PAY = 983, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_TCP_PAY = 984, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 979, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 975, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_PAY = 974, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_UDP_PAY = 976, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_TCP_PAY = 977, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 973, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_PAY = 995, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 994, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_PAY = 993, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_UDP_PAY = 996, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_TCP_PAY = 997, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 992, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 988, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_PAY = 987, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_UDP_PAY = 989, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_TCP_PAY = 990, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 986, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_PAY = 1008, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_FRAG_PAY = 1007, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_PAY = 1006, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_UDP_PAY = 1009, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_TCP_PAY = 1010, + 
SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_SCTP_PAY = 1005, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_FRAG_PAY = 1001, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_PAY = 1000, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_UDP_PAY = 1002, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_TCP_PAY = 1003, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_SCTP_PAY = 999, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_PAY = 1021, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_FRAG_PAY = 1020, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_PAY = 1019, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_UDP_PAY = 1022, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_TCP_PAY = 1023, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_SCTP_PAY = 1018, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_FRAG_PAY = 1014, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_PAY = 1013, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_UDP_PAY = 1015, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_TCP_PAY = 1016, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_SCTP_PAY = 1012, + SXE2_FLOW_TYPE_MAX = 2048, +}; + +enum sxe2_rss_cfg_hdr_type { + SXE2_RSS_OUTER_HEADERS, + SXE2_RSS_INNER_HEADERS, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GTPU, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GTPU, + SXE2_RSS_ANY_HEADERS +}; + +enum sxe2_flow_hdr { + SXE2_FLOW_HDR_ETH = 0, + SXE2_FLOW_HDR_VLAN, + SXE2_FLOW_HDR_QINQ, + SXE2_FLOW_HDR_IPV4, + SXE2_FLOW_HDR_IPV6, + SXE2_FLOW_HDR_ICMP = 5, + SXE2_FLOW_HDR_TCP, + SXE2_FLOW_HDR_UDP, + SXE2_FLOW_HDR_SCTP, + SXE2_FLOW_HDR_GRE, + SXE2_FLOW_HDR_VXLAN = 10, + SXE2_FLOW_HDR_GENEVE, + SXE2_FLOW_HDR_GTPU, + SXE2_FLOW_HDR_IPV_FRAG, + SXE2_FLOW_HDR_IPV_OTHER, + SXE2_FLOW_HDR_ETH_NON_IP = 15, + SXE2_FLOW_HDR_MAX = 128, +}; + +enum sxe2_flow_fld_id { + SXE2_FLOW_FLD_ID_ETH_DA = 0, + SXE2_FLOW_FLD_ID_ETH_SA, + SXE2_FLOW_FLD_ID_S_TCI, + SXE2_FLOW_FLD_ID_C_TCI, + SXE2_FLOW_FLD_ID_S_TPID, + SXE2_FLOW_FLD_ID_C_TPID = 5, + SXE2_FLOW_FLD_ID_S_VID, + SXE2_FLOW_FLD_ID_C_VID, + SXE2_FLOW_FLD_ID_ETH_TYPE, + SXE2_FLOW_FLD_ID_IPV4_TOS, + SXE2_FLOW_FLD_ID_IPV6_DSCP = 10, + SXE2_FLOW_FLD_ID_IPV4_TTL, + SXE2_FLOW_FLD_ID_IPV4_PROT, + SXE2_FLOW_FLD_ID_IPV6_TTL, + SXE2_FLOW_FLD_ID_IPV6_PROT, + SXE2_FLOW_FLD_ID_IPV4_SA = 15, + SXE2_FLOW_FLD_ID_IPV4_DA, + SXE2_FLOW_FLD_ID_IPV6_SA, + SXE2_FLOW_FLD_ID_IPV6_DA, + SXE2_FLOW_FLD_ID_IPV4_CHKSUM, + SXE2_FLOW_FLD_ID_IPV4_ID = 20, + SXE2_FLOW_FLD_ID_IPV6_ID, + SXE2_FLOW_FLD_ID_IPV6_PRE32_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE32_DA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_DA = 25, + SXE2_FLOW_FLD_ID_IPV6_PRE64_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE64_DA, + SXE2_FLOW_FLD_ID_TCP_SRC_PORT, + SXE2_FLOW_FLD_ID_TCP_DST_PORT, + SXE2_FLOW_FLD_ID_UDP_SRC_PORT = 30, + SXE2_FLOW_FLD_ID_UDP_DST_PORT, + SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, + SXE2_FLOW_FLD_ID_SCTP_DST_PORT, + SXE2_FLOW_FLD_ID_TCP_FLAGS, + SXE2_FLOW_FLD_ID_TCP_CHKSUM = 35, + SXE2_FLOW_FLD_ID_UDP_CHKSUM, + SXE2_FLOW_FLD_ID_SCTP_CHKSUM, + SXE2_FLOW_FLD_ID_VXLAN_VNI, + SXE2_FLOW_FLD_ID_GENEVE_VNI, + SXE2_FLOW_FLD_ID_GTPU_TEID = 40, + SXE2_FLOW_FLD_ID_NVGRE_TNI, + + SXE2_FLOW_FLD_ID_MAX = 128, +}; + +struct sxe2_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be16 ether_type; +}; + 
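
SXE2_FLOW_HDR_* and SXE2_FLOW_FLD_ID_* are bit positions, each capped at 128 so they fit the DECLARE_BITMAP() fields of struct sxe2_flow_pattern and struct sxe2_flow_action_rss further down. As an illustrative sketch (the helper is hypothetical), selecting an outer IPv4/UDP pattern that matches on addresses and ports looks like:

static void sxe2_flow_select_ipv4_udp(unsigned long *hdrs, unsigned long *flds)
{
	bitmap_zero(hdrs, SXE2_FLOW_HDR_MAX);
	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);

	set_bit(SXE2_FLOW_HDR_ETH, hdrs);		/* header stack: eth/ipv4/udp */
	set_bit(SXE2_FLOW_HDR_IPV4, hdrs);
	set_bit(SXE2_FLOW_HDR_UDP, hdrs);

	set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, flds);	/* fields to match on */
	set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, flds);
	set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, flds);
	set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, flds);
}

The same bitmaps double as spec/mask selectors (map_spec/map_mask) in struct sxe2_flow_pattern, with the concrete values carried in struct sxe2_flow_item.
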
+struct sxe2_vlan_hdr { + __be16 type; + __be16 vlan; +}; + +struct sxe2_ipv4_hdr { + u8 ver_ihl; + u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + u8 ttl; + u8 protocol; + __be16 check; + __be32 saddr; + __be32 daddr; +}; + +#define SXE2_IPV6_ADDR_LENGTH (16) +#define SXE2_IPV6_TC_SHIFT (20) +#define SXE2_IPV6_TC_MASK (0xFF) +struct sxe2_ipv6_hdr { + __be32 pri_ver_flow; + __be16 payload_len; + u8 nexthdr; + u8 hop_limit; + union { + u8 saddr[16]; + __be16 saddr16[8]; + __be32 saddr32[4]; + }; + union { + u8 daddr[16]; + __be16 daddr16[8]; + __be32 daddr32[4]; + }; +}; + +struct sxe2_tcp_hdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __be16 flag; + __be16 window; + __be16 check; + __be16 urg_ptr; +}; + +struct sxe2_udp_hdr { + __be16 source; + __be16 dest; + __be16 len; + __be16 check; +}; + +struct sxe2_sctp_hdr { + __be16 src_port; + __be16 dst_port; +}; + +struct sxe2_nvgre_hdr { + __be16 flags; + __be16 protocol; + __be32 tni; +}; + +struct sxe2_geneve_hdr { + __be16 flags; + __be16 protocol; + __be32 vni; +}; + +struct sxe2_gtpu_hdr { + u8 flag; + u8 msg_type; + __be16 msg_len; + __be32 teid; +}; + +struct sxe2_vxlan_hdr { + u8 flag; + u8 resvd0; + u8 resvd1; + u8 protocol; + __be32 vni; +}; + +enum sxe2_flow_act_type { + SXE2_FLOW_ACTION_DROP = 0, + SXE2_FLOW_ACTION_TC_REDIRECT, + SXE2_FLOW_ACTION_TO_VSI, + SXE2_FLOW_ACTION_TO_VSI_LIST, + SXE2_FLOW_ACTION_PASSTHRU, + SXE2_FLOW_ACTION_QUEUE, + SXE2_FLOW_ACTION_Q_REGION, + SXE2_FLOW_ACTION_MARK, + SXE2_FLOW_ACTION_COUNT, + SXE2_FLOW_ACTION_RSS, + SXE2_FLOW_ACTION_MAX = 32, +}; + +enum sxe2_rss_hash_key_func { + SXE2_RSS_HASH_FUNC_TOEPLITZ = 0, + SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ = 1, + SXE2_RSS_HASH_FUNC_XOR = 2, + SXE2_RSS_HASH_FUNC_JEKINS = 3 +}; + +struct sxe2_flow_action_rss { + DECLARE_BITMAP(hdr_out, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hdr_in, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(fld, SXE2_FLOW_FLD_ID_MAX); + u8 is_inner; + u8 func; + u8 hdr_type; +}; + +struct sxe2_flow_action_queue { + u16 vsi_index; + u16 q_index; +}; + +struct sxe2_flow_action_queue_region { + u16 vsi_index; + u16 q_index; + u8 region; +}; + +struct sxe2_flow_action_passthru { + u16 vsi_index; +}; + +struct sxe2_flow_action_mark { + u32 mark_id; +}; + +#define SXE2_VSI_MAX (2048) +struct sxe2_flow_action_vsi { + u16 vsi_index; +}; + +struct sxe2_flow_action_vsi_list { + DECLARE_BITMAP(vsi_list_map, SXE2_VSI_MAX); + u16 vsi_cnt; +}; + +enum sxe2_fnav_stat_ctrl_type { + SXE2_FNAV_STAT_ENA_NONE = 0, + SXE2_FNAV_STAT_ENA_PKTS, + SXE2_FNAV_STAT_ENA_BYTES, + SXE2_FNAV_STAT_ENA_ALL, +}; + +struct sxe2_flow_action_count { + u32 user_id; + u32 driver_id; + u32 stat_index; + u32 stat_ctrl; +}; + +enum sxe2_flow_engine_type { + SXE2_FLOW_ENGINE_ACL, + SXE2_FLOW_ENGINE_SWITCH, + SXE2_FLOW_ENGINE_FNAV, + SXE2_FLOW_ENGINE_RSS, + SXE2_FLOW_ENGINE_MAX, +}; + +struct sxe2_flow_item { + struct sxe2_ether_hdr eth; + struct sxe2_vlan_hdr vlan; + struct sxe2_vlan_hdr qinq; + struct sxe2_ipv4_hdr ipv4; + struct sxe2_ipv6_hdr ipv6; + struct sxe2_udp_hdr udp; + struct sxe2_tcp_hdr tcp; + struct sxe2_sctp_hdr sctp; + struct sxe2_gtpu_hdr gtpu; + struct sxe2_vxlan_hdr vxlan; + struct sxe2_nvgre_hdr nvgre; + struct sxe2_geneve_hdr geneve; +}; + +enum sxe2_flow_sw_direct_type { + SXE2_FLOW_SW_DIRECT_TX, + SXE2_FLOW_SW_DIRECT_RX, + SXE2_FLOW_SW_DIRECT_MAX, +}; + +enum sxe2_flow_sw_pattern_type { + SXE2_FLOW_SW_PATTERN_ONLY, + SXE2_FLOW_SW_PATTERN_LAST, + SXE2_FLOW_SW_PATTERN_FIRST, + SXE2_FLOW_SW_PATTERN_MAX, +}; + +enum sxe2_flow_tunnel_type { + 
SXE2_FLOW_TUNNEL_TYPE_NONE,
+	SXE2_FLOW_TUNNEL_TYPE_PARENT,
+	SXE2_FLOW_TUNNEL_TYPE_VXLAN,
+	SXE2_FLOW_TUNNEL_TYPE_GTPU,
+	SXE2_FLOW_TUNNEL_TYPE_GENEVE,
+	SXE2_FLOW_TUNNEL_TYPE_GRE,
+	SXE2_FLOW_TUNNEL_TYPE_IPIP,
+};
+
+struct sxe2_flow_meta {
+	u8 switch_pattern_dup_allow;
+	u8 switch_src_direct;
+	u16 flow_src_vsi;
+	u16 flow_rule_vsi;
+	u32 flow_prio;
+	u16 flow_type;
+	u8 tunnel_type;
+	u8 rsv;
+};
+
+struct sxe2_flow_pattern {
+	DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX);
+	DECLARE_BITMAP(map_spec, SXE2_FLOW_FLD_ID_MAX);
+	DECLARE_BITMAP(map_mask, SXE2_FLOW_FLD_ID_MAX);
+	struct sxe2_flow_item item_spec;
+	struct sxe2_flow_item item_mask;
+	u64 rss_type_allow;
+};
+
+struct sxe2_flow_action {
+	DECLARE_BITMAP(act_types, SXE2_FLOW_ACTION_MAX);
+	struct sxe2_flow_action_rss rss;
+	struct sxe2_flow_action_queue queue;
+	struct sxe2_flow_action_queue_region q_region;
+	struct sxe2_flow_action_passthru passthru;
+	struct sxe2_flow_action_vsi vsi;
+	struct sxe2_flow_action_vsi_list vsi_list;
+	struct sxe2_flow_action_mark mark;
+	struct sxe2_flow_action_count count;
+};
+#endif
+
diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_drv_aux.h b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_drv_aux.h
new file mode 100644
index 0000000000000000000000000000000000000000..05a61d10627786016cb56e41c3f93a4c3bb4ba3a
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_drv_aux.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_aux.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _SXE2_DRV_AUX_H_
+#define _SXE2_DRV_AUX_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/dcbnl.h>
+#include "sxe2_compat.h"
+#include "sxe2_cmd.h"
+
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+
+#define AUX_MAJOR_VER (1)
+#define AUX_MINOR_VER (1)
+#define SXE2_RDMA_INDEX (0x1)
+#define AUX_RDMA_INVALID_PF_IDX (0xFF)
+#define AUX_MAX_USER_PRIORITY (8)
+#define AUX_MAX_APPS (64)
+#define AUX_MAX_DSCP_MAPPING (64)
+#define AUX_MAX_NUM_AUX (5)
+#define SXE2_RDMA_VCHNL_Q_INVALID_IDX (0xFFFF)
+
+#define SXE2_RDMA_INVALID_PF 0xFF
+#define SXE2_RDMA_PF0 BIT(0)
+#define SXE2_RDMA_PF1 BIT(1)
+#define SXE2_RDMA_BOTH_PF 0x3
+
+#define SXE2_DRV_VER_STR_LEN 32
+
+enum aux_rdma_opcode {
+	RDMA_MAC_RULE_ADD,
+	RDMA_MAC_RULE_DELETE,
+	RDMA_OPCODE_MAX
+};
+
+enum aux_reset_type {
+	AUX_PFR,
+	AUX_CORER,
+	AUX_GLOBR,
+};
+
+enum aux_function_type {
+	AUX_FUNCTION_TYPE_PF,
+	AUX_FUNCTION_TYPE_VF,
+};
+
+enum aux_rdma_gen {
+	AUX_RDMA_GEN_RESERVED = 0,
+	AUX_RDMA_GEN_1 = 1,
+	AUX_RDMA_GEN_2 = 2,
+	AUX_RDMA_GEN_3 = 3,
+};
+
+struct aux_rdma_caps {
+	u8 gen;
+};
+
+enum aux_event_type {
+	SXE2_EVENT_MTU_CHANGED,
+	SXE2_EVENT_NOTIFY_RESET,
+	SXE2_EVENT_VF_RESET,
+	SXE2_EVENT_AEQ_OVERFLOW,
+	SXE2_EVENT_FAILOVER,
+	SXE2_EVENT_TC_CHANGE,
+	SXE2_EVENT_MAX
+};
+
+struct aux_ver_info {
+	u16 major;
+	u16 minor;
+	u64 support;
+};
+
+struct aux_core_dev_info;
+
+struct aux_rdma_qset_params {
+	u16 teid;
+	u16 qset_id;
+	u16 vport_id;
+	u8 tc[2];
+	u8 user_pri;
+	u8 qset_port;
+};
+
+struct aux_rdma_multi_qset_params {
+	u16 teid[2];
+	u16 qset_id[2];
+	u8 qset_port[2];
+	u16 vport_id;
+	u8 tc[2];
+	u8 num;
+	u8 rdma_port[2];
+	u8 active_ports;
+	u8 user_pri;
+};
+
+struct aux_qos_info {
+	u64 tc_ctx;
+	u8 rel_bw;
+	u8 prio_type;
+	u8 egress_virt_up;
+	u8 ingress_virt_up;
+};
+
+struct aux_dcb_app_info {
+	u8 priority;
+	u8 selector;
+	u16 prot_id;
+};
+
+struct aux_qos_params {
struct aux_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[AUX_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u32 num_apps; + u8 pfc_mode; + struct aux_dcb_app_info apps[AUX_MAX_APPS]; + u8 dscp_map[AUX_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +struct aux_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct aux_qvlist_info { + u32 num_vectors; + struct aux_qv_info qv_info[]; +}; + +struct aux_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +struct sxe2_core_ops { + int (*alloc_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*free_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*request_reset)(struct aux_core_dev_info *cdev_info, + enum aux_reset_type reset_type); + int (*update_vport_filter)(struct aux_core_dev_info *cdev_info, + u16 vport_id, bool enable); + int (*get_vf_info)(struct aux_core_dev_info *cdev_info, + u16 vf_id, struct aux_vf_port_info *vf_port_info); + int (*vc_send)(struct aux_core_dev_info *cdev_info, + u16 vf_id, u8 *msg, u16 len, u64 session_id); + int (*vc_send_sync)(struct aux_core_dev_info *cdev_info, u8 *msg, + u16 len, u8 *recv_msg, u16 recv_len); + int (*rdma_send_cmd)(struct aux_core_dev_info *cdev_info, + enum sxe2_drv_cmd_opcode opcode, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len); + int (*rdma_drv_config)(struct aux_core_dev_info *cdev_info, u8 opcode, u8 *msg); + int (*vc_queue_vec_map_unmap)(struct aux_core_dev_info *cdev_info, + struct aux_qvlist_info *qvl_info, bool map); + int (*alloc_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*free_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*dump_pcap_cmd)(struct aux_core_dev_info *cdev_info, u8 *mac, bool is_add); + void (*notify_rdma_load)(struct aux_core_dev_info *cdev_info, bool loaded); + u32 (*rdma_get_link_speed)(struct aux_core_dev_info *cdev_info); +}; + +struct sxe2_rdma_event_info { + DECLARE_BITMAP(type, SXE2_EVENT_MAX); + u16 vf_id; + struct aux_qos_params port_qos; +}; + +struct aux_core_dev_info { + struct pci_dev *pdev; + struct auxiliary_device *adev; + u8 __iomem *hw_addr; + struct aux_ver_info ver; + char drv_ver[SXE2_DRV_VER_STR_LEN]; + enum aux_function_type ftype; + const struct sxe2_aux_ops *aux_ops; + struct sxe2_core_ops *ops; + int cdev_info_id; + u8 pf_id; + u8 pf_cnt; + u16 vfid_base; + u16 vport_id; + struct aux_qos_params qos_info[2]; + struct net_device *netdev; + struct msix_entry *msix_entries; + u32 msix_count; + struct aux_rdma_caps rdma_caps; + struct sxe2_adapter *adapter; + u8 bond_mode; + u8 rdma_pf_bitmap; + void *ext_ops; + void *ext_info; +}; + +struct sxe2_aux_ops { + void (*event_handler)(struct aux_core_dev_info *cdev_info, + struct sxe2_rdma_event_info *event); + int (*vc_receive)(struct aux_core_dev_info *cdev_info, + u32 vf_id, u8 *msg, u16 len, u64 session_id); +}; + +struct sxe2_auxiliary_device { + struct auxiliary_device adev; + struct aux_core_dev_info *cdev_info; +}; + +struct sxe2_auxiliary_drv { + struct auxiliary_driver adrv; + struct sxe2_aux_ops aux_ops; +}; + +void sxe2_rdma_aux_adev_release(struct device *dev); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.c b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..953ec8bc0d27d510fa566867ee5e17caaebf3d64 --- /dev/null +++ 
b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.c @@ -0,0 +1,1710 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_hw.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_ethdev.h" +#else +#include "sxe2.h" +#endif +#include "sxe2_hw.h" +#include "sxe2_log.h" +#include "sxe2_spec.h" + +#ifdef SXE2_CFG_DEBUG +extern int reg_log; +#endif + +#ifdef SXE2_DPDK_DRIVER +#define SXE2_HW_REG_WRITE(hw, reg, value) \ + do { \ + (void)hw; \ + rte_write32(value, reg); \ + } while (0) + +#define SXE2_HW_REG_READ(hw, reg) ((void)(hw), rte_read32(reg)) + +#define SXE2_HW_PCI_DEV(_ad) \ + (RTE_DEV_TO_PCI(rte_eth_devices[_ad->dev_info.dev_data->port_id].device)) +#else +#define SXE2_HW_REG_WRITE(hw, reg, value) hw->reg_write(value, reg) +#define SXE2_HW_REG_READ(hw, reg) hw->reg_read(reg) +#endif + +#define SXE2_SET_USED(x) ((void)(x)) + +void __iomem *sxe2_reg_addr_get(struct sxe2_hw *hw, resource_size_t reg) +{ +#ifdef SXE2_DPDK_DRIVER + return (u8 __iomem *)hw->hw_map + reg; +#else + u32 i; + struct sxe2_map_info *map; + struct sxe2_hw_map *hw_addr = (struct sxe2_hw_map *)hw->hw_map; + + if (WARN_ON(!hw_addr)) + return (void __iomem *)ERR_PTR(-SXE2_HW_ERR_FAULT); + + for (i = 0, map = hw_addr->maps; i < hw_addr->map_cnt; i++, map++) + if (reg >= map->start && reg < map->end) + return (u8 __iomem *)map->addr + (reg - map->start); + + LOG_WARN("Unable to map register address 0x%llx to kernel address", + (unsigned long long)reg); + + return (void __iomem *)ERR_PTR(-SXE2_HW_ERR_FAULT); +#endif +} + +bool sxe2_hw_is_fault(struct sxe2_hw *hw) +{ + u32 val; + + val = sxe2_hw_read_pcie_sys_ready(hw); + + return val == SXE2_REG_INVALID_VALUE; +} + +u32 sxe2_read_reg(struct sxe2_hw *hw, u32 reg) +{ + u32 value; + struct sxe2_adapter *adapter = hw->adapter; + u8 __iomem *reg_addr = sxe2_reg_addr_get(hw, reg); + + SXE2_SET_USED(adapter); + + if (IS_ERR(reg_addr)) { + LOG_DEBUG_BDF("reg addr:0x%x is error.\n", reg); + value = SXE2_REG_INVALID_VALUE; + goto l_ret; + } + + value = SXE2_HW_REG_READ(hw, reg_addr); + +#ifdef SXE2_CFG_DEBUG + if (reg_log) + LOG_DEBUG_BDF("reg: 0x%x, value: 0x%x\n", reg, value); +#endif + +l_ret: + return value; +} + +STATIC u32 sxe2_read_reg_valid(struct sxe2_hw *hw, u32 reg, u32 dflt_val) +{ + u32 val = sxe2_read_reg(hw, reg); + + return val == SXE2_REG_INVALID_VALUE ? 
dflt_val : val; +} + +u64 sxe2_read_reg64(struct sxe2_hw *hw, u32 reg) +{ + u32 low, high; + + low = sxe2_read_reg(hw, reg); + high = sxe2_read_reg(hw, reg + 4); + return low + ((u64)high << 32); +} + +void sxe2_write_reg(struct sxe2_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = sxe2_reg_addr_get(hw, reg); + struct sxe2_adapter *adapter = hw->adapter; + + SXE2_SET_USED(adapter); + + if (unlikely(sxe2_hw_is_fault(hw)) || IS_ERR(reg_addr)) + goto l_ret; + + SXE2_HW_REG_WRITE(hw, reg_addr, value); + +#ifdef SXE2_CFG_DEBUG + if (reg_log) + LOG_DEBUG_BDF("reg:0x%x write value:0x%x read value:0x%x.\n", reg, + value, SXE2_HW_REG_READ(hw, reg_addr)); +#endif + +l_ret: + return; +} + +static void sxe2_hw_evt_irq_cause_cfg(struct sxe2_hw *hw, u32 cause) +{ + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, 0); + (void)sxe2_read_reg(hw, SXE2_PF_INT_OICR); + (void)sxe2_hw_fw_irq_cause_get(hw); + + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, cause); +} + +static void sxe2_hw_evt_irq_cause_enable(struct sxe2_hw *hw, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = (irq_idx & SXE2_PF_INT_OICR_CTL_MSIX_IDX) | + (itr_idx << SXE2_PF_INT_OICR_CTL_ITR_IDX_S & + SXE2_PF_INT_OICR_CTL_ITR_IDX) | + SXE2_PF_INT_OICR_CTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_OICR_CTL, value); +} + +void sxe2_hw_evt_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_OICR_CTL, 0); + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, 0); + (void)sxe2_read_reg(hw, SXE2_PF_INT_OICR); +} + +u32 sxe2_hw_evt_irq_mask_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); +} + +u32 sxe2_hw_evt_irq_cause_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg_valid(hw, SXE2_PF_INT_OICR, 0); +} + +void sxe2_hw_evt_irq_cfg(struct sxe2_hw *hw, u32 value, u16 itr_idx, u16 irq_idx) +{ + sxe2_hw_evt_irq_cause_cfg(hw, value); + + sxe2_hw_evt_irq_cause_enable(hw, itr_idx, irq_idx); +} + +void sxe2_hw_fwq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx) +{ + u32 value; + (void)itr_idx; + value = (irq_idx & SXE2_PF_INT_FWQ_CTL_MSIX_IDX) | + SXE2_PF_INT_FWQ_CTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_FWQ_CTL, value); +} + +void sxe2_hw_mbxq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx) +{ + u32 value; + (void)itr_idx; + value = (irq_idx & SXE2_PF_INT_MBX_CTL_MSIX_IDX) | + SXE2_PF_INT_MBX_CTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_MBX_CTL, value); +} + +void sxe2_hw_fwq_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_FWQ_CTL, 0); +} + +void sxe2_hw_mbxq_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_MBX_CTL, 0); +} + +void sxe2_hw_irq_enable(struct sxe2_hw *hw, u16 irq_idx) +{ + u32 value = SXE2_VF_DYN_CTL_INTENABLE | + SXE2_VF_DYN_CTL_CLEARPBA | + (SXE2_ITR_IDX_NONE + << SXE2_VF_DYN_CTL_ITR_IDX_S); + + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_disable(struct sxe2_hw *hw, u16 irq_idx) +{ + u32 value = (SXE2_ITR_IDX_NONE << SXE2_VF_DYN_CTL_ITR_IDX_S); + + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_trigger(struct sxe2_hw *hw, u16 irq_idx) +{ + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), + (SXE2_ITR_IDX_NONE << SXE2_VF_DYN_CTL_ITR_IDX_S) | + SXE2_VF_DYN_CTL_SWINT_TRIG | + SXE2_VF_DYN_CTL_INTENABLE_MSK); +} + +void sxe2_hw_irq_dyn_ctl(struct sxe2_hw *hw, u16 irq_idx, u32 value) +{ + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_itr_set(struct sxe2_hw *hw, u16 irq_idx, u16 itr_idx, u16 interval) +{ + u32 value = (interval / hw->hw_cfg.itr_gran) & 
SXE2_VF_INT_ITR_INTERVAL; + + sxe2_write_reg(hw, SXE2_VF_INT_ITR(itr_idx, irq_idx), value); +} + +void sxe2_hw_irq_rate_limit_set(struct sxe2_hw *hw, u16 irq_idx, u16 rate_limit) +{ + u32 value, rate_limit_reg, credit_max_value_reg; + + if (rate_limit == 0) { + value = 0; + } else { + rate_limit_reg = (u32)FIELD_PREP( + SXE2_PF_INT_RATE_CREDIT_INTERVAL, + (rate_limit / + hw->hw_cfg.credit_interval_gran)); + credit_max_value_reg = + SXE2_PF_INT_RATE_CREDIT_MAX_VALUE; + + value = rate_limit_reg | credit_max_value_reg | + SXE2_PF_INT_RATE_INTRL_ENABLE; + } + + sxe2_write_reg(hw, SXE2_PF_INT_RATE(irq_idx), value); +} + +u32 sxe2_hw_irq_gran_info_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_PFG_INT_CTL); +} + +void sxe2_hw_txq_irq_cause_setup(struct sxe2_hw *hw, u16 txq_idx, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = FIELD_PREP(SXE2_PF_INT_TQCTL_MSIX_IDX, + irq_idx) | + FIELD_PREP(SXE2_PF_INT_TQCTL_ITR_IDX, + itr_idx) | + SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_txq_irq_cause_clear(struct sxe2_hw *hw, u16 txq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_TQCTL(txq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_txq_irq_cause_switch(struct sxe2_hw *hw, u16 txq_idx, bool enable) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_TQCTL(txq_idx)); + u32 value; + + if (enable) + value = old_value | SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + else + value = old_value & ~SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_setup(struct sxe2_hw *hw, u16 rxq_idx, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = FIELD_PREP(SXE2_PF_INT_RQCTL_MSIX_IDX, + irq_idx) | + FIELD_PREP(SXE2_PF_INT_RQCTL_ITR_IDX, + itr_idx) | + SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_clear(struct sxe2_hw *hw, u16 rxq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_idx_change(struct sxe2_hw *hw, u16 rxq_idx, u16 irq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_RQCTL_MSIX_IDX; + value |= irq_idx; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_switch(struct sxe2_hw *hw, u16 rxq_idx, bool enable) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + if (enable) + value = old_value | SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + else + value = old_value & ~SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +#define SXE2_HW_CMD_QUEUE_ENABLE(type) \ + do { \ + s32 ret = 0; \ + u32 value; \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAL, lower_32_bits(addr)); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAH, upper_32_bits(addr)); \ + value = FIELD_PREP(SXE2_CMD_REG_LEN_M, depth) | \ + SXE2_CMD_REG_LEN_ENABLE_M; \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + if (sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##BAL) 
!= \ + lower_32_bits(addr)) { \ + ret = -SXE2_HW_ERR_IO; \ + } \ + return ret; \ + } while (0) + +/* Like SXE2_HW_CMD_QUEUE_ENABLE above, these helpers paste ##type## into the register names so one body serves both the FW and mailbox control queues. */ +#define SXE2_HW_CMD_QUEUE_DISABLE(type) \ + do { \ + bool is_admin = false; \ + u32 old_tail = 0; \ + u32 old_head = 0; \ + u32 value = sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##LEN); \ + value &= ~(SXE2_CMD_REG_LEN_VFE_M | SXE2_CMD_REG_LEN_OVFL_M | \ + SXE2_CMD_REG_LEN_CRIT_M | SXE2_CMD_REG_LEN_ENABLE_M); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + is_admin = (SXE2_PF_CTRLQ_##type##LEN == \ + SXE2_PF_CTRLQ_FW_ARQLEN) || \ + (SXE2_PF_CTRLQ_##type##LEN == SXE2_PF_CTRLQ_FW_ATQLEN); \ + if (!is_admin) { \ + if (SXE2_PF_CTRLQ_##type##LEN == \ + SXE2_PF_CTRLQ_MBX_ATQLEN) { \ + old_tail = sxe2_read_reg(hw, \ + SXE2_PF_CTRLQ_##type##T); \ + old_head = sxe2_read_reg(hw, \ + SXE2_PF_CTRLQ_##type##H); \ + if (old_tail >= old_head) { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + } else { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + old_tail); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + } \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + } else { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + } \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAL, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAH, 0); \ + } \ + } while (0) + +#define SXE2_HW_CMD_QUEUE_WRITE_TAIL(type) \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, value & SXE2_CMD_REG_HEAD_M) +#define SXE2_HW_CMD_QUEUE_READ_HEAD(type) \ + ({ (sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##H) & SXE2_CMD_REG_HEAD_M); }) + +#define SXE2_HW_CMD_QUEUE_GET_ERR(type) \ + do { \ + u32 err = 0; \ + u32 value = sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##LEN); \ + if (value == SXE2_REG_INVALID_VALUE) \ + return 0; \ + if (value & SXE2_CMD_REG_LEN_VFE_M) { \ + err |= SXE2_CMD_REG_LEN_VFE_M; \ + } else if (value & SXE2_CMD_REG_LEN_CRIT_M) { \ + err |= SXE2_CMD_REG_LEN_CRIT_M; \ + } \ + if (err) { \ + value &= ~(SXE2_CMD_REG_LEN_VFE_M | \ + SXE2_CMD_REG_LEN_CRIT_M); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + } \ + return err; \ + } while (0) + +s32 sxe2_hw_fw_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(FW_ATQ); +} + +void sxe2_hw_fw_tq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(FW_ATQ); +} + +s32 sxe2_hw_fw_tq_is_idle(struct sxe2_hw *hw) +{ + u32 val; + s32 ret; + u32 old_tail; + u32 old_head; + + val = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_HW_STS); + if (val == SXE2_REG_INVALID_VALUE) { + ret = true; + goto l_out; + } + ret = val & SXE2_PF_CTRLQ_FW_ATQ_IDLE_MASK; + +l_out: + if (ret) { + old_tail = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQT); + old_head = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQH); + if (old_tail >= old_head) { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + } else { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, old_tail); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + } + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQH, 0); + + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQBAL, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQBAH, 0); + } + + return ret; +} + +void sxe2_hw_fw_tq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(FW_ATQ); +} + +u32 sxe2_hw_fw_tq_read_head(struct sxe2_hw *hw) 
+{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(FW_ATQ); +} + +u32 sxe2_hw_fw_tq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(FW_ATQ); +} + +s32 sxe2_hw_fw_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(FW_ARQ); +} + +void sxe2_hw_fw_rq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(FW_ARQ); +} + +s32 sxe2_hw_fw_rq_is_idle(struct sxe2_hw *hw) +{ + u32 val; + s32 ret; + + SXE2_SET_USED(hw); + val = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_HW_STS); + if (val == SXE2_REG_INVALID_VALUE) { + ret = true; + goto l_out; + } + ret = val & SXE2_PF_CTRLQ_FW_ARQ_IDLE_MASK; + +l_out: + if (ret) { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQH, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQT, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQBAL, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQBAH, 0); + } + + return ret; +} + +void sxe2_hw_fw_rq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(FW_ARQ); +} + +u32 sxe2_hw_fw_rq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(FW_ARQ); +} + +u32 sxe2_hw_fw_rq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(FW_ARQ); +} + +s32 sxe2_hw_mbx_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(MBX_ATQ); +} + +void sxe2_hw_mbx_tq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(MBX_ATQ); +} + +void sxe2_hw_mbx_tq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(MBX_ATQ); +} + +u32 sxe2_hw_mbx_tq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(MBX_ATQ); +} + +u32 sxe2_hw_mbx_tq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(MBX_ATQ); +} + +s32 sxe2_hw_mbx_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(MBX_ARQ); +} + +void sxe2_hw_mbx_rq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(MBX_ARQ); +} + +void sxe2_hw_mbx_rq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(MBX_ARQ); +} + +u32 sxe2_hw_mbx_rq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(MBX_ARQ); +} + +u32 sxe2_hw_mbx_rq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(MBX_ARQ); +} + +void sxe2_hw_rxq_ctxt_cfg(struct sxe2_hw *hw, struct sxe2_hw_rxq_ctxt *rxq_ctxt, + u16 rxq_idx) +{ + u8 i; + struct sxe2_adapter *adapter = hw->adapter; + u32 value[SXE2_RX_CTXT_CNT]; + u32 base_addr_h = rxq_ctxt->base_addr >> SXE2_RX_CTXT_BASE_L_W; + u16 depth_h = rxq_ctxt->depth >> SXE2_RX_CTXT_DEPTH_L_W; + + SXE2_SET_USED(adapter); + + value[SXE2_RX_CTXT0] = SXE2_CTXT_REG_VALUE(rxq_ctxt->base_addr, + SXE2_RX_CTXT_BASE_L_S, + SXE2_RX_CTXT_BASE_L_W); + + value[SXE2_RX_CTXT1] = SXE2_CTXT_REG_VALUE( + base_addr_h, SXE2_RX_CTXT_BASE_H_S, SXE2_RX_CTXT_BASE_H_W); + value[SXE2_RX_CTXT1] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->depth, SXE2_RX_CTXT_DEPTH_L_S, + SXE2_RX_CTXT_DEPTH_L_W); + + value[SXE2_RX_CTXT2] = 
SXE2_CTXT_REG_VALUE(depth_h, SXE2_RX_CTXT_DEPTH_H_S, + SXE2_RX_CTXT_DEPTH_H_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->dbuff_len, + SXE2_RX_CTXT_DBUFF_S, + SXE2_RX_CTXT_DBUFF_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hbuff_len, + SXE2_RX_CTXT_HBUFF_S, + SXE2_RX_CTXT_HBUFF_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_type, + SXE2_RX_CTXT_HSPLT_TYPE_S, + SXE2_RX_CTXT_HSPLT_TYPE_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->desc_type, + SXE2_RX_CTXT_DESC_TYPE_S, + SXE2_RX_CTXT_DESC_TYPE_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->crc_strip, SXE2_RX_CTXT_CRC_S, SXE2_RX_CTXT_CRC_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->l2tag1_show, + SXE2_RX_CTXT_L2TAG_FLAG_S, + SXE2_RX_CTXT_L2TAG_FLAG_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_0, + SXE2_RX_CTXT_HSPLT_0_S, + SXE2_RX_CTXT_HSPLT_0_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_1, + SXE2_RX_CTXT_HSPLT_1_S, + SXE2_RX_CTXT_HSPLT_1_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->inner_vlan_strip, + SXE2_RX_CTXT_INVALN_STP_S, + SXE2_RX_CTXT_INVALN_STP_W); + + value[SXE2_RX_CTXT3] = SXE2_CTXT_REG_VALUE(rxq_ctxt->lro_enable, + SXE2_RX_CTXT_LRO_ENABLE_S, + SXE2_RX_CTXT_LRO_ENABLE_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->cpuid, SXE2_RX_CTXT_CPUID_S, SXE2_RX_CTXT_CPUID_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->max_frame_size, + SXE2_RX_CTXT_MAX_FRAME_SIZE_S, + SXE2_RX_CTXT_MAX_FRAME_SIZE_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->lro_desc_max, + SXE2_RX_CTXT_LRO_DESC_MAX_S, + SXE2_RX_CTXT_LRO_DESC_MAX_W); + + value[SXE2_RX_CTXT4] = SXE2_CTXT_REG_VALUE(rxq_ctxt->tphrdesc_enable, + SXE2_RX_CTXT_THPRDESC_ENABLE_S, + SXE2_RX_CTXT_THPRDESC_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphwdesc_enable, + SXE2_RX_CTXT_THPWDESC_ENABLE_S, + SXE2_RX_CTXT_THPWDESC_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphdata_enable, + SXE2_RX_CTXT_THPRDATA_ENABLE_S, + SXE2_RX_CTXT_THPRDATA_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphhead_enable, + SXE2_RX_CTXT_THPHEAD_ENABLE_S, + SXE2_RX_CTXT_THPHEAD_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->low_desc_waterline, + SXE2_RX_CTXT_LOW_DESC_LINE_S, + SXE2_RX_CTXT_LOW_DESC_LINE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->vfid, SXE2_RX_CTXT_VF_ID_S, SXE2_RX_CTXT_VF_ID_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->pfid, SXE2_RX_CTXT_PF_ID_S, SXE2_RX_CTXT_PF_ID_W); + value[SXE2_RX_CTXT4] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->vfen, SXE2_RX_CTXT_VF_ENABLE_S, + SXE2_RX_CTXT_VF_ENABLE_W); + value[SXE2_RX_CTXT4] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->vsi_id, SXE2_RX_CTXT_VSI_ID_S, + SXE2_RX_CTXT_VSI_ID_W); + + for (i = 0; i < SXE2_RX_CTXT_CNT; i++) { + SXE2_REG_WRITE(hw, SXE2_RXQ_CTXT(i, rxq_idx), value[i]); + LOG_INFO_BDF("rxq:%u ctxt[%u]:0x%x.\n", rxq_idx, i, value[i]); + } +} + +void sxe2_hw_vf_irq_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq) +{ + u32 value; + u16 idx; + + value = (((vf_irq->first_in_dev << SXE2_PFVP_INT_ALLOC_FIRST_S) & + SXE2_PFVP_INT_ALLOC_FIRST_M) | + ((vf_irq->last_in_dev << SXE2_PFVP_INT_ALLOC_LAST_S) & + SXE2_PFVP_INT_ALLOC_LAST_M) | + SXE2_PFVP_INT_ALLOC_VALID); + SXE2_REG_WRITE(hw, SXE2_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), value); + + value = (((vf_irq->first_in_dev << SXE2_PCI_PFVP_INT_ALLOC_FIRST_S) & + SXE2_PCI_PFVP_INT_ALLOC_FIRST_M) | + ((vf_irq->last_in_dev 
<< SXE2_PCI_PFVP_INT_ALLOC_LAST_S) & + SXE2_PCI_PFVP_INT_ALLOC_LAST_M) | + SXE2_PCI_PFVP_INT_ALLOC_VALID); + SXE2_REG_WRITE(hw, SXE2_PCI_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), value); + + for (idx = vf_irq->first_in_pf; idx <= vf_irq->last_in_pf; idx++) { + value = (((vf_irq->vfid_in_dev << SXE2_PCIEPROC_INT2FUNC_VF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_VF_NUM_M) | + ((vf_irq->pf_id << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_PF_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_PCIEPROC_INT2FUNC(idx), value); + } + + SXE2_REG_WRITE(hw, SXE2_VSI_PF(vf_irq->vfid_in_dev), + SXE2_VSI_PF_EN_M | (vf_irq->pf_id & SXE2_VSI_PF_ID_M)); + + SXE2_REG_WRITE(hw, SXE2_MBX_CTL(vf_irq->vfid_in_dev), + SXE2_MBX_CTL_CAUSE_ENA_M | (vf_irq->first_in_pf & + SXE2_MBX_CTL_MSIX_INDX_M)); +} + +void sxe2_hw_vf_queue_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue) +{ + u32 reg; + + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_MAPENA(vf_queue->vfid_in_pf), + SXE2_VF_TXQ_MAPENA_M); + reg = (((vf_queue->txq_first_in_pf << SXE2_VF_TXQ_BASE_FIRST_Q_S) & + SXE2_VF_TXQ_BASE_FIRST_Q_M) | + (((vf_queue->txq_cnt - 1) << SXE2_VF_TXQ_BASE_Q_NUM_S) & + SXE2_VF_TXQ_BASE_Q_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_BASE(vf_queue->vfid_in_pf), reg); + + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_MAPENA(vf_queue->vfid_in_pf), + SXE2_VF_RXQ_MAPENA_M); + + reg = (((vf_queue->rxq_first_in_pf << SXE2_VF_RXQ_BASE_FIRST_Q_S) & + SXE2_VF_RXQ_BASE_FIRST_Q_M) | + (((vf_queue->rxq_cnt - 1) << SXE2_VF_RXQ_BASE_Q_NUM_S) & + SXE2_VF_RXQ_BASE_Q_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_BASE(vf_queue->vfid_in_pf), reg); +} + +void sxe2_hw_vf_irq_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq) +{ + u16 idx; + u32 value; + + SXE2_REG_WRITE(hw, SXE2_MBX_CTL(vf_irq->vfid_in_dev), 0); + + SXE2_REG_WRITE(hw, SXE2_VSI_PF(vf_irq->vfid_in_dev), 0); + + for (idx = vf_irq->first_in_pf; idx <= vf_irq->last_in_pf; idx++) { + value = (((1 << SXE2_PCIEPROC_INT2FUNC_IS_PF_S) & + SXE2_PCIEPROC_INT2FUNC_IS_PF_M) | + ((vf_irq->pf_id << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_PF_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_PCIEPROC_INT2FUNC(idx), value); + } + + SXE2_REG_WRITE(hw, SXE2_PCI_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), 0); +} + +void sxe2_hw_vf_queue_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue) +{ + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_BASE(vf_queue->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_MAPENA(vf_queue->vfid_in_pf), 0); + + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_BASE(vf_queue->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_MAPENA(vf_queue->vfid_in_pf), 0); +} + +s32 sxe2_hw_rxq_status_check(struct sxe2_hw *hw, u32 reg_idx, bool enable) +{ + struct sxe2_adapter *adapter = hw->adapter; + s32 ret; + u8 i; + u32 value; + + (void)adapter; + + for (i = 0; i < SXE2_QUEUE_WAIT_RETRY_CNT; i++) { + value = SXE2_REG_READ(hw, SXE2_RXQ_CTRL(reg_idx)); + if ((enable == !!(value & SXE2_RXQ_CTRL_STATUS_ACTIVE)) || + (value == SXE2_REG_INVALID_VALUE)) { + ret = 0; + LOG_DEBUG_BDF("rxq[%u] %s done.\n", reg_idx, + enable ? 
"enable" : "disable"); + goto l_out; + } + + usleep_range(20, 40); + } + + ret = -SXE2_HW_ERR_TIMEDOUT; + +l_out: + return ret; +} + +s32 sxe2_hw_rxq_ctrl(struct sxe2_hw *hw, u16 reg_idx, bool enable, bool wait, + bool cde) +{ + s32 ret = 0; + u32 ctrl_reg = SXE2_REG_READ(hw, SXE2_RXQ_CTRL(reg_idx)); + u32 value; + + if (enable == !!(ctrl_reg & SXE2_RXQ_CTRL_STATUS_ACTIVE)) { + LOG_WARN("rxq idx:%u status:%u already.\n", reg_idx, enable); + return ret; + } + + if (enable) { + value = cde ? (SXE2_RXQ_CTRL_ENABLED | SXE2_RXQ_CTRL_CDE_ENABLE) + : SXE2_RXQ_CTRL_ENABLED; + ctrl_reg |= value; + } else { + ctrl_reg &= ~(SXE2_RXQ_CTRL_ENABLED | SXE2_RXQ_CTRL_CDE_ENABLE); + } + + SXE2_REG_WRITE(hw, SXE2_RXQ_CTRL(reg_idx), ctrl_reg); + + if (!wait) + return ret; + + sxe2_flush(hw); + ret = sxe2_hw_rxq_status_check(hw, reg_idx, enable); + + sxe2_flush(hw); + + return ret; +} + +u32 sxe2_fw_state_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_STATE); +} + +u32 sxe2_fw_ver_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_VER); +} + +u32 sxe2_fw_comp_ver_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_COMP_VER_ADDR); +} + +u32 sxe2_fw_mode_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_MISC) & SXE2_FW_MISC_MODE_M; +} + +u32 sxe2_fw_pop_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_MISC) & SXE2_FW_MISC_POP_M; +} + +void sxe2_hw_l2tag_accept(struct sxe2_hw *hw, u16 vsi_hw_id) +{ + u32 val; + + val = (SXE2_PFP_L2TAGSEN_ALL_TAG << SXE2_VSI_TAR_UNTAGGED_SHIFT) | + SXE2_PFP_L2TAGSEN_ALL_TAG; + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); +} + +static s32 sxe2_tpid_to_bits(bool pvlan_exist, bool is_strip, u16 tpid, u32 *bits) +{ + if (!pvlan_exist) { + switch (tpid) { + case ETH_P_8021Q: + *bits = is_strip ? SXE2_VSI_TSR_ID_OUT_VLAN1 + : SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1; + break; + case ETH_P_8021AD: + *bits = is_strip ? SXE2_VSI_TSR_ID_STAG + : SXE2_VSI_L2TAGSTXVALID_ID_STAG; + break; + case ETH_P_QINQ1: + *bits = is_strip ? SXE2_VSI_TSR_ID_OUT_VLAN2 + : SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2; + break; + default: + return -SXE2_HW_ERR_INVAL; + } + } else { + if (tpid != ETH_P_8021Q) + return -SXE2_HW_ERR_INVAL; + *bits = is_strip ? 
SXE2_VSI_TSR_ID_VLAN + : SXE2_VSI_L2TAGSTXVALID_ID_VLAN; + } + + return 0; +} + +s32 sxe2_hw_desc_vlan_param_check(bool pvlan_exist, bool is_strip, u16 tpid) +{ + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, is_strip, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + return 0; +} + +s32 sxe2_hw_desc_vlan_strip_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, + bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, true, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TSR(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_TSR_STRIP_TAG_S) | + (bits << SXE2_VSI_TSR_SHOW_TAG_S); + else + val &= ~((bits << SXE2_VSI_TSR_STRIP_TAG_S) | + (bits << SXE2_VSI_TSR_SHOW_TAG_S)); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_desc_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, + bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, false, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID; + else + val &= ~((SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_M + << SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_desc_outer_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, + u16 tpid, bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, false, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID; + else + val &= ~((SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_M + << SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_port_vlan_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 vlan_info, + u16 tpid) +{ + u32 val; + u32 strip_bits = 0; + u32 insert_bits = 0; + + if (vlan_info) { + if (sxe2_tpid_to_bits(false, true, tpid, &strip_bits)) + return -SXE2_HW_ERR_INVAL; + if (sxe2_tpid_to_bits(false, false, tpid, &insert_bits)) + return -SXE2_HW_ERR_INVAL; + + val = strip_bits << SXE2_VSI_TSR_STRIP_TAG_S; + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), val); + + val = (insert_bits << SXE2_VSI_L2TAGSTXVALID_TIR0_ID_S) | + SXE2_VSI_L2TAGSTXVALID_TIR0_VALID; + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + } else { + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), 0); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), 0); + } + + sxe2_write_reg(hw, SXE2_VSI_TIR0(vsi_hw_id), vlan_info); + + return 0; +} + +s32 sxe2_hw_port_inner_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, + bool acceptedtagged, + bool accepteduntagged) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TAR(vsi_hw_id)); + if (acceptedtagged) + val |= (BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_TAGGED_S); + else + val &= ~(BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_TAGGED_S); + + if (accepteduntagged) + val |= (BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_UNTAGGED_S); + else + val &= ~(BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_UNTAGGED_S); + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); + + return 0; +} + +void 
sxe2_hw_rx_vlan_filter_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_RX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE; + else + val &= ~SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE; + sxe2_write_reg(hw, SXE2_VSI_RX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_loopback_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN; + else + val &= ~SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_mac_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_MACAS_EN; + else + val &= ~SXE2_VSI_TX_SW_CTRL_MACAS_EN; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_vlan_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE; + else + val &= ~SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +u32 sxe2_hw_fw_irq_cause_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg_valid(hw, SXE2_PF_INT_FW_EVENT, 0); +} + +s32 sxe2_hw_corer_irq_cause_get(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + + if (val == SXE2_REG_INVALID_VALUE) + return 0; + if (val & SXE2_PCIE_SYS_READY_CORER_ASSERT) + pci_write_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, + SXE2_PCIE_SYS_READY_CORER_ASSERT); +#else + u32 val_wr; + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); + if (val & SXE2_PCIE_SYS_READY_CORER_ASSERT) { + val_wr = SXE2_PCIE_SYS_READY_CORER_ASSERT; + (void)rte_pci_write_config(SXE2_HW_PCI_DEV(adapter), &val_wr, + sizeof(val_wr), SXE2_PCIE_SYS_READY); + } +#endif + + return val & SXE2_PCIE_SYS_READY_CORER_ASSERT; +} + +void sxe2_hw_trigger_pfr(struct sxe2_hw *hw) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_PFGEN_CTRL); + + sxe2_write_reg(hw, SXE2_PFGEN_CTRL, (reg | SXE2_PFGEN_CTRL_PFSWR)); +} + +s32 sxe2_hw_pfr_done(struct sxe2_hw *hw) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_PFGEN_CTRL); + if (val == SXE2_REG_INVALID_VALUE) { + if (!sxe2_hw_stop_drop_done(hw)) + return -EBUSY; + + return 0; + } + + return !(val & SXE2_PFGEN_CTRL_PFSWR); +} + +void sxe2_hw_trigger_corer(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_TOP_CFG_CORE, SXE2_TOP_CFG_CORE_RST_CODE); +} + +s32 sxe2_hw_corer_done(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + + if (val == SXE2_REG_INVALID_VALUE) + return 0; +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val & SXE2_PCIE_SYS_READY_R5; +} + +void sxe2_hw_stop_drop(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + pci_write_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, + (val | SXE2_PCIE_SYS_READY_STOP_DROP)); +#else + 
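/* DPDK build: no struct pci_dev is available, so PCIe config space goes through the rte_pci_* accessors. */ + 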
rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); + val = val | SXE2_PCIE_SYS_READY_STOP_DROP; + (void)rte_pci_write_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif +} + +s32 sxe2_hw_stop_drop_done(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; + +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + if (val == SXE2_REG_INVALID_VALUE) + return 0; +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val & SXE2_PCIE_SYS_READY_STOP_DROP_DONE; +} + +u32 sxe2_hw_read_pcie_sys_ready(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val; +} + +u32 sxe2_hw_heartbeat_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_HEARTBEAT); +} + +void sxe2_hw_trigger_vfr(struct sxe2_hw *hw, u16 vf_id) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_VFGEN_CTRL(vf_id)); + + sxe2_write_reg(hw, SXE2_VFGEN_CTRL(vf_id), (reg | SXE2_VFGEN_CTRL_VFSWR)); + + LOG_INFO("vf:%d vfr triggered.\n", vf_id); +} + +u32 sxe2_hw_vfr_done(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + struct sxe2_adapter *adapter = hw->adapter; + +#ifdef SXE2_DPDK_DRIVER + UNUSED(adapter); +#endif + + val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + if (val == SXE2_REG_INVALID_VALUE) { + LOG_WARN_BDF("core reset or pfr detected.\n"); + return SXE2_REG_UNACCESS; + } + + return (val & SXE2_VF_VRC_VFGEN_VFRSTAT_COMPLETE); +} + +void sxe2_hw_vf_active(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + u32 reg_val; + struct sxe2_adapter *adapter = hw->adapter; + +#ifdef SXE2_DPDK_DRIVER + UNUSED(adapter); +#endif + + reg_val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + + val = FIELD_PREP(SXE2_VF_VRC_VFGEN_VFRSTAT, + SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE) | + SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK | reg_val; + sxe2_write_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id), val); + + LOG_INFO_BDF("vf_id:%u activated 0x%x.\n", vf_id, + sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id))); +} + +void sxe2_hw_vf_deactive(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + val &= ~(SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE); + sxe2_write_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id), val); +} + +bool sxe2_hw_vflr_cause_get(struct sxe2_hw *hw, u16 vf_id_in_dev) +{ + u32 val; + u32 reg_idx, bit_idx; + + reg_idx = vf_id_in_dev / 32; + bit_idx = vf_id_in_dev % 32; + + val = sxe2_read_reg(hw, SXE2_GLGEN_VFLRSTAT(reg_idx)); + return !!(val & BIT(bit_idx)); +} + +void sxe2_hw_vflr_cause_clear(struct sxe2_hw *hw, u16 vf_id_in_dev) +{ + u32 reg_idx, bit_idx; + + reg_idx = vf_id_in_dev / 32; + bit_idx = vf_id_in_dev % 32; + + sxe2_write_reg(hw, (u32)SXE2_GLGEN_VFLRSTAT(reg_idx), (u32)BIT(bit_idx)); +} + +s32 sxe2_hw_port_outer_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, + u16 tpid, bool acceptedtagged, + bool accepteduntagged) +{ + u32 val; + u32 tag_id = 0; + + if (sxe2_tpid_to_bits(false, false, tpid, &tag_id)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TAR(vsi_hw_id)); + if (acceptedtagged) + val |= (u32)(BIT(tag_id) << SXE2_ACCEPT_RULE_TAGGED_S); + else + val &= (u32)(~(BIT(tag_id) << 
SXE2_ACCEPT_RULE_TAGGED_S)); + + if (accepteduntagged) + val |= (u32)(BIT(tag_id) << SXE2_ACCEPT_RULE_UNTAGGED_S); + else + val &= (u32)(~(BIT(tag_id) << SXE2_ACCEPT_RULE_UNTAGGED_S)); + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); + + return 0; +} + +void sxe2_hw_ptp_main_enable(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, GLTSYN, GLTSYN_ENA_M); +} + +void sxe2_hw_ptp_main_disable(struct sxe2_hw *hw) +{ + u32 val = sxe2_read_reg(hw, GLTSYN); + + val &= ~GLTSYN_ENA_M; + sxe2_write_reg(hw, GLTSYN, val); +} + +bool sxe2_hw_ptp_main_is_enabled(struct sxe2_hw *hw) +{ + u32 val = sxe2_read_reg(hw, GLTSYN); + + if (val & GLTSYN_ENA_M) + return true; + + return false; +} + +void sxe2_hw_ptp_init_incval(struct sxe2_hw *hw, u64 incval) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, upper_32_bits(incval)); + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, lower_32_bits(incval)); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_INIT_INCVAL); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_tsyn_switch(struct sxe2_hw *hw, bool on) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); + if (on) + reg |= SXE2_PF_INT_OICR_TSYN_TX; + else + reg &= ~SXE2_PF_INT_OICR_TSYN_TX; + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, reg); +} + +void sxe2_hw_ptp_tsyn_event_switch(struct sxe2_hw *hw, bool on) +{ + u32 oicr_value; + + oicr_value = sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); + if (on) + oicr_value |= SXE2_PF_INT_OICR_TSYN_EVENT; + else + oicr_value &= ~SXE2_PF_INT_OICR_TSYN_EVENT; + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, oicr_value); +} + +void sxe2_hw_ptp_aux_in_set(struct sxe2_hw *hw, u32 index, u32 value) +{ + sxe2_write_reg(hw, (u32)GLTSYN_AUXIN(index), value); +} + +u64 sxe2_hw_ptp_get_event_second(struct sxe2_hw *hw, u32 index) +{ + u32 lo; + u32 hi; + + lo = sxe2_read_reg(hw, (u32)GLTSYN_EVENT_S_L(index)); + hi = sxe2_read_reg(hw, (u32)GLTSYN_EVENT_S_H(index)); + + return (((u64)(hi & GLTSYN_EVENT_S_H_MASK)) << 32) | lo; +} + +u64 sxe2_hw_ptp_get_event_nanosecond(struct sxe2_hw *hw, u32 index) +{ + return sxe2_read_reg(hw, (u32)GLTSYN_EVENT_NS(index)); +} + +static bool sxe2_hw_ptp_tx_tstamp_valid(struct sxe2_hw *hw, u8 port_id, u32 index) +{ + u32 val; + + val = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32)); + if (!(val & BIT(index % 32))) + return false; + + return true; +} + +static bool sxe2_hw_ptp_mac_tx_tstamp_valid(struct sxe2_hw *hw, u8 phy_id, u32 index) +{ + u32 val; + + val = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32)); + if (!(val & BIT(index % 32))) + return false; + + return true; +} + +#define SXE2_TSTAMP_TX_HI_SHIFT (24) +#define SXE2_TSTAMP_TX_LO_SHIFT (8) +bool sxe2_hw_ptp_tx_tstamp_read(struct sxe2_hw *hw, u8 port_id, u32 index, + u64 *timestamp) +{ + u32 lo; + u32 hi; + + if (!sxe2_hw_ptp_tx_tstamp_valid(hw, port_id, index)) + return false; + + hi = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TXHI(port_id, index)); + lo = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TXLO(port_id, index)); + + *timestamp = ((u64)hi << SXE2_TSTAMP_TX_HI_SHIFT | + (u64)lo >> SXE2_TSTAMP_TX_LO_SHIFT); + return true; +} + +bool sxe2_hw_ptp_mac_tx_tstamp_read(struct sxe2_hw *hw, u8 phy_id, u8 index, + u64 *timestamp) +{ + u32 lo; + u32 hi; + + if (!sxe2_hw_ptp_mac_tx_tstamp_valid(hw, phy_id, index)) + return false; + + lo = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TXLO(phy_id, index)); + hi = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TXHI(phy_id, index)) & 0x7F; + *timestamp = ((u64)hi << 24 | (u64)lo >> 8); + return true; +} + +void 
sxe2_hw_ptp_tx_tstamp_discard(struct sxe2_hw *hw, u8 port_id, u32 index) +{ + u32 value = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32)); + + value &= (u32)~BIT(index % 32); + sxe2_write_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32), value); +} + +void sxe2_hw_ptp_mac_tx_tstamp_discard(struct sxe2_hw *hw, u8 phy_id, u32 index) +{ + u32 value = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32)); + + value &= (u32)(~(1ULL << (index % 32))); + sxe2_write_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32), value); +} + +void sxe2_hw_ptp_mac_tx_tstamp_clear_all(struct sxe2_hw *hw, u8 phy_id, u32 reg_idx) +{ + sxe2_write_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, reg_idx), 0); +} + +bool sxe2_hw_ptp_acquire_1588_lock(struct sxe2_hw *hw) +{ + u32 value; + + value = sxe2_read_reg(hw, GLTSYN_SEM); + value = value & GLTSYN_SEM_BUSY_M; + return !(!!value); +} + +void sxe2_hw_ptp_release_1588_lock(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, GLTSYN_SEM, 0); +} + +void sxe2_hw_ptp_1588_timestamp_read(struct sxe2_hw *hw, u64 *second, + u64 *nanosecond) +{ + u32 sh_hi; + u32 sh_lo; + u32 sh_ns; + u32 sh_ns2; + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_LATCHING_SHTIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); + sh_ns = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + sh_hi = sxe2_read_reg(hw, GLTSYN_SHTIME_S_H); + sh_lo = sxe2_read_reg(hw, GLTSYN_SHTIME_S_L); + sh_ns2 = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + + if (sh_ns != sh_ns2) { + sh_hi = sxe2_read_reg(hw, GLTSYN_SHTIME_S_H); + sh_lo = sxe2_read_reg(hw, GLTSYN_SHTIME_S_L); + sh_ns = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + } + *nanosecond = sh_ns; + + *second = (((u64)(sh_hi & 0xFFFF)) << 32) | sh_lo; +} + +void sxe2_hw_ptp_1588_timestamp_write(struct sxe2_hw *hw, u64 second, u32 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_SHTIME_S_H, upper_32_bits(second)); + sxe2_write_reg(hw, GLTSYN_SHTIME_S_L, lower_32_bits(second)); + sxe2_write_reg(hw, GLTSYN_SHTIME_NS, nanosecond); + sxe2_write_reg(hw, GLTSYN_SHTIME_SUBNS, 0); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_INIT_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_1588_clockout_write(struct sxe2_hw *hw, u32 index, u64 period, + u64 second, u64 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_CLKO(index), (u32)period); + sxe2_write_reg(hw, GLTSYN_TGT_NS(index), (u32)nanosecond); + + sxe2_write_reg(hw, GLTSYN_TGT_S_L(index), (u32)second); + sxe2_write_reg(hw, GLTSYN_TGT_S_H(index), second >> 32); +} + +u32 sxe2_hw_ptp_auxout_get(struct sxe2_hw *hw, u32 index) +{ + return sxe2_read_reg(hw, GLTSYN_AUXOUT(index)); +} + +void sxe2_hw_ptp_auxout_set(struct sxe2_hw *hw, u32 index, u32 value) +{ + sxe2_write_reg(hw, GLTSYN_AUXOUT(index), value); +} + +#define PTP_GLTSYN_SHADJ_NS_POS (0x3fffffff) +#define PTP_GLTSYN_SHADJ_NS_NEG (0x80000000) +void sxe2_hw_ptp_1588_timestamp_adjust(struct sxe2_hw *hw, u32 nanosecond, bool neg) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, 0); + if (!neg) + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, + nanosecond & PTP_GLTSYN_SHADJ_NS_POS); + else + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, + nanosecond | PTP_GLTSYN_SHADJ_NS_NEG); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_ADJ_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_1588_timestamp_adjust_at_time(struct sxe2_hw *hw, u32 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, 0); + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, nanosecond); + sxe2_write_reg(hw, GLTSYN_CMD, 
GLTSYN_CMD_ADJ_TIME_AT_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +s32 sxe2_hw_ptp_stat_get(struct sxe2_hw *hw) +{ + return (s32)sxe2_read_reg(hw, GLTSYN_STAT); +} + +#define SXE2_IPSEC_RX_SPI_TBL (0x1) +#define SXE2_IPSEC_RX_KEY_TBL (0x2) + +void sxe2_hw_ipsec_tcam_clear(struct sxe2_hw *hw, u32 sa_index) +{ + u32 val = 0; + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSIPID_ADDR, 0); + + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSSPI0_ADDR, 0); + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSSPI0_ADDR, + 0 ^ SXE2_IPSEC_RX_IPSSPI1_SPI_Y_MASK); + + val = (SXE2_IPSEC_RX_SPI_TBL << SXE2_IPSEC_RX_IPSIDX_TABLE_SHIFT) & + SXE2_IPSEC_RX_IPSIDX_TABLE_MASK; + val &= ~(BIT(SXE2_IPSEC_RX_IPSIDX_VBI_SHIFT)); + val |= (sa_index << SXE2_IPSEC_RX_IPSIDX_SA_IDX_SHIFT) & + SXE2_IPSEC_RX_IPSIDX_SA_IDX_MASK; + val |= SXE2_IPSEC_RX_IPSIDX_SWRITE_SHIFT; + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSIDX_ADDR, val); +} + +STATIC void sxe2_stat_update32(struct sxe2_hw *hw, u32 reg, u64 *prev_stat, + u64 *cur_stat, bool prev_stat_loaded) +{ + u32 new_data; + + new_data = sxe2_read_reg(hw, reg); + + if (new_data == BIT_ULL(32) - 1) + goto l_end; + + if (!prev_stat_loaded) { + *prev_stat = new_data; + return; + } + + if (new_data >= *prev_stat) + *cur_stat += new_data - *prev_stat; + else + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + *prev_stat = new_data; + +l_end: + return; +} + +void sxe2_hw_pause_stats_update(struct sxe2_hw *hw, u8 port_idx, bool prev_loaded, + struct sxe2_pause_stats *cur, + struct sxe2_pause_stats *prev) +{ + u32 i; + + for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) { + sxe2_stat_update32(hw, SXE2_TXPFCXONFRAMES_LO(port_idx, i), + &prev->prio_xon_tx[i], &cur->prio_xon_tx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_TXPFCXOFFFRAMES_LO(port_idx, i), + &prev->prio_xoff_tx[i], &cur->prio_xoff_tx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_TXPFCXONTOXOFFFRAMES_LO(port_idx, i), + &prev->prio_xon_2_xoff[i], + &cur->prio_xon_2_xoff[i], prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPFCXONFRAMES_LO(port_idx, i), + &prev->prio_xon_rx[i], &cur->prio_xon_rx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPFCXOFFFRAMES_LO(port_idx, i), + &prev->prio_xoff_rx[i], &cur->prio_xoff_rx[i], + prev_loaded); + } + + sxe2_stat_update32(hw, SXE2_TXPAUSEXOFFFRAMES_LO(port_idx), &prev->tx_pause, + &cur->tx_pause, prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPAUSEXOFFFRAMES_LO(port_idx), &prev->rx_pause, + &cur->rx_pause, prev_loaded); +} diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.h b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..460dc7219bf237041631942600509941ff119f76 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_hw.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_hw.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HW_H__ +#define __SXE2_HW_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_osal.h" + +#ifndef IEEE_8021Q_MAX_PRIORITIES +#define IEEE_8021Q_MAX_PRIORITIES 8 +#endif +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif + +#else +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/dcbnl.h> +#include "sxe2_cmd_channel.h" +#include "sxe2_mbx_public.h" +#endif + +#include "sxe2_cmd.h" +#include "sxe2_host_regs.h" + +#define SXE2_BAR_RDMA_WB_START 0x03F0000 +#define SXE2_BAR_RDMA_WB_END 0x0BFFFFF + +#define SXE2_REG_INVALID_VALUE 0xffffffffU +#define SXE2_REG_RETRY_CNT 5 + +#define SXE2_FW_STATE_MASK 0xF0000 +#define SXE2_FW_STATE_FINISH 0x20000 +#define SXE2_FW_STATE_ABNORMAL 0x30000 + +#define SXE2_REG_UNACCESS (2) +#define sxe2_flush(hw) ((void)sxe2_read_reg((hw), SXE2_STATUS)) +#define SXE2_REG_READ(hw, addr) sxe2_read_reg(hw, addr) +#define SXE2_REG_WRITE(hw, reg, value) sxe2_write_reg(hw, reg, value) + +struct sxe2_adapter; + +enum sxe2_hw_err_code { + SXE2_HW_ERR_SUCCESS = 0, + SXE2_HW_ERR_FAULT, + SXE2_HW_ERR_TIMEDOUT, + SXE2_HW_ERR_IO, + SXE2_HW_ERR_INVAL, +}; + +struct sxe2_hw_cfg { + u16 itr_gran; + u16 credit_interval_gran; +}; + +struct sxe2_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct sxe2_map_info { + void __iomem *addr; + resource_size_t start; + resource_size_t end; + u32 bar_idx; +}; + +struct sxe2_hw_map { + u32 map_cnt; + struct sxe2_map_info maps[]; +}; + +struct sxe2_hw { + u8 *hw_map; + struct sxe2_hw_cfg hw_cfg; + void *adapter; + struct sxe2_mac_info mac_info; + + u8 *pkg_copy; + u32 pkg_size; + +#ifndef SXE2_DPDK_DRIVER + u32 (*reg_read)(const void __iomem *reg); + void (*reg_write)(u32 value, void __iomem *reg); + struct sxe2_fw_ver_msg fw_ver; +#endif + bool is_pop_type; +}; + +struct sxe2_hw_vf_irq { + u16 first_in_pf; + u16 last_in_pf; + u16 first_in_dev; + u16 last_in_dev; + u16 vfid_in_pf; + u16 vfid_in_dev; + u16 pf_id; +}; + +struct sxe2_hw_vf_queue { + u16 txq_first_in_pf; + u16 txq_cnt; + u16 rxq_first_in_pf; + u16 rxq_cnt; + u16 vfid_in_pf; +}; + +struct sxe2_pause_stats { + __le64 prio_xoff_rx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_rx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_tx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xoff_tx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_2_xoff[IEEE_8021Q_MAX_PRIORITIES]; + __le64 rx_pause; + __le64 tx_pause; +}; + +void sxe2_hw_pause_stats_update(struct sxe2_hw *hw, u8 port_idx, bool prev_loaded, struct sxe2_pause_stats *cur, + struct sxe2_pause_stats *prev); + +void __iomem *sxe2_reg_addr_get(struct sxe2_hw *hw, resource_size_t reg); + +#ifndef SXE2_DPDK_DRIVER +static inline void sxe2_hw_reg_handle_init(struct sxe2_hw *hw, u32 (*read)(const void __iomem *), + void (*write)(u32, void __iomem *)) +{ + hw->reg_read = read; + hw->reg_write = write; +} +#endif +void sxe2_write_reg(struct sxe2_hw *hw, u32 reg, u32 value); + +u32 sxe2_read_reg(struct sxe2_hw *hw, u32 reg); + +u64 sxe2_read_reg64(struct sxe2_hw *hw, u32 reg); + +bool sxe2_hw_is_fault(struct sxe2_hw *hw); + +u32 sxe2_hw_read_pcie_sys_ready(struct sxe2_hw *hw); + +u32 sxe2_hw_evt_irq_cause_get(struct sxe2_hw *hw); + +void sxe2_hw_irq_enable(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_disable(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_trigger(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_dyn_ctl(struct sxe2_hw *hw, u16 irq_idx, u32 value); + +void sxe2_hw_irq_itr_set(struct sxe2_hw *hw, u16 irq_idx, u16 itr_idx, u16 
interval); + +void sxe2_hw_irq_rate_limit_set(struct sxe2_hw *hw, u16 irq_idx, u16 rate_limit); + +u32 sxe2_hw_irq_gran_info_get(struct sxe2_hw *hw); + +void sxe2_hw_txq_irq_cause_setup(struct sxe2_hw *hw, u16 txq_idx, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_txq_irq_cause_clear(struct sxe2_hw *hw, u16 txq_idx); + +void sxe2_hw_txq_irq_cause_switch(struct sxe2_hw *hw, u16 txq_idx, bool enable); +void sxe2_hw_rxq_irq_cause_setup(struct sxe2_hw *hw, u16 rxq_idx, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_rxq_irq_idx_change(struct sxe2_hw *hw, u16 rxq_idx, u16 irq_idx); + +void sxe2_hw_rxq_irq_cause_clear(struct sxe2_hw *hw, u16 rxq_idx); + +void sxe2_hw_rxq_irq_cause_switch(struct sxe2_hw *hw, u16 rxq_idx, bool enable); + +void sxe2_hw_evt_irq_cfg(struct sxe2_hw *hw, u32 value, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_fwq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_mbxq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_evt_irq_clear(struct sxe2_hw *hw); + +u32 sxe2_hw_evt_irq_mask_get(struct sxe2_hw *hw); + +void sxe2_hw_fwq_irq_clear(struct sxe2_hw *hw); + +void sxe2_hw_mbxq_irq_clear(struct sxe2_hw *hw); + +struct sxe2_hw_rxq_ctxt { + u64 base_addr; + u16 depth; + + u16 dbuff_len; + u16 hbuff_len; + u8 hsplit_type; + u8 desc_type; + u8 crc_strip; + u8 l2tag1_show; + u8 hsplit_0; + u8 hsplit_1; + u8 inner_vlan_strip; + + u8 lro_enable; + u8 cpuid; + u16 max_frame_size; + u16 lro_desc_max; + u8 relax_data; + u8 relax_wb_desc; + u8 relax_rd_desc; + + u8 tphrdesc_enable; + u8 tphwdesc_enable; + u8 tphdata_enable; + u8 tphhead_enable; + u8 low_desc_waterline; + u16 vfid; + u8 pfid; + + u8 vfen; + u16 vsi_id; + + u8 pref_enable; + u16 head; +}; + +s32 sxe2_hw_fw_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_fw_tq_disable(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_tq_is_idle(struct sxe2_hw *hw); + +void sxe2_hw_fw_tq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_fw_tq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_fw_tq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_fw_rq_disable(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_rq_is_idle(struct sxe2_hw *hw); + +void sxe2_hw_fw_rq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_fw_rq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_fw_rq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_mbx_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_mbx_tq_disable(struct sxe2_hw *hw); + +void sxe2_hw_mbx_tq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_mbx_tq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_mbx_tq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_mbx_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_mbx_rq_disable(struct sxe2_hw *hw); + +void sxe2_hw_mbx_rq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_mbx_rq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_mbx_rq_get_error(struct sxe2_hw *hw); + +void sxe2_hw_rxq_ctxt_cfg(struct sxe2_hw *hw, struct sxe2_hw_rxq_ctxt *rxq_ctxt, u16 rxq_idx); + +s32 sxe2_hw_rxq_ctrl(struct sxe2_hw *hw, u16 reg_idx, bool enable, bool wait, bool cde); + +s32 sxe2_hw_rxq_status_check(struct sxe2_hw *hw, u32 reg_idx, bool enable); + +u32 sxe2_fw_state_get(struct sxe2_hw *hw); + +u32 sxe2_fw_ver_get(struct sxe2_hw *hw); + +u32 sxe2_fw_comp_ver_get(struct sxe2_hw *hw); + +u32 sxe2_fw_mode_get(struct sxe2_hw *hw); +u32 
sxe2_fw_pop_get(struct sxe2_hw *hw); + +void sxe2_hw_l2tag_accept(struct sxe2_hw *hw, u16 vsi_hw_id); + +s32 sxe2_hw_desc_vlan_param_check(bool pvlan_exist, bool is_strip, u16 tpid); + +s32 sxe2_hw_desc_vlan_strip_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_desc_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_port_vlan_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 vlan_info, u16 tpid); + +void sxe2_hw_rx_vlan_filter_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_loopback_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_mac_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_vlan_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +u32 sxe2_hw_fw_irq_cause_get(struct sxe2_hw *hw); + +s32 sxe2_hw_corer_irq_cause_get(struct sxe2_hw *hw); + +void sxe2_hw_trigger_pfr(struct sxe2_hw *hw); + +s32 sxe2_hw_pfr_done(struct sxe2_hw *hw); + +void sxe2_hw_trigger_corer(struct sxe2_hw *hw); + +s32 sxe2_hw_corer_done(struct sxe2_hw *hw); + +void sxe2_hw_stop_drop(struct sxe2_hw *hw); + +s32 sxe2_hw_stop_drop_done(struct sxe2_hw *hw); + +u32 sxe2_hw_heartbeat_get(struct sxe2_hw *hw); + +void sxe2_hw_trigger_vfr(struct sxe2_hw *hw, u16 vf_id); + +u32 sxe2_hw_vfr_done(struct sxe2_hw *hw, u16 vf_id); + +void sxe2_hw_vf_active(struct sxe2_hw *hw, u16 vf_id); + +void sxe2_hw_vf_deactive(struct sxe2_hw *hw, u16 vf_id); + +bool sxe2_hw_vflr_cause_get(struct sxe2_hw *hw, u16 vf_id_in_dev); + +void sxe2_hw_vflr_cause_clear(struct sxe2_hw *hw, u16 vf_id_in_dev); + +s32 sxe2_hw_desc_outer_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_port_inner_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, bool acceptedtagged, + bool accepteduntagged); + +s32 sxe2_hw_port_outer_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool acceptedtagged, + bool accepteduntagged); + +void sxe2_hw_vf_irq_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq); + +void sxe2_hw_vf_queue_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue); + +void sxe2_hw_ptp_main_enable(struct sxe2_hw *hw); + +void sxe2_hw_ptp_main_disable(struct sxe2_hw *hw); + +bool sxe2_hw_ptp_main_is_enabled(struct sxe2_hw *hw); +void sxe2_hw_ptp_init_incval(struct sxe2_hw *hw, u64 incval); + +void sxe2_hw_ptp_tsyn_switch(struct sxe2_hw *hw, bool on); +void sxe2_hw_ptp_tsyn_event_switch(struct sxe2_hw *hw, bool on); +u64 sxe2_hw_ptp_get_event_second(struct sxe2_hw *hw, u32 index); +u64 sxe2_hw_ptp_get_event_nanosecond(struct sxe2_hw *hw, u32 index); +void sxe2_hw_ptp_aux_in_set(struct sxe2_hw *hw, u32 index, u32 value); + +bool sxe2_hw_ptp_tx_tstamp_read(struct sxe2_hw *hw, u8 port_id, u32 index, u64 *timestamp); +bool sxe2_hw_ptp_mac_tx_tstamp_read(struct sxe2_hw *hw, u8 phy_id, u8 index, u64 *timestamp); +void sxe2_hw_ptp_tx_tstamp_discard(struct sxe2_hw *hw, u8 port_id, u32 index); +void sxe2_hw_ptp_mac_tx_tstamp_discard(struct sxe2_hw *hw, u8 phy_id, u32 index); + +void sxe2_hw_ptp_mac_tx_tstamp_clear_all(struct sxe2_hw *hw, u8 phy_id, u32 reg_idx); +bool sxe2_hw_ptp_acquire_1588_lock(struct sxe2_hw *hw); +void sxe2_hw_ptp_release_1588_lock(struct sxe2_hw *hw); +void sxe2_hw_ptp_1588_timestamp_read(struct sxe2_hw *hw, u64 *second, u64 *nanosecond); +void sxe2_hw_ptp_1588_timestamp_write(struct sxe2_hw *hw, u64 second, u32 nanosecond); +void sxe2_hw_ptp_1588_clockout_write(struct 
sxe2_hw *hw, u32 index, u64 period, u64 second, u64 nanosecond);
+void sxe2_hw_ptp_1588_timestamp_adjust(struct sxe2_hw *hw, u32 nanosecond, bool neg);
+void sxe2_hw_ptp_1588_timestamp_adjust_at_time(struct sxe2_hw *hw, u32 nanosecond);
+u32 sxe2_hw_ptp_auxout_get(struct sxe2_hw *hw, u32 index);
+void sxe2_hw_ptp_auxout_set(struct sxe2_hw *hw, u32 index, u32 value);
+
+s32 sxe2_hw_ptp_stat_get(struct sxe2_hw *hw);
+
+void sxe2_hw_vf_queue_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue);
+
+void sxe2_hw_vf_irq_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq);
+
+void sxe2_hw_ipsec_tcam_clear(struct sxe2_hw *hw, u32 sa_index);
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.c b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.c
new file mode 100644
index 0000000000000000000000000000000000000000..1e268571e5a472ec946f24d80a056dc5fab861fe
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_log_export.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#define SXE2_DUMP_FILE_DIR "/var/log/"
+#define SXE2_DUMP_FILE_SIZE_LIMIT (200 * 1024 * 1024)
+#define SXE2_SINCE_YEAR (1900)
+#define SXE2_SINCE_MONTH (1)
+#define SXE2_MINUTE_TO_SECS (60)
+
+#include <linux/fs.h>
+#include <linux/rtc.h>
+
+#include "sxe2.h"
+#include "sxe2_log.h"
+#include "sxe2_event.h"
+#include "sxe2_log_export.h"
+#include "sxe2_cmd_channel.h"
+#include "sxe2_compat.h"
+
+static s32 sxe2_export_local_time(struct rtc_time *tm)
+{
+	struct timespec64 time;
+	time64_t local_time;
+
+	ktime_get_real_ts64(&time);
+	local_time = (time64_t)(time.tv_sec -
+			(sys_tz.tz_minuteswest * SXE2_MINUTE_TO_SECS));
+	rtc_time64_to_tm(local_time, tm);
+
+	tm->tm_mon += SXE2_SINCE_MONTH;
+	tm->tm_year += SXE2_SINCE_YEAR;
+	return 0;
+}
+
+static void sxe2_export_filename_build(struct sxe2_export_context *ctxt,
+				       s8 *filename, u32 len)
+{
+	struct sxe2_adapter *adapter = ctxt->adapter;
+	struct rtc_time tm;
+	struct pci_dev *pdev = adapter->pdev;
+	s8 *p_str = filename;
+
+	(void)sxe2_export_local_time(&tm);
+	p_str += snprintf(p_str, len - (p_str - filename), "%s", SXE2_DUMP_FILE_DIR);
+	p_str += snprintf(p_str, len - (p_str - filename), "sxe2-fw");
+	p_str += snprintf(p_str, len - (p_str - filename), "-%04x:%02x:%02x.%x.log.",
+			  pci_domain_nr(pdev->bus), pdev->bus->number,
+			  PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+	p_str += snprintf(p_str, len - (p_str - filename),
+			  "%04d%02d%02d-%02d%02d%02d", tm.tm_year, tm.tm_mon,
+			  tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+}
+
+static void sxe2_export_file_close(struct sxe2_export_file_info *file_info)
+{
+	if (file_info && file_info->fp) {
+		SXE2_BUG_ON(IS_ERR(file_info->fp));
+		(void)filp_close(file_info->fp, NULL);
+
+		file_info->fp = NULL;
+	}
+}
+
+static s32 sxe2_export_file_open(struct sxe2_export_context *ctxt)
+{
+	struct sxe2_adapter *adapter = ctxt->adapter;
+	struct sxe2_export_file_info *file_info = &ctxt->file;
+	s8 filename[SXE2_DUMP_FILE_NAME_LEN] = {0};
+	s32 ret = 0;
+	struct file *filp = NULL;
+
+	/* reuse the current dump file until it reaches the size limit, then rotate */
+	if (file_info->fp && file_info->file_size < ctxt->file_size_limit)
+		goto l_out;
+
+	sxe2_export_file_close(file_info);
+
+	memset(file_info, 0, sizeof(struct sxe2_export_file_info));
+
+	sxe2_export_filename_build(ctxt, filename, SXE2_DUMP_FILE_NAME_LEN);
+
+	filp = filp_open(filename,
+			 O_CREAT | O_RDWR | O_TRUNC | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LOG_ERROR_BDF("export file create: filp_open error filename %s errno %d\n",
+			      filename, (int)PTR_ERR(filp));
+		ret = -EIO;
+		goto l_out;
+	}
+
+	memcpy(file_info->filename, filename, sizeof(filename));
+	file_info->fp = filp;
+
+l_out:
+	return ret;
+}
+
+static s32 sxe2_export_file_write(struct sxe2_export_file_info *file_info, u8 *buf,
+				  u32 len)
+{
+	struct file *filp = file_info->fp;
+	s32 ret = 0;
+	u32 pos = 0;
+
+	/* retry writes interrupted by a signal and resume after short writes */
+	while (pos < len) {
+		do {
+#ifdef KERNEL_WRITE_POS_LOFF
+			ret = (s32)kernel_write(filp, buf + pos, len - pos,
+						filp->f_pos);
+#else
+			ret = (s32)kernel_write(filp, buf + pos, len - pos,
+						&filp->f_pos);
+#endif
+		} while (ret == -EINTR);
+
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			return -EIO;
+
+		fsnotify_modify(filp);
+		file_info->file_size += ret;
+		file_info->file_w_cnt++;
+		pos += ret;
+	}
+
+	return 0;
+}
+
+s32 sxe2_log_export_init(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_export_context *ctxt = &adapter->export_ctxt;
+	struct sxe2_fwc_event event = {};
+
+	event.count = 1;
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_AUTO_LOG);
+	ret = sxe2_fwc_event_subscribe(adapter, &event);
+	if (ret)
+		goto l_subscribe_failed;
+
+	ctxt->adapter = adapter;
+	ctxt->file_size_limit = SXE2_DUMP_FILE_SIZE_LIMIT;
+	(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_AUTO_LOG,
+				    SXE2_CMD_EVENT_STATUS_SUB);
+
+	return 0;
+
+l_subscribe_failed:
+	return ret;
+}
+
+void sxe2_log_export_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_export_context *ctxt = &adapter->export_ctxt;
+	struct sxe2_fwc_event event = {};
+
+	(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_AUTO_LOG,
+				    SXE2_CMD_EVENT_STATUS_UNSUB);
+
+	ctxt->adapter = NULL;
+	sxe2_export_file_close(&ctxt->file);
+
+	event.count = 1;
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_AUTO_LOG);
+	(void)sxe2_fwc_event_unsubscribe(adapter, &event);
+}
+
+static s32 sxe2_fwc_log_export_ack(struct sxe2_adapter *adapter,
+				   struct sxe2_fwc_fw_log_ack *ack)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_fill(&cmd, SXE2_CMD_EVENT_FW_LOG_ACK, ack, sizeof(*ack),
+			     NULL, 0, SXE2_DRV_CMD_DFLT_TIMEOUT, false, true);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("export log ack failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_event_log_export(struct sxe2_adapter *adapter, void *buf, u32 buf_len)
+{
+	s32 ret;
+	struct sxe2_export_context *ctxt = &adapter->export_ctxt;
+	struct sxe2_fwc_fw_log_ack ack = {};
+	s32 result;
+
+	if (!ctxt->adapter) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2_export_file_open(ctxt);
+	if (ret)
+		goto l_ack;
+
+	ret = sxe2_export_file_write(&ctxt->file, buf, buf_len);
+	if (ret) {
+		LOG_ERROR_BDF("file %s write %d failed: %d\n", ctxt->file.filename,
+			      buf_len, ret);
+	}
+
+l_ack:
+	/* always acknowledge the firmware event, reporting success or failure */
+	result = ret ? -SXE2_CMD_DUMP_LOG_FAILED : 0;
+	ack.result = cpu_to_le32((u32)result);
+	ret = sxe2_fwc_log_export_ack(adapter, &ack);
+l_end:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.h b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3abc495d5b0e5ca7e076d60ff8741fc4d6b9103
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/common/sxe2pf/sxe2_log_export.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ * + * @file: sxe2_log_export.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_LOG_EXPORT_H__ +#define __SXE2_LOG_EXPORT_H__ + +#include +#include +#include +#include + +struct sxe2_adapter; + +#define SXE2_DUMP_FILE_DIR_LEN (128) +#define SXE2_DUMP_FILE_NAME_LEN (256) + +struct sxe2_export_file_info { + u8 filename[SXE2_DUMP_FILE_NAME_LEN]; + struct file *fp; + u64 file_size; + u32 file_w_cnt; + u32 file_status; + u8 reserved[4]; +}; + +struct sxe2_export_context { + struct sxe2_adapter *adapter; + struct sxe2_export_file_info file; + u32 file_size_limit; +}; + +s32 sxe2_log_export_init(struct sxe2_adapter *adapter); + +void sxe2_log_export_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_event_log_export(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cli_drv_msg.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cli_drv_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..908dd875dec1472314fb8edc659bb25e40da2b9c --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cli_drv_msg.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_cli_drv_msg.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CLI_DRV_MSG_H__ +#define __SXE2_CLI_DRV_MSG_H__ + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define SXE2_DRV_MSG_MAX_SIZE (8192) +#define SXE2_DRV_MSG_MAGIC_CODE (0x56781234) +#define SXE2_MAX_NETDEV_NAME_SIZE (128) + +#define SXE2_CLI_DRV_SUCCESS (0) +#define SXE2_MOD_DRV (1) +#define SXE2_SUB_MOD_DEV (1) + +#define MODULE_ID_SHIFT (24) +#define SUB_MODULE_ID_SHIFT (16) +#define ERROR_INDEX_MASK (0xFFFF0000) +#define SXE2_MAKE_ERR_CODE_INDEX(module, sub_module) \ + ((((u32)((module) << MODULE_ID_SHIFT)) | ((u32)((sub_module) << SUB_MODULE_ID_SHIFT))) & \ + ERROR_INDEX_MASK) + +enum sxe2_priv_drv_err_code { + SXE2_ERR_DRV_DEV = SXE2_MAKE_ERR_CODE_INDEX(SXE2_MOD_DRV, SXE2_SUB_MOD_DEV), + SXE2_ERR_DRV_DEV_PARAMS_INVAL, + SXE2_ERR_DRV_DEV_NULL_PTR, + SXE2_ERR_DRV_DEV_NOT_FOUND, + SXE2_ERR_DRV_DEV_NOT_SUPPORT, + SXE2_ERR_DRV_DEV_NO_MEM, + SXE2_ERR_DRV_DEV_FAULT, + SXE2_ERR_DRV_DEV_MAGIC_INVAL, +}; + +enum sxe2_cli_drv_cmd_opcode { + SXE2_CLI_CMD_GET_NETDEV_NAME = 0, + SXE2_CLI_CMD_MAX = 0xFFFF, +}; + +struct drv_msg_info { + u32 magic; + u32 opcode; + u32 error; + u32 timeout; + u32 runver; + u32 req_length; + u32 ack_length; + u16 hdr_len; + u8 reserved[2]; + u64 trace_id; + u8 pad[8]; + u8 body[]; +}; + +struct sxe2_cli_drv_get_pname_rsp_msg { + char netdev_name[SXE2_MAX_NETDEV_NAME_SIZE]; +}; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cmd.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..3cf787281d5a2a9764b9983c790b8f5185e569c3 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_cmd.h @@ -0,0 +1,3687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_cmd.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CMD_H__ +#define __SXE2_CMD_H__ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#if defined(SXE2_SUPPORT_UEFI) || defined(SXE2_SUPPORT_IPXE) +#include "sxe2_uefi_type.h" +#include "sxe2_uefi_def.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "rte_os.h" +#include "sxe2_type.h" +#include "sxe2_common.h" +#include "sxe2_osal.h" +#endif + +#ifndef SXE2_DRIVER_SIM +#include "sxe2_spec.h" +#endif + +#pragma pack(4) + +#define SXE2_VSI_MAX_CNT (768) + +#define SXE2_INVAL_U8 (0xFF) +#define SXE2_INVAL_U16 (0xFFFF) +#define SXE2_INVAL_U32 (0xFFFFFFFF) +#define SXE2_VF_ID_INVAL (0xFFFF) + +#define SXE2_CMD_MAGIC (0xFEFEEFEF) + +#define SXE2_FW_COMP_MAJOR_VER (1) +#define SXE2_FW_COMP_MINOR_VER (1) +#define SXE2_FW_COMP_VER \ + (SXE2_FW_COMP_MAJOR_VER << 16 | \ + SXE2_FW_COMP_MINOR_VER) + +#define SXE2_CMD_LARGE_BUF_SIZE (512) +#define SXE2_CMD_MAX_BUF \ + (2 * 4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE \ + (SXE2_CMD_MAX_BUF - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_MAX_BUF_MBX \ + (4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX \ + (SXE2_CMD_MAX_BUF_MBX - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE_MBX \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_DD BIT(0) +#define SXE2_CMD_COMPLETE BIT(1) +#define SXE2_CMD_ERROR BIT(2) +#define SXE2_CMD_LARGE_BUF BIT(9) +#define SXE2_CMD_READ BIT(10) +#define SXE2_CMD_BUF BIT(12) +#define SXE2_CMD_NO_INTR BIT(13) + +#define SXE2_CMD_DONE (SXE2_CMD_DD | SXE2_CMD_COMPLETE | SXE2_CMD_ERROR) + +#define SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT \ + (16) +#define SXE2_TC_MAX_CNT (8) +#define SXE2_TXQ_CTXT_LEN (24) + +#define SXE2_CMD_HDR_SIZE sizeof(struct sxe2_cmd_hdr) +#define SXE2_DRV_MSG_HDR_SIZE \ + sizeof(struct sxe2_drv_msg_hdr) + +#define SXE2_MBX_DATA_PTR(type, msg_hdr_ptr) \ + ((type *)((u8 *)(msg_hdr_ptr) + (msg_hdr_ptr)->data_offset)) + +#define SXE2_MBX_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2vf_mbx_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_DRV_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2_drv_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_CMD_MODULE_S (8) +#define SXE2_MK_CMD(module, cmd) ((module) << SXE2_CMD_MODULE_S | (cmd)) + +#define SXE2_TCAM_KEY_VALUE_LEN (5) +#define SXE2_TCAM_KEY_LEN (2 * SXE2_TCAM_KEY_VALUE_LEN) + +#define SXE2_FULLKEY_DWORD_CNT (3) +#define SXE2_PACKET_INFO_DWORD_CNT (20) +#define SXE2_SWITCH_FV_CNT (48) +#define SXE2_PACKET_MAX_RECIPES (32) + +#define SXE2_MAX_NUM_RECIPES (64) +#define SXE2_MAX_NUM_RECIPES_PER_PROFILE (32) +#define SXE2_MAX_NUM_ROOT_RECIPES_PER_PROFILE (24) + +#if defined(SXE2_TEST) +#define SXE2_MAX_NUM_RECIPES_VER_I_O (64) +#else +#define SXE2_MAX_NUM_RECIPES_VER_I_O (24) +#endif + +#define SXE2_MAX_NUM_PROFILES (256) +#define SXE2_NUM_WORDS_RECIPE (4) +#define SXE2_MAX_REPLY_RECIPE (4) +#define SXE2_MAX_CHAIN_RECIPE (SXE2_MAX_REPLY_RECIPE + 1) +#define SXE2_MAX_CHAIN_WORDS (SXE2_NUM_WORDS_RECIPE * \ + SXE2_MAX_REPLY_RECIPE) +#define SXE2_VSI_LIST_DAT_LEN DIV_ROUND_UP(SXE2_VSI_MAX_CNT, \ + (BITS_PER_BYTE * sizeof(u32))) + +#define SXE2_ACTION_PRIORITY_HIGH (7) + +#define SXE2_CMD_SWITCH_RULE_FLAG_COMPLEX BIT(0) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE BIT(1) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_INC BIT(2) + +#define 
SXE2_SINGLE_ACT_LB_ENABLE BIT(16) +#define SXE2_SINGLE_ACT_LAN_ENABLE BIT(15) + +#define SXE2_SINGLE_ACT_VSI_TYPE_S (17) +#define SXE2_SINGLE_ACT_VSI_FORWARD (0x0 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_VSI_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_LIST_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST BIT(4) +#define SXE2_SINGLE_ACT_VALID_BIT BIT(1) +#define SXE2_SINGLE_ACT_DROP BIT(0) + +#define SXE2_SINGLE_ACT_TO_Q (0x1 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_Q_INDEX_S (4) +#define SXE2_SINGLE_ACT_Q_INDEX_M (0x7FF << SXE2_SINGLE_ACT_Q_INDEX_S) +#define SXE2_SINGLE_ACT_Q_REGION_S (1) +#define SXE2_SINGLE_ACT_Q_REGION_M (0x7 << SXE2_SINGLE_ACT_Q_REGION_S) +#define SXE2_SINGLE_ACT_Q_PRIORITY BIT(0) + +#define SXE2_SINGLE_ACT_PRUNE (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_EGRESS BIT(3) +#define SXE2_SINGLE_ACT_INGRESS BIT(2) +#define SXE2_SINGLE_ACT_PRUNET BIT(1) + +#define SXE2_SINGLE_ACT_MIRROR (0x3 << SXE2_SINGLE_ACT_VSI_TYPE_S) + +#define SXE2_SINGLE_ACT_POINTER (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_TO_LARGE BIT(0) +#define SXE2_SINGLE_ACT_HASFWD BIT(1) + +#define SXE2_MAC_NUM (4) + +#define SXE2_RSS_FV_CNT (24) + +#define SXE2_RSS_FV_TRACE_CNT (12) + +#define SXE2_OG_BUF_SIZE (4096) +#define SXE2_FV_CNT_MAX SXE2_SWITCH_FV_CNT +#define SXE2_FNAV_INPUT_CNT (30) + +#define SXE2_BFD_FV_CNT_MAX (32) + +#define SXE2_RXFT_PPE_INFO_REG_CNT (20) + +#define SXE2_FV_DIRECTION_OFFSET (10) +#define SXE2_FV_DIRECTION_MASK BIT(SXE2_FV_DIRECTION_OFFSET) +#define SXE2_FV_DIRECTION_TX (0) +#define SXE2_FV_DIRECTION_RX (1) + +#define SXE2_FV_CAST_OFFSET (0) +#define SXE2_FV_CAST_UNI (0) +#define SXE2_FV_CAST_MULTI (1) +#define SXE2_FV_CAST_BROAD (2) + +#define SXE2_FV_PKT_SRC_OFFSET (10) +#define SXE2_FV_PKT_SRC_MASK (0x3 << SXE2_FV_PKT_SRC_OFFSET) +#define SXE2_FV_PKT_SRC_TX (0x3) +#define SXE2_FV_PKT_SRC_RX (0x0) + +#define SXE2_FV_VSI_NUM_OFFSET (0) +#define SXE2_FV_VSI_NUM_MASK (0x3ff << SXE2_FV_VSI_NUM_OFFSET) + +#define SXE2_FV_PKT_TO_RDMA_OFFSET (8) +#define SXE2_FV_PKT_TO_RDMA_MASK (0x1 << SXE2_FV_PKT_TO_RDMA_OFFSET) +#define SXE2_FV_PKT_TO_RDMA (1) +#define SXE2_FV_PKT_TO_RDMA_NO (0) + +#define SXE2_SWITCH_RECIPE_PRIO_7 (7) +#define SXE2_SWITCH_RECIPE_PRIO_6 (6) + +#define SXE2_LLDP_FRAME_MAX_SIZE (1500) +#define SXE2_MAX_TRAFFIC_CLASS (8) +#define SXE2_MAX_USER_PRIORITY (8) +#define SXE2_DCBX_MAX_APPS (64) +#define SXE2_DSCP_MAX_NUM (64) + +#define SXE2_DSCP_OUI (0xFFFFFFU) +#define SXE2_DSCP_SUBTYPE_DSCP2UP (0x41U) +#define SXE2_DSCP_SUBTYPE_ENFORCE (0x42U) +#define SXE2_DSCP_SUBTYPE_TCBW (0x43U) +#define SXE2_DSCP_SUBTYPE_PFC (0x44U) +#define SXE2_DSCP_IPV6_OFFSET (80) +#define SXE2_DSCP_IPV4_UNTAG_OFFSET (64) +#define SXE2_DSCP_IPV6_UNTAG_OFFSET (144) + +#define SXE2_CMD_VSI_STATS_MAX_CNT (16) + +#define SXE2_SERIAL_NUM_LEN (20) + +#define SXE2_MDD_TYPE_TX (1) +#define SXE2_MDD_TYPE_RX (2) + +#define SXE2_FNAV_DEFAULT_MASK_CNT (6) + +#define SXE2_RSS_CORE_LUT_SIZE (32) + +#define SXE2_LARGE_ACTION_COUNT_IN_GROUP (4) +#define SXE2_FLM_VENDOR_LEN 16 +#define SXE2_FLM_VENDOR_PN_LEN 16 +#define SXE2_HOST_FLM_VENDOR_LEN 32 +#define SXE2_HOST_FLM_VENDOR_PN_LEN 32 + +#define SXE2_LLDP_FW_AGENT_DISABLE 0 +#define SXE2_LLDP_FW_AGENT_ENABLE 1 + +enum sxe2_txq_quanta_prof_cfg { + SXE2_TXQ_QUANTA_PROF_DEFAULT = 0, + SXE2_TXQ_QUANTA_PROF_SIMPLE, + SXE2_TXQ_QUANTA_PROF_COMPLEX, +}; + +enum 
sxe2_cmd_type { + SXE2_CMD_TYPE_CLI = 0, + SXE2_CMD_TYPE_DRV_TO_FW, + SXE2_CMD_TYPE_FW_NOTIFY, + SXE2_CMD_TYPE_PF_TO_VF, + SXE2_CMD_TYPE_VF_TO_PF, + SXE2_CMD_TYPE_DRV_TO_HW, + SXE2_CMD_TYPE_PF_REPLY_VF, +}; + +enum sxe2_cmd_module { + SXE2_CMD_MODULE_HANDSHAKE = 0, + SXE2_CMD_MODULE_CAPS = 1, + SXE2_CMD_MODULE_VSI = 2, + SXE2_CMD_MODULE_QUEUE = 3, + SXE2_CMD_MODULE_CFG = 4, + SXE2_CMD_MODULE_SWITCH = 5, + SXE2_CMD_MODULE_RULE = 6, + SXE2_CMD_MODULE_EVENT = 7, + SXE2_CMD_MODULE_MBX = 8, + SXE2_CMD_MODULE_TXSCHED = 9, + SXE2_CMD_MODULE_STATS = 11, + SXE2_CMD_MODULE_OPT = 12, + SXE2_CMD_MODULE_RSS = 13, + SXE2_CMD_MODULE_LED = 14, + SXE2_CMD_MODULE_OG = 15, + SXE2_CMD_MODULE_RDMA = 16, + SXE2_CMD_MODULE_IPSEC = 17, + SXE2_CMD_MODULE_FNAV = 18, + SXE2_CMD_MODULE_PXE = 19, + SXE2_CMD_MODULE_DCB = 20, + SXE2_CMD_MODULE_LLDP = 21, + SXE2_CMD_MODULE_PTP = 22, + SXE2_CMD_MODULE_MACADDR = 23, + SXE2_CMD_MODULE_MACSEC = 24, + SXE2_CMD_MODULE_UPGRADE = 25, + SXE2_CMD_MODULE_ETHTOOL = 26, + SXE2_CMD_MODULE_FLM = 27, + SXE2_CMD_MODULE_SFP = 28, + SXE2_CMD_MODULE_RWREG = 29, + SXE2_CMD_MODULE_UDPTUNEEL = 30, + SXE2_CMD_MODULE_NCD = 31, + SXE2_CMD_MODULE_BFD = 32, + SXE2_CMD_MODULE_NCD_UDF = 33, + SXE2_CMD_MODULE_QUEUE_STATS_MAP = 34, + SXE2_CMD_MODULE_ACL = 35, +}; + +enum sxe2_drv_cmd_opcode { + + SXE2_CMD_Q_HANDSHAKE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 1), + SXE2_CMD_Q_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 3), + + SXE2_CMD_DEV_CAPS = SXE2_MK_CMD(SXE2_CMD_MODULE_CAPS, 1), + SXE2_CMD_FUNC_CAPS, + SXE2_CMD_PF_CFG_CLEAR, + SXE2_CMD_PF_SRIOV_SET, + SXE2_CMD_PF_DDP_REF_CLR, + SXE2_CMD_PHY_PORT_INFO_GET, + SXE2_CMD_PF_SERIAL_GET, + SXE2_CMD_DRV_MODE_GET, + SXE2_CMD_DRV_MODE_SET, + + SXE2_CMD_VSI_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_VSI, 1), + SXE2_CMD_UPDATE_VSI, + SXE2_CMD_FREE_VSI, + SXE2_CMD_VSI_VLAN_FILTER, + SXE2_CMD_VSI_LOOPBACK, + SXE2_CMD_VSI_SPOOFCHK, + SXE2_CMD_VSI_SRC_PRUNE, + SXE2_CMD_VSI_MDD_CHECK, + SXE2_CMD_VSI_VF_QUEUE_SET, + SXE2_CMD_VSI_VF_QUEUE_CLEAR, + + SXE2_CMD_TXQ_CFG_AND_ENABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 1), + SXE2_CMD_RXQ_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 2), + SXE2_CMD_RX_FB = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 3), + SXE2_CMD_TXQ_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 4), + SXE2_CMD_TXQ_STATE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 5), + + SXE2_CMD_CFG_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_CFG, 1), + SXE2_CMD_CFG_UPDATE, + SXE2_CMD_DP_DLD_PRE, + SXE2_CMD_DP_DLD_PROC, + SXE2_CMD_DP_DLD_DONE, + SXE2_CMD_DP_DLD_STATE, + + SXE2_CMD_SWITCH_RULE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_SWITCH, 1), + SXE2_CMD_SWITCH_RULE_DEL, + SXE2_CMD_SWITCH_RULE_UPDATE, + SXE2_CMD_SWITCH_RULE_GET, + SXE2_CMD_SWITCH_VSI_LIST_ADD, + SXE2_CMD_SWITCH_VSI_LIST_DEL, + SXE2_CMD_SWITCH_VSI_LIST_GET, + SXE2_CMD_SWITCH_VSI_LIST_UPDATE, + SXE2_CMD_SWITCH_LARGE_ACTION_CFG, + SXE2_CMD_SWITCH_RULE_CPX_ADD, + SXE2_CMD_SWITCH_RULE_CPX_DEL, + SXE2_CMD_SWITCH_RULE_CPX_UPDATE, + SXE2_CMD_SWITCH_RULE_CPX_GET, + SXE2_CMD_SWITCH_TRACE_TRIGGER, + SXE2_CMD_SWITCH_TRACE_RECORDER, + SXE2_CMD_HW_DFX_SHOW, + SXE2_CMD_SWITCH_RECIPE_GET, + SXE2_CMD_SWITCH_PROFILE_RECIPE_MAP_GET, + SXE2_CMD_SWITCH_SHARE_ID_GET, + SXE2_CMD_SWITCH_DFX_IRQ, + + SXE2_CMD_PARSE_RULE = SXE2_MK_CMD(SXE2_CMD_MODULE_RULE, 1), + SXE2_CMD_UDP_TUNNEL_PORT, + + SXE2_CMD_EVENT_SUBSCRIBE = SXE2_MK_CMD(SXE2_CMD_MODULE_EVENT, 1), + SXE2_CMD_EVENT_UNSUBSCRIBE, + SXE2_CMD_EVENT_FW_LOG_ACK, + + SXE2_CMD_MBX_TO_PF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 1), + SXE2_CMD_MBX_TO_VF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 2), + + SXE2_CMD_TXSCHED_CAP_QUERY 
= SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 1), + SXE2_CMD_TXSCHED_DFLT_TOPO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 2), + SXE2_CMD_TX_SCHED_NODE_INFO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 3), + SXE2_CMD_TX_SCHED_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 4), + SXE2_CMD_TX_SCHED_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 5), + SXE2_CMD_TX_SCHED_NODE_SUSPEND = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 6), + SXE2_CMD_TX_SCHED_NODE_RESUME = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 7), + SXE2_CMD_TX_SCHED_LEAF_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 10), + SXE2_CMD_TX_SCHED_LEAF_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 11), + SXE2_CMD_TX_SCHED_NODE_RL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 12), + SXE2_CMD_TX_SCHED_Q_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 13), + SXE2_CMD_TX_SCHED_Q_STOP = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 14), + SXE2_CMD_TX_SCHED_ETS_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 15), + SXE2_CMD_TX_SCHED_LEAF_NODE_MOVE = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 16), + SXE2_CMD_TX_SCHED_QSET_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 17), + SXE2_CMD_TX_SCHED_QSET_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 18), + SXE2_CMD_TX_SCHED_PRIO_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 19), + SXE2_CMD_TX_SCHED_WEIGHT_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 20), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 21), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 22), + SXE2_CMD_TX_SCHED_NODE_SRL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 23), + SXE2_CMD_TX_SCHED_PROFILE_RL_PRE_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 24), + SXE2_CMD_TX_SCHED_PROFILE_SRL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 25), + SXE2_CMD_TX_SCHED_PROFILE_SRL_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 26), + SXE2_CMD_TX_SCHED_PROFILE_SRL_UPD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 27), + + SXE2_CMD_GET_PF_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 1), + SXE2_CMD_GET_VSI_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 2), + SXE2_CMD_GET_PPE_DFX, + + SXE2_CMD_OPT_EEP = SXE2_MK_CMD(SXE2_CMD_MODULE_OPT, 1), + + SXE2_CMD_RSS_VSI_HCTRL_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_RSS, 1), + SXE2_CMD_RSS_LUT_SET, + SXE2_CMD_RSS_LUT_GET, + SXE2_CMD_RSS_HKEY_SET, + SXE2_CMD_RSS_HKEY_GET, + SXE2_CMD_RSS_SYMM_FV_SET, + SXE2_CMD_RSS_TRACE_TRIGGER, + SXE2_CMD_RSS_TRACE_RECORDER, + + SXE2_CMD_LED_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_LED, 1), + + SXE2_CMD_OG_CFG_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_OG, 1), + SXE2_CMD_OG_TCAM_ENTRY_ALLOC, + SXE2_CMD_OG_TCAM_ENTRY_FREE, + SXE2_CMD_OG_TCAM_ENTRY_BATCH, + SXE2_CMD_OG_PROF_ID_ALLOC, + SXE2_CMD_OG_PROF_ID_FREE, + SXE2_CMD_OG_MASK_SEL_UPDATE, + + SXE2_CMD_RDMA_QP_ATTACH_MC = SXE2_MK_CMD(SXE2_CMD_MODULE_RDMA, 1), + SXE2_CMD_RDMA_QP_DETACH_MC, + SXE2_CMD_RDMA_QET_BIND_TC, + SXE2_CMD_RDMA_PF_FUNC_TABLE_INIT, + SXE2_CMD_RDMA_DESTROY_CC_QP, + SXE2_CMD_RDMA_GET_CC_QP_DFX, + SXE2_CMD_RDMA_NOTIFY_STATUS, + + SXE2_CMD_IPSEC_GET_CAPA = SXE2_MK_CMD(SXE2_CMD_MODULE_IPSEC, 1), + SXE2_CMD_IPSEC_TXSA_ADD, + SXE2_CMD_IPSEC_TXSA_DEL, + SXE2_CMD_IPSEC_TXSA_SET, + SXE2_CMD_IPSEC_RXSA_ADD, + SXE2_CMD_IPSEC_RXSA_DEL, + SXE2_CMD_IPSEC_RXSA_SET, + SXE2_CMD_IPSEC_STATS_SHOW, + SXE2_CMD_IPSEC_TXRX_SWITCH, + SXE2_CMD_IPSEC_DRV_CLEAR, + + SXE2_CMD_FNAV_FILTER_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_FNAV, 1), + SXE2_CMD_FNAV_TRACE_TRIGGER, + SXE2_CMD_FNAV_TRACE_RECORDER, + SXE2_CMD_FNAV_HW_STS, + SXE2_CMD_FNAV_HW_CLEAR, + SXE2_CMD_RXFT_PPE_INFO, + SXE2_CMD_VF_FNAV_FILTER_CLEAR, + SXE2_CMD_FNAV_STATS_GET, + SXE2_CMD_FNAV_DFLT_COMP_QIDX_SET, + 
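+	/* opcodes below omit an explicit SXE2_MK_CMD() value; C numbers each one as the previous enumerator plus one, so they remain inside the SXE2_CMD_MODULE_FNAV range */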
SXE2_CMD_FNAV_SPACE_CNT_GET, + SXE2_CMD_FNAV_MATCH_GET_BATCH, + + SXE2_PXE_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_PXE, 1), + SXE2_UEFI_PRIV_DATA_SET, + SXE2_UEFI_PRIV_DATA_GET, + SXE2_UEFI_SOCINFO_GET, + + SXE2_CMD_QOS_MODE_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_DCB, 1), + SXE2_CMD_QOS_MODE_GET, + SXE2_CMD_LINK_FLOW_CONTROL_GET, + SXE2_CMD_LINK_FLOW_CONTROL_SET, + + SXE2_CMD_LLDP_MIB_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_LLDP, 1), + SXE2_CMD_LLDP_MIB_GET, + SXE2_CMD_LLDP_MIB_NOTIFY, + SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, + SXE2_CMD_LLDP_DCBX_FW_AGENT_GET, + SXE2_CMD_LLDP_FW_STATS, + SXE2_CMD_LLDP_REMOTE_MIBS_INFO, + SXE2_CMD_LLDP_REMOTE_MIBS_DUMP, + SXE2_CMD_LLDP_FW_AGENT_SET, + SXE2_CMD_LLDP_FW_AGENT_GET, + + SXE2_CMD_PTP_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_PTP, 1), + SXE2_CMD_PTP_RX_MODE_SET, + SXE2_CMD_PTP_SEM_CLEAN, + + SXE2_CMD_MAC_ADDR_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_MACADDR, 1), + SXE2_CMD_MAC_ADDR_SET, + SXE2_CMD_MAC_MTU_SET, + + SXE2_CMD_MACSEC_TXSC_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_MACSEC, 1), + SXE2_CMD_MACSEC_TXSA_CFG, + SXE2_CMD_MACSEC_RXSC_CFG, + SXE2_CMD_MACSEC_RXSA_CFG, + SXE2_CMD_MACSEC_FIX_CFG, + + SXE2_CMD_FW_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_UPGRADE, 1), + SXE2_CMD_FW_DOWNLOAD_PRE, + SXE2_CMD_FW_DOWNLOAD_OPEN, + SXE2_CMD_FW_DOWNLOAD_FLASH, + SXE2_CMD_FW_DOWNLOAD_CLOSE, + SXE2_CMD_FW_DOWNLOAD_END, + + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE_STATS_MAP, 1), + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_RES_REL, + SXE2_CMD_TXQUEUE_STATS_MAP_RES_REL, + + SXE2_CMD_ETHTOOL_LOOPBACK_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_ETHTOOL, 1), + + SXE2_CMD_FLM_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_FLM, 1), + SXE2_CMD_FLM_LINK_UP, + SXE2_CMD_FLM_LINK_DOWN, + SXE2_CMD_FLM_FEC_GET, + SXE2_CMD_FLM_FEC_SET, + SXE2_CMD_FLM_AN_SET, + SXE2_CMD_FLM_LINK_INFO_SET, + SXE2_CMD_FLM_LINK_INFO_GET, + SXE2_CMD_FLM_LINK_STATUS_SET, + SXE2_CMD_FLM_LINK_STATUS_SYNC, + SXE2_CMD_TEST_LINK_STATUS, + SXE2_CMD_FLM_LINK_UP_DOWN_SET, + + SXE2_CMD_SFP_WHITE_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_SFP, 1), + SXE2_CMD_SFP_TX_FAULT_CFG, + SXE2_CMD_SFP_SET_FEC_CFG, + SXE2_CMD_SFP_GET_FEC_CFG, + SXE2_CMD_SFP_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_SET_LINKINFO_CFG, + SXE2_CMD_PERSIST_GET_LINK_CFG, + SXE2_CMD_SUPPORT_SPEED_GET_CFG, + SXE2_CMD_CURRENT_SPEED_STATUS_GET_CFG, + SXE2_CMD_CURRENT_SPEED_GET_CFG, + SXE2_CMD_SFP_SET_LINK_CFG, + SXE2_CMD_GET_LINKST_CFG, + SXE2_CMD_GET_VENDOR_INFO_CHECK_WARNING, + SXE2_CMD_GET_OPT_DATA_INFO, + + SXE2_CMD_CLI_READ_REG = SXE2_MK_CMD(SXE2_CMD_MODULE_RWREG, 1), + SXE2_CMD_CLI_WRITE_REG, + + SXE2_CMD_UDPTUNNEL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_UDPTUNEEL, 1), + SXE2_CMD_UDPTUNNEL_DEL, + SXE2_CMD_UDPTUNNEL_GET, + + SXE2_CMD_BFD_INTRQ_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_BFD, 1), + SXE2_CMD_BFD_CAPA_GET, + SXE2_CMD_BFD_CFG_SET, + SXE2_CMD_BFD_SESS_CFG_SET, + SXE2_CMD_BFD_SESS_STATE_GET, + SXE2_CMD_BFD_PACK_PROF_SET, + SXE2_CMD_BFD_FLOW_RULE_SET, + SXE2_CMD_BFD_KEYLEN_SET, + + SXE2_CMD_NCD_CORE_NUM = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD, 1), + SXE2_CMD_NCD_CORE_FS_QUEUE_SET, + SXE2_CMD_NCD_CORE_FS_QUEUE_GET, + + SXE2_CMD_NCD_UDF_CAPA_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD_UDF, 1), + + SXE2_CMD_NCD_SWITCH_TAG_EN, + 
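+	/* the NCD UDF opcodes below likewise take sequential values after SXE2_CMD_NCD_UDF_CAPA_GET */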
SXE2_CMD_NCD_SWITCH_TAG_SET, + SXE2_CMD_NCD_SWITCH_TAG_GET, + SXE2_CMD_NCD_TXLEN_ADJ_SET, + SXE2_CMD_NCD_TXLEN_ADJ_GET, + + SXE2_CMD_NCD_SDF_EN, + SXE2_CMD_NCD_SDF_SET, + SXE2_CMD_NCD_SDF_GET, + + SXE2_CMD_NCD_SDN_UDP_ADD, + SXE2_CMD_NCD_SDN_UDP_DEL, + SXE2_CMD_NCD_SDN_UDP_QUERY, + SXE2_CMD_NCD_SDN_ADD, + SXE2_CMD_NCD_SDN_DEL, + SXE2_CMD_NCD_SDN_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_UDP_ADD, + SXE2_CMD_NCD_SDN_IPSEC_UDP_DEL, + + SXE2_CMD_NCD_APP_PORT_SET, + + SXE2_CMD_NCD_PKT_PRI_SET, + + SXE2_CMD_ACL_LUT_ALLOC = SXE2_MK_CMD(SXE2_CMD_MODULE_ACL, 1), + SXE2_CMD_ACL_LUT_DEALLOC, + SXE2_CMD_ACL_PROF_SEL_BASE_SET, + SXE2_CMD_ACL_SCEN_ALLOC, + SXE2_CMD_ACL_SCEN_DEALLOC, + SXE2_CMD_ACL_LUT_ENTRY_SET, + SXE2_CMD_ACL_ACT_ENTRY_SET, + SXE2_CMD_ACL_TRACE_TRIGGER, + SXE2_CMD_ACL_TRACE_RECORDER, + SXE2_CMD_ACL_DFX_INFO_GET, + + SXE2_CMD_MAX = 0xFFFF, +}; + +enum sxe2_drv_event_code { + SXE2_EVENT_CODE_INVAL = 0, + SXE2_EVENT_CODE_AUTO_LOG, + SXE2_EVENT_CODE_MIB_NOTIFY, + SXE2_EVENT_CODE_SFP_WHITE_LIST, + SXE2_EVENT_CODE_SFP_TX_FAULT, + SXE2_EVENT_CODE_QSFP_TX_FAULT_COUNT, + SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY, + + SXE2_EVENT_CODE_MAX, + SXE2_EVENT_CODE_ALL = 255, +}; + +enum sxe2_desc_err_code { + SXE2_CMD_DESC_ERR_NONE = 0, + SXE2_CMD_DESC_ERR_DES_ERR, + SXE2_CMD_DESC_ERR_BUF_ERR, + SXE2_CMD_DESC_ERR_BUF_NUM_ERR, + SXE2_CMD_DESC_ERR_SRC_BUSY, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK2, + SXE2_CMD_DESC_ERR_SESSION_BUFFER_OV, + SXE2_CMD_DESC_ERR_CMD_BUFFER_OV, + SXE2_CMD_DESC_ERR_IN_OUT_LEN_LACK, + SXE2_CMD_DESC_ERR_UNKNOW_OPCODE, + SXE2_CMD_DESC_ERR_UNKNOW_CMD_TYPE, + SXE2_CMD_DESC_ERR_ADMINQ_STATE, + SXE2_CMD_DESC_ERR_FIND_JOB, + SXE2_CMD_DESC_ERR_NONE_START, + SXE2_CMD_DESC_ERR_JOB_DELIVERY, + SXE2_CMD_DESC_ERR_PF_FLR, + SXE2_CMD_DESC_ERR_OVER_FLOW, + SXE2_CMD_DESC_ERR_SEQ_ERR, + SXE2_CMD_DESC_ERR_NR, +}; + +enum sxe2_cmd_drv_err_code { + SXE2_CMD_DRV_SUCCESS = 0, + SXE2_CMD_DRV_HW_OP_ERR = 1024, + + SXE2_CMD_DRV_NO_FREE_VSI, + SXE2_CMD_DUMP_LOG_FAILED, + + SXE2_CMD_DRV_RXQ_CFG_FAIL, + SXE2_CMD_DRV_TXQ_EN_FAIL, + SXE2_CMD_DRV_TXQ_DISA_FAIL, + + SXE2_CMD_DRV_PFR_FAILED, + SXE2_CMD_DRV_VFR_FAILED, + SXE2_CMD_DRV_PARAM_INVALID, + SXE2_CMD_DRV_HW_RETURN, + SXE2_CMD_DRV_HW_TIMEOUT, + SXE2_CMD_DRV_HW_MISMATCH, + SXE2_CMD_DRV_HW_NOSPC, + SXE2_CMD_DRV_HW_EXIST, + SXE2_CMD_DRV_HW_HID_EXIST, + SXE2_CMD_DRV_HW_NOENT, + SXE2_CMD_DRV_FW_NOMEM, + SXE2_CMD_DRV_HW_NO_RES, + SXE2_CMD_DRV_TLV_ERROR, + SXE2_CMD_DRV_DCB_ERROR, + SXE2_CMD_DRV_LINK_REBUILD_FAILED, + + SXE2_CMD_DRV_UNSUPPORT, + SXE2_CMD_DRV_TXSCHED_CFG_FAILED, + SXE2_CMD_DRV_TXSCHED_TIMEOUT, + SXE2_CMD_DRV_TXSCHED_TEID_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_CHILDIDX_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_ALLOC_FAILED, + + SXE2_CMD_DRV_UDP_TUNNEL_WRONG_PORT, + + SXE2_CMD_DRV_NCD_UNSUPPORT, + SXE2_CMD_DRV_BFD_INTQ_NOP, + SXE2_CMD_DRV_BFD_FLOW_NOSPC, + SXE2_CMD_DRV_BFD_FLOW_HT_COLLISION, + + SXE2_CMD_DRV_LINK_UPDATE_FAILED, + SXE2_OPT_DEV_BUSY, + +}; + +enum sxe2_fwc_mapping_mode { + SXE2_MAPPING_CONTIG = 0, + SXE2_MAPPING_SCATTER, +}; + +enum sxe2_fwc_vsi_type_hw { + SXE2_VSI_HW_T_VF = 0, + SXE2_VSI_HW_T_VMDQ2 = 1, + SXE2_VSI_HW_T_PF = 2, + SXE2_VSI_HW_T_MNG = 3, +}; + +enum sxe2_cmd_buffer_st { + SXE2_CMD_BUFFER_ST_NORMAL = (s16)0, + SXE2_CMD_BUFFER_ST_OVERFLOW, + SXE2_CMD_BUFFER_ST_SEQ_ERR, + SXE2_CMD_BUFFER_ST_NR, +}; + +enum sxe2_cmd_queue_stats_map_add { + SXE2_CMD_QUEUE_STATS_MAP_ADD_SUCCEED = 0, + SXE2_CMD_QUEUE_STATS_MAP_ADD_FAIL = 1, +}; + +enum sxe2_weight_type { + SXE2_UNKNOWN_TYPE = 0, + 
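+	/* CIR/PIR follow common QoS naming: committed (guaranteed) vs. peak (ceiling) information rate */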
SXE2_CIR_WEIGHT, + SXE2_PIR_WEIGHT, +}; + +struct sxe2_cmd_desc { + __le16 flags; + __le16 opcode; + __le16 data_len; + __le16 ret; + u8 checksum; + u8 rsvd[3]; + __le32 custom1; + __le32 custom2; + __le32 custom3; + __le32 buf_addr_h; + __le32 buf_addr_l; +}; + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F +struct sxe2_cmd_hdr { + __le32 magic_code; + __le16 tran_in_len; + __le16 tran_out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 no_resp; + u8 resv1; + __le16 cur_in_len; + u8 resv[24]; + u8 body[]; +}; + +struct sxe2_drv_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 mac_id; + u8 mac_id_valid; + u8 resv[12]; + u8 body[]; +}; + +struct sxe2_channel_handshake_req { + __le32 drv_ver; + u8 drv_mode; + u8 resv[3]; + __le64 timestamp; +}; + +struct sxe2_channel_handshake_resp { + __le32 fw_ver; +}; + +struct sxe2_fwc_serial_num_resp { + u8 serial_num[SXE2_SERIAL_NUM_LEN]; +}; + +struct sxe2_fwc_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_vf_caps { + __le16 cnt; + __le16 base_idx; + u8 sriov_cap; + u8 resv[27]; +}; + +struct sxe2_fwc_queue_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_msix_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_vsi_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_ppe_caps { + __le16 rss_lut_size; + __le16 fnav_space_bsize; + __le16 fnav_space_gsize; + __le16 fnav_counter_base; + __le16 fnav_counter_num; + __le16 bfd_sess_size; + __le16 rss_global_lut_base; + __le16 rss_global_lut_num; + u8 resv[16]; +}; + +struct sxe2_dev_common_caps { + u8 rdma_support; + u8 ipsec_support; + u8 macsec_support; + u8 rss_support; + u8 fnav_support; + u8 acl_support; + u8 switch_support; + u8 bfd_support; + u8 sdn_support; + u8 sdf_support; + u8 core_fs_support; + u8 switch_tag_support; + u8 mac_ts_support; + u8 resv[19]; +}; + +struct sxe2_fwc_dev_caps { + u8 pf_cnt; + u8 port_cnt; + __le16 vf_cnt; + struct sxe2_dev_common_caps dev_common_caps; + u8 pad[92]; +}; + +struct sxe2_common_caps { + u8 vmdq_support; + u8 ptp_owner; + u8 resv[30]; +}; + +struct sxe2_fwc_func_caps { + struct sxe2_fwc_vf_caps vf_caps; + struct sxe2_fwc_queue_caps tx_caps; + struct sxe2_fwc_queue_caps rx_caps; + struct sxe2_fwc_msix_caps msix_caps; + struct sxe2_fwc_vsi_caps vsi_caps; + struct sxe2_fwc_ppe_caps ppe_caps; + struct sxe2_common_caps common_caps; + u8 pf_idx; + u8 port_idx; + u8 mode; + u8 resv; +}; + +struct sxe2_fwc_sw_cfg_entry { + __le16 type; + __le16 idx; + __le16 sw_id; + __le16 pf_vf_id; + u8 resv[8]; +}; + +struct sxe2_fwc_phy_port_info { + u8 mac_to_phy_port[SXE2_MAC_NUM]; +}; + +struct sxe2_fwc_sw_cfg { + __le16 count; + __le16 remain; + struct sxe2_fwc_sw_cfg_entry caps_entry[]; +}; + +struct sxe2_fwc_tc_rxq_info { + __le16 pow; + __le16 offset; +}; + +struct sxe2_fwc_vsi_q_info { + u8 mapping_mode; + u8 resv[7]; + __le16 cnt; + u8 resv1[6]; + union { + __le16 base_idx; + __le16 q_id[SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT]; + }; + + struct sxe2_fwc_tc_rxq_info tc_q_map[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_fnav_info { + u8 fnav_enable; + u8 auto_evict; + u8 prog_enable; + u8 rsv0[1]; + __le16 gsize; + __le16 bsize; +}; + +struct sxe2_fwc_vsi_props { + u8 rxq_valid; + u8 
rsv[31]; + struct sxe2_fwc_vsi_q_info rxq_info; + struct sxe2_fwc_vsi_q_info txq_info; + struct sxe2_fwc_vsi_fnav_info fnav_info; + +}; + +struct sxe2_fwc_vsi_crud_info { + __le16 vsi_id; + __le16 vf_id; + u8 type; + u8 is_clear; + u8 resv[10]; + struct sxe2_fwc_vsi_props props; +}; + +struct sxe2_fwc_vsi_crud_resp { + __le16 vsi_id; + u8 resv[14]; +}; + +struct sxe2_fwc_ena_txq_entry { + __le16 q_id; + u8 resv[2]; + u8 txq_ctxt[SXE2_TXQ_CTXT_LEN]; +}; + +struct sxe2_fwc_ena_txqs { + __le16 cnt; + u8 resv[14]; + struct sxe2_fwc_ena_txq_entry txq[]; +}; + +struct sxe2_fwc_dis_txqs { + __le16 cnt; + u8 resv[2]; + __le16 q_id[]; +}; + +#define SXE2_TXSCHED_PROFIDX_INVALID U16_MAX +#define SXE2_TXSCHED_TEID_INVALID 0x7FFF +#define SXE2_TXSHCED_HW_DEFT_LAYER 2 +#define SXE2_TXSCHED_NODE_CHILD_MAX 8 + +#define SXE2_TXSCHED_MIN_BW 500 +#define SXE2_TXSCHED_MAX_BW 100000000 +#define SXE2_TXSCHED_BW_50G 50000000 +#define SXE2_TXSCHED_BW_25G 25000000 +#define SXE2_TXSCHED_BW_10G 10000000 + +#define SXE2_TXSCHED_DFLT_BW 0xFFFFFFFF +#define SXE2_TXSCHED_CLK_FREQ 500000000 +#define SXE2_TXSCHED_ARB_CREDIT_TOTAL 32768 +#define SXE2_TXSCHED_ARB_CREDIT_UNIT 328 +#define SXE2_TXSCHED_ARB_CREDIT_DFLT SXE2_TXSCHED_ARB_CREDIT_TOTAL + +#define SXE2_NODE_RL_TYPE_CIR BIT(0) +#define SXE2_NODE_RL_TYPE_EIR BIT(1) +#define SXE2_NODE_RL_TYPE_SRL BIT(2) + +#define SXE2_NODE_ARB_MODE_BPS 0 +#define SXE2_NODE_ARB_MODE_PPS 1 + +#define SXE2_NODE_STATUS_ENABLE 0x0 +#define SXE2_NODE_STATUS_SUSPEND 0x1 + +#define SXE2_TXSCHED_DFLT_RL_PROF_ID 0 + +enum sxe2_txsched_node_owner { + SXE2_TXSCHED_NODE_OWNER_LAN = 0, + SXE2_TXSCHED_NODE_OWNER_RDMA, + SXE2_TXSCHED_NODE_OWNER_USER, + SXE2_TXSCHED_NODE_OWNER_UNKOWN, +}; + +enum sxe2_txsched_hw_layer { + SXE2_TXSCHED_HW_LAYER_UNDEFINED = 0, + SXE2_TXSCHED_HW_LAYER_PORT, + SXE2_TXSCHED_HW_LAYER_TC, + SXE2_TXSCHED_HW_LAYER_SW_ENTRY, + SXE2_TXSCHED_HW_LAYER_4, + SXE2_TXSCHED_HW_LAYER_5, + SXE2_TXSCHED_HW_LAYER_6, + SXE2_TXSCHED_HW_LAYER_7, +}; + +struct sxe2_txsched_generic_props { + u8 layer_max; + + __le32 clk_freq; +}; + +struct sxe2_txsched_layer_props { + u8 hw_layer; + __le16 max_rl_cir_prof; + __le16 max_rl_pir_prof; + __le16 max_rl_srl_prof; +}; + +struct sxe2_fwc_txsched_cap_resp { + struct sxe2_txsched_generic_props generic; + + struct sxe2_txsched_layer_props layer[SXE2_TXSCHED_HW_LAYER_7]; +}; + +struct scbge_txsched_node_bw { + __le32 bw; + __le32 prof_id; + __le16 weight; + __le16 rsv; +}; + +struct sxe2_txsched_node_props { + u8 prio; + u8 status; + u8 arb_mode; + u8 rl_type; + enum sxe2_txsched_hw_layer hw_layer; + struct scbge_txsched_node_bw cir; + struct scbge_txsched_node_bw srlPir; + u8 adj_lvl; + u8 rsv[3]; +}; + +struct sxe2_txsched_node_info { + __le16 parent_teid; + __le16 node_teid; + __le32 sibling_idx; + struct sxe2_txsched_node_props data; +}; + +struct sxe2_fwc_txsched_dflt_topo_resp { + struct sxe2_txsched_node_info node_info[SXE2_TXSHCED_HW_DEFT_LAYER]; +}; + +struct sxe2_txsched_topo_upd_hdr { + __le16 parent_teid; + __le16 node_num; + __le16 start_child_idx; + __le16 rsv; +}; + +struct sxe2_fwc_txsched_del_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_move_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_query_node_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_txsched_query_node_resp { + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_txsched_pri_node_cfg_req { + __le16 parent_teid; + __le16 
node_teid; + u8 sibling_idx; + u8 prio; +}; + +struct sxe2_fwc_txsched_weight_node_cfg_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; + __le16 weight; + enum sxe2_weight_type type; +}; + +struct sxe2_fwc_txsched_add_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + struct sxe2_txsched_node_info node[]; +}; + +struct sxe2_fwc_txsched_add_nodes_resp { + __le32 add_node_num; + __le16 node_teid[SXE2_TXSCHED_NODE_CHILD_MAX]; + __le16 sibling_idx[SXE2_TXSCHED_NODE_CHILD_MAX]; +}; + +struct sxe2_fwc_txq_stats_map_pool_get_resp { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_pool_get_resp { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_pool_set_req { + u8 hw_index; + u32 cfg_info; +}; + +struct sxe2_fwc_rxq_stats_map_pool_set_req { + u8 hw_pool_idx; + u32 cfg_info; +}; + +struct sxe2_fwc_txq_stats_map_get_info_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_info_clear_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_info_clear_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_get_info_resp { + u32 txq_lan_pkt_cnt; + u32 txq_lan_byte_cnt; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_resp { + u64 rxq_lan_in_pkt_cnt; + u64 rxq_lan_in_byte_cnt; + + u64 rxq_fd_in_pkt_cnt; + + u64 rxq_mng_in_pkt_cnt; + u64 rxq_mng_in_byte_cnt; + u64 rxq_mng_out_pkt_cnt; +}; + +struct sxe2_fwc_rxlan_rxq_stats_map_get_info_resp { + u64 rxq_lan_out_pkt_cnt; + u64 rxq_lan_out_byte_cnt; +}; + +struct sxe2_txq_ctxt { + + __le16 q_idx_in_nic; + u8 rsv[2]; + + __le64 base_addr; + + __le16 cgd_idx; + __le16 vmvf_idx; + u8 port_idx; + u8 pf_idx; + u8 vmvf_type; + u8 tsyn_enable; + u8 alt_vlan; + u8 wb_mode; + u8 itr_notify_mode; + u8 legacy_enable; + u8 adv_sso; + u8 rsv1[3]; + + __le16 src_vsi; + __le16 cpuid; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; + u8 rsv2; + + __le16 q_idx_in_func; + u8 rd_desc_ro; + u8 wb_desc_ro; + __le32 qlen; + u8 ptp_en; + u8 rsv3[3]; + + u8 quanta_prof_idx; + + u8 is_tm; + u8 rsv4[2]; +}; + +struct sxe2_txsched_add_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_cfg_txq_req { + struct sxe2_txq_ctxt ctxt; + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_st_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; +}; + +struct sxe2_fwc_st_txq_resp { + u8 state; +}; + +struct sxe2_fwc_add_qset_req { + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_add_qset_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_cfg_txq_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_txsched_del_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + __le16 parent_teid; + __le16 sibling_idx; + __le16 node_teid; + __le16 rsv; +}; + +struct sxe2_txsched_rl_profile_pre_query_req { + u8 hw_layer; + u8 prof_type; + __le16 rsv; + u32 bw; +}; + +struct sxe2_txsched_rl_profile_pre_query_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_req { + u8 hw_layer; + u8 prof_type; + __le16 orig_prof_id; + + u32 bw; + __le16 teid; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_resp { + u8 hw_layer; + u8 prof_type; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_req { + u8 hw_layer; + u8 rsv; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_srl_req { + u8 hw_layer; + u8 attach; + __le16 prof_id; + __le16 teid; + 
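+	/* explicit padding: keeps the request at 8 bytes under #pragma pack(4) */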
__le16 rsv; +}; + +struct sxe2_txsched_tc_node { + __le16 teid; + __le16 parent_teid; + __le16 silbing_idx; + __le16 rsv; +}; + +struct sxe2_txsched_ets_query_rep { + u8 tc_cnt; + u8 rsv[3]; +}; + +struct sxe2_txsched_ets_query_resp { + u8 tc_cnt; + struct sxe2_txsched_tc_node tc_node[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_del_qset_req { + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_disable_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_txsched_suspend_node_req { + u8 port; + u8 tc; + __le16 node_teid; + __le16 parent_teid; + u8 child_idx; +}; + +struct sxe2_fwc_txsched_resume_node_req { + u8 port; + u8 tc; + struct sxe2_txsched_node_info node; +}; + +enum sxe2QosMode { + SXE2_QOS_MODE_VLAN = 0, + SXE2_QOS_MODE_DSCP, +}; + +enum sxe2_block_id { + SXE2_HW_BLOCK_ID_SWITCH = 0x1, + SXE2_HW_BLOCK_ID_ACL, + SXE2_HW_BLOCK_ID_RSS, + SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_BFD = SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_PE, + SXE2_HW_BLOCK_ID_MAX +}; + +enum sxe2_class_id { + SXE2_XLT0_CLASS_ID = 0x1, + SXE2_XLT2_CLASS_ID, + SXE2_EXTRACTOR_CLASS_ID, + SXE2_MAP_CLASS_ID, + SXE2_TCAM_CLASS_ID, + SXE2_RECIPE_CLASS_ID, +}; + +#define SXE2_CFG_ID(block_id, class_id) ((block_id) << 16 | (class_id)) +#define SXE2_CFG_CLASS_ID_MASK (0xFFFF) + +#define SXE2_CFG_GROUP_SIZE SXE2_DRV_CMD_MAX_MSG_SIZE + +enum { + SXE2_SWITCH_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_SWITCH, SXE2_XLT0_CLASS_ID), + SXE2_SWITCH_XLT2_CLASS_ID, + SXE2_SWITCH_EXTRACTOR_CLASS_ID, + SXE2_SWITCH_MAP_CLASS_ID, + SXE2_SWITCH_TCAM_CLASS_ID, + + SXE2_ACL_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_ACL, SXE2_XLT0_CLASS_ID), +}; + +enum sxe2_udp_tunnel_protocol { + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN = 0, + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN_GPE, + SXE2_UDP_TUNNEL_PROTOCOL_GENEVE, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_C = 4, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_U, + SXE2_UDP_TUNNEL_PROTOCOL_PFCP, + SXE2_UDP_TUNNEL_PROTOCOL_ECPRI, + SXE2_UDP_TUNNEL_PROTOCOL_MPLS, + SXE2_UDP_TUNNEL_PROTOCOL_NVGRE = 10, + SXE2_UDP_TUNNEL_PROTOCOL_L2TP, + SXE2_UDP_TUNNEL_PROTOCOL_TEREDO, + SXE2_UDP_TUNNEL_MAX, +}; + +struct sxe2_cfg_group_hdr { + __le16 class_cnt; + __le16 size; +}; + +struct sxe2_cfg_class { + __le32 class_id; + __le16 offset; + __le16 size; +}; + +struct sxe2_pipeline_group { + struct sxe2_cfg_group_hdr hdr; + struct sxe2_cfg_class class[]; +}; + +struct sxe2_es_fv { + u8 prot_id; + u8 rsv; + __le16 off; +}; + +struct sxe2_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; + __le64 rx_lan_engine_packets; +}; + +struct sxe2_pf_hw_stats { + + __le64 tx_frame_good; + __le64 rx_frame_good; + __le64 rx_crc_errors; + __le64 tx_bytes_good; + __le64 rx_bytes_good; + __le64 tx_multicast_good; + __le64 tx_broadcast_good; + __le64 rx_multicast_good; + __le64 rx_broadcast_good; + __le64 rx_len_errors; + __le64 rx_out_of_range_errors; + __le64 rx_symbol_err; + __le64 rx_pause_frame; + __le64 tx_pause_frame; + + __le64 rx_discards_phy; + __le64 tx_dropped_link_down; + __le64 tx_bytes_good_bad; + __le64 tx_frame_good_bad; + __le64 rx_size_64; + __le64 rx_size_65_127; + __le64 rx_size_128_255; + __le64 rx_size_256_511; + __le64 rx_size_512_1023; + __le64 rx_size_1024_1522; + __le64 rx_size_1523_max; + __le64 rx_illegal_bytes; + __le64 tx_unicast; + __le64 
tx_broadcast; + __le64 tx_multicast; + __le64 tx_vlan_packet_good; + __le64 tx_size_64; + __le64 tx_size_65_127; + __le64 tx_size_128_255; + __le64 tx_size_256_511; + __le64 tx_size_512_1023; + __le64 tx_size_1024_1522; + __le64 tx_size_1523_max; + __le64 tx_underflow_error; + __le64 rx_byte_good_bad; + __le64 rx_frame_good_bad; + __le64 rx_unicast_good; + __le64 rx_vlan_packets; + __le64 prio_xoff_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xoff_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_2_xoff[SXE2_MAX_USER_PRIORITY]; + __le64 rx_pause; + __le64 tx_pause; + __le64 rx_undersize_good; + __le64 rx_runt_error; + __le64 rx_oversize_good; + __le64 rx_jabbers; + __le64 rx_oversize_pkts_phy; + + __le64 rx_out_of_buffer; + __le64 rx_qblock_drop; + __le64 rx_discards_ips_phy; + + __le64 rx_pcs_symbol_err_phy; + __le64 rx_corrected_bits_phy; + + __le64 rx_err_lane_0_phy; + __le64 rx_err_lane_1_phy; + __le64 rx_err_lane_2_phy; + __le64 rx_err_lane_3_phy; + __le64 rx_prio_buf_discard[8]; + __le64 fnav_match; + __le64 spoof_mac_packets; + __le64 spoof_vlan_packets; +}; + +#define SXE2_FNAV_INVALID_STATS_IDX (0xFFFF) + +struct sxe2_fwc_pf_stats_req { + __le16 fnav_stats_idx; +}; + +struct sxe2_fwc_pf_stats_resp { + struct sxe2_pf_hw_stats stats; +}; + +struct sxe2_fwc_vsi_stats { + struct sxe2_vsi_hw_stats stats; + __le16 vsi_id; +}; + +struct sxe2_fwc_vsi_stats_req { + __le16 vsi_cnt; + __le16 vsi_ids[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_stats_resp { + __le16 vsi_cnt; + struct sxe2_fwc_vsi_stats vsi_stats[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_pxe_req { + u8 ena; +}; + +#define SXE2_EVENT_SUBSCRIBE_MAX_COUNT 32 + +struct sxe2_fwc_event { + u8 count; + u8 rsv[3]; + __le16 code[SXE2_EVENT_SUBSCRIBE_MAX_COUNT]; +}; + +struct sxe2_fwc_fw_log_ack { + __le32 result; +}; + +enum sxe2_default_recipe_id { + SXE2_DEFAULT_RECIPE_MAC = 0, + SXE2_DEFAULT_RECIPE_VLAN, + SXE2_DEFAULT_RECIPE_TX_ETYPE, + SXE2_DEFAULT_RECIPE_RX_ETYPE, + SXE2_DEFAULT_RECIPE_ALLMULTI, + SXE2_DEFAULT_RECIPE_PROMISC, + SXE2_DEFAULT_RECIPE_SRCVSI, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT, + SXE2_DEFAULT_RECIPE_SRCVSI_EXT, + SXE2_DEFAULT_RECIPE_MAX +}; + +union sxe2_switch_full_key_dw0 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv0 : 16; + u32 rid : 6; + u32 rsvd0 : 9; + u32 is_root : 1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 is_root : 1; + u32 rsvd0 : 9; + u32 rid : 6; + u32 fv0 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw1 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv2 : 16; + u32 fv1 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv1 : 16; + u32 fv2 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw2 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv4 : 16; + u32 fv3 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv3: 16; + u32 fv4: 16; +#endif + } field; +}; + +struct sxe2_fwc_switch_rule { + __le16 flag; + __le16 recipe_id; + __le32 act; + __le16 rule_id; + u8 recv[2]; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + u8 add_fkot; + u8 resv2[3]; +}; + +struct sxe2_fwc_switch_vsi_list { + __le16 flag; + __le16 vsi_list_id; + __le16 vsi_cnt; + __le16 vsi[]; +}; + +union sxe2_switch_large_action { + u32 val; + struct { + u32 rsv0 : 8; + u32 valid : 1; + u32 rsv1 : 2; + u32 list : 1; + u32 vsi_list : 10; + u32 fwd_vsi000 : 3; + u32 rsv2 : 8; + } reg; +}; + +struct 
sxe2_fwc_switch_large_action { + union sxe2_switch_large_action action[SXE2_LARGE_ACTION_COUNT_IN_GROUP]; + __le32 idx; +}; + +struct sxe2_fwc_switch_recipe { + u8 rid :6; + u8 rcp_rsv0 :1; + u8 is_root :1; + u8 lookup_index0 :7; + u8 lookup_index0_valid :1; + u8 lookup_index1 :7; + u8 lookup_index1_valid :1; + u8 lookup_index2 :7; + u8 lookup_index2_valid :1; + u8 lookup_index3 :7; + u8 lookup_index3_valid :1; + u8 lookup_index4 :7; + u8 lookup_index4_valid :1; + u8 join_priority; + u8 priority :3; + u8 need_pass_l2 :1; + u8 allow_pass_l2 :1; + u8 inverse_action :1; + u8 prune_idx :2; + __le32 default_action :19; + __le32 rcp_rsv1 :4; + __le32 default_action_valid:1; + __le32 rcp_rsv2 :8; + __le32 fv4_bitmask :16; + __le32 fv3_bitmask :16; + __le32 fv2_bitmask :16; + __le32 fv1_bitmask :16; + __le32 fv0_bitmask :16; + __le32 rcp_rsv3 :16; + __le16 ref_cnt; +}; + +struct sxe2_fwc_switch_profile_recipe_map { + __le16 profile_id; + __le32 map[2]; +}; + +struct sxe2_fwc_switch_share_id { + __le32 usage; + __le32 share_id[SXE2_MAX_NUM_RECIPES]; + __le32 bitmap[SXE2_MAX_NUM_RECIPES]; +}; + +struct sxe2_fwc_switch_rule_resp { + __le16 index; + u8 resv1[2]; + __le32 act; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + __le16 ref_cnt; + u8 resv2[2]; +}; + +struct sxe2_fwc_switch_vsi_list_resp { + __le16 index; + u8 resv1[2]; + __le32 vsi[SXE2_VSI_LIST_DAT_LEN]; + u8 resv2[4]; +}; + +struct sxe2_fwc_switch_mac_info { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fwc_switch_mac_info_resp { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fw_mtu_info { + __le32 mtu; + u8 is_set_hw; + u8 resv0; + __le16 resv1; +}; + +struct sxe2_fwc_switch_complex_rule { + __le16 flag; + __le32 act; + u8 priority; + u8 resv; + u8 add_fkot; + + __le16 word_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 profile_cnt; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_switch_complex_rule_resp { + __le32 act; + + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_og_trace_rcd { + u8 done; + u8 status; + u8 profile_id; + u8 resv; + __le16 fv[SXE2_SWITCH_FV_CNT]; +}; + +struct sxe2_recp_trace_rcd { + __le16 recipe_id; + u8 ht1_hit; + u8 ht2_hit; + u8 fkot_hit; + u8 kt_hit; + __le16 index; +}; + +struct sxe2_swe_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + struct sxe2_recp_trace_rcd recp[SXE2_PACKET_MAX_RECIPES]; +}; + +struct sxe2_rg_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + __le32 ppe_info[SXE2_PACKET_INFO_DWORD_CNT]; +}; + +struct sxe2_fwc_switch_trace_req { + u8 is_rx; + u8 resv[3]; +}; + +struct sxe2_fwc_switch_trace_resp { + struct sxe2_og_trace_rcd og; + struct sxe2_swe_trace_rcd swe; + struct sxe2_rg_trace_rcd rg; +}; + +struct sxe2_fwc_recipe_get_req { + __le16 recipe_id; + u8 resv[2]; +}; + +struct sxe2_fwc_recipe_get_resp { + u8 is_root; + u8 priority; + u8 is_inverse; + u8 resv; + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct 
sxe2_fwc_recipe_add_req { + u8 is_root; + u8 priority; + __le16 profile_cnt; + __le16 recipe_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct sxe2_fwc_recipe_add_resp { + __le16 recipe_cnt; + __le16 recipe_root; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_recipe_del_req { + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +enum sxe2_switch_dfx_stats_index { + SXE2_SW_DFX_PROFILE_ID_BYPASS = 0, + SXE2_SW_DFX_PROFILE_TCAM_HIT, + SXE2_SW_DFX_PROFILE_TCAM_MISS, + SXE2_SW_DFX_RX_FB_INPUT, + SXE2_SW_DFX_TX_PA_INPUT, + SXE2_SW_DFX_OG_PROCESS_RX, + SXE2_SW_DFX_OG_PROCESS_TX, + SXE2_SW_DFX_OUTPUT_TO_SWE, + SXE2_SW_DFX_OUTPUT_TO_RG, + SXE2_SW_DFX_MEMORY_HT1_IN, + SXE2_SW_DFX_MEMORY_HT1_OUT, + SXE2_SW_DFX_MEMORY_HT2_IN, + SXE2_SW_DFX_MEMORY_HT2_OUT, + SXE2_SW_DFX_MEMORY_KT_IN, + SXE2_SW_DFX_MEMORY_KT_OUT, + SXE2_SW_DFX_SWE_OG_IN, + SXE2_SW_DFX_SWE_TX_IN, + SXE2_SW_DFX_SWE_RX_IN, + SXE2_SW_DFX_SWE_OUTPUT_ACTION, + SXE2_SW_DFX_PIPE_HASH_MISS, + SXE2_SW_DFX_PIPE_HASH_HIT, + SXE2_SW_DFX_PIPE_KT_HIT, + SXE2_SW_DFX_PIPE_HI1_HIT, + SXE2_SW_DFX_PIPE_HI2_HIT, + SXE2_SW_DFX_PIPE_FKOT_HIT, + SXE2_SW_DFX_PIPE_HW_SEARCH_ERR, + + SXE2_SW_DFX_MAX, +}; + +struct sxe2_fwc_switch_dfx_stats { + __le32 stats[SXE2_SW_DFX_MAX]; +}; + +enum sxe2_ipsec_stats_index { + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC0, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC0, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC1, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC1, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC2, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC2, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC3, + 
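+	/* the remaining MAC3 counters repeat the per-MAC TX/RX layout of MAC0..MAC2 */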
SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC3, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC3, + + SXE2_IPSEC_STATS_MAX, +}; + +struct sxe2_ipsec_stats { + __le64 stats[SXE2_IPSEC_STATS_MAX]; +}; + +struct sxe2_fwc_vsi_vlan_filter { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_src_prune { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_loopback { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_spoofchk { + __le16 vsi_hw_id; + u8 mac_enable; + u8 vlan_enable; +}; + +struct sxe2_fwc_switch_dfx_irq { + u8 enable; + u8 resv[3]; +}; + +struct sxe2_rss_vsi_hctrl { + __le16 vsi_hw_id; + u8 hash_type; + u8 resv; +}; + +struct sxe2_rss_lut_cfg { + __le16 vsi_hw_id; + u8 lut_type; + u8 global_lut_id; + __le16 lut_size; + u8 resv[2]; + u8 lut[]; +}; + +struct sxe2_rss_core_lut_info { + u8 lut[SXE2_RSS_CORE_LUT_SIZE]; +}; + +struct sxe2_rss_hkey_cfg { + __le16 vsi_hw_id; + u8 resv[2]; + u8 key[]; +}; + +struct sxe2_rss_symm_fv { + u8 fv_idx : 5; + u8 rsv : 2; + u8 valid : 1; +}; + +struct sxe2_rss_symm_fv_cfg { + __le16 prof_id; + u8 rsv[2]; + struct sxe2_rss_symm_fv fv[SXE2_RSS_FV_CNT]; +}; + +struct sxe2_rss_trace_recorder { + u8 trace_status0; + u8 rsv0[3]; + __le32 profile_id0; + __le32 fv[SXE2_RSS_FV_TRACE_CNT]; + u8 trace_status1; + u8 rsv1[3]; + __le32 hash1; + u8 trace_status2; + u8 rsv2[3]; + __le32 hash2; + u8 profile_id2; + u8 bad_profile; + __le16 q_index; + u8 thread_id; + u8 rsv3[1]; + __le16 vsi; +}; + +struct sxe2_fwc_xlt2_entry { + __le16 vsi_hw_idx; + __le16 vsig; +}; + +struct sxe2_fwc_tcam_entry { + __le16 addr; + u8 key[SXE2_TCAM_KEY_LEN]; + u8 prof_id; + u8 rsv; +}; + +struct sxe2_fwc_es_entry { + u8 prof_id; + u8 cnt; + struct sxe2_es_fv fv[SXE2_FV_CNT_MAX]; +}; + +struct sxe2_fwc_prof_section { + u8 type; + u8 rsv[1]; + __le16 offset; + __le16 size; +}; + +struct sxe2_fwc_prof_buf { + __le16 entry_cnt; + __le16 data_end; + struct sxe2_fwc_prof_section sect[]; +}; + +struct sxe2_fwc_prof_pkg { + u8 blk; + u8 buf[]; +}; + +struct sxe2_fwc_tcam_idx { + u8 blk; + u8 rsv; + __le16 tcam_idx; +}; + +enum sxe2_fwc_tcam_action { + SXE2_FWC_TCAM_ACTION_ADD, + SXE2_FWC_TCAM_ACTION_DEL, +}; + +struct sxe2_fwc_tcam_info { + u8 action; + __le16 tcam_idx; +}; + +struct sxe2_fwc_tcam_idx_batch { + u8 blk; + u16 tcam_cnt; + struct sxe2_fwc_tcam_info tcam_info[]; +}; + +struct sxe2_fwc_prof_id { + u8 blk; + u8 rsv; + __le16 prof_id; +}; + +struct sxe2_fwc_mask_idx { + u8 blk; + u8 rsv; + __le16 mask_idx; +}; + +struct sxe2_fwc_mask_val { + u8 blk; + u8 rsv; + __le16 mask_idx; + __le16 fv_idx; + __le16 mask; +}; + +struct sxe2_fwc_mask_sel { + u8 blk; + u8 rsv; + __le16 prof_id; + __le32 mask_sel; +}; + +struct sxe2_fwc_fnav_kt_entry { + u8 is_add; + u8 prof_id; + u8 fdid_prio; + u8 toq_prio; + u8 drop; + u8 stat_ena; + u8 to_queue; + u8 inputset[SXE2_FNAV_INPUT_CNT * 2]; + u8 fd_space; + __le16 ori_vsi; + __le16 vsi; + __le16 flow_id; + __le16 qindex; + __le16 stat_cnt; + u8 rsv1[2]; + __le32 fdid; +}; + +struct sxe2_fwc_fnav_kt_resp { + __le32 
gcnt_pf; + __le32 bcnt_global; +}; + +struct sxe2_fnav_hit_info { + + __le32 hit_flg : 4; + __le32 ck1 : 13; + __le32 ht_index1 : 11; + __le32 ck2_3_0 : 4; + + __le32 ck2_12_4 : 9; + __le32 ht_index2 : 11; + __le32 ht1_avl : 8; + __le32 ht2_avl_3_0 : 4; + + __le32 ht2_avl_7_4 : 4; + __le32 kt_index : 15; + __le32 entry_vld : 1; + __le32 qindex : 11; + __le32 stat_0 : 1; + + __le32 stat_13_1 : 13; + __le32 stat_ena : 2; + __le32 evict_ena : 1; + __le32 toqueue : 3; + __le32 toqueue_prio : 3; + __le32 ad_drop : 1; + __le32 fdid_8_0 : 9; + + __le32 fdid_31_9 : 23; + __le32 fdid_prio : 3; + __le32 flow_id_5_0 : 6; + + __le32 flow_id_15_6 : 10; + __le32 ad_fd_vsi : 10; + __le32 gl_space : 1; + __le32 pf_space : 1; + __le32 vsi_space : 1; + __le32 ad2 : 4; + __le32 inset_511_507 : 5; + + __le32 inset_506_475; + + __le32 inset_474_443; + + __le32 inset_442_411; + + __le32 inset_410_379; + + __le32 inset_378_347; + + __le32 inset_346_315; + + __le32 inset_314_283; + + __le32 inset_282_251; + + __le32 inset_250_219; + + __le32 inset_218_187; + + __le32 inset_186_155; + + __le32 inset_154_123; + + __le32 inset_122_91; + + __le32 inset_90_59; + + __le32 inset_58_27; + + __le32 inset_26_0 : 27; + __le32 profile_id_4_0 : 5; + + __le32 profile_id_6_5 : 2; + __le32 rsv0 : 1; + __le32 vsi : 10; + __le32 rsv1 : 6; + __le32 fail_sts : 3; + __le32 cmd : 4; + __le32 thread_id_5_0 : 6; + + __le32 thread_id_6 : 1; + __le32 pf : 3; + __le32 vf_vm : 10; + __le32 function_type : 2; + __le32 bypass_ft : 1; + __le32 pcmd : 2; + __le32 comp_report : 2; + __le32 fd_vsi : 10; + __le32 comp_queue : 1; + + __le32 not_enabled : 1; + __le32 bad_profile_id : 1; + __le32 drop : 1; + __le32 round_drop : 1; + __le32 round_cnt : 4; + __le32 rsv2 : 24; +}; + +struct sxe2_fnav_addition_info { + + __le32 fd_profile_id : 7; + __le32 hit_flg : 4; + __le32 rlt_sel : 2; + __le32 dst_vsi : 10; + __le32 rlt_queue_8_0 : 9; + + __le32 rlt_queue_10_9 : 2; + __le32 rlt_toqueue : 3; + __le32 rlt_toqueue_pri : 3; + __le32 drop : 1; + __le32 cmd : 4; + __le32 bypass_absq : 1; + __le32 fd_search_ena : 1; + __le32 pkt_id : 7; + __le32 deflt_qindx_pri : 3; + __le32 sa_toqueue_pri : 3; + __le32 rsv : 3; + __le32 search_rss_fs_hit : 1; + + __le32 bypass_rss : 1; + __le32 rsv1 : 31; +}; + +struct sxe2_fnav_trace_recorder { + u8 trace_status0; + u8 trace_status1; + struct sxe2_fnav_hit_info hit_info; + struct sxe2_fnav_addition_info addition_info; + u8 trace_status2; +}; + +struct sxe2_fnav_glspace_cnt { + __le32 bcnt; + __le32 gcnt; +}; + +#define SXE2_FNAV_MAX_COUNTER_BANK_NUM (2) + +enum sxe2_fnav_counter_bank_type { + SXE2_FNAV_COUNTER_BANK_0, + SXE2_FNAV_COUNTER_BANK_1, + SXE2_FNAV_COUNTER_BANK_ALL, + SXE2_FNAV_COUNTER_BANK_MAX, +}; + +struct sxe2_fwc_fnav_stats_req { + u8 is_clear; + u8 bank_type; + __le16 counter_idx; +}; + +struct sxe2_fwc_fnav_stats_resp { + __le64 stats[SXE2_FNAV_MAX_COUNTER_BANK_NUM]; +}; + +struct sxe2_fwc_fnav_match_req { + __le16 vsi_id; + __le16 stat_idx; +}; + +struct sxe2_fwc_fnav_match_req_batch { + u8 is_clear; + u8 bank_type; + __le16 stat_cnt; + struct sxe2_fwc_fnav_match_req match_req[]; +}; + +struct sxe2_fwc_fnav_match_rsp_batch { + __le16 stat_cnt; + __le64 fnav_match[]; +}; + +struct sxe2_fwc_fnav_dlft_compq_req { + __le16 vsi_idx_in_dev; + __le16 rxq_idx_in_func; +}; + +struct sxe2_fnav_vsispace_cnt { + __le32 bcnt; + __le32 gcnt; + __le16 vsi_id; +}; + +struct sxe2_fnav_space_cnt { + __le32 bcnt_global; + __le32 gcnt_global; + __le32 bcnt_pf; + __le32 gcnt_pf; + __le32 bcnt_vsi; + __le32 gcnt_vsi; + __le16 
vsi_id; +}; + +struct sxe2_vf_fnav_clear_ctxt { + __le16 vf_indev; + u8 io_close; +}; + +enum sxe2_rxft_dbg_ppe_info_type { + SXE2_RXFT_PPE_INFO_TX_IN, + SXE2_RXFT_PPE_INFO_TX_EX, + SXE2_RXFT_PPE_INFO_RX_IN, + SXE2_RXFT_PPE_INFO_RX_EX, + SXE2_RXFT_PPE_INFO_LP_IN, + SXE2_RXFT_PPE_INFO_LP_EX, + SXE2_RXFT_PPE_INFO_TYPE_MAX, +}; + +struct sxe2_rxft_ppe_info { + struct { + __le32 data[SXE2_RXFT_PPE_INFO_REG_CNT]; + } info[SXE2_RXFT_PPE_INFO_TYPE_MAX]; +}; + +struct sxe2_rxq_ctxt { + __le64 base_addr; + __le16 depth; + + __le16 dbuff_len; + __le16 hbuff_len; + u8 hsplit_type; + u8 desc_type; + u8 crc_strip; + u8 l2tag1_show; + u8 hsplit_0; + u8 hsplit_1; + u8 inner_vlan_strip; + + u8 lro_enable; + u8 cpuid; + __le16 max_frame_size; + __le16 lro_desc_max; + u8 relax_data; + u8 relax_wb_desc; + u8 relax_rd_desc; + + u8 tphrdesc_enable; + u8 tphwdesc_enable; + u8 tphdata_enable; + u8 tphhead_enable; + + u8 low_desc_waterline; + __le16 vfid; + u8 pfid; + u8 vfen; + __le16 vsi_id; + + u8 pref_enable; + __le16 head; +}; + +struct sxe2_fwc_cfg_rxq_req { + u8 pf_idx; + __le16 idx_in_dev; + struct sxe2_rxq_ctxt rxq_ctxt; +}; + +struct sxe2_fwc_local_mib_set { + __le16 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_local_mib_get { + u8 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_fw_agent { + u8 enable; + u8 resv[3]; +}; + +#ifndef FW_LLDP_STATE +#define FW_LLDP_STATE +enum sxe2LldpStatus { + sxe2_lldp_enabled_rx_tx = 0, + sxe2_lldp_enabled_tx_only, + sxe2_lldp_enabled_rx_only, + sxe2_lldp_disabled, +}; +#endif + +struct sxe2_fwc_lldp_fw_agent { + u8 status; + u8 resv[3]; +}; + +struct sxe2_fwc_notify_lldp_fw_agent { + u8 stats; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_stats { + u8 rx_state; + u8 tx_state; + u8 lldp_enable; + u8 admin_status; + __le32 tx_failed; + __le32 tx_frames_out_total; + __le32 tx_lldpdu_length_errors; + __le32 rx_ageouts_total; + __le32 rx_frames_discarded_total; + __le32 rx_frames_in_errors_total; + __le32 rx_frames_in_total; + __le32 rx_tlvs_discarded_total; + __le32 rx_tlvs_unrecognized_total; +}; + +struct sxe2_fwc_lldp_mibs_info { + u8 count; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_mibs_dump_req { + u8 index; + u8 resv[3]; +}; + +struct sxe2_lldp_mibs_tl { + __le16 offset; + __le16 length; +}; + +struct sxe2_lldp_mibs_ets { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioTable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tsatable[SXE2_MAX_TRAFFIC_CLASS]; +}; + +struct sxe2_lldp_mibs_pfc { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcena; +}; + +struct sxe2_lldp_mibs_app { + __le16 protId; + u8 priority; + u8 selector; +}; + +struct sxe2_fwc_lldp_mibs_dump_resp { + u8 index; + u8 resv1[3]; + u8 buffer[SXE2_LLDP_FRAME_MAX_SIZE]; + __le16 size; + u8 num_apps; + u8 resv2[3]; + struct sxe2_lldp_mibs_ets ets_cfg; + struct sxe2_lldp_mibs_ets ets_rec; + struct sxe2_lldp_mibs_pfc pfc_cfg; + struct sxe2_lldp_mibs_app app_cfg[SXE2_DCBX_MAX_APPS]; +}; + +enum sxe2FlowCtrlMode { + SXE2_FC_MODE_DISABLE, + SXE2_FC_MODE_LFC, + SXE2_FC_MODE_PFC, + SXE2_FC_MDDE_COUNT, +}; + +struct sxe2_fwc_lfc_info { + u8 rx_en; + u8 tx_en; + u8 tc_num; + u8 fc_mode; + __le32 port_size; + __le32 high_water[SXE2_MAX_TRAFFIC_CLASS]; + __le32 low_water[SXE2_MAX_TRAFFIC_CLASS]; + __le16 pause_time[SXE2_MAX_TRAFFIC_CLASS]; + u8 priority; + u8 resv1; +}; + +struct sxe2_mdd_vf_req { + __le16 vf_idx; + u8 q_mapping_mode; + u8 reserve; +}; + +struct sxe2_fwc_mdd_req { + __le16 vf_cnt; + u8 mdd_check; + u8 reserve; + struct sxe2_mdd_vf_req vfs[]; +}; + +struct sxe2_mdd_vf_resp { + 
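/* one per-VF entry of the MDD query response; the mdd byte presumably + * flags a detected malicious-driver event on that VF (an assumption from + * the mdd_check and *_mdd_*_event fields, not stated in this patch) + */ +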
__le16 vf_idx; + u8 mdd; + u8 reserve; +}; + +struct sxe2_fwc_mdd_resp { + __le32 vf_mdd_tx_event; + __le32 pf_mdd_tx_event; + + u8 vf_mdd_rx_event; + u8 pf_mdd_rx_event; + + __le16 mdd_vf_cnt; + struct sxe2_mdd_vf_resp mdd_vfs[]; +}; + +struct sxe2_fwc_ptp_filter_addr { + u8 filter_type; + __le32 ipv4; + __le32 ipv6[4]; + __le32 mac[2]; +}; + +struct sxe2_fwc_ptp_correction { + __le32 ingress_corr_nanosec; + __le32 ingress_corr_subnanosec; + __le32 egress_corr_nanosec; + __le32 egress_corr_subnanosec; + __le32 ingress_sync_corr; + __le32 egress_sync_corr; +}; + +struct sxe2_fwc_ptp_init_req { + u8 sample_type; + u8 threshold; + struct sxe2_fwc_ptp_filter_addr filter_addr; + struct sxe2_fwc_ptp_correction corr; +}; + +enum sxe2_rx_timestamp_mode { + SXE2_RX_TIMESTAMP_MODE_PTP = 0, + SXE2_RX_TIMESTAMP_MODE_ALL_1024, + SXE2_RX_TIMESTAMP_MODE_ALL_2048, + SXE2_RX_TIMESTAMP_MODE_ALL_4096, + SXE2_RX_TIMESTAMP_MODE_ALL_8192, + SXE2_RX_TIMESTAMP_MODE_ALL_16384, + SXE2_RX_TIMESTAMP_MODE_MAX, +}; + +struct sxe2_fwc_ptp_mode_set_req { + u8 mode; +}; + +#define SXE2_IPSEC_KEY_LEN (32) +#define SXE2_IPV6_ADDR_LEN (4) +struct sxe2_fwc_ipsec_txsa_add_req { + __le32 mode; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_set_req { + __le32 mode; + __le32 sa_index; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_add_resp { + __le16 index; +}; + +struct sxe2_fwc_ipsec_rxsa_add_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 is_over_sdn; + u8 sdn_group_id; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_set_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 sa_index; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_add_resp { + u8 ip_id; + u8 udp_group_id; + __le16 sa_idx; +}; + +struct sxe2_fwc_ipsec_txsa_del_req { + __le16 sa_idx; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_del_req { + u8 ip_id; + u8 group_id; + __le16 sa_idx; + __le32 spi; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_switch_op_req { + u8 dir; + u8 op; + __le16 mac_id; +}; + +struct sxe2_fwc_ipsec_drv_clr_req { + u8 func_type; + u8 func_id; + u8 drv_id; + u8 rsv; +}; + +#define SXE2_IPSEC_WDRR_COUNT (4) +struct sxe2_fwc_ipsec_wdrr_req { + __le16 tx_wdrr[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_iqm[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_oqm[SXE2_IPSEC_WDRR_COUNT]; +}; + +struct sxe2_fwc_ipsec_capa_resq { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; + __le16 ip_id_cnt; + __le16 udp_group_cnt; +}; + +#define MACSEC_PN_LEN_MAX (2) +#define MACSEC_SALT_COUNT (3) +#define MACSEC_KEY_LEN (4) + +enum sxe2_macsec_validate_mode { + SXE2_MACSEC_VALIDATE_DISABLED = 0, + SXE2_MACSEC_VALIDATE_CHECK = 1, + SXE2_MACSEC_VALIDATE_STRICT = 2, + SXE2_MACSEC_VALIDATE_END, +}; + +struct sxe2_fw_macsec_sa { + u8 active; + u8 an_value; + __le32 pn[MACSEC_PN_LEN_MAX]; + __le32 ssci; + __le32 salt[MACSEC_SALT_COUNT]; + __le32 key[MACSEC_KEY_LEN]; +}; + +struct sxe2_fw_macsec_txsc { + u8 active; + u8 xpn; + u8 aisci; + u8 es; + u8 encrypt; + __le64 
sci; +}; + +struct sxe2_fw_macsec_rxsc { + u8 active; + u8 xpn; + u8 protect; + u8 validate_mode; + __le64 sci; +}; + +struct sxe2_fwc_ddp_state { + u8 act_pfid; + u8 pad; + __le16 ver; + __le32 state; +}; + +struct flm_link_cap { + u32 speed; + u32 fecMode; + + u8 an; + u8 lscEn; +}; + +enum sxe2_fec_mode { + SXE2_ETHTOOL_FEC_NONE = 0, + SXE2_ETHTOOL_FEC_OFF = 1, + SXE2_ETHTOOL_FEC_BASER = 2, + SXE2_ETHTOOL_FEC_RS = 3, + SXE2_ETHTOOL_FEC_AUTO = 15, + SXE2_ETHTOOL_FEC_MAX, +}; + +enum flm_link_speed_fec { + FEC_MOD_UNNKOW = 0x0, + FEC_MOD_10G = 0x7, + FEC_MOD_50G = 0xC, + FEC_MOD_25G = 0xF, + FEC_MOD_100G = 0XD, +}; + +enum flm_link_speed_info { + FEC_MOD_SPEED_UNNKOW = 0x0, + FEC_MOD_SPEED_10G = 0x2, + FEC_MOD_SPEED_25G = 0x4, + FEC_MOD_SPEED_50G = 0x8, + FEC_MOD_SPEED_100G = 0X10, +}; + +enum sxe2_speed_mode { + SXE2_ETHTOOL_SPEED_UNKNOWN = 0, + SXE2_ETHTOOL_SPEED_10GB = 10000, + SXE2_ETHTOOL_SPEED_25GB = 25000, + SXE2_ETHTOOL_SPEED_50GB = 50000, + SXE2_ETHTOOL_SPEED_100GB = 100000, + SXE2_ETHTOOL_SPEED_AUTO = 200000, +}; + +struct flm_link_fec_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct configure_fc { + u8 rx_en; + u8 tx_en; + u8 resv[2]; +}; + +struct configure_an { + u32 port; + u32 speed; + u32 fec_mode; + u32 lt_en; + struct configure_fc fc_mode; + u32 an_en; +}; + +enum sxe2_fw_connect_mode { + SXE2_FW_CONNECT_MODE_DAC, + SXE2_FW_CONNECT_MODE_AOC, + SXE2_FW_CONNECT_MODE_TRANSCEIVER, + SXE2_FW_CONNECT_MODE_BACKPLANE, + SXE2_FW_CONNECT_MDDE_UNKNOW, +}; + +enum flm_link_speed { + FLM_FW_SPEED_10G = 0, + FLM_FW_SPEED_25G = 1, + FLM_FW_SPEED_50G = 2, + FLM_FW_SPEED_100G = 3, + FLM_FW_SPEED_AUTO = 15, + FLM_FW_SPEED_MAX +}; + +struct flm_link_ret { + u32 speed; + u32 module_type; + u32 link_status; + u32 fec_mode; + struct configure_fc fc_mode; +}; + +struct ethtool_flm_link_info { + s32 speed; + u32 link_status; +}; + +struct flm_link_info_pasist { + u8 speed; + u8 link_status; + u8 fec_mode; + u8 resv; + struct configure_fc fc_mode; +}; + +struct sxe2_fw_loop_back_config { + u8 enable; + u8 resv[3]; +}; + +struct flm_link_info { + u32 port_num; + u32 is_link_up; + u32 module_type; + u32 is_an_enable; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct flm_link_config { + u32 port_num; + u32 speed; + u32 fec; + u32 port; +}; + +struct flm_link_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct flm_ethtool_get_link_req { + u32 port_num; +}; + +enum sxe2_support_fec_mode { + SXE2_SUPPORTR_FEC_NONE = 0, + SXE2_SUPPORTR_FEC_BASER = 1, + SXE2_SUPPORTR_FEC_RS = 2, + SXE2_SUPPORTR_FEC_AUTO = 15, + SXE2_SUPPORTR_FEC_MAX, +}; + +enum sxe2_advertis_fec_mode { + SXE2_ADVERTIS_FEC_NONE = 0, + SXE2_ADVERTIS_FEC_BASER = 1, + SXE2_ADVERTIS_FEC_RS = 2, + SXE2_ADVERTIS_FEC_AUTO = 15, + SXE2_ADVERTIS_FEC_MAX, +}; + +enum sxe2_support_speed_duplex_mode { + SXE2_SUPPORTR_SPEED_10G, + SXE2_SUPPORTR_SPEED_25G, + SXE2_SUPPORTR_SPEED_50G, + SXE2_SUPPORTR_SPEED_100G, + SXE2_SUPPORTR_SPEED_AUTO = 15, + SXE2_SUPPORTR_SPEED_MAX, +}; + +enum sxe2_support_duplex { + HALF_DUPLEX = 0, + FULL_DUPLEX = 1, + MAX_DUPLEX, +}; + +enum sxe2_duplex_type { + CURRENT_HALF_DUPLEX = 0, + CURRENT_FULL_DUPLEX = 1, + CURRENT_MAX_DUPLEX, +}; + +enum sxe2_support_media_type { + SXE2_MEDIA_UNKNOWN = 0, + SXE2_MEDIA_FIBER, + SXE2_MEDIA_BASET, + SXE2_MEDIA_BACKPLANE, + SXE2_MEDIA_DA, +}; + +enum sxe2_support_pause_frame { + SCGEB_EN_TX_LINK_PAUSE, + SCGEB_EN_RX_LINK_PAUSE, + SCGEB_EN_TX_RX_LINK_PAUSE, + SCGEB_DIS_EN_LINK_PAUSE, +}; + +enum sxe2_an_status { + SXE2_AN_ENABLE = 0, + 
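/* these states appear to mirror the IEEE 802.3 Clause 73 + * auto-negotiation arbitration state machine + */ +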
SXE2_AN_TRANSMIT_DISABLE = 1, + SXE2_AN_ABILITY_DETECT = 2, + SXE2_AN_ACKNOWLEDGE_DETECT = 3, + SXE2_AN_COMPLETE_ACKNOWLEDGE = 4, + SXE2_AN_NEXT_PAGE_WAIT = 5, + SXE2_AN_LINK_STATUS_CHECK = 6, + SXE2_AN_PARALLET_DETECT_FAULT = 7, + SXE2_AN_GOOD_CHECK = 8, + SXE2_AN_GOOD = 9, +}; + +struct sxe2_pause_publicity_ability { + u8 bit_pause; + u8 bit_asym; + u8 resv[2]; +}; + +struct sxe2_local_suppet_advertis_an_en { + u8 suppert_an; + u8 advertis_an; + u8 resv[2]; +}; + +struct sxe2_peer_suppet_an_en { + u8 suppert_an; + u8 resv[3]; +}; + +enum sxe2_current_media_type { + CURRENT_MEDIA_UNKNOWN = 0, + CURRENT_MEDIA_FIBER, + CURRENT_MEDIA_BASET, + CURRENT_MEDIA_BACKPLANE, + CURRENT_MEDIA_DA, +}; + +struct sxe2_current_an_en { + u8 current_an; + u8 resv[3]; +}; + +enum flm_fec_mode { + FLM_FEC_NONE = 0, + FLM_FEC_BSFEC = 1, + FLM_FEC_528 = 2, + FLM_FEC_544 = 3, + FLM_FEC_AUTO = 15, + FLM_FEC_MAX +}; + +enum flm_link_status { + FLM_PORT_DOWN = 0, + FLM_PORT_UP = 1, + FLM_PORT_MAX = 2 +}; + +struct fec_ability_supported { + u32 fec_br : 1; + u32 fec_528 : 1; + u32 fec_544 : 1; + u32 rec : 29; +}; + +struct spec_entry { + u8 spec_id; + u8 reserved[3]; + enum flm_link_speed speed; + s8 spec_name[16]; +}; + +struct optical_module { + char vendor[SXE2_FLM_VENDOR_LEN]; + char vendor_pn[SXE2_FLM_VENDOR_PN_LEN]; + u8 module_type; + u8 current_connection; +}; + +struct optical_warning_info { + u8 vendor[SXE2_HOST_FLM_VENDOR_LEN]; + u8 vendor_pn[SXE2_HOST_FLM_VENDOR_PN_LEN]; + bool is_warning; +}; + +struct __an_mode { + u32 pause; + u32 speed_ability_10Gkr; + u32 speed_ability_25Gkrcr; + u32 speed_ability_25Gkrcr_s; + u32 speed_ability_100Gcr4; + u32 speed_ability_100Gkr4; + u32 fec_ability_10g; + + u32 fec_en_10g; + u32 fec_bsfec_25g; + u32 fec_rs528_25g; + u8 Consortium_25g_50g_en; +}; + +struct __an_np_mode { + u32 speed_ability_25Gkr; + u32 speed_ability_25Gcr; + u32 speed_ability_50Gkr2; + u32 speed_ability_50Gcr2; + u32 fec_ability_rs528; + u32 fec_ability_bsfec; + u32 fec_en_rs528; + u32 fec_en_bsfec; +}; + +struct __an_orig_speed_fec { + u32 orig_speed; + u32 orig_fec; +}; + +struct sxe2_an_publicity { + + struct __an_mode an_mode; + + struct __an_np_mode an_np_mode; +}; + +struct support_speed_ability_mode { + u32 ability_speed_25Gkr; + u32 ability_speed_25Gcr; + u32 ability_speed_50Gkr2; + u32 ability_speed_50Gcr2; + u32 ability_speed_10Gkr; + u32 ability_speed_25Gkrcr; + u32 ability_speed_25Gkrcr_s; + u32 ability_speed_100Gcr4; + u32 ability_speed_100Gkr4; + u32 ability_speed_100Gsr4; +}; + +struct sxe2_fwc_link_state_resp { + u8 link; + u8 resv[3]; +}; + +struct flm_ethtool_get_link_resp { + u8 specs_list[32]; + u32 sxe2_ana_fsm; + struct optical_module optical_module; + struct configure_fc configed_pause_result; + struct configure_fc partner_pause_result; + struct sxe2_pause_publicity_ability local_pause; + + struct sxe2_local_suppet_advertis_an_en local_an_en; + enum flm_fec_mode local_fec_mode; + struct sxe2_pause_publicity_ability partner_pause; + struct sxe2_peer_suppet_an_en partner_an_en; + enum sxe2_duplex_type support_duplx; + enum sxe2_current_media_type current_media; + struct sxe2_current_an_en current_an_en; + struct fec_ability_supported advertis_fec; + struct fec_ability_supported partner_fec; + struct sxe2_an_publicity an_publicity; +}; + +struct sxe2_msg_ethtool_info { + struct flm_ethtool_get_link_resp cfg; + struct support_speed_ability_mode ability; + u8 usr_link_speed; +}; + +struct sxe2_fwc_udp_tunnel_ref_add_req { + u8 type; + __le16 port; +}; + +struct 
sxe2_fwc_udp_tunnel_ref_delete_req { + u8 type; + u8 clear; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_req { + u8 type; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_resp { + u8 type; + u8 enable; + u8 dst; + u8 src; + __le16 port; + u8 used; + u8 rsvd; +}; + +struct sxe2_fw_ncd_core_num_config_req { + u8 core_num; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_core_pri_queue { + u8 core_id; + u8 pri; + __le16 queue_id; +}; + +struct sxe2_fw_ncd_switch_tag_req { + u8 loc; + u8 len; + u8 en; + u8 mac_id; +}; + +struct sxe2_fw_ncd_switch_tag_resp { + u8 loc; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_req { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_resp { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdf_req { + __le16 eth_type; + __le16 mask; + u8 en; + u8 resv; +}; + +struct sxe2_fw_ncd_sdf_resp { + __le16 eth_type; + __le16 mask; +}; + +struct sxe2_fw_ncd_sdn_udp_req_resp { + __le16 start_port; + __le16 end_port; + u8 ph_len; + u8 udp_grp_id; + u8 used_count; + u8 resv; +}; + +struct sxe2_fw_ncd_sdn_req_resp { + union { + __le32 ipv4_addr; + __le32 ipv6_addr[4]; + } dest_addr; +__le16 used_count; + u8 is_ipv4; + u8 udp_grp_id; + u8 ip_id; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_req { + __le16 udp_port; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_resp { + u8 find; + u8 udp_grp_id; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_udp_req { + __le32 port_bmp; + __le16 start_port; + u8 udp_grp_id; + u8 resv; +}; + +struct sxe2_fw_ncd_app_port_req { + u8 idx; + u8 is_tcp; + u8 sport_en; + u8 dport_en; +}; + +struct sxe2_fw_ncd_pkt_pri_req { + u8 idx; + u8 pri; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_udf_capa_get_resp { + __le32 sdn_ip_addr; + __le32 sdn_udp_ipsec_bm; + __le16 sdn_udp_ipsec_sp; + __le16 sdn_udp_start_port; + __le16 sdn_udp_end_port; + __le16 sdf_eth_type; + __le16 sdf_mask; + u8 sdf_en; + u8 switch_tag_en; + u8 switch_tag_len; + u8 switch_tag_loc; + u8 txlen_adj_len[4]; + u8 sdn_udp_ph_len; + u8 sdn_ip_type; + u8 sdn_ip_udp_grp_id; + u8 spec_proto_port_type; + u8 spec_ptype_pri_level; + u8 resv; +}; + +#define BFD_INTQ_CMD_SRC_IRQ (0) +#define BFD_INTQ_CMD_SRC_POLLING (1) +#define BFD_INTQ_CMD_BUF_LEN (64) + +struct sxe2_fwc_bfd_config_set_req { + __le16 max_sess; + u8 scan_interval; + u8 bfd_en; +}; + +struct sxe2_fwc_bfd_intq_info_get_req { + u8 src; + u8 sess_num; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_intq_info_get_resp { + u8 buf_empty; + u8 valid_len; + u8 resv[2]; + struct { + __le16 sess_id; + u8 reason; + u8 sess_state; + } data[BFD_INTQ_CMD_BUF_LEN]; +}; + +struct sxe2_fwc_bfd_kt_entry { + u8 is_add; + u8 prof_id; + u8 rsv[2]; + u8 inputset[SXE2_BFD_FV_CNT_MAX * 2]; + __le16 vsi; + __le16 sess_id; + __le32 fdid; +}; + +struct sxe2_fwc_bfd_kt_entry_resp { + __le16 kt_index; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_sess_cfg_set_req { + __le16 sess_id; + u8 valid; + u8 sess_mode; + __le16 kt_index; + u8 mac_id; + u8 tc_id; + u8 tx_en; + u8 rx_en; + __le16 tx_interval; + __le32 rx_interval; + __le16 ppt_id; + u8 state; + u8 rsv; +}; + +struct sxe2_fwc_bfd_sess_state_get_req { + __le16 sess_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_bfd_sess_state_get_resp { + u8 state; + u8 aging_state; + __le16 prof_id; + __le16 rx_cnt; + __le16 tx_cnt; +}; + +struct sxe2_fwc_bfd_capability_get_resp { + __le16 max_sess; + __le16 max_package_profile; + __le16 min_scan_interval; + __le16 bfde_vsi; + __le16 max_pkt_buf; + __le16 max_edit_cmd; + __le16 max_csum_desc; + u8 rsv[2]; +}; + +#define 
SXE2_BYTES_PER_PKT_BUF_ENT (32) +#define SXE2_PKT_BUF_MAX_PER_PROF (16) +#define SXE2_PKT_BUF_SIZE_MAX \ + (SXE2_BYTES_PER_PKT_BUF_ENT * SXE2_PKT_BUF_MAX_PER_PROF) + +#define SXE2_BYTES_PER_EDIT_CMD (8) +#define SXE2_EDIT_CMD_PER_ENT (4) +#define SXE2_EDIT_CMD_MAX (16) +#define SXE2_EDIT_CMD_ENT_MAX (SXE2_EDIT_CMD_MAX / SXE2_EDIT_CMD_PER_ENT) +#define SXE2_BYTES_PER_EDIT_CMD_ENT (SXE2_BYTES_PER_EDIT_CMD * SXE2_EDIT_CMD_PER_ENT) +#define SXE2_EDIT_CMD_SIZE_MAX (SXE2_EDIT_CMD_MAX * SXE2_BYTES_PER_EDIT_CMD) + +struct sxe2_bfd_edit_cmd_req { + struct { + u8 code; + u8 size; + __le16 loc; + } instr[SXE2_EDIT_CMD_PER_ENT]; +}; + +struct sxe2_fwc_bfd_pack_prof_set_req { + __le16 prof_id; + u8 rsv[2]; + + struct { + __le16 pkt_addr; + u8 pkt_len; + u8 pkt_lbo; + u8 data[SXE2_PKT_BUF_SIZE_MAX]; + } pkt_buffer; + + struct { + __le16 edit_cmd_addr; + u8 edit_cmd_len; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_MAX]; + } edit_cmd; + + __le16 csum_desc_addr; + u8 csum_desc_len; + u8 rsv0; + struct { + __le16 start_addr; + __le16 csum_loc; + __le16 csum_len; + u8 result_negate; + u8 rsv1; + __le32 chk_data; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_PER_ENT]; + } csum_desc[2]; +}; + +struct sxe2_fwc_bfd_meta_key_len_req { + __le16 prof_id; + __le16 key_len; +}; + +struct sxe2_tx_fault_count_mib { + u64 tx_fault_count; +}; + +struct single_link_result { + u32 port_num; + u32 is_link_up; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct configure_fixed { + u32 port; + u32 speed; + u32 fec_mode; + u32 port_mode; + u32 an_en; + u32 lt_en; + struct configure_fc fc_mode; + s32 pcsRet; +}; + +struct all_link_status { + u32 dut_status; + u32 sb_status; +}; + +struct sxe2_fwc_txpa_dfx { + u32 txpa_in_all; + u32 txpa_out_all; + u32 txpa_in_drop; + u32 txpa_out_drop; + u32 txpa_in_err; + u32 txpa_out_err; +}; + +struct sxe2_fwc_txfb_dfx { + u32 txfb_in_all; + u32 txfb_in_drop; + u32 txfb_out_all; + u32 txfb_out_drop; + u32 txfb_internal_drop; +}; + +struct sxe2_fwc_rxpa_dfx { + u32 rxpa_in_all; + u32 rxpa_out_all; + u32 rxpa_in_drop; + u32 rxpa_out_drop; + u32 rxpa_in_err; + u32 rxpa_out_err; +}; + +struct sxe2_fwc_rxfb_dfx { + u32 rxfb_tx_in_all; + u32 rxfb_rx_in_all; + u32 rxfb_tx_in_drop; + u32 rxfb_rx_in_drop; + u32 rxfb_out_all; + u32 rxfb_out_drop; + u32 rxfb_internal_drop; +}; + +struct sxe2_fwc_switch_dfx { + u32 tx_all; + u32 tx_drop; + u32 rx_all; + u32 rx_drop; +}; + +struct sxe2_fwc_rxft_dfx { + u32 tx_in_all; + u32 tx_in_drop; + u32 tx_out_all; + u32 tx_out_drop; + u32 rx_in_all; + u32 rx_in_drop; + u32 rx_out_all; + u32 rx_out_drop; + u32 lp_in_all; + u32 lp_in_drop; + u32 lp_out_all; + u32 lp_out_drop; +}; + +struct sxe2_fwc_ppe_dfx { + struct sxe2_fwc_txpa_dfx txpa[4]; + struct sxe2_fwc_txfb_dfx txfb; + struct sxe2_fwc_rxpa_dfx rxpa[4]; + struct sxe2_fwc_rxfb_dfx rxfb; + struct sxe2_fwc_switch_dfx sw; + struct sxe2_fwc_rxft_dfx rxft; +}; + +#define SXE2_ACL_LUT_ENTRY_WIDTH (5) +#define SXE2_ACL_ACTION_TCAM_CNT (16) +#define SXE2_ACL_ACTION_MEM_CNT (20) +#define SXE2_ACL_ACTION_NUM_PER_ENTRY (2) +#define SXE2_ACL_ACTION_TCAM_DEPTH (512) +#define SXE2_ACL_ACTION_MEM_DEPTH (512) + +struct sxe2_fwc_acl_lut_alloc_req { + __le16 width; + __le16 depth; + u8 act_pairs_per_entry; + + u8 concurr; + u8 num_dependent_alloc_ids; + __le16 alloc_ids[SXE2_ACL_ACTION_TCAM_CNT - 1]; +}; + +struct sxe2_fwc_acl_lut_alloc_resp { + + __le16 alloc_id; + + __le16 first_entry; + __le16 last_entry; + + u8 first_tcam; + u8 last_tcam; + + u8 act_mem[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct 
sxe2_fwc_acl_lut_dealloc_req { + __le16 alloc_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_sel_base_req { + __le16 prof_id; + + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_entry_data { + struct { + u8 val[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; + u8 reserved[2]; + } entry_key, entry_key_invert; +}; + +struct sxe2_fwc_acl_lut_entry_set_req { + u8 tcam_idx; + __le16 entry_idx; + u8 rsv; + + struct sxe2_acl_entry_data data; +}; + +struct sxe2_acl_act_entry_data { + u8 prio; + u8 mdid; + __le16 value; +}; + +struct sxe2_fwc_acl_act_entry_set_req { + u8 act_mem_idx; + __le16 act_entry_idx; + u8 rsv; + + struct sxe2_acl_act_entry_data data[SXE2_ACL_ACTION_NUM_PER_ENTRY]; +}; + +struct sxe2_fwc_acl_scen_alloc_req { + struct { + u8 tcam_select[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; +#define SXE2_ACL_ALLOC_SCEN_START_CMP BIT(0) +#define SXE2_ACL_ALLOC_SCEN_START_SET BIT(1) + u8 start_cmp_set; + u8 rsv; + } tcam_cfg[SXE2_ACL_ACTION_TCAM_CNT]; + +#define SXE2_ACL_ACT_MEM_EN BIT(4) + u8 act_mem_cfg[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct sxe2_fwc_acl_scen_alloc_resp { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_scen_dealloc_req { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_querey_req { + __le16 prof_id; +}; + +struct sxe2_fwc_acl_prof_querey_resp { + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_hit_info { + + __le32 profile_id : 7; + __le32 rsv0 : 25; + + __le32 fv1 : 16; + __le32 fv0 : 16; + + __le32 fv3 : 16; + __le32 fv2 : 16; + + __le32 fv5 : 16; + __le32 fv4 : 16; + + __le32 fv7 : 16; + __le32 fv6 : 16; + + __le32 fv9 : 16; + __le32 fv8 : 16; + + __le32 fv11 : 16; + __le32 fv10 : 16; + + __le32 fv13 : 16; + __le32 fv12 : 16; + + __le32 fv15 : 16; + __le32 fv14 : 16; + + __le32 fv17 : 16; + __le32 fv16 : 16; + + __le32 fv19 : 16; + __le32 fv18 : 16; + + __le32 fv21 : 16; + __le32 fv20 : 16; + + __le32 fv23 : 16; + __le32 fv22 : 16; + + __le32 fv25 : 16; + __le32 fv24 : 16; + + __le32 fv27 : 16; + __le32 fv26 : 16; + + __le32 fv29 : 16; + __le32 fv28 : 16; + + __le32 fv31 : 16; + __le32 fv30 : 16; +}; + +struct sxe2_acl_dfx_info { + + __le32 og_inbuf_hdr_cnt; + __le32 og_inbuf_info_cnt; + __le32 og_proc_hdr_cnt; + __le32 og_proc_info_cnt; + __le32 og_to_engine_cnt; + __le32 og_in_rg_cnt; + __le32 og_out_rg_cnt; + __le32 sel_base_cnt; + __le32 key_gen_cnt; + __le32 key_gen_to_lkt_cnt; + __le32 act_mem_cnt; + __le32 osc_act_cnt; + __le32 osc_pkt_cnt; + __le32 acl_rxft_cnt; + __le32 acl_recv_drop_cnt; + __le32 acl_action_drop_cnt; + __le32 acl_vsi_disable_drop_cnt; + __le32 prfl_tcam_hit_cnt; + __le32 prfl_tcam_miss_cnt; + __le32 prfl_tcam_bypss_cnt; + __le32 act_tcam_hit_cnt[16]; + __le32 act_tcam_miss_cnt[16]; + + __le16 act_idx_first[16]; + __le16 act_idx_last[16]; + __le32 act_key_first_low[16]; + __le32 act_key_first_high[16]; + __le32 act_key_last_low[16]; + __le32 act_key_last_high[16]; + + __le64 key_first; + __le64 key_last; + + u8 first_prfl_id; + u8 last_prfl_id; + u8 first_scen_id; + u8 last_scen_id; + __le16 first_prfl_tcam_idx; + __le16 last_prfl_tcam_idx; + + __le16 first_cascade; + __le16 last_cascade; + __le16 first_stack; + __le16 last_stack; + __le16 first_tcam_en; + __le16 last_tcam_en; +}; + +struct sxe2_acl_trace_recorder { + u8 trace_status0; + u8 trace_status2; + u8 rsv[2]; + struct sxe2_acl_hit_info hit_info; +}; + +struct sxe2_vf_queue_info { + __le16 rxq_base; + __le16 rxq_cnt; + 
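/* TX ring range assigned to the VF, mirroring the RX pair above */ +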
__le16 txq_base; + __le16 txq_cnt; +}; + +struct sxe2_fwc_vf_queue_info { + u8 pf_id; + u16 vf_cnt; + u8 rsv[1]; + struct sxe2_vf_queue_info queue_info[]; +}; + +#pragma pack() + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ddp_common.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ddp_common.h new file mode 100644 index 0000000000000000000000000000000000000000..ca19af582abb27dc62007958ebb64dd6c36c79eb --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ddp_common.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_ddp_common.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DDP_COMMON_H_ +#define _SXE2_DDP_COMMON_H_ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include <linux/types.h> +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_type.h" +#include "sxe2_common.h" +#endif + +#define SXE2_DDP_DRV_VER_MAJ 1 +#define SXE2_DDP_DRV_VER_MNR 0 + +#define SXE2_DDP_FW_VER_MAJ 1 +#define SXE2_DDP_FW_VER_MNR 0 + +enum sxe2_ddp_error { + + SXE2_DDP_PKG_SUCCESS = 0, + + SXE2_DDP_PKG_ALREADY_LOADED = 1, + + SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED = 2, + + SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = 3, + + SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED = 4, + + SXE2_DDP_PKG_FW_MISMATCH = 5, + + SXE2_DDP_PKG_INVALID_FILE = 6, + + SXE2_DDP_PKG_FILE_VERSION_TOO_HIGH = 7, + + SXE2_DDP_PKG_FILE_VERSION_TOO_LOW = 8, + + SXE2_DDP_PKG_NO_SEC_MANIFEST = 9, + + SXE2_DDP_PKG_MANIFEST_INVALID = 10, + + SXE2_DDP_PKG_BUFFER_INVALID = 11, + + SXE2_DDP_PKG_BUSY = 12, + + SXE2_DDP_PKG_ERR = 13, +}; + +enum sxe2_ddp_state { + SXE2_DDP_STATE_UNINIT, + SXE2_DDP_STATE_PROC, + SXE2_DDP_STATE_FINISH, + SXE2_DDP_STATE_ERROR, + SXE2_DDP_STATE_INVALID = 0xFFFFFFFF, +}; + +struct sxe2_ddp_pkg_ver { + __le16 major; + __le16 minor; +}; + +union sxe2_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct sxe2_device_id_entry { + union sxe2_device_id device; + union sxe2_device_id sub_device; +}; + +struct sxe2_pkg_hdr { + struct sxe2_ddp_pkg_ver pkg_drv_ver; + struct sxe2_ddp_pkg_ver pkg_fw_ver; + struct sxe2_device_id_entry dev_vend_id; + __le32 seg_count; + + __le32 seg_offset[]; +}; + +#define SEGMENT_SIGN_TYPE_NONE 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 +#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 + +struct sxe2_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_SXE2_DDP 0x00000010 +#define SEGMENT_TYPE_SXE2_RUN_TIME_CFG 0x00000020 + __le32 seg_id; + __le32 seg_type; + __le32 seg_size; +}; + +struct sxe2_buf { +#define SXE2_PKG_BUF_SIZE 4096 + u8 buf[SXE2_PKG_BUF_SIZE]; +}; + +struct sxe2_buf_table { + __le32 buf_count; + struct sxe2_buf buf_array[]; +}; + +struct sxe2_seg { + struct sxe2_generic_seg_hdr hdr; + u8 rsvd[8]; + struct sxe2_buf_table buf_table; +}; + +#define SXE2_MIN_S_OFF 12 +#define SXE2_MAX_S_OFF 4095 +#define SXE2_MIN_S_SZ 1 +#define SXE2_MAX_S_SZ 4084 +#define SXE2_MIN_CFG_SZ (sizeof(struct sxe2_pkg_hdr) + sizeof(struct sxe2_seg)) + +struct sxe2_section_entry { + __le16 type; + __le16 unit_size; + + __le16 offset; + __le16 size; +}; + +#define SXE2_MIN_SECT_COUNT 1 +#define SXE2_MAX_SECT_COUNT 512 +#define SXE2_MIN_SECT_DATA_END 12 +#define SXE2_MAX_SECT_DATA_END 4096 + +struct 
sxe2_buf_hdr { + __le16 section_count; + + __le16 data_end; + __le32 buf_idx; + + __le32 crc; + struct sxe2_section_entry section_entry[]; +}; + +enum sxe2_segment_type { + SXE2_SGM_BLK_DP = 0, + SXE2_SGM_BLK_MAX +}; + +enum sxe2_section_type { + SXE2_SECT_SWPTG_TYPE = 0, + SXE2_SECT_SWVSIG_TYPE, + SXE2_SECT_SWTCAM_TYPE, + SXE2_SECT_SWEXTRACTOR_TYPE, + SXE2_SECT_SWMAP_TYPE, + SXE2_SECT_SWRCP_TYPE, + SXE2_SECT_SWPROFILERCPBITMAP_TYPE, + SXE2_SECT_RSSPTG_TYPE, + SXE2_SECT_RSSVSIG_TYPE, + SXE2_SECT_RSSTCAM_TYPE, + SXE2_SECT_RSSEXTRACTOR_TYPE, + SXE2_SECT_RSSMAP_TYPE, + SXE2_SECT_RSSIPSET_TYPE, + SXE2_SECT_FNAVPTG_TYPE, + SXE2_SECT_FNAVMASK_TYPE, + SXE2_SECT_ACLPTG_TYPE = 16, + SXE2_SECT_TYPE_MAX, +}; +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_drv_type.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_drv_type.h new file mode 100644 index 0000000000000000000000000000000000000000..f785f4be097483570b74fdb2a9ae4d0616d9b7e2 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_drv_type.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_type.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_TYPEDEF_H__ +#define __SXE2_DRV_TYPEDEF_H__ + +#include "ps3_types.h" + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#endif + +typedef U8 u8; +typedef U16 u16; +typedef U32 u32; +typedef U64 u64; + +#ifndef SXE2_SUPPORT_IPXE +typedef S8 s8; +#endif + +typedef S16 s16; +typedef S32 s32; +typedef S64 s64; + +typedef U16 __le16; +typedef U32 __le32; +typedef U64 __le64; + +#ifndef true +#define true (1) +#endif + +#ifndef false +#define false (0) +#endif + +#ifndef bool +#define bool Ps3Bool_t +#endif + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_host_regs.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_host_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..badb3ebb2c6d42544a0bc12da47bedddb5895adf --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_host_regs.h @@ -0,0 +1,717 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
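 + * + * Host-visible register offsets and bit-field shift (_S) / mask (_M) + * definitions for the interrupt, queue, control-queue, PTP and + * statistics blocks. A minimal, illustrative composition of one such + * register value (hw_base and the surrounding code are assumptions, + * not part of this patch): + * + *   u32 val = (msix_idx & SXE2_PF_INT_RQCTL_MSIX_IDX) | + *             (itr_idx << SXE2_PF_INT_RQCTL_ITR_IDX_S) | + *             SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + *   writel(val, hw_base + SXE2_PF_INT_RQCTL(q_idx));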
+ * + * @file: sxe2_host_regs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HOST_REGS_H__ +#define __SXE2_HOST_REGS_H__ + +#define SXE2_BITS_MASK(m, s) ((m ## UL) << (s)) + +#define SXE2_RXQ_CTXT(_i, _QRX) (0x0050000 + ((_i) * 4 + (_QRX) * 0x20)) +#define SXE2_RXQ_HEAD(_QRX) (0x0060000 + ((_QRX) * 4)) +#define SXE2_RXQ_TAIL(_QRX) (0x0070000 + ((_QRX) * 4)) +#define SXE2_RXQ_CTRL(_QRX) (0x006d000 + ((_QRX) * 4)) +#define SXE2_RXQ_WB(_QRX) (0x006B000 + ((_QRX) * 4)) + +#define SXE2_RXQ_CTRL_STATUS_ACTIVE 0x00000004 +#define SXE2_RXQ_CTRL_ENABLED 0x00000001 +#define SXE2_RXQ_CTRL_CDE_ENABLE BIT(3) + +#define SXE2_PCIEPROC_BASE 0x002d6000 +#define SXE2_PF_INT_BASE 0x00260000 +#define SXE2_PF_INT_ALLOC (SXE2_PF_INT_BASE + 0x0000) +#define SXE2_PF_INT_ALLOC_FIRST 0x7FF +#define SXE2_PF_INT_ALLOC_LAST_S 12 +#define SXE2_PF_INT_ALLOC_LAST \ + (0x7FF << SXE2_PF_INT_ALLOC_LAST_S) +#define SXE2_PF_INT_ALLOC_VALID BIT(31) + +#define SXE2_PF_INT_OICR (SXE2_PF_INT_BASE + 0x0040) +#define SXE2_PF_INT_OICR_PCIE_TIMEOUT BIT(0) +#define SXE2_PF_INT_OICR_UR BIT(1) +#define SXE2_PF_INT_OICR_CA BIT(2) +#define SXE2_PF_INT_OICR_VFLR BIT(3) +#define SXE2_PF_INT_OICR_VFR_DONE BIT(4) +#define SXE2_PF_INT_OICR_LAN_TX_ERR BIT(5) +#define SXE2_PF_INT_OICR_BFDE BIT(6) +#define SXE2_PF_INT_OICR_LAN_RX_ERR BIT(7) +#define SXE2_PF_INT_OICR_ECC_ERR BIT(8) +#define SXE2_PF_INT_OICR_GPIO BIT(9) +#define SXE2_PF_INT_OICR_TSYN_TX BIT(11) +#define SXE2_PF_INT_OICR_TSYN_EVENT BIT(12) +#define SXE2_PF_INT_OICR_TSYN_TGT BIT(13) +#define SXE2_PF_INT_OICR_EXHAUST BIT(14) +#define SXE2_PF_INT_OICR_FW BIT(15) +#define SXE2_PF_INT_OICR_SWINT BIT(16) +#define SXE2_PF_INT_OICR_LINKSEC_CHG BIT(17) +#define SXE2_PF_INT_OICR_INT_CFG_ADDR_ERR BIT(18) +#define SXE2_PF_INT_OICR_INT_CFG_DATA_ERR BIT(19) +#define SXE2_PF_INT_OICR_INT_CFG_ADR_UNRANGE BIT(20) +#define SXE2_PF_INT_OICR_INT_RAM_CONFLICT BIT(21) +#define SXE2_PF_INT_OICR_GRST BIT(22) +#define SXE2_PF_INT_OICR_FWQ_INT BIT(29) +#define SXE2_PF_INT_OICR_FWQ_TOOL_INT BIT(30) +#define SXE2_PF_INT_OICR_MBXQ_INT BIT(31) + +#define SXE2_PF_INT_OICR_ENABLE (SXE2_PF_INT_BASE + 0x0020) + +#define SXE2_PF_INT_FW_EVENT (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_FW_ABNORMAL BIT(0) +#define SXE2_PF_INT_RDMA_AEQ_OVERFLOW BIT(1) +#define SXE2_PF_INT_CGMAC_LINK_CHG BIT(18) +#define SXE2_PF_INT_VFLR_DONE BIT(2) + +#define SXE2_PF_INT_OICR_CTL (SXE2_PF_INT_BASE + 0x0060) +#define SXE2_PF_INT_OICR_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_OICR_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_OICR_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_OICR_CTL_ITR_IDX_S) +#define SXE2_PF_INT_OICR_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_FWQ_CTL (SXE2_PF_INT_BASE + 0x00C0) +#define SXE2_PF_INT_FWQ_CTL_MSIX_IDX 0x7FFF +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_FWQ_CTL_ITR_IDX_S) +#define SXE2_PF_INT_FWQ_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_MBX_CTL (SXE2_PF_INT_BASE + 0x00A0) +#define SXE2_PF_INT_MBX_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_MBX_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_MBX_CTL_ITR_IDX (0x3 << SXE2_PF_INT_MBX_CTL_ITR_IDX_S) +#define SXE2_PF_INT_MBX_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_GPIO_ENA (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_GPIO_X_ENA(x) BIT(x) + +#define SXE2_PFG_INT_CTL (SXE2_PF_INT_BASE + 0x0120) +#define SXE2_PFG_INT_CTL_ITR_GRAN 0x7 +#define SXE2_PFG_INT_CTL_ITR_GRAN_0 (2) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN BIT(4) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN_0 (4) 
+#define SXE2_PFG_INT_CTL_CREDIT_GRAN_1 (8) + +#define SXE2_VFG_RAM_INIT_DONE \ + (SXE2_PF_INT_BASE + 0x0128) +#define SXE2_VFG_RAM_INIT_DONE_0 BIT(0) +#define SXE2_VFG_RAM_INIT_DONE_1 BIT(1) +#define SXE2_VFG_RAM_INIT_DONE_2 BIT(2) + +#define SXE2_LINK_REG_GET_10G_VALUE 4 +#define SXE2_LINK_REG_GET_25G_VALUE 1 +#define SXE2_LINK_REG_GET_50G_VALUE 2 +#define SXE2_LINK_REG_GET_100G_VALUE 3 + +#define SXE2_PORT0_CNT 0 +#define SXE2_PORT1_CNT 1 +#define SXE2_PORT2_CNT 2 +#define SXE2_PORT3_CNT 3 + +#define SXE2_LINK_STATUS_BASE (0x002ac200) +#define SXE2_LINK_STATUS_PORT0_POS 3 +#define SXE2_LINK_STATUS_PORT1_POS 11 +#define SXE2_LINK_STATUS_PORT2_POS 19 +#define SXE2_LINK_STATUS_PORT3_POS 27 +#define SXE2_LINK_STATUS_MASK 1 + +#define SXE2_LINK_SPEED_BASE (0x002ac200) +#define SXE2_LINK_SPEED_PORT0_POS 0 +#define SXE2_LINK_SPEED_PORT1_POS 8 +#define SXE2_LINK_SPEED_PORT2_POS 16 +#define SXE2_LINK_SPEED_PORT3_POS 24 +#define SXE2_LINK_SPEED_MASK 7 + +#define SXE2_PFVP_INT_ALLOC(vf_idx) (SXE2_PF_INT_BASE + 0x012C + ((vf_idx) * 4)) +#define SXE2_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PFVP_INT_ALLOC_LAST_S 12 +#define SXE2_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCI_PFVP_INT_ALLOC(vf_idx) (SXE2_PCIEPROC_BASE + 0x5800 + ((vf_idx) * 4)) +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_S 12 + +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCIEPROC_INT2FUNC(_INT) (SXE2_PCIEPROC_BASE + 0xe000 + ((_INT) * 4)) +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_S 0 +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_M (0xFF << SXE2_PCIEPROC_INT2FUNC_VF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_S 12 +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_M (0x7 << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_S 16 +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_M BIT(16) + +#define SXE2_VSI_PF(vf_idx) (SXE2_PF_INT_BASE + 0x14000 + ((vf_idx) * 4)) +#define SXE2_VSI_PF_ID_S 0 +#define SXE2_VSI_PF_ID_M (0x7 << SXE2_VSI_PF_ID_S) +#define SXE2_VSI_PF_EN_M BIT(3) + +#define SXE2_MBX_CTL(_VSI) (0x0026692C + ((_VSI) * 4)) +#define SXE2_MBX_CTL_MSIX_INDX_S 0 +#define SXE2_MBX_CTL_MSIX_INDX_M (0x7FF << SXE2_MBX_CTL_MSIX_INDX_S) +#define SXE2_MBX_CTL_CAUSE_ENA_M BIT(30) + +#define SXE2_PF_INT_TQCTL(q_idx) (SXE2_PF_INT_BASE + 0x092C + 4 * (q_idx)) +#define SXE2_PF_INT_TQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_TQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_TQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_TQCTL_ITR_IDX_S) +#define SXE2_PF_INT_TQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RQCTL(q_idx) (SXE2_PF_INT_BASE + 0x292C + 4 * (q_idx)) +#define SXE2_PF_INT_RQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_RQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_RQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_RQCTL_ITR_IDX_S) +#define SXE2_PF_INT_RQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RATE(irq_idx) (SXE2_PF_INT_BASE + 0x7530 + 4 * (irq_idx)) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL (0x3F) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX \ + (0x3F) +#define SXE2_PF_INT_RATE_INTRL_ENABLE (BIT(6)) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT (7) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE \ + (0x3F << SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT) + +#define SXE2_VF_INT_ITR(itr_idx, irq_idx) \ + 
(SXE2_PF_INT_BASE + 0xB530 + 0x2000 * (itr_idx) + 4 * (irq_idx)) +#define SXE2_VF_INT_ITR_INTERVAL 0xFFF + +#define SXE2_VF_DYN_CTL(irq_idx) (SXE2_PF_INT_BASE + 0x9530 + 4 * (irq_idx)) +#define SXE2_VF_DYN_CTL_INTENABLE BIT(0) +#define SXE2_VF_DYN_CTL_CLEARPBA BIT(1) +#define SXE2_VF_DYN_CTL_SWINT_TRIG BIT(2) +#define SXE2_VF_DYN_CTL_ITR_IDX_S \ + 3 +#define SXE2_VF_DYN_CTL_ITR_IDX_M 0x3 +#define SXE2_VF_DYN_CTL_INTERVAL_S 5 +#define SXE2_VF_DYN_CTL_INTERVAL_M 0xFFF +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_ENABLE BIT(24) +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_S 25 +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_M 0x3 + +#define SXE2_VF_DYN_CTL_INTENABLE_MSK \ + BIT(31) + +#define SXE2_BAR4_MSIX_BASE 0 +#define SXE2_BAR4_MSIX_CTL(_idx) (SXE2_BAR4_MSIX_BASE + 0xC + ((_idx) * 0x10)) +#define SXE2_BAR4_MSIX_ENABLE 0 +#define SXE2_BAR4_MSIX_DISABLE 1 + +#define SXE2_TXQ_LEGACY_DBLL(_DBQM) (0x1000 + ((_DBQM) * 4)) + +#define SXE2_TXQ_CONTEXT0(_pf_idx) (0x10040 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT1(_pf_idx) (0x10044 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT2(_pf_idx) (0x10048 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT3(_pf_idx) (0x1004C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT4(_pf_idx) (0x10050 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7(_pf_idx) (0x1005C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7_HEAD_S 0 +#define SXE2_TXQ_CONTEXT7_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_HEAD_S) +#define SXE2_TXQ_CONTEXT7_READ_HEAD_S 16 +#define SXE2_TXQ_CONTEXT7_READ_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_READ_HEAD_S) + +#define SXE2_TXQ_CTRL(_pf_idx) (0x10064 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CTXT_CTRL(_pf_idx) (0x100C8 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_DIS_CNT(_pf_idx) (0x100D0 + ((_pf_idx) * 0x100)) + +#define SXE2_TXQ_CTXT_CTRL_USED_MASK 0x00000800 +#define SXE2_TXQ_CTRL_SW_EN_M BIT(0) +#define SXE2_TXQ_CTRL_HW_EN_M BIT(1) + +#define SXE2_TXQ_CTXT2_PROT_IDX_S 0 +#define SXE2_TXQ_CTXT2_PROT_IDX_M SXE2_BITS_MASK(0x7, 0) +#define SXE2_TXQ_CTXT2_CGD_IDX_S 4 +#define SXE2_TXQ_CTXT2_CGD_IDX_M SXE2_BITS_MASK(0x1F, 4) +#define SXE2_TXQ_CTXT2_PF_IDX_S 9 +#define SXE2_TXQ_CTXT2_PF_IDX_M SXE2_BITS_MASK(0x7, 9) +#define SXE2_TXQ_CTXT2_VMVF_IDX_S 12 +#define SXE2_TXQ_CTXT2_VMVF_IDX_M SXE2_BITS_MASK(0x3FF, 12) +#define SXE2_TXQ_CTXT2_VMVF_TYPE_S 23 +#define SXE2_TXQ_CTXT2_VMVF_TYPE_M SXE2_BITS_MASK(0x3, 23) +#define SXE2_TXQ_CTXT2_TSYN_ENA_S 25 +#define SXE2_TXQ_CTXT2_TSYN_ENA_M BIT(25) +#define SXE2_TXQ_CTXT2_ALT_VLAN_S 26 +#define SXE2_TXQ_CTXT2_ALT_VLAN_M BIT(26) +#define SXE2_TXQ_CTXT2_WB_MODE_S 27 +#define SXE2_TXQ_CTXT2_WB_MODE_M BIT(27) +#define SXE2_TXQ_CTXT2_ITR_WB_S 28 +#define SXE2_TXQ_CTXT2_ITR_WB_M BIT(28) +#define SXE2_TXQ_CTXT2_LEGACY_EN_S 29 +#define SXE2_TXQ_CTXT2_LEGACY_EN_M BIT(29) +#define SXE2_TXQ_CTXT2_SSO_EN_S 30 +#define SXE2_TXQ_CTXT2_SSO_EN_M BIT(30) + +#define SXE2_TXQ_CTXT3_SRC_VSI_S 0 +#define SXE2_TXQ_CTXT3_SRC_VSI_M SXE2_BITS_MASK(0x3FF, 0) +#define SXE2_TXQ_CTXT3_CPU_ID_S 12 +#define SXE2_TXQ_CTXT3_CPU_ID_M SXE2_BITS_MASK(0xFF, 12) +#define SXE2_TXQ_CTXT3_TPH_RDDESC_S 20 +#define SXE2_TXQ_CTXT3_TPH_RDDESC_M BIT(20) +#define SXE2_TXQ_CTXT3_TPH_RDDATA_S 21 +#define SXE2_TXQ_CTXT3_TPH_RDDATA_M BIT(21) +#define SXE2_TXQ_CTXT3_TPH_WRDESC_S 22 +#define SXE2_TXQ_CTXT3_TPH_WRDESC_M BIT(22) + +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_S 0 +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_TXQ_CTXT3_RDDESC_RO_S 13 +#define SXE2_TXQ_CTXT3_RDDESC_RO_M BIT(13) +#define SXE2_TXQ_CTXT3_WRDESC_RO_S 14 +#define 
SXE2_TXQ_CTXT3_WRDESC_RO_M BIT(14) +#define SXE2_TXQ_CTXT3_RDDATA_RO_S 15 +#define SXE2_TXQ_CTXT3_RDDATA_RO_M BIT(15) +#define SXE2_TXQ_CTXT3_QLEN_S 16 +#define SXE2_TXQ_CTXT3_QLEN_M SXE2_BITS_MASK(0x1FFF, 16) + +#define SXE2_RX_BUF_CHAINED_MAX 10 +#define SXE2_RX_DESC_BASE_ADDR_UNIT 7 +#define SXE2_RX_HBUF_LEN_UNIT 6 +#define SXE2_RX_DBUF_LEN_UNIT 7 +#define SXE2_RX_DBUF_LEN_MASK (~0x7F) +#define SXE2_RX_HWTAIL_VALUE_MASK (~0x7) + +enum { + SXE2_RX_CTXT0 = 0, + SXE2_RX_CTXT1, + SXE2_RX_CTXT2, + SXE2_RX_CTXT3, + SXE2_RX_CTXT4, + SXE2_RX_CTXT_CNT, +}; + +#define SXE2_RX_CTXT_BASE_L_S 0 +#define SXE2_RX_CTXT_BASE_L_W 32 + +#define SXE2_RX_CTXT_BASE_H_S 0 +#define SXE2_RX_CTXT_BASE_H_W 25 +#define SXE2_RX_CTXT_DEPTH_L_S 25 +#define SXE2_RX_CTXT_DEPTH_L_W 7 + +#define SXE2_RX_CTXT_DEPTH_H_S 0 +#define SXE2_RX_CTXT_DEPTH_H_W 6 + +#define SXE2_RX_CTXT_DBUFF_S 6 +#define SXE2_RX_CTXT_DBUFF_W 7 + +#define SXE2_RX_CTXT_HBUFF_S 13 +#define SXE2_RX_CTXT_HBUFF_W 5 + +#define SXE2_RX_CTXT_HSPLT_TYPE_S 18 +#define SXE2_RX_CTXT_HSPLT_TYPE_W 2 + +#define SXE2_RX_CTXT_DESC_TYPE_S 20 +#define SXE2_RX_CTXT_DESC_TYPE_W 1 + +#define SXE2_RX_CTXT_CRC_S 21 +#define SXE2_RX_CTXT_CRC_W 1 + +#define SXE2_RX_CTXT_L2TAG_FLAG_S 23 +#define SXE2_RX_CTXT_L2TAG_FLAG_W 1 + +#define SXE2_RX_CTXT_HSPLT_0_S 24 +#define SXE2_RX_CTXT_HSPLT_0_W 4 + +#define SXE2_RX_CTXT_HSPLT_1_S 28 +#define SXE2_RX_CTXT_HSPLT_1_W 2 + +#define SXE2_RX_CTXT_INVALN_STP_S 31 +#define SXE2_RX_CTXT_INVALN_STP_W 1 + +#define SXE2_RX_CTXT_LRO_ENABLE_S 0 +#define SXE2_RX_CTXT_LRO_ENABLE_W 1 + +#define SXE2_RX_CTXT_CPUID_S 3 +#define SXE2_RX_CTXT_CPUID_W 8 + +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_S 11 +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_W 14 + +#define SXE2_RX_CTXT_LRO_DESC_MAX_S 25 +#define SXE2_RX_CTXT_LRO_DESC_MAX_W 4 + +#define SXE2_RX_CTXT_RELAX_DATA_S 29 +#define SXE2_RX_CTXT_RELAX_DATA_W 1 + +#define SXE2_RX_CTXT_RELAX_WB_S 30 +#define SXE2_RX_CTXT_RELAX_WB_W 1 + +#define SXE2_RX_CTXT_RELAX_RD_S 31 +#define SXE2_RX_CTXT_RELAX_RD_W 1 + +#define SXE2_RX_CTXT_THPRDESC_ENABLE_S 1 +#define SXE2_RX_CTXT_THPRDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPWDESC_ENABLE_S 2 +#define SXE2_RX_CTXT_THPWDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPRDATA_ENABLE_S 3 +#define SXE2_RX_CTXT_THPRDATA_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPHEAD_ENABLE_S 4 +#define SXE2_RX_CTXT_THPHEAD_ENABLE_W 1 + +#define SXE2_RX_CTXT_LOW_DESC_LINE_S 6 +#define SXE2_RX_CTXT_LOW_DESC_LINE_W 3 + +#define SXE2_RX_CTXT_VF_ID_S 9 +#define SXE2_RX_CTXT_VF_ID_W 8 + +#define SXE2_RX_CTXT_PF_ID_S 17 +#define SXE2_RX_CTXT_PF_ID_W 3 + +#define SXE2_RX_CTXT_VF_ENABLE_S 20 +#define SXE2_RX_CTXT_VF_ENABLE_W 1 + +#define SXE2_RX_CTXT_VSI_ID_S 21 +#define SXE2_RX_CTXT_VSI_ID_W 10 + +#define SXE2_PF_CTRLQ_FW_BASE 0x00312000 +#define SXE2_PF_CTRLQ_FW_ATQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0000) +#define SXE2_PF_CTRLQ_FW_ARQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0080) +#define SXE2_PF_CTRLQ_FW_ATQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0100) +#define SXE2_PF_CTRLQ_FW_ARQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0180) +#define SXE2_PF_CTRLQ_FW_ATQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0200) +#define SXE2_PF_CTRLQ_FW_ARQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0280) +#define SXE2_PF_CTRLQ_FW_ATQH (SXE2_PF_CTRLQ_FW_BASE + 0x0300) +#define SXE2_PF_CTRLQ_FW_ARQH (SXE2_PF_CTRLQ_FW_BASE + 0x0380) +#define SXE2_PF_CTRLQ_FW_ATQT (SXE2_PF_CTRLQ_FW_BASE + 0x0400) +#define SXE2_PF_CTRLQ_FW_ARQT (SXE2_PF_CTRLQ_FW_BASE + 0x0480) + +#define SXE2_PF_CTRLQ_MBX_BASE 0x00316000 +#define SXE2_PF_CTRLQ_MBX_ATQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE100) +#define SXE2_PF_CTRLQ_MBX_ATQBAH 
(SXE2_PF_CTRLQ_MBX_BASE + 0xE180) +#define SXE2_PF_CTRLQ_MBX_ATQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE200) +#define SXE2_PF_CTRLQ_MBX_ATQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE280) +#define SXE2_PF_CTRLQ_MBX_ATQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE300) +#define SXE2_PF_CTRLQ_MBX_ARQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE380) +#define SXE2_PF_CTRLQ_MBX_ARQBAH (SXE2_PF_CTRLQ_MBX_BASE + 0xE400) +#define SXE2_PF_CTRLQ_MBX_ARQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE480) +#define SXE2_PF_CTRLQ_MBX_ARQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE500) +#define SXE2_PF_CTRLQ_MBX_ARQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE580) + +#define SXE2_CMD_REG_LEN_M 0x3FF +#define SXE2_CMD_REG_LEN_VFE_M BIT(28) +#define SXE2_CMD_REG_LEN_OVFL_M BIT(29) +#define SXE2_CMD_REG_LEN_CRIT_M BIT(30) +#define SXE2_CMD_REG_LEN_ENABLE_M BIT(31) + +#define SXE2_CMD_REG_HEAD_M 0x3FF + +#define SXE2_PF_CTRLQ_FW_HW_STS (SXE2_PF_CTRLQ_FW_BASE + 0x0500) +#define SXE2_PF_CTRLQ_FW_ATQ_IDLE_MASK BIT(0) +#define SXE2_PF_CTRLQ_FW_ARQ_IDLE_MASK BIT(1) + +#define SXE2_TOP_CFG_BASE 0x00292000 +#define SXE2_HW_VER (SXE2_TOP_CFG_BASE + 0x48c) +#define SXE2_HW_FPGA_VER_M SXE2_BITS_MASK(0xFFF, 0) + +#define SXE2_FW_VER (SXE2_TOP_CFG_BASE + 0x214) +#define SXE2_FW_VER_BUILD_M SXE2_BITS_MASK(0xFF, 0) +#define SXE2_FW_VER_FIX_M SXE2_BITS_MASK(0xFF, 8) +#define SXE2_FW_VER_SUB_M SXE2_BITS_MASK(0xFF, 16) +#define SXE2_FW_VER_MAIN_M SXE2_BITS_MASK(0xFF, 24) +#define SXE2_FW_VER_FIX_SHIFT (8) +#define SXE2_FW_VER_SUB_SHIFT (16) +#define SXE2_FW_VER_MAIN_SHIFT (24) + +#define SXE2_FW_COMP_VER_ADDR (SXE2_TOP_CFG_BASE + 0x20c) + +#define SXE2_STATUS SXE2_FW_VER + +#define SXE2_FW_STATE (SXE2_TOP_CFG_BASE + 0x210) + +#define SXE2_FW_HEARTBEAT (SXE2_TOP_CFG_BASE + 0x218) + +#define SXE2_FW_MISC (SXE2_TOP_CFG_BASE + 0x21c) +#define SXE2_FW_MISC_MODE_M SXE2_BITS_MASK(0xF, 0) +#define SXE2_FW_MISC_POP_M SXE2_BITS_MASK(0x80000000, 0) + +#define SXE2_TX_OE_BASE 0x00030000 +#define SXE2_RX_OE_BASE 0x00050000 + +#define SXE2_PFP_L2TAGSEN(_i) (SXE2_TX_OE_BASE + 0x00300 + ((_i) * 4)) +#define SXE2_VSI_L2TAGSTXVALID(_i) \ + (SXE2_TX_OE_BASE + 0x01000 + ((_i) * 4)) +#define SXE2_VSI_TIR0(_i) (SXE2_TX_OE_BASE + 0x01C00 + ((_i) * 4)) +#define SXE2_VSI_TIR1(_i) (SXE2_TX_OE_BASE + 0x02800 + ((_i) * 4)) +#define SXE2_VSI_TAR(_i) (SXE2_TX_OE_BASE + 0x04C00 + ((_i) * 4)) +#define SXE2_VSI_TSR(_i) (SXE2_RX_OE_BASE + 0x18000 + ((_i) * 4)) + +#define SXE2_STATS_TX_LAN_CONFIG(_i) (SXE2_TX_OE_BASE + 0x08300 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_PKT_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08340 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_BYTE_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08380 + ((_i) * 4)) + +#define SXE2_STATS_RX_CONFIG(_i) (SXE2_RX_OE_BASE + 0x230B0 + ((_i) * 4)) +#define SXE2_STATS_RX_LAN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230C0 + ((_i) * 8)) +#define SXE2_STATS_RX_LAN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23120 + ((_i) * 8)) +#define SXE2_STATS_RX_FD_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230E0 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23100 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23140 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_OUT_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23160 + ((_i) * 8)) + +#define SXE2_L2TAG_ID_STAG 0 +#define SXE2_L2TAG_ID_OUT_VLAN1 1 +#define SXE2_L2TAG_ID_OUT_VLAN2 2 +#define SXE2_L2TAG_ID_VLAN 3 + +#define SXE2_PFP_L2TAGSEN_ALL_TAG 0xFF +#define SXE2_PFP_L2TAGSEN_DVM BIT(10) + +#define SXE2_VSI_TSR_STRIP_TAG_S 0 +#define SXE2_VSI_TSR_SHOW_TAG_S 4 + +#define SXE2_VSI_TSR_ID_STAG BIT(0) +#define SXE2_VSI_TSR_ID_OUT_VLAN1 
BIT(1) +#define SXE2_VSI_TSR_ID_OUT_VLAN2 BIT(2) +#define SXE2_VSI_TSR_ID_VLAN BIT(3) + +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S 0 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID BIT(3) +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S 4 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID BIT(7) +#define SXE2_VSI_L2TAGSTXVALID_TIR0_ID_S 16 +#define SXE2_VSI_L2TAGSTXVALID_TIR0_VALID BIT(19) +#define SXE2_VSI_L2TAGSTXVALID_TIR1_ID_S 20 +#define SXE2_VSI_L2TAGSTXVALID_TIR1_VALID BIT(23) + +#define SXE2_VSI_L2TAGSTXVALID_ID_STAG 0 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 2 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 3 +#define SXE2_VSI_L2TAGSTXVALID_ID_VLAN 4 + +#define SXE2_SWITCH_OG_BASE 0x00140000 +#define SXE2_SWITCH_SWE_BASE 0x00150000 +#define SXE2_SWITCH_RG_BASE 0x00160000 + +#define SXE2_VSI_RX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01074 + ((_i) * 4)) +#define SXE2_VSI_TX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01C74 + ((_i) * 4)) + +#define SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN BIT(1) +#define SXE2_VSI_TX_SW_CTRL_LAN_EN BIT(2) +#define SXE2_VSI_TX_SW_CTRL_MACAS_EN BIT(3) +#define SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TAR_UNTAGGED_SHIFT (16) + +#define SXE2_PCIE_SYS_READY 0x38c +#define SXE2_PCIE_SYS_READY_CORER_ASSERT BIT(0) +#define SXE2_PCIE_SYS_READY_STOP_DROP_DONE BIT(2) +#define SXE2_PCIE_SYS_READY_R5 BIT(3) +#define SXE2_PCIE_SYS_READY_STOP_DROP BIT(16) + +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS 0x78 +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS_TRANS_PENDING BIT(21) + +#define SXE2_TOP_CFG_CORE (SXE2_TOP_CFG_BASE + 0x0630) +#define SXE2_TOP_CFG_CORE_RST_CODE 0x09FBD586 + +#define SXE2_PFGEN_CTRL (0x00336000) +#define SXE2_PFGEN_CTRL_PFSWR BIT(0) + +#define SXE2_VFGEN_CTRL(_vf) (0x00337000 + ((_vf) * 4)) +#define SXE2_VFGEN_CTRL_VFSWR BIT(0) + +#define SXE2_VF_VRC_VFGEN_RSTAT(_vf) (0x00338000 + (_vf) * 4) +#define SXE2_VF_VRC_VFGEN_VFRSTAT (0x3) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VFR (0) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_COMPLETE (BIT(0)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE (BIT(1)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_MASK \ + (BIT(2)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF \ + (0x300) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_NO_VFR \ + (0) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_VFR (1) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK \ + (BIT(10)) + +#define SXE2_GLGEN_VFLRSTAT(_reg) (0x0033A000 + ((_reg) * 4)) + +#define SXE2_ACCEPT_RULE_TAGGED_S 0 +#define SXE2_ACCEPT_RULE_UNTAGGED_S 16 + +#define SXE2_VF_RXQ_BASE(_VF) (0x000b0800 + ((_VF) * 4)) +#define SXE2_VF_RXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_RXQ_BASE_FIRST_Q_M (0x7FF << SXE2_VF_RXQ_BASE_FIRST_Q_S) +#define SXE2_VF_RXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_RXQ_BASE_Q_NUM_M (0x7FF << SXE2_VF_RXQ_BASE_Q_NUM_S) + +#define SXE2_VF_RXQ_MAPENA(_VF) (0x000b0400 + ((_VF) * 4)) +#define SXE2_VF_RXQ_MAPENA_M BIT(0) + +#define SXE2_VF_TXQ_BASE(_VF) (0x00040400 + ((_VF) * 4)) +#define SXE2_VF_TXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_TXQ_BASE_FIRST_Q_M (0x3FFF << SXE2_VF_TXQ_BASE_FIRST_Q_S) +#define SXE2_VF_TXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_TXQ_BASE_Q_NUM_M (0xFF << SXE2_VF_TXQ_BASE_Q_NUM_S) + +#define SXE2_VF_TXQ_MAPENA(_VF) (0x00045000 + ((_VF) * 4)) +#define SXE2_VF_TXQ_MAPENA_M BIT(0) + +#define PRI_PTP_BASEADDR 0x2a8000 + +#define GLTSYN (PRI_PTP_BASEADDR + 0x0) +#define GLTSYN_ENA_M BIT(0) + +#define GLTSYN_CMD (PRI_PTP_BASEADDR + 0x4) +#define GLTSYN_CMD_INIT_TIME 0x01 
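+/* remaining GLTSYN_CMD opcodes; a GLTSYN_SYNC_EXEC write to GLTSYN_SYNC + * appears to latch the selected command + */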
+#define GLTSYN_CMD_INIT_INCVAL 0x02 +#define GLTSYN_CMD_ADJ_TIME 0x04 +#define GLTSYN_CMD_ADJ_TIME_AT_TIME 0x0C +#define GLTSYN_CMD_LATCHING_SHTIME 0x80 + +#define GLTSYN_SYNC (PRI_PTP_BASEADDR + 0x8) +#define GLTSYN_SYNC_PLUS_1NS 0x1 +#define GLTSYN_SYNC_MINUS_1NS 0x2 +#define GLTSYN_SYNC_EXEC 0x3 +#define GLTSYN_SYNC_GEN_PULSE 0x4 + +#define GLTSYN_SEM (PRI_PTP_BASEADDR + 0xC) +#define GLTSYN_SEM_BUSY_M BIT(0) + +#define GLTSYN_STAT (PRI_PTP_BASEADDR + 0x10) +#define GLTSYN_STAT_EVENT0_M BIT(0) +#define GLTSYN_STAT_EVENT1_M BIT(1) +#define GLTSYN_STAT_EVENT2_M BIT(2) + +#define GLTSYN_TIME_SUBNS (PRI_PTP_BASEADDR + 0x20) +#define GLTSYN_TIME_NS (PRI_PTP_BASEADDR + 0x24) +#define GLTSYN_TIME_S_H (PRI_PTP_BASEADDR + 0x28) +#define GLTSYN_TIME_S_L (PRI_PTP_BASEADDR + 0x2C) + +#define GLTSYN_SHTIME_SUBNS (PRI_PTP_BASEADDR + 0x30) +#define GLTSYN_SHTIME_NS (PRI_PTP_BASEADDR + 0x34) +#define GLTSYN_SHTIME_S_H (PRI_PTP_BASEADDR + 0x38) +#define GLTSYN_SHTIME_S_L (PRI_PTP_BASEADDR + 0x3C) + +#define GLTSYN_SHADJ_SUBNS (PRI_PTP_BASEADDR + 0x40) +#define GLTSYN_SHADJ_NS (PRI_PTP_BASEADDR + 0x44) + +#define GLTSYN_INCVAL_NS (PRI_PTP_BASEADDR + 0x50) +#define GLTSYN_INCVAL_SUBNS (PRI_PTP_BASEADDR + 0x54) + +#define GLTSYN_TGT_NS(_i) \ + (PRI_PTP_BASEADDR + 0x60 + ((_i) * 16)) +#define GLTSYN_TGT_S_H(_i) (PRI_PTP_BASEADDR + 0x64 + ((_i) * 16)) +#define GLTSYN_TGT_S_L(_i) (PRI_PTP_BASEADDR + 0x68 + ((_i) * 16)) + +#define GLTSYN_EVENT_NS(_i) \ + (PRI_PTP_BASEADDR + 0xA0 + ((_i) * 16)) + +#define GLTSYN_EVENT_S_H(_i) (PRI_PTP_BASEADDR + 0xA4 + ((_i) * 16)) +#define GLTSYN_EVENT_S_H_MASK (0xFFFF) + +#define GLTSYN_EVENT_S_L(_i) (PRI_PTP_BASEADDR + 0xA8 + ((_i) * 16)) + +#define GLTSYN_AUXOUT(_i) \ + (PRI_PTP_BASEADDR + 0xD0 + ((_i) * 4)) +#define GLTSYN_AUXOUT_OUT_ENA BIT(0) +#define GLTSYN_AUXOUT_OUT_MOD (0x03 << 1) +#define GLTSYN_AUXOUT_OUTLVL BIT(3) +#define GLTSYN_AUXOUT_INT_ENA BIT(4) +#define GLTSYN_AUXOUT_PULSEW (0x1fff << 3) + +#define GLTSYN_CLKO(_i) \ + (PRI_PTP_BASEADDR + 0xE0 + ((_i) * 4)) + +#define GLTSYN_AUXIN(_i) (PRI_PTP_BASEADDR + 0xF4 + ((_i) * 4)) +#define GLTSYN_AUXIN_RISING_EDGE BIT(0) +#define GLTSYN_AUXIN_FALLING_EDGE BIT(1) +#define GLTSYN_AUXIN_ENABLE BIT(4) + +#define CGMAC_CSR_BASE 0x2B4000 + +#define CGMAC_PORT_OFFSET 0x00004000 + +#define PFP_CGM_TX_TSMEM(_port, _i) \ + (CGMAC_CSR_BASE + 0x100 + \ + + CGMAC_PORT_OFFSET * (_port) + ((_i) * 4)) + +#define PFP_CGM_TX_TXHI(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x108 + ((_i) * 8)) +#define PFP_CGM_TX_TXLO(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x10C + ((_i) * 8)) + +#define CGMAC_CSR_MAC0_OFFSET 0x2B4000 +#define CGMAC_CSR_MAC_OFFSET(_i) (CGMAC_CSR_MAC0_OFFSET + ((_i) * 0x4000)) + +#define PFP_CGM_MAC_TX_TSMEM(_phy, _i) \ + (CGMAC_CSR_MAC_OFFSET(_phy) + 0x100 + \ + ((_i) * 4)) + +#define PFP_CGM_MAC_TX_TXHI(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x108 + ((_i) * 8)) +#define PFP_CGM_MAC_TX_TXLO(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x10C + ((_i) * 8)) + +#define SXE2_VF_GLINT_CEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_S 11 +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_GLINT_CEQCTL_CAUSE_ENA_M BIT(30) +#define SXE2_VF_GLINT_CEQCTL(_INT) (0x0026492C + ((_INT) * 4)) + +#define SXE2_VF_PFINT_AEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_S 11 +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_VPINT_AEQCTL_CAUSE_ENA_M BIT(30) +#define 
SXE2_VF_VPINT_AEQCTL(_VF) (0x0026052c + ((_VF) * 4)) + +#define SXE2_IPSEC_TX_BASE (0x2A0000) +#define SXE2_IPSEC_RX_BASE (0x2A2000) + +#define SXE2_IPSEC_RX_IPSIDX_ADDR (SXE2_IPSEC_RX_BASE + 0x0084) +#define SXE2_IPSEC_RX_IPSIDX_RST (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_VBI_SHIFT (18) +#define SXE2_IPSEC_RX_IPSIDX_VBI_MASK (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_SHIFT (17) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_MASK (0x00020000) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_SHIFT (4) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_MASK (0x0000fff0) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_SHIFT (2) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_MASK (0x0000000c) + +#define SXE2_IPSEC_RX_IPSIPID_ADDR (SXE2_IPSEC_RX_BASE + 0x0088) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_MASK (0x000000ff) + +#define SXE2_IPSEC_RX_IPSSPI0_ADDR (SXE2_IPSEC_RX_BASE + 0x008c) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_MASK (0xffffffff) + +#define SXE2_IPSEC_RX_IPSSPI1_ADDR (SXE2_IPSEC_RX_BASE + 0x0090) +#define SXE2_IPSEC_RX_IPSSPI1_SPI_Y_MASK (0xffffffff) + +#define SXE2_PAUSE_STATS_BASE(port) (0x002b2000 + (port) * 0x4000) +#define SXE2_TXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0894) +#define SXE2_TXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0a18) +#define SXE2_TXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a20 + 8 * (pri))) +#define SXE2_TXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a60 + 8 * (pri))) +#define SXE2_TXPFCXONTOXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0aa0 + 8 * (pri))) +#define SXE2_RXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0988) +#define SXE2_RXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0b28) +#define SXE2_RXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b30 + 8 * (pri))) +#define SXE2_RXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b70 + 8 * (pri))) + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_internal_ver.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_internal_ver.h new file mode 100644 index 0000000000000000000000000000000000000000..597139e7fc490e1f535833c1a900b4d5243b6c9c --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_internal_ver.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_internal_ver.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_INTERNAL_VER_H__ +#define __SXE2_INTERNAL_VER_H__ + +#define SXE2_VER_MAJOR_OFFSET (16) +#define SXE2_MK_VER(major, minor) \ + ((major) << SXE2_VER_MAJOR_OFFSET | (minor)) +#define SXE2_MK_VER_MAJOR(ver) (((ver) >> SXE2_VER_MAJOR_OFFSET) & 0xff) +#define SXE2_MK_VER_MINOR(ver) ((ver) & 0xff) + +#define SXE2_ITR_VER_MAJOR_V100 1 +#define SXE2_ITR_VER_MAJOR_V200 2 + +#define SXE2_ITR_VER_MAJOR 1 +#define SXE2_ITR_VER_MINOR 1 +#define SXE2_ITR_VER SXE2_MK_VER(SXE2_ITR_VER_MAJOR, SXE2_ITR_VER_MINOR) + +#define SXE2_CTRL_VER_IS_V100(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V100) +#define SXE2_CTRL_VER_IS_V200(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V200) + +#define SXE2LIB_ITR_VER_MAJOR 1 +#define SXE2LIB_ITR_VER_MINOR 1 +#define SXE2LIB_ITR_VER SXE2_MK_VER(SXE2LIB_ITR_VER_MAJOR, SXE2LIB_ITR_VER_MINOR) + +#define SXE2_DRV_CLI_VER_MAJOR 1 +#define SXE2_DRV_CLI_VER_MINOR 1 +#define SXE2_DRV_CLI_VER \ + SXE2_MK_VER(SXE2_DRV_CLI_VER_MAJOR, SXE2_DRV_CLI_VER_MINOR) + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ioctl.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..a648b88026b1fc5390642c29209c14f368decf7d --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_ioctl.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_ioctl.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_IOCTL_H__ +#define __SXE2_IOCTL_H__ + +#include "sxe2_internal_ver.h" + +struct sxe2_ioctl_sync_cmd { + u32 ver; + u32 resv; + u64 trace_id; + u32 timeout; + u8 resv1[4]; + void *in_data; + u32 in_len; + u8 resv2[4]; + void *out_data; + u32 out_len; + u8 resv3[4]; +}; + +#define SXE2_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxe2_ioctl_sync_cmd) +#define SXE2_CMD_IOCTL_SYNC_DRV_CMD _IOWR('M', 2, struct sxe2_ioctl_sync_cmd) + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_misc.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_misc.h new file mode 100644 index 0000000000000000000000000000000000000000..07e69b08fed6547e82a381a8fff48030d3fddb2c --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_misc.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_MISC_H__ +#define __SXE2_MISC_H__ + +#define REG_FW_MISC_MASK_MODE (0xF) +#define REG_FW_MISC_MASK_RSV0 (0xF0) +#define REG_FW_MISC_MASK_RSV1 (0xF00) +#define REG_FW_MISC_MASK_RSV2 (0xF000) +#define REG_FW_MISC_MASK_POP (0x80000000) + +#define REG_FW_MISC_MASK_MODE_OFST (0) +#define REG_FW_MISC_MASK_RSV0_OFST (4) +#define REG_FW_MISC_MASK_RSV1_OFST (8) +#define REG_FW_MISC_MASK_RSV2_OFST (12) +#define REG_FW_MISC_MASK_POP_OFST (31) + +enum sxe2_nic_mode { + SXE2_NIC_MODE_NORMAL = 0, + SXE2_NIC_MODE_NCD = 1, + + SXE2_NIC_MODE_MAX = 0xF, +}; + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_msg.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..fc7389d3466a57dc0065c12a1e9d84a2a1a46cc6 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_msg.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_msg.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MSG_H__ +#define __SXE2_MSG_H__ + +#if defined(SXE2_FW) || defined(SXE2_SUPPORT_UEFI) + +#include "sxe2_drv_type.h" +#endif + +#ifdef PS3_CLI_SXE2 +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +enum sfp_type_identifier { + SXE2_SFP_TYPE_UNKNOW = 0x00, + SXE2_SFP_TYPE_SFP = 0x03, + + SXE2_SFP_TYPE_QSFP_PLUS = 0x0D, + SXE2_SFP_TYPE_QSFP28 = 0x11, + + SXE2_SFP_TYPE_MAX = 0xFF, +}; + +#ifndef SFP_DEFINE +#define SFP_DEFINE + +#define SXE2_SFP_EEP_WR 0x1 +#define SXE2_SFP_EEP_QSFP 0x1 + +enum sfp_bus_addr { + SXE2_SFP_EEP_I2C_ADDR0 = 0xA0, + SXE2_SFP_EEP_I2C_ADDR1 = 0xA2, + + SXE2_SFP_EEP_I2C_ADDR_NR = 0xFFFF, +}; + +struct sxe2_sfp_req { + u8 is_wr; + u8 is_qsfp; + __le16 bus_addr; + __le16 page_cnt; + __le16 offset; + __le16 data_len; + __le16 rvd; + u8 data[]; +}; + +struct sxe2_sfp_resp { + u8 is_wr; + u8 is_qsfp; + __le16 data_len; + u8 data[]; +}; + +enum sfp_page_cnt { + SXE2_SFP_EEP_PAGE_CNT0 = 0, + SXE2_SFP_EEP_PAGE_CNT1, + SXE2_SFP_EEP_PAGE_CNT2, + SXE2_SFP_EEP_PAGE_CNT3, + + SXE2_SFP_EEP_PAGE_CNT20 = 20, + SXE2_SFP_EEP_PAGE_CNT21 = 21, + + SXE2_SFP_EEP_PAGE_CNT_NR = 0xFFFF, +}; + +#define SXE2_SFP_E2P_I2C_7BIT_ADDR0 (SXE2_SFP_EEP_I2C_ADDR0 >> 1) +#define SXE2_SFP_E2P_I2C_7BIT_ADDR1 (SXE2_SFP_EEP_I2C_ADDR1 >> 1) + +#define SXE2_QSFP_PAGE_OFST_START 128 +#define SXE2_SFP_EEP_OFST_MAX 255 +#define SXE2_SFP_EEP_LEN_MAX 256 +#endif + +#ifndef FW_STATE_DEFINE +#define FW_STATE_DEFINE + +#define SXE2_FW_STATUS_MAIN_SHIF (16) +#define SXE2_FW_STATUS_MAIN_MASK (0xFF0000) +#define SXE2_FW_STATUS_SUB_MASK (0xFFFF) +enum Sxe2FwStateMain { + SXE2_FW_STATE_MAIN_UNDEFINED = 0x00, + SXE2_FW_STATE_MAIN_INIT = 0x10000, + SXE2_FW_STATE_MAIN_RUN = 0x20000, + SXE2_FW_STATE_MAIN_ABNOMAL = 0x30000, +}; + +enum Sxe2FwState { + SXE2_FW_START_STATE_UNDEFINED = SXE2_FW_STATE_MAIN_UNDEFINED, + SXE2_FW_START_STATE_INIT_BASE = (SXE2_FW_STATE_MAIN_INIT + 0x1), + SXE2_FW_START_STATE_SCAN_DEVICE = (SXE2_FW_STATE_MAIN_INIT + 0x20), + SXE2_FW_START_STATE_FINISHED = (SXE2_FW_STATE_MAIN_RUN + 0x0), + SXE2_FW_START_STATE_UPGRADE = (SXE2_FW_STATE_MAIN_RUN + 0x1), + SXE2_FW_START_STATE_SYNC = (SXE2_FW_STATE_MAIN_RUN + 0x2), + SXE2_FW_RUNNING_STATE_ABNOMAL = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x1), + SXE2_FW_RUNNING_STATE_ABNOMAL_CORE1 = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x2), + 
SXE2_FW_RUNNING_STATE_ABNOMAL_HEART = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x3), + + SXE2_FW_START_STATE_MASK = (SXE2_FW_STATUS_MAIN_MASK | SXE2_FW_STATUS_SUB_MASK), +}; +#endif + +#ifndef LED_DEFINE +#define LED_DEFINE +enum sxe2_led_mode { + SXE2_IDENTIFY_LED_BLINK_ON = 0, + SXE2_IDENTIFY_LED_BLINK_OFF, + SXE2_IDENTIFY_LED_ON, + SXE2_IDENTIFY_LED_OFF, + SXE2_IDENTIFY_LED_RESET, +}; + +struct sxe2_led_ctrl { + u32 mode; + u32 duration; + +}; + +struct sxe2_led_ctrl_resp { + u32 ack; +}; +#endif + +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_spec.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..ab6590f5bdb1cf46c3f3c5e71b2407db72d8303c --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_spec.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_spec.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_SPEC__ +#define __SXE2_SPEC__ + +#define SXE2_TXSCHED_LAYER_MAX_7 7 +#define SXE2_TXSCHED_LAYER_MAX_4 4 +#define SXE2_TXSCHED_LAYER_MAX_3 3 +#define SXE2_TXSCHED_LEAF_MAX_3072 3072 +#define SXE2_TXSCHED_LEAF_MAX_512 512 +#define SXE2_TXSCHED_LEAF_MAX_256 256 +#define SXE2_TXSCHED_LEAF_MAX_128 128 +#define SXE2_TXSCHED_LEAF_MAX_64 64 + +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 + +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#define SXE2_XDP_TX_Q_NUM 8 + +#ifndef SXE2_TXSCHED_LAYER_MAX +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#endif + +#ifndef SXE2_TXSCHED_LEAF_MAX +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 +#endif + +#ifndef SXE2_VSI_PF_ASSURED_NUM +#define SXE2_VSI_PF_ASSURED_NUM 256 +#endif + +#ifndef SXE2_PF_NUM +#define SXE2_PF_NUM 8 +#endif + +#ifndef SXE2_VSI_NUM +#define SXE2_VSI_NUM 768 +#endif + +#ifndef SXE2_QUEUE_NUM +#define SXE2_QUEUE_NUM 2048 +#endif + +#ifndef SXE2_IRQ_NUM +#define SXE2_IRQ_NUM 2048 +#endif + +#ifndef SXE2_VF_NUM +#define SXE2_VF_NUM 256 +#endif + +#ifndef SXE2_MAX_MACVLANS +#define SXE2_MAX_MACVLANS 16 +#endif + +#define SXE2_BUF_SIZE_FW_TQ (8 * 1024) +#define SXE2_BUF_SIZE_FW_RQ (8 * 1024) + +#ifndef SXE2_BUF_SIZE_MBX_TQ +#define SXE2_BUF_SIZE_MBX_TQ (4 * 1024) +#endif + +#ifndef SXE2_BUF_SIZE_MBX_RQ +#define SXE2_BUF_SIZE_MBX_RQ (4 * 1024) +#endif + +#ifndef SXE2_DFLT_IRQS_MAX_CNT +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#endif + +#ifndef SXE2_DFLT_IRQS_MIN_CNT +#define SXE2_DFLT_IRQS_MIN_CNT 8 + +#endif + +#ifndef SXE2_VF_RSS_Q_NUM +#define SXE2_VF_RSS_Q_NUM 16 +#endif + +#ifndef SXE2_IPSEC_RX_SA_DEPTH +#define SXE2_IPSEC_RX_SA_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_RX_DCAM_DEPTH +#define SXE2_IPSEC_RX_DCAM_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_TX_SA_DEPTH +#define SXE2_IPSEC_TX_SA_DEPTH 4096 +#endif + +#define SXE2_MACSEC_ENABLE + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_type.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_type.h new file mode 100644 index 0000000000000000000000000000000000000000..66d994f334af63c786e5c2d85e6ffe5187e58c39 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_type.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_type.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_TYPES_H__ +#define __SXE2_TYPES_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#if defined __BYTE_ORDER__ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif defined __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#elif defined RTE_TOOLCHAIN_MSVC +#define __LITTLE_ENDIAN_BITFIELD +#else +#error "Unknown endianness." +#endif +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef char s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +typedef s8 S8; +typedef s16 S16; +typedef s32 S32; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#define STATIC static + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_uefi_def.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_uefi_def.h new file mode 100644 index 0000000000000000000000000000000000000000..f18cfef669e9174f57688c8adfecee166cf22ab7 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_uefi_def.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_uefi_def.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_UEFI_DEF_H__ +#define __SXE2_UEFI_DEF_H__ + +#ifdef UEFI_SUPPORT_MIPS +#include +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define __BIG_ENDIAN_BITFIELD +#endif + +#if BYTE_ORDER == LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#else +#define __BIG_ENDIAN_BITFIELD +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_version.h b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_version.h new file mode 100644 index 0000000000000000000000000000000000000000..2c421e1dd2277aae3ecfaa4df62fe28177ec2bba --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/include/drv/sxe2_version.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_version.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_VER_H__ +#define __SXE2_VER_H__ + +#define SXE2_VERSION "0.0.0.0" +#define SXE2_COMMIT_ID "df68e70" +#define SXE2_BRANCH "feature/from_develop-rc-tucana-2.1.0_B009_OpenSource-01-style_kongdl" +#define SXE2_BUILD_TIME "2026-04-30 10:54:24" + +#define SXE2_DRV_ARCH "x86_64" +#define SXE2_DRV_NAME "sxe2" +#define SXE2VF_DRV_NAME "sxe2vf" +#define SXE2_DRV_LICENSE "GPL v2" +#define SXE2_DRV_AUTHOR "SXE2" +#define SXE2_DRV_DESCRIPTION "SXE2 Linux Driver" +#define SXE2VF_DRV_DESCRIPTION "SXE2 Virtual Function Linux Driver" + +#define SXE2_FW_NAME "soc" +#define SXE2_FW_ARCH "arm32" + +#ifndef SXE2_CFG_RELEASE +#define SXE2_FW_BUILD_MODE "debug" +#else +#define SXE2_FW_BUILD_MODE "release" +#endif + +#define SXE2_FW_RUN_MODE 6 + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat.h b/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..34af0044068b67f37d65d16a96afd0084cfa4232 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat.h @@ -0,0 +1,467 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SXE2_COMPAT_H__ +#define __SXE2_COMPAT_H__ + +#include +#include + +#include "sxe2_compat_gcc.h" + +#include +#include +#include + +#ifdef NEED_KERNEL_AUX_DEFINE +#include "auxiliary_bus.h" +#include "rdma/uverbs_ioctl.h" +#endif +#ifdef CTYUNOS_25_03 +#include +#endif + +#ifndef IB_QP_ATTR_STANDARD_BITS +#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0) +#endif +#include + +#ifndef from_tasklet +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) +#endif + +#ifdef NEED_FLOW_CLS_OFFLOAD +struct tc_cls_flower_offload; +#define flow_cls_offload tc_cls_flower_offload +struct tc_block_offload; +#define flow_block_offload tc_block_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#endif + +#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#endif + +#ifdef NEED_PCI_AER_CLEAR_NONFATAL_STATUS +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifdef NEED_FLOW_BLOCK_BINDER_TYPE +#define FLOW_BLOCK_BINDER_TYPE_UNSPEC TCF_BLOCK_BINDER_TYPE_UNSPEC +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS +#endif + +#ifdef NEED_FLOW_BLOCK_BIND +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND +#endif + +#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif + +#ifndef NETIF_NAPI_ADD_API_NEED_4_PARAMS +static inline void +netif_napi_add_compat(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + netif_napi_add(dev, napi, poll); +} + +#ifdef netif_napi_add +#undef netif_napi_add +#endif +#define netif_napi_add(dev, napi, poll, weight) netif_napi_add_compat(dev, napi, poll, weight) +#endif + +#ifdef DEVLINK_ALLOC_NEED_2_PARAMS +static inline struct devlink * 
+devlink_alloc_compat(const struct devlink_ops *ops, size_t priv_size, + struct device * __always_unused dev) +{ + return devlink_alloc(ops, priv_size); +} + +#define devlink_alloc(ops, priv_size, dev) devlink_alloc_compat(ops, priv_size, dev) +#endif + +#define SXE2_STRCPY strlcpy + +#ifdef NEED_NAPI_BUILD_SKB +#define napi_build_skb(data, frag_size) build_skb(data, frag_size) +#endif + +#ifdef NEED_DEFINE_BITS_TO_U32 +#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) +#endif + +#ifdef NEED_DEFINE_ETHTOOL_SPRINTF +#ifndef ethtool_sprintf +static inline void ethtool_sprintf_compat(u8 **data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + *data += ETH_GSTRING_LEN; +} +#define ethtool_sprintf ethtool_sprintf_compat +#endif +#endif + +#ifdef NEED_DEFINE_KREALLOC_ARRAY +static inline void *__must_check krealloc_array(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) + return NULL; + + return krealloc(p, bytes, flags); +} +#endif + +#ifdef NEED_DEFINE_NET_PREFETCH +static inline void net_prefetch(void *p) +{ + prefetch(p); +#if L1_CACHE_BYTES < 128 + prefetch((u8 *)p + L1_CACHE_BYTES); +#endif +} +#endif + +#ifdef NEED_DEFINE_XDP_PREPARE_BUFF +static __always_inline void +xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, + int headroom, int data_len, const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + + xdp->data_hard_start = hard_start; + xdp->data = data; + xdp->data_end = data + data_len; + xdp->data_meta = meta_valid ? data : data + 1; +} +#endif + +#ifdef NEED_DEFINE_MUL_U64_U64_DIV_U64 +static inline u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + if (ilog2(a) + ilog2(b) > 62) { + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif + +#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +static inline int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif +#endif + +#ifdef NEED_DEFINE_KFREE_SENSITIVE +static inline void kfree_sensitive(const void *p) +{ + size_t ks; + void *mem = (void *)p; + + ks = ksize(mem); + if (ks) + memzero_explicit(mem, ks); + kfree(mem); +} +#endif + +#ifdef RDMA_IRQ_UPDATE_AFFINITY +static inline int irq_update_affinity_hint(unsigned int irq, const struct cpumask *m) +{ + return irq_set_affinity_hint(irq, m); +} +#endif +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT +static inline size_t sxe2_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned long pgsz, u64 iova) +{ +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif + + return (size_t)((ALIGN(iova + umem->length, pgsz) - + ALIGN_DOWN(iova, pgsz))) / pgsz; +} +#endif +#ifdef 
NEED_RDMA_UMEM_BLOCK_ITER_NEXT +struct kc_ib_block_iter { + struct scatterlist *__sg; + dma_addr_t __dma_addr; + size_t __sg_numblocks; + unsigned int __sg_nents; + unsigned int __sg_advance; + unsigned int __pg_bit; +}; + +void kc__rdma_block_iter_start(struct kc_ib_block_iter *biter, + struct scatterlist *sglist, + unsigned int nents, + unsigned long pgsz); +bool kc__rdma_block_iter_next(struct kc_ib_block_iter *biter); + +static inline dma_addr_t +kc_rdma_block_iter_dma_address(struct kc_ib_block_iter *biter) +{ + return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); +} + +static inline void +kc__rdma_umem_block_iter_start(struct kc_ib_block_iter *biter, + struct ib_umem *umem, + unsigned long pgsz) +{ + kc__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, + pgsz); + biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1); + biter->__sg_numblocks = + sxe2_ib_umem_num_dma_blocks(umem, pgsz, umem->address); +} + +static inline bool +kc__rdma_umem_block_iter_next(struct kc_ib_block_iter *biter) +{ + return kc__rdma_block_iter_next(biter) && biter->__sg_numblocks--; +} + +#define kc_rdma_umem_for_each_dma_block(umem, biter, pgsz) \ + for (kc__rdma_umem_block_iter_start(biter, umem, pgsz); \ + kc__rdma_umem_block_iter_next(biter);) + +#undef rdma_umem_for_each_dma_block +#define rdma_umem_for_each_dma_block kc_rdma_umem_for_each_dma_block +#define ib_block_iter kc_ib_block_iter +#define rdma_block_iter_dma_address kc_rdma_block_iter_dma_address +#define __rdma_umem_block_iter_start kc__rdma_umem_block_iter_start +#define __rdma_block_iter_next kc__rdma_block_iter_next +#endif +#ifdef UVERBS_CMD_MASK +static inline void kc_set_rdma_uverbs_cmd_mask(struct ib_device *dev) +{ + dev->uverbs_cmd_mask = + BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) | + BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) | + BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) | + BIT_ULL(IB_USER_VERBS_CMD_REG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) | + BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) | + BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) | + BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) | + BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV) | + BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) | + BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) | + BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) | + BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST); + dev->uverbs_ex_cmd_mask = BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) | + BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | + BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ); + +} +#endif + +#ifdef IRQ_UPDATE_AFFINITY +/** + * irq_update_affinity_hint - Update the affinity hint + * @irq: Interrupt to update + * @m: cpumask pointer (NULL to clear the hint) + * + * Updates the affinity hint, but does not change the affinity of the interrupt. 
+ */ +static inline int +irq_update_affinity_hint(unsigned int irq, const struct cpumask *m) +{ + return irq_set_affinity_hint(irq, m); +} +#endif + +#ifdef NEED_IB_RATE +enum ib_rate_compat { + IB_RATE_28_GBPS = 19, + IB_RATE_50_GBPS = 20, + IB_RATE_400_GBPS = 21, + IB_RATE_600_GBPS = 22, +}; +#endif + +#ifdef NEED_UVERBS_ATTR_CONST_IN +#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \ + UVERBS_ATTR_PTR_IN( \ + _attr_id, \ + UVERBS_ATTR_SIZE( \ + sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \ + sizeof(u64)), \ + __VA_ARGS__) +#endif + +#ifdef NEED_AUXILIARY_SET +static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, + void *data) +{ + auxdev->dev.driver_data = data; +} + +static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev) +{ + return auxdev->dev.driver_data; +} +#endif + +#ifdef NEED_UAPI_CHAIN_OBJ_TREE_NAME +enum uapi_definition_kind { + UAPI_DEF_END = 0, + UAPI_DEF_OBJECT_START, + UAPI_DEF_WRITE, + UAPI_DEF_CHAIN_OBJ_TREE, + UAPI_DEF_CHAIN, + UAPI_DEF_IS_SUPPORTED_FUNC, + UAPI_DEF_IS_SUPPORTED_DEV_FN, +}; + +#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) + +#define UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, _object_ptr, ...) \ + ({ \ + { \ + .kind = UAPI_DEF_CHAIN_OBJ_TREE,\ + .object_start = { .object_id = _object_enum },\ + .chain_obj_tree = _object_ptr, \ + }, \ + ##__VA_ARGS__ \ + }) + +#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \ + UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, \ + PTR_IF(IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS), \ + &UVERBS_OBJECT(_object_enum)), \ + ##__VA_ARGS__) +#endif + +#ifdef IB_UMEM_GET_V3 +static inline struct ib_ucontext *rdma_device_to_drv_context(struct ib_device *device) +{ + void *__ptr = (void *)(device); + + return (struct ib_ucontext *)((char *)__ptr - offsetof(struct ib_ucontext, device)); +} +#endif + +#ifdef NEDD_ONE_PARAM_ALLOC +#define sxe2_ib_alloc_device(drv_struct, member) \ + container_of(ib_alloc_device(sizeof(struct drv_struct) + \ + BUILD_BUG_ON_ZERO(offsetof( \ + struct drv_struct, member))), \ + struct drv_struct, member) +#endif + +#ifdef NEED_CREATE_AH_FLAGS +enum rdma_create_ah_flags { + RDMA_CREATE_AH_SLEEPABLE = BIT(0), +}; +#endif + +#ifdef HAVE_NO_IB_DEVICE_OPS +void sxe2_set_device_ops(struct ib_device *dev_ops); +#endif + +#ifdef NEED_RDMA_MMAP_IO +struct sxe2_vma_data { + struct list_head list; + struct vm_area_struct *vma; + struct mutex *vma_list_mutex; +}; + +int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot); +#endif + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ +u64 sxe2_set_best_pagesz(u64 addr, struct ib_umem *region, u64 page_size_cap); +#endif + +#ifdef HAVE_NO_GET_CONST +int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val); + +#define uverbs_get_const(_to, _attrs_bundle, _idx) \ + ({ \ + s64 _val; \ + int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \ + type_min(typeof(*_to)), \ + type_max(typeof(*_to)), NULL); \ + (*_to) = _val; \ + _ret; \ + }) +#endif +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat_gcc.h b/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat_gcc.h new file mode 100644 index 0000000000000000000000000000000000000000..50687fab331a915253e1a8aecf72c2218db1ea00 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/compat/sxe2_compat_gcc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, 
sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_compat_gcc.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_GCC_H__ +#define __SXE2_COMPAT_GCC_H__ + +#ifndef SXE2_TEST +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +#define fallthrough __attribute__((__fallthrough__)) +#else +#define fallthrough \ + do { \ + } while (0) +#endif +#else +#define fallthrough \ + do { \ + } while (0) +#endif +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2-abi.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2-abi.h new file mode 100644 index 0000000000000000000000000000000000000000..c0cffd91258ba7ec8400b398ddbdcf8948fba7bb --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2-abi.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2-abi.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_ABI_STRUCT_H__ +#define __SXE2_ABI_STRUCT_H__ + +#include + +struct sxe2_create_ah_resp { + __u32 ah_id; + __u8 rsvd[4]; +}; + +struct sxe2_modify_qp_req { + __u8 sq_flush; + __u8 rq_flush; + __u8 llwqe_enable; + __u8 new_page_alloc; + __u32 llwqe_page_index; +}; + +struct sxe2_modify_qp_resp { + __u8 rd_fence_rate; + __u8 rsvd[3]; + __u32 db_mmap_size; + __u32 db_page_id; + __u32 rsvd1; + __u64 db_mmap_offset; +}; + +struct sxe2_create_qp_req { + __u32 sq_depth; + __u32 rq_depth; + __u8 sq_shift; + __u8 rq_shift; + __u8 rsvd[6]; + __u64 user_wqe_bufs; + __u64 doorbell_note; + __u64 user_compl_ctx; +}; +struct sxe2_create_qp_resp { + __u32 qpn; + __u32 qp_caps; +}; + +struct sxe2_create_cq_req { + __aligned_u64 user_cq_buf; + __aligned_u64 user_cq_db_note; +}; + +struct sxe2_create_cq_resp { + __u32 cq_id; + __u32 ncqe; +}; + +struct sxe2_alloc_pd_resp { + __u32 pd_id; + __u8 rsvd[4]; +}; + +struct sxe2_create_srq_req { + __aligned_u64 user_srq_buf; + __aligned_u64 user_srq_db_note; + __aligned_u64 srq_cmpl_ctx; + __u32 srq_buf_size; + __u32 srq_size; + __u32 max_wr_cal; +}; + +struct sxe2_create_srq_resp { + __u32 srq_id; +}; + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_compat.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..c878e1da034590881e153cea93002e2c734fd682 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_compat.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_compat.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_drv_main.h" +#include "sxe2_version.h" +#include "sxe2_drv_aux.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_db.h" +#include "sxe2_drv_stats.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_hw.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_device_port.h" +#include "sxe2_drv_cq.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_pd.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_rdma_qos.h" +#include "sxe2_drv_ah.h" +#include "sxe2_drv_io.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_srq.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_qos_debugfs.h" +#include "sxe2_drv_mc.h" +#include "sxe2_drv_rdma_inject.h" +#include "sxe2_drv_rdma_inject_debugfs.h" +#include "sxe2_drv_rdma_inject_reg.h" +#include "sxe2_drv_cc_debugfs.h" +#include "sxe2_drv_rdma_configfs.h" + +#ifdef HAVE_NO_IB_DEVICE_OPS +void sxe2_set_device_ops(struct ib_device *dev_ops) +{ + dev_ops->uverbs_abi_ver = 1; + dev_ops->driver_id = RDMA_DRIVER_SXE2; + dev_ops->owner = THIS_MODULE; + + dev_ops->alloc_ucontext = sxe2_rdma_kalloc_ucontext; + dev_ops->dealloc_ucontext = sxe2_rdma_kdealloc_ucontext; + dev_ops->query_device = sxe2_rdma_kquery_device; + dev_ops->query_port = sxe2_rdma_kquery_port; + dev_ops->query_gid = sxe2_rdma_kquery_gid; + dev_ops->get_link_layer = sxe2_rdma_kget_link_layer; + dev_ops->query_pkey = sxe2_query_pkey; + dev_ops->get_dev_fw_str = sxe2_rdma_kget_dev_fw_str; +#ifndef CREATE_AH_NOT_SUPPORT + dev_ops->create_user_ah = sxe2_kcreate_ah; +#endif + dev_ops->create_ah = sxe2_kcreate_ah; + dev_ops->query_ah = sxe2_kquery_ah; + dev_ops->destroy_ah = sxe2_kdestroy_ah; + dev_ops->alloc_mr = sxe2_kalloc_mr; + dev_ops->reg_user_mr = sxe2_kreg_user_mr; + dev_ops->rereg_user_mr = sxe2_krereg_user_mr; + dev_ops->get_dma_mr = sxe2_kget_dma_mr; + dev_ops->dereg_mr = sxe2_kdereg_mr; +#ifndef REG_USER_MR_DMABUF_VER_1 + dev_ops->reg_user_mr_dmabuf = sxe2_kreg_user_mr_dmabuf; +#endif + dev_ops->poll_cq = sxe2_kpoll_cq; + dev_ops->post_recv = sxe2_kpost_recv; + dev_ops->post_send = sxe2_kpost_send; + dev_ops->post_srq_recv = sxe2_kpost_srq_recv; + dev_ops->req_notify_cq = sxe2_kreq_notify_cq; + dev_ops->map_mr_sg = sxe2_kmap_mr_sg; + dev_ops->get_port_immutable = sxe2_kget_port_immutable; + dev_ops->create_qp = sxe2_kcreate_qp; + dev_ops->destroy_qp = sxe2_kdestroy_qp; + dev_ops->modify_qp = sxe2_kmodify_qp; + dev_ops->query_qp = sxe2_kquery_qp; + dev_ops->create_srq = sxe2_kcreate_srq; + dev_ops->modify_srq = sxe2_kmodify_srq; + dev_ops->query_srq = sxe2_kquery_srq; + dev_ops->destroy_srq = sxe2_kdestroy_srq; +#ifdef ALLOC_HW_STATS_V1 + dev_ops->alloc_hw_stats = sxe2_kalloc_hw_port_stats; +#else + dev_ops->alloc_hw_port_stats = sxe2_kalloc_hw_port_stats; +#endif + dev_ops->get_hw_stats = sxe2_kget_hw_stats; + dev_ops->mmap = sxe2_kmmap; +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + dev_ops->mmap_free = sxe2_kmmap_free; +#endif + dev_ops->alloc_pd = sxe2_kalloc_pd; + dev_ops->dealloc_pd = sxe2_kdealloc_pd; + dev_ops->create_cq = sxe2_kcreate_cq; + dev_ops->modify_cq = sxe2_kmodify_cq; + dev_ops->destroy_cq = sxe2_kdestroy_cq; + dev_ops->modify_port = sxe2_rdma_kmodify_port; + dev_ops->get_netdev = sxe2_rdma_kget_net_dev; + dev_ops->disassociate_ucontext = sxe2_rdma_disassociate_ucontext; + dev_ops->attach_mcast = 
sxe2_kattach_mcast; + dev_ops->detach_mcast = sxe2_kdetach_mcast; +} +#endif + +#ifdef NEED_RDMA_MMAP_IO +static void sxe2_vma_open(struct vm_area_struct *vma) +{ + vma->vm_ops = NULL; +} + +static void sxe2_vma_close(struct vm_area_struct *vma) +{ + struct sxe2_vma_data *vma_data; + + vma_data = vma->vm_private_data; + vma->vm_private_data = NULL; + vma_data->vma = NULL; + mutex_lock(vma_data->vma_list_mutex); + list_del(&vma_data->list); + mutex_unlock(vma_data->vma_list_mutex); + kfree(vma_data); +} + +static const struct vm_operations_struct sxe2_vm_ops = { + .open = sxe2_vma_open, + .close = sxe2_vma_close +}; + +static int sxe2_set_vma_data(struct vm_area_struct *vma, + struct sxe2_rdma_kcontext *context) +{ + struct list_head *vma_head = &context->vma_list; + struct sxe2_vma_data *vma_entry; + + vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL); + if (!vma_entry) + return -ENOMEM; + + vma->vm_private_data = vma_entry; + vma->vm_ops = &sxe2_vm_ops; + + vma_entry->vma = vma; + vma_entry->vma_list_mutex = &context->vma_list_mutex; + + mutex_lock(&context->vma_list_mutex); + list_add(&vma_entry->list, vma_head); + mutex_unlock(&context->vma_list_mutex); + + return 0; +} + +int rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + struct sxe2_rdma_kcontext *kcontext = ibuctxto_kctx(context); + + if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) + return -EAGAIN; + + return sxe2_set_vma_data(vma, kcontext); +} +#endif + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ +u64 sxe2_set_best_pagesz(u64 addr, struct ib_umem *region, u64 page_size_cap) +{ + struct vm_area_struct *vma; + struct hstate *h; + + if (!region->hugetlb) + return PAGE_SIZE; + + vma = find_vma(current->mm, addr); + if (vma && is_vm_hugetlb_page(vma)) { + h = hstate_vma(vma); + if ((huge_page_size(h) == SZ_2M && (page_size_cap & SZ_2M)) || + (huge_page_size(h) == SZ_1G && (page_size_cap & SZ_1G))) { + return huge_page_size(h); + } + } + return PAGE_SIZE; +} +#endif + +#ifdef HAVE_NO_GET_CONST +int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val) +{ + const struct uverbs_attr *attr; + + attr = uverbs_attr_get(attrs_bundle, idx); + if (IS_ERR(attr)) { + if ((PTR_ERR(attr) != -ENOENT) || !def_val) + return PTR_ERR(attr); + + *to = *def_val; + } else { + *to = attr->ptr_attr.data; + } + + if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound)) + return -EINVAL; + + return 0; +} +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.c new file mode 100644 index 0000000000000000000000000000000000000000..1ee0fd5884c88541721eed813ee2410c98ac6c72 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.c @@ -0,0 +1,884 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_ah.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2-abi.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_main.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_ah.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_ah_debugfs.h" + +static bool sxe2_ipv4_is_lpb(u32 loc_addr, u32 rem_addr) +{ + return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); +} + +static bool sxe2_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr) +{ + struct in6_addr raddr6; + + sxe2_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); + + return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); +} +static inline void sxe2_mcast_mac_v4(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac4[ETH_ALEN] = {}; + + mac4[0] = 0x01; + mac4[1] = 0x00; + mac4[2] = 0x5E; + mac4[3] = ip[2] & 0x7F; + mac4[4] = ip[1]; + mac4[5] = ip[0]; + + ether_addr_copy(mac, mac4); +} + +static inline void sxe2_mcast_mac_v6(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac6[ETH_ALEN] = {}; + + mac6[0] = 0x33; + mac6[1] = 0x33; + mac6[2] = ip[3]; + mac6[3] = ip[2]; + mac6[4] = ip[1]; + mac6[5] = ip[0]; + + ether_addr_copy(mac, mac6); +} + +static u8 sxe2_get_vlan_prio(struct net_device __rcu *ndev_rcu, u8 prio) +{ + struct net_device *ndev; + + rcu_read_lock(); + ndev = rcu_dereference(ndev_rcu); + if (!ndev) + goto exit; + if (is_vlan_dev(ndev)) { + u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio); + + prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + } +exit: + rcu_read_unlock(); + return prio; +} + +static void sxe2_fill_ah_info(struct sxe2_rdma_device *rdma_dev, + struct rdma_ah_attr *attr, + union sxe2_ah_info *ah_info, u8 net_type) +{ + union sxe2_sockaddr sgid_addr, dgid_addr; + u8 dmac[ETH_ALEN]; + u16 vlan_prio; + int i; + int j; + u32 tmp_ip[4] = {}; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, &attr->grh.sgid_attr->gid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ether_addr_copy(dmac, attr->roce.dmac); + + if (attr->ah_flags & IB_AH_GRH) { + ah_info->field.flow_label = attr->grh.flow_label; + ah_info->field.hop_ttl = attr->grh.hop_limit; + ah_info->field.tc_tos = attr->grh.traffic_class; + } + + if (net_type == RDMA_NETWORK_IPV4) { + ah_info->field.ipv4_valid = true; + ah_info->field.dest_ip_addr[0] = + ntohl(dgid_addr.saddr_in.sin_addr.s_addr); + ah_info->field.src_ip_addr[0] = + ntohl(sgid_addr.saddr_in.sin_addr.s_addr); + ah_info->field.do_lpbk = + sxe2_ipv4_is_lpb(ah_info->field.src_ip_addr[0], + ah_info->field.dest_ip_addr[0]); + if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) + sxe2_mcast_mac_v4(ah_info->field.dest_ip_addr, dmac); + } else { + sxe2_copy_ip_ntohl( + ah_info->field.dest_ip_addr, + dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); + sxe2_copy_ip_ntohl( + ah_info->field.src_ip_addr, + sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); + ah_info->field.do_lpbk = + sxe2_ipv6_is_lpb(ah_info->field.src_ip_addr, + ah_info->field.dest_ip_addr); + if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) + sxe2_mcast_mac_v6(&ah_info->field.dest_ip_addr[3], + dmac); + DRV_RDMA_LOG_DEV_DEBUG("mc0, SIP6=%pI6, DIP6=%pI6, MAC=%pM\n", + ah_info->field.src_ip_addr, + ah_info->field.dest_ip_addr, dmac); + + DRV_RDMA_LOG_DEV_DEBUG( + "mc0, SIP6=%u.%u.%u.%u, DIP6=%u.%u.%u.%u", + ah_info->field.src_ip_addr[0], + ah_info->field.src_ip_addr[1], 
+ ah_info->field.src_ip_addr[2], + ah_info->field.src_ip_addr[3], + ah_info->field.dest_ip_addr[0], + ah_info->field.dest_ip_addr[1], + ah_info->field.dest_ip_addr[2], + ah_info->field.dest_ip_addr[3]); + + memset(tmp_ip, 0, sizeof(tmp_ip[0]) * 4); + memcpy(tmp_ip, ah_info->field.dest_ip_addr, + sizeof(tmp_ip[0]) * 4); + for (i = 0, j = 3; (i < 4) && (j >= 0); i++, j--) + ah_info->field.dest_ip_addr[i] = tmp_ip[j]; + memset(tmp_ip, 0, sizeof(tmp_ip[0]) * 4); + memcpy(tmp_ip, ah_info->field.src_ip_addr, + sizeof(tmp_ip[0]) * 4); + for (i = 0, j = 3; (i < 4) && (j >= 0); i++, j--) + ah_info->field.src_ip_addr[i] = tmp_ip[j]; + DRV_RDMA_LOG_DEV_DEBUG("mc1, SIP6=%pI6, DIP6=%pI6, MAC=%pM\n", + ah_info->field.src_ip_addr, + ah_info->field.dest_ip_addr, dmac); + + DRV_RDMA_LOG_DEV_DEBUG( + "mc1, SIP6=%u.%u.%u.%u, DIP6=%u.%u.%u.%u", + ah_info->field.src_ip_addr[0], + ah_info->field.src_ip_addr[1], + ah_info->field.src_ip_addr[2], + ah_info->field.src_ip_addr[3], + ah_info->field.dest_ip_addr[0], + ah_info->field.dest_ip_addr[1], + ah_info->field.dest_ip_addr[2], + ah_info->field.dest_ip_addr[3]); + } + + if (ah_info->field.vlan_tag >= VLAN_N_VID && rdma_dev->dcb_vlan_mode) + ah_info->field.vlan_tag = 0; + if (ah_info->field.vlan_tag < VLAN_N_VID) { + ah_info->field.insert_vlan_tag = true; + vlan_prio = (u16)sxe2_get_vlan_prio( + attr->grh.sgid_attr->ndev, + rt_tos2priority(ah_info->field.tc_tos)); + ah_info->field.vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT; + } + + for (i = 0, j = ETH_ALEN - 1; (i < ETH_ALEN) && (j >= 0); i++, j--) + ah_info->field.dest_mac[i] = dmac[j]; + + if (rdma_dev->roce_dcqcn_en) { + ah_info->field.tc_tos &= ~ECN_CODE_PT_MASK; + ah_info->field.tc_tos |= ECN_CODE_PT_VAL; + } +} + +static bool sxe2_ah_exists(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *new_ah) +{ + struct sxe2_ah *ah = NULL; + u32 save_ah_id = new_ah->ctx_ah.ah_info.field.ah_idx; + u32 key = new_ah->ctx_ah.ah_info.field.dest_ip_addr[0] ^ + new_ah->ctx_ah.ah_info.field.dest_ip_addr[1] ^ + new_ah->ctx_ah.ah_info.field.dest_ip_addr[2] ^ + new_ah->ctx_ah.ah_info.field.dest_ip_addr[3]; + + hash_for_each_possible(rdma_dev->ah_hash_tbl, ah, list, key) { + new_ah->ctx_ah.ah_info.field.ah_idx = + ah->ctx_ah.ah_info.field.ah_idx; + DRV_RDMA_LOG_DEV_DEBUG( + "AH CMP: oldinfo == newinfo\n" + "dmac %d:%d:%d:%d:%d:%d == %d:%d:%d:%d:%d:%d\n" + "vlan_tag %u == %u\n" + "tc_tos %u == %u\n" + "pd_idx %u == %u\n" + "flow_label %u == %u\n" + "hop_ttl %u == %u\n" + "arp_idx %u == %u\n" + "ah_idx %u == %u\n" + "op %d == %d\n" + "ipv4_valid %d == %d\n" + "insert_vlan_tag %d == %d\n" + "do_lpbk %d == %d\n" + "wqe_valid %d == %d\n" + "dip %d:%d:%d:%d == %d:%d:%d:%d\n" + "sip %d:%d:%d:%d == %d:%d:%d:%d\n", + ah->ctx_ah.ah_info.field.dest_mac[0], + ah->ctx_ah.ah_info.field.dest_mac[1], + ah->ctx_ah.ah_info.field.dest_mac[2], + ah->ctx_ah.ah_info.field.dest_mac[3], + ah->ctx_ah.ah_info.field.dest_mac[4], + ah->ctx_ah.ah_info.field.dest_mac[5], + new_ah->ctx_ah.ah_info.field.dest_mac[0], + new_ah->ctx_ah.ah_info.field.dest_mac[1], + new_ah->ctx_ah.ah_info.field.dest_mac[2], + new_ah->ctx_ah.ah_info.field.dest_mac[3], + new_ah->ctx_ah.ah_info.field.dest_mac[4], + new_ah->ctx_ah.ah_info.field.dest_mac[5], + ah->ctx_ah.ah_info.field.vlan_tag, + new_ah->ctx_ah.ah_info.field.vlan_tag, + ah->ctx_ah.ah_info.field.tc_tos, + new_ah->ctx_ah.ah_info.field.tc_tos, + ah->ctx_ah.ah_info.field.pd_idx, + new_ah->ctx_ah.ah_info.field.pd_idx, + ah->ctx_ah.ah_info.field.flow_label, + new_ah->ctx_ah.ah_info.field.flow_label, + 
ah->ctx_ah.ah_info.field.hop_ttl, + new_ah->ctx_ah.ah_info.field.hop_ttl, + ah->ctx_ah.ah_info.field.arp_index, + new_ah->ctx_ah.ah_info.field.arp_index, + ah->ctx_ah.ah_info.field.ah_idx, + new_ah->ctx_ah.ah_info.field.ah_idx, + ah->ctx_ah.ah_info.field.op, + new_ah->ctx_ah.ah_info.field.op, + ah->ctx_ah.ah_info.field.ipv4_valid, + new_ah->ctx_ah.ah_info.field.ipv4_valid, + ah->ctx_ah.ah_info.field.insert_vlan_tag, + new_ah->ctx_ah.ah_info.field.insert_vlan_tag, + ah->ctx_ah.ah_info.field.do_lpbk, + new_ah->ctx_ah.ah_info.field.do_lpbk, + ah->ctx_ah.ah_info.field.wqe_valid, + new_ah->ctx_ah.ah_info.field.wqe_valid, + ah->ctx_ah.ah_info.field.dest_ip_addr[0], + ah->ctx_ah.ah_info.field.dest_ip_addr[1], + ah->ctx_ah.ah_info.field.dest_ip_addr[2], + ah->ctx_ah.ah_info.field.dest_ip_addr[3], + new_ah->ctx_ah.ah_info.field.dest_ip_addr[0], + new_ah->ctx_ah.ah_info.field.dest_ip_addr[1], + new_ah->ctx_ah.ah_info.field.dest_ip_addr[2], + new_ah->ctx_ah.ah_info.field.dest_ip_addr[3], + ah->ctx_ah.ah_info.field.src_ip_addr[0], + ah->ctx_ah.ah_info.field.src_ip_addr[1], + ah->ctx_ah.ah_info.field.src_ip_addr[2], + ah->ctx_ah.ah_info.field.src_ip_addr[3], + new_ah->ctx_ah.ah_info.field.src_ip_addr[0], + new_ah->ctx_ah.ah_info.field.src_ip_addr[1], + new_ah->ctx_ah.ah_info.field.src_ip_addr[2], + new_ah->ctx_ah.ah_info.field.src_ip_addr[3]); + if (!memcmp(&ah->ctx_ah.ah_info, &new_ah->ctx_ah.ah_info, + sizeof(ah->ctx_ah.ah_info))) { + refcount_inc(&ah->refcnt); + new_ah->parent_ah = ah; + return true; + } + } + new_ah->ctx_ah.ah_info.field.ah_idx = save_ah_id; + ah = kmemdup(new_ah, sizeof(*new_ah), GFP_KERNEL); + if (!ah) + return false; + new_ah->parent_ah = ah; + hash_add(rdma_dev->ah_hash_tbl, &ah->list, key); + rdma_dev->ah_list_cnt++; + if (rdma_dev->ah_list_cnt > rdma_dev->ah_list_hwm) + rdma_dev->ah_list_hwm = rdma_dev->ah_list_cnt; + refcount_set(&ah->refcnt, 1); + + return false; +} + +static int sxe2_post_ah_mqinfo(struct sxe2_rdma_pci_f *rf, + struct sxe2_ctx_ah *ctx_ah, u8 cmd, bool wait, + void (*callback_fcn)(struct sxe2_mq_request *), + void *cb_param) +{ + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *cmd_info; + struct sxe2_rdma_device *rdma_dev; + int status; + + rdma_dev = rf->rdma_dev; + if ((cmd != SXE2_MQ_OP_CREATE_ADDR_HANDLE) && + (cmd != SXE2_MQ_OP_DESTROY_ADDR_HANDLE)) { + DRV_RDMA_LOG_DEV_DEBUG("AH:ah mq cmd type error.\n"); + return -EINVAL; + } + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, wait); + if (!mq_request) { + DRV_RDMA_LOG_DEV_DEBUG("AH:failed get ah mq msg.\n"); + return -ENOMEM; + } + + cmd_info = &mq_request->info; + if (cmd == SXE2_MQ_OP_CREATE_ADDR_HANDLE) { + cmd_info->mq_cmd = MQ_OP_CREATE_ADDR_HANDLE; + } else if (cmd == SXE2_MQ_OP_DESTROY_ADDR_HANDLE) { + cmd_info->mq_cmd = MQ_OP_DESTROY_ADDR_HANDLE; + cmd_info->destroy = true; + } + + cmd_info->post_mq = 1; + + if (cmd == SXE2_MQ_OP_CREATE_ADDR_HANDLE) { + if (!wait) + sxe2_kget_mq_request(mq_request); + ctx_ah->mq_request = mq_request; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ah", + &rdma_dev->rdma_func->mq.err_cqe_val, ctx_ah); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ah"); +#endif + + cmd_info->in.u.ah_info.ctx_dev = &rf->ctx_dev; + cmd_info->in.u.ah_info.info = ctx_ah->ah_info; + cmd_info->in.u.ah_info.scratch = (uintptr_t)mq_request; + + if (!wait) { + mq_request->callback_fcn = callback_fcn; + mq_request->param = cb_param; + } + status = sxe2_khandle_mq_cmd(rf, mq_request); + 
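/*
+	 * Balance the reference taken by sxe2_kalloc_and_get_mq_request().
+	 * Asynchronous (!wait) create requests grabbed an extra reference
+	 * above, so ctx_ah->mq_request remains valid until the completion
+	 * path (sxe2_create_ah_wait() for polled creates) puts it again.
+	 */
+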
sxe2_kput_mq_request(&rf->mq, mq_request); + if (status) { + DRV_RDMA_LOG_DEV_DEBUG("AH:failed proc ah mq cmd, ret %d.\n", + status); + return status; + } + + if (wait) + ctx_ah->ah_valid = (cmd == SXE2_MQ_OP_CREATE_ADDR_HANDLE); + + return 0; +} + +static int sxe2_create_ah_wait(struct sxe2_rdma_pci_f *rf, + struct sxe2_ctx_ah *ctx_ah, bool sleep) +{ + struct sxe2_rdma_device *rdma_dev = rf->rdma_dev; + int ret = 0; + bool mq_error = false; + u16 maj_err = 0; + u16 min_err = 0; + + if (!sleep) { + u32 cnt = rf->ctx_dev.hw_attrs.max_mq_compl_wait_time_ms * + CQP_TIMEOUT_THRESHOLD; + struct sxe2_mq_request *mq_request = ctx_ah->mq_request; + + do { + sxe2_khandler_mcqe(rf, &rf->mcq.ctx_cq, false); + mdelay(1); + } while (!READ_ONCE(mq_request->request_done) && --cnt); + + if (cnt && !mq_request->cmpl_info.op_ret_val) { + sxe2_kput_mq_request(&rf->mq, mq_request); + ctx_ah->ah_valid = true; + + if (mq_request->cmpl_info.error) + mq_error = mq_request->cmpl_info.error; + if (mq_error) { + maj_err = mq_request->cmpl_info.maj_err_code; + min_err = mq_request->cmpl_info.min_err_code; + ret = -EIO; + } + if ((maj_err == MQ_CRIERR_MAJ_ERRCODE) && + ((min_err == MQ_CRIERR_MQC_NOT_CREATED) || + (min_err == MQ_CRIERR_MQ_BASE_ERR) || + (min_err == MQ_CRIERR_MQC_ECC_ERR) || + ((min_err == MQ_CRIERR_QP_DESTROY_ABORT)))) { + if (!rf->reset) { + DRV_RDMA_LOG_DEV_ERR( + "Critical Err:Request Reset, maj_err %#04X\n" + "min_err %#04X, AH cmd\n", + maj_err, min_err); + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + } + } else { + ret = !cnt ? -ETIMEDOUT : -EINVAL; + sxe2_kput_mq_request(&rf->mq, mq_request); + if (!cnt && !rf->reset) { + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + } + } + + return ret; +} + +static void sxe2_err_inject_ah_id(struct sxe2_rdma_device *rdma_dev, + union sxe2_ah_info *ah_info, u32 ah_id) +{ +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "ah_err_idx", ah_info); + + switch (rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type) { + case AH_ID_DEBUGFS: + ah_info->field.ah_idx = + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG_BDF( + "MQ DEBUGFS:inject rsc_err_type:%#x,rsc_err_val\n" + "\t%#llx,ori ah_id %#llx,err ah_id %#llx\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val, + (u64)ah_id, (u64)ah_info->field.ah_idx); + break; + default: + break; + } +#endif +} + +static void sxe2_process_create_ah_err(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *vendor_ah) +{ + if (vendor_ah->parent_ah) { + hash_del(&vendor_ah->parent_ah->list); + kfree(vendor_ah->parent_ah); + vendor_ah->parent_ah = NULL; + rdma_dev->ah_list_cnt--; + } +} + +#ifdef CREATE_AH_VER_3 +struct ib_ah *sxe2_kcreate_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + struct ib_udata *udata) +{ + u32 flags = attr->ah_flags; + struct sxe2_rdma_pd *pd = ibpd_to_vendor_pd(ibpd); + struct sxe2_ah *vendor_ah = NULL; + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibpd->device); + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_ctx_ah *ctx_ah; + u32 ah_id = 0; + union sxe2_ah_info *ah_info; + struct sxe2_create_ah_resp uresp = {}; + int err; + u16 vlan_id; + bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0; + + DRV_RDMA_LOG_DEV_DEBUG("AH:kcreate enter, sleep %d.\n", sleep); + + if (udata && udata->outlen < sizeof(uresp)) { + DRV_RDMA_LOG_DEV_ERR("AH: invalid udata.\n"); + return ERR_PTR(-EINVAL); + } + if (rf->reset) { + 
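/*
+		 * A function-level reset is in flight; refuse to create a
+		 * new address handle until the device is reinitialized.
+		 */
+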
DRV_RDMA_LOG_DEV_ERR("AH: function has reset!\n"); + return ERR_PTR(-EINVAL); + } + err = sxe2_kalloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed alloc ah idx, ret %d\n", err); + return ERR_PTR(err); + } + + vendor_ah = kzalloc(sizeof(*vendor_ah), GFP_KERNEL); + if (!vendor_ah) { + DRV_RDMA_LOG_DEV_ERR("vendor_ah kzalloc failed\n"); + err = -ENOMEM; + goto err_gid_l2; + } + + ctx_ah = &vendor_ah->ctx_ah; + ctx_ah->dev = &rf->ctx_dev; + vendor_ah->av.attrs = *attr; + vendor_ah->av.net_type = + rdma_gid_attr_network_type(attr->grh.sgid_attr); + ah_info = &ctx_ah->ah_info; + vlan_id = rdma_vlan_dev_vlan_id(attr->grh.sgid_attr->ndev); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed read vlan_id, ret %d\n", err); + goto err_kalloc_ah; + } + ah_info->field.pd_idx = pd->pd_ctx.pd_id; + ah_info->field.vlan_tag = vlan_id; + ah_info->field.ah_idx = ah_id; + + sxe2_err_inject_ah_id(rdma_dev, ah_info, ah_id); + + sxe2_fill_ah_info(rdma_dev, attr, ah_info, vendor_ah->av.net_type); + if (sleep) { + mutex_lock(&rdma_dev->ah_tbl_lock); + if (sxe2_ah_exists(rdma_dev, vendor_ah)) { + sxe2_kfree_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_ahs, + ah_id); + ah_id = 0; +#ifdef CONFIG_DEBUG_FS + rdma_dev->ah_reused++; +#endif + goto exit; + } + } + err = sxe2_post_ah_mqinfo(rdma_dev->rdma_func, ctx_ah, + SXE2_MQ_OP_CREATE_ADDR_HANDLE, sleep, NULL, + ctx_ah); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed post ah mq, ret %d\n", err); + goto err_ah_create; + } + err = sxe2_create_ah_wait(rf, ctx_ah, sleep); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed proc ah mq, ret %d\n", err); + goto err_unlock; + } +#ifdef SXE2_CFG_DEBUG + (void)sxe2_debbugfs_ah_add(rdma_dev, vendor_ah); +#endif +exit: + if (udata) { + uresp.ah_id = ah_info->field.ah_idx; + err = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed copy udata, ret %d\n", + err); + if (!vendor_ah->parent_ah || + (vendor_ah->parent_ah && + refcount_dec_and_test( + &vendor_ah->parent_ah->refcnt))) { + sxe2_post_ah_mqinfo( + rdma_dev->rdma_func, &vendor_ah->ctx_ah, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE, false, + NULL, vendor_ah); + ah_id = vendor_ah->ctx_ah.ah_info.field.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&rdma_dev->ah_tbl_lock); + return &vendor_ah->ibah; +err_ah_create: + sxe2_process_create_ah_err(rdma_dev, vendor_ah); +err_unlock: + if (sleep) + mutex_unlock(&rdma_dev->ah_tbl_lock); +err_kalloc_ah: + kfree(vendor_ah); +err_gid_l2: + if (ah_id) + sxe2_kfree_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_ahs, ah_id); + + DRV_RDMA_LOG_DEV_ERR("AH: failed create ah, ret %d\n", err); + + return ERR_PTR(err); +} +#else +#ifdef CREATE_AH_VER_2 +int sxe2_kcreate_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata) +#else +int sxe2_kcreate_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata) +#endif +{ +#ifndef CREATE_AH_VER_2 + struct rdma_ah_attr *attr = init_attr->ah_attr; + u32 flags = init_attr->flags; +#endif + + struct sxe2_rdma_pd *pd = ibpd_to_vendor_pd(ibah->pd); + struct sxe2_ah *vendor_ah = container_of(ibah, struct sxe2_ah, ibah); + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibah->pd->device); + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_ctx_ah *ctx_ah; + u32 ah_id = 0; + union sxe2_ah_info *ah_info; + struct sxe2_create_ah_resp uresp = {}; + int 
err; + u16 vlan_id; + bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0; + + DRV_RDMA_LOG_DEV_DEBUG("AH:kcreate ah(%p) enter, sleep %d.\n", + vendor_ah, sleep); + + if (udata && udata->outlen < sizeof(uresp)) { + DRV_RDMA_LOG_DEV_ERR("AH: invalid udata.\n"); + return -EINVAL; + } + if (rf->reset) { + DRV_RDMA_LOG_DEV_ERR("AH: function has reset!\n"); + return -EINVAL; + } + err = sxe2_kalloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed alloc ah idx, ret %d\n", err); + return err; + } + ctx_ah = &vendor_ah->ctx_ah; + ctx_ah->dev = &rf->ctx_dev; + vendor_ah->av.attrs = *attr; + vendor_ah->av.net_type = + rdma_gid_attr_network_type(attr->grh.sgid_attr); + ah_info = &ctx_ah->ah_info; + err = rdma_read_gid_l2_fields(attr->grh.sgid_attr, &vlan_id, NULL); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed read vlan_id, ret %d\n", err); + goto err_gid_l2; + } + ah_info->field.pd_idx = pd->pd_ctx.pd_id; + ah_info->field.vlan_tag = vlan_id; + ah_info->field.ah_idx = ah_id; + + sxe2_err_inject_ah_id(rdma_dev, ah_info, ah_id); + + sxe2_fill_ah_info(rdma_dev, attr, ah_info, vendor_ah->av.net_type); + if (sleep) { + mutex_lock(&rdma_dev->ah_tbl_lock); + if (sxe2_ah_exists(rdma_dev, vendor_ah)) { + sxe2_kfree_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_ahs, + ah_id); + ah_id = 0; +#ifdef CONFIG_DEBUG_FS + rdma_dev->ah_reused++; +#endif + goto exit; + } + } + err = sxe2_post_ah_mqinfo(rdma_dev->rdma_func, ctx_ah, + SXE2_MQ_OP_CREATE_ADDR_HANDLE, sleep, NULL, + ctx_ah); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed post ah mq, ret %d\n", err); + goto err_ah_create; + } + err = sxe2_create_ah_wait(rf, ctx_ah, sleep); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed proc ah mq, ret %d\n", err); + goto err_unlock; + } +#ifdef SXE2_CFG_DEBUG + (void)sxe2_debbugfs_ah_add(rdma_dev, vendor_ah); +#endif +exit: + if (udata) { + uresp.ah_id = ah_info->field.ah_idx; + err = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err) { + DRV_RDMA_LOG_DEV_ERR("AH: failed copy udata, ret %d\n", + err); + if (!vendor_ah->parent_ah || + (vendor_ah->parent_ah && + refcount_dec_and_test( + &vendor_ah->parent_ah->refcnt))) { + sxe2_post_ah_mqinfo( + rdma_dev->rdma_func, &vendor_ah->ctx_ah, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE, false, + NULL, vendor_ah); + ah_id = vendor_ah->ctx_ah.ah_info.field.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&rdma_dev->ah_tbl_lock); + return 0; +err_ah_create: + sxe2_process_create_ah_err(rdma_dev, vendor_ah); +err_unlock: + if (sleep) + mutex_unlock(&rdma_dev->ah_tbl_lock); +err_gid_l2: + if (ah_id) + sxe2_kfree_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_ahs, ah_id); + + DRV_RDMA_LOG_DEV_ERR("AH: failed create ah, ret %d\n", err); + + return err; +} +#endif +#ifdef DESTROY_AH_VER_3 +void sxe2_kdestroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibah->device); + struct sxe2_ah *vendor_ah = ibah_to_vendor_ah(ibah); + + DRV_RDMA_LOG_DEBUG_BDF("AH:destroy ah enter(%p).\n", vendor_ah); +#ifdef SXE2_CFG_DEBUG + if (vendor_ah->dbg_node) + sxe2_debugfs_ah_remove(rdma_dev, vendor_ah); +#endif + if (vendor_ah->parent_ah) { + mutex_lock(&rdma_dev->ah_tbl_lock); + if (!refcount_dec_and_test(&vendor_ah->parent_ah->refcnt)) { + mutex_unlock(&rdma_dev->ah_tbl_lock); + return; + } + hash_del(&vendor_ah->parent_ah->list); + kfree(vendor_ah->parent_ah); + vendor_ah->parent_ah = NULL; + 
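/*
+		 * The last user of the cached AH is gone: the entry has been
+		 * unhashed and freed, so shrink the dedup-cache count before
+		 * the hardware object itself is destroyed below.
+		 */
+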
rdma_dev->ah_list_cnt--; + mutex_unlock(&rdma_dev->ah_tbl_lock); + } + sxe2_post_ah_mqinfo(rdma_dev->rdma_func, &vendor_ah->ctx_ah, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE, false, NULL, + vendor_ah); + + sxe2_kfree_rsrc(rdma_dev->rdma_func, rdma_dev->rdma_func->allocated_ahs, + vendor_ah->ctx_ah.ah_info.field.ah_idx); +} +#elif defined DESTROY_AH_VER_4 +int sxe2_kdestroy_ah(struct ib_ah *ibah) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibah->device); + struct sxe2_ah *vendor_ah = ibah_to_vendor_ah(ibah); + + DRV_RDMA_LOG_DEV_DEBUG("AH:destroy ah enter(%p).\n", vendor_ah); +#ifdef SXE2_CFG_DEBUG + if (vendor_ah->dbg_node) + sxe2_debugfs_ah_remove(rdma_dev, vendor_ah); +#endif + if (vendor_ah->parent_ah) { + mutex_lock(&rdma_dev->ah_tbl_lock); + if (!refcount_dec_and_test(&vendor_ah->parent_ah->refcnt)) { + mutex_unlock(&rdma_dev->ah_tbl_lock); + return 0; + } + hash_del(&vendor_ah->parent_ah->list); + kfree(vendor_ah->parent_ah); + vendor_ah->parent_ah = NULL; + rdma_dev->ah_list_cnt--; + mutex_unlock(&rdma_dev->ah_tbl_lock); + } + sxe2_post_ah_mqinfo(rdma_dev->rdma_func, &vendor_ah->ctx_ah, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE, false, NULL, + vendor_ah); + + sxe2_kfree_rsrc(rdma_dev->rdma_func, rdma_dev->rdma_func->allocated_ahs, + vendor_ah->ctx_ah.ah_info.field.ah_idx); + return 0; +} +#else +int sxe2_kdestroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibah->device); + struct sxe2_ah *vendor_ah = ibah_to_vendor_ah(ibah); + + DRV_RDMA_LOG_DEV_DEBUG("AH:destroy ah enter(%p).\n", vendor_ah); +#ifdef SXE2_CFG_DEBUG + if (vendor_ah->dbg_node) + sxe2_debugfs_ah_remove(rdma_dev, vendor_ah); +#endif + if (vendor_ah->parent_ah) { + mutex_lock(&rdma_dev->ah_tbl_lock); + if (!refcount_dec_and_test(&vendor_ah->parent_ah->refcnt)) { + mutex_unlock(&rdma_dev->ah_tbl_lock); + return 0; + } + hash_del(&vendor_ah->parent_ah->list); + kfree(vendor_ah->parent_ah); + vendor_ah->parent_ah = NULL; + rdma_dev->ah_list_cnt--; + mutex_unlock(&rdma_dev->ah_tbl_lock); + } + sxe2_post_ah_mqinfo(rdma_dev->rdma_func, &vendor_ah->ctx_ah, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE, false, NULL, + vendor_ah); + + sxe2_kfree_rsrc(rdma_dev->rdma_func, rdma_dev->rdma_func->allocated_ahs, + vendor_ah->ctx_ah.ah_info.field.ah_idx); + return 0; +} +#endif + +int sxe2_kquery_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ibah->device); + struct sxe2_ah *vendor_ah = ibah_to_vendor_ah(ibah); + + DRV_RDMA_LOG_DEV_DEBUG("AH:kquery ah enter\n"); + + memset(ah_attr, 0, sizeof(*ah_attr)); + if (vendor_ah->av.attrs.ah_flags & IB_AH_GRH) { + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.flow_label = vendor_ah->av.attrs.grh.flow_label; + ah_attr->grh.traffic_class = + vendor_ah->av.attrs.grh.traffic_class; + ah_attr->grh.hop_limit = vendor_ah->av.attrs.grh.hop_limit; + ah_attr->grh.sgid_index = vendor_ah->av.attrs.grh.sgid_index; + memcpy(&ah_attr->grh.dgid, &vendor_ah->av.attrs.grh.dgid, + sizeof(ah_attr->grh.dgid)); + } + + return 0; +} + +int sxe2_ah_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, + struct mq_cmds_info *pcmdinfo) +{ + __le64 *wqe; + void *wqe_info; + u64 scratch; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + + mq = dev->mq; + rdma_dev = to_rdmadev(dev); + + switch (pcmdinfo->mq_cmd) { + case MQ_OP_CREATE_ADDR_HANDLE: + pcmdinfo->in.u.ah_info.info.field.op = + SXE2_MQ_OP_CREATE_ADDR_HANDLE; + pcmdinfo->in.u.ah_info.info.field.wqe_valid = mq->polarity; + scratch =
pcmdinfo->in.u.ah_info.scratch; + wqe_info = (void *)&pcmdinfo->in.u.ah_info.info; + break; + case MQ_OP_MODIFY_ADDR_HANDLE: + pcmdinfo->in.u.ah_info.info.field.op = + SXE2_MQ_OP_MODIFY_ADDR_HANDLE; + pcmdinfo->in.u.ah_info.info.field.wqe_valid = mq->polarity; + scratch = pcmdinfo->in.u.ah_info.scratch; + wqe_info = (void *)&pcmdinfo->in.u.ah_info.info; + break; + case MQ_OP_DESTROY_ADDR_HANDLE: + pcmdinfo->in.u.ah_info.info.field.op = + SXE2_MQ_OP_DESTROY_ADDR_HANDLE; + pcmdinfo->in.u.ah_info.info.field.wqe_valid = mq->polarity; + scratch = pcmdinfo->in.u.ah_info.scratch; + wqe_info = (void *)&pcmdinfo->in.u.ah_info.info; + break; + default: + return -EOPNOTSUPP; + } + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_ERROR_BDF("AH:get next mq wqe failed.\n"); + return -ENOMEM; + } + + sxe2_print_wqe_info(dev, wqe_info, pcmdinfo->mq_cmd); + + sxe2_set_mq_wqe(dev, wqe, wqe_info); + + if (pcmdinfo->post_mq) + sxe2_kpost_mq(mq); + + return 0; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.h new file mode 100644 index 0000000000000000000000000000000000000000..2302f29f92c0034642d0cc6bdffa68224c45e479 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_ah.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_AH_H__ +#define __SXE2_DRV_AH_H__ + +#define ECN_CODE_PT_MASK 3 +#define ECN_CODE_PT_VAL 2 +#define CQP_COMPL_WAIT_TIME_MS 10 +#define CQP_TIMEOUT_THRESHOLD 500 + +#ifdef CREATE_AH_VER_3 +struct ib_ah *sxe2_kcreate_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr, + struct ib_udata *udata); +#elif defined CREATE_AH_VER_2 +int sxe2_kcreate_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata); +#else +int sxe2_kcreate_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata); +#endif + +#ifdef DESTROY_AH_VER_3 +void sxe2_kdestroy_ah(struct ib_ah *ibah, u32 ah_flags); +#elif defined DESTROY_AH_VER_4 +int sxe2_kdestroy_ah(struct ib_ah *ibah); +#else +int sxe2_kdestroy_ah(struct ib_ah *ibah, u32 ah_flags); +#endif +int sxe2_kquery_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..237edc4c2a235d2e9f0aaa454f7961780fa6c752 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
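+ * + * Debugfs read/write hooks for address-handle (AH) contexts: the read + * side dumps the per-AH hardware context, and the write side patches the + * vlan_tag/pd_idx/flow_label fields for debugging.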
+ * + * @file: sxe2_drv_ah_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_ah.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rcms_debugfs.h" +#include "sxe2_drv_ah_debugfs.h" + +enum { AH_VLAN_TAG, AH_PD_IDX, AH_FLOW_LABEL }; + +#ifdef SXE2_CFG_DEBUG +static char *ah_fields[] = { [AH_VLAN_TAG] = "vlan_tag", + [AH_PD_IDX] = "pd_idx", + [AH_FLOW_LABEL] = "flow_label" }; +#endif +#define ADDR_SIZE (4) + +int sxe2_drv_ah_modify_op(struct sxe2_rdma_device *rdma_dev, + union sxe2_hw_ahc *ah_ctx) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_MODIFY_ADDR_HANDLE; + mq_info->post_mq = 1; + mq_info->in.u.ah_info.ctx_dev = &rdma_func->ctx_dev; + mq_info->in.u.ah_info.scratch = (uintptr_t)mq_request; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ah_query", + &rdma_dev->rdma_func->mq.err_cqe_val, ah_ctx); + + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ah_query"); +#endif + + memcpy(&mq_info->in.u.ah_info.info, ah_ctx, sizeof(*ah_ctx)); + + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle modify ah failed, ret (%d)\n", + ret); + +end: + return ret; +} + +u64 sxe2_debugfs_ah_read(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + int i; + int ret; + union sxe2_hw_ahc ah_ctx; + union sxe2_hw_ahc *p_ah_ctx; + void *va_addr; + struct sxe2_ah *vendor_ah; + size_t len = 0; + u32 ah_index; + u32 dest_ip_addr[ADDR_SIZE] = { 0 }; + u32 src_ip_addr[ADDR_SIZE] = { 0 }; + + va_addr = NULL; + vendor_ah = (struct sxe2_ah *)data; + ah_index = vendor_ah->ctx_ah.ah_info.field.ah_idx; + + ret = sxe2_rcms_num_to_ctx_va_pointer(rdma_dev, SXE2_RCMS_OBJ_AH, + ah_index, &va_addr); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query ah failed, ret (%d)\n", ret); + goto end; + } + + memset(&ah_ctx, 0, sizeof(ah_ctx)); + p_ah_ctx = (union sxe2_hw_ahc *)va_addr; + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + ah_ctx.buf[i] = le64_to_cpu(p_ah_ctx->buf[i]); + + len += dbg_vsnprintf(buf, len, "AHINFO:%u\n", ah_index); + len += dbg_vsnprintf(buf, len, + "----------------------------------------\n"); + + len += dbg_vsnprintf(buf, len, + "dest_mac:%02x:%02x:%02x:%02x:%02x:%02x\n", + ah_ctx.field.dest_mac[5], ah_ctx.field.dest_mac[4], + ah_ctx.field.dest_mac[3], ah_ctx.field.dest_mac[2], + ah_ctx.field.dest_mac[1], + ah_ctx.field.dest_mac[0]); + len += dbg_vsnprintf(buf, len, "vlan_tag: %d\n", + ah_ctx.field.vlan_tag); + len += dbg_vsnprintf(buf, len, "tc_tos: %d\n", + ah_ctx.field.tc_tos); + len += dbg_vsnprintf(buf, len, "pd_index: %d\n", + ah_ctx.field.pd_idx); + len += dbg_vsnprintf(buf, len, "flow_label: %d\n", + ah_ctx.field.flow_label); + len += dbg_vsnprintf(buf, len, "hop_ttl: %d\n", + ah_ctx.field.hop_ttl); + len += dbg_vsnprintf(buf, len, "ah_id: %d\n", + ah_ctx.field.ah_idx); + len += dbg_vsnprintf(buf, len, "op: %d\n", + ah_ctx.field.op); + len += dbg_vsnprintf(buf, len, "ipv4_valid: %d\n", + ah_ctx.field.ipv4_valid); + len += dbg_vsnprintf(buf, len, 
"insert_vlan_tag: %d\n", + ah_ctx.field.insert_vlan_tag); + len += dbg_vsnprintf(buf, len, "do_lpbk: %d\n", + ah_ctx.field.do_lpbk); + len += dbg_vsnprintf(buf, len, "wqe_valid: %d\n", + ah_ctx.field.wqe_valid); + if (ah_ctx.field.ipv4_valid) { + src_ip_addr[0] = htonl(ah_ctx.field.src_ip_addr[0]); + dest_ip_addr[0] = htonl(ah_ctx.field.dest_ip_addr[0]); + len += dbg_vsnprintf(buf, len, "dest_ip_addr: %pI4\n", + dest_ip_addr); + len += dbg_vsnprintf(buf, len, "src_ip_addr: %pI4\n", + src_ip_addr); + } else { + sxe2_copy_ip_htonl(src_ip_addr, ah_ctx.field.src_ip_addr); + sxe2_copy_ip_htonl(dest_ip_addr, ah_ctx.field.dest_ip_addr); + len += dbg_vsnprintf(buf, len, "dest_ip_addr: %pI6\n", + dest_ip_addr); + len += dbg_vsnprintf(buf, len, "src_ip_addr: %pI6\n", + src_ip_addr); + } + len += dbg_vsnprintf(buf, len, + "----------------------------------------\n"); + +end: + return len; +} + +int sxe2_debugfs_ah_write(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ +#ifdef SXE2_CFG_DEBUG + u32 i; + int ret; + int argc; + u64 new_value; + u32 ah_index; + union sxe2_hw_ahc ah_ctx; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_ah *vendor_ah; + void *va_addr; + union sxe2_hw_ahc *p_ah_ctx; + bool find_field = false; + + va_addr = NULL; + vendor_ah = (struct sxe2_ah *)data; + ah_index = vendor_ah->ctx_ah.ah_info.field.ah_idx; + + ret = sxe2_rcms_num_to_ctx_va_pointer(rdma_dev, SXE2_RCMS_OBJ_AH, + ah_index, &va_addr); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query ah failed, ret (%d)\n", ret); + goto end; + } + + p_ah_ctx = (union sxe2_hw_ahc *)va_addr; + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + ah_ctx.buf[i] = le64_to_cpu(p_ah_ctx->buf[i]); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(buf, &argc, argv); + if (ret) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param\n"); + goto end; + } + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto end; + } + + DRV_RDMA_LOG_DEV_DEBUG("argv:%s\n", argv[0]); + + for (i = 0; i < ARRAY_SIZE(ah_fields); i++) { + if (!strncmp(argv[0], ah_fields[i], strlen(ah_fields[i])) && + (strlen(ah_fields[i]) == strlen(argv[0]))) { + find_field = true; + break; + } + } + + if (!find_field) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("unsupport change ah field %s.\n", + argv[0]); + goto end; + } + + ret = kstrtoull(argv[1], 10, &new_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret); + goto end; + } + + DRV_RDMA_LOG_DEV_INFO("modify ah field %s new value %llx\n", + ah_fields[i], new_value); + + switch (i) { + case AH_VLAN_TAG: + ah_ctx.field.vlan_tag = new_value; + break; + case AH_PD_IDX: + ah_ctx.field.pd_idx = new_value; + break; + case AH_FLOW_LABEL: + ah_ctx.field.flow_label = new_value; + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_DEV_WARN("invalid index %d, ret %d\n", i, ret); + goto end; + } + + ret = sxe2_drv_ah_modify_op(rdma_dev, &ah_ctx); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("modify ah failed, ret (%d)\n", ret); + +end: + return ret; +#else + return 0; +#endif +} +#ifdef SXE2_CFG_DEBUG +int sxe2_debbugfs_ah_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *vendor_ah) +{ + int ret = 0; + int ah_idx; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->ah_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("ah debugfs dir not exist, ret 
(%d)\n", + ret); + goto end; + } + + ah_idx = vendor_ah->ctx_ah.ah_info.field.ah_idx; + + vendor_ah->dbg_node = drv_rdma_add_res_tree( + rdma_dev, SXE2_DBG_RSC_AH, rdma_dev->hdl->ah_debugfs, + sxe2_debugfs_ah_read, sxe2_debugfs_ah_write, ah_idx, vendor_ah); + if (!vendor_ah->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + DRV_RDMA_LOG_DEV_DEBUG( + "ah debugfs add ah(%p) dbg_node(%p) ah_idx(%u)\n", vendor_ah, + vendor_ah->dbg_node, ah_idx); + +end: + return ret; +} +void sxe2_debugfs_ah_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *vendor_ah) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->ah_debugfs) { + DRV_RDMA_LOG_DEV_ERR("ah debugfs dir not exist\n"); + goto end; + } + + DRV_RDMA_LOG_DEV_DEBUG( + "ah debugfs del ah(%p) dbg_node(%p) ah_idx(%u)\n", vendor_ah, + vendor_ah->dbg_node, vendor_ah->ctx_ah.ah_info.field.ah_idx); + + if (vendor_ah->dbg_node) { + drv_rdma_rm_res_tree(vendor_ah->dbg_node); + vendor_ah->dbg_node = NULL; + } + +end: + return; +} +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..c622281746f5bc7950f132c1f0d2a9973589e67d --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_ah_debugfs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_ah_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_AH_DEBUGFS_H__ +#define __SXE2_DRV_AH_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" +#ifdef SXE2_CFG_DEBUG +int sxe2_debbugfs_ah_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *vendor_ah); +void sxe2_debugfs_ah_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_ah *vendor_ah); +#endif +int sxe2_drv_ah_modify_op(struct sxe2_rdma_device *rdma_dev, + union sxe2_hw_ahc *ah_ctx); + +int sxe2_debugfs_ah_write(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); + +u64 sxe2_debugfs_ah_read(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..bc479fbc8adb9fe256bae0bada5a3762a7065645 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_cc_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: congestion-control (CC) debugfs entries + * @note: + */ + +#include <linux/debugfs.h> +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_cc_debugfs.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_aux.h" + +#ifdef SXE2_CFG_DEBUG + +static ssize_t drv_rdma_cc_np_write(struct file *filp, const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[CC_DEBUGFS_WRITE_BUF_MAX_LEN] = {0}; + struct sxe2_rdma_device *rdma_dev; + u32 np_enable; + + if (*off != 0) + goto end; + + if (count >= CC_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR("cc debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("cc debugfs:dev find failed err\n"); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:cmd copy from user failed err\n"); + goto end; + } + + ret = sscanf(cmd, "%u", &np_enable); + if (ret != 1) { + ret = -ENODATA; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:analyze cmd err please input 1/0\n"); + goto end; + } + DRV_RDMA_LOG_DEBUG_BDF("cc debugfs:input ecn enable %u\n", np_enable); + if (!rdma_dev->rdma_func->cc_params.dcqcn_enable) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:cc dcqcn is disabled\n"); + goto end; + } + + if (np_enable) { + rdma_dev->rdma_func->cc_params.cnp_ecn = SXE2_QP_CC_CNP_ECN_ENABLE; + rdma_dev->rdma_func->cc_params.ecn = SXE2_QP_CC_CNP_ECN_ENABLE; + } else { + rdma_dev->rdma_func->cc_params.cnp_ecn = SXE2_QP_CC_CNP_ECN_DISABLE; + rdma_dev->rdma_func->cc_params.ecn = SXE2_QP_CC_CNP_ECN_DISABLE; + } + + ret = count; + *off = count; + +end: + return ret; +} + +static ssize_t drv_rdma_cc_np_read(struct file *filp, + char __user *buf, size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + u32 len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "cc debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("cc debugfs:cc info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + if (rdma_dev->rdma_func->cc_params.dcqcn_enable) { + if (rdma_dev->rdma_func->cc_params.cnp_ecn) + len_total += dbg_vsnprintf(rsp_end, len_total, "np enable\n"); + else + len_total += dbg_vsnprintf(rsp_end, len_total, "np disable\n"); + + } else { + len_total += dbg_vsnprintf(rsp_end, len_total, "please enable cc dcqcn\n"); + } + + ret = simple_read_from_buffer(buf, count, off, rsp, (ssize_t)len_total); + if (ret < 0) + DRV_RDMA_LOG_ERROR("cc debugfs:simple read error %zd\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_cc_np_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_cc_np_read, + .write = drv_rdma_cc_np_write, +}; + +#endif +static ssize_t drv_rdma_cc_qp_dfx_write(struct file *filp, const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[CC_DEBUGFS_WRITE_BUF_MAX_LEN] = {0}; + struct sxe2_rdma_device *rdma_dev; + u32 cc_qp_idx; + + if (*off != 0) + goto end; + + if (count >= CC_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR("cc 
debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("cc debugfs:dev find failed err\n"); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:cmd copy from user failed err\n"); + goto end; + } + + ret = sscanf(cmd, "%u", &cc_qp_idx); + if (ret != 1) { + ret = -ENODATA; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:analyze cmd err please input a cc qp idx\n"); + goto end; + } + if (cc_qp_idx > CC_MAX_CC_QP_IDX) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:max cc qp idx is %u\n", CC_MAX_CC_QP_IDX); + goto end; + } + + rdma_dev->rdma_func->cc_params.cc_qp_idx = cc_qp_idx; + + ret = count; + *off = count; + +end: + return ret; +} + +static ssize_t drv_rdma_cc_qp_dfx_read(struct file *filp, + char __user *buf, size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + u32 len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct cc_sw_entry cc_entry; + struct aux_core_dev_info *cdev_info; + struct sxe2_get_cc_qp_dfx_cmd_info cmd_info; + struct cc_timely_entry *timely; + struct cc_dcqcn_entry *dcqcn; + + if (*off != 0) + goto end; + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "cc debugfs:find dev struct from private_data failed err\n"); + goto end; + } + cdev_info = rdma_dev->rdma_func->cdev; + cmd_info.cc_qp_idx = cpu_to_le32(rdma_dev->rdma_func->cc_params.cc_qp_idx); + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("cc debugfs:cc info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_GET_CC_QP_DFX, (u8 *)(&cmd_info), + (u16)sizeof(cmd_info), (u8 *)(&cc_entry), (u16)sizeof(cc_entry)); + if (ret) { + DRV_RDMA_LOG_ERROR("cc debugfs:rdma send cmd err\n"); + kfree(rsp); + rsp = NULL; + goto end; + } + timely = &(cc_entry.timely); + dcqcn = &(cc_entry.dcqcn); + len_total += dbg_vsnprintf(rsp_end, len_total, "cc qp idx %u dfx:\n", cmd_info.cc_qp_idx); + len_total += dbg_vsnprintf(rsp_end, len_total, + "------------------timely entry--------------------\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, "timely_min_rtt :%u\n", + timely->min_rtt_h << TIMELY_MIN_RTT_H_SHIFT | timely->min_rtt_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_alpha :%u\n", timely->alpha); + len_total += dbg_vsnprintf(rsp_end, len_total, "timely_thigh :%u\n", + timely->high_h << TIMELY_THIGH_H_SHIFT | timely->high_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_tlow :%u\n", timely->low); + len_total += dbg_vsnprintf(rsp_end, len_total, "timely_pre_rtt :%u\n", + timely->pre_rtt_h << TIMELY_PRE_RTT_H_SHIFT | timely->pre_rtt_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_beta :%u\n", timely->beta); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_rtt_diff_symbol :%u\n", timely->rtt_diff_symbol); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_rtt_diff :%u\n", timely->rtt_diff); + len_total += dbg_vsnprintf(rsp_end, len_total, + "timely_rtt_event_cnt :%u\n", + timely->rtt_event_h << RTT_DFX_H_SHIFIT | dcqcn->rtt_event_l); + + len_total += dbg_vsnprintf(rsp_end, len_total, + "-------------------dcqcn entry---------------------\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, "dcqcn_t :%u\n", + dcqcn->t_h << DCQCN_T_INTERVAL_H_SHIFT | dcqcn->t_l); + len_total += dbg_vsnprintf(rsp_end, 
len_total, + "dcqcn_g :%u\n", dcqcn->g); + len_total += dbg_vsnprintf(rsp_end, len_total, "dcqcn_rhai :%u\n", + dcqcn->rhai_h << DCQCN_RHAI_H_SHIFT | dcqcn->rhai_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_rai :%u\n", dcqcn->rai); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_f :%u\n", dcqcn->f); + len_total += dbg_vsnprintf(rsp_end, len_total, "dcqcn_rreduce_mperiod :%u\n", + dcqcn->rreduce_mperiod_h << DCQCN_RREDUCE_MPERIOD_H_SHIFT + | dcqcn->rreduce_mperiod_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_k :%u\n", dcqcn->k); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_min_dec_factor :%u\n", dcqcn->min_dec_factor); + len_total += dbg_vsnprintf(rsp_end, len_total, "dcqcn_rc :%u\n", + dcqcn->rc_h << DCQCN_RC_H_SHIFT | dcqcn->rc_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_alpha :%u\n", dcqcn->alpha); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_min_rate :%u\n", dcqcn->min_rate); + len_total += dbg_vsnprintf(rsp_end, len_total, "dcqcn_rt :%u\n", + dcqcn->rt_h << DCQCN_RT_H_SHIFT | dcqcn->rt_l); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_func_id :%u\n", dcqcn->func_id); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_t_counter :%u\n", dcqcn->t_counter); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_byte_counter :%u\n", dcqcn->byte_counter); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_decrease_rate_valid :%u\n", dcqcn->decrease_rate_valid); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_qpn :%u\n", dcqcn->qpn); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_cc_en :%u\n", dcqcn->ccEn); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_bc :%u\n", dcqcn->bc); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_increase_event_cnt :%u\n", dcqcn->increase_rate_cnt); + len_total += dbg_vsnprintf(rsp_end, len_total, + "dcqcn_decrease_event_cnt :%u\n", dcqcn->decrease_rate_cnt); + + ret = simple_read_from_buffer(buf, count, off, rsp, (ssize_t)len_total); + if (ret < 0) + DRV_RDMA_LOG_ERROR("cc debugfs:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; +end: + return ret; +} + +static const struct file_operations sxe2_rdma_cc_qp_dfx_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_cc_qp_dfx_read, + .write = drv_rdma_cc_qp_dfx_write, +}; + +int drv_rdma_debug_cc_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:debugfs root dir not exist ret=%d\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->cc_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF("cc debugfs:dir not exist ret=%d\n", + ret); + goto end; + } + +#ifdef SXE2_CFG_DEBUG + debugfs_create_file("np", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->cc_debugfs, rdma_dev, &sxe2_rdma_cc_np_fops); +#endif + debugfs_create_file("cc_qp_dfx", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->cc_debugfs, rdma_dev, &sxe2_rdma_cc_qp_dfx_fops); + +end: + return ret; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..6c055a789a72f05a13920c7ccddd15c8896def0e --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cc_debugfs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
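+ * + * Several wide hardware counters are split across the two entries below; + * readers stitch them back together from the _h/_l bitfields, e.g. + * (a sketch matching the RTT_DFX_* macros): + * rtt_event = (timely.rtt_event_h << RTT_DFX_H_SHIFIT) | dcqcn.rtt_event_l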
+ * + * @file: sxe2_drv_cc_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_CC_DEBUGFS_H__ +#define __SXE2_DRV_CC_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +#define SXE2_OK 0 +#define CC_DEBUGFS_WRITE_BUF_MAX_LEN 64 +#define CC_MAX_CC_QP_IDX 4096 +#define RTT_DFX_H_SHIFIT (9) +#define RTT_DFX_H_MASK (0x7F) +#define RTT_DFX_L_MASK (0x1FF) + +struct cc_dcqcn_entry { + u32 t_h : 12; + u32 g : 20; + + u32 rhai_h : 8; + u32 rai : 16; + u32 f : 4; + u32 t_l : 4; + + u32 rreduce_mperiod_h : 8; + u32 k : 16; + u32 rhai_l : 8; + + u32 min_dec_factor : 8; + u32 increase_rate_cnt : 16; + u32 rreduce_mperiod_l : 8; + + u32 rc_h : 4; + u32 alpha : 20; + u32 min_rate : 8; + + u32 rt_h : 16; + u32 rc_l : 16; + + u32 func_id : 12; + u32 decrease_rate_cnt : 16; + u32 rt_l : 4; + + u32 t_counter : 3; + u32 byte_counter : 25; + u32 decrease_rate_valid : 4; + + u32 qpn : 18; + u32 ccEn : 2; + u32 rtt_event_l : 9; + u32 bc : 3; +}; + +struct cc_timely_entry { + u32 min_rtt_h :12; + u32 alpha :20; + + u32 high_h :12; + u32 low :16; + u32 min_rtt_l :4; + + u32 pre_rtt_h :8; + u32 beta :20; + u32 high_l :4; + + u32 rtt_event_h :7; + u32 rtt_diff_symbol :1; + u32 rtt_diff :16; + u32 pre_rtt_l :8; +}; + +struct cc_sw_entry { + struct cc_timely_entry timely; + struct cc_dcqcn_entry dcqcn; +}; + +struct sxe2_get_cc_qp_dfx_cmd_info { + u32 cc_qp_idx; +}; + +int drv_rdma_debug_cc_add(struct sxe2_rdma_device *rdma_dev); + +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common.c new file mode 100644 index 0000000000000000000000000000000000000000..1250d9c6717afe00096b5a465964f7671e0a98ca --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
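+ * + * Shared helpers: carving aligned blocks out of the pre-allocated object + * memory, bitmap-based resource id allocation, IPv6-width byte-order + * copies, and PCIe TPH (TLP Processing Hints) capability probing/setup.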
+ * + * @file: sxe2_drv_common.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include + +#include "sxe2_drv_aux.h" +#include "sxe2_drv_rdma_common.h" + +int sxe2_kget_aligned_mem(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_dma_mem *memptr, u32 size, u32 mask) +{ + unsigned long va, newva; + unsigned long extra; + int ret_code = 0; + + va = (unsigned long)rdma_func->obj_next.va; + newva = va; + if (mask) + newva = ALIGN(va, (unsigned long)mask + 1ULL); + + extra = newva - va; + memptr->va = (u8 *)va + extra; + memptr->pa = rdma_func->obj_next.pa + extra; + memptr->size = size; + if (((u8 *)memptr->va + size) > + ((u8 *)rdma_func->obj_mem.va + rdma_func->obj_mem.size)) { + ret_code = -ENOMEM; + goto end; + } + + rdma_func->obj_next.va = (u8 *)memptr->va + size; + rdma_func->obj_next.pa = memptr->pa + size; + +end: + return ret_code; +} + +u8 sxe2_kget_encoded_wqe_size(u32 wqsize, enum sxe2_queue_type queue_type) +{ + u8 encoded_size = 0; + + if (queue_type == SXE2_QUEUE_TYPE_MQ) + encoded_size = 1; + + wqsize >>= 2; + while (wqsize >>= 1) + encoded_size++; + + return encoded_size; +} + +int sxe2_kalloc_rsrc(struct sxe2_rdma_pci_f *rf, unsigned long *rsrc_array, + u32 max_rsrc, u32 *req_rsrc_num, u32 *next) +{ + u32 rsrc_num; + unsigned long flags = 0; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + rsrc_num = (u32)find_next_zero_bit(rsrc_array, max_rsrc, *next); + if (rsrc_num >= max_rsrc) { + rsrc_num = (u32)find_first_zero_bit(rsrc_array, max_rsrc); + if (rsrc_num >= max_rsrc) { + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + return -EOVERFLOW; + } + } + __set_bit((int)rsrc_num, rsrc_array); + *next = rsrc_num + 1; + if (*next == max_rsrc) + *next = 0; + + *req_rsrc_num = rsrc_num; + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + + return 0; +} + +void sxe2_kfree_rsrc(struct sxe2_rdma_pci_f *rf, unsigned long *rsrc_array, + u32 rsrc_num) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + __clear_bit((int)rsrc_num, rsrc_array); + spin_unlock_irqrestore(&rf->rsrc_lock, flags); +} + +int sxe2_ucount_bitmap_zero_bits(unsigned long *bitmap, u32 max) +{ + int zero_count = 0; + u32 i; + + for (i = 0; i < max; i++) { + if (!test_bit((int)i, bitmap)) + zero_count++; + } + return zero_count; +} + +u32 sxe2_round_up_pow_2(u32 value) +{ + int count = 1; + + for (value--; count <= 16; count *= 2) + value |= value >> count; + + return ++value; +} + +void sxe2_copy_ip_ntohl(u32 *dst, __be32 *src) +{ + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst = ntohl(*src); +} + +void sxe2_copy_ip_htonl(__be32 *dst, u32 *src) +{ + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst = htonl(*src); +} + +bool sxe2_drv_core_is_tph_enable(struct sxe2_rdma_device *rdma_dev, + bool is_user_enable, u32 *st_mode) +{ + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct pci_dev *pdev = cdev_info->pdev; + u16 pos = 0; + u32 tph_control = 0; + u32 tph_capability = 0; + bool ret = false; + + ret = check_bridge_tph_is_support(rdma_dev); + if (!ret) { + DRV_RDMA_LOG_DEV_INFO("device upstream bridge do NOT support TPH\n"); + goto out; + } + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH); + if (!pos) { + ret = false; + DRV_RDMA_LOG_DEV_INFO("device do NOT support TPH\n"); + goto out; + } + pci_read_config_dword(pdev, pos + OFFSET_TPH_CAPABILITY, + &tph_capability); + if (!(tph_capability & (1 << MODE_DEVICE_SPECIFIC))) { + 
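/* the driver only ever programs device-specific steering-tag mode (see pci_dev_set_tph_request_cap()), so treat its absence as no TPH support */ +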
ret = false; + DRV_RDMA_LOG_DEV_INFO("TPH capability is NOT enabled\n"); + goto out; + } + + pci_read_config_dword(pdev, pos + OFFSET_TPH_CONTROL, &tph_control); + + if (tph_control & (1 << OFFSET_TPHENABLE_IN_TPH_CONTROL)) { + if (is_user_enable) { + ret = true; + *st_mode = tph_control & 0x3; + DRV_RDMA_LOG_DEV_INFO( + "TPH capability is support, st mode is %u\n", + *st_mode); + } + } + +out: + return ret; +} + +static bool pcie_dev_is_support_tph_comp(struct sxe2_rdma_device *rdma_dev, struct pci_dev *pdev) +{ + bool ret; + int pos = 0; + u16 pcie_cap = 0; + u32 devcap2 = 0; + u8 pcie_cap_version = 0; + u8 tph_comp = 0; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (!pos) { + DRV_RDMA_LOG_DEV_ERR("PCIe Capability not found\n"); + ret = false; + goto out; + } + + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &pcie_cap); + pcie_cap_version = pcie_cap & PCI_EXP_FLAGS_VERS; + DRV_RDMA_LOG_DEV_DEBUG("PCIe Capability Version: %d\n", pcie_cap_version); + + if (pcie_cap_version < 2) { + DRV_RDMA_LOG_WARN_BDF("PCIe Capability version %d,\n" + "\tDevice Capabilities 2 not supported\n", + pcie_cap_version); + ret = false; + goto out; + } + + pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP2, &devcap2); + DRV_RDMA_LOG_DEV_DEBUG("Device Capabilities 2: 0x%x\n", devcap2); + + tph_comp = (devcap2 & PCI_EXP_DEVCAP2_TPH_COMP_MASK) >> PCI_EXP_DEVCAP2_TPH_COMP_SHIFT; + if (tph_comp) { + ret = true; + } else { + DRV_RDMA_LOG_DEV_DEBUG("upstream rp not support tph comp.\n"); + ret = false; + } + +out: + return ret; +} + +static struct pci_dev *get_upstream_bridge(struct sxe2_rdma_device *rdma_dev) +{ + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct pci_dev *pdev = cdev_info->pdev; + struct pci_bus *bus; + struct pci_dev *bridge; + + if (!pdev || !pdev->bus) { + DRV_RDMA_LOG_ERROR_BDF("pdev or pdev->bus is NULL.\n"); + bridge = NULL; + goto out; + } + + bus = pdev->bus; + + if (pci_is_root_bus(bus)) { + DRV_RDMA_LOG_DEV_WARN("is root bus,no bridge\n"); + bridge = NULL; + goto out; + } + + bridge = bus->self; + if (!bridge) + DRV_RDMA_LOG_ERROR_BDF("get %02x upstream bridge failed.\n", bus->number); + +out: + return bridge; +} + +bool check_bridge_tph_is_support(struct sxe2_rdma_device *rdma_dev) +{ + struct pci_dev *bridge; + bool supports_tph = false; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct pci_dev *pdev = cdev_info->pdev; + + if (!pdev) { + DRV_RDMA_LOG_DEV_ERR("pdev is NULL.\n"); + supports_tph = false; + goto out; + } + + bridge = get_upstream_bridge(rdma_dev); + if (!bridge) { + DRV_RDMA_LOG_DEV_WARN("bridge is NULL\n"); + supports_tph = false; + goto out; + } + + supports_tph = pcie_dev_is_support_tph_comp(rdma_dev, bridge); + +out: + return supports_tph; +} + +int pci_dev_set_tph_request_cap(struct sxe2_rdma_device *rdma_dev, bool state) +{ + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct pci_dev *pdev = cdev_info->pdev; + int pos = 0; + u32 cap = 0; + int ret = 0; + + if (!pdev) { + DRV_RDMA_LOG_DEV_ERR("pdev is NULL.\n"); + ret = -EINVAL; + goto out; + } + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH); + if (!pos) { + DRV_RDMA_LOG_DEV_ERR("device %04x:%02x:%02x.%x not support TPH request\n", + pci_domain_nr(pdev->bus), pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + ret = -ENOENT; + goto out; + } + + pci_read_config_dword(pdev, pos + OFFSET_TPH_CONTROL, &cap); + if (state) + cap = cap | PCI_EXP_EXT_TPH_REQ_ST_DEVICE_MODE_MASK | + PCI_EXP_EXT_TPH_REQ_ENABLE_MASK; + 
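/* enable: select device-specific ST mode and set the TPH requester-enable bits; disable simply clears the whole control word */ +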
else + cap = 0; + + pci_write_config_dword(pdev, pos + OFFSET_TPH_CONTROL, cap); + DRV_RDMA_LOG_DEV_DEBUG("device %04x:%02x:%02x.%x TPH cap: 0x%08x\n", + pci_domain_nr(pdev->bus), pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), cap); + +out: + return ret; +} + +int sxe2_rdma_adminq_send(struct aux_core_dev_info *cdev_info, + int opcode, + u8 *msg, u16 len, u8 *recv_msg, u16 recv_len) +{ + int ret = 0; + + switch (opcode) { + case SXE2_CMD_RDMA_QP_ATTACH_MC: + case SXE2_CMD_RDMA_QP_DETACH_MC: + case SXE2_CMD_RDMA_GET_CC_QP_DFX: + if (!recv_msg || !recv_len) { + ret = -EINVAL; + goto out; + } + break; + default: + break; + } + + ret = cdev_info->ops->rdma_send_cmd(cdev_info, opcode, msg, len, recv_msg, recv_len); + +out: + return ret; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..76f0b1bdc75053e68de94337574a2ad7df4f5914 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_common_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2_compat.h" +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_common_debugfs.h" +#include "sxe2_drv_aux.h" + +#ifndef ether_addr_copy +#define ether_addr_copy(mac_addr, new_mac_addr) \ + memcpy(mac_addr, new_mac_addr, ETH_ALEN) +#endif + +#ifdef SXE2_CFG_DEBUG +static ssize_t drv_rdma_common_reset_en_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[COMMON_RESET_EN_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u32 input_val; + + if (*off != 0) + goto end; + + if (count >= COMMON_RESET_EN_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "common debugfs:Cmd copy from user failed err\n"); + goto end; + } + ret = sscanf(cmd, "%u", &input_val); + if (ret != 1) { + ret = -ENODATA; + DRV_RDMA_LOG_DEV_ERR( + "common debugfs:analyze cmd err please input enable:1 disable:0\n"); + goto end; + } + + if (input_val) { + if (!rdma_dev->rdma_func->reset) { + rdma_dev->rdma_func->reset = true; + rdma_dev->rdma_func->gen_ops.request_reset( + rdma_dev->rdma_func); + DRV_RDMA_LOG_DEV_DEBUG("common debugfs:start reset\n"); + } else { + DRV_RDMA_LOG_DEV_DEBUG( + "common debugfs:already reset\n"); + } + } else { + DRV_RDMA_LOG_DEV_DEBUG( + "common debugfs:input 1 reset function\n"); + } + + ret = (ssize_t)count; + *off = (loff_t)count; + +end: + return ret; +} + +static ssize_t drv_rdma_common_reset_en_read(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + 
DRV_RDMA_LOG_DEV_ERR( + "common debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + if (rdma_dev->rdma_func->reset) + len_total += dbg_vsnprintf(rsp_end, len_total, "ready reset\n"); + else + len_total += + dbg_vsnprintf(rsp_end, len_total, "not ready reset\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("common debugfs:simple read error %zu\n", + ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_common_reset_en_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_common_reset_en_read, + .write = drv_rdma_common_reset_en_write, +}; + +static ssize_t drv_rdma_common_reset_info_read(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + int i; + u32 reset_cnt; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_reset_debug_func_info *reset_func_info; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + goto end; + } + + reset_func_info = rdma_dev->reset_func_info; + if (!reset_func_info) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "common debugfs:reset func info ptr is null\n"); + goto end; + } + + mutex_lock(&g_reset_debug.reset_debug_mutex); + if (reset_func_info->reset_info_idx >= MAX_RESET_INFO_CNT) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "common debugfs:reset info idx err idx=%u\n", + reset_func_info->reset_info_idx); + mutex_unlock(&g_reset_debug.reset_debug_mutex); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG( + "common debugfs:reset cnt=%u reset info idx=%i\n", + reset_func_info->reset_cnt, reset_func_info->reset_info_idx); + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "common debugfs:rcms info rsp kmalloc failed err\n"); + mutex_unlock(&g_reset_debug.reset_debug_mutex); + goto end; + } + rsp_end = rsp; + len_total += dbg_vsnprintf(rsp_end, len_total, "reset info:\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, "bdf:%s\n", + reset_func_info->bdf); + len_total += dbg_vsnprintf(rsp_end, len_total, "reset cnt=%u\n", + reset_func_info->reset_cnt); + + if (!reset_func_info->reset_cnt) { + mutex_unlock(&g_reset_debug.reset_debug_mutex); + goto show_to_buf; + } + reset_cnt = reset_func_info->reset_cnt; + if (reset_func_info->reset_info_idx > 0) { + for (i = reset_func_info->reset_info_idx - 1; i >= 0; i--) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "reset %u entry type: %s time: %s\n", + reset_cnt, + reset_func_info->reset_info[i].reset_type == + FUNC_REQUEST_RESET ? + "request reset" : + "warning reset", + reset_func_info->reset_info[i].time); + reset_cnt--; + } + } + + if (reset_func_info->reset_cnt >= MAX_RESET_INFO_CNT) { + for (i = MAX_RESET_INFO_CNT - 1; + i >= reset_func_info->reset_info_idx; i--) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "reset %u entry type: %s time: %s\n", + reset_cnt, + reset_func_info->reset_info[i].reset_type == + FUNC_REQUEST_RESET ? 
+ "request reset" : + "warning reset", + reset_func_info->reset_info[i].time); + reset_cnt--; + } + } + mutex_unlock(&g_reset_debug.reset_debug_mutex); +show_to_buf: + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("common debugfs:simple read error %zu\n", + ret); + + kfree(rsp); + rsp = NULL; +end: + return ret; +} + +static const struct file_operations sxe2_rdma_common_reset_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_common_reset_info_read, +}; +#endif + +static ssize_t drv_rdma_common_dump_pcap_en_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[COMMON_DUMP_PCAP_EN_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u32 input_val; + struct aux_core_dev_info *cdev_info; + u8 mac_addr[ETH_ALEN]; + + if (*off != 0) + goto end; + + if (count >= COMMON_DUMP_PCAP_EN_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "common debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("common debugfs:dev find failed err\n"); + goto end; + } + cdev_info = rdma_dev->rdma_func->cdev; + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR_BDF( + "common debugfs:Cmd copy from user failed err\n"); + goto end; + } + ret = sscanf(cmd, "%u", &input_val); + if (ret != 1) { + ret = -ENODATA; + DRV_RDMA_LOG_ERROR_BDF( + "common debugfs:analyze cmd err please input enable:1 disable:0\n"); + goto end; + } + + ether_addr_copy(mac_addr, rdma_dev->netdev->dev_addr); + if (cdev_info == NULL) { + DRV_RDMA_LOG_ERROR("common debugfs:cdev_info is NULL\n"); + goto end; + } + if (cdev_info->ops == NULL) { + DRV_RDMA_LOG_ERROR("common debugfs:cdev_info->ops is NULL\n"); + goto end; + } + if (cdev_info->ops->dump_pcap_cmd == NULL) { + DRV_RDMA_LOG_ERROR( + "common debugfs:cdev_info->ops->dump_pcap_cmd is NULL\n"); + goto end; + } + if (input_val) { + ret = cdev_info->ops->dump_pcap_cmd(cdev_info, mac_addr, true); + if (ret) { + DRV_RDMA_LOG_ERROR( + "common debugfs:dump_pcap_cmd, ret %d err\n", + ret); + goto end; + } + rdma_dev->rdma_dump_pcap = true; + DRV_RDMA_LOG_DEBUG_BDF( + "common debugfs:enable dump pcap success\n"); + } else { + ret = cdev_info->ops->dump_pcap_cmd(cdev_info, mac_addr, false); + if (ret) { + DRV_RDMA_LOG_ERROR( + "common debugfs:dump_pcap_cmd, ret %d err\n", + ret); + goto end; + } + rdma_dev->rdma_dump_pcap = false; + DRV_RDMA_LOG_DEBUG_BDF( + "common debugfs:disable dump pcap success\n"); + } + + ret = (ssize_t)count; + *off = (loff_t)count; + +end: + return ret; +} + +static ssize_t drv_rdma_common_dump_pcap_en_read(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "common debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("common debugfs:rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + if (rdma_dev->rdma_dump_pcap) + len_total += + dbg_vsnprintf(rsp_end, len_total, "dump pcap enable\n"); + else + 
len_total += dbg_vsnprintf(rsp_end, len_total, + "dump pcap disable\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_ERROR("common debugfs:simple read error %zu\n", + ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_common_dump_pcap_en_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_common_dump_pcap_en_read, + .write = drv_rdma_common_dump_pcap_en_write, +}; + +int drv_rdma_debug_common_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "mac loop back debugfs:debugfs root dir not exist ret=%d\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->common_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "mac loop back debugfs:dir not exist ret=%d\n", ret); + goto end; + } +#ifdef SXE2_CFG_DEBUG + debugfs_create_file("reset_en", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->common_debugfs, rdma_dev, + &sxe2_rdma_common_reset_en_fops); + debugfs_create_file("reset_info", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->common_debugfs, rdma_dev, + &sxe2_rdma_common_reset_info_fops); +#endif + debugfs_create_file("dump_pcap_en", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->common_debugfs, rdma_dev, + &sxe2_rdma_common_dump_pcap_en_fops); +end: + return ret; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..d96895c1cd01cb9b355810398ee2baca4c292031 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_common_debugfs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_common_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_COMMON_DEBUGFS_H__ +#define __SXE2_DRV_COMMON_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +#define SXE2_OK 0 +#define COMMON_MAC_LOOP_BACK_EN_DEBUGFS_WRITE_BUF_MAX_LEN 4 +#define COMMON_RESET_EN_DEBUGFS_WRITE_BUF_MAX_LEN 4 +#define COMMON_CONFIG_IP_DEBUGFS_WRITE_BUF_MAX_LEN 16 +#define COMMON_DUMP_PCAP_EN_DEBUGFS_WRITE_BUF_MAX_LEN 4 + +#ifdef SXE2_CFG_DEBUG + +extern struct sxe2_reset_debug g_reset_debug; + +#endif + +int drv_rdma_debug_common_add(struct sxe2_rdma_device *rdma_dev); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.c new file mode 100644 index 0000000000000000000000000000000000000000..792898130f646687f9d05a410ae17613d70df1b4 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
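+ * + * Completion queue (CQ) verbs. User CQs pin the consumer buffer with + * ib_umem and, when it is not physically contiguous, map it through + * PBLEs; kernel CQs use dma_alloc_coherent. Create/destroy commands are + * posted to the maintenance queue (MQ) as WQEs.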
+ * + * @file: sxe2_drv_cq.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include "linux/sysfs.h" + +#include "sxe2_compat.h" +#include "sxe2-abi.h" +#include "sxe2_drv_cq.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_db.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_cq_debugfs.h" + +#define DRV_CQC_PAGESZ_WIDTH (5) +#define DRV_PBL_INDEX_HIGH_SHIFT (32) +#define SXE2_DRV_RDMA_MINCQ_SIZE (4) +#define SXE2_CQC_PHY_ADDR_OFFSET 8 + +#define SXE2_CQ_DB_NOTE_CMD_SN_SHIFT (29) +#define SXE2_CQ_DB_NOTE_CMD_SN (3) + +static inline u32 drv_get_cqe_count(u32 ncqe) +{ + ncqe++; + ncqe = (u32)roundup_pow_of_two(ncqe); + if (ncqe < SXE2_DRV_RDMA_MINCQ_SIZE) + ncqe = SXE2_DRV_RDMA_MINCQ_SIZE; + return ncqe; +} + +void drv_rdma_cq_set_pbl(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_rdma_cq *cq, size_t page_sz, u32 ncont, + struct drv_rdma_soft_cqc *cqc) +{ + u32 pbl_cnt = 0; + u64 page_addr; + u64 temp_liner_addr; + struct ib_block_iter biter; + + if (ncont != 1) { + temp_liner_addr = cq->palloc.pble_info.liner_addr; + rdma_umem_for_each_dma_block(cq->cq_umem, &biter, page_sz) { + page_addr = rdma_block_iter_dma_address(&biter); + sxe2_pbl_set_pble(pble_rsrc, temp_liner_addr, page_addr, + false); + if (++pbl_cnt == cq->palloc.needed_pble_cnt) + break; + temp_liner_addr += sizeof(u64); + } + } else { + rdma_umem_for_each_dma_block(cq->cq_umem, &biter, page_sz) { + cqc->pbl_index = rdma_block_iter_dma_address(&biter); + } + } +} + +#ifdef CREATE_CQ_V1 +STATIC long sxe2_drv_create_cq_user(struct sxe2_rdma_device *rdma_dev, + struct ib_udata *udata, + struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info, + u32 entries, struct ib_ucontext *ibucontext) +#else +STATIC long sxe2_drv_create_cq_user(struct sxe2_rdma_device *rdma_dev, + struct ib_udata *udata, + struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info, u32 entries) + +#endif +{ + long ret; + u32 ncont; + ulong page_size; + struct sxe2_create_cq_req ucmd; + struct drv_rdma_soft_cqc *cqc; +#ifdef IB_UMEM_GET_V3 +#endif + if (udata->inlen < sizeof(ucmd)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid udata create cq, ret (%ld)\n", + ret); + goto end; + } + + cq->user_mode = true; + memset(&ucmd, 0, sizeof(ucmd)); + if (ib_copy_from_udata(&ucmd, udata, udata->inlen)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR("copy fail len %zu , ret (%ld)\n", + udata->inlen, ret); + goto end; + } +#ifdef IB_UMEM_GET_V2 + cq->cq_umem = ib_umem_get(udata, ucmd.user_cq_buf, + entries * sizeof(struct sxe2_cqe), + IB_ACCESS_LOCAL_WRITE); +#elif defined(IB_UMEM_GET_V1) + cq->cq_umem = ib_umem_get(udata, ucmd.user_cq_buf, + entries * sizeof(struct sxe2_cqe), + IB_ACCESS_LOCAL_WRITE, 0); +#elif defined(IB_UMEM_GET_V3) + cq->cq_umem = ib_umem_get(ibucontext, ucmd.user_cq_buf, + entries * sizeof(struct sxe2_cqe), + IB_ACCESS_LOCAL_WRITE, 0); +#else + cq->cq_umem = ib_umem_get(&rdma_dev->ibdev, ucmd.user_cq_buf, + entries * sizeof(struct sxe2_cqe), + IB_ACCESS_LOCAL_WRITE); +#endif + if (IS_ERR(cq->cq_umem)) { + ret = PTR_ERR(cq->cq_umem); + DRV_RDMA_LOG_DEV_ERR( + "pin buf ret bufaddr %#llx entries %u, ret (%ld)\n", + ucmd.user_cq_buf, entries, ret); + goto end; + } +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + page_size = sxe2_set_best_pagesz(ucmd.user_cq_buf, cq->cq_umem, + log_page_size_2_bitmap(DRV_CQC_PAGESZ_WIDTH, PAGE_SHIFT)); +#else + page_size = 
sxe2_umem_find_best_pgsz(cq->cq_umem, DRV_CQC_PAGESZ_WIDTH, + PAGE_SHIFT, 0); +#endif + if (!page_size) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("pin buf ret bufaddr %#llx, ret (%ld)\n", + ucmd.user_cq_buf, ret); + goto err_cq_umem; + } +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT + ncont = sxe2_ib_umem_num_dma_blocks(cq->cq_umem, page_size, 0); +#else + ncont = ib_umem_num_dma_blocks(cq->cq_umem, page_size); +#endif + DRV_RDMA_LOG_DEV_INFO( + "addr %#llx, size %lu, npages %zu, page_size %lu, ncont %u\n", + ucmd.user_cq_buf, entries * sizeof(struct sxe2_cqe), + ib_umem_num_pages(cq->cq_umem), page_size, ncont); + + cqc = &info->cqc; + if (ncont != 1) { + ret = sxe2_pbl_get_pble(rdma_dev->rdma_func->pble_rsrc, + &cq->palloc, ncont, PBL_OBJ_CQ); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("cq get pble failed. ret:%ld\n", + ret); + goto err_cq_umem; + } + drv_rdma_cq_set_pbl(rdma_dev->rdma_func->pble_rsrc, cq, + page_size, ncont, cqc); + cqc->pbl_index = cq->palloc.pbl_index; + cqc->pbl_mode = cq->palloc.pbl_mode.cq_eq_mode; + } else { + drv_rdma_cq_set_pbl(rdma_dev->rdma_func->pble_rsrc, cq, + page_size, ncont, cqc); + cqc->pbl_mode = CQ_EQ_PA_FIRST_MODE; + } +#ifdef IB_UMEM_GET_V2 + cq->db_umem = + ib_umem_get(udata, ucmd.user_cq_db_note, SXE2_RDMA_DB_NOTE_SIZE, + IB_ACCESS_LOCAL_WRITE); +#elif defined(IB_UMEM_GET_V1) + cq->db_umem = + ib_umem_get(udata, ucmd.user_cq_db_note, SXE2_RDMA_DB_NOTE_SIZE, + IB_ACCESS_LOCAL_WRITE, 0); +#elif defined(IB_UMEM_GET_V3) + cq->db_umem = ib_umem_get(ibucontext, ucmd.user_cq_db_note, + SXE2_RDMA_DB_NOTE_SIZE, + IB_ACCESS_LOCAL_WRITE, 0); +#else + cq->db_umem = + ib_umem_get(&rdma_dev->ibdev, ucmd.user_cq_db_note, + SXE2_RDMA_DB_NOTE_SIZE, IB_ACCESS_LOCAL_WRITE); +#endif + if (IS_ERR(cq->db_umem)) { + ret = PTR_ERR(cq->db_umem); + DRV_RDMA_LOG_DEV_ERR("pin buf ret bufaddr %#llx , ret (%ld)\n", + ucmd.user_cq_db_note, ret); + goto err_db_umem; + } + +#ifdef HAVE_IB_UMEM_SG_HEAD + info->db_pa = sg_dma_address(cq->db_umem->sg_head.sgl) + + (ucmd.user_cq_db_note & ~PAGE_MASK); +#else + info->db_pa = sg_dma_address(cq->db_umem->sgt_append.sgt.sgl) + + (ucmd.user_cq_db_note & ~PAGE_MASK); +#endif + info->cq_uk_init_info.ncqe = entries; + + cqc->log_page_size = order_base_2(page_size); + cqc->page_offset = 0; + + ret = 0; + goto end; + +err_db_umem: + if (ncont != 1) { + sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc, + cq->palloc.pble_info.liner_addr, + cq->palloc.needed_pble_cnt, false); + } +err_cq_umem: + ib_umem_release(cq->cq_umem); + cq->cq_umem = NULL; +end: + return ret; +} + +STATIC int sxe2_drv_create_cq_kernel(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info, + u32 entries) +{ + int ret = 0; + u32 cqe_count; + u32 cq_size; + struct sxe2_rdma_ctx_dev *dev_ctx; + struct drv_rdma_soft_cqc *cqc; + struct sxe2_rdma_cq_uk_init_info *uk_info; + + if ((entries < 1) || (entries > rdma_dev->rdma_func->max_cqe)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid entries:%d, ret:%d\n", entries, + ret); + goto end; + } + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + cqe_count = drv_get_cqe_count(entries); + if ((cqe_count < SXE2_RDMA_MIN_CQ_SIZE) || + (cqe_count > SXE2_RDMA_MAX_CQ_SIZE)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("cq size(%d) invalid.ret:%d", cqe_count, + ret); + goto end; + } + + cq_size = (u32)cqe_count * sizeof(struct sxe2_cqe); + + cq->kmem.size = ALIGN(round_up(cq_size, 4096), 4096); + cq->kmem.va = dma_alloc_coherent(dev_ctx->hw->device, cq->kmem.size, + &cq->kmem.pa, GFP_KERNEL); + if 
(!cq->kmem.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("kernel cq buf alloc failed, ret:%d\n", + ret); + goto end; + } + memset(cq->kmem.va, 0, cq->kmem.size); + + cq->kmem_db.size = SXE2_RDMA_DB_NOTE_SIZE; + cq->kmem_db.va = + dma_alloc_coherent(dev_ctx->hw->device, cq->kmem_db.size, + &cq->kmem_db.pa, GFP_KERNEL); + if (!cq->kmem_db.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "kernel cq db note buf alloc failed, ret:%d\n", ret); + goto err_db_alloc; + } + memset(cq->kmem_db.va, 0, cq->kmem_db.size); + set_32bit_val(cq->kmem_db.va, 4, + (__u32)(SXE2_CQ_DB_NOTE_CMD_SN) + << SXE2_CQ_DB_NOTE_CMD_SN_SHIFT); + + info->cq_pa = cq->kmem.pa; + info->db_pa = cq->kmem_db.pa; + + uk_info = &info->cq_uk_init_info; + uk_info->ncqe = cqe_count; + uk_info->cq_base = cq->kmem.va; + uk_info->doorbell_note = cq->kmem_db.va; + + cqc = &info->cqc; + cqc->log_page_size = order_base_2(PAGE_SIZE); + cqc->page_offset = offset_in_page(cq->kmem.pa); + cqc->pbl_mode = CQ_EQ_PA_FIRST_MODE; + cqc->pbl_index = cq->kmem.pa; + goto end; + +err_db_alloc: + dma_free_coherent(dev_ctx->hw->device, cq->kmem.size, cq->kmem.va, + cq->kmem.pa); + cq->kmem.va = NULL; + +end: + return ret; +} + +STATIC void sxe2_drv_destroy_cq_user(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq, + struct ib_udata *udata) +{ + ib_umem_release(cq->cq_umem); + cq->cq_umem = NULL; + + ib_umem_release(cq->db_umem); + cq->db_umem = NULL; + + if (cq->cq_ctx.cqc.pbl_mode != CQ_EQ_PA_FIRST_MODE) { + sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc, + cq->palloc.pble_info.liner_addr, + cq->palloc.needed_pble_cnt, false); + } +} + +STATIC void sxe2_drv_destroy_cq_kernel(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_cq *cq) +{ + dma_free_coherent(rdma_func->ctx_dev.hw->device, cq->kmem.size, + cq->kmem.va, cq->kmem.pa); + cq->kmem.va = NULL; + dma_free_coherent(rdma_func->ctx_dev.hw->device, cq->kmem_db.size, + cq->kmem_db.va, cq->kmem_db.pa); + cq->kmem_db.va = NULL; +} + +void sxe2_rdma_uk_cq_init(struct sxe2_rdma_cq_uk *cq, + struct sxe2_rdma_cq_uk_init_info *info) +{ + cq->cq_base = info->cq_base; + cq->cqe_alloc_db = info->cqe_alloc_db; + cq->cq_id = info->cq_id; + cq->ncqe = info->ncqe; + cq->doorbell_note = info->doorbell_note; + SXE2_RING_INIT(cq->cq_ring, cq->ncqe); + cq->polarity = 1; + cq->arm_sn = SXE2_RDMA_CQ_ARM_SN; +} + +void sxe2_drv_cq_ctx_init(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info) +{ + struct sxe2_rdma_ctx_cq *cq_ctx; + u32 st_mode = 0; + s32 cpu_id = 0; + + cq_ctx = &cq->cq_ctx; + + cq_ctx->back_cq = cq; + cq_ctx->cq_type = SXE2_RDMA_CQ_TYPE_IO; + cq_ctx->cq_pa = info->cq_pa; + cq_ctx->db_pa = info->db_pa; + cq_ctx->dev = &(rdma_dev->rdma_func->ctx_dev); + cq_ctx->vsi = &rdma_dev->vsi; + + sxe2_rdma_uk_cq_init(&cq_ctx->cq_uk, &info->cq_uk_init_info); + + cq_ctx->cqc.eqn = info->cqc.eqn; + cq_ctx->cqc.log_dbr_size = info->cqc.log_dbr_size; + cq_ctx->cqc.log_cq_size = info->cqc.log_cq_size; + cq_ctx->cqc.scqe_break_moderation_en = + rdma_dev->rdma_func->scqe_break_moderation_en; + cq_ctx->cqc.oi = info->cqc.oi; + cq_ctx->cqc.sw_owner_bit = 0; + cq_ctx->cqc.sw_status = 0XF; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "cq_sw_status_err", rdma_dev, cq_ctx); +#endif + cq_ctx->cqc.pbl_index = info->cqc.pbl_index; + cq_ctx->cqc.dbr_addr = info->cqc.dbr_addr; + cq_ctx->cqc.log_page_size = info->cqc.log_page_size; + cq_ctx->cqc.page_offset = info->cqc.page_offset; + cq_ctx->cqc.pbl_mode = info->cqc.pbl_mode; + 
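/* event moderation starts disabled; zero cq_max_count/cq_period presumably means every completion can raise an event */ +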
cq_ctx->cqc.cq_max_count = 0; + cq_ctx->cqc.cq_period = 0; + cq_ctx->cqc.TPH_en = + sxe2_drv_core_is_tph_enable(rdma_dev, true, &st_mode); + if (cq_ctx->cqc.TPH_en) { + if (st_mode != MODE_NO_ST) + cpu_id = TPH_GET_CPU(); + cq_ctx->cqc.TPH_value = + (cpu_id & TPH_CPUID_MASK) | PH_DWHR << 8; + } + cq_ctx->cqc.vsi_index = info->cqc.vsi_index; + + DRV_RDMA_LOG_DEV_DEBUG( + "cq:%d cqc : ceqn %#x, dbsz %#x, cqsz %#x, scqe_break_moderation_en %x,\n" + "\toi %d, sw_owner_bit %d, sw_status %#x, pbl %#llx,\n" + "\tdbr %#llx, pgsz %#x, pg_offset %#x, pblm %#x,\n\n" + "\tcq_max_count %d, cq_period %d, TPH_value %d, TPH_en %d, vsi_id %#x\n", + cq_ctx->cq_uk.cq_id, cq_ctx->cqc.eqn, cq_ctx->cqc.log_dbr_size, + cq_ctx->cqc.log_cq_size, cq_ctx->cqc.scqe_break_moderation_en, + cq_ctx->cqc.oi, cq_ctx->cqc.sw_owner_bit, cq_ctx->cqc.sw_status, + cq_ctx->cqc.pbl_index, cq_ctx->cqc.dbr_addr, + cq_ctx->cqc.log_page_size, cq_ctx->cqc.page_offset, + cq_ctx->cqc.pbl_mode, cq_ctx->cqc.cq_max_count, + cq_ctx->cqc.cq_period, cq_ctx->cqc.TPH_value, + cq_ctx->cqc.TPH_en, cq_ctx->cqc.vsi_index); +} + +int sxe2_drv_cq_destroy(struct sxe2_rdma_ctx_cq *cq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct sxe2_cq_wqe *cq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = cq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + cq_wqe = (struct sxe2_cq_wqe *)wqe; + cq_wqe->sw_status = 0; + cq_wqe->op = SXE2_MQ_OP_DESTROY_CQ; + cq_wqe->cqn = cq->cq_uk.cq_id; + cq_wqe->WQE_Valid = mq->polarity; + cq_wqe->physical_buffer_address = 0; + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +void sxe2_drv_cq_destroy_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq) +{ + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + DRV_RDMA_LOG_DEV_ERR("get mq_request failed\n"); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_DESTROY_CQ; + mq_info->post_mq = 1; + mq_info->in.u.cq_destroy.cq = cq; + mq_info->in.u.cq_destroy.scratch = (uintptr_t)mq_request; + mq_info->destroy = true; + sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return; +} + +static void sxe2_drv_cq_wqe_init(struct sxe2_cq_wqe *cq_wqe, + struct sxe2_rdma_ctx_cq *cq, + struct sxe2_mq_ctx *mq) +{ + cq_wqe->eqn = cq->cqc.eqn; + cq_wqe->log_dbr_size = cq->cqc.log_dbr_size; + cq_wqe->log_cq_size = cq->cqc.log_cq_size; + cq_wqe->scqe_break_moderation_en = cq->cqc.scqe_break_moderation_en; + cq_wqe->oi = cq->cqc.oi; + cq_wqe->sw_owner_bit = cq->cqc.sw_owner_bit; + cq_wqe->sw_status = cq->cqc.sw_status; + cq_wqe->pbl_index = cq->cqc.pbl_index; + cq_wqe->dbr_addr = cq->cqc.dbr_addr; + cq_wqe->op = SXE2_MQ_OP_CREATE_CQ; + cq_wqe->cqn = cq->cq_uk.cq_id; + cq_wqe->WQE_Valid = mq->polarity; + cq_wqe->physical_buffer_address = 0; + cq_wqe->log_page_size = cq->cqc.log_page_size; + cq_wqe->page_offset = cq->cqc.page_offset; + cq_wqe->pbl_mode = cq->cqc.pbl_mode; + cq_wqe->cq_max_count = cq->cqc.cq_max_count; + cq_wqe->cq_period = cq->cqc.cq_period; + cq_wqe->TPH_value = cq->cqc.TPH_value; + cq_wqe->TPH_en = cq->cqc.TPH_en; + cq_wqe->vsi_index = cq->cqc.vsi_index; +} + +static void 
sxe2_drv_cq_create_err_inject(struct sxe2_rdma_device *rdma_dev, + struct sxe2_cq_wqe *cq_wqe, + struct sxe2_rdma_ctx_cq *cq) +{ +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cqn", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cqcn", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqcn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_size"); +#endif +} + +int sxe2_drv_cq_create(struct sxe2_rdma_ctx_cq *cq, u64 scratch, + bool check_overflow, bool post_sq) +{ + __le64 *wqe; + struct sxe2_cq_wqe *cq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = cq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + if (((cq->cq_uk.cq_id > + (mq->dev->rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].max_cnt - 1)) || + (cq->cqc.eqn) > (cq->dev->rcms_info->max_ceqs - 1))) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF( + "invalid cq_id:%d or ceq_id:%d , ret (%d)\n", + cq->cq_uk.cq_id, cq->cqc.eqn, ret); + goto end; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_CREATE_CQ]) { + DRV_RDMA_LOG_DEBUG_BDF("MQ:opcode %u inject op failed tri\n", + MQ_OP_CREATE_CQ); + ret = -EBUSY; + goto end; + } +#endif + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR_BDF("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + cq_wqe = (struct sxe2_cq_wqe *)wqe; + sxe2_drv_cq_wqe_init(cq_wqe, cq, mq); + + sxe2_drv_cq_create_err_inject(rdma_dev, cq_wqe, cq); + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_cq_create_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_CREATE_CQ; + mq_info->post_mq = 1; + mq_info->in.u.cq_create.cq = cq; + mq_info->in.u.cq_create.check_overflow = true; + mq_info->in.u.cq_create.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle create cq failed, ret (%d)\n", + ret); + +end: + return ret; +} + +#ifdef CREATE_CQ_V1 +static int sxe2_create_cq_param_check(const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +{ + int ret = 0; + + if (!attr) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("invalid param create cq, ret (%d)\n", ret); + goto end; + } + + if (udata && + (udata->outlen < offsetofend(struct sxe2_create_cq_resp, ncqe))) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("invalid udata create cq, ret (%d)\n", ret); + goto end; + } + + if ((attr->flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN | + IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION))) { + ret = -EOPNOTSUPP; + 
DRV_RDMA_LOG_ERROR("inv flags %u, ret (%d)\n", attr->flags, + ret); + goto end; + } +end: + return ret; +} +#else +static int sxe2_create_cq_param_check(struct ib_cq *ibcq, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +{ + int ret = 0; + + if (!ibcq || !attr) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("invalid param create cq, ret (%d)\n", ret); + goto end; + } + + if (udata && + (udata->outlen < offsetofend(struct sxe2_create_cq_resp, ncqe))) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("invalid udata create cq, ret (%d)\n", ret); + goto end; + } + + if ((attr->flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN | + IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION))) { + ret = -EOPNOTSUPP; + DRV_RDMA_LOG_ERROR("inv flags %u, ret (%d)\n", attr->flags, + ret); + goto end; + } +end: + return ret; +} + +#endif +#ifdef CREATE_CQ_V1 +static int sxe2_drv_create_cq(struct ib_udata *udata, struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info, + struct sxe2_rdma_device *rdma_dev, + int entries, struct ib_ucontext *ibucontext) +#else +static int sxe2_drv_create_cq(struct ib_udata *udata, struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info, + struct sxe2_rdma_device *rdma_dev, int entries) +#endif +{ + int ret = 0; + + if (udata) { + #ifdef CREATE_CQ_V1 + ret = sxe2_drv_create_cq_user(rdma_dev, udata, cq, info, + entries, ibucontext); + #else + ret = sxe2_drv_create_cq_user(rdma_dev, udata, cq, info, + entries); + #endif + if (unlikely(ret)) + DRV_RDMA_LOG_DEV_ERR( + "user create cq failed, ret (%d)\n", ret); + } else { + ret = sxe2_drv_create_cq_kernel(rdma_dev, cq, info, entries); + if (unlikely(ret)) + DRV_RDMA_LOG_DEV_ERR( + "kernel create cq failed, ret (%d)\n", ret); + } + + return ret; +} + +static void sxe2_drv_destroy_cq(struct ib_udata *udata, struct sxe2_rdma_cq *cq, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_pci_f *rdma_func) +{ + if (udata) + sxe2_drv_destroy_cq_user(rdma_dev, cq, udata); + else + sxe2_drv_destroy_cq_kernel(rdma_func, cq); +} + +#ifdef CREATE_CQ_V1 +struct ib_cq *sxe2_kcreate_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, + struct ib_ucontext *ibucontext, struct ib_udata *udata) +{ + int ret; + int entries; + u32 cq_num = 0; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_pci_f *rdma_func; + struct sxe2_rdma_cq *cq; + struct sxe2_cq_init_info info; + struct sxe2_create_cq_resp resp = {}; + + ret = sxe2_create_cq_param_check(attr, udata); + if (ret != 0) + goto end; + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("cq kzalloc failed\n"); + goto end; + } + + entries = attr->cqe; + rdma_func = rdma_dev->rdma_func; + ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_cqs, + rdma_func->max_cq, &cq_num, &rdma_func->next_cq); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get cq_num failed, ret (%d)\n", ret); + goto free_cq; + } + refcount_set(&cq->refcnt, 1); + spin_lock_init(&cq->lock); + INIT_LIST_HEAD(&cq->cmpl_generated); + atomic_set(&cq->armed, 0); + memset(&info, 0, sizeof(struct sxe2_cq_init_info)); + info.dev = &rdma_func->ctx_dev; + info.cq_uk_init_info.cq_id = cq_num; + + DRV_RDMA_LOG_INFO("cq db map:%p\n", rdma_func->db->map); + info.cq_uk_init_info.cqe_alloc_db = + (__le64 *)(rdma_func->db->map + SXE2_RDMA_DB_CQ_ARM_OFFSET); + DRV_RDMA_LOG_INFO("cqe_alloc_db :%p\n", + info.cq_uk_init_info.cqe_alloc_db); + cq->cq_num = (u16)cq_num; + ret = sxe2_drv_create_cq(udata, cq, &info, rdma_dev, entries, ibucontext); + if (ret != 0) + goto free_rsrc; + + cq->ibcq.cqe = 
info.cq_uk_init_info.ncqe; + if (attr->comp_vector < rdma_func->ceqs_count) + info.cqc.eqn = attr->comp_vector; + + if (attr->flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) + info.cqc.oi = 1; + + info.cqc.log_cq_size = ilog2(info.cq_uk_init_info.ncqe); + info.cqc.log_dbr_size = ilog2(info.cq_uk_init_info.ncqe / 2); + info.cqc.dbr_addr = info.db_pa; + info.cqc.vsi_index = rdma_dev->vsi.vsi_idx; + sxe2_drv_cq_ctx_init(rdma_dev, cq, &info); + ret = sxe2_drv_cq_create_op(rdma_dev, &cq->cq_ctx); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("kcreate cq op failed, ret (%d)\n", ret); + goto err_op; + } + + ret = drv_rdma_debug_cq_add(rdma_dev, cq); + if (unlikely(ret)) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding CQ %#x to debug file system, ret (%d)\n", + cq->cq_num, ret); + goto destroy_cq; + } + + if (udata) { + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.ncqe = info.cq_uk_init_info.ncqe; + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("copy2user failed cqn %#x\n", + resp.cq_id); + ret = -EFAULT; + + drv_rdma_debug_cq_remove(rdma_dev, cq); + + goto destroy_cq; + } + } + + DRV_RDMA_LOG_DEV_INFO("create cq end, cqn:%d\n", + info.cq_uk_init_info.cq_id); + + rdma_func->cq_table[cq_num] = cq; + init_completion(&cq->free_cq); + ret = 0; + goto end; + +destroy_cq: + sxe2_drv_cq_destroy_op(rdma_dev, &cq->cq_ctx); +err_op: + sxe2_drv_destroy_cq(udata, cq, rdma_dev, rdma_func); +free_rsrc: + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_cqs, cq_num); +free_cq: + kfree(cq); +end: + return ret ? ERR_PTR(ret) : &cq->ibcq; +} + +#else +int sxe2_kcreate_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +{ + int ret; + int entries; + u32 cq_num = 0; + struct ib_device *ibdev; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_pci_f *rdma_func; + struct sxe2_rdma_cq *cq; + struct sxe2_cq_init_info info; + struct sxe2_create_cq_resp resp = {}; + + ret = sxe2_create_cq_param_check(ibcq, attr, udata); + if (ret != 0) + goto end; + + ibdev = ibcq->device; + entries = attr->cqe; + rdma_dev = to_dev(ibdev); + cq = to_scq(ibcq); + rdma_func = rdma_dev->rdma_func; + ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_cqs, + rdma_func->max_cq, &cq_num, &rdma_func->next_cq); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get cq_num failed, ret (%d)\n", ret); + goto end; + } + refcount_set(&cq->refcnt, 1); + spin_lock_init(&cq->lock); + INIT_LIST_HEAD(&cq->cmpl_generated); + atomic_set(&cq->armed, 0); + memset(&info, 0, sizeof(struct sxe2_cq_init_info)); + info.dev = &rdma_func->ctx_dev; + info.cq_uk_init_info.cq_id = cq_num; + + DRV_RDMA_LOG_INFO("cq db map:%p\n", rdma_func->db->map); + info.cq_uk_init_info.cqe_alloc_db = + (__le64 *)(rdma_func->db->map + SXE2_RDMA_DB_CQ_ARM_OFFSET); + DRV_RDMA_LOG_INFO("cqe_alloc_db :%p\n", + info.cq_uk_init_info.cqe_alloc_db); + cq->cq_num = cq_num; + ret = sxe2_drv_create_cq(udata, cq, &info, rdma_dev, entries); + if (ret != 0) + goto free_rsrc; + + cq->ibcq.cqe = info.cq_uk_init_info.ncqe; + if (attr->comp_vector < rdma_func->ceqs_count) + info.cqc.eqn = attr->comp_vector; + + if (attr->flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) + info.cqc.oi = 1; + + info.cqc.log_cq_size = ilog2(info.cq_uk_init_info.ncqe); + info.cqc.log_dbr_size = ilog2(info.cq_uk_init_info.ncqe / 2); + info.cqc.dbr_addr = info.db_pa; + info.cqc.vsi_index = rdma_dev->vsi.vsi_idx; + sxe2_drv_cq_ctx_init(rdma_dev, cq, &info); + ret = sxe2_drv_cq_create_op(rdma_dev, &cq->cq_ctx); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("kcreate cq op 
failed, ret (%d)\n", ret); + goto err_op; + } + + ret = drv_rdma_debug_cq_add(rdma_dev, cq); + if (unlikely(ret)) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding CQ %#x to debug file system, ret (%d)\n", + cq->cq_num, ret); + goto destroy_cq; + } + + if (udata) { + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.ncqe = info.cq_uk_init_info.ncqe; + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("copy2user failed cqn %#x\n", + resp.cq_id); + ret = -EFAULT; + + drv_rdma_debug_cq_remove(rdma_dev, cq); + + goto destroy_cq; + } + } + + DRV_RDMA_LOG_DEV_INFO("create cq end, cqn:%d\n", + info.cq_uk_init_info.cq_id); + + rdma_func->cq_table[cq_num] = cq; + init_completion(&cq->free_cq); + ret = 0; + goto end; + +destroy_cq: + sxe2_drv_cq_destroy_op(rdma_dev, &cq->cq_ctx); +err_op: + sxe2_drv_destroy_cq(udata, cq, rdma_dev, rdma_func); +free_rsrc: + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_cqs, cq_num); +end: + return ret; +} +#endif + +int sxe2_drv_cq_query(struct sxe2_rdma_ctx_cq *cq, u64 query_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_cq_wqe *cq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = cq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + cq_wqe = (struct sxe2_cq_wqe *)wqe; + cq_wqe->op = SXE2_MQ_OP_QUERY_CQ; + cq_wqe->cqn = cq->cq_uk.cq_id; + cq_wqe->WQE_Valid = mq->polarity; + cq_wqe->physical_buffer_address = query_pa >> SXE2_CQC_PHY_ADDR_OFFSET; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cqn", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqn"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_cq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq, u64 query_pa) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_QUERY_CQ; + mq_info->post_mq = 1; + mq_info->in.u.cq_query.cq = cq; + mq_info->in.u.cq_query.scratch = (uintptr_t)mq_request; + mq_info->in.u.cq_query.query_pa = query_pa; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle query cq failed, ret (%d)\n", ret); + +end: + return ret; +} + +int sxe2_drv_cq_modify(struct sxe2_rdma_ctx_cq *cq, struct sxe2_rdma_cqc *cqc, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct sxe2_cq_wqe *cq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = cq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + cq_wqe = (struct sxe2_cq_wqe *)wqe; + cq_wqe->eqn = cqc->scqc.eqn; + cq_wqe->log_dbr_size = cqc->scqc.log_dbr_size; + cq_wqe->log_cq_size = cqc->scqc.log_cq_size; + cq_wqe->scqe_break_moderation_en = cqc->scqc.scqe_break_moderation_en; + cq_wqe->oi = cqc->scqc.oi; + 
cq_wqe->sw_owner_bit = cqc->scqc.sw_owner_bit; + cq_wqe->sw_status = cqc->scqc.sw_status; + cq_wqe->pbl_index = cqc->scqc.pbl_index; + cq_wqe->dbr_addr = cqc->scqc.dbr_addr; + cq_wqe->op = SXE2_MQ_OP_MODIFY_CQ; + cq_wqe->cqn = cq->cq_uk.cq_id; + cq_wqe->WQE_Valid = mq->polarity; + cq_wqe->physical_buffer_address = 0; + cq_wqe->log_page_size = cqc->scqc.log_page_size; + cq_wqe->page_offset = cqc->scqc.page_offset; + cq_wqe->pbl_mode = cqc->scqc.pbl_mode; + cq_wqe->cq_max_count = cqc->scqc.cq_max_count; + cq_wqe->cq_period = cqc->scqc.cq_period; + cq_wqe->TPH_value = cqc->scqc.TPH_value; + cq_wqe->TPH_en = cqc->scqc.TPH_en; + cq_wqe->vsi_index = cqc->scqc.vsi_index; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cqn", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_cq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, cq_wqe, cq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_size"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + ret = 0; + +end: + return ret; +} + +int sxe2_drv_cq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq, + struct sxe2_rdma_cqc *cqc) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_MODIFY_CQ; + mq_info->post_mq = 1; + mq_info->in.u.cq_modify.cq = cq; + mq_info->in.u.cq_modify.scratch = (uintptr_t)mq_request; + mq_info->in.u.cq_modify.cqc = cqc; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle modify cq failed, ret (%d)\n", + ret); + +end: + return ret; +} + +int sxe2_kmodify_cq(struct ib_cq *cq_in, u16 cq_count, u16 cq_period) +{ + int ret; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_cq *cq; + struct sxe2_rdma_cqc *ctx; + struct sxe2_rdma_dma_mem query_cq; + struct sxe2_rdma_ctx_dev *dev_ctx; + + rdma_dev = to_dev(cq_in->device); + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + cq = to_scq(cq_in); + + if (unlikely(cq_period > SXE2_MAX_CQ_PERIOD)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("cq period inv %u, ret (%d)\n", cq_period, + ret); + goto end; + } + + memset(&query_cq, 0, sizeof(query_cq)); + query_cq.size = sizeof(struct sxe2_rdma_cqc); + query_cq.va = dma_alloc_coherent(dev_ctx->hw->device, query_cq.size, + &query_cq.pa, GFP_KERNEL); + if (!query_cq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("query cq ctx alloc failed. 
ret:%d\n", + ret); + goto end; + } + memset(query_cq.va, 0, query_cq.size); + + ret = sxe2_drv_cq_query_op(rdma_dev, &cq->cq_ctx, query_cq.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query cq failed, ret (%d)\n", ret); + goto free_ctx; + } + + ctx = (struct sxe2_rdma_cqc *)query_cq.va; + ctx->scqc.cq_max_count = cq_count; + ctx->scqc.cq_period = cq_period; + + ret = sxe2_drv_cq_modify_op(rdma_dev, &cq->cq_ctx, ctx); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR( + "modify cq max_count:%d, period:%d failed, ret (%d)\n", + cq_count, cq_period, ret); + goto free_ctx; + } + + DRV_RDMA_LOG_DEV_INFO("modify cq max_count:%d, period:%d\n", cq_count, + cq_period); + +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_cq.size, query_cq.va, + query_cq.pa); + query_cq.va = NULL; + +end: + return ret; +} + +void sxe2_drv_cq_add_ref(struct sxe2_rdma_cq *cq) +{ + refcount_inc(&cq->refcnt); +} + +void sxe2_drv_cq_rem_ref(struct sxe2_rdma_cq *cq) +{ + struct sxe2_rdma_pci_f *rdma_func = + container_of(cq->cq_ctx.dev, struct sxe2_rdma_pci_f, ctx_dev); + unsigned long flag = 0; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = to_rdmadev(cq->cq_ctx.dev); + + spin_lock_irqsave(&rdma_func->cqtable_lock, flag); + if (!refcount_dec_and_test(&cq->refcnt)) { + spin_unlock_irqrestore(&rdma_func->cqtable_lock, flag); + DRV_RDMA_LOG_DEV_ERR("cq is used by other\n"); + goto end; + } + + rdma_func->cq_table[cq->cq_num] = NULL; + spin_unlock_irqrestore(&rdma_func->cqtable_lock, flag); + complete(&cq->free_cq); + +end: + return; +} + +void sxe2_cq_remove_cmpls_list(struct sxe2_rdma_cq *cq) +{ + struct sxe2_cq_cmpl_gen *cmpl_node; + struct list_head *tmp_node, *list_node; + + list_for_each_safe(list_node, tmp_node, &cq->cmpl_generated) { + cmpl_node = + list_entry(list_node, struct sxe2_cq_cmpl_gen, list); + list_del(&cmpl_node->list); + kfree(cmpl_node); + } +} + +void sxe2_drv_cleanup_ceqes(struct sxe2_rdma_ctx_cq *cq, + struct sxe2_rdma_ctx_ceq *ceq) +{ + struct sxe2_rdma_ctx_cq *next_cq; + u8 ceq_polarity = ceq->polarity; + __le64 *ceqe; + u8 polarity; + u64 temp; + u32 next; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(ceq->dev); + + next = SXE2_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); + + for (i = 1; i <= SXE2_RING_SIZE(ceq->ceq_ring); i++) { + if (rdma_dev->cache_line_64_en == false) + ceqe = SXE2_GET_CEQ_ELEM_AT_POS(ceq, next); + else + ceqe = SXE2_GET_CEQ_ELEM_AT_POS_HYGON(ceq, next); + + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)FIELD_GET(SXE2_CEQE_VALID, temp); + if (polarity != ceq_polarity) + return; + + next_cq = (struct sxe2_rdma_ctx_cq *)(unsigned long)LS_64_1( + temp, 1); + if (cq == next_cq) + set_64bit_val(ceqe, 0, temp & SXE2_CEQE_VALID); + + next = SXE2_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); + if (!next) + ceq_polarity ^= 1; + } +} + +#ifdef RDMA_DESTROY_CQ_VER_3 +void sxe2_kdestroy_cq(struct ib_cq *cq_in, struct ib_udata *udata) +#elif defined RDMA_DESTROY_CQ_VER_4 +int sxe2_kdestroy_cq(struct ib_cq *cq_in) +#else +int sxe2_kdestroy_cq(struct ib_cq *cq_in, struct ib_udata *udata) +#endif + +{ + int ret; + unsigned long flag = 0; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_cq *cq; + struct sxe2_rdma_ctx_dev *ctx_dev; + struct sxe2_rdma_ctx_ceq *ceq_ctx; + struct sxe2_rdma_ceq *ceq; + struct sxe2_rdma_pci_f *rdma_func; + + if (!cq_in) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("invalid cq resource\n"); + goto end; + } + + rdma_dev = to_dev(cq_in->device); + cq = to_scq(cq_in); + ctx_dev = cq->cq_ctx.dev; + ceq_ctx = ctx_dev->ceq[cq->cq_ctx.cqc.eqn]; + ceq = 
container_of(ceq_ctx, struct sxe2_rdma_ceq, ctx_ceq); + rdma_func = rdma_dev->rdma_func; + + spin_lock_irqsave(&cq->lock, flag); + if (!list_empty(&cq->cmpl_generated)) + sxe2_cq_remove_cmpls_list(cq); + + spin_unlock_irqrestore(&cq->lock, flag); + + sxe2_drv_cq_rem_ref(cq); + wait_for_completion(&cq->free_cq); + + sxe2_drv_cq_destroy_op(rdma_dev, &cq->cq_ctx); + + spin_lock_irqsave(&ceq->ce_lock, flag); + sxe2_drv_cleanup_ceqes(&cq->cq_ctx, ceq_ctx); + spin_unlock_irqrestore(&ceq->ce_lock, flag); + +#ifndef RDMA_DESTROY_CQ_VER_4 + if (udata) + sxe2_drv_destroy_cq_user(rdma_dev, cq, udata); + else + sxe2_drv_destroy_cq_kernel(rdma_func, cq); +#endif + + drv_rdma_debug_cq_remove(rdma_dev, cq); + + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_cqs, + cq->cq_ctx.cq_uk.cq_id); + ret = 0; + +end: +#ifdef RDMA_DESTROY_CQ_VER_3 + return; +#else + return ret; +#endif +} + +static inline int sxe2_drv_mcq_create_done(struct sxe2_rdma_ctx_cq *mcq) +{ + struct sxe2_mq_ctx *mq; + + mq = mcq->dev->mq; + + return sxe2_kpoll_mcq(mq, SXE2_MQ_OP_CREATE_CQ, NULL); +} + +int sxe2_drv_mcq_create(struct sxe2_rdma_ctx_cq *mcq, u64 scratch, + bool check_overflow, bool post_sq) +{ + int ret; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = to_rdmadev(mcq->dev); + + if (rdma_dev->rdma_func->reset) + return -EBUSY; + + ret = sxe2_drv_cq_create(mcq, scratch, check_overflow, post_sq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("mcq create failed, ret (%d)\n", ret); + goto end; + } + + if (post_sq) { + ret = sxe2_drv_mcq_create_done(mcq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "wait mcq create done err, ret (%d)\n", ret); + goto end; + } + } + mcq->dev->mq->process_mq_fpt = sxe2_rcms_update_fptes_cmd_complete; + ret = 0; + +end: + return ret; +} + +void sxe2_drv_mcq_ctx_init(struct sxe2_rdma_ctx_cq *cq_ctx, + struct sxe2_rdma_mcq_init_info *info) +{ + struct sxe2_rdma_device *rdma_dev; + u32 st_mode = 0; + s32 cpu_id = 0; + + rdma_dev = to_rdmadev(cq_ctx->dev); + + cq_ctx->cq_type = SXE2_RDMA_CQ_TYPE_MQ; + cq_ctx->cq_pa = info->cq_pa; + cq_ctx->db_pa = info->db_pa; + cq_ctx->dev = info->dev; + cq_ctx->vsi = info->vsi; + + cq_ctx->cq_uk.cq_base = info->cq_base; + cq_ctx->cq_uk.cqe_alloc_db = info->cqe_alloc_db; + cq_ctx->cq_uk.doorbell_note = info->doorbell_note; + cq_ctx->cq_uk.cq_id = 0; + cq_ctx->cq_uk.ncqe = SXE2_MCQ_SIZE; + cq_ctx->cq_uk.polarity = 1; + cq_ctx->cq_uk.arm_sn = SXE2_RDMA_CQ_ARM_SN; + SXE2_RING_INIT(cq_ctx->cq_uk.cq_ring, SXE2_MCQ_SIZE); + + cq_ctx->cqc.eqn = info->cqc.eqn; + cq_ctx->cqc.log_dbr_size = info->cqc.log_dbr_size; + cq_ctx->cqc.log_cq_size = info->cqc.log_cq_size; + cq_ctx->cqc.scqe_break_moderation_en = + rdma_dev->rdma_func->scqe_break_moderation_en; + cq_ctx->cqc.oi = rdma_dev->rdma_func->oi; + cq_ctx->cqc.sw_owner_bit = 0; + cq_ctx->cqc.sw_status = 0xF; + cq_ctx->cqc.pbl_index = info->cqc.pbl_index; + cq_ctx->cqc.dbr_addr = info->cqc.dbr_addr; + cq_ctx->cqc.log_page_size = info->cqc.log_page_size; + cq_ctx->cqc.page_offset = info->cqc.page_offset; + cq_ctx->cqc.pbl_mode = info->cqc.pbl_mode; + cq_ctx->cqc.cq_max_count = 0; + cq_ctx->cqc.cq_period = 0; + cq_ctx->cqc.TPH_en = + sxe2_drv_core_is_tph_enable(rdma_dev, true, &st_mode); + if (cq_ctx->cqc.TPH_en) { + if (st_mode != MODE_NO_ST) + cpu_id = TPH_GET_CPU(); + cq_ctx->cqc.TPH_value = + (cpu_id & TPH_CPUID_MASK) | PH_DWHR << 8; + } + cq_ctx->cqc.vsi_index = info->cqc.vsi_index; + + DRV_RDMA_LOG_DEV_DEBUG( + "cq:%d cqc : ceqn %#x, dbsz %#x, cqsz %#x, scqe_break_moderation_en %x,\n" + "\toi %d, sw_owner_bit %d, sw_status %#x, pbl 
%#llx,\n" + "\tdbr %#llx, pgsz %#x, pg_offset %#x, pblm %#x,\n" + "\tcq_max_count %d, cq_period %d, TPH_value %d, TPH_en %d, vsi_id %#x\n", + cq_ctx->cq_uk.cq_id, cq_ctx->cqc.eqn, cq_ctx->cqc.log_dbr_size, + cq_ctx->cqc.log_cq_size, cq_ctx->cqc.scqe_break_moderation_en, + cq_ctx->cqc.oi, cq_ctx->cqc.sw_owner_bit, cq_ctx->cqc.sw_status, + cq_ctx->cqc.pbl_index, cq_ctx->cqc.dbr_addr, + cq_ctx->cqc.log_page_size, cq_ctx->cqc.page_offset, + cq_ctx->cqc.pbl_mode, cq_ctx->cqc.cq_max_count, + cq_ctx->cqc.cq_period, cq_ctx->cqc.TPH_value, + cq_ctx->cqc.TPH_en, cq_ctx->cqc.vsi_index); +} + +static int sxe2_drv_mcq_destroy(struct sxe2_rdma_ctx_cq *mcq, u64 scratch, + bool post_sq) +{ + u32 tail, val, error; + __le64 *wqe; + struct sxe2_cq_wqe *cq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret = 0; + + mq = mcq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + cq_wqe = (struct sxe2_cq_wqe *)wqe; + cq_wqe->sw_status = 0; + cq_wqe->op = SXE2_MQ_OP_DESTROY_CQ; + cq_wqe->cqn = mcq->cq_uk.cq_id; + cq_wqe->WQE_Valid = mq->polarity; + cq_wqe->physical_buffer_address = 0; + + sxe2_kget_mq_reg_info(mq, &val, &tail, &error); + + if (post_sq) { + sxe2_kpost_mq(mq); + ret = sxe2_kpoll_mq_registers(mq, tail, + mq->dev->hw_attrs.max_done_count); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_DESTROY_CQ] && + !ret) { + DRV_RDMA_LOG_DEV_DEBUG( + "MQ:opcode %u inject op failed tri\n", + MQ_OP_DESTROY_CQ); + ret = -EBUSY; + } +#endif + } + mq->process_mq_fpt = sxe2_rcms_update_fptes_cmd; + +end: + return ret; +} + +int sxe2_create_mcq(struct sxe2_rdma_device *rdma_dev) +{ + int ret; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_rdma_mcq_init_info info = {}; + struct sxe2_rdma_mcq *mcq = &rdma_func->mcq; + + dev->mcq = &mcq->ctx_cq; + dev->mcq->dev = dev; + info.dev = dev; + mcq->mem_cq.size = + ALIGN(sizeof(struct sxe2_cqe) * SXE2_MCQ_SIZE, 0x100); + mcq->mem_cq.va = dma_alloc_coherent(dev->hw->device, mcq->mem_cq.size, + &mcq->mem_cq.pa, GFP_KERNEL); + if (!mcq->mem_cq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("mcq buf alloc failed, ret (%d)\n", ret); + goto end; + } + memset(mcq->mem_cq.va, 0, mcq->mem_cq.size); + + mcq->mem_db_note.size = SXE2_RDMA_DB_NOTE_SIZE; + ret = sxe2_kget_aligned_mem(rdma_func, &mcq->mem_db_note, + mcq->mem_db_note.size, SXE2_DB_NOTE_M); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("mcq db note buf alloc failed, ret (%d)\n", + ret); + goto db_mem_alloc_err; + } + set_32bit_val(mcq->mem_db_note.va, 4, + (__u32)(SXE2_CQ_DB_NOTE_CMD_SN) + << SXE2_CQ_DB_NOTE_CMD_SN_SHIFT); + + mcq->ctx_cq.back_cq = mcq; + mcq->ctx_cq.dev = dev; + mcq->ctx_cq.vsi = &rdma_dev->vsi; + + info.cq_base = mcq->mem_cq.va; + info.cq_pa = mcq->mem_cq.pa; + info.doorbell_note = mcq->mem_db_note.va; + info.db_pa = mcq->mem_db_note.pa; + info.dev = &rdma_func->ctx_dev; + info.cqc.eqn = 0; + info.cqc.log_cq_size = ilog2(SXE2_MCQ_SIZE); + info.cqc.log_dbr_size = ilog2(SXE2_MCQ_SIZE / 2); + info.cqc.log_page_size = order_base_2(PAGE_SIZE); + info.cqc.page_offset = offset_in_page(info.cq_pa); + info.cqc.dbr_addr = info.db_pa; + info.cqc.vsi_index = rdma_dev->vsi.vsi_idx; + info.vsi = &rdma_func->default_vsi; + info.cqc.pbl_mode = CQ_EQ_PA_FIRST_MODE; + info.cqc.pbl_index = info.cq_pa; + + 
DRV_RDMA_LOG_DEV_INFO("mcq db map:%p\n", rdma_func->db->map); + info.cqe_alloc_db = + (__le64 *)(rdma_func->db->map + SXE2_RDMA_DB_CQ_ARM_OFFSET); + DRV_RDMA_LOG_DEV_INFO("mcqe_alloc_db :%p\n", info.cqe_alloc_db); + + dev->mcq->back_cq = mcq; + sxe2_drv_mcq_ctx_init(dev->mcq, &info); + + ret = sxe2_drv_mcq_create(dev->mcq, 0, true, true); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("mcq create failed, ret (%d)\n", ret); + goto db_mem_alloc_err; + } + + ret = drv_rdma_debug_mcq_add(rdma_dev, mcq); + if (unlikely(ret)) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding CQ %#x to debug file system, ret (%d)\n", + 0, ret); + sxe2_drv_mcq_destroy(dev->mcq, 0, true); + } + +db_mem_alloc_err: + if (ret) { + dma_free_coherent(dev->hw->device, mcq->mem_cq.size, + mcq->mem_cq.va, mcq->mem_cq.pa); + mcq->mem_cq.va = NULL; + } + +end: + return ret; +} + +void sxe2_destroy_mcq(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_rdma_mcq *mcq = &rdma_func->mcq; + bool hw_rsrc_clean = false; + + hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(dev); + if (!rdma_func->reset || !hw_rsrc_clean) { + ret = sxe2_drv_mcq_destroy(dev->mcq, 0, true); + if (ret) + DRV_RDMA_LOG_DEV_ERR("mcq destroy failed, ret (%d)\n", + ret); + } + + drv_rdma_debug_mcq_remove(rdma_dev, mcq); + + dma_free_coherent(dev->hw->device, mcq->mem_cq.size, mcq->mem_cq.va, + mcq->mem_cq.pa); + mcq->mem_cq.va = NULL; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.h new file mode 100644 index 0000000000000000000000000000000000000000..2c7a3e365d77eead7c1576f6fd0750f4002eb255 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_cq.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_CQ_H__ +#define __SXE2_DRV_CQ_H__ + +#include "sxe2_drv_rdma_common.h" + +#define SXE2_RDMA_MIN_CQ_SIZE 4 +#define SXE2_RDMA_MAX_CQ_SIZE 2097152 + +#define SXE2_MAX_CQ_PERIOD (0xFFF) + +#define SXE2_RDMA_DB_NOTE_SIZE (64) + +#define SXE2_RDMA_CQ_TYPE_IO (1) +#define SXE2_RDMA_CQ_TYPE_MQ (2) + +#define SXE2_RDMA_DB_CQ_ARM_OFFSET (8) +#define SXE2_RDMA_DB_CQ_INFO_OFFSET (12) + +#define SXE2_RDMA_CQ_ARM_SN (3) + +struct sxe2_rdma_cq_uk_init_info { + __le64 *cqe_alloc_db; + struct sxe2_cqe *cq_base; + __le32 *doorbell_note; + __u32 cq_id; + __u32 ncqe; +}; + +struct sxe2_cq_init_info { + struct sxe2_rdma_ctx_dev *dev; + u64 cq_pa; + u64 db_pa; + struct sxe2_rdma_cq_uk_init_info cq_uk_init_info; + struct drv_rdma_soft_cqc cqc; +}; + +struct sxe2_rdma_mcq_init_info { + u64 cq_pa; + u64 db_pa; + __le64 *cqe_alloc_db; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_ctx_vsi *vsi; + struct sxe2_cqe *cq_base; + __le32 *doorbell_note; + struct drv_rdma_soft_cqc cqc; +}; + +static inline struct sxe2_rdma_cq *to_scq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct sxe2_rdma_cq, ibcq); +} + +#ifdef CREATE_CQ_V1 +struct ib_cq *sxe2_kcreate_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, + struct ib_ucontext *ibucontext, struct ib_udata *udata); +#else +int sxe2_kcreate_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +#endif +int sxe2_kmodify_cq(struct ib_cq *cq_in, u16 cq_count, u16 cq_period); + +#ifdef RDMA_DESTROY_CQ_VER_3 +void sxe2_kdestroy_cq(struct ib_cq *cq_in, struct ib_udata *udata); +#elif defined RDMA_DESTROY_CQ_VER_4 +int sxe2_kdestroy_cq(struct ib_cq *cq_in); +#else +int sxe2_kdestroy_cq(struct ib_cq *cq_in, struct ib_udata *udata); +#endif + +int sxe2_create_mcq(struct sxe2_rdma_device *rdma_dev); +void sxe2_destroy_mcq(struct sxe2_rdma_device *rdma_dev); + +int sxe2_drv_cq_destroy(struct sxe2_rdma_ctx_cq *cq, u64 scratch, bool post_sq); +int sxe2_drv_cq_create(struct sxe2_rdma_ctx_cq *cq, u64 scratch, + bool check_overflow, bool post_sq); +int sxe2_drv_cq_query(struct sxe2_rdma_ctx_cq *cq, u64 query_pa, u64 scratch, + bool post_sq); +int sxe2_drv_cq_modify(struct sxe2_rdma_ctx_cq *cq, struct sxe2_rdma_cqc *cqc, + u64 scratch, bool post_sq); +void sxe2_drv_cq_add_ref(struct sxe2_rdma_cq *cq); +void sxe2_drv_cq_rem_ref(struct sxe2_rdma_cq *cq); + +int sxe2_drv_cq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq, u64 query_pa); +int sxe2_drv_cq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq, + struct sxe2_rdma_cqc *cqc); +void drv_rdma_cq_set_pbl(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_rdma_cq *cq, size_t page_sz, u32 ncont, + struct drv_rdma_soft_cqc *cqc); +void sxe2_rdma_uk_cq_init(struct sxe2_rdma_cq_uk *cq, + struct sxe2_rdma_cq_uk_init_info *info); +void sxe2_drv_cq_ctx_init(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq, + struct sxe2_cq_init_info *info); +void sxe2_drv_cq_destroy_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq); +int sxe2_drv_cq_create_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq); +void sxe2_cq_remove_cmpls_list(struct sxe2_rdma_cq *cq); +void sxe2_drv_cleanup_ceqes(struct sxe2_rdma_ctx_cq *cq, + struct sxe2_rdma_ctx_ceq *ceq); +int sxe2_drv_mcq_create(struct sxe2_rdma_ctx_cq *mcq, u64 scratch, + bool check_overflow, bool post_sq); + +void 
sxe2_drv_mcq_ctx_init(struct sxe2_rdma_ctx_cq *cq_ctx, + struct sxe2_rdma_mcq_init_info *info); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..9aa0c036139fb17bfc1371adfcdd3d90fc592644 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_cq_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_cq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_cq_debugfs.h" + +enum { + CQ_EQ_NUM, + CQ_LOG_DBR_SIZE, + CQ_LOG_CQ_SIZE, + CQ_BRK_MODERATION_EN, + CQ_OI, + CQ_OWNER_BIT, + CQ_STATUS, + CQ_LOG_PG_SZ, + CQ_PAGE_OFFSET, + CQ_PBL_MODE, + CQ_TPH_VALUE, + CQ_MAX_CNT, + CQ_PERIOD, + CQ_TPH_EN, + CQ_VSI_INDEX, + CQ_DBR_ADDR, + CQ_PBL_INDEX, +}; + +#ifdef SXE2_CFG_DEBUG +static char *cq_fields[] = { + [CQ_EQ_NUM] = "c_eqn", + [CQ_LOG_DBR_SIZE] = "log_doorbell_size", + [CQ_LOG_CQ_SIZE] = "log_cq_size", + [CQ_BRK_MODERATION_EN] = "sceq_break_moderation_en", + [CQ_OI] = "oi", + [CQ_OWNER_BIT] = "owner_bit", + [CQ_STATUS] = "sw_status", + [CQ_LOG_PG_SZ] = "log_page_size", + [CQ_PAGE_OFFSET] = "page_offset", + [CQ_PBL_MODE] = "pbl_mode", + [CQ_TPH_VALUE] = "TPH_value", + [CQ_MAX_CNT] = "cq_max_count", + [CQ_PERIOD] = "cq_period", + [CQ_TPH_EN] = "TPH_en", + [CQ_VSI_INDEX] = "vsi_index", + [CQ_DBR_ADDR] = "doorbell_addr", + [CQ_PBL_INDEX] = "pbl_index", +}; +#endif +u64 drv_rdma_cq_read_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + int ret; + struct sxe2_rdma_cqc *ctx; + struct sxe2_rdma_mcq *mcq; + struct sxe2_rdma_cq *cq; + size_t len = 0; + u32 cq_num = 0; + struct sxe2_rdma_ctx_cq *cq_ctx = NULL; + struct sxe2_rdma_dma_mem query_cq; + struct sxe2_rdma_ctx_dev *dev_ctx; + + if (type == SXE2_DBG_RSC_MCQ) { + mcq = (struct sxe2_rdma_mcq *)data; + cq_num = 0; + cq_ctx = &mcq->ctx_cq; + } else if (type == SXE2_DBG_RSC_CQ) { + cq = (struct sxe2_rdma_cq *)data; + cq_num = cq->cq_num; + cq_ctx = &cq->cq_ctx; + } + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + + memset(&query_cq, 0, sizeof(query_cq)); + query_cq.size = sizeof(struct sxe2_rdma_cqc); + query_cq.va = dma_alloc_coherent(dev_ctx->hw->device, query_cq.size, + &query_cq.pa, GFP_KERNEL); + if (!query_cq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("query cq ctx alloc failed. 
ret:%d\n", + ret); + goto end; + } + memset(query_cq.va, 0, query_cq.size); + + ret = sxe2_drv_cq_query_op(rdma_dev, cq_ctx, query_cq.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query cq failed, ret (%d)\n", ret); + goto free_ctx; + } + + ctx = (struct sxe2_rdma_cqc *)query_cq.va; + + len += dbg_vsnprintf(buf, len, "cq:%d context:\n\n", cq_num); + len += dbg_vsnprintf(buf, len, "soft context\n"); + len += dbg_vsnprintf(buf, len, "eqn: %lld\n", + ctx->scqc.eqn); + len += dbg_vsnprintf(buf, len, "log_doorbell_size: %lld\n", + ctx->scqc.log_dbr_size); + len += dbg_vsnprintf(buf, len, "log_cq_size: %lld\n", + ctx->scqc.log_cq_size); + len += dbg_vsnprintf(buf, len, "scqe_break_moderation_en: %lld\n", + ctx->scqc.scqe_break_moderation_en); + len += dbg_vsnprintf(buf, len, "oi: %lld\n", + ctx->scqc.oi); + len += dbg_vsnprintf(buf, len, "sw_owner_bit: %d\n", + ctx->scqc.sw_owner_bit); + len += dbg_vsnprintf(buf, len, "sw_status: %d\n", + ctx->scqc.sw_status); + len += dbg_vsnprintf(buf, len, "log_page_size: %d\n", + ctx->scqc.log_page_size); + len += dbg_vsnprintf(buf, len, "page_offset: %d\n", + ctx->scqc.page_offset); + len += dbg_vsnprintf(buf, len, "pbl_mode: %d\n", + ctx->scqc.pbl_mode); + len += dbg_vsnprintf(buf, len, "TPH_value: %d\n", + ctx->scqc.TPH_value); + len += dbg_vsnprintf(buf, len, "cq_max_count: %d\n", + ctx->scqc.cq_max_count); + len += dbg_vsnprintf(buf, len, "cq_period: %d\n", + ctx->scqc.cq_period); + len += dbg_vsnprintf(buf, len, "TPH_en: %d\n", + ctx->scqc.TPH_en); + len += dbg_vsnprintf(buf, len, "vsi_index: %d\n", + ctx->scqc.vsi_index); + len += dbg_vsnprintf(buf, len, "doorbell_addr: %llx\n", + ctx->scqc.dbr_addr); + len += dbg_vsnprintf(buf, len, "pbl_index: %llx\n\n", + ctx->scqc.pbl_index); + + len += dbg_vsnprintf(buf, len, "hw context\n"); + len += dbg_vsnprintf(buf, len, "hw_owner_bit: %d\n", + ctx->hcqc.hw_owner_bit); + len += dbg_vsnprintf(buf, len, "st: %d\n", + ctx->hcqc.st); + len += dbg_vsnprintf(buf, len, "cmd: %d\n", + ctx->hcqc.cmd); + len += dbg_vsnprintf(buf, len, "cmd_sn: %d\n", + ctx->hcqc.cmd_sn); + len += dbg_vsnprintf(buf, len, "hw_status: %d\n", + ctx->hcqc.hw_status); + len += dbg_vsnprintf(buf, len, "last_sol_index_en: %d\n", + ctx->hcqc.last_sol_index_en); + len += dbg_vsnprintf(buf, len, "fid: %d\n", + ctx->hcqc.fid); + len += dbg_vsnprintf(buf, len, "hw_eqn: %d\n", + ctx->hcqc.hw_eqn); + len += dbg_vsnprintf(buf, len, "last_notified_index: %d\n", + ctx->hcqc.last_notified_index); + len += dbg_vsnprintf(buf, len, "last_solicited_index_l: %d\n", + ctx->hcqc.last_solicited_index_l); + len += dbg_vsnprintf(buf, len, "last_solicited_index_h: %d\n", + ctx->hcqc.last_solicited_index_h); + len += dbg_vsnprintf(buf, len, "consumer_counter_l: %d\n", + ctx->hcqc.consumer_counter_l); + len += dbg_vsnprintf(buf, len, "consumer_counter_h: %d\n", + ctx->hcqc.consumer_counter_h); + len += dbg_vsnprintf(buf, len, "producer_counter: %d\n", + ctx->hcqc.producer_counter); + len += dbg_vsnprintf(buf, len, "page_addr_odd_l: %#x\n", + ctx->hcqc.page_addr_odd_l); + len += dbg_vsnprintf(buf, len, "page_addr_odd_h: %#x\n", + ctx->hcqc.page_addr_odd_h); + len += dbg_vsnprintf(buf, len, "page_addr_even_l: %#x\n", + ctx->hcqc.page_addr_even_l); + len += dbg_vsnprintf(buf, len, "page_addr_even_h: %#x\n", + ctx->hcqc.page_addr_even_h); +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_cq.size, query_cq.va, + query_cq.pa); + query_cq.va = NULL; + +end: + return len; +} + +#ifdef SXE2_CFG_DEBUG +static int drv_rdma_cq_ctx_modify(struct sxe2_rdma_cqc *ctx, 
size_t field,
+				   u64 value)
+{
+	int ret = 0;
+
+	/* Map the debugfs field index onto the matching soft-CQC member. */
+	switch (field) {
+	case CQ_EQ_NUM:
+		ctx->scqc.eqn = value;
+		break;
+	case CQ_LOG_DBR_SIZE:
+		ctx->scqc.log_dbr_size = value;
+		break;
+	case CQ_LOG_CQ_SIZE:
+		ctx->scqc.log_cq_size = value;
+		break;
+	case CQ_BRK_MODERATION_EN:
+		ctx->scqc.scqe_break_moderation_en = value;
+		break;
+	case CQ_OI:
+		ctx->scqc.oi = value;
+		break;
+	case CQ_OWNER_BIT:
+		ctx->scqc.sw_owner_bit = value;
+		break;
+	case CQ_STATUS:
+		ctx->scqc.sw_status = value;
+		break;
+	case CQ_LOG_PG_SZ:
+		ctx->scqc.log_page_size = value;
+		break;
+	case CQ_PAGE_OFFSET:
+		ctx->scqc.page_offset = value;
+		break;
+	case CQ_PBL_MODE:
+		ctx->scqc.pbl_mode = value;
+		break;
+	case CQ_TPH_VALUE:
+		ctx->scqc.TPH_value = value;
+		break;
+	case CQ_MAX_CNT:
+		ctx->scqc.cq_max_count = value;
+		break;
+	case CQ_PERIOD:
+		ctx->scqc.cq_period = value;
+		break;
+	case CQ_TPH_EN:
+		ctx->scqc.TPH_en = value;
+		break;
+	case CQ_VSI_INDEX:
+		ctx->scqc.vsi_index = value;
+		break;
+	case CQ_DBR_ADDR:
+		ctx->scqc.dbr_addr = value;
+		break;
+	case CQ_PBL_INDEX:
+		ctx->scqc.pbl_index = value;
+		break;
+	default:
+		ret = -EINVAL;
+		DRV_RDMA_LOG_WARN("invalid index %zu, ret %d\n", field, ret);
+	}
+
+	return ret;
+}
+#endif
+int drv_rdma_cq_write_field(struct sxe2_rdma_device *rdma_dev, void *data,
+			    enum drv_rdma_dbg_rsc_type type, char *buf)
+{
+#ifdef SXE2_CFG_DEBUG
+	size_t i;
+	int ret;
+	u64 temp_value;
+	struct sxe2_rdma_cqc *ctx;
+	int argc;
+	char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 };
+	struct sxe2_rdma_mcq *mcq;
+	struct sxe2_rdma_cq *cq;
+	struct sxe2_rdma_ctx_cq *cq_ctx = NULL;
+	struct sxe2_rdma_dma_mem query_cq;
+	struct sxe2_rdma_ctx_dev *dev_ctx;
+
+	if (type == SXE2_DBG_RSC_MCQ) {
+		mcq = (struct sxe2_rdma_mcq *)data;
+		cq_ctx = &mcq->ctx_cq;
+	} else if (type == SXE2_DBG_RSC_CQ) {
+		cq = (struct sxe2_rdma_cq *)data;
+		cq_ctx = &cq->cq_ctx;
+	}
+
+	dev_ctx = &(rdma_dev->rdma_func->ctx_dev);
+	memset(&query_cq, 0, sizeof(query_cq));
+	query_cq.size = sizeof(struct sxe2_rdma_cqc);
+	query_cq.va = dma_alloc_coherent(dev_ctx->hw->device, query_cq.size,
+					 &query_cq.pa, GFP_KERNEL);
+	if (!query_cq.va) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("query cq ctx alloc failed. 
ret:%d\n", + ret); + goto end; + } + memset(query_cq.va, 0, query_cq.size); + + ret = sxe2_drv_cq_query_op(rdma_dev, cq_ctx, query_cq.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query cq failed, ret (%d)\n", ret); + goto free_ctx; + } + ctx = (struct sxe2_rdma_cqc *)query_cq.va; + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(buf, &argc, argv); + if (ret) + goto free_ctx; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto free_ctx; + } + + for (i = 0; i < ARRAY_SIZE(cq_fields); i++) { + if (!strncmp(argv[0], cq_fields[i], strlen(cq_fields[i])) && + (strlen(cq_fields[i]) == strlen(argv[0]))) + break; + } + + ret = kstrtoull(argv[1], 10, &temp_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret); + goto free_ctx; + } + + DRV_RDMA_LOG_DEV_INFO("query cq i:%ld, temp_value:%llx\n", i, + temp_value); + + ret = drv_rdma_cq_ctx_modify(ctx, i, temp_value); + if (ret) + goto free_ctx; + + ret = sxe2_drv_cq_modify_op(rdma_dev, cq_ctx, ctx); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("modify cq ctx:%s failed, ret (%d)\n", + argv[0], ret); + +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_cq.size, query_cq.va, + query_cq.pa); + query_cq.va = NULL; + +end: + return ret; +#else + return 0; +#endif +} + +int drv_rdma_debug_cq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->cq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("cq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + cq->dbg_node = + drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_CQ, + rdma_dev->hdl->cq_debugfs, + drv_rdma_cq_read_field, + drv_rdma_cq_write_field, cq->cq_num, cq); + if (!cq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} + +void drv_rdma_debug_cq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->cq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("cq debugfs dir not exist\n"); + goto end; + } + + if (cq->dbg_node) { + drv_rdma_rm_res_tree(cq->dbg_node); + cq->dbg_node = NULL; + } + +end: + return; +} + +int drv_rdma_debug_mcq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_mcq *mcq) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->cq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("mcq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + mcq->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_MCQ, + rdma_dev->hdl->cq_debugfs, + drv_rdma_cq_read_field, + drv_rdma_cq_write_field, 0, mcq); + if (!mcq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} + +void drv_rdma_debug_mcq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_mcq *mcq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->cq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("cq debugfs dir not exist\n"); + goto end; + } + + if (mcq->dbg_node) { + 
drv_rdma_rm_res_tree(mcq->dbg_node); + mcq->dbg_node = NULL; + } + +end: + return; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..9912e638b9d253229b47d834bc3c61913934a02f --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_cq_debugfs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_cq_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_CQ_DEBUGFS_H__ +#define __SXE2_DRV_CQ_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +int drv_rdma_debug_cq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq); +void drv_rdma_debug_cq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_cq *cq); + +int drv_rdma_debug_mcq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_mcq *mcq); +void drv_rdma_debug_mcq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_mcq *mcq); +u64 drv_rdma_cq_read_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); +int drv_rdma_cq_write_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.c new file mode 100644 index 0000000000000000000000000000000000000000..8bc4a3d284915752c726e6342830a1f1e8c5ee06 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
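+ *
+ * Doorbell management: maps doorbell BAR pages into user contexts
+ * (via rdma_user_mmap entries, or a fallback hash table on older
+ * kernels) and hands out low-latency WQE (llwqe) slots to kernel
+ * consumers.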
+ *
+ * @file: sxe2_drv_db.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#define UVERBS_MODULE_NAME sxe2
+#include
+#include
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_db.h"
+#include "sxe2_drv_rdma_debugfs.h"
+
+#define SXE2_DRV_LLWQE_PER_DB 15
+#define SXE2_DRV_LLWQE_SIZE 256
+#define SXE2_DRV_LLWQE_OFFSET 256
+#define SXE2_DRV_LLWQE_DB_SIZE 16
+#define SXE2_DRV_LLWQE_DB_OFFSET 16
+#define SXE2_DRV_DB_PAGE_SIZE 4096
+#define SXE2_DRV_DB_PAGE_RSV_NUM 1
+
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+struct rdma_user_mmap_entry *
+sxe2_kinsert_user_mmap_entry(struct sxe2_rdma_kcontext *ctx,
+			     enum sxe2_drv_db_mmap_type mmap_flag,
+			     u64 *mmap_offset)
+{
+	struct sxe2_rdma_device *rdma_dev = to_dev(ctx->ibucontext.device);
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	int ret_code = 0;
+
+	struct sxe2_user_mmap_entry *entry =
+		kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry) {
+		DRV_RDMA_LOG_ERROR_BDF("kzalloc entry fail\n");
+		return NULL;
+	}
+
+	entry->page_idx = SXE2_DRV_DB_PAGE_RSV_NUM;
+	if (SXE2_DRV_DB_PAGE_SIZE == PAGE_SIZE)
+		entry->address = (rdma_func->bar_db_addr + PAGE_SIZE);
+	else
+		entry->address = rdma_func->bar_db_addr;
+	if (mmap_flag == SXE2_DRV_DB_MMAP_TYPE_WC)
+		entry->mmap_flag = SXE2_DRV_DB_MMAP_TYPE_WC;
+	else
+		entry->mmap_flag = SXE2_DRV_DB_MMAP_TYPE_NC;
+
+	ret_code = rdma_user_mmap_entry_insert(
+		&ctx->ibucontext, &entry->rdma_entry, PAGE_SIZE);
+	if (ret_code) {
+		DRV_RDMA_LOG_ERROR_BDF(
+			"db mmap entry insert fail, ret_code:%d\n", ret_code);
+		kfree(entry);
+		return NULL;
+	}
+
+	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+	return &entry->rdma_entry;
+}
+#else
+struct sxe2_user_mmap_entry *
+rdma_find_user_mmap_entry(struct sxe2_rdma_kcontext *kcontext,
+			  struct vm_area_struct *vma)
+{
+	struct sxe2_user_mmap_entry *entry;
+	unsigned long flags;
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return NULL;
+
+	spin_lock_irqsave(&kcontext->mmap_tbl_lock, flags);
+	hash_for_each_possible(kcontext->mmap_hash_tbl, entry, hlist,
+			       vma->vm_pgoff) {
+		if (entry->pgoff_key == vma->vm_pgoff) {
+			spin_unlock_irqrestore(&kcontext->mmap_tbl_lock, flags);
+			return entry;
+		}
+	}
+
+	spin_unlock_irqrestore(&kcontext->mmap_tbl_lock, flags);
+
+	return NULL;
+}
+
+bool find_key_in_mmap_tbl(struct sxe2_rdma_kcontext *ucontext, u64 key)
+{
+	struct sxe2_user_mmap_entry *entry;
+	bool ret_code = false;
+
+	/* Colliding hashes share a bucket, so every entry must be checked. */
+	hash_for_each_possible(ucontext->mmap_hash_tbl, entry, hlist, key) {
+		if (entry->pgoff_key == key) {
+			ret_code = true;
+			goto end;
+		}
+	}
+
+end:
+	return ret_code;
+}
+
+struct sxe2_user_mmap_entry *
+rdma_user_mmap_entry_add_hash(struct sxe2_rdma_kcontext *ucontext,
+			      enum sxe2_drv_db_mmap_type mmap_flag,
+			      u64 *mmap_offset)
+{
+	struct sxe2_rdma_device *rdma_dev = to_dev(ucontext->ibucontext.device);
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+
+	struct sxe2_user_mmap_entry *entry =
+		kzalloc(sizeof(*entry), GFP_KERNEL);
+	unsigned long flags;
+	int retry_cnt = 0;
+
+	if (!entry)
+		return NULL;
+
+	entry->address = (rdma_func->bar_db_addr + PAGE_SIZE);
+	entry->mmap_flag = mmap_flag;
+	entry->ucontext = ucontext;
+	do {
+		get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));
+
+		entry->pgoff_key >>= PAGE_SHIFT;
+
+		spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
+		if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
+			hash_add(ucontext->mmap_hash_tbl, &entry->hlist,
+				 entry->pgoff_key);
+			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
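+			/*
+			 * The randomly generated pgoff key is unique within
+			 * this ucontext, so the entry can be published to
+			 * the mmap caller.
+			 */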
+			goto hash_add_done;
+		}
+		spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
+	} while (retry_cnt++ < 10);
+
+	DRV_RDMA_LOG_ERROR_BDF(
+		"mmap table add failed: Cannot find a unique key\n");
+	kfree(entry);
+	return NULL;
+
+hash_add_done:
+	*mmap_offset = entry->pgoff_key << PAGE_SHIFT;
+
+	return entry;
+}
+
+void rdma_user_mmap_entry_del_hash(struct sxe2_user_mmap_entry *entry)
+{
+	struct sxe2_rdma_kcontext *ucontext;
+	unsigned long flags;
+
+	if (!entry)
+		return;
+
+	ucontext = entry->ucontext;
+
+	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
+	hash_del(&entry->hlist);
+	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
+
+	kfree(entry);
+}
+#endif
+int sxe2_kmmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+	struct sxe2_rdma_device *rdma_dev = to_dev(ibcontext->device);
+	struct sxe2_user_mmap_entry *mentry;
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	struct rdma_user_mmap_entry *entry = NULL;
+#else
+	struct sxe2_rdma_kcontext *kcontext = ibuctxto_kctx(ibcontext);
+#endif
+	pgprot_t prot;
+	phys_addr_t pfn;
+	int ret_code = 0;
+
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	entry = rdma_user_mmap_entry_get(ibcontext, vma);
+	if (!entry) {
+		ret_code = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("pgoff %#lx does not have a valid entry\n",
+				     vma->vm_pgoff);
+		goto end;
+	}
+	mentry = to_mmap(entry);
+#else
+	mentry = rdma_find_user_mmap_entry(kcontext, vma);
+	if (!mentry) {
+		DRV_RDMA_LOG_ERROR_BDF(
+			"verbs: pgoff[0x%lx] does not have a valid entry\n",
+			vma->vm_pgoff);
+		ret_code = -EINVAL;
+		goto end;
+	}
+#endif
+
+	pfn = (mentry->address >> PAGE_SHIFT);
+	if (mentry->mmap_flag == SXE2_DRV_DB_MMAP_TYPE_NC)
+		prot = pgprot_noncached(vma->vm_page_prot);
+	else
+		prot = pgprot_writecombine(vma->vm_page_prot);
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	ret_code = rdma_user_mmap_io(ibcontext, vma, pfn,
+				     entry->npages * PAGE_SIZE, prot,
+				     entry);
+	if (ret_code)
+		DRV_RDMA_LOG_DEV_ERR("mmap error ret %d, npages %zu\n",
+				     ret_code, entry->npages);
+#else
+	ret_code = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot);
+	if (ret_code)
+		DRV_RDMA_LOG_ERROR_BDF("mmap error ret %d\n", ret_code);
+
+#endif
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	rdma_user_mmap_entry_put(&mentry->rdma_entry);
+#endif
+end:
+	return ret_code;
+}
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+void sxe2_kmmap_free(struct rdma_user_mmap_entry *entry)
+{
+	struct sxe2_user_mmap_entry *mentry = to_mmap(entry);
+	struct sxe2_rdma_device *rdma_dev = to_dev(entry->ucontext->device);
+
+	switch (mentry->mmap_flag) {
+	case SXE2_DRV_DB_MMAP_TYPE_WC:
+	case SXE2_DRV_DB_MMAP_TYPE_NC:
+		kfree(mentry);
+		break;
+	default:
+		DRV_RDMA_LOG_DEV_ERR("mmap free flag invalid %#x\n",
+				     mentry->mmap_flag);
+	}
+}
+#endif
+static void db_kdec_count_and_free(struct kref *kref)
+{
+	struct sxe2_db_page *db_page =
+		container_of(kref, struct sxe2_db_page, ref_count);
+	struct sxe2_rdma_pci_f *rdma_func = db_page->rdma_func;
+
+	list_del(&db_page->list);
+	iounmap(db_page->map);
+	sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_dbs, db_page->index);
+	bitmap_free(db_page->llwqe_bitmap);
+	kfree(db_page);
+}
+
+static void db_kput_llwqe_kernel(struct sxe2_rdma_pci_f *rdma_func,
+				 struct sxe2_llwqe *llwqe)
+{
+	struct sxe2_db_llwqe_head *db_head;
+	struct sxe2_db_page *db_page;
+	struct list_head *head;
+	struct mutex *lock;
+	u32 dbi;
+
+	db_head = &rdma_func->db_head;
+	head = &db_head->list;
+	lock = &db_head->lock;
+	db_page = llwqe->db_page;
+
+	dbi = 
(u32)((uintptr_t)llwqe->wqe_addr - (uintptr_t)db_page->map - + SXE2_DRV_LLWQE_OFFSET) / + SXE2_DRV_LLWQE_SIZE; + + mutex_lock(lock); + db_page->llwqe_avail++; + clear_bit((int)dbi, db_page->llwqe_bitmap); + + if (db_page->llwqe_avail == 1) + list_add_tail(&db_page->list, head); + + if (refcount_dec_and_test(&db_page->ref_count.refcount)) { + db_kdec_count_and_free(&db_page->ref_count); + rdma_func->db = NULL; + } + + memset(llwqe, 0, sizeof(*llwqe)); + mutex_unlock(lock); +} + +static void db_kfree_db_kernel(struct sxe2_rdma_pci_f *rdma_func) +{ + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + + mutex_lock(&rdma_func->db_head.lock); + if (refcount_dec_and_test(&rdma_func->db->ref_count.refcount)) { + db_kdec_count_and_free(&rdma_func->db->ref_count); + rdma_func->db = NULL; + } else { + DRV_RDMA_LOG_DEV_ERR( + "db ref_count %#x, > 0\n", + refcount_read(&rdma_func->db->ref_count.refcount)); + } + mutex_unlock(&rdma_func->db_head.lock); +} + +u32 get_db_page_multiplier(void) +{ + return PAGE_SIZE/SXE2_RDMA_DB_PAGE_SIZE; +} + +static int db_kalloc_page(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_db_page *db_page, bool map_wc) +{ + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + phys_addr_t pfn; + int ret_code = 0; + u32 page_index_mod; + u32 db_page_multiplier = get_db_page_multiplier(); + + db_page->rdma_func = rdma_func; + db_page->llwqe_bitmap = + bitmap_zalloc(SXE2_DRV_LLWQE_PER_DB, GFP_KERNEL); + if (unlikely(!db_page->llwqe_bitmap)) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("db page bitmap zalloc fail, ret:%d\n", + ret_code); + goto end; + } + + db_page->llwqe_num = SXE2_DRV_LLWQE_PER_DB; + db_page->llwqe_avail = SXE2_DRV_LLWQE_PER_DB; + + ret_code = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_dbs, + rdma_func->max_dbs, &db_page->index, + &rdma_func->next_db); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("db page index alloc fail, ret:%d\n", + ret_code); + goto free_bitmap; + } + + pfn = (rdma_func->bar_db_addr >> PAGE_SHIFT) + + db_page->index / (db_page_multiplier); + + if (map_wc) { + db_page->wc = true; + db_page->map = ioremap_wc(pfn << PAGE_SHIFT, + PAGE_SIZE); + if (!db_page->map) { + ret_code = -EAGAIN; + DRV_RDMA_LOG_DEV_ERR( + "db page ioremap_wc fail, pfn:%llu ret:%d\n", + pfn, ret_code); + goto free_index; + } + } else { + db_page->wc = false; + db_page->map = + ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!db_page->map) { + ret_code = -EAGAIN; + DRV_RDMA_LOG_DEV_ERR( + "db page ioremap fail, pfn:%llu ret:%d\n", pfn, + ret_code); + goto free_index; + } + } + + page_index_mod = db_page->index % db_page_multiplier; + if (page_index_mod != 0) + db_page->map = (void *)((u64)db_page->map + + page_index_mod * SXE2_RDMA_DB_PAGE_SIZE); + kref_init(&db_page->ref_count); + + if (db_page->map) { + DRV_RDMA_LOG_DEV_DEBUG( + "alloc db page: map_addr %p, map_addrlx %#lx, index %d, llwqe_num %d\n", + db_page->map, (uintptr_t)db_page->map, db_page->index, + db_page->llwqe_num); + DRV_RDMA_LOG_DEV_DEBUG( + "alloc db page: [void++] cq_arm %p, cq_info %p, eq %p\n", + (db_page->map + SXE2_RDMA_DB_CQ_ARM_OFFSET), + (db_page->map + SXE2_RDMA_DB_CQ_INFO_OFFSET), + (db_page->map + SXE2_RDMA_DB_EQ_INFO_OFFSET)); + DRV_RDMA_LOG_DEV_DEBUG( + "alloc db page: [uintptr++] cq_arm %#lx, cq_info %#lx, eq %#lx\n", + ((uintptr_t)db_page->map + 0x8), + ((uintptr_t)db_page->map + 0xC), + ((uintptr_t)db_page->map + 0x4)); + DRV_RDMA_LOG_DEV_DEBUG( + "alloc db page: [u32*++] cq_arm %p, cq_info %p, eq %p\n", + (u32 __iomem *)((uintptr_t)db_page->map + 0x8), + (u32 __iomem 
*)((uintptr_t)db_page->map + 0xC), + (u32 __iomem *)((uintptr_t)db_page->map + 0x4)); + } else { + DRV_RDMA_LOG_DEV_DEBUG( + "alloc db page: index %d, llwqe_num %d\n", + db_page->index, db_page->llwqe_num); + } + + goto end; + +free_index: + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_dbs, db_page->index); +free_bitmap: + bitmap_free(db_page->llwqe_bitmap); +end: + return ret_code; +} + +static int db_kget_llwqe_kernel(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_llwqe *llwqe, bool map_wc) +{ + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_db_llwqe_head *db_head; + struct sxe2_db_page *db_page = NULL; + struct list_head *head; + struct mutex *lock; + u64 dbi; + int ret_code = 0; + + db_head = &rdma_func->db_head; + head = &db_head->list; + lock = &db_head->lock; + + mutex_lock(lock); + if (list_empty(head)) { + db_page = kzalloc_node(sizeof(*db_page), GFP_KERNEL, + rdma_dev->numa_node); + if (unlikely(!db_page)) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("db page kzalloc fail, ret:%d\n", + ret_code); + goto end; + } + ret_code = db_kalloc_page(rdma_func, db_page, map_wc); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR( + "alloc kernel db page fail, ret:%d\n", + ret_code); + goto free_db; + } + rdma_func->db = db_page; + list_add(&db_page->list, head); + } else { + db_page = list_entry(head->next, struct sxe2_db_page, list); + kref_get(&db_page->ref_count); + } + + dbi = find_first_zero_bit(db_page->llwqe_bitmap, db_page->llwqe_num); + set_bit((int)dbi, db_page->llwqe_bitmap); + db_page->llwqe_avail--; + if (db_page->llwqe_avail == 0) + list_del(&db_page->list); + + llwqe->wqe_addr = db_page->map + SXE2_DRV_LLWQE_OFFSET + + dbi * SXE2_DRV_LLWQE_SIZE; + llwqe->db_addr = db_page->map + SXE2_DRV_LLWQE_DB_OFFSET + + dbi * SXE2_DRV_LLWQE_DB_SIZE; + llwqe->db_page = db_page; + llwqe->wc = db_page->wc; + llwqe->index = db_page->index; + + goto end; + +free_db: + kfree(db_page); +end: + mutex_unlock(lock); + return ret_code; +} + +static int db_kalloc_page_kernel(struct sxe2_rdma_pci_f *rdma_func) +{ + struct sxe2_db_page *db_page = NULL; + struct sxe2_db_llwqe_head *db_head = &rdma_func->db_head; + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + int ret_code = 0; + + mutex_lock(&db_head->lock); + + if (!list_empty(&db_head->list)) { + db_page = list_first_entry(&db_head->list, struct sxe2_db_page, + list); + kref_get(&db_page->ref_count); + rdma_func->db = db_page; + goto end; + } + + db_page = + kzalloc_node(sizeof(*db_page), GFP_KERNEL, rdma_dev->numa_node); + if (unlikely(!db_page)) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("db page kzalloc fail, ret:%d\n", + ret_code); + goto end; + } + + ret_code = db_kalloc_page(rdma_func, db_page, false); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("alloc kernel db page fail, ret:%d\n", + ret_code); + goto free_db; + } + rdma_func->db = db_page; + list_add(&db_page->list, &db_head->list); + + goto end; + +free_db: + kfree(db_page); +end: + mutex_unlock(&db_head->lock); + return ret_code; +} + +int sxe2_kinit_doorbell(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + int ret_code = 0; + +#ifdef SXE2_CFG_DEBUG + ret_code = drv_rdma_debug_db_add(rdma_dev); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("init db debugfs fail, ret:%d\n", + ret_code); + goto end; + } +#endif + + mutex_init(&rdma_func->db_head.lock); + INIT_LIST_HEAD(&rdma_func->db_head.list); + mutex_init(&rdma_func->db_mmap_entry_head.lock); + INIT_LIST_HEAD(&rdma_func->db_mmap_entry_head.list); + + ret_code = 
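+	/* reserve the kernel-owned doorbell page first; the LLWQE slot
+	 * handed out below is carved out of that page
+	 */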
db_kalloc_page_kernel(rdma_func); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("alloc db page fail, ret:%d\n", ret_code); + goto err_db_page; + } + + ret_code = db_kget_llwqe_kernel(rdma_func, &rdma_func->llwqe, false); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("get LL_WQE fail, ret:%d\n", ret_code); + goto err_llwqe; + } + spin_lock_init(&rdma_func->llwqe.lock); + + goto end; + +err_llwqe: + db_kfree_db_kernel(rdma_func); +err_db_page: + mutex_destroy(&rdma_func->db_mmap_entry_head.lock); + mutex_destroy(&rdma_func->db_head.lock); +end: + return ret_code; +} + +void sxe2_kfree_doorbell(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + db_kput_llwqe_kernel(rdma_func, &rdma_func->llwqe); + db_kfree_db_kernel(rdma_func); +} +#ifndef RDMA_MMAP_DB_NOT_SUPPORT +static struct sxe2_user_mmap_entry * +db_kalloc_entry(struct sxe2_rdma_kcontext *ctx, + enum sxe2_drv_db_page_type alloc_type, u32 db_page_id_align) +{ + struct sxe2_user_mmap_entry *entry; + struct sxe2_rdma_device *rdma_dev = to_dev(ctx->ibucontext.device); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + int ret_code; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + entry = ERR_PTR(-ENOMEM); + DRV_RDMA_LOG_DEV_ERR("kzalloc entry fail\n"); + goto end; + } + + entry->page_idx = db_page_id_align; + entry->address = + (rdma_func->bar_db_addr + db_page_id_align * SXE2_DRV_DB_PAGE_SIZE); + if (alloc_type == SXE2_DRV_DB_PAGE_TYPE_LLWQE) + entry->mmap_flag = SXE2_DRV_DB_MMAP_TYPE_WC; + else + entry->mmap_flag = SXE2_DRV_DB_MMAP_TYPE_NC; + + ret_code = rdma_user_mmap_entry_insert( + &ctx->ibucontext, &entry->rdma_entry, PAGE_SIZE); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("db mmap entry insert fail, ret_code:%d\n", + ret_code); + goto free_entry; + } + goto end; +free_entry: + kfree(entry); + entry = ERR_PTR(ret_code); +end: + return entry; +} +#endif +#ifdef UVERBS_UOBJ_CREATE_NOT_SUPPORT +void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle, + u16 idx) +{ + struct bundle_priv *pbundle = + container_of(bundle, struct bundle_priv, bundle); + + __set_bit(uapi_bkey_attr(uapi_key_attr(idx)), + pbundle->uobj_hw_obj_valid); +} +EXPORT_SYMBOL(uverbs_finalize_uobj_create); +#endif + +bool uctx_db_page_has_alloced(struct sxe2_rdma_device *rdma_dev, + struct ib_ucontext *uctx, u32 page_idx) +{ + bool ret = false; + struct sxe2_db_ucontext *db_ucontext_entry; + struct sxe2_db_ucontext *uconetxt_next; + struct sxe2_db_mmap_entry *db_mmap_entry; + struct sxe2_db_mmap_entry *entry_next; + + mutex_lock(&rdma_dev->rdma_func->db_mmap_entry_head.lock); + list_for_each_entry_safe(db_ucontext_entry, uconetxt_next, + &rdma_dev->rdma_func->db_mmap_entry_head.list, list) { + if (db_ucontext_entry->ibucontext == uctx) { + list_for_each_entry_safe(db_mmap_entry, + entry_next, &db_ucontext_entry->entry_list, list) { + if (db_mmap_entry->page_idx == page_idx) { + ret = true; + mutex_unlock(&rdma_dev->rdma_func->db_mmap_entry_head.lock); + goto end; + } + } + } + } + mutex_unlock(&rdma_dev->rdma_func->db_mmap_entry_head.lock); + +end: + return ret; +} + +int db_kalloc_llwqe_mmap_entry(struct sxe2_rdma_device *rdma_dev, + struct ib_udata *udata, + struct sxe2_rdma_qp *qp, + u32 *page_id, + u32 *length, + u64 *mmap_offset) +{ + struct sxe2_user_mmap_entry *entry; + int ret_code = 0; + struct sxe2_rdma_pci_f *rdma_func; + u32 db_index; + u32 db_page_id_align; + u32 db_paga_id_mod; + struct sxe2_rdma_kcontext *ucontext; + struct sxe2_db_ucontext *db_ucontext_entry; + struct 
sxe2_db_ucontext *next;
+	struct sxe2_db_mmap_entry *db_mmap_entry = NULL;
+	struct sxe2_db_page_idx *db_page_idx_entry = NULL;
+	u32 db_page_multiplier = get_db_page_multiplier();
+#ifdef HAVE_NO_RDMA_UDATA_TO_DRV_CONTEXT
+	ucontext = to_rdma_kcontext(qp->pd->ibpd.uobject->context);
+#else
+	ucontext = rdma_udata_to_drv_context(
+		udata, struct sxe2_rdma_kcontext, ibucontext);
+#endif
+	rdma_func = rdma_dev->rdma_func;
+
+	ret_code = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_dbs,
+				    rdma_func->max_dbs, &db_index,
+				    &rdma_func->next_db);
+	if (ret_code) {
+		DRV_RDMA_LOG_DEV_ERR("db page index alloc fail, ret_code:%d\n",
+				     ret_code);
+		goto end;
+	}
+	*page_id = db_index;
+	db_paga_id_mod = db_index % db_page_multiplier;
+	db_page_id_align = db_index - db_paga_id_mod;
+	db_page_idx_entry = kzalloc(sizeof(*db_page_idx_entry), GFP_KERNEL);
+	if (!db_page_idx_entry) {
+		ret_code = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("kzalloc db_page_idx_entry fail\n");
+		goto free_rsrc;
+	}
+	db_page_idx_entry->db_page_idx = db_index;
+	mutex_lock(&rdma_func->db_mmap_entry_head.lock);
+	list_for_each_entry_safe(db_ucontext_entry, next,
+				 &rdma_func->db_mmap_entry_head.list, list) {
+		if (db_ucontext_entry->ibucontext == &ucontext->ibucontext)
+			list_add_tail(&db_page_idx_entry->list, &db_ucontext_entry->db_pageid_list);
+	}
+	mutex_unlock(&rdma_func->db_mmap_entry_head.lock);
+	if ((db_paga_id_mod != 0) &&
+	    uctx_db_page_has_alloced(rdma_dev,
+				     &ucontext->ibucontext, db_page_id_align)) {
+		DRV_RDMA_LOG_DEV_DEBUG("uctx(%p) has alloced page_id(%u).\n",
+				       &ucontext->ibucontext, *page_id);
+		*length = 0;
+		*mmap_offset = 0;
+		goto end;
+	}
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	entry = db_kalloc_entry(ucontext, SXE2_DRV_DB_PAGE_TYPE_LLWQE, db_page_id_align);
+	if (IS_ERR(entry)) {
+		ret_code = (int)PTR_ERR(entry);
+		DRV_RDMA_LOG_DEV_ERR("alloc db entry fail, ret_code %d\n",
+				     ret_code);
+		goto free_idx_entry;
+	}
+
+	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+	*length = entry->rdma_entry.npages * PAGE_SIZE;
+#else
+	spin_lock_init(&ucontext->mmap_tbl_lock);
+	entry = rdma_user_mmap_entry_add_hash(ucontext, SXE2_DRV_DB_MMAP_TYPE_NC,
+					      mmap_offset);
+	if (!entry) {
+		ret_code = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("alloc db entry fail, ret_code %d\n",
+				     ret_code);
+		goto free_idx_entry;
+	}
+	entry->mmap_flag = SXE2_DRV_DB_MMAP_TYPE_WC;
+	entry->page_idx = db_page_id_align;
+	entry->address =
+		(rdma_func->bar_db_addr + db_page_id_align * SXE2_DRV_DB_PAGE_SIZE);
+	*length = (u32)DIV_ROUND_UP(PAGE_SIZE, PAGE_SIZE) *
+		  PAGE_SIZE;
+#endif
+	db_mmap_entry = kzalloc(sizeof(*db_mmap_entry), GFP_KERNEL);
+	if (!db_mmap_entry) {
+		ret_code = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("kzalloc db_mmap_entry fail\n");
+		goto free_entry;
+	}
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	db_mmap_entry->mmap_entry = &entry->rdma_entry;
+#else
+	db_mmap_entry->mmap_entry = entry;
+#endif
+	db_mmap_entry->page_idx = db_page_id_align;
+	mutex_lock(&rdma_func->db_mmap_entry_head.lock);
+	list_for_each_entry_safe(db_ucontext_entry, next,
+				 &rdma_func->db_mmap_entry_head.list, list) {
+		if (db_ucontext_entry->ibucontext == &ucontext->ibucontext)
+			list_add_tail(&db_mmap_entry->list, &db_ucontext_entry->entry_list);
+	}
+	mutex_unlock(&rdma_func->db_mmap_entry_head.lock);
+	goto end;
+
+free_entry:
+#ifndef RDMA_MMAP_DB_NOT_SUPPORT
+	rdma_user_mmap_entry_remove(&entry->rdma_entry);
+#else
+	rdma_user_mmap_entry_del_hash(entry);
+#endif
+free_idx_entry:
+	list_del(&db_page_idx_entry->list);
+	kfree(db_page_idx_entry);
+	db_page_idx_entry = NULL;
+free_rsrc:
+	sxe2_kfree_rsrc(rdma_func,
rdma_func->allocated_dbs, db_index); +end: + return ret_code; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.h new file mode 100644 index 0000000000000000000000000000000000000000..bb7220a0aa5e16d73e774f774778a48a5773dd3e --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_db.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_DB_H +#define SXE2_DRV_DB_H + +#include +#include +#include +#include "sxe2_drv_rdma_common.h" + +enum sxe2_drv_db_page_type { + SXE2_DRV_DB_PAGE_TYPE_LLWQE = 0x0, + SXE2_DRV_DB_PAGE_TYPE_NO_LLWQE = 0x1, +}; + +enum sxe2_drv_db_mmap_type { + SXE2_DRV_DB_MMAP_TYPE_WC = 1, + SXE2_DRV_DB_MMAP_TYPE_NC = 2, +}; + +enum drv_db_alloc_attrs { + SXE2_DRV_ATTR_DB_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + SXE2_DRV_ATTR_DB_OBJ_ALLOC_TYPE, + SXE2_DRV_ATTR_DB_OBJ_ALLOC_MMAP_OFFSET, + SXE2_DRV_ATTR_DB_OBJ_ALLOC_MMAP_LENGTH, + SXE2_DRV_ATTR_DB_OBJ_ALLOC_PAGE_ID, +}; + +enum sxe2_drv_db_obj_destroy_attrs { + SXE2_DRV_ATTR_DB_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum sxe2_drv_db_obj_methods { + SXE2_DRV_METHOD_DB_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT), + SXE2_DRV_METHOD_DB_OBJ_DESTROY, +}; + +enum sxe2_drv_objects { + SXE2_DRV_OBJECT_DB = (1U << UVERBS_ID_NS_SHIFT), +}; + +struct sxe2_db_page { + void __iomem *map; + bool wc; + u32 index; + struct list_head list; + u32 llwqe_num; + u32 llwqe_avail; + unsigned long *llwqe_bitmap; + struct kref ref_count; + struct sxe2_rdma_pci_f *rdma_func; +}; + +struct sxe2_db_ucontext { + struct ib_ucontext *ibucontext; + struct list_head db_pageid_list; + struct list_head entry_list; + struct list_head list; +}; + +struct sxe2_db_mmap_entry { +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + struct rdma_user_mmap_entry *mmap_entry; +#else + struct sxe2_user_mmap_entry *mmap_entry; +#endif + u32 page_idx; + struct list_head list; +}; + +struct sxe2_db_page_idx { + u32 db_page_idx; + struct list_head list; +}; + +int sxe2_kmmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma); +#ifndef RDMA_MMAP_DB_NOT_SUPPORT +void sxe2_kmmap_free(struct rdma_user_mmap_entry *entry); +#endif +int sxe2_kinit_doorbell(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kfree_doorbell(struct sxe2_rdma_device *rdma_dev); +#ifndef RDMA_MMAP_DB_NOT_SUPPORT +struct rdma_user_mmap_entry * +sxe2_kinsert_user_mmap_entry(struct sxe2_rdma_kcontext *ctx, + enum sxe2_drv_db_mmap_type mmap_flag, + u64 *mmap_offset); +#else +struct sxe2_user_mmap_entry * +rdma_find_user_mmap_entry(struct sxe2_rdma_kcontext *kcontext, + struct vm_area_struct *vma); +bool find_key_in_mmap_tbl(struct sxe2_rdma_kcontext *ucontext, u64 key); +struct sxe2_user_mmap_entry * +rdma_user_mmap_entry_add_hash(struct sxe2_rdma_kcontext *ucontext, + enum sxe2_drv_db_mmap_type mmap_flag, + u64 *mmap_offset); +void rdma_user_mmap_entry_del_hash(struct sxe2_user_mmap_entry *entry); +#endif + +int db_kfree_mmap_entry(struct ib_uobject *uobject, enum rdma_remove_reason why, + struct uverbs_attr_bundle *attrs); + +u32 get_db_page_multiplier(void); + +bool uctx_db_page_has_alloced(struct sxe2_rdma_device *rdma_dev, + struct ib_ucontext *uctx, u32 page_idx); + +int db_kalloc_llwqe_mmap_entry(struct sxe2_rdma_device *rdma_dev, + struct ib_udata *udata, + struct sxe2_rdma_qp *qp, + u32 *page_id, u32 *length, + u64 *mmap_offset); + 
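+/* debugfs hook, only built in SXE2_CFG_DEBUG configurations */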
+#ifdef SXE2_CFG_DEBUG
+int drv_rdma_debug_db_add(struct sxe2_rdma_device *rdma_dev);
+#endif
+
+#ifdef UVERBS_UOBJ_CREATE_NOT_SUPPORT
+struct uverbs_api_ioctl_method {
+	int(__rcu *handler)(struct uverbs_attr_bundle *attrs);
+	DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN);
+	u16 bundle_size;
+	u8 use_stack : 1;
+	u8 driver_method : 1;
+	u8 disabled : 1;
+	u8 has_udata : 1;
+	u8 key_bitmap_len;
+	u8 destroy_bkey;
+};
+
+struct bundle_alloc_head {
+	struct bundle_alloc_head *next;
+	u8 data[];
+};
+
+struct bundle_priv {
+	struct bundle_alloc_head alloc_head;
+	struct bundle_alloc_head *allocated_mem;
+	size_t internal_avail;
+	size_t internal_used;
+
+	struct radix_tree_root *radix;
+	const struct uverbs_api_ioctl_method *method_elm;
+	void __rcu **radix_slots;
+	unsigned long radix_slots_len;
+	u32 method_key;
+
+	struct ib_uverbs_attr __user *user_attrs;
+	struct ib_uverbs_attr *uattrs;
+
+	DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+	DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
+	DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);
+
+	struct uverbs_attr_bundle bundle;
+	u64 internal_buffer[32];
+};
+#endif
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db_debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..3f1a399267f1e8bf501118becebfefe7dd6692c6
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_db_debugfs.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_db_debugfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#include "sxe2_compat.h"
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+#include "sxe2_drv_db.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_debugfs.h"
+
+#ifdef SXE2_CFG_DEBUG
+
+static ssize_t drv_rdma_db_read_bitmap(struct file *filp, char __user *buf,
+				       size_t count, loff_t *off)
+{
+	ssize_t ret = 0;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	u32 zero_count = 0;
+	u32 used_count = 0;
+	u32 i = 0;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rdma_pci_f *rdma_func;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"DB DEBUGFS:find dev struct from private_data failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	rdma_func = rdma_dev->rdma_func;
+
+	rsp = kzalloc(SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"DB DEBUGFS:db bitmap rsp kzalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	len_total += dbg_vsnprintf(rsp_end, len_total, "DB Status:\n");
+	len_total +=
+		dbg_vsnprintf(rsp_end, len_total, "Max DB Page count: %#x\n",
+			      rdma_func->max_dbs);
+
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "Bar DB Phys Addr is from: %#llx\n",
+				   (u64)rdma_func->bar_db_addr);
+	len_total += dbg_vsnprintf(
+		rsp_end, len_total,
+		"Kernel DB Page: Index %#x, Vir Map Addr %#llx, wc %d\n"
+		"ll_wqe num %#x, ll_wqe avail %#x, ref_count %#x\n",
+		rdma_func->db->index, (u64)(uintptr_t)rdma_func->db->map,
+		rdma_func->db->wc, rdma_func->db->llwqe_num,
+		rdma_func->db->llwqe_avail,
+		refcount_read(&rdma_func->db->ref_count.refcount));
+
+	len_total += dbg_vsnprintf(rsp_end, len_total, "DB Bitmap:\n");
+	for (i = 0; i < rdma_func->max_dbs; i++) {
+		if
(!test_bit((int)i, rdma_func->allocated_dbs)) + zero_count++; + else + used_count++; + } + len_total += dbg_vsnprintf(rsp_end, len_total, + "DB Page USED %#x, NULL %#x\n", used_count, + zero_count); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("DB DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_db_bitmap_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_db_read_bitmap, +}; + +int drv_rdma_debug_db_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "DB DEBUGFS:debugfs root dir not exist, ret (%d)\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->db_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "DB DEBUGFS:db debugfs dir not exist, ret (%d)\n", ret); + goto end; + } + + debugfs_create_file("db_bitmap", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->db_debugfs, rdma_dev, + &sxe2_rdma_db_bitmap_fops); + +end: + return ret; +} + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.c new file mode 100644 index 0000000000000000000000000000000000000000..ebdb602d7ad5e74298274d213756e7dea5108386 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.c @@ -0,0 +1,3046 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_eq.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/sysfs.h" +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_cq.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_db.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_eq_debugfs.h" +#include "sxe2_drv_aux.h" +#include "sxe2_drv_srq.h" + +#define SXE2_EQC_PHY_ADDR_OFFSET (8) + +static void sxe2_rdma_ena_intr(struct sxe2_rdma_ctx_dev *dev, u32 msix_id) +{ + dev->irq_ops->sxe2_rdma_en_irq(dev, msix_id); +} + +static void sxe2_eq_update_ci(u32 __iomem *addr, u32 cons_index, u32 eqn) +{ + u32 val = (eqn & SXE2_DB_EQN_MASK) | (cons_index << SXE2_DB_CI_OFFSET); + + DRV_RDMA_LOG_DEBUG("db eqn:%d addr:%p cons_index:%d\n", eqn, addr, + cons_index); + set_32bit_val(addr, 0, val); + mb(); +} + +static void *sxe2_ctx_process_ceq(struct sxe2_rdma_pci_f *rf, + struct sxe2_rdma_ctx_ceq *ceq) +{ + u64 temp; + __le64 *ceqe; + struct sxe2_rdma_ctx_cq *cq = NULL; + struct sxe2_rdma_cq *temp_cq; + struct sxe2_rdma_mcq *temp_mcq; + u8 polarity; + u32 cq_idx; + struct sxe2_rdma_device *rdma_dev = rf->rdma_dev; + + do { + if (rdma_dev->cache_line_64_en == false) + ceqe = SXE2_GET_CURRENT_CEQ_ELEM(ceq); + else + ceqe = SXE2_GET_CURRENT_CEQ_ELEM_HYGON(ceq); + + get_64bit_val(ceqe, 0, &temp); + DRV_RDMA_LOG_DEV_DEBUG("ceq:tail %#x, ceqe %#llx\n", + SXE2_RING_CURRENT_TAIL((ceq)->ceq_ring), + temp); + polarity = (u8)FIELD_GET(SXE2_CEQE_VALID, temp); + cq_idx = FIELD_GET(SXE2_CEQE_CQID, temp); + DRV_RDMA_LOG_DEV_DEBUG( + "ceq valid:%d ceq->polarity:%d. 
cq_idx:%d\n", polarity, + ceq->polarity, cq_idx); + if (polarity != ceq->polarity) + return NULL; + + if (cq_idx > rf->max_cq) { + cq_idx = SXE2_INVALID_CQ_IDX; + SXE2_RING_MOVE_TAIL(ceq->ceq_ring); + + if (!SXE2_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + continue; + } + + if (cq_idx == 0) { + temp_mcq = &rf->mcq; + cq = &temp_mcq->ctx_cq; + } else { + temp_cq = rf->cq_table[cq_idx]; + if (!temp_cq) { + cq_idx = SXE2_INVALID_CQ_IDX; + SXE2_RING_MOVE_TAIL(ceq->ceq_ring); + + if (!SXE2_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + continue; + } + + cq = &temp_cq->cq_ctx; + } + + SXE2_RING_MOVE_TAIL(ceq->ceq_ring); + ceq->cons_index = SXE2_RING_CURRENT_TAIL(ceq->ceq_ring); + if (!SXE2_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + } while (cq_idx == SXE2_INVALID_CQ_IDX); + + return cq; +} + +void sxe2_rdma_comp_handler(struct sxe2_rdma_ctx_cq *cq_ctx) +{ + struct sxe2_rdma_cq *cq = cq_ctx->back_cq; + + if (!cq->user_mode) + atomic_set(&cq->armed, 0); + + if (cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +static void sxe2_rdma_err_inject_ci_no_update(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ctx_ceq, u32 update_ci_eqn) +{ + if ((rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate == + SXE2_INJECT_EQ_CEQ0_VALUE && + ctx_ceq->ceq_id == 0) || + (rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate == + SXE2_INJECT_EQ_CEQS_VALUE && + ctx_ceq->ceq_id != 0)) { + DRV_RDMA_LOG_DEBUG_BDF("inject ceq%u ci no update err.\n", ctx_ceq->ceq_id); + } else { + sxe2_eq_update_ci(ctx_ceq->doorbell, ctx_ceq->cons_index, + update_ci_eqn); + DRV_RDMA_LOG_DEBUG_BDF("ceq:%d process update ci:%d.\n", + ctx_ceq->ceq_id, update_ci_eqn); + } +} +#endif +static void sxe2_process_ceq(struct sxe2_rdma_pci_f *rf, + struct sxe2_rdma_ceq *ceq) +{ + struct sxe2_rdma_ctx_ceq *ctx_ceq; + struct sxe2_rdma_ctx_cq *cq; + unsigned long flags = 0; + u32 num_eqes = 0; + u32 update_ci_eqn; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_device *rdma_dev; + + ctx_ceq = &ceq->ctx_ceq; + dev = ctx_ceq->dev; + rdma_dev = to_rdmadev(dev); + DRV_RDMA_LOG_DEV_DEBUG("ceq:%d process start.\n", ctx_ceq->ceq_id); + + update_ci_eqn = ctx_ceq->ceq_id + 1; + + do { + DRV_RDMA_LOG_DEV_DEBUG("ceqe:%d process.\n", num_eqes); + spin_lock_irqsave(&ceq->ce_lock, flags); + cq = sxe2_ctx_process_ceq(rf, ctx_ceq); + if (!cq) { + DRV_RDMA_LOG_DEV_DEBUG("ceq process end.\n"); + spin_unlock_irqrestore(&ceq->ce_lock, flags); + break; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->inject_aeq.db_ceqn_err) { + update_ci_eqn = 0xffff; + DRV_RDMA_LOG_DEV_DEBUG("inject db ceqn 0xffff err.\n"); + } +#endif + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + sxe2_rdma_err_inject_ci_no_update(rdma_dev, ctx_ceq, update_ci_eqn); +#else + sxe2_eq_update_ci(ctx_ceq->doorbell, ctx_ceq->cons_index, + update_ci_eqn); + DRV_RDMA_LOG_DEV_DEBUG("ceq:%d process update ci:%d.\n", + ctx_ceq->ceq_id, update_ci_eqn); +#endif + + ++num_eqes; + + if (cq->cq_type == SXE2_RDMA_CQ_TYPE_IO) + sxe2_rdma_comp_handler(cq); + spin_unlock_irqrestore(&ceq->ce_lock, flags); + + if (cq->cq_type == SXE2_RDMA_CQ_TYPE_MQ && rf->mq_cmpl_wq != NULL) + queue_work(rf->mq_cmpl_wq, &rf->mq_cmpl_work); + } while (num_eqes < SXE2_EQ_POLLING_BUDGET); +} + +struct sxe2_ae_desc ae_desc_list[] = { + { SXE2_AE_AMP_UNALLOCATED_STAG, "SXE2_AE_AMP_UNALLOCATED_STAG", 0, + "Unallocated memory key 
(L-Key/R-Key)" }, + { SXE2_AE_AMP_INVALID_STAG, "SXE2_AE_AMP_INVALID_STAG", 0, + "Invalid memory key (L-Key/R-Key)" }, + { SXE2_AE_AMP_BAD_QP, "SXE2_AE_AMP_BAD_QP", 0, + "Memory protection error: Accessing Memory Window (MW) which belongs to a different QP" }, + { SXE2_AE_AMP_BAD_PD, "SXE2_AE_AMP_BAD_PD", 0, + "Memory protection error: Accessing Memory Window\n" + "\t(MW)/Memory Region (MR) which belongs to a different PD" }, + { SXE2_AE_AMP_BAD_STAG_KEY, "SXE2_AE_AMP_BAD_STAG_KEY", 0, + "Bad memory key (L-Key/R-Key)" }, + { SXE2_AE_AMP_BAD_STAG_INDEX, "SXE2_AE_AMP_BAD_STAG_INDEX", 0, + "Bad memory key (L-Key/R-Key): Too large memory key index" }, + { SXE2_AE_AMP_BOUNDS_VIOLATION, "SXE2_AE_AMP_BOUNDS_VIOLATION", 0, + "Memory Window (MW)/Memory Region (MR) bounds violation" }, + { SXE2_AE_AMP_RIGHTS_VIOLATION, "SXE2_AE_AMP_RIGHTS_VIOLATION", 0, + "Memory Window (MW)/Memory Region (MR) rights violation" }, + { SXE2_AE_AMP_FASTREG_VALID_STAG, "SXE2_AE_AMP_FASTREG_VALID_STAG", 0, + "Fastreg error: Registration to a valid MR" }, + { SXE2_AE_AMP_FASTREG_MW_STAG, "SXE2_AE_AMP_FASTREG_MW_STAG", 0, + "Fastreg error: Registration to a valid Memory Window (MW)" }, + { SXE2_AE_AMP_FASTREG_INVALID_RIGHTS, + "SXE2_AE_AMP_FASTREG_INVALID_RIGHTS", 0, + "Fastreg error: Invalid rights" }, + { SXE2_AE_AMP_FASTREG_INVALID_LENGTH, + "SXE2_AE_AMP_FASTREG_INVALID_LENGTH", 0, + "Fastreg error: Invalid length" }, + { SXE2_AE_AMP_REMOTE_INVALIDATE_MR_RD_ERR, + "SXE2_AE_AMP_REMOTE_INVALIDATE_MR_RD_ERR", 0, + "Remote invalidate key error: mr dma error" }, + { SXE2_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS, + "SXE2_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS", 0, + "Attempt to remotely invalidate Memory Window (MW)/Memory Region (MR) without rights" }, + { SXE2_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS, + "SXE2_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS", 0, + "Attempt to invalidate MR with a bound Memory Window (MW)" }, + { SXE2_AE_AMP_MWBIND_VALID_STAG, "SXE2_AE_AMP_MWBIND_VALID_STAG", 0, + "Attempt to bind an Memory Window (MW) with a valid MW memory key (L-Key/R-Key)" }, + { SXE2_AE_AMP_MWBIND_OF_MR_STAG, "SXE2_AE_AMP_MWBIND_OF_MR_STAG", 0, + "Attempt to bind an Memory Window (MW) with an MR memory key (L-Key/R-Key)" }, + { SXE2_AE_AMP_MWBIND_TO_ZERO_BASED_STAG, + "SXE2_AE_AMP_MWBIND_TO_ZERO_BASED_STAG", 0, + "Attempt to bind an Memory Window (MW) to a zero based MR" }, + { SXE2_AE_AMP_MWBIND_TO_MW_STAG, "SXE2_AE_AMP_MWBIND_TO_MW_STAG", 0, + "Attempt to bind an Memory Window (MW) using MW memory key\n" + "\t(L-Key/R-Key) instead of MR memory key (L-Key/R-Key)" }, + { SXE2_AE_AMP_MWBIND_INVALID_RIGHTS, + "SXE2_AE_AMP_MWBIND_INVALID_RIGHTS", 0, + "Memory Window (MW) bind error: Invalid rights" }, + { SXE2_AE_AMP_MWBIND_INVALID_BOUNDS, + "SXE2_AE_AMP_MWBIND_INVALID_BOUNDS", 0, + "Memory Window (MW) bind error: Invalid bounds" }, + { SXE2_AE_AMP_MWBIND_TO_INVALID_PARENT, + "SXE2_AE_AMP_MWBIND_TO_INVALID_PARENT", 0, + "Memory Window (MW) bind error: Invalid parent MR" }, + { SXE2_AE_AMP_MWBIND_BIND_DISABLED, "SXE2_AE_AMP_MWBIND_BIND_DISABLED", + 0, "Memory Window (MW) bind error: Disabled bind support" }, + { SXE2_AE_AMP_INVALIDATE_TYPE1_MW, "SXE2_AE_AMP_INVALIDATE_TYPE1_MW", 0, + "Memory Window (MW) error: Invalidate type 1 MW" }, + { SXE2_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW, + "SXE2_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW", 0, + "Memory Window (MW) bind error: Zero-based addressing for type 1 MW" }, + { SXE2_AE_AMP_MWBIND_WRONG_TYPE, "SXE2_AE_AMP_MWBIND_WRONG_TYPE", 0, + "MB bind error: Wrong Memory Window (MW) type" }, + { 
SXE2_AE_AMP_FASTREG_PBLE_MISMATCH, + "SXE2_AE_AMP_FASTREG_PBLE_MISMATCH", 0, + "Fastreg error: Invalid request to change physical MR to virtual or vice versa" }, + { SXE2_AE_CQ_OPERATION_ERROR, "SXE2_AE_CQ_OPERATION_ERROR", 0, + "CQ overflow" }, + { SXE2_AE_LLP_TOO_MANY_RETRIES, "SXE2_AE_LLP_TOO_MANY_RETRIES", 0, + "Connection error: The max number of retries has been reached" }, + { SXE2_AE_QP_SUSPEND_COMPLETE, "SXE2_AE_QP_SUSPEND_COMPLETE", 0, + "QP event: Suspend complete" }, + { SXE2_AE_LLWQE_ERR, "SXE2_AE_LLWQE_ERR", 0, + "DB_PROC error: The LLWQE received data less than 8B." }, + { SXE2_AE_DOORBELL_ERR, "SXE2_AE_DOORBELL_ERR", 0, + "DB_PROC error: The DB received data less than 4B." }, + { SXE2_AE_LLWQE_8B_INVALID, "SXE2_AE_LLWQE_8B_INVALID", 0, + "DB_PROC error: The LLWQE received data invalid, not aligned with 8B." }, + { SXE2_AE_DOORBELL_QSET_ERR, "SXE2_AE_DOORBELL_QSET_ERR", 0, + "DB_PROC error: The FID was not assigned Qset." }, + { SXE2_AE_DOORBELL_QPN_ERR, "SXE2_AE_DOORBELL_QPN_ERR", 0, + "DB_PROC error: QPN is not in FID." }, + { SXE2_AE_DOORBELL_EQN_ERR, "SXE2_AE_DOORBELL_EQN_ERR", 0, + "DB_PROC error: EQN is not in FID." }, + { SXE2_AE_DOORBELL_CQN_ERR, "SXE2_AE_DOORBELL_CQN_ERR", 0, + "DB_PROC error: CQN is not in FID." }, + { SXE2_AE_DOORBELL_QPN_NUM_ERR, "SXE2_AE_DOORBELL_QPN_NUM_ERR", 0, + "DB_PROC error: QPN conversion successful, but absolute QPN=0." }, + { SXE2_AE_WQE_LOAD_QPC_PBL_ERR, "SXE2_AE_WQE_LOAD_QPC_PBL_ERR", 0, + "Load qpc PBL err." }, + { SXE2_AE_WQE_LOAD_QPC_DMA_ERR, "SXE2_AE_WQE_LOAD_QPC_DMA_ERR", 0, + "Load qpc DMA err." }, + { SXE2_AE_WQE_QPC_CFG_ERR, "SXE2_AE_WQE_QPC_CFG_ERR", 0, + "QP error: QPC cfg err." }, + { SXE2_AE_WQE_RD_DMA_ERR, "SXE2_AE_WQE_RD_DMA_ERR", 0, + "QP error: DMA arb err." }, + { SXE2_AE_WQE_CFG_ERR, "SXE2_AE_WQE_CFG_ERR", 0, + "QP error: WQE info check err." }, + { SXE2_AE_WQE_OPCODE_ERR, "SXE2_AE_WQE_OPCODE_ERR", 0, + "QP error: WQE opcode verification failed." }, + { SXE2_AE_WQE_LENGTH_ERR, "SXE2_AE_WQE_LENGTH_ERR", 0, + "QP error: WQE length err." }, + { SXE2_AE_WQE_LLWQE_ERR, "SXE2_AE_WQE_LLWQE_ERR", 0, + "QP error: WQE read err." }, + { SXE2_AE_WQE_RD_RESP_DMA_ERR, "SXE2_AE_WQE_RD_RESP_DMA_ERR", 0, + "Read response err." }, + { SXE2_AE_WQE_RD_AH_ERR, "SXE2_AE_WQE_RD_AH_ERR", 0, + "QP error: Read AH DMA err." }, + { SXE2_AE_WQE_AH_CFG_ERR, "SXE2_AE_WQE_AH_CFG_ERR", 0, + "QP error: Read AH info err." }, + { SXE2_AE_WQE_PBL_DMA_ERR, "SXE2_AE_WQE_PBL_DMA_ERR", 0, + "pbl dma err." }, + { SXE2_AE_RXENG_RXT_OPCODE_ERR, "SXE2_AE_RXENG_RXT_OPCODE_ERR", 0, + "QP error: RXT opcode err." }, + { SXE2_AE_RXENG_RXT_LENGTH_ERR, "SXE2_AE_RXENG_RXT_LENGTH_ERR", 0, + "QP error: RXT pkg length err." }, + { SXE2_AE_RXENG_RXT_RDMA_READ_OUTSTANDING_ERR, + "SXE2_AE_RXENG_RXT_RDMA_READ_OUTSTANDING_ERR", 0, + "QP error: RXT outstanding err." }, + { SXE2_AE_RXENG_RXI_BAD_RESPONSE, "SXE2_AE_RXENG_RXI_BAD_RESPONSE", 0, + "QP error: RXI bad response." }, + { SXE2_AE_RXENG_RXI_LENGTH_ERR, "SXE2_AE_RXENG_RXI_LENGTH_ERR", 0, + "QP error: RXI length err." }, + { SXE2_AE_RXENG_RXI_IRRL_DMA_ERR, "SXE2_AE_RXENG_RXI_IRRL_DMA_ERR", 0, + "QP error: RXI irrl dma err." }, + { SXE2_AE_RXENG_RXT_RESP_WR_PADDR_ERR, + "SXE2_AE_RXENG_RXT_RESP_WR_PADDR_ERR", 0, + "QP error: RXT resp wr paddr err." }, + { SXE2_AE_RXENG_RXT_RESP_RD_DMA_ERR, + "SXE2_AE_RXENG_RXT_RESP_RD_DMA_ERR", 0, + "QP error: RXT resp rd dma err." }, + { SXE2_AE_RXENG_RXI_SSNT_OVERSIZE_ERR, + "SXE2_AE_RXENG_RXI_SSNT_OVERSIZE_ERR", 0, + "QP error: RXI ssnt oversize err." 
}, + { SXE2_AE_RXENG_RXI_SSNT_DMA_ERR, "SXE2_AE_RXENG_RXI_SSNT_DMA_ERR", 0, + "QP error: RXI ssnt dma err." }, + { SXE2_AE_RXENG_RXT_RQE_LIMIT, "SXE2_AE_RXENG_RXT_RQE_LIMIT", 0, + "RQE num reaches watermark." }, + { SXE2_AE_RXENG_RX_LOAD_QPC_DMA_ERR, + "SXE2_AE_RXENG_RX_LOAD_QPC_DMA_ERR", 0, "QPC load DMA err." }, + { SXE2_AE_RXENG_RX_LOAD_QPC_PA_ERR, "SXE2_AE_RXENG_RX_LOAD_QPC_PA_ERR", + 0, "QPC load PA err." }, + { SXE2_AE_RXENG_RX_LOAD_SRQC_DMA_ERR, + "SXE2_AE_RXENG_RX_LOAD_SRQC_DMA_ERR", 0, "SRQC load DMA err." }, + { SXE2_AE_RXENG_RX_LOAD_SRQC_PA_ERR, + "SXE2_AE_RXENG_RX_LOAD_SRQC_PA_ERR", 0, "SRQC load PA err." }, + { SXE2_AE_RXENG_RQ_PI_DMA_ERR, "SXE2_AE_RXENG_RQ_PI_DMA_ERR", 0, + "QP error: DMA timeout to get srq/rp PI pointer." }, + { SXE2_AE_RXENG_RQ_ADDR_ERR, "SXE2_AE_RXENG_RQ_ADDR_ERR", 0, + "QP error: FPT, get srq/rq page table addr err." }, + { SXE2_AE_RXENG_RQ_DMA_ERR, "SXE2_AE_RXENG_RQ_DMA_ERR", 0, + "QP error: DMA timeout or ECC err to get srqe/rqe." }, + { SXE2_AE_RXENG_RQ_WQE_VLD_ERR, "SXE2_AE_RXENG_RQ_WQE_VLD_ERR", 0, + "QP error: Get srqe/rqe err, wqe_valid != qpc valid." }, + { SXE2_AE_RXENG_SRQC_STS_ERR, "SXE2_AE_RXENG_SRQC_STS_ERR", 0, + "QP error: Get srqc err : return reserved." }, + { SXE2_AE_RXENG_SRQC_STS_RSV, "SXE2_AE_RXENG_SRQC_STS_RSV", 0, + "QP error: Get srqc state err." }, + { SXE2_AE_RXENG_SRQ_LAST_WQE, "SXE2_AE_RXENG_SRQ_LAST_WQE", 0, + "SRQ: srq qp modify error complete." }, + { SXE2_AE_RXENG_DBR_VALUE_ERR, "SXE2_AE_RXENG_DBR_VALUE_ERR", 0, + "QP error: |PI - CI| more than SIZE ." }, + { SXE2_AE_RXENG_RTR_FIRST_PKT, "SXE2_AE_RXENG_RTR_FIRST_PKT", 0, + "QP: Get the first valid data at RTR state." }, + { SXE2_AE_SRQC_LOAD_SRQC_DMA_ERR, "SXE2_AE_SRQC_LOAD_SRQC_DMA_ERR", 0, + "SRQ: Load srq DMA read err." }, + { SXE2_AE_SRQC_LOAD_SRQC_PA_ERR, "SXE2_AE_SRQC_LOAD_SRQC_PA_ERR", 0, + "SRQ:Load srq PA err." }, + { SXE2_AE_SRQC_STS_ERR, "SXE2_AE_SRQC_STS_ERR", 0, + "SRQ: srq state err." }, + { SXE2_AE_RXENG_REMOTE_INVALID_ERROR, + "SXE2_AE_RXENG_REMOTE_INVALID_ERROR", 0, + "QP: Remote invalid request err." }, + { SXE2_AE_RXENG_REMOTE_ACCESS_ERROR, + "SXE2_AE_RXENG_REMOTE_ACCESS_ERROR", 0, "QP: Remote access err." }, + { SXE2_AE_RXENG_REMOTE_QP_ERROR, "SXE2_AE_RXENG_REMOTE_QP_ERROR", 0, + "QP: Remote operation err." }, + { SXE2_AE_RXENG_RXI_WQE_MISMATCH, "SXE2_AE_RXENG_RXI_WQE_MISMATCH", 0, + "QP: psn not match msn." }, + { SXE2_AE_TXENG_PAYLOAD_RD_DMA_ERR, "SXE2_AE_TXENG_PAYLOAD_RD_DMA_ERR", + 0, "QP error: The sq request pcie data err: dma err." }, + { SXE2_AE_TXENG_RD_PKEY_ERR, "SXE2_AE_TXENG_RD_PKEY_ERR", 0, + "QP error: PHG get pkey err." }, + { SXE2_AE_TXENG_RD_SSNT_PBL_ERR, "SXE2_AE_TXENG_RD_SSNT_PBL_ERR", 0, + "QP error: Get SSNT PA err." }, + { SXE2_AE_TXENG_RD_IRRL_PBL_ERR, "SXE2_AE_TXENG_RD_IRRL_PBL_ERR", 0, + "QP error: Get IRRL PA err." }, + { SXE2_AE_TXENG_RESP_PAYLOAD_RD_DMA_ERR, + "SXE2_AE_TXENG_RESP_PAYLOAD_RD_DMA_ERR", 0, + "QP error: RDMA read resp request pcie data err: dma err." }, + { SXE2_AE_TMO_WR_PA_ERR, "SXE2_AE_TMO_WR_PA_ERR", 0, + "TimeOut queue's write PA err: DMA FPT ERR." }, + { SXE2_AE_TMO_RD_PA_ERR, "SXE2_AE_TMO_RD_PA_ERR", 0, + "TimeOut queue's read PA err: DMA FPT ERR." }, + { SXE2_AE_TMO_RD_DMA_ERR, "SXE2_AE_TMO_RD_DMA_ERR", 0, + "Read host memory err: DMA err." }, + { SXE2_AE_TMO_RD_QPC_ERR, "SXE2_AE_TMO_RD_QPC_ERR", 0, + "TimeOut queue read QPC err." }, + { SXE2_AE_CC_QP_RATE, "SXE2_AE_CC_QP_RATE", 0, + "The flow of QP is reduced to the minimum rate through congestion control." 
}, + { SXE2_AE_CEQ_OVERFLOW, "SXE2_AE_CEQ_OVERFLOW", 0, "CEQ overflow." }, + { SXE2_AE_EQ_STATUS_ERR, "SXE2_AE_EQ_STATUS_ERR", 0, + "CEQ access err." }, + { SXE2_AE_CQ_STATUS_ERR, "SXE2_AE_CQ_STATUS_ERR", 0, "CQ access err." }, + { SXE2_AE_QP_CQ_OVERFLOW, "SXE2_AE_QP_CQ_OVERFLOW", 0, + "The QP's CQE causes CQ overflows." }, +}; + +const char *sxe2_get_ae_desc(u16 ae_code) +{ + const char *desc = ""; + u32 i; + + for (i = 0; i < ARRAY_SIZE(ae_desc_list); i++) { + if (ae_desc_list[i].id == ae_code) { + desc = ae_desc_list[i].desc; + break; + } + } + return desc; +} + +void sxe2_set_ae_count(u16 ae_code) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(ae_desc_list); i++) { + if (ae_desc_list[i].id == ae_code) { + ae_desc_list[i].count++; + break; + } + } +} + +struct sxe2_ae_desc *sxe2_get_ae_desc_list(void) +{ + return ae_desc_list; +} + +int sxe2_get_ae_desc_list_size(void) +{ + return ARRAY_SIZE(ae_desc_list); +} + +static void sxe2_set_flush_fields(struct sxe2_rdma_ctx_qp *qp_ctx, + struct sxe2_aeqe_info *info) +{ + enum sxe2_qp_event_type qp_event; + + switch (info->ae_code) { + case SXE2_AE_AMP_UNALLOCATED_STAG: + case SXE2_AE_AMP_INVALID_STAG: + case SXE2_AE_AMP_BAD_QP: + case SXE2_AE_AMP_BAD_PD: + case SXE2_AE_AMP_BAD_STAG_KEY: + case SXE2_AE_AMP_BAD_STAG_INDEX: + case SXE2_AE_AMP_BOUNDS_VIOLATION: + case SXE2_AE_AMP_RIGHTS_VIOLATION: + case SXE2_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: + case SXE2_AE_AMP_MWBIND_INVALID_RIGHTS: + case SXE2_AE_AMP_MWBIND_BIND_DISABLED: + case SXE2_AE_AMP_MWBIND_INVALID_BOUNDS: + case SXE2_AE_AMP_MWBIND_VALID_STAG: + case SXE2_AE_RXENG_RXT_RESP_WR_PADDR_ERR: + case SXE2_AE_RXENG_RXT_RESP_RD_DMA_ERR: + qp_event = SXE2_QP_EVENT_ACCESS_ERR; + break; + case SXE2_AE_RXENG_RXT_OPCODE_ERR: + case SXE2_AE_RXENG_RXT_LENGTH_ERR: + case SXE2_AE_RXENG_RXT_RDMA_READ_OUTSTANDING_ERR: + qp_event = SXE2_QP_EVENT_REQ_ERR; + break; + case SXE2_AE_RXENG_RTR_FIRST_PKT: + qp_event = SXE2_QP_EVENT_COMM_EST; + break; + case SXE2_AE_RXENG_SRQ_LAST_WQE: + qp_event = SXE2_QP_EVENT_QP_LASTWQE_REACHED; + break; + default: + qp_event = SXE2_QP_EVENT_CATASTROPHIC; + break; + } + + qp_ctx->event_type = qp_event; +} + +static int sxe2_get_next_aeqe(struct sxe2_rdma_ctx_aeq *aeq, + struct sxe2_aeqe_info *info) +{ + u64 temp, compl_ctx; + __le64 *aeqe; + u8 ae_src; + u8 polarity; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(aeq->dev); + + if (rdma_dev->cache_line_64_en == false) + aeqe = SXE2_GET_CURRENT_AEQ_ELEM(aeq); + else + aeqe = SXE2_GET_CURRENT_AEQ_ELEM_HYGON(aeq); + + get_64bit_val(aeqe, 8, &temp); + DRV_RDMA_LOG_DEV_DEBUG("aeq:tail %#x, aeqe_8 %#llx\n", + SXE2_RING_CURRENT_TAIL((aeq)->aeq_ring), temp); + polarity = (u8)FIELD_GET(SXE2_AEQE_VALID, temp); + DRV_RDMA_LOG_DEV_DEBUG("aeq valid:%d aeq->polarity:%d.\n", polarity, + aeq->polarity); + + if (aeq->polarity != polarity) + return -ENOENT; + + dma_rmb(); + + get_64bit_val(aeqe, 0, &compl_ctx); + + ae_src = (u8)FIELD_GET(SXE2_AEQE_AESRC, temp); + info->wqe_idx = (u16)FIELD_GET(SXE2_AEQE_WQDESCIDX, temp); + info->qp_cq_id = (u32)FIELD_GET(SXE2_AEQE_QPCQID_L, temp); + info->ae_code = (u16)FIELD_GET(SXE2_AEQE_AECODE, temp); + info->rdma_state = (u8)FIELD_GET(SXE2_AEQE_RDMA_STATE, temp); + info->aeqe_overflow = (bool)FIELD_GET(SXE2_AEQE_OVERFLOW, temp); + info->ae_source = ae_src; + + switch (ae_src) { + case SXE2_AE_SOURCE_RQ: + info->qp = true; + info->rq = true; + info->err_rq_idx_valid = true; + info->compl_ctx = compl_ctx; + break; + case SXE2_AE_SOURCE_CQ: + info->cq = true; + info->compl_ctx = LS_64_1(compl_ctx, 1); + 
break; + case SXE2_AE_SOURCE_SQ: + info->qp = true; + info->sq = true; + info->compl_ctx = compl_ctx; + break; + case SXE2_AE_SOURCE_SRQ: + info->srq = true; + info->compl_ctx = compl_ctx; + break; + case SXE2_AE_SOURCE_TMO: + info->tmo = true; + if ((info->ae_code == SXE2_AE_TMO_RD_DMA_ERR) || + (info->ae_code == SXE2_AE_TMO_RD_QPC_ERR)) + info->qp = true; + break; + case SXE2_AE_SOURCE_CC: + info->cc = true; + break; + case SXE2_AE_SOURCE_DB_PROC: + info->db = true; + break; + case SXE2_AE_SOURCE_RSVD: + default: + break; + } + + SXE2_RING_MOVE_TAIL(aeq->aeq_ring); + aeq->cons_index = SXE2_RING_CURRENT_TAIL(aeq->aeq_ring); + if (!SXE2_RING_CURRENT_TAIL(aeq->aeq_ring)) + aeq->polarity ^= 1; + + return 0; +} + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +static void sxe2_process_aeq_err_inject_noupdate_ci( + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *ctx_aeq) +{ + if (rdma_dev->rdma_func->inject_aeq.aeq_ci_noupdate == + SXE2_INJECT_EQ_AEQ_VALUE) { + DRV_RDMA_LOG_DEBUG_BDF("inject aeq ci no update err.\n"); + } else { + sxe2_eq_update_ci(ctx_aeq->doorbell, ctx_aeq->cons_index, 0); + DRV_RDMA_LOG_DEBUG_BDF("aeq process update ci:%u.\n", ctx_aeq->cons_index); + } +} +#endif +static void sxe2_process_aeq(struct sxe2_rdma_pci_f *rf) +{ + struct sxe2_rdma_aeq *aeq = &rf->aeq; + struct sxe2_rdma_ctx_aeq *ctx_aeq = &aeq->ctx_aeq; + struct sxe2_aeqe_info aeinfo; + struct sxe2_aeqe_info *info = &aeinfo; + struct sxe2_rdma_cq *cq = NULL; + struct sxe2_rdma_qp *qp = NULL; + struct sxe2_rdma_srq *srq = NULL; + struct sxe2_rdma_device *rdma_dev = rf->rdma_dev; + unsigned long flags = 0; + int ret; + u32 aeqcnt = 0; + struct ib_event srq_ibevent = {}; + + if (!ctx_aeq->size) + return; + DRV_RDMA_LOG_DEV_DEBUG("aeq process start.\n"); + + do { + memset(info, 0, sizeof(*info)); + ret = sxe2_get_next_aeqe(ctx_aeq, info); + if (ret) { + DRV_RDMA_LOG_DEV_DEBUG("aeq process end.\n"); + break; + } + + if (info->aeqe_overflow) { + DRV_RDMA_LOG_DEV_DEBUG("AEQ has overflowed\n"); + if (!rf->reset) { + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + return; + } + + ++aeqcnt; + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: ae_id = 0x%x (%s), is_qp = %d,\n" + "\tcq_qp_id = %u, rdma_state = %d, ae_src = %d\n", + info->ae_code, sxe2_get_ae_desc(info->ae_code), + info->qp, info->qp_cq_id, info->rdma_state, + info->ae_source); + +#ifdef SXE2_CFG_DEBUG + sxe2_set_ae_count(info->ae_code); +#endif + + switch (info->ae_code) { + case SXE2_AE_CQ_OPERATION_ERROR: + case SXE2_AE_CQ_STATUS_ERR: + DRV_RDMA_LOG_DEV_DEBUG( + "Processing CQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + + if (info->qp_cq_id > rf->max_cq) { + DRV_RDMA_LOG_DEV_ERR( + "AEQ: cq_id %u invalid, max_cq:%u\n", + info->qp_cq_id, rf->max_cq); + break; + } + + if (info->qp_cq_id != 0) { + spin_lock_irqsave(&rf->cqtable_lock, flags); + cq = rf->cq_table[info->qp_cq_id]; + if (!cq) { + spin_unlock_irqrestore( + &rf->cqtable_lock, flags); + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: cq_id %u is already freed\n", + info->qp_cq_id); + continue; + } + sxe2_drv_cq_add_ref(cq); + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + + if (cq->ibcq.event_handler) { + struct ib_event ibevent; + + ibevent.device = cq->ibcq.device; + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &cq->ibcq; + cq->ibcq.event_handler( + &ibevent, cq->ibcq.cq_context); + } + sxe2_drv_cq_rem_ref(cq); + } + + if ((info->qp_cq_id == 0) || + (info->ae_code == SXE2_AE_CQ_STATUS_ERR)) { + if (!rf->reset) { + DRV_RDMA_LOG_DEV_ERR( + "Request Reset, 
cq err\n"); + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + } + break; + case SXE2_AE_LLWQE_ERR: + case SXE2_AE_DOORBELL_ERR: + case SXE2_AE_LLWQE_8B_INVALID: + case SXE2_AE_DOORBELL_QSET_ERR: + case SXE2_AE_DOORBELL_QPN_ERR: + case SXE2_AE_DOORBELL_EQN_ERR: + case SXE2_AE_DOORBELL_CQN_ERR: + case SXE2_AE_DOORBELL_QPN_NUM_ERR: + DRV_RDMA_LOG_DEV_WARN("AEQ: DB PROC ae_id:0x%x (%s)\n", + info->ae_code, + sxe2_get_ae_desc(info->ae_code)); + break; + case SXE2_AE_CC_QP_RATE: + DRV_RDMA_LOG_DEV_WARN("AEQ: CC ae_id:0x%x (%s)\n", + info->ae_code, + sxe2_get_ae_desc(info->ae_code)); + break; + case SXE2_AE_RXENG_RXT_RQE_LIMIT: + if ((info->qp_cq_id < + rf->ctx_dev.hw_attrs.min_hw_srq_id) || + (info->qp_cq_id > + (rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ] + .cnt - + 1))) { + DRV_RDMA_LOG_DEV_ERR( + "srqn err, SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + continue; + } + spin_lock_irqsave(&rf->srqtable_lock, flags); + srq = rf->srq_table[info->qp_cq_id]; + if (!srq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + DRV_RDMA_LOG_DEV_ERR( + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + sxe2_kadd_srq_ref(&srq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + srq->srq_ctx.srq_limit = 0; + if (srq->ibsrq.event_handler) { + DRV_RDMA_LOG_DEV_DEBUG( + "Processing SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + srq_ibevent.device = srq->ibsrq.device; + srq_ibevent.element.port_num = 1; + srq_ibevent.element.srq = &srq->ibsrq; + srq_ibevent.event = IB_EVENT_SRQ_LIMIT_REACHED; + srq->ibsrq.event_handler( + &srq_ibevent, srq->ibsrq.srq_context); + } else { + DRV_RDMA_LOG_DEV_ERR( + "handle err, Processing SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + } + sxe2_krem_srq_ref(&srq->ibsrq); + break; + case SXE2_AE_SRQC_LOAD_SRQC_DMA_ERR: + case SXE2_AE_SRQC_LOAD_SRQC_PA_ERR: + case SXE2_AE_SRQC_STS_ERR: + if ((info->qp_cq_id < + rf->ctx_dev.hw_attrs.min_hw_srq_id) || + (info->qp_cq_id > + (rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ] + .cnt - + 1))) { + DRV_RDMA_LOG_DEV_ERR( + "srqn err, SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + continue; + } + spin_lock_irqsave(&rf->srqtable_lock, flags); + srq = rf->srq_table[info->qp_cq_id]; + if (!srq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + DRV_RDMA_LOG_DEV_ERR( + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + sxe2_kadd_srq_ref(&srq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + if (srq->ibsrq.event_handler) { + DRV_RDMA_LOG_DEV_DEBUG( + "Processing SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + srq_ibevent.device = srq->ibsrq.device; + srq_ibevent.element.srq = &srq->ibsrq; + srq_ibevent.event = IB_EVENT_SRQ_ERR; + srq->ibsrq.event_handler( + &srq_ibevent, srq->ibsrq.srq_context); + } else { + DRV_RDMA_LOG_DEV_ERR( + "handle err, Processing SRQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_code); + } + sxe2_krem_srq_ref(&srq->ibsrq); + if (!rf->reset) { + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + break; + case SXE2_AE_RXENG_SRQ_LAST_WQE: + DRV_RDMA_LOG_DEV_WARN( + "qp modify to err state finish\n"); + if ((info->qp) && (info->qp_cq_id > 0) && + (info->qp_cq_id < rf->max_qp)) { + spin_lock_irqsave(&rf->qptable_lock, flags); + qp = rf->qp_table[info->qp_cq_id]; + if (!qp) { + spin_unlock_irqrestore( + &rf->qptable_lock, flags); + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: qp_id %u is already freed\n", + 
info->qp_cq_id); + continue; + } + + sxe2_qp_add_ref(&qp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + + sxe2_set_flush_fields(&qp->qp_ctx, info); + sxe2_ib_qp_event(qp, qp->qp_ctx.event_type); + sxe2_qp_rem_ref(&qp->ibqp); + } + break; + case SXE2_AE_WQE_LOAD_QPC_PBL_ERR: + case SXE2_AE_WQE_LOAD_QPC_DMA_ERR: + case SXE2_AE_RXENG_RX_LOAD_QPC_DMA_ERR: + case SXE2_AE_RXENG_RX_LOAD_QPC_PA_ERR: + case SXE2_AE_RXENG_SRQC_STS_ERR: + case SXE2_AE_RXENG_SRQC_STS_RSV: + case SXE2_AE_TMO_WR_PA_ERR: + case SXE2_AE_TMO_RD_PA_ERR: + case SXE2_AE_TMO_RD_DMA_ERR: + case SXE2_AE_CEQ_OVERFLOW: + case SXE2_AE_EQ_STATUS_ERR: + if (!rf->reset) { + DRV_RDMA_LOG_DEV_WARN("aeq request reset.\n"); + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + break; + case SXE2_AE_RXENG_RQ_PI_DMA_ERR: + case SXE2_AE_RXENG_RQ_ADDR_ERR: + case SXE2_AE_RXENG_RQ_DMA_ERR: + case SXE2_AE_RXENG_RX_LOAD_SRQC_DMA_ERR: + case SXE2_AE_RXENG_RX_LOAD_SRQC_PA_ERR: + if ((info->qp) && (info->qp_cq_id > 0) && + (info->qp_cq_id < rf->max_qp)) { + spin_lock_irqsave(&rf->qptable_lock, flags); + qp = rf->qp_table[info->qp_cq_id]; + if (!qp) { + spin_unlock_irqrestore( + &rf->qptable_lock, flags); + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: qp_id %u is already freed\n", + info->qp_cq_id); + continue; + } + + sxe2_qp_add_ref(&qp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + + sxe2_set_flush_fields(&qp->qp_ctx, info); + sxe2_ib_qp_event(qp, qp->qp_ctx.event_type); + sxe2_qp_rem_ref(&qp->ibqp); + } + + if (!rf->reset) { + DRV_RDMA_LOG_DEV_WARN("aeq request reset.\n"); + rf->reset = true; + rf->gen_ops.request_reset(rf); + } + break; + case SXE2_AE_QP_SUSPEND_COMPLETE: + DRV_RDMA_LOG_WARN_BDF("qp:%u modify to SQD finish.\n", + info->qp_cq_id); + + if (info->qp_cq_id > rf->max_qp) { + DRV_RDMA_LOG_DEV_ERR( + "AEQ: qp_id %u invalid, max_qp:%u\n", + info->qp_cq_id, rf->max_qp); + break; + } + + spin_lock_irqsave(&rf->qptable_lock, flags); + qp = rf->qp_table[info->qp_cq_id]; + if (!qp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + + if (!rdma_dev->vsi.tc_change_pending) + continue; + + atomic_dec(&rdma_dev->vsi.qp_suspend_reqs); + wake_up(&rdma_dev->suspend_wq); + + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: qp_id %u is already freed\n", + info->qp_cq_id); + break; + } + + sxe2_qp_add_ref(&qp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, flags); + + if (rdma_dev->vsi.tc_change_pending) { + if (!atomic_dec_return( + &rdma_dev->vsi.qp_suspend_reqs)) + wake_up(&rdma_dev->suspend_wq); + } + + if (qp->suspend_pending) { + qp->suspend_pending = false; + wake_up(&qp->dev->suspend_wq); + } + + sxe2_qp_rem_ref(&qp->ibqp); + break; + case SXE2_AE_AMP_UNALLOCATED_STAG: + case SXE2_AE_AMP_INVALID_STAG: + case SXE2_AE_AMP_BAD_QP: + case SXE2_AE_AMP_BAD_PD: + case SXE2_AE_AMP_BAD_STAG_KEY: + case SXE2_AE_AMP_BAD_STAG_INDEX: + case SXE2_AE_AMP_BOUNDS_VIOLATION: + case SXE2_AE_AMP_RIGHTS_VIOLATION: + case SXE2_AE_AMP_FASTREG_VALID_STAG: + case SXE2_AE_AMP_FASTREG_MW_STAG: + case SXE2_AE_AMP_FASTREG_INVALID_RIGHTS: + case SXE2_AE_AMP_FASTREG_INVALID_LENGTH: + case SXE2_AE_AMP_REMOTE_INVALIDATE_MR_RD_ERR: + case SXE2_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: + case SXE2_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS: + case SXE2_AE_AMP_MWBIND_VALID_STAG: + case SXE2_AE_AMP_MWBIND_OF_MR_STAG: + case SXE2_AE_AMP_MWBIND_TO_ZERO_BASED_STAG: + case SXE2_AE_AMP_MWBIND_TO_MW_STAG: + case SXE2_AE_AMP_MWBIND_INVALID_RIGHTS: + case SXE2_AE_AMP_MWBIND_INVALID_BOUNDS: + case SXE2_AE_AMP_MWBIND_TO_INVALID_PARENT: + case 
SXE2_AE_AMP_MWBIND_BIND_DISABLED: + case SXE2_AE_AMP_INVALIDATE_TYPE1_MW: + case SXE2_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW: + case SXE2_AE_AMP_MWBIND_WRONG_TYPE: + case SXE2_AE_AMP_FASTREG_PBLE_MISMATCH: + case SXE2_AE_LLP_TOO_MANY_RETRIES: + case SXE2_AE_RXENG_RXT_RESP_WR_PADDR_ERR: + case SXE2_AE_RXENG_RXT_RESP_RD_DMA_ERR: + case SXE2_AE_WQE_RD_RESP_DMA_ERR: + case SXE2_AE_TXENG_RESP_PAYLOAD_RD_DMA_ERR: + case SXE2_AE_TMO_RD_QPC_ERR: + case SXE2_AE_QP_CQ_OVERFLOW: + case SXE2_AE_WQE_QPC_CFG_ERR: + case SXE2_AE_WQE_RD_DMA_ERR: + case SXE2_AE_WQE_CFG_ERR: + case SXE2_AE_WQE_OPCODE_ERR: + case SXE2_AE_WQE_LENGTH_ERR: + case SXE2_AE_WQE_LLWQE_ERR: + case SXE2_AE_WQE_RD_AH_ERR: + case SXE2_AE_WQE_AH_CFG_ERR: + case SXE2_AE_WQE_PBL_DMA_ERR: + case SXE2_AE_RXENG_RXT_OPCODE_ERR: + case SXE2_AE_RXENG_RXT_LENGTH_ERR: + case SXE2_AE_RXENG_RXT_RDMA_READ_OUTSTANDING_ERR: + case SXE2_AE_RXENG_RXI_BAD_RESPONSE: + case SXE2_AE_RXENG_RXI_LENGTH_ERR: + case SXE2_AE_RXENG_RXI_IRRL_DMA_ERR: + case SXE2_AE_RXENG_RXI_SSNT_OVERSIZE_ERR: + case SXE2_AE_RXENG_RXI_SSNT_DMA_ERR: + case SXE2_AE_RXENG_RQ_WQE_VLD_ERR: + case SXE2_AE_TXENG_PAYLOAD_RD_DMA_ERR: + case SXE2_AE_TXENG_RD_PKEY_ERR: + case SXE2_AE_TXENG_RD_SSNT_PBL_ERR: + case SXE2_AE_TXENG_RD_IRRL_PBL_ERR: + case SXE2_AE_RXENG_DBR_VALUE_ERR: + case SXE2_AE_RXENG_RTR_FIRST_PKT: + case SXE2_AE_RXENG_REMOTE_INVALID_ERROR: + case SXE2_AE_RXENG_REMOTE_ACCESS_ERROR: + case SXE2_AE_RXENG_REMOTE_QP_ERROR: + case SXE2_AE_RXENG_RXI_WQE_MISMATCH: + DRV_RDMA_LOG_DEV_WARN( + "AEQ: abnormal ae_id = 0x%x , is_qp = %d, qp_id = %u\n", + info->ae_code, info->qp, info->qp_cq_id); + + if (info->qp_cq_id > rf->max_qp) { + DRV_RDMA_LOG_DEV_ERR( + "AEQ: qp_id %u invalid, max_qp:%u\n", + info->qp_cq_id, rf->max_qp); + break; + } + + spin_lock_irqsave(&rf->qptable_lock, flags); + qp = rf->qp_table[info->qp_cq_id]; + if (!qp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + DRV_RDMA_LOG_DEV_DEBUG( + "AEQ: qp_id %u is already freed\n", + info->qp_cq_id); + break; + } + + sxe2_qp_add_ref(&qp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, flags); + + if ((info->ae_code == SXE2_AE_RXENG_RXT_OPCODE_ERR) || + (info->ae_code == SXE2_AE_RXENG_RXT_LENGTH_ERR)) { + if (qp->qp_ctx.qp_common.qp_type != IB_QPT_RC) { + DRV_RDMA_LOG_DEV_INFO( + "UD type ae_id = 0x%x ignore\n", + info->ae_code); + sxe2_qp_rem_ref(&qp->ibqp); + break; + } + } + + sxe2_set_flush_fields(&qp->qp_ctx, info); + if (info->ae_code == SXE2_AE_RXENG_RTR_FIRST_PKT) + sxe2_ib_qp_event(qp, qp->qp_ctx.event_type); + else + sxe2_cm_disconn(qp); + + sxe2_qp_rem_ref(&qp->ibqp); + break; + default: + break; + } + } while (aeqcnt < SXE2_EQ_POLLING_BUDGET); + + if (aeqcnt) { +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + sxe2_process_aeq_err_inject_noupdate_ci(rdma_dev, ctx_aeq); +#else + sxe2_eq_update_ci(ctx_aeq->doorbell, ctx_aeq->cons_index, 0); + DRV_RDMA_LOG_DEV_DEBUG("aeq process update ci:%u.\n", + ctx_aeq->cons_index); +#endif + } +} + +#ifndef NO_HAVE_TASKLET_SETUP +static void sxe2_eq_cb(struct tasklet_struct *t) +{ + struct sxe2_rdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); + + if (rf->msix_shared) + sxe2_process_ceq(rf, rf->ceqlist); + + if (!atomic_read(&rf->aeq_created)) + goto end; + + sxe2_process_aeq(rf); + +end: + sxe2_rdma_ena_intr(&rf->ctx_dev, rf->sxe2_msixtbl[0].idx); +} + +irqreturn_t sxe2_eq_handler(int irq, void *data) +{ + struct sxe2_rdma_pci_f *rf = data; + + tasklet_schedule(&rf->dpc_tasklet); + + return IRQ_HANDLED; +} + +irqreturn_t sxe2_ceq_handler(int irq, void *data) 
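+/* hard-IRQ entry for a dedicated CEQ vector: all real work is deferred
+ * to the per-CEQ tasklet scheduled below
+ */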
+{ + struct sxe2_rdma_ceq *ceq = data; + struct sxe2_rdma_device *rdma_dev = ceq->rf->rdma_dev; + + if (ceq->irq != irq) { + DRV_RDMA_LOG_DEV_ERR("expected irq = %d received irq = %d\n", + ceq->irq, irq); + } + tasklet_schedule(&ceq->dpc_tasklet); + + return IRQ_HANDLED; +} + +void sxe2_ceq_cb(struct tasklet_struct *t) +{ + struct sxe2_rdma_ceq *ceq = from_tasklet(ceq, t, dpc_tasklet); + struct sxe2_rdma_pci_f *rf = ceq->rf; + + sxe2_process_ceq(rf, ceq); + sxe2_rdma_ena_intr(&rf->ctx_dev, ceq->msix_idx); +} +#else +static irqreturn_t sxe2_ceq_cb(int irq, void *private) +{ + struct sxe2_rdma_ceq *ceq = private; + struct sxe2_rdma_pci_f *rf = ceq->rf; + + sxe2_process_ceq(rf, ceq); + sxe2_rdma_ena_intr(&rf->ctx_dev, ceq->msix_idx); + + return IRQ_HANDLED; +} + +static irqreturn_t sxe2_aeq_ceq0_cb(int irq, void *private) +{ + struct sxe2_rdma_pci_f *rf = private; + + if (rf->msix_shared) + sxe2_process_ceq(rf, rf->ceqlist); + + if (!atomic_read(&rf->aeq_created)) + goto end; + + sxe2_process_aeq(rf); + +end: + sxe2_rdma_ena_intr(&rf->ctx_dev, rf->sxe2_msixtbl[0].idx); + + return IRQ_HANDLED; +} + +#endif + +static u32 sxe2_get_ceq_abs_id(struct sxe2_rdma_pci_f *rdma_func, + u32 ceq_rel_id) +{ + struct sxe2_rdma_ctx_dev *dev = NULL; + struct sxe2_rdma_device *rdma_dev = NULL; + int fid = 0; + u32 offset = 0; + u32 ceq_abs_id = 0; + + rdma_dev = rdma_func->rdma_dev; + dev = &rdma_func->ctx_dev; + if (dev->privileged) { + fid = rdma_func->pf_id; + } else { + fid = dev->rcms_info->pmf_index - SXE2_FIRST_VF_FPM_ID; + dev->pf_cnt = rdma_func->pf_cnt; + offset = dev->pf_cnt * dev->rcms_info->pf_max_ceqs; + } + ceq_abs_id = offset + dev->rcms_info->max_ceqs * fid + ceq_rel_id; + DRV_RDMA_LOG_DEV_DEBUG( + "CEQ:ceq_abs_id=%#x, pf_cnt=%#x, pf_max_ceqs=%#x,\n" + "offset=%#x, max_ceqs=%#x, pf_id=%#x, pmf_index=%#x, fid=%#x, ceq_rel_id=%#x\n", + ceq_abs_id, dev->pf_cnt, dev->rcms_info->pf_max_ceqs, offset, + dev->rcms_info->max_ceqs, rdma_func->pf_id, + dev->rcms_info->pmf_index, fid, ceq_rel_id); + + return ceq_abs_id; +} + +static int sxe2_vchnl_cfg_ceq(struct sxe2_rdma_ctx_dev *dev, u32 ceq_id, + u32 idx, bool map) +{ + int ret; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct aux_core_dev_info *cdev_info = rdma_func->cdev; + struct aux_qvlist_info *qvl_info; + struct aux_qv_info *qvinfo; + u32 qvl_size = 0; + + qvl_size = sizeof(struct aux_qvlist_info) + sizeof(struct aux_qv_info); + + qvl_info = kzalloc(qvl_size, GFP_KERNEL); + if (!qvl_info) { + DRV_RDMA_LOG_DEV_ERR( + "eq: qvl info alloc failed\n"); + ret = -ENOMEM; + goto end; + } + + qvinfo = &qvl_info->qv_info[0]; + + qvl_info->num_vectors = 1; + qvinfo->aeq_idx = SXE2_Q_INVALID_IDX; + qvinfo->ceq_idx = (u16)ceq_id; + qvinfo->v_idx = idx; + qvinfo->itr_idx = 0; + + DRV_RDMA_LOG_DEV_DEBUG("ceq_id:%#x, v_idx=%#x, map %d\n", + qvinfo->ceq_idx, qvinfo->v_idx, map); + + if (!rdma_func->reset) { + ret = cdev_info->ops->vc_queue_vec_map_unmap(cdev_info, + qvl_info, map); + } else { + ret = 0; + DRV_RDMA_LOG_WARN_BDF( + "ceq map/unmap not processed, need reset\n"); + } + + kfree(qvl_info); + qvl_info = NULL; + +end: + return ret; +} + +static int sxe2_vchnl_cfg_aeq(struct sxe2_rdma_ctx_dev *dev, u32 idx, bool map) +{ + int ret; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct aux_core_dev_info *cdev_info = rdma_func->cdev; + struct aux_qvlist_info *qvl_info; + struct aux_qv_info *qvinfo; + u32 qvl_size = 
0; + + qvl_size = sizeof(struct aux_qvlist_info) + sizeof(struct aux_qv_info); + + qvl_info = kzalloc(qvl_size, GFP_KERNEL); + if (!qvl_info) { + DRV_RDMA_LOG_DEV_ERR( + "eq: qvl info alloc failed\n"); + ret = -ENOMEM; + goto end; + } + + qvinfo = &qvl_info->qv_info[0]; + + qvl_info->num_vectors = 1; + qvinfo->aeq_idx = 0; + qvinfo->ceq_idx = SXE2_Q_INVALID_IDX; + qvinfo->v_idx = idx; + qvinfo->itr_idx = 0; + + DRV_RDMA_LOG_DEV_DEBUG("aeq_id:%#x, v_idx=%#x, map %d\n", + qvinfo->aeq_idx, qvinfo->v_idx, map); + + if (!rdma_func->reset) { + ret = cdev_info->ops->vc_queue_vec_map_unmap(cdev_info, + qvl_info, map); + if (ret) + DRV_RDMA_LOG_WARN_BDF("aeq map/unmap fail, ret %d\n", + ret); + } else { + ret = 0; + DRV_RDMA_LOG_WARN_BDF( + "aeq map/unmap not processed, need reset\n"); + } + + kfree(qvl_info); + qvl_info = NULL; + +end: + return ret; +} + +static void sxe2_destroy_irq(struct sxe2_rdma_pci_f *rf, + struct sxe2_rdma_msix_vector *msix_vec, + void *dev_id) +{ + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + + dev->irq_ops->sxe2_rdma_dis_irq(dev, msix_vec->idx); + irq_update_affinity_hint(msix_vec->irq, NULL); + free_irq(msix_vec->irq, dev_id); + if (rf == dev_id) { + tasklet_kill(&rf->dpc_tasklet); + } else { + struct sxe2_rdma_ceq *iwceq = (struct sxe2_rdma_ceq *)dev_id; + + tasklet_kill(&iwceq->dpc_tasklet); + } +} + +#ifndef NO_HAVE_TASKLET_SETUP +static int sxe2_cfg_ceq_vector(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq, u32 ceq_id, + struct sxe2_rdma_msix_vector *msix_vec) +{ + int ret; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + u32 ceq_abs_id = 0; + + if (rf->msix_shared && !ceq_id) { + snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, + "sxe2rdma-%s-AEQMCEQ", dev_name(&rf->pcidev->dev)); + + tasklet_setup(&rf->dpc_tasklet, sxe2_eq_cb); + ret = request_irq(msix_vec->irq, sxe2_eq_handler, 0, + msix_vec->name, rf); + DRV_RDMA_LOG_DEV_DEBUG( + "EQ:msix shared request irq ret=%d irq=%u name=%s\n", + ret, msix_vec->irq, msix_vec->name); + } else { + snprintf(msix_vec->name, sizeof(msix_vec->name) - 1, + "sxe2rdma-%s-CEQ-%d", dev_name(&rf->pcidev->dev), + ceq_id); + + tasklet_setup(&ceq->dpc_tasklet, sxe2_ceq_cb); + ret = request_irq(msix_vec->irq, sxe2_ceq_handler, 0, + msix_vec->name, ceq); + DRV_RDMA_LOG_DEV_DEBUG( + "EQ:msix no shared request irq ret=%d irq=%u name=%s\n", + ret, msix_vec->irq, msix_vec->name); + } + + cpumask_clear(&msix_vec->mask); + cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); + irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: ceq irq config fail\n"); + goto end; + } + + msix_vec->ceq_id = ceq_id; + + ceq_abs_id = sxe2_get_ceq_abs_id(rf, ceq_id); + msix_vec->ceq_abs_id = ceq_abs_id; + + if (rf->ctx_dev.privileged) { + rf->ctx_dev.irq_ops->sxe2_rdma_cfg_ceq(&rf->ctx_dev, ceq_abs_id, + msix_vec->idx, true); + } else { + ret = sxe2_vchnl_cfg_ceq(&rf->ctx_dev, ceq_abs_id, + msix_vec->idx, true); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: ceq vchnl config fail, ret %d\n", ret); + goto free_irq; + } + } + + goto end; +free_irq: + if (rf->msix_shared && !ceq_id) + sxe2_destroy_irq(rf, msix_vec, rf); + else + sxe2_destroy_irq(rf, msix_vec, ceq); +end: + return ret; +} +#else +static int sxe2_cfg_ceq_vector(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq, u32 ceq_id, + struct sxe2_rdma_msix_vector *msix_vec) +{ + int ret; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + u32 ceq_abs_id = 0; + + if (rf->msix_shared && !ceq_id) { + snprintf(msix_vec->name, 
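/*
 * Both vchnl helpers above send the same one-entry queue-vector list.
 * A condensed sketch of the message build, assuming qv_info is (or can
 * be treated as) a trailing array of aux_qv_info:
 *
 *	struct aux_qvlist_info *qvl;
 *	int ret;
 *
 *	qvl = kzalloc(sizeof(*qvl) + sizeof(struct aux_qv_info),
 *		      GFP_KERNEL);
 *	if (!qvl)
 *		return -ENOMEM;
 *	qvl->num_vectors = 1;
 *	qvl->qv_info[0].ceq_idx = ceq_id;   // SXE2_Q_INVALID_IDX for AEQ
 *	qvl->qv_info[0].aeq_idx = SXE2_Q_INVALID_IDX;  // 0 for the AEQ
 *	qvl->qv_info[0].v_idx = msix_idx;
 *	ret = cdev_info->ops->vc_queue_vec_map_unmap(cdev_info, qvl, map);
 *	kfree(qvl);
 *
 * The same call with map == false undoes the mapping, which is what the
 * teardown paths later in this file rely on.
 */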
sizeof(msix_vec->name) - 1,
+			 "sxe2rdma-%s-AEQMCEQ", dev_name(&rf->pcidev->dev));
+
+		ret = request_threaded_irq(msix_vec->irq, NULL,
+					   sxe2_aeq_ceq0_cb, IRQF_ONESHOT,
+					   msix_vec->name, rf);
+		DRV_RDMA_LOG_DEBUG_BDF(
+			"EQ:msix shared request irq ret=%d irq=%u name=%s\n",
+			ret, msix_vec->irq, msix_vec->name);
+	} else {
+		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+			 "sxe2rdma-%s-CEQ-%d", dev_name(&rf->pcidev->dev),
+			 ceq_id);
+		ret = request_threaded_irq(msix_vec->irq, NULL, sxe2_ceq_cb,
+					   IRQF_ONESHOT, msix_vec->name, ceq);
+		DRV_RDMA_LOG_DEBUG_BDF(
+			"EQ:msix no shared request irq ret=%d irq=%u name=%s\n",
+			ret, msix_vec->irq, msix_vec->name);
+	}
+
+	cpumask_clear(&msix_vec->mask);
+	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
+	if (ret) {
+		DRV_RDMA_LOG_ERROR_BDF("ERR: ceq irq config fail\n");
+		goto end;
+	}
+
+	msix_vec->ceq_id = ceq_id;
+
+	ceq_abs_id = sxe2_get_ceq_abs_id(rf, ceq_id);
+	msix_vec->ceq_abs_id = ceq_abs_id;
+
+	if (rf->ctx_dev.privileged) {
+		rf->ctx_dev.irq_ops->sxe2_rdma_cfg_ceq(&rf->ctx_dev, ceq_abs_id,
+						       msix_vec->idx, true);
+	} else {
+		ret = sxe2_vchnl_cfg_ceq(&rf->ctx_dev, ceq_abs_id,
+					 msix_vec->idx, true);
+		if (ret) {
+			DRV_RDMA_LOG_ERROR_BDF(
+				"ERR: ceq vchnl config fail, ret %d\n", ret);
+			goto free_irq;
+		}
+	}
+
+	goto end;
+free_irq:
+	if (rf->msix_shared && !ceq_id)
+		sxe2_destroy_irq(rf, msix_vec, rf);
+	else
+		sxe2_destroy_irq(rf, msix_vec, ceq);
+end:
+	return ret;
+}
+#endif
+
+#ifndef NO_HAVE_TASKLET_SETUP
+static int sxe2_cfg_aeq_vector(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func;
+	struct sxe2_rdma_msix_vector *msix_vec = rf->sxe2_msixtbl;
+	int ret = 0;
+
+	if (!rf->msix_shared) {
+		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+			 "sxe2rdma-%s-AEQ", dev_name(&rf->pcidev->dev));
+		tasklet_setup(&rf->dpc_tasklet, sxe2_eq_cb);
+		ret = request_irq(msix_vec->irq, sxe2_eq_handler, 0,
+				  msix_vec->name, rf);
+	}
+
+	if (ret) {
+		DRV_RDMA_LOG_DEV_ERR("ERR: aeq irq config fail\n");
+		return ret;
+	}
+
+	if (rf->ctx_dev.privileged) {
+		rf->ctx_dev.irq_ops->sxe2_rdma_cfg_aeq(&rf->ctx_dev,
+						       msix_vec->idx, true);
+	} else {
+		ret = sxe2_vchnl_cfg_aeq(&rf->ctx_dev, msix_vec->idx, true);
+		if (ret)
+			DRV_RDMA_LOG_DEV_ERR(
+				"ERR: aeq vchnl config fail, ret %d\n", ret);
+	}
+
+	return ret;
+}
+#else
+static int sxe2_cfg_aeq_vector(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func;
+	struct sxe2_rdma_msix_vector *msix_vec = rf->sxe2_msixtbl;
+	int ret = 0;
+
+	if (!rf->msix_shared) {
+		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+			 "sxe2rdma-%s-AEQ", dev_name(&rf->pcidev->dev));
+		ret = request_threaded_irq(msix_vec->irq, NULL,
+					   sxe2_aeq_ceq0_cb, IRQF_ONESHOT,
+					   msix_vec->name, rf);
+	}
+
+	if (ret) {
+		DRV_RDMA_LOG_ERROR_BDF("ERR: aeq irq config fail\n");
+		return ret;
+	}
+
+	if (rf->ctx_dev.privileged) {
+		rf->ctx_dev.irq_ops->sxe2_rdma_cfg_aeq(&rf->ctx_dev,
+						       msix_vec->idx, true);
+	} else {
+		ret = sxe2_vchnl_cfg_aeq(&rf->ctx_dev, msix_vec->idx, true);
+		if (ret)
+			DRV_RDMA_LOG_ERROR_BDF(
+				"ERR: aeq vchnl config fail, ret %d\n", ret);
+	}
+
+	return ret;
+}
+
+#endif
+int sxe2_drv_ceq_create(struct sxe2_rdma_ctx_ceq *ceq, u64 scratch,
+			bool post_sq)
+{
+	__le64 *wqe;
+	struct sxe2_eq_wqe *eq_wqe;
+	struct sxe2_mq_ctx *mq;
+	struct sxe2_rdma_device *rdma_dev;
+	int ret;
+
+	mq = ceq->dev->mq;
+	rdma_dev = to_rdmadev(mq->dev);
+
+#if defined(SXE2_CFG_DEBUG) && 
defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_CREATE_CEQ]) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + MQ_OP_CREATE_CEQ); + ret = -EBUSY; + goto end; + } +#endif + + if (rdma_dev->rdma_func->reset) + return -EBUSY; + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = ceq->eqc.oi; + eq_wqe->vsi_index = ceq->eqc.vsi_index; + eq_wqe->sw_owner_bit = ceq->eqc.sw_owner_bit; + eq_wqe->sw_status = ceq->eqc.sw_status; + eq_wqe->uar_page = ceq->eqc.uar_page; + eq_wqe->log_eq_size = ceq->eqc.log_eq_size; + eq_wqe->pbl_mode = ceq->eqc.pbl_mode; + eq_wqe->TPH_value = ceq->eqc.TPH_value; + eq_wqe->TPH_en = ceq->eqc.TPH_en; + eq_wqe->page_offset = ceq->eqc.page_offset; + eq_wqe->log_page_size = ceq->eqc.log_page_size; + eq_wqe->pbl_index = ceq->eqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_CREATE_CEQ; + eq_wqe->eqn = ceq->ceq_id; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_size"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + ret = 0; + +end: + return ret; +} + +static int sxe2_ctx_mceq_create_done(struct sxe2_rdma_ctx_ceq *ceq) +{ + struct sxe2_mq_ctx *mq; + + mq = ceq->dev->mq; + return sxe2_kpoll_mcq(mq, SXE2_MQ_OP_CREATE_CEQ, NULL); +} + +static int sxe2_ctx_mceq_create(struct sxe2_rdma_ctx_ceq *ceq, u64 scratch) +{ + int ret; + struct sxe2_rdma_ctx_dev *dev = ceq->dev; + + dev->mcq->vsi = ceq->vsi; + ret = sxe2_drv_ceq_create(ceq, scratch, true); + if (!ret) + ret = sxe2_ctx_mceq_create_done(ceq); + + return ret; +} + +static int sxe2_mq_ceq_cmd(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ctx_ceq, u8 op) +{ + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + int ret; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->post_mq = 1; + mq_info->mq_cmd = op; + mq_info->in.u.ceq_ops.ceq = ctx_ceq; + mq_info->in.u.ceq_ops.scratch = (uintptr_t)mq_request; + if (op == MQ_OP_DESTROY_CEQ) + mq_info->destroy = true; + + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle ceq failed, ret (%d)\n", ret); + +end: + return ret; +} + +static int sxe2_drv_ceq_ctx_init(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ceq_ctx, + struct sxe2_ceq_init_info *info) +{ + u32 st_mode = 0; + s32 cpu_id = 0; + + if (info->ceq_id > (info->dev->rcms_info->max_ceqs - 1)) { + DRV_RDMA_LOG_DEV_ERR("ceq id:%d more than max_ceqs:%d.", + info->ceq_id, + info->dev->rcms_info->max_ceqs); + return -EINVAL; + } + + ceq_ctx->size = info->ceq_size; + if 
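/*
 * The management-queue verbs in this file share the post-then-poll
 * shape visible in sxe2_ctx_mceq_create() above: build the WQE, ring
 * the MQ, then synchronously poll the management CQ for the matching
 * opcode:
 *
 *	ret = sxe2_drv_ceq_create(ceq, scratch, true);	// post + kick
 *	if (!ret)
 *		ret = sxe2_kpoll_mcq(mq, SXE2_MQ_OP_CREATE_CEQ, NULL);
 *
 * Only the very first CEQ is created this way; once dev->ceq_valid is
 * set, the same operation goes through the asynchronous mq_request path
 * in sxe2_mq_ceq_cmd().
 */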
(rdma_dev->cache_line_64_en == false) + ceq_ctx->ceqe_base = info->ceqe_base; + else + ceq_ctx->ceqe_hygon_base = info->ceqe_hygon_base; + + ceq_ctx->dev = &(rdma_dev->rdma_func->ctx_dev); + ceq_ctx->vsi = &rdma_dev->vsi; + ceq_ctx->polarity = 1; + ceq_ctx->ceq_id = info->ceq_id; + ceq_ctx->cons_index = 0; + ceq_ctx->doorbell = info->doorbell; + SXE2_RING_INIT(ceq_ctx->ceq_ring, ceq_ctx->size); + ceq_ctx->dev->ceq[info->ceq_id] = ceq_ctx; + ceq_ctx->eqc.oi = rdma_dev->rdma_func->oi; + ceq_ctx->eqc.vsi_index = info->eqc.vsi_index; + ceq_ctx->eqc.sw_owner_bit = 0; + ceq_ctx->eqc.sw_status = 0xA; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "ceq_sw_status_err", rdma_dev, ceq_ctx); +#endif + ceq_ctx->eqc.uar_page = info->eqc.uar_page; + ceq_ctx->eqc.log_eq_size = info->eqc.log_eq_size; + ceq_ctx->eqc.pbl_mode = info->eqc.pbl_mode; + ceq_ctx->eqc.TPH_en = sxe2_drv_core_is_tph_enable( + rdma_dev, SXE2_KERNEL_TPH_EN_DEFAULT, &st_mode); + if (ceq_ctx->eqc.TPH_en) { + if (st_mode != MODE_NO_ST) + cpu_id = TPH_GET_CPU(); + ceq_ctx->eqc.TPH_value = + (cpu_id & TPH_CPUID_MASK) | (PH_DWHR << 8); + } + ceq_ctx->eqc.page_offset = info->eqc.page_offset; + ceq_ctx->eqc.log_page_size = info->eqc.log_page_size; + ceq_ctx->eqc.pbl_index = info->eqc.pbl_index; + + DRV_RDMA_LOG_DEV_INFO("ceq_ctx db map :%p\n", ceq_ctx->doorbell); + + DRV_RDMA_LOG_DEV_DEBUG( + "ceq:%d ceqc : ceqsz %#x, pbl %#llx\n" + "pgsz %#x, pg_offset %#x, pblm %#x, vsi_id %#x, oi %#x, sw_owner_bit %#x\n" + "sw_status %#x, uar_page %#x, TPH_value %#x, TPH_en %#x\n", + ceq_ctx->ceq_id, ceq_ctx->eqc.log_eq_size, + ceq_ctx->eqc.pbl_index, ceq_ctx->eqc.log_page_size, + ceq_ctx->eqc.page_offset, ceq_ctx->eqc.pbl_mode, + ceq_ctx->eqc.vsi_index, ceq_ctx->eqc.oi, + ceq_ctx->eqc.sw_owner_bit, ceq_ctx->eqc.sw_status, + ceq_ctx->eqc.uar_page, ceq_ctx->eqc.TPH_value, + ceq_ctx->eqc.TPH_en); + return 0; +} + +static int sxe2_create_ceq_pble(struct sxe2_rdma_pci_f *rf, struct sxe2_rdma_ceq *ceq) +{ + u32 pg_cnt; + int ret; + u32 i; + u64 temp_liner_addr; + u8 *addr; + struct page *vm_page; + struct sxe2_ceq_pble_buf *mem; + struct sxe2_rdma_device *rdma_dev = rf->rdma_dev; + + pg_cnt = DIV_ROUND_UP(ceq->mem.size, PAGE_SIZE); + ceq->mem.va = vzalloc(ceq->mem.size); + if (ceq->mem.va == NULL) { + ret = -ENOMEM; + goto end; + } + + ceq->pble_buf = + kcalloc(pg_cnt, sizeof(struct sxe2_rdma_dma_mem), GFP_KERNEL); + if (unlikely(!ceq->pble_buf)) { + DRV_RDMA_LOG_DEV_ERR("buf alloc err npages %d, sz %lu\n", + pg_cnt, sizeof(struct sxe2_rdma_dma_mem)); + ret = -ENOMEM; + goto free_va; + } + + ret = sxe2_pbl_get_pble(rf->pble_rsrc, &ceq->palloc, pg_cnt, + PBL_OBJ_EQ); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq get pble failed. 
ret:%d\n", ret); + goto free_mem; + } + + temp_liner_addr = ceq->palloc.pble_info.liner_addr; + + addr = (u8 *)(uintptr_t)ceq->mem.va; + for (i = 0; i < pg_cnt; i++) { + vm_page = vmalloc_to_page(addr); + if (vm_page == NULL) { + ret = -ENOMEM; + goto free_dma; + } + + mem = &ceq->pble_buf[i]; + mem->map = dma_map_page(rf->hw.device, vm_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(rf->hw.device, mem->map)) { + ret = -ENOMEM; + goto free_dma; + } + + ret = sxe2_pbl_set_pble(rf->pble_rsrc, temp_liner_addr, + mem->map, false); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("set pble failed %d\n", ret); + goto free_dma; + } + + temp_liner_addr += sizeof(u64); + addr += PAGE_SIZE; + } + + goto end; + +free_dma: + while (i--) { + mem = &ceq->pble_buf[i]; + dma_unmap_page(rf->hw.device, mem->map, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + +free_mem: + kfree(ceq->pble_buf); + ceq->pble_buf = NULL; +free_va: + vfree(ceq->mem.va); + ceq->mem.va = NULL; +end: + return ret; +} + +static void sxe2_destroy_ceq_pble(struct sxe2_rdma_pci_f *rf, struct sxe2_rdma_ceq *ceq) +{ + u32 i; + struct sxe2_ceq_pble_buf *mem; + + sxe2_pbl_free_pble(rf->pble_rsrc, ceq->palloc.pble_info.liner_addr, + ceq->palloc.needed_pble_cnt, false); + + for (i = 0; i < ceq->palloc.needed_pble_cnt; i++) { + mem = &ceq->pble_buf[i]; + dma_unmap_page(rf->hw.device, mem->map, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + kfree(ceq->pble_buf); + ceq->pble_buf = NULL; + vfree(ceq->mem.va); + ceq->mem.va = NULL; +} + +static int sxe2_rdma_create_ceq(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq, u32 ceq_id, + struct sxe2_rdma_ctx_vsi *vsi) +{ + int ret; + struct sxe2_ceq_init_info info = {}; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + u64 scratch; + u32 ceq_size = 0; + + info.ceq_id = ceq_id; + ceq->rf = rf; + ceq_size = min(rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].cnt, + dev->hw_attrs.max_hw_ceq_size); + ceq_size = (unsigned long)roundup_pow_of_two((unsigned long)ceq_size); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "ceq_ci_noupdate", rdma_dev, &ceq_size, ceq_id); +#endif + if ((ceq_size < SXE2_MIN_EQ_SIZE) || (ceq_size > SXE2_MAX_EQ_SIZE)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("ceq size(%d) invalid.ret:%d", ceq_size, + ret); + goto end; + } + + if (rdma_dev->cache_line_64_en == false) { + ceq->mem.size = (u32)ALIGN(sizeof(struct sxe2_eqe) * ceq_size, + SXE2_CEQ_ALIGNMENT); + DRV_RDMA_LOG_DEV_DEBUG( + "ceq:no_hygon,EQE=32B, ceq->mem.size %#x\n", + ceq->mem.size); + } else { + ceq->mem.size = + (u32)ALIGN(sizeof(struct sxe2_eqe_hygon) * ceq_size, + SXE2_CEQ_ALIGNMENT); + DRV_RDMA_LOG_DEV_DEBUG("ceq:hygon,EQE=64B, ceq->mem.size %#x\n", + ceq->mem.size); + } + + ceq->mem.va = dma_alloc_coherent(dev->hw->device, ceq->mem.size, + &ceq->mem.pa, GFP_KERNEL); + if (ceq->mem.va) { + memset(ceq->mem.va, 0, ceq->mem.size); + info.ceqe_pa = ceq->mem.pa; + info.eqc.pbl_mode = CQ_EQ_PA_FIRST_MODE; + info.eqc.pbl_index = info.ceqe_pa; + goto skip_ceq_pble; + } + + ret = sxe2_create_ceq_pble(rf, ceq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq mem create use pble failed. 
ret:%d\n", + ret); + goto end; + } + + info.eqc.pbl_index = ceq->palloc.pbl_index; + info.eqc.pbl_mode = ceq->palloc.pbl_mode.cq_eq_mode; + info.pble_map = true; + ceq->pble_map = info.pble_map; + +skip_ceq_pble: + + info.ceq_id = ceq_id; + info.ceq_size = ceq_size; + if (rdma_dev->cache_line_64_en == false) + info.ceqe_base = ceq->mem.va; + else + info.ceqe_hygon_base = ceq->mem.va; + + info.dev = dev; + info.vsi = vsi; + DRV_RDMA_LOG_DEV_INFO("ceq db map:%p\n", rf->db->map); + info.doorbell = rf->db->map + SXE2_RDMA_DB_EQ_INFO_OFFSET; + DRV_RDMA_LOG_DEV_INFO("ceq db map offset 4 :%p\n", info.doorbell); + + info.eqc.vsi_index = vsi->vsi_idx; + info.eqc.log_eq_size = order_base_2(ceq_size); + info.eqc.log_page_size = order_base_2(PAGE_SIZE); + info.eqc.page_offset = offset_in_page(ceq->mem.pa); + + ret = sxe2_drv_ceq_ctx_init(rdma_dev, &ceq->ctx_ceq, &info); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("ceq ctx init failed.ret:%d pble_map %d\n", + ret, ceq->pble_map); + goto free_dma; + } + + if (dev->ceq_valid) { + ret = sxe2_mq_ceq_cmd(rdma_dev, &ceq->ctx_ceq, + MQ_OP_CREATE_CEQ); + } else { + scratch = (uintptr_t)&rf->mq.mq; + ret = sxe2_ctx_mceq_create(&ceq->ctx_ceq, scratch); + } + + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ceq create failed.ret:%d pble_map %d\n", + ret, ceq->pble_map); + goto free_dma; + } + + DRV_RDMA_LOG_DEV_INFO("ceq:%d create success.ret:%d pble_map:%d\n", + ceq_id, ret, ceq->pble_map); + goto end; + +free_dma: + if (ceq->pble_map) { + sxe2_destroy_ceq_pble(rf, ceq); + } else { + dma_free_coherent(dev->hw->device, ceq->mem.size, ceq->mem.va, + ceq->mem.pa); + ceq->mem.va = NULL; + } + +end: + return ret; +} + +int sxe2_drv_ceq_destroy(struct sxe2_rdma_ctx_ceq *ceq, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = ceq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = ceq->eqc.oi; + eq_wqe->vsi_index = ceq->eqc.vsi_index; + eq_wqe->sw_owner_bit = ceq->eqc.sw_owner_bit; + eq_wqe->sw_status = ceq->eqc.sw_status; + eq_wqe->uar_page = ceq->eqc.uar_page; + eq_wqe->log_eq_size = ceq->eqc.log_eq_size; + eq_wqe->pbl_mode = ceq->eqc.pbl_mode; + eq_wqe->TPH_value = ceq->eqc.TPH_value; + eq_wqe->TPH_en = ceq->eqc.TPH_en; + eq_wqe->page_offset = ceq->eqc.page_offset; + eq_wqe->log_page_size = ceq->eqc.log_page_size; + eq_wqe->pbl_index = ceq->eqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_DESTROY_CEQ; + eq_wqe->eqn = ceq->ceq_id; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +static int sxe2_ctx_mceq_destroy_done(struct sxe2_rdma_ctx_ceq *ceq) +{ + struct sxe2_mq_ctx *mq; + + mq = ceq->dev->mq; + mq->process_mq_fpt = sxe2_rcms_update_fptes_cmd; + + return sxe2_kpoll_mcq(mq, SXE2_MQ_OP_DESTROY_CEQ, NULL); +} + +static void sxe2_rdma_destroy_ceq(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + int ret; + bool hw_rsrc_clean = false; + unsigned long flags = 0; + + hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(&rf->ctx_dev); + if (rf->reset && hw_rsrc_clean) { + DRV_RDMA_LOG_DEV_DEBUG("already reset\n"); + goto exit; + } + + spin_lock_irqsave(&rf->ctx_dev.mq_lock, 
flags); + ret = sxe2_drv_ceq_destroy(&ceq->ctx_ceq, 0, 1); + spin_unlock_irqrestore(&rf->ctx_dev.mq_lock, flags); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: CEQ destroy command failed %d\n", + ret); + goto exit; + } + + ret = sxe2_ctx_mceq_destroy_done(&ceq->ctx_ceq); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_DESTROY_CEQ] && !ret) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + MQ_OP_DESTROY_CEQ); + ret = -EBUSY; + } +#endif + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: CEQ destroy completion failed %d\n", + ret); + } +exit: + if (ceq->pble_map) { + sxe2_destroy_ceq_pble(rf, ceq); + } else { + dma_free_coherent(dev->hw->device, ceq->mem.size, ceq->mem.va, + ceq->mem.pa); + ceq->mem.va = NULL; + } +} + +void sxe2_del_mceq(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ceq *ceq = rf->ceqlist; + struct sxe2_rdma_msix_vector *msix_vec; + int ret = 0; + + if (rf->msix_shared) { + msix_vec = &rf->sxe2_msixtbl[0]; + DRV_RDMA_LOG_DEV_DEBUG( + "ceq unmap in, ceq_rel_id=%#x, ceq_abs_id=%#x, idx=%#x\n", + msix_vec->ceq_id, msix_vec->ceq_abs_id, msix_vec->idx); + if (rf->ctx_dev.privileged) { + rf->ctx_dev.irq_ops->sxe2_rdma_cfg_ceq( + &rf->ctx_dev, msix_vec->ceq_abs_id, + msix_vec->idx, false); + } else { + ret = sxe2_vchnl_cfg_ceq(&rf->ctx_dev, + msix_vec->ceq_abs_id, + msix_vec->idx, false); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF( + "ceq %#x unmap fail, ceq_abs_id=%#x, ret %d\n", + msix_vec->ceq_id, msix_vec->ceq_abs_id, + ret); + } + } + sxe2_destroy_irq(rf, msix_vec, rf); + } else { + msix_vec = &rf->sxe2_msixtbl[1]; + DRV_RDMA_LOG_DEV_DEBUG( + "ceq unmap in, ceq_rel_id=%#x, ceq_abs_id=%#x, idx=%#x\n", + msix_vec->ceq_id, msix_vec->ceq_abs_id, msix_vec->idx); + if (rf->ctx_dev.privileged) { + rf->ctx_dev.irq_ops->sxe2_rdma_cfg_ceq( + &rf->ctx_dev, msix_vec->ceq_abs_id, + msix_vec->idx, false); + } else { + ret = sxe2_vchnl_cfg_ceq(&rf->ctx_dev, + msix_vec->ceq_abs_id, + msix_vec->idx, false); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF( + "ceq %#x unmap fail, ceq_abs_id=%#x, ret %d\n", + msix_vec->ceq_id, msix_vec->ceq_abs_id, + ret); + } + } + sxe2_destroy_irq(rf, msix_vec, ceq); + } + + sxe2_kuninit_mq_handler(rdma_dev); + + drv_rdma_debug_ceq_remove(rdma_dev, ceq); + sxe2_rdma_destroy_ceq(rdma_dev, ceq); + rf->ctx_dev.ceq_valid = false; + rf->ceqs_count = 0; +} + +void sxe2_del_ceqs(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ceq *ceq = &rf->ceqlist[1]; + struct sxe2_rdma_msix_vector *msix_vec; + u32 i = 0; + int ret; + + if (rf->msix_shared) + msix_vec = &rf->sxe2_msixtbl[1]; + else + msix_vec = &rf->sxe2_msixtbl[2]; + + for (i = 1; i < rf->ceqs_count; i++, msix_vec++, ceq++) { + if (rf->ctx_dev.privileged) { + rf->ctx_dev.irq_ops->sxe2_rdma_cfg_ceq( + &rf->ctx_dev, msix_vec->ceq_abs_id, + msix_vec->idx, false); + } else { + ret = sxe2_vchnl_cfg_ceq(&rf->ctx_dev, + msix_vec->ceq_abs_id, + msix_vec->idx, false); + if (ret) + DRV_RDMA_LOG_ERROR_BDF( + "ceq %#x unmap fail, ceq_abs_id=%#x, ret %d\n", + msix_vec->ceq_id, msix_vec->ceq_abs_id, + ret); + } + + sxe2_destroy_irq(rf, msix_vec, ceq); + + drv_rdma_debug_ceq_remove(rdma_dev, ceq); + + ret = sxe2_mq_ceq_cmd(rdma_dev, &ceq->ctx_ceq, + MQ_OP_DESTROY_CEQ); + if (ret) + DRV_RDMA_LOG_DEV_ERR( + "ERR: CEQ:%d destroy command failed %d\n", + ceq->ctx_ceq.ceq_id, ret); + if (ceq->pble_map) { + sxe2_destroy_ceq_pble(rf, ceq); + } else { + 
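/*
 * MSI-X table layout assumed by the setup/teardown pairs around here:
 *
 *	msix_shared:   [0] AEQ + CEQ0 combined, [1..] CEQ1, CEQ2, ...
 *	!msix_shared:  [0] AEQ, [1] CEQ0, [2..] CEQ1, CEQ2, ...
 *
 * which is why sxe2_del_mceq() above picks sxe2_msixtbl[0] or [1],
 * sxe2_del_ceqs() starts from [1] or [2], and sxe2_setup_ceqs() later
 * in this file begins its loop at msix_cnt = 1 or 2 for ceq_id = 1.
 */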
dma_free_coherent(rf->ctx_dev.hw->device, ceq->mem.size, + ceq->mem.va, ceq->mem.pa); + ceq->mem.va = NULL; + } + } + rf->ceqs_count = 1; +} + +int sxe2_setup_mceq(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_ceq *ceq; + struct sxe2_rdma_msix_vector *msix_vec; + u32 i; + int ret = 0; + u32 num_ceqs = 0; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + + num_ceqs = min(rf->msix_count, rf->ctx_dev.rcms_info->max_ceqs); + rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); + if (!rf->ceqlist) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("ERR: ceq buf alloc failed. ret = %d\n", + ret); + goto exit; + } + + ceq = &rf->ceqlist[0]; + ret = sxe2_rdma_create_ceq(rdma_dev, ceq, 0, &rf->default_vsi); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: create ceq ret = %d\n", ret); + goto exit; + } + + ret = drv_rdma_debug_ceq_add(rdma_dev, ceq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding ceq:%d to debug file system, ret (%d)\n", + 0, ret); + sxe2_rdma_destroy_ceq(rdma_dev, ceq); + goto exit; + } + + spin_lock_init(&ceq->ce_lock); + i = rf->msix_shared ? 0 : 1; + msix_vec = &rf->sxe2_msixtbl[i]; + ceq->irq = msix_vec->irq; + ceq->msix_idx = msix_vec->idx; + ret = sxe2_cfg_ceq_vector(rdma_dev, ceq, 0, msix_vec); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: cfg ceq:%d vector failed. ret = %d\n", 0, ret); + + drv_rdma_debug_ceq_remove(rdma_dev, ceq); + sxe2_rdma_destroy_ceq(rdma_dev, ceq); + goto exit; + } + + sxe2_rdma_ena_intr(&rf->ctx_dev, msix_vec->idx); + rf->ceqs_count++; + rf->ctx_dev.ceq_valid = true; + +exit: + if (ret && !rf->ceqs_count) { + kfree(rf->ceqlist); + rf->ceqlist = NULL; + } + + return ret; +} + +int sxe2_setup_ceqs(struct sxe2_rdma_device *rdma_dev) +{ + u32 msix_cnt; + u32 ceq_id; + struct sxe2_rdma_ceq *ceq; + struct sxe2_rdma_msix_vector *msix_vec; + int ret; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + + msix_cnt = (rf->msix_shared) ? 1 : 2; + for (ceq_id = 1; ceq_id < rf->ctx_dev.rcms_info->max_ceqs + && msix_cnt < rf->msix_count; msix_cnt++, ceq_id++) { + ceq = &rf->ceqlist[ceq_id]; + ret = sxe2_rdma_create_ceq(rdma_dev, ceq, ceq_id, + &rdma_dev->vsi); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: create ceq ret = %d\n", ret); + goto del_ceqs; + } + spin_lock_init(&ceq->ce_lock); + + ret = drv_rdma_debug_ceq_add(rdma_dev, ceq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding ceq:%d to debug file system, ret (%d)\n", + ceq_id, ret); + sxe2_rdma_destroy_ceq(rdma_dev, ceq); + goto del_ceqs; + } + + msix_vec = &rf->sxe2_msixtbl[msix_cnt]; + ceq->irq = msix_vec->irq; + ceq->msix_idx = msix_vec->idx; + ret = sxe2_cfg_ceq_vector(rdma_dev, ceq, ceq_id, msix_vec); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: cfg ceq:%d vector failed. 
ret = %d\n", + ceq_id, ret); + drv_rdma_debug_ceq_remove(rdma_dev, ceq); + sxe2_rdma_destroy_ceq(rdma_dev, ceq); + goto del_ceqs; + } + sxe2_rdma_ena_intr(&rf->ctx_dev, msix_vec->idx); + rf->ceqs_count++; + } + + return 0; + +del_ceqs: + sxe2_del_ceqs(rdma_dev); + return ret; +} + +int sxe2_drv_aeq_create(struct sxe2_rdma_ctx_aeq *aeq, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = aeq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = aeq->eqc.oi; + eq_wqe->vsi_index = aeq->eqc.vsi_index; + eq_wqe->sw_owner_bit = aeq->eqc.sw_owner_bit; + eq_wqe->sw_status = aeq->eqc.sw_status; + eq_wqe->uar_page = aeq->eqc.uar_page; + eq_wqe->log_eq_size = aeq->eqc.log_eq_size; + eq_wqe->pbl_mode = aeq->eqc.pbl_mode; + eq_wqe->TPH_value = aeq->eqc.TPH_value; + eq_wqe->TPH_en = aeq->eqc.TPH_en; + eq_wqe->page_offset = aeq->eqc.page_offset; + eq_wqe->log_page_size = aeq->eqc.log_page_size; + eq_wqe->pbl_index = aeq->eqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_CREATE_AEQ; + eq_wqe->eqn = 0; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + switch (rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type) { + case AEQN_DEBUGFS: + eq_wqe->eqn = + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val; + DRV_RDMA_LOG_DEV_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori ceqn 0, err ceqn %#llx\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val, + (u64)eq_wqe->eqn); + break; + case AEQC_SW_STATE_DEBUGFS: + eq_wqe->sw_status = + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val; + DRV_RDMA_LOG_DEV_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori ceqc_state %#llx, err ceqc_state %#llx\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val, + (u64)aeq->eqc.sw_status, (u64)eq_wqe->sw_status); + break; + case AEQC_SIZE_DEBUGFS: + eq_wqe->log_eq_size = + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val; + DRV_RDMA_LOG_DEV_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori ceqc_size %#llx, err ceqc_size %#llx\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val, + (u64)aeq->eqc.log_eq_size, (u64)eq_wqe->log_eq_size); + break; + default: + break; + } +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_aeq_destroy(struct sxe2_rdma_ctx_aeq *aeq, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_dev *dev; + int ret; + + mq = aeq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + + dev = aeq->dev; + if (dev->privileged) + writel(0, dev->hw_regs[PF_INT_AEQCTL]); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = aeq->eqc.oi; + eq_wqe->vsi_index = aeq->eqc.vsi_index; + eq_wqe->sw_owner_bit = aeq->eqc.sw_owner_bit; + eq_wqe->sw_status = aeq->eqc.sw_status; + 
eq_wqe->uar_page = aeq->eqc.uar_page; + eq_wqe->log_eq_size = aeq->eqc.log_eq_size; + eq_wqe->pbl_mode = aeq->eqc.pbl_mode; + eq_wqe->TPH_value = aeq->eqc.TPH_value; + eq_wqe->TPH_en = aeq->eqc.TPH_en; + eq_wqe->page_offset = aeq->eqc.page_offset; + eq_wqe->log_page_size = aeq->eqc.log_page_size; + eq_wqe->pbl_index = aeq->eqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_DESTROY_AEQ; + eq_wqe->eqn = 0; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +static int sxe2_mq_aeq_cmd(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *aeq_ctx, u8 op) +{ + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *cqp_info; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + int ret; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + cqp_info = &mq_request->info; + cqp_info->post_mq = 1; + cqp_info->mq_cmd = op; + cqp_info->in.u.aeq_ops.aeq = aeq_ctx; + cqp_info->in.u.aeq_ops.scratch = (uintptr_t)mq_request; + if (op == MQ_OP_DESTROY_AEQ) + cqp_info->destroy = true; + + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle aeq failed, ret (%d)\n", ret); + +end: + return ret; +} + +static int sxe2_drv_aeq_ctx_init(struct sxe2_rdma_ctx_aeq *aeq_ctx, + struct sxe2_aeq_init_info *info) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(info->dev); + u32 st_mode = 0; + s32 cpu_id = 0; + + aeq_ctx->size = info->aeq_size; + if (rdma_dev->cache_line_64_en == false) + aeq_ctx->aeqe_base = info->aeqe_base; + else + aeq_ctx->aeqe_hygon_base = info->aeqe_hygon_base; + + aeq_ctx->dev = info->dev; + aeq_ctx->vsi = info->vsi; + aeq_ctx->polarity = 1; + aeq_ctx->cons_index = 0; + aeq_ctx->doorbell = info->doorbell; + aeq_ctx->msix_idx = info->msix_idx; + aeq_ctx->irq = info->irq; + SXE2_RING_INIT(aeq_ctx->aeq_ring, aeq_ctx->size); + aeq_ctx->dev->aeq = aeq_ctx; + + if (info->dev->privileged) + aeq_ctx->eqc.oi = rdma_dev->rdma_func->oi; + else + aeq_ctx->eqc.oi = 1; + + aeq_ctx->eqc.vsi_index = info->eqc.vsi_index; + aeq_ctx->eqc.sw_owner_bit = 0; + aeq_ctx->eqc.sw_status = 0xA; + aeq_ctx->eqc.uar_page = info->eqc.uar_page; + aeq_ctx->eqc.log_eq_size = info->eqc.log_eq_size; + aeq_ctx->eqc.pbl_mode = info->eqc.pbl_mode; + aeq_ctx->eqc.TPH_en = sxe2_drv_core_is_tph_enable( + rdma_dev, SXE2_KERNEL_TPH_EN_DEFAULT, &st_mode); + if (aeq_ctx->eqc.TPH_en) { + if (st_mode != MODE_NO_ST) + cpu_id = TPH_GET_CPU(); + aeq_ctx->eqc.TPH_value = + (cpu_id & TPH_CPUID_MASK) | (PH_DWHR << 8); + } + aeq_ctx->eqc.page_offset = info->eqc.page_offset; + aeq_ctx->eqc.log_page_size = info->eqc.log_page_size; + aeq_ctx->eqc.pbl_index = info->eqc.pbl_index; + + DRV_RDMA_LOG_DEV_DEBUG( + "aeqc : aeqsz %#x, pbl_l %#llx,\n" + "pgsz %#x, pg_offset %#x, pblm %#x, vsi_id %#x, oi %#x, sw_owner_bit %#x\n" + "sw_status %#x, uar_page %#x, TPH_value %#x, TPH_en %#x\n", + aeq_ctx->eqc.log_eq_size, aeq_ctx->eqc.pbl_index, + aeq_ctx->eqc.log_page_size, aeq_ctx->eqc.page_offset, + aeq_ctx->eqc.pbl_mode, aeq_ctx->eqc.vsi_index, aeq_ctx->eqc.oi, + aeq_ctx->eqc.sw_owner_bit, aeq_ctx->eqc.sw_status, + aeq_ctx->eqc.uar_page, aeq_ctx->eqc.TPH_value, + aeq_ctx->eqc.TPH_en); + + return 0; +} + +static int sxe2_create_aeq_pble(struct sxe2_rdma_pci_f *rf, u32 size) +{ + u32 pg_cnt; + int ret; + u32 i; + u64 temp_liner_addr; + u8 *addr; + 
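/*
 * The code below backs the AEQ ring with vmalloc memory and hands the
 * hardware a page-by-page PBL. The core idiom, shown standalone with
 * error handling elided (write_pble_entry() stands in for
 * sxe2_pbl_set_pble()):
 *
 *	u8 *buf = vzalloc(size);
 *
 *	for (i = 0; i < npages; i++) {
 *		struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *		dma_addr_t da = dma_map_page(dev, pg, 0, PAGE_SIZE,
 *					     DMA_BIDIRECTIONAL);
 *
 *		if (dma_mapping_error(dev, da))
 *			goto unwind;
 *		write_pble_entry(pbl + i, da);	// one u64 per page
 *	}
 *
 * vmalloc memory is only virtually contiguous, so every PAGE_SIZE chunk
 * must be translated and mapped individually; the unwind path walks i
 * backwards with dma_unmap_page(), exactly as in the code below.
 */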
struct page *vm_page; + struct sxe2_aeq_pble_buf *mem; + struct sxe2_rdma_device *rdma_dev = rf->rdma_dev; + struct sxe2_rdma_aeq *aeq = &rf->aeq; + + if (rdma_dev->cache_line_64_en == false) + aeq->mem.size = (u32)sizeof(struct sxe2_eqe) * size; + else + aeq->mem.size = (u32)sizeof(struct sxe2_eqe_hygon) * size; + + pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); + + aeq->mem.va = vzalloc(aeq->mem.size); + if (aeq->mem.va == NULL) { + ret = -ENOMEM; + goto end; + } + + aeq->pble_buf = + kcalloc(pg_cnt, sizeof(struct sxe2_rdma_dma_mem), GFP_KERNEL); + if (unlikely(!aeq->pble_buf)) { + DRV_RDMA_LOG_DEV_ERR("buf alloc err npages %d, sz %lu\n", + pg_cnt, sizeof(struct sxe2_rdma_dma_mem)); + ret = -ENOMEM; + goto free_va; + } + + ret = sxe2_pbl_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, + PBL_OBJ_EQ); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq get pble failed. ret:%d\n", ret); + goto free_mem; + } + + temp_liner_addr = aeq->palloc.pble_info.liner_addr; + + addr = (u8 *)(uintptr_t)aeq->mem.va; + for (i = 0; i < pg_cnt; i++) { + vm_page = vmalloc_to_page(addr); + if (vm_page == NULL) { + ret = -ENOMEM; + goto free_dma; + } + + mem = &aeq->pble_buf[i]; + mem->map = dma_map_page(rf->hw.device, vm_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(rf->hw.device, mem->map)) { + ret = -ENOMEM; + goto free_dma; + } + + ret = sxe2_pbl_set_pble(rf->pble_rsrc, temp_liner_addr, + mem->map, false); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("set pble failed %d\n", ret); + goto free_dma; + } + + temp_liner_addr += sizeof(u64); + addr += PAGE_SIZE; + } + + goto end; + +free_dma: + while (i--) { + mem = &aeq->pble_buf[i]; + dma_unmap_page(rf->hw.device, mem->map, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + +free_mem: + kfree(aeq->pble_buf); + aeq->pble_buf = NULL; +free_va: + vfree(aeq->mem.va); + aeq->mem.va = NULL; +end: + return ret; +} + +static void sxe2_destroy_aeq_pble(struct sxe2_rdma_pci_f *rf) +{ + u32 i; + struct sxe2_aeq_pble_buf *mem; + struct sxe2_rdma_aeq *aeq = &rf->aeq; + + sxe2_pbl_free_pble(rf->pble_rsrc, aeq->palloc.pble_info.liner_addr, + aeq->palloc.needed_pble_cnt, false); + + for (i = 0; i < aeq->palloc.needed_pble_cnt; i++) { + mem = &aeq->pble_buf[i]; + dma_unmap_page(rf->hw.device, mem->map, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + kfree(aeq->pble_buf); + aeq->pble_buf = NULL; + vfree(aeq->mem.va); + aeq->mem.va = NULL; +} + +static int sxe2_rdma_aeq_alloc_coherent_mem(struct sxe2_rdma_device *rdma_dev, + struct sxe2_aeq_init_info *info) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_rdma_aeq *aeq = &rf->aeq; + int ret = 0; + +#ifndef SXE2_CFG_DEBUG + aeq->mem.va = + dma_alloc_coherent(dev->hw->device, aeq->mem.size, &aeq->mem.pa, + GFP_KERNEL | __GFP_NOWARN); + if (aeq->mem.va) { + memset(aeq->mem.va, 0, aeq->mem.size); + info->aeq_elem_pa = aeq->mem.pa; + info->eqc.pbl_index = info->aeq_elem_pa; + info->eqc.pbl_mode = CQ_EQ_PA_FIRST_MODE; + } else { + ret = -ENOMEM; + DRV_RDMA_LOG_WARN_BDF("aeq dma alloc failed.\n"); + } +#else + if (rf->aeq_pble_en) { + DRV_RDMA_LOG_INFO_BDF("aeq alloc use pble.\n"); + } else { + aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size, + &aeq->mem.pa, + GFP_KERNEL | __GFP_NOWARN); + if (aeq->mem.va) { + memset(aeq->mem.va, 0, aeq->mem.size); + info->aeq_elem_pa = aeq->mem.pa; + info->eqc.pbl_index = info->aeq_elem_pa; + info->eqc.pbl_mode = CQ_EQ_PA_FIRST_MODE; + } else { + ret = -ENOMEM; + DRV_RDMA_LOG_WARN_BDF("aeq dma alloc failed.\n"); + } + } +#endif + + 
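/*
 * __GFP_NOWARN above suppresses the allocation-failure warning because
 * failure of the coherent attempt is an expected, recoverable case: the
 * caller simply falls back to the PBLE path. The idiom reduces to:
 *
 *	va = dma_alloc_coherent(dev, sz, &pa, GFP_KERNEL | __GFP_NOWARN);
 *	if (!va)
 *		return -ENOMEM;	// caller reads this as "use a PBL"
 *
 * In debug builds the aeq_pble_en knob skips the coherent attempt
 * entirely so the PBLE path can be exercised on demand.
 */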
return ret; +} + +static int sxe2_rdma_create_aeq(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_aeq_init_info info = {}; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_rdma_aeq *aeq = &rf->aeq; + u32 aeq_size; + int ret; + + aeq_size = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].cnt + + rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].cnt; + aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); + aeq_size = (u32)roundup_pow_of_two(aeq_size); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "aeq_ci_noupdate", + rdma_dev, &aeq_size); +#endif + + if ((aeq_size < SXE2_MIN_EQ_SIZE) || (aeq_size > SXE2_MAX_EQ_SIZE)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("aeq size(%d) invalid.ret:%d", aeq_size, + ret); + goto end; + } + + if (rdma_dev->cache_line_64_en == false) { + aeq->mem.size = (u32)ALIGN(sizeof(struct sxe2_eqe) * aeq_size, + SXE2_AEQ_ALIGNMENT); + DRV_RDMA_LOG_DEV_DEBUG( + "aeq:no_hygon,EQE=32B, aeq->mem.size %#x", + aeq->mem.size); + } else { + aeq->mem.size = + (u32)ALIGN(sizeof(struct sxe2_eqe_hygon) * aeq_size, + SXE2_AEQ_ALIGNMENT); + DRV_RDMA_LOG_DEV_DEBUG("aeq:hygon,EQE=64B, aeq->mem.size %#x", + aeq->mem.size); + } + + if (!sxe2_rdma_aeq_alloc_coherent_mem(rdma_dev, &info)) + goto skip_aeq_pble; + + ret = sxe2_create_aeq_pble(rf, aeq_size); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq mem create use pble failed. ret:%d\n", + ret); + goto end; + } + + info.eqc.pbl_index = aeq->palloc.pbl_index; + info.eqc.pbl_mode = aeq->palloc.pbl_mode.cq_eq_mode; + info.pble_map = true; + aeq->pble_map = info.pble_map; + +skip_aeq_pble: + if (rdma_dev->cache_line_64_en == false) + info.aeqe_base = aeq->mem.va; + else + info.aeqe_hygon_base = aeq->mem.va; + + info.aeq_size = aeq_size; + info.dev = dev; + info.irq = rf->sxe2_msixtbl->irq; + info.msix_idx = rf->sxe2_msixtbl->idx; + info.vsi = &rf->default_vsi; + DRV_RDMA_LOG_DEV_INFO("aeq db map:%p\n", rf->db->map); + info.doorbell = rf->db->map + SXE2_RDMA_DB_EQ_INFO_OFFSET; + DRV_RDMA_LOG_DEV_INFO("aeq db map offset 4 :%p\n", info.doorbell); + + info.eqc.vsi_index = rf->default_vsi.vsi_idx; + info.eqc.log_eq_size = order_base_2(aeq_size); + info.eqc.log_page_size = order_base_2(PAGE_SIZE); + info.eqc.page_offset = offset_in_page(aeq->mem.pa); + + sxe2_drv_aeq_ctx_init(&aeq->ctx_aeq, &info); + + ret = sxe2_mq_aeq_cmd(rdma_dev, &aeq->ctx_aeq, MQ_OP_CREATE_AEQ); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq create ops failed. 
ret:%d\n", ret); + if (aeq->pble_map) { + sxe2_destroy_aeq_pble(rf); + } else { + dma_free_coherent(dev->hw->device, aeq->mem.size, + aeq->mem.va, aeq->mem.pa); + aeq->mem.va = NULL; + } + } + +end: + return ret; +} + +void sxe2_del_aeq(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_rdma_aeq *aeq = &rf->aeq; + int ret; + + atomic_set(&rf->aeq_created, 0); + + if (rf->ctx_dev.privileged) + rf->ctx_dev.irq_ops->sxe2_rdma_cfg_aeq( + &rf->ctx_dev, rf->sxe2_msixtbl->idx, false); + else + ret = sxe2_vchnl_cfg_aeq(&rf->ctx_dev, rf->sxe2_msixtbl->idx, + false); + + if (!rf->msix_shared) + sxe2_destroy_irq(rf, rf->sxe2_msixtbl, rf); + + drv_rdma_debug_aeq_remove(rdma_dev, aeq); + + aeq->ctx_aeq.size = 0; + ret = sxe2_mq_aeq_cmd(rdma_dev, &aeq->ctx_aeq, MQ_OP_DESTROY_AEQ); + if (ret) + DRV_RDMA_LOG_DEV_ERR("del AEQ failed %d\n", ret); + + if (aeq->pble_map) { + sxe2_destroy_aeq_pble(rf); + } else { + dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } +} + +int sxe2_setup_aeq(struct sxe2_rdma_device *rdma_dev) +{ + int ret; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + + ret = sxe2_rdma_create_aeq(rdma_dev); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: create aeq ret = %d\n", ret); + goto end; + } + + ret = drv_rdma_debug_aeq_add(rdma_dev, &rdma_dev->rdma_func->aeq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "failed adding AEQ to debug file system, ret (%d)\n", + ret); + sxe2_del_aeq(rdma_dev); + goto end; + } + + ret = sxe2_cfg_aeq_vector(rdma_dev); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("ERR: cfg aeq vector failed. ret = %d\n", + ret); + drv_rdma_debug_aeq_remove(rdma_dev, &rdma_dev->rdma_func->aeq); + sxe2_del_aeq(rdma_dev); + goto end; + } + + if (!rf->msix_shared) + sxe2_rdma_ena_intr(&rf->ctx_dev, rf->sxe2_msixtbl[0].idx); + + atomic_set(&rf->aeq_created, 1); +end: + return ret; +} + +int sxe2_drv_ceq_query(struct sxe2_rdma_ctx_ceq *ceq, u64 query_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = ceq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->op = SXE2_MQ_OP_QUERY_CEQ; + eq_wqe->eqn = ceq->ceq_id; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = query_pa >> SXE2_EQC_PHY_ADDR_OFFSET; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_ceq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ceq, u64 query_pa) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_QUERY_CEQ; + mq_info->post_mq = 1; + mq_info->in.u.ceq_query.ceq = ceq; + mq_info->in.u.ceq_query.scratch = 
(uintptr_t)mq_request; + mq_info->in.u.ceq_query.query_pa = query_pa; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle query ceq failed, ret (%d)\n", + ret); + +end: + return ret; +} + +int sxe2_drv_ceq_modify(struct sxe2_rdma_ctx_ceq *ceq, + struct sxe2_rdma_eqc *ceqc, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = ceq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = ceqc->seqc.oi; + eq_wqe->vsi_index = ceqc->seqc.vsi_index; + eq_wqe->sw_owner_bit = ceqc->seqc.sw_owner_bit; + eq_wqe->sw_status = ceqc->seqc.sw_status; + eq_wqe->uar_page = ceqc->seqc.uar_page; + eq_wqe->log_eq_size = ceqc->seqc.log_eq_size; + eq_wqe->pbl_mode = ceqc->seqc.pbl_mode; + eq_wqe->TPH_value = ceqc->seqc.TPH_value; + eq_wqe->TPH_en = ceqc->seqc.TPH_en; + eq_wqe->page_offset = ceqc->seqc.page_offset; + eq_wqe->log_page_size = ceqc->seqc.log_page_size; + eq_wqe->pbl_index = ceqc->seqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_MODIFY_CEQ; + eq_wqe->eqn = ceq->ceq_id; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, eq_wqe, ceq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_size"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_ceq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ceq, + struct sxe2_rdma_eqc *ceqc) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_MODIFY_CEQ; + mq_info->post_mq = 1; + mq_info->in.u.ceq_modify.ceq = ceq; + mq_info->in.u.ceq_modify.scratch = (uintptr_t)mq_request; + mq_info->in.u.ceq_modify.ceqc = ceqc; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle modify ceq failed, ret (%d)\n", + ret); + +end: + return ret; +} + +int sxe2_drv_aeq_query(struct sxe2_rdma_ctx_aeq *aeq, u64 query_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = aeq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->op = SXE2_MQ_OP_QUERY_AEQ; + 
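/*
 * The query verbs return the context by DMA rather than in the
 * completion: the caller supplies a coherent buffer whose physical
 * address travels in the WQE, right-shifted by SXE2_EQC_PHY_ADDR_OFFSET
 * as in the assignment just below. Typical caller shape, mirroring the
 * debugfs dump code later in this patch:
 *
 *	struct sxe2_rdma_eqc *eqc;
 *	dma_addr_t pa;
 *	int ret;
 *
 *	eqc = dma_alloc_coherent(dev, sizeof(*eqc), &pa, GFP_KERNEL);
 *	if (!eqc)
 *		return -ENOMEM;
 *	ret = sxe2_drv_aeq_query_op(rdma_dev, aeq, pa);
 *	if (!ret)
 *		pr_debug("aeq sw_status %d\n", eqc->seqc.sw_status);
 *	dma_free_coherent(dev, sizeof(*eqc), eqc, pa);
 */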
eq_wqe->eqn = 0; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = query_pa >> SXE2_EQC_PHY_ADDR_OFFSET; + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_aeq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *aeq, u64 query_pa) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_QUERY_AEQ; + mq_info->post_mq = 1; + mq_info->in.u.aeq_query.aeq = aeq; + mq_info->in.u.aeq_query.scratch = (uintptr_t)mq_request; + mq_info->in.u.aeq_query.query_pa = query_pa; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle query aeq failed, ret (%d)\n", + ret); + +end: + return ret; +} + +int sxe2_drv_aeq_modify(struct sxe2_rdma_ctx_aeq *aeq, + struct sxe2_rdma_eqc *aeqc, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct sxe2_eq_wqe *eq_wqe; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + int ret; + + mq = aeq->dev->mq; + rdma_dev = to_rdmadev(mq->dev); + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", ret); + goto end; + } + + eq_wqe = (struct sxe2_eq_wqe *)wqe; + eq_wqe->oi = aeqc->seqc.oi; + eq_wqe->vsi_index = aeqc->seqc.vsi_index; + eq_wqe->sw_owner_bit = aeqc->seqc.sw_owner_bit; + eq_wqe->sw_status = aeqc->seqc.sw_status; + eq_wqe->uar_page = aeqc->seqc.uar_page; + eq_wqe->log_eq_size = aeqc->seqc.log_eq_size; + eq_wqe->pbl_mode = aeqc->seqc.pbl_mode; + eq_wqe->TPH_value = aeqc->seqc.TPH_value; + eq_wqe->TPH_en = aeqc->seqc.TPH_en; + eq_wqe->page_offset = aeqc->seqc.page_offset; + eq_wqe->log_page_size = aeqc->seqc.log_page_size; + eq_wqe->pbl_index = aeqc->seqc.pbl_index; + + eq_wqe->op = SXE2_MQ_OP_MODIFY_AEQ; + eq_wqe->eqn = 0; + eq_wqe->WQE_Valid = mq->polarity; + eq_wqe->physical_buffer_address = 0; + + if (post_sq) + sxe2_kpost_mq(mq); + + ret = 0; + +end: + return ret; +} + +int sxe2_drv_aeq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *aeq, + struct sxe2_rdma_eqc *aeqc) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_MODIFY_AEQ; + mq_info->post_mq = 1; + mq_info->in.u.aeq_modify.aeq = aeq; + mq_info->in.u.aeq_modify.scratch = (uintptr_t)mq_request; + mq_info->in.u.aeq_modify.aeqc = aeqc; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle modify aeq failed, ret (%d)\n", ret); + +end: + return ret; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.h new file mode 100644 index 0000000000000000000000000000000000000000..71751bfce50f7b2ebd898083fb95106037accd7d --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq.h @@ -0,0 +1,185 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_eq.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_EQ_H__ +#define __SXE2_DRV_EQ_H__ + +#include "sxe2_compat.h" +#include "sxe2_drv_rdma_common.h" + +#define SXE2_INJECT_EQ_AEQ_VALUE (1) +#define SXE2_INJECT_EQ_CEQ0_VALUE (1) +#define SXE2_INJECT_EQ_CEQS_VALUE (2) + +#define SXE2_INVALID_CQ_IDX 0xffffffff + +#define SXE2_MIN_EQ_SIZE 4 +#define SXE2_MAX_EQ_SIZE 262144 + +#define SXE2_CEQE_CQID GENMASK(18, 0) +#define SXE2_CEQE_VALID BIT_ULL(63) + +#define SXE2_AEQE_QPCQID_L GENMASK(18, 0) +#define SXE2_AEQE_WQDESCIDX GENMASK(33, 19) +#define SXE2_AEQE_OVERFLOW GENMASK(34, 34) +#define SXE2_AEQE_AECODE GENMASK(46, 35) +#define SXE2_AEQE_QPCQID_H GENMASK(47, 47) +#define SXE2_AEQE_AESRC GENMASK(53, 50) +#define SXE2_AEQE_RDMA_STATE GENMASK(56, 54) +#define SXE2_AEQE_VALID BIT_ULL(63) + +#define SXE2_EQE_SIZE (4) +#define SXE2_EQE_SIZE_HYGON (8) + +#define SXE2_DB_EQN_MASK (0X3FF) +#define SXE2_DB_CI_OFFSET (10) + +#define SXE2_GET_CURRENT_AEQ_ELEM(_aeq) \ + ((_aeq)->aeqe_base[SXE2_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf) + +#define SXE2_GET_CURRENT_AEQ_ELEM_HYGON(_aeq) \ + ((_aeq)->aeqe_hygon_base[SXE2_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf) + +#define SXE2_GET_CURRENT_CEQ_ELEM(_ceq) \ + ((_ceq)->ceqe_base[SXE2_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf) + +#define SXE2_GET_CURRENT_CEQ_ELEM_HYGON(_ceq) \ + ((_ceq)->ceqe_hygon_base[SXE2_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf) + +#define SXE2_GET_CEQ_ELEM_AT_POS(_ceq, _pos) ((_ceq)->ceqe_base[_pos].buf) + +#define SXE2_GET_CEQ_ELEM_AT_POS_HYGON(_ceq, _pos) \ + ((_ceq)->ceqe_hygon_base[_pos].buf) + +#define SXE2_RING_GET_NEXT_TAIL(_ring, _idx) \ + (((_ring).tail + (_idx)) % (_ring).size) + +enum { SXE2_EQ_POLLING_BUDGET = 128, +}; + +struct sxe2_ae_desc { + u16 id; + const char *name; + u64 count; + const char *desc; +}; + +struct sxe2_aeqe_info { + u64 compl_ctx; + u32 qp_cq_id; + u32 wqe_idx; + u16 ae_code; + u8 rdma_state; + u8 ae_source; + bool qp : 1; + bool cq : 1; + bool sq : 1; + bool rq : 1; + bool srq : 1; + bool db : 1; + bool tmo : 1; + bool cc : 1; + bool aeqe_overflow; + u8 err_rq_idx_valid; +}; + +struct sxe2_ceq_init_info { + u64 ceqe_pa; + u32 __iomem *doorbell; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_eqe *ceqe_base; + struct sxe2_eqe_hygon *ceqe_hygon_base; + u32 ceq_id; + u32 ceq_size; + struct sxe2_rdma_ctx_vsi *vsi; + struct drv_rdma_soft_eqc eqc; + bool pble_map : 1; + u64 pbl_count; + u64 pbl[]; +}; + +struct sxe2_eqe { + __le64 buf[SXE2_EQE_SIZE]; +}; + +struct sxe2_eqe_hygon { + __le64 buf[SXE2_EQE_SIZE_HYGON]; +}; + +struct sxe2_aeq_init_info { + u64 aeq_elem_pa; + u32 __iomem *doorbell; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_eqe *aeqe_base; + struct sxe2_eqe_hygon *aeqe_hygon_base; + void *pbl_list; + bool pble_map : 1; + u32 irq; + u32 msix_idx; + u32 aeq_size; + struct sxe2_rdma_ctx_vsi *vsi; + struct drv_rdma_soft_eqc eqc; + u64 pbl_count; + u64 pbl[]; +}; + +int sxe2_drv_aeq_create(struct sxe2_rdma_ctx_aeq *aeq, u64 scratch, + bool post_sq); +int sxe2_drv_aeq_destroy(struct sxe2_rdma_ctx_aeq *aeq, u64 scratch, + bool post_sq); +int sxe2_drv_aeq_modify(struct sxe2_rdma_ctx_aeq *aeq, + struct sxe2_rdma_eqc *aeqc, u64 scratch, bool post_sq); +int sxe2_drv_aeq_query(struct sxe2_rdma_ctx_aeq *aeq, u64 query_pa, u64 scratch, + bool post_sq); + +int sxe2_drv_ceq_create(struct sxe2_rdma_ctx_ceq *ceq, u64 scratch, + bool post_sq); +int 
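/*
 * The AEQE masks above pair with FIELD_GET() from <linux/bitfield.h>
 * when decoding an event. An illustrative decode of the first 64-bit
 * word into struct sxe2_aeqe_info, assuming (as the bit positions
 * suggest) that all fields live in word 0:
 *
 *	u64 qw = le64_to_cpu(aeqe->buf[0]);
 *
 *	info->qp_cq_id   = FIELD_GET(SXE2_AEQE_QPCQID_L, qw) |
 *			   (FIELD_GET(SXE2_AEQE_QPCQID_H, qw) << 19);
 *	info->wqe_idx    = FIELD_GET(SXE2_AEQE_WQDESCIDX, qw);
 *	info->ae_code    = FIELD_GET(SXE2_AEQE_AECODE, qw);
 *	info->ae_source  = FIELD_GET(SXE2_AEQE_AESRC, qw);
 *	info->rdma_state = FIELD_GET(SXE2_AEQE_RDMA_STATE, qw);
 *	valid            = FIELD_GET(SXE2_AEQE_VALID, qw);
 *
 * Note the GENMASK() definitions above bit 31 rely on unsigned long
 * being 64 bits wide; GENMASK_ULL() would make that explicit if 32-bit
 * x86 builds are ever a target (the Kconfig only requires X86).
 */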
sxe2_drv_ceq_destroy(struct sxe2_rdma_ctx_ceq *ceq, u64 scratch, + bool post_sq); +int sxe2_drv_ceq_modify(struct sxe2_rdma_ctx_ceq *ceq, + struct sxe2_rdma_eqc *ceqc, u64 scratch, bool post_sq); +int sxe2_drv_ceq_query(struct sxe2_rdma_ctx_ceq *ceq, u64 query_pa, u64 scratch, + bool post_sq); + +int sxe2_drv_ceq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ceq, u64 query_pa); +int sxe2_drv_ceq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_ceq *ceq, + struct sxe2_rdma_eqc *ceqc); +int sxe2_drv_aeq_query_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *aeq, u64 query_pa); +int sxe2_drv_aeq_modify_op(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_aeq *aeq, + struct sxe2_rdma_eqc *aeqc); + +int sxe2_setup_aeq(struct sxe2_rdma_device *rdma_dev); +void sxe2_del_aeq(struct sxe2_rdma_device *rdma_dev); +int sxe2_setup_ceqs(struct sxe2_rdma_device *rdma_dev); +int sxe2_setup_mceq(struct sxe2_rdma_device *rdma_dev); +void sxe2_del_ceqs(struct sxe2_rdma_device *rdma_dev); +void sxe2_del_mceq(struct sxe2_rdma_device *rdma_dev); + +void sxe2_rdma_comp_handler(struct sxe2_rdma_ctx_cq *cq_ctx); + +struct sxe2_ae_desc *sxe2_get_ae_desc_list(void); +int sxe2_get_ae_desc_list_size(void); + +const char *sxe2_get_ae_desc(u16 ae_code); +void sxe2_set_ae_count(u16 ae_code); + +#ifndef NO_HAVE_TASKLET_SETUP +irqreturn_t sxe2_eq_handler(int irq, void *data); + +irqreturn_t sxe2_ceq_handler(int irq, void *data); + +void sxe2_ceq_cb(struct tasklet_struct *t); +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..cae900b08eb78d70bf99f28abd448be0531a1fe2 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.c @@ -0,0 +1,776 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_eq_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include "sxe2_compat.h" +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif +#include "sxe2_drv_eq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_eq_debugfs.h" + +#define INJECT_CMD_LEN 32 +#define IN_CMD_LEN 64 +#define ALIVE 0x7fffffff +enum { + EQ_OI, + EQ_VSI_INDEX, + EQ_OWNER_BIT, + EQ_STATUS, + EQ_UAR_PAGE, + EQ_LOG_EQ_SIZE, + EQ_PBL_MODE, + EQ_TPH_EN, + EQ_TPH_VALUE, + EQ_PAGE_OFFSET, + EQ_LOG_PG_SZ, + EQ_PBL_INDEX, +}; + +#ifdef SXE2_CFG_DEBUG +static char *eq_fields[] = { + [EQ_OI] = "oi", + [EQ_VSI_INDEX] = "vsi_index", + [EQ_OWNER_BIT] = "owner_bit", + [EQ_STATUS] = "sw_status", + [EQ_UAR_PAGE] = "uar_page", + [EQ_LOG_EQ_SIZE] = "log_eq_size", + [EQ_PBL_MODE] = "pbl_mode", + [EQ_TPH_EN] = "TPH_en", + [EQ_TPH_VALUE] = "TPH_value", + [EQ_PAGE_OFFSET] = "page_offset", + [EQ_LOG_PG_SZ] = "log_page_size", + [EQ_PBL_INDEX] = "pbl_index", +}; +#endif +u64 drv_rdma_eq_read_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + int ret; + struct sxe2_rdma_eqc *ctx; + struct sxe2_rdma_ceq *ceq; + struct sxe2_rdma_aeq *aeq; + size_t len = 0; + struct sxe2_rdma_dma_mem query_eq; + struct sxe2_rdma_ctx_dev *dev_ctx; + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + + query_eq.size = sizeof(struct sxe2_rdma_eqc); + query_eq.va = dma_alloc_coherent(dev_ctx->hw->device, query_eq.size, + &query_eq.pa, GFP_KERNEL); + if (!query_eq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("query eq ctx alloc failed. ret:%d\n", + ret); + goto end; + } + memset(query_eq.va, 0, query_eq.size); + + if (type == SXE2_DBG_RSC_CEQ) { + ceq = (struct sxe2_rdma_ceq *)data; + + ret = sxe2_drv_ceq_query_op(rdma_dev, &ceq->ctx_ceq, + query_eq.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query ceq failed, ret (%d)\n", + ret); + goto free_ctx; + } + + len += dbg_vsnprintf(buf, len, "ceq:%d context:\n\n", + ceq->ctx_ceq.ceq_id); + len += dbg_vsnprintf(buf, len, "irq_num: %d\n", ceq->irq); + len += dbg_vsnprintf(buf, len, "msix_idx: %d\n\n", + ceq->msix_idx); + } else if (type == SXE2_DBG_RSC_AEQ) { + aeq = (struct sxe2_rdma_aeq *)data; + + ret = sxe2_drv_aeq_query_op(rdma_dev, &aeq->ctx_aeq, + query_eq.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query aeq failed, ret (%d)\n", + ret); + goto free_ctx; + } + + len += dbg_vsnprintf(buf, len, "aeq context:\n\n"); + len += dbg_vsnprintf(buf, len, "irq_num: %d\n", + aeq->ctx_aeq.irq); + len += dbg_vsnprintf(buf, len, "msix_idx: %d\n\n", + aeq->ctx_aeq.msix_idx); + } + + ctx = (struct sxe2_rdma_eqc *)query_eq.va; + + len += dbg_vsnprintf(buf, len, "soft context\n"); + len += dbg_vsnprintf(buf, len, "oi: %lld\n", + ctx->seqc.oi); + len += dbg_vsnprintf(buf, len, "vsi_index: %d\n", + ctx->seqc.vsi_index); + len += dbg_vsnprintf(buf, len, "sw_owner_bit: %d\n", + ctx->seqc.sw_owner_bit); + len += dbg_vsnprintf(buf, len, "sw_status: %d\n", + ctx->seqc.sw_status); + len += dbg_vsnprintf(buf, len, "uar_page: %d\n", + ctx->seqc.uar_page); + len += dbg_vsnprintf(buf, len, "log_eq_size: %lld\n", + ctx->seqc.log_eq_size); + len += dbg_vsnprintf(buf, len, "pbl_mode: %d\n", + ctx->seqc.pbl_mode); + len += dbg_vsnprintf(buf, len, "TPH_en: %d\n", + ctx->seqc.TPH_en); + len += dbg_vsnprintf(buf, len, "TPH_value: %d\n", + ctx->seqc.TPH_value); + len += dbg_vsnprintf(buf, len, "page_offset: %d\n", + ctx->seqc.page_offset); + len += 
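/*
 * dbg_vsnprintf() appends at offset len into buf and returns the number
 * of bytes it wrote, so the running len += chain above builds the whole
 * dump in a single pass:
 *
 *	len += dbg_vsnprintf(buf, len, "sw_status: %d\n", s);
 *	len += dbg_vsnprintf(buf, len, "uar_page:  %d\n", u);
 *
 * and the final length is what the debugfs read path feeds to
 * simple_read_from_buffer(), as in drv_rdma_ae_codes_info_read() below.
 */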
dbg_vsnprintf(buf, len, "log_page_size: %d\n", + ctx->seqc.log_page_size); + len += dbg_vsnprintf(buf, len, "pbl_index: %llx\n\n", + ctx->seqc.pbl_index); + + len += dbg_vsnprintf(buf, len, "hw context\n"); + len += dbg_vsnprintf(buf, len, "hw_owner_bit: %d\n", + ctx->heqc.hw_owner_bit); + len += dbg_vsnprintf(buf, len, "over_flag: %d\n", + ctx->heqc.over_flag); + len += dbg_vsnprintf(buf, len, "hw_status: %d\n", + ctx->heqc.hw_status); + len += dbg_vsnprintf(buf, len, "pfvf_id: %d\n", + ctx->heqc.pfvf_id); + len += dbg_vsnprintf(buf, len, "consumer_counter: %d\n", + ctx->heqc.consumer_counter); + len += dbg_vsnprintf(buf, len, "producer_counter: %d\n", + ctx->heqc.producer_counter); + len += dbg_vsnprintf(buf, len, "page_addr_odd_l: %d\n", + ctx->heqc.page_addr_odd_l); + len += dbg_vsnprintf(buf, len, "page_addr_odd_h: %d\n", + ctx->heqc.page_addr_odd_h); + len += dbg_vsnprintf(buf, len, "page_addr_even_l: %d\n", + ctx->heqc.page_addr_even_l); + len += dbg_vsnprintf(buf, len, "page_addr_even_h: %d\n", + ctx->heqc.page_addr_even_h); + +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_eq.size, query_eq.va, + query_eq.pa); + query_eq.va = NULL; + +end: + return len; +} + +#ifdef SXE2_CFG_DEBUG +static int drv_rdma_eq_ctx_modify(struct sxe2_rdma_eqc *ctx, int field, + u64 value) +{ + int ret = 0; + + switch (field) { + case EQ_OI: + ctx->seqc.oi = value; + break; + case EQ_VSI_INDEX: + ctx->seqc.vsi_index = value; + break; + case EQ_OWNER_BIT: + ctx->seqc.sw_owner_bit = value; + break; + case EQ_STATUS: + ctx->seqc.sw_status = value; + break; + case EQ_UAR_PAGE: + ctx->seqc.uar_page = value; + break; + case EQ_LOG_EQ_SIZE: + ctx->seqc.log_eq_size = value; + break; + case EQ_PBL_MODE: + ctx->seqc.pbl_mode = value; + break; + case EQ_TPH_EN: + ctx->seqc.TPH_en = value; + break; + case EQ_TPH_VALUE: + ctx->seqc.TPH_value = value; + break; + case EQ_PAGE_OFFSET: + ctx->seqc.page_offset = value; + break; + case EQ_LOG_PG_SZ: + ctx->seqc.log_page_size = value; + break; + case EQ_PBL_INDEX: + ctx->seqc.pbl_index = value; + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_WARN("invalid index %d, ret %d\n", field, ret); + } + + return ret; +} + +static int drv_rdma_query_eq(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, + struct sxe2_rdma_dma_mem query_eq) +{ + int ret = 0; + struct sxe2_rdma_ceq *ceq = NULL; + struct sxe2_rdma_aeq *aeq = NULL; + + if (type == SXE2_DBG_RSC_CEQ) { + ceq = (struct sxe2_rdma_ceq *)data; + + ret = sxe2_drv_ceq_query_op(rdma_dev, &ceq->ctx_ceq, + query_eq.pa); + if (ret != 0) { + DRV_RDMA_LOG_ERROR_BDF("query ceq failed, ret (%d)\n", + ret); + goto end; + } + + } else if (type == SXE2_DBG_RSC_AEQ) { + aeq = (struct sxe2_rdma_aeq *)data; + + ret = sxe2_drv_aeq_query_op(rdma_dev, &aeq->ctx_aeq, + query_eq.pa); + if (ret != 0) { + DRV_RDMA_LOG_ERROR_BDF("query aeq failed, ret (%d)\n", + ret); + goto end; + } + } + +end: + return ret; +} +#endif + +int drv_rdma_eq_write_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ +#ifdef SXE2_CFG_DEBUG + u32 i; + int ret; + u64 temp_value; + struct sxe2_rdma_eqc *ctx; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_ceq *ceq = NULL; + struct sxe2_rdma_aeq *aeq = NULL; + struct sxe2_rdma_dma_mem query_eq; + struct sxe2_rdma_ctx_dev *dev_ctx; + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + + query_eq.size = sizeof(struct sxe2_rdma_eqc); + query_eq.va = dma_alloc_coherent(dev_ctx->hw->device, query_eq.size, + 
					 &query_eq.pa, GFP_KERNEL);
+	if (!query_eq.va) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("query eq ctx alloc failed. ret:%d\n",
+				     ret);
+		goto end;
+	}
+	memset(query_eq.va, 0, query_eq.size);
+
+	ret = drv_rdma_query_eq(rdma_dev, data, type, query_eq);
+	if (ret)
+		goto free_ctx;
+
+	ctx = (struct sxe2_rdma_eqc *)query_eq.va;
+
+	argc = 0;
+	memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX);
+	ret = split_command(buf, &argc, argv);
+	if (ret)
+		goto free_ctx;
+
+	if (argc != DEBUG_PARA_CONT2) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("invalid param nums\n");
+		goto free_ctx;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(eq_fields); i++) {
+		if (!strncmp(argv[0], eq_fields[i], strlen(eq_fields[i])) &&
+		    (strlen(eq_fields[i]) == strlen(argv[0]))) {
+			break;
+		}
+	}
+
+	ret = kstrtoull(argv[1], 10, &temp_value);
+	if (ret != 0) {
+		DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret);
+		goto free_ctx;
+	}
+
+	DRV_RDMA_LOG_DEV_INFO("query eq i:%u, temp_value:%llx\n", i,
+			      temp_value);
+
+	ret = drv_rdma_eq_ctx_modify(ctx, i, temp_value);
+	if (ret)
+		goto free_ctx;
+
+	if (type == SXE2_DBG_RSC_CEQ) {
+		/* data carries the CEQ bound to this debugfs node */
+		ceq = (struct sxe2_rdma_ceq *)data;
+		ret = sxe2_drv_ceq_modify_op(rdma_dev, &ceq->ctx_ceq, ctx);
+		if (ret != 0)
+			DRV_RDMA_LOG_DEV_ERR(
+				"modify ceq ctx:%s failed, ret (%d)\n", argv[0],
+				ret);
+	} else if (type == SXE2_DBG_RSC_AEQ) {
+		/* data carries the AEQ bound to this debugfs node */
+		aeq = (struct sxe2_rdma_aeq *)data;
+		ret = sxe2_drv_aeq_modify_op(rdma_dev, &aeq->ctx_aeq, ctx);
+		if (ret != 0)
+			DRV_RDMA_LOG_DEV_ERR(
+				"modify aeq ctx:%s failed, ret (%d)\n", argv[0],
+				ret);
+	}
+
+free_ctx:
+	dma_free_coherent(dev_ctx->hw->device, query_eq.size, query_eq.va,
+			  query_eq.pa);
+	query_eq.va = NULL;
+
+end:
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+static ssize_t drv_rdma_ae_codes_info_read(struct file *filp, char __user *buf,
+					   size_t count, loff_t *off)
+{
+	ssize_t ret = 0;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	struct sxe2_rdma_device *rdma_dev;
+	int i = 0;
+	struct sxe2_ae_desc *ae_desc_debug;
+	int ae_list_count;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"MQ DEBUGFS:find dev struct from private_data failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:mq status rsp kmalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	len_total += dbg_vsnprintf(rsp_end, len_total, "ae codes info:\n");
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "---value---count---name---\n");
+
+	ae_desc_debug = sxe2_get_ae_desc_list();
+	ae_list_count = sxe2_get_ae_desc_list_size();
+	for (i = 0; i < ae_list_count; i++) {
+		len_total += dbg_vsnprintf(rsp_end, len_total,
+					   "%#llx %#llx %s\n",
+					   ae_desc_debug[i].id,
+					   ae_desc_debug[i].count,
+					   ae_desc_debug[i].name);
+	}
+
+	ret = simple_read_from_buffer(buf, count, off, rsp, (ssize_t)len_total);
+	if (ret < 0)
+		DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zd\n", ret);
+
+	kfree(rsp);
+	rsp = NULL;
+
+end:
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_ae_codes_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = drv_rdma_ae_codes_info_read,
+};
+
+int drv_rdma_debug_aeq_add(struct sxe2_rdma_device *rdma_dev,
+			   struct sxe2_rdma_aeq *aeq)
+{
+	int ret = 0;
+
+	if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret);
+		goto end;
+	}
+
+	if (!rdma_dev->hdl->aeq_debugfs) {
+		ret = -EINVAL;
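+		/*
+		 * Usage sketch for the read/write interfaces above (the
+		 * debugfs paths are hypothetical; the real nodes are created
+		 * by drv_rdma_add_res_tree() and debugfs_create_file()):
+		 *
+		 *   cat  /sys/kernel/debug/<dev>/aeq/ae_codes_info
+		 *   echo "oi 1" > /sys/kernel/debug/<dev>/ceq/<n>
+		 *
+		 * A write must be "<field> <value>", with <field> taken from
+		 * eq_fields[] and <value> parsed as decimal by kstrtoull().
+		 */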
+ DRV_RDMA_LOG_DEV_ERR("aeq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + aeq->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_AEQ, + rdma_dev->hdl->aeq_debugfs, + drv_rdma_eq_read_field, + drv_rdma_eq_write_field, 0, aeq); + if (!aeq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + goto end; + } + + debugfs_create_file("ae_codes_info", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->aeq_debugfs, rdma_dev, + &sxe2_rdma_ae_codes_fops); + +end: + return ret; +} + +void drv_rdma_debug_aeq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_aeq *aeq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->aeq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("aeq debugfs dir not exist\n"); + goto end; + } + + kfree(aeq->dbg_node); + aeq->dbg_node = NULL; + +end: + return; +} + +int drv_rdma_debug_ceq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->ceq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("eq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + ceq->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_CEQ, + rdma_dev->hdl->ceq_debugfs, + drv_rdma_eq_read_field, + drv_rdma_eq_write_field, + (int)ceq->ctx_ceq.ceq_id, ceq); + if (!ceq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} + +void drv_rdma_debug_ceq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->ceq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("ceq debugfs dir not exist\n"); + goto end; + } + + if (ceq->dbg_node) { + drv_rdma_rm_res_tree(ceq->dbg_node); + ceq->dbg_node = NULL; + } + +end: + return; +} + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + +enum { AEQ_INJECT_CQ_SWSTATUS_ERR, + AEQ_INJECT_CEQ_SWSTATUS_ERR, + AEQ_INJECT_DB_CEQN_ERR, + AEQ_INJECT_CEQ_CI_NOUDPATE_ERR, + AEQ_INJECT_AEQ_CI_NOUDPATE_ERR, + AEQ_INJECT_TMO_FPTE_VALID_ERR, + AEQ_INJECT_CQ_DB_NO_UPDATE_ERR, + AEQ_INJECT_SRQ_LIMIT_ERR, + AEQ_INJECT_LLWQE_ERR, +}; + +static char *inject_flag_fields[] = { + [AEQ_INJECT_CQ_SWSTATUS_ERR] = "cq_sw_status_err", + [AEQ_INJECT_CEQ_SWSTATUS_ERR] = "ceq_sw_status_err", + [AEQ_INJECT_DB_CEQN_ERR] = "db_ceqn_err", + [AEQ_INJECT_CEQ_CI_NOUDPATE_ERR] = "ceq_ci_noupdate", + [AEQ_INJECT_AEQ_CI_NOUDPATE_ERR] = "aeq_ci_noupdate", + [AEQ_INJECT_TMO_FPTE_VALID_ERR] = "tmo_fpte_valid_0", + [AEQ_INJECT_CQ_DB_NO_UPDATE_ERR] = "cq_db_no_update", + [AEQ_INJECT_SRQ_LIMIT_ERR] = "srq_limit", + [AEQ_INJECT_LLWQE_ERR] = "llwqe_err", +}; + +STATIC ssize_t drv_aeq_codes_inject_flag_read(struct file *filp, + char __user *buf, size_t count, + loff_t *pos) +{ + ssize_t ret; + char *rsp = NULL; + char *rsp_end; + size_t len = 0; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zd\n", + ret); + goto end; + } + rsp_end = rsp; + + len += dbg_vsnprintf(rsp_end, len, "aeq codes inject flag\n"); + len += dbg_vsnprintf(rsp_end, len, + 
"echo xxx 1 > inject_flag; inject err\n"); + len += dbg_vsnprintf(rsp_end, len, "cq_sw_status_err: %d\n", + rdma_dev->rdma_func->inject_aeq.cq_sw_status_err); + len += dbg_vsnprintf(rsp_end, len, "ceq_sw_status_err: %d\n", + rdma_dev->rdma_func->inject_aeq.ceq_sw_status_err); + len += dbg_vsnprintf(rsp_end, len, "db_ceqn_err: %d\n", + rdma_dev->rdma_func->inject_aeq.db_ceqn_err); + len += dbg_vsnprintf(rsp_end, len, "ceq_ci_noupdate: %d\n", + rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate); + len += dbg_vsnprintf(rsp_end, len, "aeq_ci_noupdate: %d\n", + rdma_dev->rdma_func->inject_aeq.aeq_ci_noupdate); + len += dbg_vsnprintf(rsp_end, len, "tmo_fpte_valid_0: %d\n", + rdma_dev->rdma_func->inject_aeq.tmo_fpte_valid_0); + len += dbg_vsnprintf(rsp_end, len, "cq_db_no_update: %d\n", + rdma_dev->rdma_func->inject_aeq.cq_db_no_update); + len += dbg_vsnprintf(rsp_end, len, "srq_limit_flag: %d\n", + rdma_dev->rdma_func->inject_aeq.srq_limit_flag); + len += dbg_vsnprintf(rsp_end, len, "llwqe_flag: %d\n", + rdma_dev->rdma_func->inject_aeq.llwqe_flag); + + ret = simple_read_from_buffer(buf, count, pos, rsp, (ssize_t)len); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zd\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +STATIC ssize_t drv_aeq_codes_inject_flag_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *pos) +{ + int ret; + char in_buf[IN_CMD_LEN] = { 0 }; + u32 i; + u64 temp_value; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + char cmd_buf[INJECT_CMD_LEN] = {0}; + u64 alive = ALIVE; + + rdma_dev = filp->private_data; + + if ((count >= IN_CMD_LEN) || copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto end; + } + + argc = 0; + + ret = split_command(in_buf, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(inject_flag_fields); i++) { + if (!strncmp(argv[0], inject_flag_fields[i], + strlen(inject_flag_fields[i]))) + break; + } + + ret = kstrtoull(argv[1], 10, &temp_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret); + goto end; + } + + DRV_RDMA_LOG_DEV_INFO("inject flag i:%d, to temp_value:%llx\n", i, + temp_value); + snprintf(cmd_buf, sizeof(cmd_buf), "-u %llx -a %llx", temp_value, alive); + + switch (i) { + case AEQ_INJECT_CQ_SWSTATUS_ERR: + rdma_dev->rdma_func->inject_aeq.cq_sw_status_err = + (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "cq_sw_status_err", cmd_buf); + break; + case AEQ_INJECT_CEQ_SWSTATUS_ERR: + rdma_dev->rdma_func->inject_aeq.ceq_sw_status_err = + (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "ceq_sw_status_err", cmd_buf); + break; + case AEQ_INJECT_DB_CEQN_ERR: + rdma_dev->rdma_func->inject_aeq.db_ceqn_err = (u8)temp_value; + break; + case AEQ_INJECT_CEQ_CI_NOUDPATE_ERR: + rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate = + (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "ceq_ci_noupdate", cmd_buf); + break; + case AEQ_INJECT_AEQ_CI_NOUDPATE_ERR: + rdma_dev->rdma_func->inject_aeq.aeq_ci_noupdate = + (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "aeq_ci_noupdate", cmd_buf); + break; + case AEQ_INJECT_TMO_FPTE_VALID_ERR: + rdma_dev->rdma_func->inject_aeq.tmo_fpte_valid_0 = + (u8)temp_value; + break; + case AEQ_INJECT_CQ_DB_NO_UPDATE_ERR: + rdma_dev->rdma_func->inject_aeq.cq_db_no_update = + (u8)temp_value; + break; + case AEQ_INJECT_SRQ_LIMIT_ERR: + 
rdma_dev->rdma_func->inject_aeq.srq_limit_flag = (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "srq_limit_flag", cmd_buf); + break; + case AEQ_INJECT_LLWQE_ERR: + rdma_dev->rdma_func->inject_aeq.llwqe_flag = (u8)temp_value; + INJECT_ACTIVE(rdma_dev->rdma_func, "llwqe_flag", cmd_buf); + break; + default: + DRV_RDMA_LOG_DEV_WARN("invalid index %d\n", i); + ret = -EINVAL; + goto end; + } + + *pos = (loff_t)count; + ret = (int)count; + +end: + return ret; +} + +static const struct file_operations aeq_codes_inject_flag_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_aeq_codes_inject_flag_read, + .write = drv_aeq_codes_inject_flag_write, +}; + +int sxe2_rdma_aeq_codes_inject_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + struct sxe2_rdma_handler *hdl = rdma_dev->hdl; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->aeq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("aeq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + hdl->aeq_codes_err_debugfs = + debugfs_create_dir("aeq_codes_inject", hdl->aeq_debugfs); + if (!hdl->aeq_codes_err_debugfs) { + DRV_RDMA_LOG_DEV_ERR( + "debugfs create aeq_codes_inject dir failed.\n"); + ret = -ENOMEM; + goto end; + } + + debugfs_create_file("inject_flag", SXE2_DEBUG_FILE_READ_WRITE, + hdl->aeq_codes_err_debugfs, rdma_dev, + &aeq_codes_inject_flag_fops); + +end: + return ret; +} + +void drv_rdma_aeq_codes_inject_del(struct sxe2_rdma_device *rdma_dev) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->aeq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("aeq debugfs dir not exist\n"); + goto end; + } + + debugfs_remove_recursive(rdma_dev->hdl->aeq_codes_err_debugfs); + rdma_dev->hdl->aeq_codes_err_debugfs = NULL; + +end: + return; +} + +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..f94febbaebf39aac57b0820fd7a641eb15e83df0 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_eq_debugfs.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_eq_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_EQ_DEBUGFS_H__ +#define __SXE2_DRV_EQ_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +int drv_rdma_debug_aeq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_aeq *aeq); +void drv_rdma_debug_aeq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_aeq *aeq); +int drv_rdma_debug_ceq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq); +void drv_rdma_debug_ceq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ceq *ceq); +u64 drv_rdma_eq_read_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); +int drv_rdma_eq_write_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf); + +#ifdef SXE2_SUPPORT_INJECT +int sxe2_rdma_aeq_codes_inject_add(struct sxe2_rdma_device *rdma_dev); +void drv_rdma_aeq_codes_inject_del(struct sxe2_rdma_device *rdma_dev); +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..e43d9bbc3960b892a4951ca28e637551628f03d6 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_hw.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_hw.h" +#include "sxe2_drv_rdma_log.h" + +static u32 sxe2_pf_bar_regs_offset[SXE2_PF_MAX_BAR_REGS] = { + PF_QSET_APPLY_REQ_OFFSET, + PF_QSET_APPLY_RESP_OFFSET, + PF_QSET_QUERY_REQ_OFFSET, + PF_QSET_QUERY_RESP_OFFSET, + PF_QSET_RELEASE_REQ_OFFSET, + PF_QSET_RELEASE_RESP_OFFSET, + PF_QSET_QP_BIND_REQ_OFFSET, + PF_QSET_QP_BIND_RESP_OFFSET, + PF_GLINT_CTRL_DYN_CTL_OFFSET(0), + PF_MQC_ADDR_HIGH_OFFEST, + PF_MQC_ADDR_LOW_OFFEST, + PF_MQC_ADDR_VLD_OFFEST, + PF_MQ_STATUS_OFFEST, + PF_MQ_DB_OFFEST, + PF_MQ_WQE_DONE_OFFEST, + PF_MQ_ERRCODES_OFFEST, + PF_RCMS_SPT_CACHE_FAST_INVALID_MASK_OFFSET, + PF_RCMS_SPT_CACHE_FAST_INVALID_IDX_OFFSET, + PF_RDMA_FEATURE_HW_VERSION_LOW_OFFSET, + PF_RDMA_FEATURE_HW_VERSION_HIGH_OFFSET, + PF_RDMA_FEATURE_ENDPT_TRK_OFFSET, + PF_RDMA_FEATURE_QSETS_MAX_OFFSET, + PF_RDMA_FEATURE_FW_VERSION_OFFSET, + PF_RDMA_CONFIG_PKEY_OFFSET, + GLINT_CTRL_PF_INT_AEQCTL_OFFSET, + GLINT_CTRL_PF_INT_CEQCTL_OFFSET(0), + SXE2_PF_INT_RATE(0), +}; + +static u32 sxe2_vf_bar_regs_offset[SXE2_VF_MAX_BAR_REGS] = { + VF_QSET_APPLY_REQ_OFFSET, + VF_QSET_APPLY_RESP_OFFSET, + VF_QSET_QUERY_REQ_OFFSET, + VF_QSET_QUERY_RESP_OFFSET, + VF_QSET_RELEASE_REQ_OFFSET, + VF_QSET_RELEASE_RESP_OFFSET, + VF_QSET_QP_BIND_REQ_OFFSET, + VF_QSET_QP_BIND_RESP_OFFSET, + VF_GLINT_CTRL_DYN_CTL_OFFSET(0), + VF_MQC_ADDR_HIGH_OFFEST, + VF_MQC_ADDR_LOW_OFFEST, + VF_MQC_ADDR_VLD_OFFEST, + VF_MQ_STATUS_OFFEST, + VF_MQ_DB_OFFEST, + VF_MQ_WQE_DONE_OFFEST, + VF_MQ_ERRCODES_OFFEST, + VF_RCMS_SPT_CACHE_FAST_INVALID_MASK_OFFSET, + VF_RCMS_SPT_CACHE_FAST_INVALID_IDX_OFFSET, + VF_RDMA_FEATURE_HW_VERSION_LOW_OFFSET, + VF_RDMA_FEATURE_HW_VERSION_HIGH_OFFSET, + VF_RDMA_FEATURE_ENDPT_TRK_OFFSET, + VF_RDMA_FEATURE_QSETS_MAX_OFFSET, + VF_RDMA_FEATURE_FW_VERSION_OFFSET, + VF_RDMA_CONFIG_PKEY_OFFSET, +}; + +void sxe2_hw_ena_irq(struct sxe2_rdma_ctx_dev *dev, u32 idx) +{ + u32 val; + u32 interval = 0; + + if (dev->ceq_itr && dev->aeq->msix_idx != idx) + 
interval = dev->ceq_itr >> 1; + + val = (u32)(FIELD_PREP((s64)SXE2_GLINT_DYN_CTL_ITR_INDEX, + SXE2_RDMA_IDX_ITR0) | + FIELD_PREP((s64)SXE2_GLINT_DYN_CTL_INTERVAL, interval) | + FIELD_PREP((s64)SXE2_GLINT_DYN_CTL_INTENA, true) | + FIELD_PREP((s64)SXE2_GLINT_DYN_CTL_CLEARPBA, true)); + + writel(val, dev->hw_regs[IRQ_DYN_CTL] + idx); +} + +void sxe2_hw_disable_irq(struct sxe2_rdma_ctx_dev *dev, u32 idx) +{ + writel(0, dev->hw_regs[IRQ_DYN_CTL] + idx); +} + +void sxe2_hw_cfg_aeq(struct sxe2_rdma_ctx_dev *dev, u32 idx, bool enable) +{ + u32 reg_val; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + reg_val = (u32)(FIELD_PREP((s64)SXE2_PFINT_AEQCTL_CAUSE_ENA, enable) | + FIELD_PREP((s64)SXE2_PFINT_AEQCTL_MSIX_INDEX, idx) | + FIELD_PREP((s64)SXE2_PFINT_AEQCTL_ITR_INDEX, + SXE2_RDMA_IDX_NOITR)); + + DRV_RDMA_LOG_DEV_DEBUG("aeq idx: %#x, enable: %u\n", idx, enable); + writel(reg_val, dev->hw_regs[PF_INT_AEQCTL]); +} + +void sxe2_hw_cfg_ceq(struct sxe2_rdma_ctx_dev *dev, u32 ceq_id, u32 idx, + bool enable) +{ + u32 reg_val; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + reg_val = enable ? SXE2_GLINT_CEQCTL_CAUSE_ENA : 0; + reg_val |= (idx << SXE2_GLINT_CEQCTL_MSIX_INDEX_S) | + (SXE2_RDMA_IDX_ITR0 << SXE2_GLINT_CEQCTL_ITR_INDEX_S); + + DRV_RDMA_LOG_DEV_DEBUG( + "ceq_id: %#x, idx: %#x, enable: %u, ceq_itr: %u\n", ceq_id, idx, + enable, dev->ceq_itr); + + writel(reg_val, dev->hw_regs[PF_INT_CEQCTL] + ceq_id); +} + +static const struct sxe2_rdma_irq_ops sxe2_irq_ops = { + .sxe2_rdma_cfg_aeq = sxe2_hw_cfg_aeq, + .sxe2_rdma_cfg_ceq = sxe2_hw_cfg_ceq, + .sxe2_rdma_dis_irq = sxe2_hw_disable_irq, + .sxe2_rdma_en_irq = sxe2_hw_ena_irq, +}; + +static const struct sxe2_rdma_hw_stat_map sxe2_rdma_hw_stats_map[] = { + [SXE2_RDMA_HW_STAT_INDEX_IP4TXOCTS] = { 0, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4TXPKTS] = { 8, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 16, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 24, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6TXOCTS] = { 32, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6TXPKTS] = { 40, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 48, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 56, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMATXWRS] = { 64, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMATXRDS] = { 72, 0, SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMATXSNDS] = { 80, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMATXBND] = { 88, 0, SXE2_RDMA_MAX_STATS_32 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMATXINV] = { 88, 32, + SXE2_RDMA_MAX_STATS_32 }, + [SXE2_RDMA_HW_STAT_INDEX_TXCNPSENT] = { 96, 0, SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS] = { 128, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4RXPKTS] = { 136, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 144, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 152, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 160, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6RXOCTS] = { 168, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6RXPKTS] = { 176, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 184, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 192, 0, + SXE2_RDMA_MAX_STATS_48 }, + 
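+	/*
+	 * Worked example (a reading of the table layout, not stated in the
+	 * patch): each entry appears to be { byte offset, bit offset, width },
+	 * so RDMATXBND and RDMATXINV above share the 64-bit word at byte
+	 * offset 88, occupying bits 0..31 and 32..63 respectively:
+	 *
+	 *   u64 qw = readq(stats_base + 88);  // stats_base is hypothetical
+	 *   u32 txbnd = qw & 0xffffffff;
+	 *   u32 txinv = qw >> 32;
+	 */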
[SXE2_RDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 200, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMARXWRS] = { 208, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMARXRDS] = { 216, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMARXSNDS] = { 224, 0, + SXE2_RDMA_MAX_STATS_48 }, + [SXE2_RDMA_HW_STAT_INDEX_RDMARXINV] = { 232, 0, + SXE2_RDMA_MAX_STATS_32 }, + [SXE2_RDMA_HW_STAT_INDEX_RXECNMARKEDPKTS] = { 232, 32, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXCNPHANDLED] = { 240, 0, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXCNPIGNORED] = { 240, 32, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXNAKPKTS] = { 248, 0, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXSEQERR] = { 248, 32, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXRNRNAKPKTS] = { 256, 0, + SXE2_RDMA_MAX_STATS_24 }, + [SXE2_RDMA_HW_STAT_INDEX_RXRETRANS] = { 256, 32, + SXE2_RDMA_MAX_STATS_24 }, +}; + +void sxe2_rdma_init_hw(struct sxe2_rdma_ctx_dev *dev) +{ + int i; + u8 __iomem *hw_addr; + + hw_addr = dev->hw->hw_addr; + + if (dev->privileged) { + dev->hw_attrs.max_hw_vf_fpm_id = SXE2_MAX_VF_FPM_ID; + dev->hw_attrs.first_hw_vf_fpm_id = SXE2_FIRST_VF_FPM_ID; + for (i = 0; i < SXE2_PF_MAX_BAR_REGS; i++) + dev->hw_regs[i] = + (u32 __iomem *)(hw_addr + + sxe2_pf_bar_regs_offset[i]); + + } else { + for (i = 0; i < SXE2_VF_MAX_BAR_REGS; i++) + dev->hw_regs[i] = + (u32 __iomem *)(hw_addr + + sxe2_vf_bar_regs_offset[i]); + } + + dev->irq_ops = &sxe2_irq_ops; + + dev->hw_stats_map = sxe2_rdma_hw_stats_map; + dev->hw_attrs.max_stat_idx = SXE2_RDMA_HW_STAT_INDEX_MAX; + dev->hw_attrs.max_rra = SXE2_MAX_RRA_SIZE; + dev->hw_attrs.max_sra = SXE2_MAX_SRA_SIZE; + + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; + dev->hw_attrs.max_hw_device_pages = SXE2_MAX_PUSH_PAGE_COUNT; + dev->hw_attrs.uk_attrs.max_hw_wq_frags = SXE2_MAX_WQ_FRAGMENT_COUNT; + dev->hw_attrs.uk_attrs.max_hw_read_sges = SXE2_MAX_SGE_RD; + dev->hw_attrs.uk_attrs.min_hw_wq_size = SXE2_MIN_WQ_SIZE; + dev->hw_attrs.uk_attrs.max_hw_sq_chunk = SXE2_MAX_QUANTA_PER_WR; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..6ecb76dc432774b1f4e252f337a60eac7dced2da --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_hw.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_hw.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_HW_H +#define SXE2_DRV_HW_H +#include "sxe2_drv_rdma_common.h" + +#define SXE2_FIRST_VF_FPM_ID 8 +#define SXE2_MAX_STATS_COUNT 0 +#define SXE2_HW_STAT_MAX_INDEX 0 + +#define SXE2_TOP_REG_COMPAT_OFFSET 0x0029220c + +#define PF_RDMA_FEATURE_HW_VERSION_LOW_OFFSET \ + (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0x98) +#define PF_RDMA_FEATURE_HW_VERSION_HIGH_OFFSET \ + (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0x9C) +#define PF_RDMA_FEATURE_ENDPT_TRK_OFFSET (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA0) +#define PF_RDMA_FEATURE_QSETS_MAX_OFFSET (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA4) +#define PF_RDMA_FEATURE_FW_VERSION_OFFSET (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA8) + +#define VF_RDMA_FEATURE_HW_VERSION_LOW_OFFSET \ + (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0x98) +#define VF_RDMA_FEATURE_HW_VERSION_HIGH_OFFSET \ + (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0x9C) +#define VF_RDMA_FEATURE_ENDPT_TRK_OFFSET (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA0) +#define VF_RDMA_FEATURE_QSETS_MAX_OFFSET (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA4) +#define VF_RDMA_FEATURE_FW_VERSION_OFFSET (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0xA8) + +enum sxe2_vf_bar_regs_idx { + QSET_APPLY_REQ, + QSET_APPLY_RESP, + QSET_QUERY_REQ, + QSET_QUERY_RESP, + QSET_RELEASE_REQ, + QSET_RELEASE_RESP, + QSET_QP_BIND_REQ, + QSET_QP_BIND_RESP, + IRQ_DYN_CTL, + MQC_ADDR_HIGH, + MQC_ADDR_LOW, + MQC_ADDR_VLD, + MQ_STATUS, + MQ_DB, + MQ_WQE_DONE, + MQ_ERRCODES, + RCMS_SPT_CACHE_FAST_INVALID_MASK, + RCMS_SPT_CACHE_FAST_INVALID_IDX, + RDMA_FEATURE_HW_VERSION_LOW, + RDMA_FEATURE_HW_VERSION_HIGH, + RDMA_FEATURE_ENDPT_TRK, + RDMA_FEATURE_QSETS_MAX, + RDMA_FEATURE_FW_VERSION, + RDMA_CONFIG_PKEY, + SXE2_VF_MAX_BAR_REGS, +}; + +enum sxe2_pf_bar_regs_idx { + PF_INT_AEQCTL = SXE2_VF_MAX_BAR_REGS, + PF_INT_CEQCTL, + PF_INT_RATE, + SXE2_PF_MAX_BAR_REGS, +}; + +enum sxe2_rdma_dyn_idx_t { + SXE2_RDMA_IDX_ITR0 = 0, + SXE2_RDMA_IDX_ITR1 = 1, + SXE2_RDMA_IDX_ITR2 = 2, + SXE2_RDMA_IDX_NOITR = 3, +}; + +#define SXE2_PFINT_AEQCTL_MSIX_INDEX_S 0 +#define SXE2_PFINT_AEQCTL_MSIX_INDEX GENMASK(10, 0) +#define SXE2_PFINT_AEQCTL_ITR_INDEX_S 11 +#define SXE2_PFINT_AEQCTL_ITR_INDEX GENMASK(12, 11) +#define SXE2_PFINT_AEQCTL_CAUSE_ENA_S 30 +#define SXE2_PFINT_AEQCTL_CAUSE_ENA BIT(30) + +#define SXE2_GLINT_CEQCTL_MSIX_INDEX_S 0 +#define SXE2_GLINT_CEQCTL_MSIX_INDEX GENMASK(10, 0) +#define SXE2_GLINT_CEQCTL_ITR_INDEX_S 11 +#define SXE2_GLINT_CEQCTL_ITR_INDEX GENMASK(12, 11) +#define SXE2_GLINT_CEQCTL_CAUSE_ENA_S 30 +#define SXE2_GLINT_CEQCTL_CAUSE_ENA BIT(30) + +#define SXE2_GLINT_DYN_CTL_INTENA_S 0 +#define SXE2_GLINT_DYN_CTL_INTENA BIT(0) +#define SXE2_GLINT_DYN_CTL_CLEARPBA_S 1 +#define SXE2_GLINT_DYN_CTL_CLEARPBA BIT(1) +#define SXE2_GLINT_DYN_CTL_ITR_INDEX_S 3 +#define SXE2_GLINT_DYN_CTL_ITR_INDEX GENMASK(4, 3) +#define SXE2_GLINT_DYN_CTL_INTERVAL_S 5 +#define SXE2_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5) + +enum sxe2_rdma_hw_stats_index { + SXE2_RDMA_HW_STAT_INDEX_IP4TXOCTS = 0, + SXE2_RDMA_HW_STAT_INDEX_IP4TXPKTS = 1, + SXE2_RDMA_HW_STAT_INDEX_IP4TXMCOCTS = + 2, + SXE2_RDMA_HW_STAT_INDEX_IP4TXMCPKTS = + 3, + SXE2_RDMA_HW_STAT_INDEX_IP6TXOCTS = 4, + SXE2_RDMA_HW_STAT_INDEX_IP6TXPKTS = 5, + SXE2_RDMA_HW_STAT_INDEX_IP6TXMCOCTS = + 6, + SXE2_RDMA_HW_STAT_INDEX_IP6TXMCPKTS = + 7, + SXE2_RDMA_HW_STAT_INDEX_RDMATXWRS = 8, + SXE2_RDMA_HW_STAT_INDEX_RDMATXRDS = 9, + SXE2_RDMA_HW_STAT_INDEX_RDMATXSNDS = 10, + SXE2_RDMA_HW_STAT_INDEX_RDMATXBND = 11, + SXE2_RDMA_HW_STAT_INDEX_RDMATXINV = 12, + SXE2_RDMA_HW_STAT_INDEX_TXCNPSENT = 13, + 
SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS = 14, + SXE2_RDMA_HW_STAT_INDEX_IP4RXPKTS = 15, + SXE2_RDMA_HW_STAT_INDEX_IP4RXDISCARD = 16, + SXE2_RDMA_HW_STAT_INDEX_IP4RXMCOCTS = 17, + SXE2_RDMA_HW_STAT_INDEX_IP4RXMCPKTS = 18, + SXE2_RDMA_HW_STAT_INDEX_IP6RXOCTS = 19, + SXE2_RDMA_HW_STAT_INDEX_IP6RXPKTS = 20, + SXE2_RDMA_HW_STAT_INDEX_IP6RXDISCARD = 21, + SXE2_RDMA_HW_STAT_INDEX_IP6RXMCOCTS = 22, + SXE2_RDMA_HW_STAT_INDEX_IP6RXMCPKTS = 23, + SXE2_RDMA_HW_STAT_INDEX_RDMARXWRS = 24, + SXE2_RDMA_HW_STAT_INDEX_RDMARXRDS = 25, + SXE2_RDMA_HW_STAT_INDEX_RDMARXSNDS = 26, + SXE2_RDMA_HW_STAT_INDEX_RDMARXINV = + 27, + SXE2_RDMA_HW_STAT_INDEX_RXECNMARKEDPKTS = + 28, + SXE2_RDMA_HW_STAT_INDEX_RXCNPHANDLED = 29, + SXE2_RDMA_HW_STAT_INDEX_RXCNPIGNORED = 30, + SXE2_RDMA_HW_STAT_INDEX_RXNAKPKTS = 31, + SXE2_RDMA_HW_STAT_INDEX_RXSEQERR = 32, + SXE2_RDMA_HW_STAT_INDEX_RXRNRNAKPKTS = 33, + SXE2_RDMA_HW_STAT_INDEX_RXRETRANS = 34, + SXE2_RDMA_HW_STAT_INDEX_MAX = 35, +}; + +void sxe2_rdma_init_hw(struct sxe2_rdma_ctx_dev *dev); +void sxe2_hw_ena_irq(struct sxe2_rdma_ctx_dev *dev, u32 idx); +void sxe2_hw_disable_irq(struct sxe2_rdma_ctx_dev *dev, u32 idx); + +void sxe2_hw_cfg_aeq(struct sxe2_rdma_ctx_dev *dev, u32 idx, bool enable); + +void sxe2_hw_cfg_ceq(struct sxe2_rdma_ctx_dev *dev, u32 ceq_id, u32 idx, + bool enable); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.c new file mode 100644 index 0000000000000000000000000000000000000000..c96b1c78896995bcedbd815ef63202831b01fb2b --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.c @@ -0,0 +1,2390 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_io.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include + +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_io.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_rdma_log.h" + +static const rdma_disp_func g_rdma_op[SXE2_RDMA_MAX_ID] = { + sxe2_hw_send, + sxe2_hw_inline_send, + sxe2_hw_rdma_write, + sxe2_hw_inline_rdma_write, + sxe2_hw_rdma_read, + sxe2_hw_local_invalidate, + sxe2_hw_mr_fast_register, +}; + +static const int g_frag2quanta[SXE2_MAX_FRAGCNT] = { 1, 1, 2, 2, 3, 3, 4, 4, + 5, 5, 6, 6, 7, 7, 8, 8 }; + +static void sxe2_dump_wqe(struct sxe2_qp_common *qp, __le64 *wqe, __u16 quanta, + __u32 wqe_idx, const char *desc) +{ + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + __le32 *p = (__le32 *)wqe; + int i, offset = 0; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + if (desc) { + DRV_RDMA_LOG_DEV_DEBUG("SQWQE DUMP TYPE=[%s], qpn [0x%x]\n" + "wqe_idx [0x%x] quanta [%u]:\n", + desc, qp->qpn, wqe_idx, quanta); + } else + DRV_RDMA_LOG_DEV_DEBUG( + "RQWQE DUMP, qpn [0x%x], wqe_idx [0x%x]:\n", qp->qpn, + wqe_idx); + + for (i = 0; i < quanta * SXE2_QP_WQE_MIN_SIZE; i += 32) { + DRV_RDMA_LOG_DEV_DEBUG( + "[qpn 0x%x][offset %u] %08X %08X %08X %08X\n" + "%08X %08X %08X %08X\n", + qp->qpn, offset, le32_to_cpu(p[0]), le32_to_cpu(p[1]), + le32_to_cpu(p[2]), le32_to_cpu(p[3]), le32_to_cpu(p[4]), + le32_to_cpu(p[5]), le32_to_cpu(p[6]), + le32_to_cpu(p[7])); + p += 8; + offset += 32; + } +} + +static inline int sxe2_fragcnt_to_quanta_cnt(__u32 frag_cnt, __u16 *quanta) +{ + if (frag_cnt >= SXE2_MAX_FRAGCNT) + return -EINVAL; + + *quanta = (__u16)g_frag2quanta[frag_cnt]; + return 0; +} + +static inline 
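+/*
+ * Worked example of the quanta arithmetic (inferred from the tables above,
+ * not stated in the patch): with 32-byte quanta, the first quantum holds the
+ * WQE header plus one 16-byte fragment and each further quantum holds two,
+ * so g_frag2quanta[] amounts to 1 + (frag_cnt - 1) / 2 rounded up, e.g.
+ * 4 fragments -> 3 quanta. For inline data the first quantum carries
+ * 8 payload bytes and each additional one 31, matching the 8, 39, 70, ...
+ * thresholds in the function below.
+ */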
__u16 sxe2_inline_to_quanta_cnt(__u32 data_size) +{ + if (data_size <= 8) + return SXE2_QP_WQE_MIN_QUANTA; + else if (data_size <= 39) + return 2; + else if (data_size <= 70) + return 3; + else if (data_size <= 101) + return 4; + else if (data_size <= 132) + return 5; + else if (data_size <= 163) + return 6; + else if (data_size <= 194) + return 7; + else + return 8; +} + +static void sxe2_qp_ring_normal_db(struct sxe2_qp_common *qp) +{ + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + mb(); + writel(qp->qpn, qp->qp_db_no_llwqe); + + qp->initial_ring.head = qp->sq_ring.head; + DRV_RDMA_LOG_DEV_DEBUG("DB NOTIFY: QPN (%u -> %p) PI %u\n", qp->qpn, + qp->qp_db_no_llwqe, qp->sq_ring.head); +} + +static void sxe2_qp_push_wqe(struct sxe2_qp_common *qp, __le64 *wqe, + __u16 quanta, __u32 wqe_idx) +{ + __le64 *push; + struct sxe2_llwqe *llwqe; + unsigned long flags = 0; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + llwqe = qp->llwqe; + + if (!qp->llwqe_mode) { + sxe2_qp_ring_normal_db(qp); + } else { + spin_lock_irqsave(&llwqe->lock, flags); + + push = (__le64 *)((uintptr_t)qp->push_wqe + + (wqe_idx & 0x7) * 0x20); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "llwqe_flag", rdma_dev, push, wqe); +#else + sxe2_memcpy_x64(push, wqe, quanta * SXE2_QP_WQE_MIN_SIZE); +#endif + wmb(); + + set_32bit_val(qp->push_db, 0, + FIELD_PREP(SXE2_WQEALLOC_WQE_DESC_INDEX, + wqe_idx >> 3) | + qp->qpn); + qp->initial_ring.head = qp->sq_ring.head; + qp->llwqe_mode = true; + qp->push_dropped = false; + spin_unlock_irqrestore(&llwqe->lock, flags); + DRV_RDMA_LOG_DEV_DEBUG( + "DB NOTIFY(LLWQE): QPN (%#x -> %#lx llwqe %#lx) idx %#x\n", + qp->qpn, (uintptr_t)qp->push_db, + (uintptr_t)qp->push_wqe, wqe_idx >> 3); + } +} + +static inline void sxe2_set_qkeyqpn(__le64 *wqe, __u32 offset, __u32 qkey, + __u32 qpn) +{ + union sxe2_dqpn_data msg; + + msg.val = 0; + msg.field.dest_qkey = qkey; + msg.field.dest_qpn = qpn; + + wqe[offset >> 3] = cpu_to_le64(msg.val); +} + +static inline void sxe2_set_remote_offset(__le64 *wqe, __u32 offset, + __u64 remote_offset) +{ + wqe[offset >> 3] = cpu_to_le64(remote_offset); +} + +static void sxe2_set_send_hdr(__le64 *wqe, __u32 value, + struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp) +{ + union sxe2_send_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.remote_inv_rkey = + wr_info->rkey_to_inv | wr_info->op_info.send.ah_id; + hdr.field.op = wr_info->op_type; + hdr.field.addfragcnt = value; + hdr.field.report_rtt = wr_info->report_rtt; + hdr.field.imme_data_flag = wr_info->imm_data_valid; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.read_fence = wr_info->read_fence; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("SND_WQE_HDR: (%#llx)\n" + "remote_inv_key: %#x\n" + "op : %#x\n" + "addfragcnt : %d\n" + "report_rtt : %d\n" + "imme_data_flag: %d\n" + "push_wqe : %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + 
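+			       /*
+			        * Note (inferred from the code, not patch
+			        * text): every sxe2_set_*_hdr() helper writes
+			        * the header qword at offset 24, the one that
+			        * carries the wqe_valid polarity bit, only
+			        * after dma_wmb(), so hardware never observes
+			        * a half-built WQE as valid.
+			        */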
cpu_to_le64(hdr.val), hdr.field.remote_inv_rkey, + hdr.field.op, hdr.field.addfragcnt, + hdr.field.report_rtt, hdr.field.imme_data_flag, + hdr.field.push_wqe, hdr.field.read_fence, + hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_inlinesnd_hdr(__le64 *wqe, __u32 value, + struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp) +{ + union sxe2_send_inline_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.remote_inv_rkey = + wr_info->rkey_to_inv | wr_info->op_info.send.ah_id; + hdr.field.op = wr_info->op_type; + hdr.field.report_rtt = wr_info->report_rtt; + hdr.field.imme_data_flag = wr_info->imm_data_valid; + hdr.field.inline_data_len = value; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.inline_data_flag = 1; + hdr.field.read_fence = wr_info->read_fence; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("INSND_WQE_HDR: (%#llx)\n" + "remote_inv_key: %#x\n" + "op : %#x\n" + "report_rtt : %d\n" + "imme_data_flag: %d\n" + "inline_datalen: %d\n" + "push_wqe : %d\n" + "inline_dataflg: %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.remote_inv_rkey, + hdr.field.op, hdr.field.report_rtt, + hdr.field.imme_data_flag, + hdr.field.inline_data_len, hdr.field.push_wqe, + hdr.field.inline_data_flag, hdr.field.read_fence, + hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_write_hdr(__le64 *wqe, __u32 value, + struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp) +{ + union sxe2_write_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.remote_key = wr_info->op_info.rdma_write.rem_addr.lkey; + hdr.field.op = wr_info->op_type; + hdr.field.addfragcnt = value; + hdr.field.report_rtt = wr_info->report_rtt; + hdr.field.imme_data_flag = wr_info->imm_data_valid; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.read_fence = wr_info->read_fence; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("WRITE_WQE_HDR: (%#llx)\n" + "remote_key : %#x\n" + "op : %#x\n" + "addfragcnt : %d\n" + "report_rtt : %d\n" + "imme_data_flag: %d\n" + "push_wqe : %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.remote_key, + hdr.field.op, hdr.field.addfragcnt, + hdr.field.report_rtt, hdr.field.imme_data_flag, + hdr.field.push_wqe, hdr.field.read_fence, + hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_inlinewrite_hdr(__le64 *wqe, __u32 value, + struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp) +{ + union sxe2_write_inline_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.remote_key = 
wr_info->op_info.rdma_write.rem_addr.lkey; + hdr.field.op = wr_info->op_type; + hdr.field.report_rtt = wr_info->report_rtt; + hdr.field.imme_data_flag = wr_info->imm_data_valid; + hdr.field.inline_data_len = value; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.inline_data_flag = 1; + hdr.field.read_fence = wr_info->read_fence; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("INWRITE_WQE_HDR: (%#llx)\n" + "remote_key : %#x\n" + "op : %#x\n" + "report_rtt : %d\n" + "imme_data_flag: %d\n" + "inline_datalen: %d\n" + "push_wqe : %d\n" + "inline_dataflg: %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.remote_key, + hdr.field.op, hdr.field.report_rtt, + hdr.field.imme_data_flag, + hdr.field.inline_data_len, hdr.field.push_wqe, + hdr.field.inline_data_flag, hdr.field.read_fence, + hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_read_hdr(__le64 *wqe, __u32 addfragcnt, + struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp, bool ord_fence) +{ + union sxe2_read_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.remote_key = wr_info->op_info.rdma_read.rem_addr.lkey; + hdr.field.op = wr_info->op_type; + hdr.field.addfragcnt = addfragcnt; + hdr.field.report_rtt = wr_info->report_rtt; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.read_fence = wr_info->read_fence || ord_fence ? 1 : 0; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("READ_WQE_HDR: (%#llx)\n" + "remote_key : %#x\n" + "op : %#x\n" + "addfragcnt : %d\n" + "report_rtt : %d\n" + "push_wqe : %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.remote_key, + hdr.field.op, hdr.field.addfragcnt, + hdr.field.report_rtt, hdr.field.push_wqe, + hdr.field.read_fence, hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_invalidate_hdr(__le64 *wqe, struct sxe2_wr_info *wr_info, + struct sxe2_qp_common *qp) +{ + union sxe2_inval_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.op = wr_info->op_type; + hdr.field.push_wqe = wr_info->push_wqe; + hdr.field.read_fence = wr_info->read_fence; + hdr.field.local_fence = wr_info->local_fence; + hdr.field.signaled_completion = wr_info->signaled; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("LOCALINVKEY_WQE_HDR: (%#llx)\n" + "op : %#x\n" + "push_wqe : %d\n" + "read_fence : %d\n" + "local_fence : %d\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.op, + hdr.field.push_wqe, hdr.field.read_fence, + hdr.field.local_fence, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static void sxe2_set_nop_hdr(__le64 *wqe, __u32 offset, + struct sxe2_qp_common *qp) +{ + union 
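+	/*
+	 * Note (inferred from sxe2_qp_get_next_send_wqe(), not patch text):
+	 * NOP WQEs built with this header pad out the tail of an SQ chunk so
+	 * a multi-quanta WQE never straddles a max_hw_sq_chunk boundary, and
+	 * round odd-quanta push WQEs up to an even count.
+	 */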
sxe2_nop_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.op = SXE2_OP_TYPE_NOP; + hdr.field.signaled_completion = false; + hdr.field.wqe_valid = qp->swqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, offset, hdr.val); + + DRV_RDMA_LOG_DEV_DEBUG("NOP_WQE_HDR: (%#llx)\n" + "op : %#x\n" + "signaled : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.op, + hdr.field.signaled_completion, + hdr.field.wqe_valid); +} + +static int sxe2_hw_nop(struct sxe2_qp_common *qp) +{ + __le64 *wqe; + __u32 wqe_idx; + + if (!qp->sq_ring.head) + return -EINVAL; + + wqe_idx = SXE2_RING_CURRENT_HEAD(qp->sq_ring); + wqe = qp->sq_base[wqe_idx].elem; + + qp->sq_wrtrk_array[wqe_idx].quanta = SXE2_QP_WQE_MIN_QUANTA; + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + sxe2_set_nop_hdr(wqe, 24, qp); + + return 0; +} + +static void sxe2_set_rcvq_hdr(__le64 *wqe, __u32 addl_frag_cnt, + struct sxe2_qp_common *qp) +{ + union sxe2_rq_hdr hdr; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + hdr.val = 0; + hdr.field.addfragcnt = addl_frag_cnt; + hdr.field.wqe_valid = qp->rwqe_polarity; + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr.val); + DRV_RDMA_LOG_DEV_DEBUG("RCV_WQE_HDR: (%#llx)\n" + "addfragcnt : %d\n" + "wqe_valid : %d\n", + cpu_to_le64(hdr.val), hdr.field.addfragcnt, + hdr.field.wqe_valid); +} + +static inline void sxe2_set_immedata(__le64 *wqe, __u32 offset, __u64 imm_data) +{ + wqe[offset >> 3] = cpu_to_le64(imm_data); +} + +static inline void sxe2_set_sgelist_data(void *wqe, __u32 offset, + struct ib_sge *sge, __u8 valid) +{ + struct sxe2_frag_data *pmsg = (struct sxe2_frag_data *)wqe; + uint32_t len; + + pmsg = pmsg + offset / sizeof(*pmsg); + if (sge) { + len = (sge->length & (1U << 31)) ? 
0 : sge->length; + pmsg->tag_offset = cpu_to_le64(sge->addr); + pmsg->offset8.field.frag_valid = valid; + pmsg->offset8.field.frag_len = len; + pmsg->offset8.field.stag = sge->lkey; + pmsg->offset8.val = cpu_to_le64(pmsg->offset8.val); + } else { + pmsg->tag_offset = 0; + pmsg->offset8.field.frag_valid = valid; + pmsg->offset8.val = cpu_to_le64(pmsg->offset8.val); + } +} + +static void sxe2_set_inline_data_seg_list(__u8 *wqe, struct ib_sge *sge_list, + __u32 num_sges, __u8 polarity) +{ + __u8 inline_valid = (__u8)(polarity << SXE2_INLINE_VALID_S); + __u32 quanta_bytes_remaining = 8; + __u32 i; + bool first_quanta = true; + __u32 bytes_copied = 0; + + wqe += 8; + + for (i = 0; i < num_sges; i++) { + __u8 *cur_sge = (__u8 *)(uintptr_t)sge_list[i].addr; + __u32 sge_len = sge_list[i].length; + + while (sge_len) { + bytes_copied = min(sge_len, quanta_bytes_remaining); + memcpy(wqe, cur_sge, bytes_copied); + wqe += bytes_copied; + cur_sge += bytes_copied; + quanta_bytes_remaining -= bytes_copied; + sge_len -= bytes_copied; + + if (!quanta_bytes_remaining) { + quanta_bytes_remaining = 31; + + if (first_quanta) { + first_quanta = false; + wqe += 16; + } else { + *wqe = inline_valid; + wqe++; + } + } + } + } + if (!first_quanta && quanta_bytes_remaining < 31) + *(wqe + quanta_bytes_remaining) = inline_valid; +} + +static __le64 *sxe2_qp_get_next_send_wqe(struct sxe2_qp_common *qp, + __u32 *wqe_idx, __u16 *quanta, + __u32 total_size, u64 wr_id, + bool push_wqe) +{ + __le64 *wqe; + __u32 nop_wqe_idx; + __u16 org_wqe_quanta = *quanta; + __u16 wqe_quanta = *quanta; + bool push_wqe_pad = false; + __u16 avail_quanta; + __u16 i; + __le64 *nop_wqe; + + if (push_wqe && (wqe_quanta & SXE2_WQE_QUANTA_ODD_NUMBER)) { + wqe_quanta = wqe_quanta + 1; + push_wqe_pad = true; + } + + avail_quanta = qp->common_attrs->max_hw_sq_chunk - + (SXE2_RING_CURRENT_HEAD(qp->sq_ring) % + qp->common_attrs->max_hw_sq_chunk); + if (wqe_quanta <= avail_quanta) { + if (wqe_quanta > SXE2_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + } else { + if (wqe_quanta + avail_quanta > + SXE2_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + + nop_wqe_idx = SXE2_RING_CURRENT_HEAD(qp->sq_ring); + for (i = 0; i < avail_quanta; i++) { + (void)sxe2_hw_nop(qp); + SXE2_RING_MOVE_HEAD_NOCHECK(qp->sq_ring); + } + if (qp->push_db && push_wqe) + sxe2_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem, + avail_quanta, nop_wqe_idx); + } + + *wqe_idx = SXE2_RING_CURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + SXE2_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, wqe_quanta); + + wqe = qp->sq_base[*wqe_idx].elem; + + qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id; + qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[*wqe_idx].quanta = org_wqe_quanta; + + if (push_wqe_pad) { + nop_wqe_idx = *wqe_idx + org_wqe_quanta; + nop_wqe = qp->sq_base[nop_wqe_idx].elem; + qp->sq_wrtrk_array[nop_wqe_idx].quanta = SXE2_QP_WQE_MIN_QUANTA; + + set_64bit_val(nop_wqe, 0, 0); + set_64bit_val(nop_wqe, 8, 0); + set_64bit_val(nop_wqe, 16, 0); + sxe2_set_nop_hdr(nop_wqe, 24, qp); + } + *quanta = wqe_quanta; + + return wqe; +} + +static __le64 *sxe2_qp_get_next_recv_wqe(struct sxe2_qp_common *qp, + __u32 *wqe_idx) +{ + __le64 *wqe; + int ret_code; + + if (SXE2_RING_FULL_ERR(qp->rq_ring)) + return NULL; + + SXE2_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + if (!*wqe_idx) + qp->rwqe_polarity = !qp->rwqe_polarity; + + wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem; + + 
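+	/*
+	 * Note (inferred from the code, not patch text): rwqe_polarity, like
+	 * swqe_polarity on the send side, flips each time the ring index
+	 * wraps to 0; hardware compares a WQE's valid bit against the
+	 * current polarity, so entries left over from the previous lap are
+	 * ignored without extra bookkeeping.
+	 */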
return wqe; +} + +int sxe2_hw_send(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_post_send *op_info; + __u32 i, wqe_idx, total_size = 0, byte_off; + int ret_code; + __u32 frag_cnt, addl_frag_cnt; + __u16 quanta; + u64 frag_info = 0; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + op_info = &wr_info->op_info.send; + if (qp->max_sq_sge_cnt < op_info->num_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) + total_size += op_info->sg_list[i].length; + + if (wr_info->imm_data_valid) + frag_cnt = op_info->num_sges + 1; + else + frag_cnt = op_info->num_sges; + + ret_code = sxe2_fragcnt_to_quanta_cnt(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0; + + if (wr_info->imm_data_valid) { + sxe2_set_immedata(wqe, 0, wr_info->imm_data); + i = 0; + } else { + sxe2_set_sgelist_data(wqe, 0, + frag_cnt ? op_info->sg_list : NULL, + qp->swqe_polarity); + i = 1; + } + + if (total_size == 0) { + get_64bit_val(wqe, 8, &frag_info); + frag_info = frag_info & (~SXE2_WQE_FRAG_VALID); + SXE2_SET_FIELD(frag_info, SXE2_WQE_FRAG_VALID, + !qp->swqe_polarity); + set_64bit_val(wqe, 8, frag_info); + DRV_RDMA_LOG_DEV_DEBUG("wr set frag_info [%#llx] field [%d]\n", + frag_info, !qp->swqe_polarity); + } + + sxe2_set_qkeyqpn(wqe, 16, op_info->qkey, op_info->dest_qp); + + for (byte_off = 32; i < op_info->num_sges;) { + sxe2_set_sgelist_data(wqe, byte_off, &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += 16; + i++; + } + + if (!(frag_cnt & 0x01) && frag_cnt) + sxe2_set_sgelist_data(wqe, byte_off, NULL, qp->swqe_polarity); + + sxe2_set_send_hdr(wqe, addl_frag_cnt, wr_info, qp); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "send"); + + return 0; +} + +int sxe2_hw_inline_send(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_post_send *op_info; + __u32 wqe_idx; + __u32 i, total_size = 0; + __u16 quanta; + + op_info = &wr_info->op_info.send; + + if (unlikely(qp->max_sq_sge_cnt < op_info->num_sges)) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) + total_size += op_info->sg_list[i].length; + + if (unlikely(total_size > qp->max_inline_data)) + return -EINVAL; + + quanta = sxe2_inline_to_quanta_cnt(total_size); + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + if (wr_info->imm_data_valid) + sxe2_set_immedata(wqe, 0, wr_info->imm_data); + + sxe2_set_qkeyqpn(wqe, 16, op_info->qkey, op_info->dest_qp); + + sxe2_set_inline_data_seg_list((__u8 *)wqe, op_info->sg_list, + op_info->num_sges, qp->swqe_polarity); + + sxe2_set_inlinesnd_hdr(wqe, total_size, wr_info, qp); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "inline_send"); + + return 0; +} + +int sxe2_hw_rdma_write(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq) +{ + __le64 *wqe; + struct sxe2_rdma_write *op_info; + __u32 i, wqe_idx; + __u32 total_size = 0, byte_off; + int ret_code; + __u32 frag_cnt, addl_frag_cnt; + __u16 quanta; + u64 frag_info = 0; + struct 
sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + op_info = &wr_info->op_info.rdma_write; + + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].length; + + if (wr_info->imm_data_valid) + frag_cnt = op_info->num_lo_sges + 1; + else + frag_cnt = op_info->num_lo_sges; + + addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0; + ret_code = sxe2_fragcnt_to_quanta_cnt(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + if (wr_info->imm_data_valid) { + sxe2_set_immedata(wqe, 0, wr_info->imm_data); + i = 0; + } else { + sxe2_set_sgelist_data(wqe, 0, op_info->lo_sg_list, + qp->swqe_polarity); + i = 1; + } + + if (total_size == 0) { + get_64bit_val(wqe, 8, &frag_info); + frag_info = frag_info & (~SXE2_WQE_FRAG_VALID); + SXE2_SET_FIELD(frag_info, SXE2_WQE_FRAG_VALID, + !qp->swqe_polarity); + set_64bit_val(wqe, 8, frag_info); + DRV_RDMA_LOG_DEV_DEBUG("wr set frag_info [%#llx] field [%d]\n", + frag_info, !qp->swqe_polarity); + } + + sxe2_set_remote_offset(wqe, 16, op_info->rem_addr.addr); + + for (byte_off = 32; i < op_info->num_lo_sges;) { + sxe2_set_sgelist_data(wqe, byte_off, &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += 16; + i++; + } + + if (!(frag_cnt & 0x01) && frag_cnt) + sxe2_set_sgelist_data(wqe, byte_off, NULL, qp->swqe_polarity); + + sxe2_set_write_hdr(wqe, addl_frag_cnt, wr_info, qp); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "write"); + + return 0; +} + +int sxe2_hw_inline_rdma_write(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq) +{ + __le64 *wqe; + struct sxe2_rdma_write *op_info; + __u32 wqe_idx; + __u32 i, total_size = 0; + __u16 quanta; + + op_info = &wr_info->op_info.rdma_write; + + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].length; + + if (unlikely(total_size > qp->max_inline_data)) + return -EINVAL; + + quanta = sxe2_inline_to_quanta_cnt(total_size); + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + if (wr_info->imm_data_valid) + sxe2_set_immedata(wqe, 0, wr_info->imm_data); + + sxe2_set_remote_offset(wqe, 16, op_info->rem_addr.addr); + + sxe2_set_inline_data_seg_list((__u8 *)wqe, op_info->lo_sg_list, + op_info->num_lo_sges, qp->swqe_polarity); + + sxe2_set_inlinewrite_hdr(wqe, total_size, wr_info, qp); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "inline_write"); + + return 0; +} + +int sxe2_hw_rdma_read(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq) +{ + struct sxe2_rdma_read *op_info; + int ret_code; + __u32 i, byte_off, total_size = 0; + __u32 addl_frag_cnt; + __le64 *wqe; + __u32 wqe_idx; + __u16 quanta; + u64 frag_info = 0; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + bool ord_fence = false; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + + op_info = &wr_info->op_info.rdma_read; + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].length; + + ret_code = sxe2_fragcnt_to_quanta_cnt(op_info->num_lo_sges, &quanta); + if (ret_code) + return ret_code; + + 
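+	/*
+	 * Note (inferred from the code, not patch text): when rd_fence_rate
+	 * is set, every rd_fence_rate-th RDMA read gets read_fence forced in
+	 * its header (ord_fence below), bounding how many reads can be
+	 * outstanding ahead of the responder.
+	 */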
if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) { + ord_fence = true; + qp->ord_cnt = 0; + } + + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + addl_frag_cnt = + op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0; + + sxe2_set_sgelist_data(wqe, 0, op_info->lo_sg_list, qp->swqe_polarity); + + if (total_size == 0) { + get_64bit_val(wqe, 8, &frag_info); + frag_info = frag_info & (~SXE2_WQE_FRAG_VALID); + SXE2_SET_FIELD(frag_info, SXE2_WQE_FRAG_VALID, + !qp->swqe_polarity); + set_64bit_val(wqe, 8, frag_info); + DRV_RDMA_LOG_DEV_DEBUG("wr set frag_info [%#llx] field [%d]\n", + frag_info, !qp->swqe_polarity); + } + + sxe2_set_remote_offset(wqe, 16, op_info->rem_addr.addr); + + for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) { + sxe2_set_sgelist_data(wqe, byte_off, &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += 16; + } + + if (!(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) + sxe2_set_sgelist_data(wqe, byte_off, NULL, qp->swqe_polarity); + + sxe2_set_read_hdr(wqe, addl_frag_cnt, wr_info, qp, ord_fence); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "rdma_read"); + + return 0; +} + +int sxe2_hw_local_invalidate(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq) +{ + __le64 *wqe; + struct sxe2_local_invalidate *op_info; + __u32 wqe_idx; + __u16 quanta = SXE2_QP_WQE_MIN_QUANTA; + struct ib_sge sge = {}; + + op_info = &wr_info->op_info.local_inval; + + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + sge.lkey = op_info->target_stag; + sxe2_set_sgelist_data(wqe, 0, &sge, 0); + + set_64bit_val(wqe, 16, 0); + + sxe2_set_invalidate_hdr(wqe, wr_info, qp); + + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + else if (post_sq) + sxe2_qp_ring_normal_db(qp); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "local_invalidate"); + + return 0; +} + +int sxe2_hw_mr_fast_register(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq) +{ + __le64 *wqe; + u32 wqe_idx; + __u16 quanta = SXE2_QP_WQE_MIN_QUANTA; + struct sxe2_quanta *buf; + + wqe = sxe2_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, + wr_info->wr_id, wr_info->push_wqe); + if (!wqe) + return -ENOMEM; + + wr_info->op_info.fastreg_mr.wqe_valid = qp->swqe_polarity; + + buf = (struct sxe2_quanta *)&wr_info->op_info.fastreg_mr; + + set_64bit_val(wqe, 0, buf->buffer[0]); + set_64bit_val(wqe, 8, buf->buffer[1]); + set_64bit_val(wqe, 16, buf->buffer[2]); + + dma_wmb(); + + set_64bit_val(wqe, 24, buf->buffer[3]); + if (wr_info->push_wqe) + sxe2_qp_push_wqe(qp, wqe, quanta, wqe_idx); + + sxe2_dump_wqe(qp, wqe, quanta, wqe_idx, "fast_regmr"); + + return 0; +} + +static int sxe2_hw_post_receive(struct sxe2_qp_common *qp, + struct sxe2_rq_info *wr_info) +{ + __u32 wqe_idx, i, byte_off; + __u32 addl_frag_cnt; + __le64 *wqe; + struct sxe2_rdma_qp *rdma_qp; + struct sxe2_rdma_device *rdma_dev; + + rdma_qp = container_of(qp, struct sxe2_rdma_qp, qp_ctx.qp_common); + rdma_dev = rdma_qp->dev; + if (!qp->rq_size) + return -EINVAL; + + wqe = sxe2_qp_get_next_recv_wqe(qp, &wqe_idx); + if (!wqe) + return -ENOMEM; + + qp->rq_wrid_array[wqe_idx] = wr_info->wr_id; + addl_frag_cnt = wr_info->num_sges > 1 ? 
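+	/*
+	 * Note (inferred from the code, not patch text): the receive side
+	 * publishes its producer index through the in-memory
+	 * doorbell_note[SXE2_QP_RQ_PI] record a few lines below instead of
+	 * an MMIO doorbell write.
+	 */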
(wr_info->num_sges - 1) : 0; + sxe2_set_sgelist_data(wqe, 0, wr_info->sg_list, qp->rwqe_polarity); + + for (i = 1, byte_off = 32; i < wr_info->num_sges; i++) { + sxe2_set_sgelist_data(wqe, byte_off, &wr_info->sg_list[i], + qp->rwqe_polarity); + byte_off += 16; + } + + if (!(wr_info->num_sges & 0x01) && wr_info->num_sges) + sxe2_set_sgelist_data(wqe, byte_off, NULL, qp->rwqe_polarity); + + set_64bit_val(wqe, 16, 0); + + sxe2_set_rcvq_hdr(wqe, addl_frag_cnt, qp); + + dma_wmb(); + + qp->doorbell_note[SXE2_QP_RQ_PI] = + cpu_to_le32(SXE2_RING_CURRENT_HEAD(qp->rq_ring)); + + DRV_RDMA_LOG_DEV_DEBUG("POST RCV(qpn = %u): wqe_idx %u wr_id %llu\n" + " rq_pi %u ring_size %u\n", + qp->qpn, wqe_idx, qp->rq_wrid_array[wqe_idx], + SXE2_RING_CURRENT_HEAD(qp->rq_ring), + SXE2_RING_SIZE(qp->rq_ring)); + for (i = 0; i < wr_info->num_sges; i++) { + DRV_RDMA_LOG_DEV_DEBUG( + "sgelist[%d] addr 0x%llx len [%u] lkey [%u]\n", i, + wr_info->sg_list[i].addr, wr_info->sg_list[i].length, + wr_info->sg_list[i].lkey); + } + return 0; +} +static void sxe2_wrinfo_init_inv(struct ib_qp *ibqp, + const struct ib_send_wr *ib_wr, + struct sxe2_wr_info *wr_info) +{ + if (ib_wr->opcode == IB_WR_SEND || + ib_wr->opcode == IB_WR_SEND_WITH_IMM) { + wr_info->op_type = SXE2_OP_TYPE_SEND; + if (ib_wr->send_flags & IB_SEND_SOLICITED) + wr_info->op_type = SXE2_OP_TYPE_SEND_SOL; + } else { + wr_info->op_type = SXE2_OP_TYPE_SEND_INV; + if (ib_wr->send_flags & IB_SEND_SOLICITED) + wr_info->op_type = SXE2_OP_TYPE_SEND_SOL_INV; + wr_info->rkey_to_inv = ib_wr->ex.invalidate_rkey; + } + wr_info->op_info.send.num_sges = (u32)ib_wr->num_sge; + wr_info->op_info.send.sg_list = (struct ib_sge *)ib_wr->sg_list; + if ((ibqp->qp_type == IB_QPT_UD) || (ibqp->qp_type == IB_QPT_GSI)) { + struct sxe2_ah *ah = + container_of(ud_wr(ib_wr)->ah, struct sxe2_ah, ibah); + + wr_info->op_info.send.ah_id = ah->ctx_ah.ah_info.field.ah_idx; + if (ibqp->qp_type == IB_QPT_GSI) + wr_info->op_info.send.qkey = IB_QP1_QKEY; + else + wr_info->op_info.send.qkey = ud_wr(ib_wr)->remote_qkey; + wr_info->op_info.send.dest_qp = ud_wr(ib_wr)->remote_qpn; + } + wr_info->funid = SXE2_RDMA_SEND; + if (ib_wr->send_flags & IB_SEND_INLINE) + wr_info->funid = SXE2_RDMA_SEND_INLINE; +} +static void sxe2_wrinfo_init_reg_mr(struct ib_qp *ibqp, + const struct ib_send_wr *ib_wr, + struct sxe2_wr_info *wr_info) +{ + struct sxe2_mr *vendor_mr; + struct sxe2_pbl_pble_alloc_info *palloc; + struct sxe2_rdma_qp *kqp; + + kqp = to_qp(ibqp); + vendor_mr = ibmr_to_vendor_mr(reg_wr(ib_wr)->mr); + palloc = &vendor_mr->pble_alloc; + wr_info->op_info.fastreg_mr.signaled_completion = wr_info->signaled; + wr_info->op_info.fastreg_mr.read_fence = wr_info->read_fence; + wr_info->op_info.fastreg_mr.access_right = + sxe2_get_mr_access(reg_wr(ib_wr)->access); + wr_info->op_info.fastreg_mr.mr_key = reg_wr(ib_wr)->key & 0xff; + wr_info->op_info.fastreg_mr.mr_idx = reg_wr(ib_wr)->key >> 8; + wr_info->op_info.fastreg_mr.log_entity_size = + ilog2(reg_wr(ib_wr)->mr->page_size); + if (reg_wr(ib_wr)->access & IB_ZERO_BASED) { + wr_info->op_info.fastreg_mr.va_based_flag = + SXE2_ADDR_TYPE_ZERO_BASED; + wr_info->op_info.fastreg_mr.va_or_offset = + vendor_mr->ibmr.iova & (vendor_mr->page_size - 1); + } else { + wr_info->op_info.fastreg_mr.va_based_flag = + SXE2_ADDR_TYPE_VA_BASED; + wr_info->op_info.fastreg_mr.va_or_offset = vendor_mr->ibmr.iova; + } + wr_info->op_info.fastreg_mr.len = vendor_mr->ibmr.length; + wr_info->op_info.fastreg_mr.op = SXE2_OP_TYPE_FAST_REG_MR; + wr_info->op_info.fastreg_mr.pbl_mode = 
palloc->pbl_mode.mode;
+	wr_info->op_info.fastreg_mr.pbl_index = palloc->pbl_index;
+	wr_info->op_info.fastreg_mr.push_wqe =
+		kqp->qp_ctx.qp_common.push_db ? true : false;
+	wr_info->op_info.fastreg_mr.local_fence = wr_info->read_fence;
+	wr_info->funid = SXE2_RDMA_FAST_REG_MR;
+}
+
+static int sxe2_wrinfo_init(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr,
+			    struct sxe2_wr_info *wr_info)
+{
+	struct sxe2_common_attrs *uk_attrs;
+	struct sxe2_rdma_qp *kqp;
+	struct sxe2_rdma_ctx_dev *ctx_dev;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_qp_common *cqp;
+	int err = 0;
+
+	kqp = to_qp(ibqp);
+	ctx_dev = &kqp->dev->rdma_func->ctx_dev;
+	uk_attrs = &ctx_dev->hw_attrs.uk_attrs;
+	rdma_dev = kqp->dev;
+	cqp = &kqp->qp_ctx.qp_common;
+
+	memset(wr_info, 0, sizeof(*wr_info));
+
+	wr_info->wr_id = ib_wr->wr_id;
+	wr_info->push_wqe = kqp->qp_ctx.qp_common.push_db ? true : false;
+
+	if ((ib_wr->send_flags & IB_SEND_SIGNALED) || kqp->sig_all)
+		wr_info->signaled = true;
+
+	if (ib_wr->send_flags & IB_SEND_FENCE)
+		wr_info->read_fence = true;
+
+	switch (ib_wr->opcode) {
+	case IB_WR_SEND_WITH_IMM:
+		if (!(kqp->qp_ctx.qp_common.qp_caps & SXE2_SEND_WITH_IMM))
+			return -EINVAL;
+		wr_info->imm_data_valid = true;
+		wr_info->imm_data = ntohl(ib_wr->ex.imm_data);
+		fallthrough;
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_INV:
+		sxe2_wrinfo_init_inv(ibqp, ib_wr, wr_info);
+		break;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (!(kqp->qp_ctx.qp_common.qp_caps & SXE2_WRITE_WITH_IMM))
+			return -EINVAL;
+
+		wr_info->imm_data_valid = true;
+		wr_info->imm_data = ntohl(ib_wr->ex.imm_data);
+		fallthrough;
+	case IB_WR_RDMA_WRITE:
+		if (ib_wr->num_sge > (int)cqp->max_sq_sge_cnt)
+			return -EINVAL;
+		wr_info->op_type = SXE2_OP_TYPE_RDMA_WRITE;
+		if (ib_wr->send_flags & IB_SEND_SOLICITED)
+			wr_info->op_type = SXE2_OP_TYPE_RDMA_WRITE_SOL;
+
+		wr_info->op_info.rdma_write.num_lo_sges = (u32)ib_wr->num_sge;
+		wr_info->op_info.rdma_write.lo_sg_list = ib_wr->sg_list;
+		wr_info->op_info.rdma_write.rem_addr.addr =
+			rdma_wr(ib_wr)->remote_addr;
+		wr_info->op_info.rdma_write.rem_addr.lkey =
+			rdma_wr(ib_wr)->rkey;
+		wr_info->funid = SXE2_RDMA_WRITE;
+		if (ib_wr->send_flags & IB_SEND_INLINE)
+			wr_info->funid = SXE2_RDMA_WRITE_INLINE;
+		break;
+	case IB_WR_RDMA_READ:
+		if (ib_wr->num_sge > (int)uk_attrs->max_hw_read_sges)
+			return -EINVAL;
+		wr_info->op_type = SXE2_OP_TYPE_RDMA_READ;
+		wr_info->op_info.rdma_read.rem_addr.addr =
+			rdma_wr(ib_wr)->remote_addr;
+		wr_info->op_info.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+
+		wr_info->op_info.rdma_read.lo_sg_list = ib_wr->sg_list;
+		wr_info->op_info.rdma_read.num_lo_sges = (u32)ib_wr->num_sge;
+		wr_info->funid = SXE2_RDMA_READ;
+		break;
+	case IB_WR_LOCAL_INV:
+		wr_info->op_type = SXE2_OP_TYPE_LOCAL_INV;
+		wr_info->local_fence = wr_info->read_fence;
+		wr_info->op_info.local_inval.target_stag =
+			ib_wr->ex.invalidate_rkey;
+		wr_info->funid = SXE2_RDMA_LOCAL_INV;
+		wr_info->post_wqe = true;
+		break;
+	case IB_WR_REG_MR:
+		sxe2_wrinfo_init_reg_mr(ibqp, ib_wr, wr_info);
+		break;
+
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"wr_info qpn [0x%x] opcode [%d]\n"
+		"wr_id %llu push_mode %d post_wqe %d signaled %d\n",
+		ibqp->qp_num, ib_wr->opcode, wr_info->wr_id, wr_info->push_wqe,
+		wr_info->post_wqe, wr_info->signaled);
+
+	return err;
+}
+
+int sxe2_kpost_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr,
+		    const struct ib_send_wr **bad_wr)
+{
+	struct sxe2_rdma_qp *qp;
+	struct sxe2_qp_common *kqp;
+	struct sxe2_rdma_ctx_dev *dev;
+	struct sxe2_wr_info wr_info;
+	int err = 0;
+	unsigned long flags = 0;
+	struct sxe2_rdma_device *rdma_dev;
+
+	qp = to_qp(ibqp);
+	kqp = &qp->qp_ctx.qp_common;
+	dev = &qp->dev->rdma_func->ctx_dev;
+
+	rdma_dev = qp->dev;
+
+	spin_lock_irqsave(&qp->lock, flags);
+	while (ib_wr) {
+		err = sxe2_wrinfo_init(ibqp, ib_wr, &wr_info);
+		if (err) {
+			*bad_wr = ib_wr;
+			break;
+		}
+
+		err = g_rdma_op[wr_info.funid](kqp, &wr_info, wr_info.post_wqe);
+		if (err) {
+			*bad_wr = ib_wr;
+			break;
+		}
+
+		ib_wr = ib_wr->next;
+	}
+
+	if (!qp->flush_issued) {
+		if (qp->ibqp_state <= IB_QPS_RTS) {
+			if (!kqp->push_db)
+				sxe2_qp_ring_normal_db(kqp);
+		}
+		spin_unlock_irqrestore(&qp->lock, flags);
+	} else {
+		spin_unlock_irqrestore(&qp->lock, flags);
+		DRV_RDMA_LOG_DEV_DEBUG("io send trigger qp [%u] flush work\n",
+				       qp->ibqp.qp_num);
+		sxe2_sched_qp_flush_work(qp);
+	}
+
+	return err;
+}
+
+int sxe2_kpost_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
+		    const struct ib_recv_wr **bad_wr)
+{
+	struct sxe2_rdma_qp *qp = to_qp(ibqp);
+	struct sxe2_qp_common *kqp = &qp->qp_ctx.qp_common;
+	struct sxe2_rq_info rq_info = {};
+	struct sxe2_rdma_device *rdma_dev = qp->dev;
+	unsigned long flags = 0;
+	int err = 0;
+
+	if (kqp->srq) {
+		*bad_wr = ib_wr;
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qp->lock, flags);
+
+	while (ib_wr) {
+		if (ib_wr->num_sge > (int)kqp->max_rq_sge_cnt) {
+			err = -EINVAL;
+			*bad_wr = ib_wr;
+			goto out;
+		}
+		rq_info.num_sges = (u32)ib_wr->num_sge;
+		rq_info.wr_id = ib_wr->wr_id;
+		rq_info.sg_list = ib_wr->sg_list;
+		err = sxe2_hw_post_receive(kqp, &rq_info);
+		if (err) {
+			*bad_wr = ib_wr;
+			goto out;
+		}
+
+		ib_wr = ib_wr->next;
+	}
+
+out:
+	spin_unlock_irqrestore(&qp->lock, flags);
+	if (qp->flush_issued) {
+		DRV_RDMA_LOG_DEV_DEBUG("io recv trigger qp [%u] flush work\n",
+				       qp->ibqp.qp_num);
+		sxe2_sched_qp_flush_work(qp);
+	}
+
+	return err;
+}
+
+static bool sxe2_cq_empty(struct sxe2_rdma_cq_uk *cq)
+{
+	__le64 *cqe;
+	struct sxe2_cqe_info cqe_info;
+
+	cqe = SXE2_GET_CURRENT_CQ_ELEM(cq);
+
+	cqe_info.info.buf[SXE2_CQE_SIZE - 1] =
+		le64_to_cpu(cqe[SXE2_CQE_SIZE - 1]);
+
+	return cqe_info.info.field.cqe_valid != cq->polarity;
+}
+
+static int sxe2_get_next_cqe(struct sxe2_rdma_cq_uk *cq,
+			     struct sxe2_cqe_info *cqe_info)
+{
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rdma_ctx_cq *cq_ctx;
+	__le64 *cqe;
+	int i;
+
+	cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk);
+	rdma_dev = to_rdmadev(cq_ctx->dev);
+
+	cqe = SXE2_GET_CURRENT_CQ_ELEM(cq);
+	cqe_info->info.buf[SXE2_CQE_SIZE - 1] =
+		le64_to_cpu(cqe[SXE2_CQE_SIZE - 1]);
+
+	if (cqe_info->info.field.cqe_valid != cq->polarity)
+		return -ENOENT;
+
+	dma_rmb();
+
+	for (i = 0; i < SXE2_CQE_SIZE - 1; i++)
+		cqe_info->info.buf[i] = le64_to_cpu(cqe[i]);
+
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"CQ1 (%d) CI(0x%x) cqeinfo:\n"
+		"%#llx(%#llx) %#llx(%#llx) %#llx(%#llx) %#llx(%#llx)\n"
+		"%#llx(%#llx) %#llx(%#llx) %#llx(%#llx) %#llx(%#llx)\n",
+		cq->cq_id, cq->cq_ring.head, cqe[0], cqe_info->info.buf[0],
+		cqe[1], cqe_info->info.buf[1], cqe[2], cqe_info->info.buf[2],
+		cqe[3], cqe_info->info.buf[3], cqe[4], cqe_info->info.buf[4],
+		cqe[5], cqe_info->info.buf[5], cqe[6], cqe_info->info.buf[6],
+		cqe[7], cqe_info->info.buf[7]);
+
+	return SXE2_CQ_OK;
+}
+
+static int sxe2_read_cqe(__le64 *cqe, u8 cq_polarity,
+			 struct sxe2_cqe_info *cqe_info)
+{
+	int i;
+
+	cqe_info->info.buf[SXE2_CQE_SIZE - 1] =
+		le64_to_cpu(cqe[SXE2_CQE_SIZE - 1]);
+
+	if (cqe_info->info.field.cqe_valid != cq_polarity)
+		return -ENOENT;
+
+	dma_rmb();
+
+	for (i = 0; i < SXE2_CQE_SIZE - 1;
i++) + cqe_info->info.buf[i] = le64_to_cpu(cqe[i]); + + return SXE2_CQ_OK; +} + +static int sxe2_hw_flush_one_sq_wqe(struct sxe2_rdma_cq_uk *cq, + struct sxe2_qp_common *qp, + struct sxe2_cqe_info *cqe_info) +{ + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + __le64 *sw_wqe; + __u64 wqe_hdr; + __u32 tail; + + cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + + if (!SXE2_RING_MORE_WORK(qp->sq_ring) || !SXE2_RING_MORE_WORK_PAD(qp->sq_ring)) + return -ENOENT; + + do { + tail = qp->sq_ring.tail; + sw_wqe = qp->sq_base[tail].elem; + get_64bit_val(sw_wqe, 24, &wqe_hdr); + cqe_info->info.field.op = + (u8)FIELD_GET(SXE2_WQE_OPCODE, wqe_hdr); + SXE2_RING_SET_TAIL(qp->sq_ring, + tail + qp->sq_wrtrk_array[tail].quanta); + if (cqe_info->info.field.op != SXE2_OP_TYPE_NOP) { + cqe_info->wr_id = qp->sq_wrtrk_array[tail].wrid; + cqe_info->bytes = qp->sq_wrtrk_array[tail].wr_len; + DRV_RDMA_LOG_DEV_DEBUG( + "flush qp %u sq_pi %u sq_ci %u wqe_idx %u wr_id %llu.\n", + qp->qpn, SXE2_RING_CURRENT_HEAD(qp->sq_ring), + SXE2_RING_CURRENT_TAIL(qp->sq_ring), tail, + cqe_info->wr_id); +#ifdef SXE2_CFG_DEBUG + qp->statistics.flushed_sq_cnt++; + qp->statistics.last_rcvd_sqwrid = cqe_info->wr_id; +#endif + break; + } + } while (1); + + return SXE2_CQ_OK; +} + +static void sxe2_move_srq_ring_tail(struct sxe2_srq_drv *srq) +{ + struct sxe2_rdma_srq_ctx *srq_ctx; + __u32 tail; + + srq_ctx = container_of(srq, struct sxe2_rdma_srq_ctx, srq_drv); + + while (SXE2_RING_MORE_WORK(srq->srq_ring)) { + tail = SXE2_RING_CURRENT_TAIL(srq->srq_ring); + if (srq_ctx->ksrq_rsc.srqe_array[tail] == SXE2_SRQE_BUSY) + break; + SXE2_RING_MOVE_TAIL(srq->srq_ring); + } +} +static void sxe2_hw_deal_srq_cqe(struct sxe2_rdma_cq_uk *cq, + struct sxe2_cqe_info *cqe_info, + struct sxe2_qp_common *qp, __u32 qpn) +{ + struct sxe2_srq_drv *srq; + struct sxe2_rdma_srq_ctx *srq_ctx; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + __u32 array_idx; + __u32 wqe_idx; + + cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + wqe_idx = (__u32)cqe_info->info.field.wq_desc_idx; + srq = qp->srq; + srq_ctx = container_of(srq, struct sxe2_rdma_srq_ctx, srq_drv); + array_idx = wqe_idx / srq->wqe_size_multiplier; + if (srq_ctx->ksrq_rsc.srqe_array[array_idx] == SXE2_SRQE_BUSY) + srq_ctx->ksrq_rsc.srqe_array[array_idx] = SXE2_SRQE_FREE; + else + DRV_RDMA_LOG_ERROR_BDF( + "cq %u received invalid cqe\n" + "(qpn %u srqn %u wqe_idx %u mul_size %d flag %#x).\n", + cq->cq_id, qpn, srq->srq_id, wqe_idx, + srq->wqe_size_multiplier, + srq_ctx->ksrq_rsc.srqe_array[array_idx]); + + cqe_info->wr_id = srq_ctx->ksrq_rsc.srq_wrid_array[array_idx]; + sxe2_move_srq_ring_tail(srq); +} +static int sxe2_hw_deal_rq_cqe(struct sxe2_rdma_cq_uk *cq, + struct sxe2_cqe_info *cqe_info, + struct sxe2_qp_common *qp, __u32 qpn) +{ + __u32 array_idx; + __u32 wqe_idx; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + + cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + wqe_idx = (__u32)cqe_info->info.field.wq_desc_idx; + array_idx = wqe_idx / qp->rq_wqe_size_multiplier; + if (cqe_info->info.field.major_err == SXE2_WR_FLUSH_ERR) { + if (!SXE2_RING_MORE_WORK(qp->rq_ring)) + return -ENOENT; + DRV_RDMA_LOG_DEV_DEBUG( + "flush qp %u state rq_pi %u rq_ci %u .\n", qpn, + SXE2_RING_CURRENT_HEAD(qp->rq_ring), + SXE2_RING_CURRENT_TAIL(qp->rq_ring)); + array_idx = qp->rq_ring.tail; + } + 
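+	/* Complete the RQ WQE tracked at array_idx: for flushed CQEs this is
+	 * the oldest posted WQE (ring tail) rather than the index reported in
+	 * the CQE, since a flush carries no valid hardware descriptor index.
+	 */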
cqe_info->wr_id = qp->rq_wrid_array[array_idx]; + SXE2_RING_SET_TAIL(qp->rq_ring, array_idx + 1); + DRV_RDMA_LOG_DEV_DEBUG( + "update qp %u rq_ci %u wqeidx %u arridx %u wrid %llu.\n", qpn, + SXE2_RING_CURRENT_TAIL(qp->rq_ring), wqe_idx, array_idx, + cqe_info->wr_id); + return 0; +} +static int sxe2_hw_deal_sq_cqe(struct sxe2_rdma_cq_uk *cq, + struct sxe2_cqe_info *cqe_info, + struct sxe2_qp_common *qp, __u32 qpn) +{ + __u32 wqe_idx; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + + cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + wqe_idx = (__u32)cqe_info->info.field.wq_desc_idx; + if (cqe_info->info.field.push_dropped) { + qp->llwqe_mode = false; + qp->push_dropped = true; + } + if (cqe_info->info.field.major_err != SXE2_WR_FLUSH_ERR) { + cqe_info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + cqe_info->bytes = qp->sq_wrtrk_array[wqe_idx].wr_len; + SXE2_RING_SET_TAIL(qp->sq_ring, wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); + DRV_RDMA_LOG_DEV_DEBUG( + "update qp %u sq_ci %u wqe_idx %u wr_id %llu.\n", qpn, + SXE2_RING_CURRENT_TAIL(qp->sq_ring), wqe_idx, + cqe_info->wr_id); + } else { + return sxe2_hw_flush_one_sq_wqe(cq, qp, cqe_info); + } + return 0; +} +static int sxe2_hw_cq_poll(struct sxe2_rdma_cq_uk *cq, + struct sxe2_cqe_info *cqe_info) +{ + struct sxe2_qp_common *qp; + __u32 wqe_idx; + __u32 qpn = 0; + int ret_code = SXE2_CQ_OK; + bool move_cq_head = true; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + struct sxe2_ring *cur_ring = NULL; + __le64 *cqe; + __u64 qword = 0; + + cq_ctx = container_of(cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + + ret_code = sxe2_get_next_cqe(cq, cqe_info); + if (ret_code != SXE2_CQ_OK) + return ret_code; + + qp = (struct sxe2_qp_common *)(unsigned long)cqe_info->info.field.qpc; + if (qp == NULL || qp->destroy_pending) { + ret_code = -EFAULT; + goto exit; + } + + qpn = qp->qpn; + wqe_idx = (__u32)cqe_info->info.field.wq_desc_idx; + cqe_info->bytes = cqe_info->info.field.payload_len; + + DRV_RDMA_LOG_DEV_DEBUG( + "cqe process cqn [%u] ci [%u] qp [%u] wqeidx %u.\n", cq->cq_id, + SXE2_RING_CURRENT_HEAD(cq->cq_ring), qpn, wqe_idx); + + if (cqe_info->info.field.is_srq) { + sxe2_hw_deal_srq_cqe(cq, cqe_info, qp, qpn); + } else if (cqe_info->info.field.qp_type == SXE2_CQE_QTYPE_RQ) { + cur_ring = &qp->rq_ring; + ret_code = sxe2_hw_deal_rq_cqe(cq, cqe_info, qp, qpn); + if (ret_code) + goto exit; + } else { + cur_ring = &qp->sq_ring; + ret_code = sxe2_hw_deal_sq_cqe(cq, cqe_info, qp, qpn); + if (ret_code) + goto exit; + } + +exit: + if (!ret_code && cqe_info->info.field.major_err == SXE2_WR_FLUSH_ERR) { + if (cur_ring && SXE2_RING_MORE_WORK(*cur_ring)) + move_cq_head = false; + } + + if (move_cq_head) { + SXE2_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); + if (!SXE2_RING_CURRENT_HEAD(cq->cq_ring)) + cq->polarity ^= 1; + + SXE2_RING_MOVE_TAIL(cq->cq_ring); + cq->doorbell_note[SXE2_CQ_SET_CI] = + cpu_to_le32(SXE2_RING_CURRENT_HEAD(cq->cq_ring)); + DRV_RDMA_LOG_DEV_DEBUG("update qp %u cq %d ci %u.\n", qpn, + cq->cq_id, + SXE2_RING_CURRENT_HEAD(cq->cq_ring)); + } else { + cqe = SXE2_GET_CURRENT_CQ_ELEM(cq); + get_64bit_val(cqe, 24, &qword); + qword &= ~SXE2_CQE_WQEIDX; + qword |= FIELD_PREP(SXE2_CQE_WQEIDX, cur_ring->tail); + set_64bit_val(cqe, 24, qword); + + DRV_RDMA_LOG_DEV_DEBUG( + "cqn [%u] idx [%u] flush wqeidx %d to %d qpn %u.\n", + cq->cq_id, SXE2_RING_CURRENT_HEAD(cq->cq_ring), wqe_idx, + cur_ring->tail, qpn); + } + + return ret_code; 
+} + +static enum ib_wc_status +sxe2_flush_err_to_ib_wc_status(enum sxe2_major_opcode opcode) +{ + switch (opcode) { + case SXE2_LOCAL_PROTECTION_ERR: + return IB_WC_LOC_PROT_ERR; + case SXE2_BAD_RESPONSE_ERR: + return IB_WC_BAD_RESP_ERR; + case SXE2_REMOTE_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case SXE2_LOCAL_QP_OP_ERR: + return IB_WC_LOC_QP_OP_ERR; + case SXE2_REMOTE_OPERATION_ERR: + return IB_WC_REM_OP_ERR; + case SXE2_LOCAL_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case SXE2_LOCAL_ACCESS_ERR: + return IB_WC_LOC_ACCESS_ERR; + case SXE2_WR_FLUSH_ERR: + return IB_WC_WR_FLUSH_ERR; + case SXE2_TRANS_RETRY_CNT_EXCEED_ERR: + return IB_WC_RETRY_EXC_ERR; + case SXE2_MW_BIND_ERR: + return IB_WC_MW_BIND_ERR; + case SXE2_REMOTE_INVALID_REQUEST_ERR: + return IB_WC_REM_INV_REQ_ERR; + case SXE2_RNR_RETRY_CNT_EXCEED_ERR: + return IB_WC_RNR_RETRY_EXC_ERR; + default: + return IB_WC_GENERAL_ERR; + } +} + +static inline void sxe2_set_ib_wc_op_sq(struct sxe2_cqe_info *cur_cqe, + struct ib_wc *entry) +{ + switch (cur_cqe->info.field.op) { + case SXE2_OP_TYPE_RDMA_WRITE: + case SXE2_OP_TYPE_RDMA_WRITE_SOL: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case SXE2_OP_TYPE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case SXE2_OP_TYPE_SEND_SOL: + case SXE2_OP_TYPE_SEND_SOL_INV: + case SXE2_OP_TYPE_SEND_INV: + case SXE2_OP_TYPE_SEND: + entry->opcode = IB_WC_SEND; + break; + case SXE2_OP_TYPE_FAST_REG_MR: + entry->opcode = IB_WC_REG_MR; + break; + case SXE2_OP_TYPE_LOCAL_INV: + entry->opcode = IB_WC_LOCAL_INV; + break; + default: + entry->status = IB_WC_GENERAL_ERR; + } +} + +static inline void sxe2_set_ib_wc_op_rq(struct sxe2_cqe_info *cur_cqe, + struct ib_wc *entry, + bool send_imm_support) +{ + if (!send_imm_support) { + entry->opcode = cur_cqe->info.field.imm_data_flag ? + IB_WC_RECV_RDMA_WITH_IMM : + IB_WC_RECV; + return; + } + switch (cur_cqe->info.field.op) { + case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: + case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: + entry->opcode = IB_WC_RECV_RDMA_WITH_IMM; + break; + default: + entry->opcode = IB_WC_RECV; + } +} + +static void sxe2_cq_pollinfo_to_wc(struct ib_wc *entry, + struct sxe2_cqe_info *cur_cqe) +{ + struct sxe2_rdma_ctx_qp *qp; + + entry->wc_flags = 0; + entry->pkey_index = 0; + entry->wr_id = cur_cqe->wr_id; + + qp = (struct sxe2_rdma_ctx_qp *)(unsigned long)cur_cqe->info.field.qpc; + entry->qp = qp->qp_common.back_qp; + + if (cur_cqe->info.field.error) { + entry->status = sxe2_flush_err_to_ib_wc_status( + cur_cqe->info.field.major_err); + entry->vendor_err = (u32)(cur_cqe->info.field.major_err << 16 | + cur_cqe->info.field.minor_err); + } else { + entry->status = IB_WC_SUCCESS; + if (cur_cqe->info.field.imm_data_flag) { + entry->ex.imm_data = + htonl(cur_cqe->info.field.imme_data); + entry->wc_flags |= IB_WC_WITH_IMM; + } + if (cur_cqe->info.field.ud_smac_valid) { + u64 dmac = cur_cqe->info.field.ud_smac; + + ether_addr_copy(entry->smac, (u8 *)&dmac); + entry->wc_flags |= IB_WC_WITH_SMAC; + } + if (cur_cqe->info.field.vlan_tag_flag && + ctxdev_to_rf(qp->dev)->vlan_parse_en) { + u16 vlan = + cur_cqe->info.field.ud_vlan_tag & VLAN_VID_MASK; + + entry->sl = cur_cqe->info.field.ud_vlan_tag >> + VLAN_PRIO_SHIFT; + if (vlan) { + entry->vlan_id = vlan; + entry->wc_flags |= IB_WC_WITH_VLAN; + } + } else { + entry->sl = 0; + } + } + + if (cur_cqe->info.field.qp_type == SXE2_CQE_QTYPE_SQ) { + sxe2_set_ib_wc_op_sq(cur_cqe, entry); + } else { + sxe2_set_ib_wc_op_rq( + cur_cqe, entry, + qp->qp_common.qp_caps & SXE2_SEND_WITH_IMM ? 
true :
+							     false);
+		if (qp->qp_common.qp_type != IB_QPT_UD &&
+		    cur_cqe->info.field.stag_or_lrkey) {
+			entry->ex.invalidate_rkey = cur_cqe->info.field.l_r_key;
+			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+		}
+	}
+
+	if (qp->qp_common.qp_type == IB_QPT_UD) {
+		entry->src_qp = cur_cqe->info.field.ud_src_qpn;
+		entry->slid = 0;
+		entry->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+		entry->network_hdr_type = cur_cqe->info.field.ipv4 ?
+						  RDMA_NETWORK_IPV4 :
+						  RDMA_NETWORK_IPV6;
+	} else {
+		entry->src_qp = cur_cqe->info.field.qp_id;
+	}
+	entry->byte_len = cur_cqe->bytes;
+}
+
+static int sxe2_poll_one(struct sxe2_rdma_cq_uk *ukcq,
+			 struct sxe2_cqe_info *cur_cqe, struct ib_wc *entry)
+{
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rdma_ctx_cq *cq_ctx;
+	int ret;
+
+	cq_ctx = container_of(ukcq, struct sxe2_rdma_ctx_cq, cq_uk);
+	rdma_dev = to_rdmadev(cq_ctx->dev);
+
+	ret = sxe2_hw_cq_poll(ukcq, cur_cqe);
+	if (ret) {
+		if (ret == -EFAULT) {
+			DRV_RDMA_LOG_DEV_WARN(
+				"CQ(kernel) %d Read CI(0x%x) cqe failed.\n",
+				ukcq->cq_id, ukcq->cq_ring.head);
+		}
+		return ret;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"CQ(kernel) (%d) CI(0x%x) wrid(%llu) cqeinfo:\n"
+		"payload_len : %u\n"
+		"packet_seq : %d\n"
+		"qpc : %#llx\n"
+		"l_r_key : %u\n"
+		"qp_id : %d\n"
+		"minor_err : %d\n"
+		"major_err : %d\n"
+		"wq_desc_idx : %d\n"
+		"extended_cqe : %d\n"
+		"push_dropped : %d\n"
+		"ipv4 : %d\n"
+		"stag_or_lrkey : %d\n"
+		"solicited_evt : %d\n"
+		"error : %d\n"
+		"op : %d\n"
+		"qp_type : %d\n"
+		"imme_data : %u\n"
+		"srqn : %d\n"
+		"is_srq : %d\n"
+		"cqe_timestamp : %#llx\n"
+		"ud_smac : %#llx\n"
+		"ud_vlan_tag : %d\n"
+		"ud_src_qpn : %d\n"
+		"vsi_index : %d\n"
+		"vlan_tag_flag : %d\n"
+		"ud_smac_valid : %d\n"
+		"imm_data_flag : %d\n"
+		"cqe_valid : %d\n",
+		ukcq->cq_id, ukcq->cq_ring.head, cur_cqe->wr_id,
+		cur_cqe->info.field.payload_len, cur_cqe->info.field.packet_seq,
+		cur_cqe->info.field.qpc, cur_cqe->info.field.l_r_key,
+		cur_cqe->info.field.qp_id, cur_cqe->info.field.minor_err,
+		cur_cqe->info.field.major_err, cur_cqe->info.field.wq_desc_idx,
+		cur_cqe->info.field.extended_cqe,
+		cur_cqe->info.field.push_dropped, cur_cqe->info.field.ipv4,
+		cur_cqe->info.field.stag_or_lrkey,
+		cur_cqe->info.field.solicited_evt, cur_cqe->info.field.error,
+		cur_cqe->info.field.op, cur_cqe->info.field.qp_type,
+		cur_cqe->info.field.imme_data, cur_cqe->info.field.srqn,
+		cur_cqe->info.field.is_srq, cur_cqe->info.field.cqe_timestamp,
+		(__u64)cur_cqe->info.field.ud_smac,
+		cur_cqe->info.field.ud_vlan_tag, cur_cqe->info.field.ud_src_qpn,
+		cur_cqe->info.field.vsi_index,
+		cur_cqe->info.field.vlan_tag_flag,
+		cur_cqe->info.field.ud_smac_valid,
+		cur_cqe->info.field.imm_data_flag,
+		cur_cqe->info.field.cqe_valid);
+
+	sxe2_cq_pollinfo_to_wc(entry, cur_cqe);
+
+	return 0;
+}
+
+int sxe2_generated_cmpls(struct sxe2_rdma_cq *rdma_cq,
+			 struct sxe2_cqe_info *out_cqeinfo)
+{
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_flushed_cqe *flushed_cqe;
+	struct sxe2_qp_common *qp;
+	struct sxe2_rdma_pci_f *rdma_func;
+
+	rdma_dev = to_dev(rdma_cq->ibcq.device);
+
+	if (list_empty(&rdma_cq->cmpl_generated))
+		return -ENOENT;
+
+	rdma_func = rdma_dev->rdma_func;
+
+	flushed_cqe = list_first_entry_or_null(&rdma_cq->cmpl_generated,
+					       struct sxe2_flushed_cqe, list);
+	qp = (struct sxe2_qp_common *)flushed_cqe->cqeinfo.info.field.qpc;
+	list_del(&flushed_cqe->list);
+	memcpy(out_cqeinfo, &flushed_cqe->cqeinfo, sizeof(*out_cqeinfo));
+	kfree(flushed_cqe);
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"polled one flushed cqe wr_id = 0x%llx qp_id=%u
wqe_idx=%u\n", + out_cqeinfo->wr_id, out_cqeinfo->info.field.qp_id, + out_cqeinfo->info.field.wq_desc_idx); + + return SXE2_CQ_OK; +} + +int sxe2_kpoll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) +{ + struct sxe2_rdma_cq *rdma_cq; + struct sxe2_cqe_info *cur_cqe; + unsigned long flags = 0; + int npolled = 0; + int ret = -EINVAL; + + rdma_cq = ibcq_to_vendor_cq(ibcq); + cur_cqe = &rdma_cq->cur_cqe; + + spin_lock_irqsave(&rdma_cq->lock, flags); + + while (npolled < num_entries) { + ret = sxe2_poll_one(&rdma_cq->cq_ctx.cq_uk, cur_cqe, + entry + npolled); + if (ret == -ENOENT) { + ret = sxe2_generated_cmpls(rdma_cq, cur_cqe); + if (!ret) { + sxe2_cq_pollinfo_to_wc(entry + npolled, + cur_cqe); + } + } + + if (ret == SXE2_CQ_OK) { + ++npolled; + continue; + } + + if (ret == -ENOENT) + break; + } + + spin_unlock_irqrestore(&rdma_cq->lock, flags); + + return npolled; +} + +static void sxe2_cq_arm_notify(struct sxe2_rdma_cq_uk *arm_cq, + enum sxe2_arm_type arm_type) +{ + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_cq *cq_ctx; + uint64_t doorbell; + uint32_t sn; + uint32_t ci; + uint32_t cmd; + + cq_ctx = container_of(arm_cq, struct sxe2_rdma_ctx_cq, cq_uk); + rdma_dev = to_rdmadev(cq_ctx->dev); + + arm_cq->arm_sn++; + sn = arm_cq->arm_sn & 3; + ci = SXE2_RING_CURRENT_HEAD(arm_cq->cq_ring); + + if (arm_type == SXE2_CQ_ARM_SOLICITED) + cmd = (uint32_t)SXE2_CQ_DB_REQ_SOLICITED; + else + cmd = SXE2_CQ_DB_REQ_NOSOLICITED; + + doorbell = 0; + doorbell = sn << 29 | cmd | ci; + doorbell <<= 32; + doorbell |= arm_cq->cq_id; + + arm_cq->doorbell_note[SXE2_CQ_ARM_DB] = + cpu_to_le32(sn << 29 | cmd | ci); + + dma_wmb(); + + set_64bit_val(arm_cq->cqe_alloc_db, 0, doorbell); + DRV_RDMA_LOG_DEV_DEBUG("CQ(kernel) (%u -> %p) ARM NOTIFY DBNOTE(0x%x)\n" + " DB(0x%llx) CI(%u) SN(%u) CMD(%u)\n", + arm_cq->cq_id, arm_cq->cqe_alloc_db, + cpu_to_le32(sn << 29 | cmd | ci), + cpu_to_le64(doorbell), ci, sn, cmd); +} + +int sxe2_kreq_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct sxe2_rdma_cq *rdma_cq; + struct sxe2_rdma_cq_uk *arm_cq; + unsigned long flags = 0; + enum sxe2_arm_type arm_type = SXE2_CQ_ARM_NEXT; + bool promo_event = false; + int ret = 0; + + rdma_cq = ibcq_to_vendor_cq(ibcq); + arm_cq = &rdma_cq->cq_ctx.cq_uk; + + spin_lock_irqsave(&rdma_cq->lock, flags); + if (notify_flags == IB_CQ_SOLICITED) { + arm_type = SXE2_CQ_ARM_SOLICITED; + } else { + if (rdma_cq->arm_type == SXE2_CQ_ARM_SOLICITED) + promo_event = true; + } + + if (!atomic_cmpxchg(&rdma_cq->armed, 0, 1) || promo_event) { + rdma_cq->arm_type = arm_type; + sxe2_cq_arm_notify(arm_cq, arm_type); + } + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && + (!sxe2_cq_empty(arm_cq) || !list_empty(&rdma_cq->cmpl_generated))) { + ret = 1; + } + + spin_unlock_irqrestore(&rdma_cq->lock, flags); + + return ret; +} + +static bool qp_has_unpolled_cqes(struct sxe2_rdma_qp *rdma_qp, + struct sxe2_rdma_cq *iwcq) +{ + struct sxe2_rdma_cq_uk *cq = &iwcq->cq_ctx.cq_uk; + struct sxe2_qp_common *qp = &rdma_qp->qp_ctx.qp_common; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + u32 cq_head = SXE2_RING_CURRENT_HEAD(cq->cq_ring); + struct sxe2_cqe_info cqe_info; + __le64 *cqe; + u8 cq_polarity; + int ret; + + cq_polarity = cq->polarity; + do { + cqe = ((struct sxe2_cqe *)(cq->cq_base))[cq_head].buf; + ret = sxe2_read_cqe(cqe, cq_polarity, &cqe_info); + if (ret) + break; + + if ((struct sxe2_qp_common *)(unsigned long) + cqe_info.info.field.qpc == qp) { + DRV_RDMA_LOG_DEV_DEBUG( + "qp [%u] has unpolled cqe 
left, cq id = %u\n", + qp->qpn, cq->cq_id); + return true; + } + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + cq_polarity ^= 1; + } while (true); + + DRV_RDMA_LOG_DEV_DEBUG( + "qp [%u] doesn't have unpolled cqe left, cq id = %u\n", qp->qpn, + cq->cq_id); + + return false; +} + +static void sxe2_set_cpi_common_values(struct sxe2_cqe_info *cqeinfo, + struct sxe2_qp_common *qp, u32 qp_num) +{ + cqeinfo->info.field.error = 1; + cqeinfo->info.field.major_err = SXE2_WR_FLUSH_ERR; + cqeinfo->info.field.qpc = (uintptr_t)qp; + cqeinfo->info.field.qp_id = qp_num; +} + +void sxe2_generate_flush_completions(struct sxe2_rdma_qp *rdma_qp) +{ + struct sxe2_qp_common *qp = &rdma_qp->qp_ctx.qp_common; + struct sxe2_ring *sq_ring = &qp->sq_ring; + struct sxe2_ring *rq_ring = &qp->rq_ring; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + struct sxe2_flushed_cqe *flushed_cqe; + __le64 *sw_wqe; + u64 wqe_hdr; + u32 wqe_idx; + bool compl_generated = false; + unsigned long flags1 = 0; + unsigned long flags2 = 0; + + spin_lock_irqsave(&rdma_qp->send_cq->lock, flags1); + if (!qp_has_unpolled_cqes(rdma_qp, rdma_qp->send_cq)) { + spin_lock_irqsave(&rdma_qp->lock, flags2); + while (SXE2_RING_MORE_WORK(*sq_ring)) { + flushed_cqe = kzalloc(sizeof(*flushed_cqe), GFP_ATOMIC); + if (!flushed_cqe) { + spin_unlock_irqrestore(&rdma_qp->lock, flags2); + spin_unlock_irqrestore(&rdma_qp->send_cq->lock, + flags1); + return; + } + + wqe_idx = sq_ring->tail; + sxe2_set_cpi_common_values(&flushed_cqe->cqeinfo, qp, + qp->qpn); + + flushed_cqe->cqeinfo.wr_id = + qp->sq_wrtrk_array[wqe_idx].wrid; + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_hdr); + flushed_cqe->cqeinfo.info.field.op = + (u8)FIELD_GET(SXE2_WQE_OPCODE, wqe_hdr); + flushed_cqe->cqeinfo.info.field.qp_type = + SXE2_CQE_QTYPE_SQ; + flushed_cqe->cqeinfo.info.field.wq_desc_idx = wqe_idx; + + SXE2_RING_SET_TAIL( + *sq_ring, + sq_ring->tail + + qp->sq_wrtrk_array[sq_ring->tail] + .quanta); + + if (flushed_cqe->cqeinfo.info.field.op == + SXE2_OP_TYPE_NOP) { + kfree(flushed_cqe); + continue; + } + list_add_tail(&flushed_cqe->list, + &rdma_qp->send_cq->cmpl_generated); + DRV_RDMA_LOG_DEV_DEBUG( + "adding wr_id = 0x%llx SQ Completion\n" + "\tto list qp_id=%u, wqe_idx=%u\n", + flushed_cqe->cqeinfo.wr_id, qp->qpn, wqe_idx); + compl_generated = true; + } + spin_unlock_irqrestore(&rdma_qp->lock, flags2); + spin_unlock_irqrestore(&rdma_qp->send_cq->lock, flags1); + if (compl_generated) { + sxe2_rdma_comp_handler(&rdma_qp->send_cq->cq_ctx); + compl_generated = false; + } + } else { + spin_unlock_irqrestore(&rdma_qp->send_cq->lock, flags1); + sxe2_sched_qp_flush_work(rdma_qp); + } + + spin_lock_irqsave(&rdma_qp->recv_cq->lock, flags1); + if (rq_ring->size && !qp_has_unpolled_cqes(rdma_qp, rdma_qp->recv_cq)) { + spin_lock_irqsave(&rdma_qp->lock, flags2); + while (SXE2_RING_MORE_WORK(*rq_ring)) { + flushed_cqe = kzalloc(sizeof(*flushed_cqe), GFP_ATOMIC); + if (!flushed_cqe) { + spin_unlock_irqrestore(&rdma_qp->lock, flags2); + spin_unlock_irqrestore(&rdma_qp->recv_cq->lock, + flags1); + return; + } + + wqe_idx = rq_ring->tail; + sxe2_set_cpi_common_values(&flushed_cqe->cqeinfo, qp, + qp->qpn); + + flushed_cqe->cqeinfo.wr_id = qp->rq_wrid_array[wqe_idx]; + flushed_cqe->cqeinfo.info.field.qp_type = + SXE2_CQE_QTYPE_RQ; + flushed_cqe->cqeinfo.info.field.wq_desc_idx = wqe_idx; + SXE2_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); + + list_add_tail(&flushed_cqe->list, + &rdma_qp->recv_cq->cmpl_generated); + DRV_RDMA_LOG_DEV_DEBUG( + "adding wr_id = 
0x%llx RQ Completion to\n" + "\tlist qp_id=%u, wqe_idx=%u\n", + flushed_cqe->cqeinfo.wr_id, qp->qpn, wqe_idx); + + compl_generated = true; + } + spin_unlock_irqrestore(&rdma_qp->lock, flags2); + spin_unlock_irqrestore(&rdma_qp->recv_cq->lock, flags1); + if (compl_generated) + sxe2_rdma_comp_handler(&rdma_qp->recv_cq->cq_ctx); + } else { + spin_unlock_irqrestore(&rdma_qp->recv_cq->lock, flags1); + sxe2_sched_qp_flush_work(rdma_qp); + } +} + +static void sxe2_clean_base_cqe(struct sxe2_qp_common *qp, + struct sxe2_rdma_cq_uk *cq, int cq_type) +{ + __le64 *cqe; + u64 cqe_hdr, qpc; + u32 cq_head; + u8 polarity, cq_polarity; + + cq_head = cq->cq_ring.head; + cq_polarity = cq->polarity; + do { + cqe = ((struct sxe2_cqe *)(cq->cq_base))[cq_head].buf; + get_64bit_val(cqe, 56, &cqe_hdr); + polarity = (u8)FIELD_GET(SXE2_CQE_VALID, cqe_hdr); + + if (polarity != cq_polarity) + break; + + dma_rmb(); + + get_64bit_val(cqe, 8, &qpc); + if ((void *)(unsigned long)qpc == qp) + set_64bit_val(cqe, 8, 0); + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + cq_polarity ^= 1; + } while (true); +} + +void sxe2_clean_cqes(struct sxe2_rdma_qp *rdma_qp, struct sxe2_rdma_cq *rdma_cq, + int cq_type) +{ + struct sxe2_rdma_cq_uk *ukcq = &rdma_cq->cq_ctx.cq_uk; + unsigned long flags = 0; + struct sxe2_flushed_cqe *flushed_cqe; + struct list_head *tmp_node, *list_node; + + spin_lock_irqsave(&rdma_cq->lock, flags); + sxe2_clean_base_cqe(&rdma_qp->qp_ctx.qp_common, ukcq, cq_type); + + list_for_each_safe(list_node, tmp_node, &rdma_cq->cmpl_generated) { + flushed_cqe = + list_entry(list_node, struct sxe2_flushed_cqe, list); + if (flushed_cqe->cqeinfo.info.field.qp_id == + rdma_qp->ibqp.qp_num) { +#ifdef SXE2_CFG_DEBUG + if (flushed_cqe->cqeinfo.info.field.qp_type == + SXE2_CQE_QTYPE_SQ) { + rdma_qp->qp_ctx.qp_common.statistics + .cleaned_flushsq_cnt++; + } else { + rdma_qp->qp_ctx.qp_common.statistics + .cleaned_flushrq_cnt++; + } +#endif + list_del(&flushed_cqe->list); + kfree(flushed_cqe); + } + } + + spin_unlock_irqrestore(&rdma_cq->lock, flags); +} + +static __le64 *sxe2_srq_get_next_recv_wqe(struct sxe2_rdma_srq *ksrq, + __u32 *wqe_idx) +{ + struct sxe2_srq_drv *ksrq_drv; + int ret_code; + __le64 *wqe; + + ksrq_drv = &ksrq->srq_ctx.srq_drv; + if (SXE2_RING_FULL_ERR(ksrq_drv->srq_ring)) + return NULL; + + *wqe_idx = SXE2_RING_CURRENT_HEAD(ksrq_drv->srq_ring); + if (ksrq->srq_ctx.ksrq_rsc.srqe_array[*wqe_idx] == SXE2_SRQE_BUSY) + return NULL; + + SXE2_RING_MOVE_HEAD(ksrq_drv->srq_ring, ret_code); + if (ret_code) + return NULL; + + ksrq->srq_ctx.ksrq_rsc.srqe_array[*wqe_idx] = SXE2_SRQE_BUSY; + + if (!*wqe_idx) + ksrq_drv->srq_polarity = !ksrq_drv->srq_polarity; + + wqe = ksrq_drv->srq_base[*wqe_idx * ksrq_drv->wqe_size_multiplier].elem; + + return wqe; +} + +static int sxe2_hw_srq_post_receive(struct sxe2_rdma_srq *ksrq, + struct sxe2_rq_info *info) +{ + struct sxe2_srq_drv *ksrq_drv; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_kmode_srq *ksrq_rsc; + __u32 wqe_idx, i, byte_off; + __u32 addl_sge_cnt; + __le64 *wqe; + __u64 hdr; + + rdma_dev = to_dev(ksrq->ibsrq.device); + ksrq_rsc = &ksrq->srq_ctx.ksrq_rsc; + ksrq_drv = &ksrq->srq_ctx.srq_drv; + wqe = sxe2_srq_get_next_recv_wqe(ksrq, &wqe_idx); + if (!wqe) + return -ENOMEM; + + ksrq_rsc->srq_wrid_array[wqe_idx] = info->wr_id; + + addl_sge_cnt = info->num_sges > 1 ? 
info->num_sges - 1 : 0; + sxe2_set_sgelist_data(wqe, 0, info->sg_list, ksrq_drv->srq_polarity); + + for (i = 1, byte_off = 32; i < info->num_sges; i++) { + sxe2_set_sgelist_data(wqe, byte_off, &info->sg_list[i], + ksrq_drv->srq_polarity); + byte_off += 16; + } + + if (!(info->num_sges & 0x01) && info->num_sges) { + sxe2_set_sgelist_data(wqe, byte_off, NULL, + ksrq_drv->srq_polarity); + } + + hdr = FIELD_PREP(SXE2_WQE_ADDSGECNT, addl_sge_cnt) | + FIELD_PREP(SXE2_WQE_VALID, ksrq_drv->srq_polarity); + + dma_wmb(); + + set_64bit_val(wqe, 24, hdr); + + set_64bit_val(ksrq_drv->db_note, 0, + (__u64)SXE2_RING_CURRENT_HEAD(ksrq_drv->srq_ring) * + ksrq_drv->wqe_size_multiplier); + + DRV_RDMA_LOG_DEV_DEBUG("POST SRQ RCV(srqn = %u): wqe_idx %u\n" + "wr_id %llu ring_size %u mul_size %d\n", + ksrq->srq_id, wqe_idx, + ksrq_rsc->srq_wrid_array[wqe_idx], + SXE2_RING_SIZE(ksrq_drv->srq_ring), + ksrq_drv->wqe_size_multiplier); + for (i = 0; i < info->num_sges; i++) { + DRV_RDMA_LOG_DEV_DEBUG( + "sgelist[%d] addr 0x%llx len [%u] lkey [%u]\n", i, + info->sg_list[i].addr, info->sg_list[i].length, + info->sg_list[i].lkey); + } + + return 0; +} + +int sxe2_kpost_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr) +{ + struct sxe2_rdma_srq *ksrq = to_srq(ib_srq); + struct sxe2_srq_drv *ksrq_drv = &ksrq->srq_ctx.srq_drv; + struct sxe2_rq_info rq_info = {}; + unsigned long flags = 0; + int err = 0; + + spin_lock_irqsave(&ksrq->lock, flags); + while (ib_wr) { + if (ib_wr->num_sge > (int)ksrq_drv->max_srq_frag_cnt) { + err = -EINVAL; + goto out; + } + + rq_info.num_sges = (u32)ib_wr->num_sge; + rq_info.wr_id = ib_wr->wr_id; + rq_info.sg_list = ib_wr->sg_list; + err = sxe2_hw_srq_post_receive(ksrq, &rq_info); + if (err) + goto out; + + ib_wr = ib_wr->next; + } + +out: + spin_unlock_irqrestore(&ksrq->lock, flags); + + if (err) + *bad_wr = ib_wr; + + return err; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.h new file mode 100644 index 0000000000000000000000000000000000000000..7fcca66727029a020ad4597b0a5d622113ac5903 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_io.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_io.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DRV_IO_H__ +#define __SXE2_DRV_IO_H__ + +#define SXE2_CQE_QTYPE_RQ 0 +#define SXE2_CQE_QTYPE_SQ 1 + +#define SXE2_MAX_FRAGCNT 16 +#define SXE2_QP_WQE_MIN_SIZE 32 +#define SXE2_QP_WQE_MAX_SIZE 256 +#define SXE2_QP_WQE_MIN_QUANTA 1 +#define SXE2_QP_MAX_INLINE_PER_QUANTA 31 + +#define SXE2_INLINE_VALID_S 7 + +#define SXE2_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20) +#define SXE2_WQE_OPCODE GENMASK_ULL(37, 32) +#define SXE2_WQE_ADDSGECNT GENMASK_ULL(41, 38) +#define SXE2_WQE_VALID BIT_ULL(63) +#define SXE2_WQE_FRAG_VALID BIT_ULL(63) + +#define SXE2_CQE_VALID BIT_ULL(63) +#define SXE2_CQE_WQEIDX GENMASK_ULL(46, 32) + +#define SXE2_SET_FIELD(origin, mask, val) ((origin) |= FIELD_PREP(mask, val)) + +#define SXE2_SRQE_BUSY 1 +#define SXE2_SRQE_FREE 0 + +#define SXE2_WQE_QUANTA_ODD_NUMBER 0x1 + +enum sxe2_disp_id { + SXE2_RDMA_SEND = 0, + SXE2_RDMA_SEND_INLINE = 1, + SXE2_RDMA_WRITE = 2, + SXE2_RDMA_WRITE_INLINE = 3, + SXE2_RDMA_READ = 4, + SXE2_RDMA_LOCAL_INV = 5, + SXE2_RDMA_FAST_REG_MR = 6, + SXE2_RDMA_MAX_ID +}; + +enum sxe2_cq_err { + SXE2_CQ_OK = 0, + SXE2_CQ_NOENT = -2, +}; + +enum { SXE2_CQ_SET_CI = 0, + SXE2_CQ_ARM_DB = 1, +}; + +enum { SXE2_QP_RQ_PI = 0, + SXE2_QP_SQ_CI = 3, +}; + +enum { SXE2_CQ_DB_REQ_SOLICITED = 1 << 31, + SXE2_CQ_DB_REQ_NOSOLICITED = 0 << 31 }; + +struct sxe2_post_send { + struct ib_sge *sg_list; + u32 num_sges; + u32 qkey; + u32 dest_qp; + u32 ah_id; +}; + +struct sxe2_rdma_write { + struct ib_sge *lo_sg_list; + struct ib_sge rem_addr; + u32 num_lo_sges; +}; + +struct sxe2_rdma_read { + struct ib_sge *lo_sg_list; + struct ib_sge rem_addr; + u32 num_lo_sges; +}; + +struct sxe2_local_invalidate { + u32 target_stag; +}; + +struct sxe2_flush_mem_region { + u64 remote_tagged_offset; + u32 remote_stag; + u32 length; + u8 selectivity; + u8 placement_type; +}; + +struct sxe2_fast_reg_mr { + u64 va_or_offset; + u64 pbl_index; + u64 len : 46; + u64 rsv1 : 18; + u64 mr_key : 8; + u64 mr_idx : 24; + u64 op : 6; + u64 log_entity_size : 5; + u64 rsv2 : 1; + u64 rsv3 : 2; + u64 rsv4 : 2; + u64 access_right : 5; + u64 va_based_flag : 1; + u64 pbl_mode : 2; + u64 push_wqe : 1; + u64 rsv5 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; +}; + +struct sxe2_quanta { + u64 buffer[4]; +}; + +struct sxe2_wr_info { + u64 wr_id; + u8 op_type; + u8 l4len; + bool signaled : 1; + bool read_fence : 1; + bool local_fence : 1; + bool inline_data : 1; + bool imm_data_valid : 1; + bool push_wqe : 1; + bool report_rtt : 1; + bool udp_hdr : 1; + bool defer_flag : 1; + bool read_inv_stag : 1; + bool post_wqe : 1; + u32 imm_data; + u32 rkey_to_inv; + union { + struct sxe2_post_send send; + struct sxe2_rdma_write rdma_write; + struct sxe2_rdma_read rdma_read; + struct sxe2_local_invalidate local_inval; + struct sxe2_fast_reg_mr fastreg_mr; + } op_info; + enum sxe2_disp_id funid; +}; + +struct sxe2_rq_info { + u64 wr_id; + struct ib_sge *sg_list; + u32 num_sges; +}; + +struct sxe2_imme_data { + u64 imme_data; +}; + +struct sxe2_frag_data { + u64 tag_offset; + union { + struct { + u64 stag : 32; + u64 frag_len : 31; + u64 frag_valid : 1; + } field; + u64 val; + } offset8; +}; + +union sxe2_dqpn_data { + struct { + u64 dest_qkey : 32; + u64 dest_qpn : 24; + u64 rsv0 : 8; + } field; + u64 val; +}; + +struct sxe2_bindmw_info { + u64 mw_va_base; + union { + struct { + u64 mw_key : 32; + u64 mr_key : 32; + } field; + u64 val; + } offset8; + union { 
+ struct { + u64 mw_len : 46; + u64 rsv0 : 18; + } field; + u64 val; + } offset16; +}; + +union sxe2_send_hdr { + struct { + u64 remote_inv_rkey : 32; + u64 op : 6; + u64 addfragcnt : 4; + u64 rsvd1 : 4; + u64 report_rtt : 1; + u64 imme_data_flag : 1; + u64 rsvd2 : 8; + u64 push_wqe : 1; + u64 inline_data_flag : 1; + u64 rsvd3 : 1; + u64 rsvd4 : 1; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_send_inline_hdr { + struct { + u64 remote_inv_rkey : 32; + u64 op : 6; + u64 rsvd1 : 8; + u64 report_rtt : 1; + u64 imme_data_flag : 1; + u64 inline_data_len : 8; + u64 push_wqe : 1; + u64 inline_data_flag : 1; + u64 rsvd2 : 2; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_write_hdr { + struct { + u64 remote_key : 32; + u64 op : 6; + u64 addfragcnt : 4; + u64 rsvd1 : 4; + u64 report_rtt : 1; + u64 imme_data_flag : 1; + u64 rsvd2 : 8; + u64 push_wqe : 1; + u64 inline_data_flag : 1; + u64 rsvd3 : 2; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_write_inline_hdr { + struct { + u64 remote_key : 32; + u64 op : 6; + u64 rsvd1 : 8; + u64 report_rtt : 1; + u64 imme_data_flag : 1; + u64 inline_data_len : 8; + u64 push_wqe : 1; + u64 inline_data_flag : 1; + u64 rsvd2 : 2; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_read_hdr { + struct { + u64 remote_key : 32; + u64 op : 6; + u64 addfragcnt : 4; + u64 rsvd1 : 4; + u64 report_rtt : 1; + u64 rsvd2 : 1; + u64 rsvd3 : 8; + u64 push_wqe : 1; + u64 rsvd4 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_bindmw_hdr { + struct { + u64 rsvd0 : 32; + u64 op : 6; + u64 rsvd1 : 10; + u64 access : 5; + u64 va_base_flag : 1; + u64 mw_type : 1; + u64 rsvd2 : 1; + u64 push_wqe : 1; + u64 rsvd3 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_inval_hdr { + struct { + u64 rsvd0 : 32; + u64 op : 6; + u64 rsvd1 : 18; + u64 push_wqe : 1; + u64 rsvd3 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_fregmr_hdr { + struct { + u64 consumer_key : 8; + u64 mr_index : 24; + u64 op : 6; + u64 log_entity_size : 5; + u64 rsvd1 : 5; + u64 access : 5; + u64 va_base_flag : 1; + u64 pbl_mode : 2; + u64 push_wqe : 1; + u64 rsvd3 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_rq_hdr { + struct { + u64 rsv0 : 38; + u64 addfragcnt : 4; + u64 rsv1 : 21; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +union sxe2_nop_hdr { + struct { + u64 rsv0 : 32; + u64 op : 6; + u64 addfragcnt : 4; + u64 rsv1 : 14; + u64 push_wqe : 1; + u64 rsv2 : 3; + u64 read_fence : 1; + u64 local_fence : 1; + u64 signaled_completion : 1; + u64 wqe_valid : 1; + } field; + u64 val; +}; + +enum sxe2_qp_wqe_size { + SXE2_WQE_SIZE_32 = 32, + SXE2_WQE_SIZE_64 = 64, + SXE2_WQE_SIZE_96 = 96, + SXE2_WQE_SIZE_128 = 128, + SXE2_WQE_SIZE_256 = 256, +}; + +typedef int (*rdma_disp_func)(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq); +int sxe2_hw_send(struct sxe2_qp_common *qp, struct 
sxe2_wr_info *wr_info, + bool post_sq); +int sxe2_hw_inline_send(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq); +int sxe2_hw_rdma_write(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq); +int sxe2_hw_inline_rdma_write(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq); +int sxe2_hw_rdma_read(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq); +int sxe2_hw_mw_bind(struct sxe2_qp_common *qp, struct sxe2_wr_info *wr_info, + bool post_sq); +int sxe2_hw_local_invalidate(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq); +int sxe2_hw_mr_fast_register(struct sxe2_qp_common *qp, + struct sxe2_wr_info *wr_info, bool post_sq); +int sxe2_kpoll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); +int sxe2_kpost_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr); +int sxe2_kpost_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr, + const struct ib_send_wr **bad_wr); +int sxe2_kreq_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags); +int sxe2_kpost_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr); +int sxe2_generated_cmpls(struct sxe2_rdma_cq *rdma_cq, + struct sxe2_cqe_info *out_cqeinfo); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.c new file mode 100644 index 0000000000000000000000000000000000000000..7bf8407ecca26a9a56ff8054e5f4f3ccdb41a0af --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.c @@ -0,0 +1,2511 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_main.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sxe2_drv_main.h" +#include "sxe2_drv_common_debugfs.h" +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif +#include "sxe2_version.h" +#include "sxe2_drv_aux.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_db.h" +#include "sxe2_drv_stats.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_hw.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_device_port.h" +#include "sxe2_drv_cq.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_pd.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_rdma_qos.h" +#include "sxe2_drv_ah.h" +#include "sxe2_drv_io.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_srq.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_qos_debugfs.h" +#include "sxe2_drv_mc.h" +#include "sxe2_drv_rdma_inject.h" +#include "sxe2_drv_rdma_inject_debugfs.h" +#include "sxe2_drv_rdma_inject_reg.h" +#include "sxe2_drv_cc_debugfs.h" +#include "sxe2_drv_rdma_configfs.h" + +#define DRV_VER_MAJOR 1 +#define DRV_VER_MINOR 1 +#define DRV_VER_BUILD 1 +#define DRV_VER \ + __stringify(DRV_VER_MAJOR) "." __stringify( \ + DRV_VER_MINOR) "." 
__stringify(DRV_VER_BUILD)
+#define FUNCTION_ENABLE 1
+#define FUNCTION_DISABLE 0
+
+static u8 resource_profile;
+module_param(resource_profile, byte, 0444);
+MODULE_PARM_DESC(
+	resource_profile,
+	"Resource Profile: 0=PF only(default), 1=Weighted VF, 2=Even Distribution");
+
+unsigned short max_rdma_vfs = SXE2_MAX_PE_ENA_VF_COUNT;
+module_param(max_rdma_vfs, ushort, 0444);
+MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count, Range: 0-32, default=32");
+
+unsigned int limits_sel = SXE2_LIMITS_SEL_DEFAULT;
+module_param(limits_sel, uint, 0444);
+MODULE_PARM_DESC(limits_sel, "Resource limits selector, Range: 0-7, default=3");
+
+u8 fragment_count_limit = SXE2_FRAGCNT_LIMIT_DEFAULT;
+module_param(fragment_count_limit, byte, 0444);
+MODULE_PARM_DESC(
+	fragment_count_limit,
+	"adjust maximum values for queue depth and inline data size, Range: 2-13, default=6");
+
+u8 rcms_mode = SXE2_RCMS_MODE_2M;
+module_param(rcms_mode, byte, 0444);
+MODULE_PARM_DESC(
+	rcms_mode,
+	"rcms init mode, 1: 2MB page mode, 2: 4KB page mode, Range: 1-2, default=1");
+
+static u8 hygon_cpu_en;
+module_param(hygon_cpu_en, byte, 0444);
+MODULE_PARM_DESC(
+	hygon_cpu_en,
+	"hygon_cpu_en 0:auto 1:enable 2:disable, Range: 0-2, default=0");
+
+module_param_named(rdma_dmesg_level, g_sxe2_rdma_dmesg_level, uint, 0644);
+MODULE_PARM_DESC(
+	rdma_dmesg_level,
+	"modify sxe2 rdma dmesg log level,\n"
+	"\tRange: 0(LEVEL_EMERG)-7(LEVEL_DEBUG), default=4(WARNING)");
+
+#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT)
+static unsigned int inject_sleep_time;
+module_param(inject_sleep_time, uint, 0444);
+MODULE_PARM_DESC(inject_sleep_time, "set inject_sleep_time, default=0");
+#endif
+
+static bool dcqcn_enable;
+module_param(dcqcn_enable, bool, 0444);
+MODULE_PARM_DESC(
+	dcqcn_enable,
+	"enables DCQCN algorithm for RoCEv2 on all ports, default=false");
+
+bool dcqcn_cfg_valid;
+module_param(dcqcn_cfg_valid, bool, 0444);
+MODULE_PARM_DESC(dcqcn_cfg_valid,
+		 "set DCQCN parameters to be valid, default=false");
+
+u8 dcqcn_min_dec_factor = SXE2_CC_DCQCN_MIN_DEC_FACTOR_DEFAULT;
+module_param(dcqcn_min_dec_factor, byte, 0444);
+MODULE_PARM_DESC(
+	dcqcn_min_dec_factor,
+	"set minimum percentage factor by which tx\n"
+	"\trate can be changed for CNP, Range: 2-100, interval 2, default=2");
+u8 dcqcn_min_rate = SXE2_CC_DCQCN_MIN_RATE_DEFAULT;
+module_param(dcqcn_min_rate, byte, 0444);
+MODULE_PARM_DESC(
+	dcqcn_min_rate,
+	"set minimum rate percentage limit value, Range: 2-100, interval 2, default=2");
+u8 dcqcn_F = SXE2_CC_DCQCN_F_DEFAULT;
+module_param(dcqcn_F, byte, 0444);
+MODULE_PARM_DESC(
+	dcqcn_F,
+	"set number of times to stay in each stage of bandwidth recovery, Range 0-15, default=5");
+unsigned short dcqcn_T = SXE2_CC_DCQCN_T_DEFAULT;
+module_param(dcqcn_T, ushort, 0444);
+MODULE_PARM_DESC(
+	dcqcn_T,
+	"set number of usecs that should elapse before\n"
+	"\tincreasing rate in DCQCN mode, Range: 0x1-0xFFFF, default=120");
+unsigned int dcqcn_B = SXE2_CC_DCQCN_B_DEFAULT;
+module_param(dcqcn_B, uint, 0444);
+MODULE_PARM_DESC(
+	dcqcn_B,
+	"set number of bytes to transmit before increasing rate in DCQCN mode,\n"
+	"\tRange: 0x0-0xFFFFFF, default=131072");
+u8 dcqcn_timely_rai_factor = SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT;
+module_param(dcqcn_timely_rai_factor, byte, 0444);
+MODULE_PARM_DESC(
+	dcqcn_timely_rai_factor,
+	"set increasing rate factor percentage in additive increase mode,\n"
+	"\tRange: 2-100, interval 2, default=18");
+
+u8 dcqcn_rhai_factor = SXE2_CC_DCQCN_RHAI_DEFAULT;
+module_param(dcqcn_rhai_factor, byte, 0444);
+MODULE_PARM_DESC(
+	dcqcn_rhai_factor,
+	"set increasing rate factor percentage in hyperactive increase\n"
+	"\tmode (rhai > rai), Range: 2-100, interval 2, default=34");
+unsigned short dcqcn_rreduce_mperiod =
+	SXE2_CC_DCQCN_RREDUCE_MPERIOD_DEFAULT;
+module_param(dcqcn_rreduce_mperiod, ushort, 0444);
+MODULE_PARM_DESC(
+	dcqcn_rreduce_mperiod,
+	"set minimum time (usecs) between 2 consecutive rate reductions\n"
+	"\tfor a single flow, Range: 0x1-0xFFFF, default=60");
+
+static bool timely_enable;
+module_param(timely_enable, bool, 0444);
+MODULE_PARM_DESC(
+	timely_enable,
+	"enables Timely algorithm for RoCEv2 on all ports, default=false");
+
+bool timely_cfg_valid;
+module_param(timely_cfg_valid, bool, 0444);
+MODULE_PARM_DESC(timely_cfg_valid,
+		 "set Timely parameters to be valid, default=false");
+
+unsigned short timely_min_rtt = SXE2_CC_TIMELY_MIN_RTT_DEFAULT;
+module_param(timely_min_rtt, ushort, 0444);
+MODULE_PARM_DESC(timely_min_rtt,
+		 "set min rtt in Timely mode, Range: 0x1-0xFFFF, default=500");
+
+unsigned short timely_tlow = SXE2_CC_TIMELY_TLOW_DEFAULT;
+module_param(timely_tlow, ushort, 0444);
+MODULE_PARM_DESC(
+	timely_tlow,
+	"set rtt low threshold in Timely mode, Range: 0x1-0xFFFF, default=300");
+
+unsigned short timely_thigh = SXE2_CC_TIMELY_THIGH_DEFAULT;
+module_param(timely_thigh, ushort, 0444);
+MODULE_PARM_DESC(
+	timely_thigh,
+	"set rtt high threshold in Timely mode, Range: 0x1-0xFFFF, default=1000");
+
+LIST_HEAD(sxe2_handlers);
+DEFINE_SPINLOCK(sxe2_handler_lock);
+
+struct sxe2_reset_debug g_reset_debug;
+
+#ifndef SXE2_NATIVE_CPUID_NOT_SUPPORT
+bool sxe2_rdma_get_cpu_vendor(struct sxe2_rdma_device *rdma_dev)
+{
+	u32 eax = 0, ebx = 0, ecx = 0, edx = 0;
+	s8 cpu_vendor_id[SXE2_VENDOR_ID_SIZE] = "Unknown";
+	bool ret = false;
+
+	eax = SXE2_CPU_ID_GET_VENDOR_ID;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	memcpy(cpu_vendor_id, &ebx, 4);
+	memcpy(cpu_vendor_id + 4, &edx, 4);
+	memcpy(cpu_vendor_id + 8, &ecx, 4);
+	cpu_vendor_id[12] = '\0';
+	DRV_RDMA_LOG_DEV_DEBUG("cpu:get cpu vendor=%s\n", cpu_vendor_id);
+
+	if (strcmp(cpu_vendor_id, "HygonGenuine") == 0) {
+		DRV_RDMA_LOG_DEV_DEBUG("cpu:Hygon cpu config en\n");
+		ret = true;
+	} else {
+		DRV_RDMA_LOG_DEV_DEBUG("cpu:normal cpu config en\n");
+	}
+
+	return ret;
+}
+#endif
+
+void sxe2_rdma_free_one_vf(struct sxe2_rdma_vchnl_dev *vc_dev)
+{
+	struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev;
+	struct sxe2_rdma_ctx_vsi *vf_vsi = vc_dev->vf_vsi;
+	int i;
+
+	vc_dev->reset_en = true;
+	for (i = 0; i < SXE2_MAX_UESER_PRIORITY; i++)
+		vf_vsi->unregister_qsets(vf_vsi, &vf_vsi->qos[i].qset[0], NULL);
+
+	sxe2_vchnl_pf_put_vf_rcms_fcn(dev, &vc_dev);
+	sxe2_vchnl_put_vf_dev(&vc_dev);
+}
+
+static void sxe2_rdma_free_all_vf_rsrc(struct sxe2_rdma_ctx_dev *dev)
+{
+	u16 vf_idx;
+
+	for (vf_idx = 0; vf_idx < dev->num_vfs; vf_idx++) {
+		if (dev->vc_dev[vf_idx])
+			sxe2_rdma_free_one_vf(dev->vc_dev[vf_idx]);
+	}
+}
+
+#ifdef SXE2_CFG_DEBUG
+static inline void sxe2_rdma_debug_get_time(char *buff, int buf_len)
+{
+	struct timespec64 tv;
+	struct tm td;
+
+	ktime_get_real_ts64(&tv);
+	time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td);
+	snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]",
+		 (long)td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, td.tm_hour,
+		 td.tm_min, td.tm_sec, tv.tv_nsec / 1000);
+}
+#endif
+
+static void sxe2_rdma_fill_qos_info(struct sxe2_rdma_l2params *l2params,
+				    struct aux_qos_params *qos_info)
+{
+	int i;
+	int index;
+
+	for (index = 0; index < QOS_MAX_QSET_NUM_PER_USER_PRI; index++) {
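+		/* Mirror the per-qset QoS parameters reported by the core
+		 * driver (num_tc, per-TC priority/bandwidth, UP-to-TC map and
+		 * optional DSCP map) into the l2params consumed by the RDMA
+		 * QoS layer.
+		 */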
l2params[index].num_tc = qos_info[index].num_tc; + l2params[index].vsi_prio_type = qos_info[index].vport_priority_type; + l2params[index].vsi_rel_bw = qos_info[index].vport_relative_bw; + DRV_RDMA_LOG_DEBUG( + "%s: num_tc:%u, vport_priority_type:%u, vport_relative_bw:%u\n", + __func__, qos_info[index].num_tc, qos_info[index].vport_priority_type, + qos_info[index].vport_relative_bw); + for (i = 0; i < l2params[index].num_tc; i++) { + l2params[index].tc_info[i].egress_virt_up = + qos_info[index].tc_info[i].egress_virt_up; + l2params[index].tc_info[i].ingress_virt_up = + qos_info[index].tc_info[i].ingress_virt_up; + l2params[index].tc_info[i].prio_type = qos_info[index].tc_info[i].prio_type; + l2params[index].tc_info[i].rel_bw = qos_info[index].tc_info[i].rel_bw; + l2params[index].tc_info[i].tc_ctx = qos_info[index].tc_info[i].tc_ctx; + DRV_RDMA_LOG_DEBUG( + "%s: index:%u, egress_virt_up:%u, ingress_virt_up:%u,\n" + "\tprio_type:%u, rel_bw:%u, tc_ctx:%u\n", + __func__, i, qos_info[index].tc_info[i].egress_virt_up, + qos_info[index].tc_info[i].ingress_virt_up, + qos_info[index].tc_info[i].prio_type, + qos_info[index].tc_info[i].rel_bw, + qos_info[index].tc_info[i].tc_ctx); + } + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + DRV_RDMA_LOG_DEBUG("%s: index:%u up2tc:%u\n", __func__, i, + qos_info[index].up2tc[i]); + l2params[index].up2tc[i] = qos_info[index].up2tc[i]; + } + if (qos_info[index].pfc_mode == SXE2_AUX_DSCP_PFC_MODE) { + l2params[index].dscp_mode = true; + memcpy(l2params[index].dscp_map, qos_info[index].dscp_map, + sizeof(l2params[index].dscp_map)); + } + } + +} + +static void sxe2_aux_event_handler(struct aux_core_dev_info *cdev_info, + struct sxe2_rdma_event_info *event) +{ + struct sxe2_rdma_device *rdma_dev = + dev_get_drvdata(&cdev_info->adev->dev); + struct sxe2_rdma_vchnl_dev *vc_dev; + struct sxe2_rdma_l2params l2params[QOS_MAX_QSET_NUM_PER_USER_PRI] = {0}; + bool dscp_change_flag; +#ifdef SXE2_CFG_DEBUG + struct sxe2_reset_debug_func_info *reset_func_info; +#endif + + if (!rdma_dev || rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEBUG( + "aux_event_handler:rdma dev is null or reset\n" + "ready rdma_dev ptr=%p\n", + (void *)rdma_dev); + return; + } + + if (*event->type & BIT(SXE2_EVENT_NOTIFY_RESET)) { + DRV_RDMA_LOG_DEV_DEBUG( + "aux_event_handler:get WARN RESET event\n"); + rdma_dev->rdma_func->reset = true; + rdma_dev->rdma_func->ctx_dev.vchnl_up = false; +#ifdef SXE2_CFG_DEBUG + reset_func_info = rdma_dev->reset_func_info; + + mutex_lock(&g_reset_debug.reset_debug_mutex); + if (reset_func_info) { + if (reset_func_info->reset_info_idx < + MAX_RESET_INFO_CNT) { + reset_func_info->reset_cnt++; + reset_func_info + ->reset_info[reset_func_info + ->reset_info_idx] + .reset_type = FUNC_WARNING_RESET; + sxe2_rdma_debug_get_time( + reset_func_info + ->reset_info + [reset_func_info + ->reset_info_idx] + .time, + MAX_TIME_BUF_SIZE); + reset_func_info->reset_info_idx++; + if (reset_func_info->reset_info_idx == + MAX_RESET_INFO_CNT) { + reset_func_info->reset_info_idx = 0; + } + DRV_RDMA_LOG_DEV_DEBUG( + "rdma:reset cnt=%u reset info idx=%u reset info ptr=%p\n", + reset_func_info->reset_cnt, + reset_func_info->reset_info_idx, + reset_func_info); + } + } + mutex_unlock(&g_reset_debug.reset_debug_mutex); +#endif + } else if (*event->type & BIT(SXE2_EVENT_MTU_CHANGED)) { + if (rdma_dev->vsi.mtu != rdma_dev->netdev->mtu) { + rdma_dev->vsi.mtu = (u16)rdma_dev->netdev->mtu; + DRV_RDMA_LOG_DEV_DEBUG( + "aux_event_handler:new net dev mtu=%u\n", + rdma_dev->vsi.mtu); + } + } else if 
(*event->type & BIT(SXE2_EVENT_VF_RESET)) { + DRV_RDMA_LOG_DEV_DEBUG("aux_event_handler:vf %u reset\n", + event->vf_id); + vc_dev = sxe2_vchnl_find_vc_dev(&rdma_dev->rdma_func->ctx_dev, + event->vf_id); + if (vc_dev) + sxe2_rdma_free_one_vf(vc_dev); + } else if (*event->type & BIT(SXE2_EVENT_AEQ_OVERFLOW)) { + DRV_RDMA_LOG_DEV_DEBUG( + "aux_event_handler:get AEQ OVERFLOW event\n"); + if (!rdma_dev->rdma_func->reset) { + rdma_dev->rdma_func->reset = true; + rdma_dev->rdma_func->gen_ops.request_reset( + rdma_dev->rdma_func); + } + } else if (*event->type & BIT(SXE2_EVENT_FAILOVER)) { + DRV_RDMA_LOG_DEBUG_BDF( + "FAILOVER_FINISH:lag_mode %d, bitmap = 0x%x\n", + rdma_dev->lag_mode, cdev_info->rdma_pf_bitmap); + rdma_dev->vsi.lag_port_bitmap = cdev_info->rdma_pf_bitmap; + sxe2_rdma_qos_failover_complete(rdma_dev); + } else if (*event->type & BIT(SXE2_EVENT_TC_CHANGE)) { + DRV_RDMA_LOG_DEV_DEBUG( + "aux_event_handler:get tc change event,\n" + "\tpfc_mode: %u %u, tc_num:%u %u, app_num:%u %u\n", + cdev_info->qos_info[0].pfc_mode, + cdev_info->qos_info[1].pfc_mode, + cdev_info->qos_info[0].num_tc, + cdev_info->qos_info[1].num_tc, + cdev_info->qos_info[0].num_apps, + cdev_info->qos_info[1].num_apps); + sxe2_rdma_fill_qos_info(l2params, cdev_info->qos_info); + + if (rdma_dev->vsi.lag_aa) + dscp_change_flag = ((rdma_dev->vsi.dscp_mode[0] != l2params[0].dscp_mode) || + (rdma_dev->vsi.dscp_mode[1] + != l2params[1].dscp_mode)); + else + dscp_change_flag = (rdma_dev->vsi.dscp_mode[0] != l2params[0].dscp_mode); + if (dscp_change_flag) + sxe2_rdma_update_qos_info(&rdma_dev->vsi, l2params); + else + sxe2_rdma_qos_move_qset(&rdma_dev->vsi, l2params); + } +} + +static int sxe2_dbg_and_configs_init(void) +{ + int ret = 0; +#ifdef SXE2_SUPPORT_CONFIGFS + ret = sxe2_configfs_init(); + if (ret) { + DRV_RDMA_LOG_ERROR( + "ERR: Failed to register sxe2 to configfs subsystem\n", + ret); + goto out; + } +#endif + memset(&g_reset_debug, 0x0, sizeof(g_reset_debug)); + mutex_init(&g_reset_debug.reset_debug_mutex); + ret = sxe2_rdma_dbg_init(); + if (ret) { + DRV_RDMA_LOG_ERROR("debugfs init failed. 
ret:%d\n", ret); + goto err_destroy_mutex_configfs; + } + + goto out; +err_destroy_mutex_configfs: +#ifdef SXE2_SUPPORT_CONFIGFS + sxe2_configfs_exit(); +#endif +out: + return ret; +} + +static void sxe2_dbg_and_configs_exit(void) +{ +#ifdef SXE2_SUPPORT_CONFIGFS + sxe2_configfs_exit(); +#endif + sxe2_rdma_dbg_exit(); +} + +static void sxe2_rdma_request_reset(struct sxe2_rdma_pci_f *rdma_func) +{ + struct aux_core_dev_info *cdev_info = rdma_func->cdev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(&rdma_func->ctx_dev); +#ifdef SXE2_CFG_DEBUG + struct sxe2_reset_debug_func_info *reset_func_info; +#endif + DRV_RDMA_LOG_DEV_DEBUG("rdma:request reset\n"); + rdma_func->ctx_dev.vchnl_up = false; +#ifdef SXE2_CFG_DEBUG + reset_func_info = rdma_dev->reset_func_info; + + mutex_lock(&g_reset_debug.reset_debug_mutex); + + if (reset_func_info) { + if (reset_func_info->reset_info_idx < MAX_RESET_INFO_CNT) { + reset_func_info->reset_cnt++; + reset_func_info + ->reset_info[reset_func_info->reset_info_idx] + .reset_type = FUNC_REQUEST_RESET; + sxe2_rdma_debug_get_time( + reset_func_info + ->reset_info[reset_func_info + ->reset_info_idx] + .time, + MAX_TIME_BUF_SIZE); + reset_func_info->reset_info_idx++; + if (reset_func_info->reset_info_idx == + MAX_RESET_INFO_CNT) { + reset_func_info->reset_info_idx = 0; + } + DRV_RDMA_LOG_DEV_DEBUG( + "rdma:reset cnt=%u reset info idx=%u reset info ptr=%p\n", + reset_func_info->reset_cnt, + reset_func_info->reset_info_idx, + reset_func_info); + } + } + + mutex_unlock(&g_reset_debug.reset_debug_mutex); +#endif + cdev_info->ops->request_reset(rdma_func->cdev, AUX_PFR); +} + +void sxe2_rdma_cc_dcqcn_set_params(struct sxe2_rdma_pci_f *rdma_func) +{ + struct sxe2_rdma_cc_dcqcn_params *dcqcn_p = + &rdma_func->cc_params.dcqcn_params; + + dcqcn_p->b = dcqcn_B; + if (dcqcn_p->b > SXE2_CC_DCQCN_B_MAX) { + dcqcn_p->b = SXE2_CC_DCQCN_B_MAX; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_b value out of range(%u-%u), setting to %u.\n", + dcqcn_B, SXE2_CC_DCQCN_B_MIN, SXE2_CC_DCQCN_B_MAX, + dcqcn_p->b); + } + + dcqcn_p->t_interval = dcqcn_T; + if (dcqcn_p->t_interval < SXE2_CC_DCQCN_T_MIN) { + dcqcn_p->t_interval = SXE2_CC_DCQCN_T_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_t value out of range(%u-%u), setting to %u.\n", + dcqcn_T, SXE2_CC_DCQCN_T_MIN, SXE2_CC_DCQCN_T_MAX, + dcqcn_p->t_interval); + } + + dcqcn_p->f = dcqcn_F; + if (dcqcn_p->f > SXE2_CC_DCQCN_F_MAX) { + dcqcn_p->f = SXE2_CC_DCQCN_F_MAX; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_f value too high, setting to %u.\n", + dcqcn_F, dcqcn_p->f); + } + + dcqcn_p->rai_factor = dcqcn_timely_rai_factor; + if (dcqcn_p->rai_factor > SXE2_CC_DCQCN_TIMELY_RAI_MAX || + dcqcn_p->rai_factor < SXE2_CC_DCQCN_TIMELY_RAI_MIN) { + dcqcn_p->rai_factor = SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_rai_factor value out of range(%u-%u), setting to %u.\n", + dcqcn_timely_rai_factor, SXE2_CC_DCQCN_TIMELY_RAI_MIN, + SXE2_CC_DCQCN_TIMELY_RAI_MAX, dcqcn_p->rai_factor); + } + + dcqcn_p->rhai_factor = dcqcn_rhai_factor; + if (dcqcn_p->rhai_factor > SXE2_CC_DCQCN_RHAI_MAX || + dcqcn_p->rhai_factor < SXE2_CC_DCQCN_RHAI_MIN) { + dcqcn_p->rhai_factor = SXE2_CC_DCQCN_RHAI_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_rhai value out of range(%u-%u), setting to %u.\n", + dcqcn_rhai_factor, SXE2_CC_DCQCN_RHAI_MIN, + SXE2_CC_DCQCN_RHAI_MAX, dcqcn_p->rhai_factor); + } + + dcqcn_p->min_dec_factor = dcqcn_min_dec_factor; + if (dcqcn_p->min_dec_factor > 
SXE2_CC_DCQCN_MIN_DEC_FACTOR_MAX || + dcqcn_p->min_dec_factor < SXE2_CC_DCQCN_MIN_DEC_FACTOR_MIN) { + dcqcn_p->min_dec_factor = SXE2_CC_DCQCN_MIN_DEC_FACTOR_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_min_dec_factor out of range (%u-%u), setting to %u.\n", + dcqcn_min_dec_factor, SXE2_CC_DCQCN_MIN_DEC_FACTOR_MIN, + SXE2_CC_DCQCN_MIN_DEC_FACTOR_MAX, + dcqcn_p->min_dec_factor); + } + + dcqcn_p->rreduce_mperiod = dcqcn_rreduce_mperiod; + if (dcqcn_p->rreduce_mperiod > SXE2_CC_DCQCN_RREDUCE_MPERIOD_MAX || + dcqcn_p->rreduce_mperiod < SXE2_CC_DCQCN_RREDUCE_MPERIOD_MIN) { + dcqcn_p->rreduce_mperiod = + SXE2_CC_DCQCN_RREDUCE_MPERIOD_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_rreduce_mperiod out of range (%u-%u), setting to %u.\n", + dcqcn_rreduce_mperiod, + SXE2_CC_DCQCN_RREDUCE_MPERIOD_MIN, + SXE2_CC_DCQCN_RREDUCE_MPERIOD_MAX, + dcqcn_p->rreduce_mperiod); + } + + dcqcn_p->min_rate = dcqcn_min_rate; + if (dcqcn_p->min_rate > SXE2_CC_DCQCN_MIN_RATE_MAX || + dcqcn_p->min_rate < SXE2_CC_DCQCN_MIN_RATE_MIN) { + dcqcn_p->min_rate = SXE2_CC_DCQCN_MIN_RATE_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_min_rate out of range (%u-%u), setting to %u.\n", + dcqcn_min_rate, SXE2_CC_DCQCN_MIN_RATE_MIN, + SXE2_CC_DCQCN_MIN_RATE_MAX, dcqcn_p->min_rate); + } +} + +void sxe2_rdma_cc_timely_set_params(struct sxe2_rdma_pci_f *rdma_func) +{ + struct sxe2_rdma_cc_timely_params *timely_p = + &rdma_func->cc_params.timely_params; + + timely_p->rai_factor = dcqcn_timely_rai_factor; + if (timely_p->rai_factor > SXE2_CC_DCQCN_TIMELY_RAI_MAX || + timely_p->rai_factor < SXE2_CC_DCQCN_TIMELY_RAI_MIN) { + timely_p->rai_factor = SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] dcqcn_rai_factor value out of range(%u-%u), setting to %u.\n", + dcqcn_timely_rai_factor, SXE2_CC_DCQCN_TIMELY_RAI_MIN, + SXE2_CC_DCQCN_TIMELY_RAI_MAX, timely_p->rai_factor); + } + + timely_p->min_rtt = timely_min_rtt; + if (timely_p->min_rtt < SXE2_CC_TIMELY_MIN_RTT_MIN) { + timely_p->min_rtt = SXE2_CC_TIMELY_MIN_RTT_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] timely_min_rtt out of range (%u-%u), setting to %u.\n", + timely_min_rtt, SXE2_CC_TIMELY_MIN_RTT_MIN, + SXE2_CC_TIMELY_MIN_RTT_MAX, timely_p->min_rtt); + } + + timely_p->tlow = timely_tlow; + if (timely_p->tlow < SXE2_CC_TIMELY_TLOW_MIN) { + timely_p->tlow = SXE2_CC_TIMELY_TLOW_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] timely_tlow out of range (%u-%u), setting to %u.\n", + timely_tlow, SXE2_CC_TIMELY_TLOW_MIN, + SXE2_CC_TIMELY_TLOW_MAX, timely_p->tlow); + } + + timely_p->thigh = timely_thigh; + if (timely_p->thigh < SXE2_CC_TIMELY_THIGH_MIN) { + timely_p->thigh = SXE2_CC_TIMELY_THIGH_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%u] timely_thigh out of range (%u-%u), setting to %u.\n", + timely_thigh, SXE2_CC_TIMELY_THIGH_MIN, + SXE2_CC_TIMELY_THIGH_MAX, timely_p->thigh); + } +} + +void +sxe2_rdma_set_func_user_cfg_params(struct sxe2_rdma_pci_f *rdma_func) + +{ + if (limits_sel > SXE2_LIMITS_SEL_MAX) + limits_sel = SXE2_LIMITS_SEL_MAX; + + rdma_func->limits_sel = limits_sel; + rdma_func->rsrc_profile = + (resource_profile < SXE2_RCMS_PROFILE_EQUAL) ? + (u8)resource_profile + SXE2_RCMS_PROFILE_DEFAULT : + SXE2_RCMS_PROFILE_DEFAULT; + if (max_rdma_vfs > SXE2_MAX_PE_ENA_VF_COUNT) { + pr_warn_once("sxe2_rdma: Requested VF count [%d] is above max\n" + "\tsupported. 
Setting to %d.\n", + max_rdma_vfs, SXE2_MAX_PE_ENA_VF_COUNT); + max_rdma_vfs = SXE2_MAX_PE_ENA_VF_COUNT; + } + rdma_func->max_rdma_vfs = + (rdma_func->rsrc_profile != SXE2_RCMS_PROFILE_DEFAULT) ? + max_rdma_vfs : + 0; + rdma_func->fragcnt_limit = fragment_count_limit; + if (rdma_func->fragcnt_limit > SXE2_FRAGCNT_LIMIT_MAX + || rdma_func->fragcnt_limit < SXE2_FRAGCNT_LIMIT_MIN) { + rdma_func->fragcnt_limit = SXE2_FRAGCNT_LIMIT_DEFAULT; + pr_warn_once( + "sxe2_rdma: Requested [%d] fragment count limit out of\n" + "\trange (2-13), setting to default=6.\n", + fragment_count_limit); + } + if (rcms_mode > SXE2_RCMS_MODE_4K || rcms_mode == 0) { + rcms_mode = SXE2_RCMS_MODE_2M; + pr_warn_once( + "sxe2_rdma: Requested [%d] rcms init mode limit out of\n" + "\trange (1-2), setting to default=1.\n", + rcms_mode); + } + + if (rcms_mode == 1) { + rdma_func->rcms_mode.ctx_mode = SXE2_RCMS_FIRST_INIT_MODE; + rdma_func->rcms_mode.pbl_mode = SXE2_PBL_SECOND_INIT_MODE; + } else if (rcms_mode == 2) { + rdma_func->rcms_mode.ctx_mode = SXE2_RCMS_SECOND_INIT_MODE; + rdma_func->rcms_mode.pbl_mode = SXE2_PBL_THIRD_INIT_MODE; + } + + rdma_func->cc_params.dcqcn_enable = dcqcn_enable; + rdma_func->cc_params.dcqcn_cfg_valid = dcqcn_cfg_valid; + rdma_func->cc_params.cnp_ecn = SXE2_QP_CC_CNP_ECN_ENABLE; + rdma_func->cc_params.ecn = SXE2_QP_CC_ECN_ENABLE; + if (dcqcn_cfg_valid) { + sxe2_rdma_cc_dcqcn_set_params(rdma_func); + } else { + rdma_func->cc_params.dcqcn_params.t_interval = + SXE2_CC_DCQCN_T_DEFAULT; + rdma_func->cc_params.dcqcn_params.b = SXE2_CC_DCQCN_B_DEFAULT; + rdma_func->cc_params.dcqcn_params.f = SXE2_CC_DCQCN_F_DEFAULT; + rdma_func->cc_params.dcqcn_params.rai_factor = + SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT; + rdma_func->cc_params.dcqcn_params.rhai_factor = + SXE2_CC_DCQCN_RHAI_DEFAULT; + rdma_func->cc_params.dcqcn_params.rreduce_mperiod = + SXE2_CC_DCQCN_RREDUCE_MPERIOD_DEFAULT; + rdma_func->cc_params.dcqcn_params.min_dec_factor = + SXE2_CC_DCQCN_MIN_DEC_FACTOR_DEFAULT; + rdma_func->cc_params.dcqcn_params.min_rate = + SXE2_CC_DCQCN_MIN_RATE_DEFAULT; + } + + rdma_func->cc_params.timely_enable = timely_enable; + rdma_func->cc_params.timely_cfg_valid = timely_cfg_valid; + if (timely_cfg_valid) { + sxe2_rdma_cc_timely_set_params(rdma_func); + } else { + rdma_func->cc_params.timely_params.rai_factor = + SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT; + rdma_func->cc_params.timely_params.min_rtt = + SXE2_CC_TIMELY_MIN_RTT_DEFAULT; + rdma_func->cc_params.timely_params.tlow = + SXE2_CC_TIMELY_TLOW_DEFAULT; + rdma_func->cc_params.timely_params.thigh = + SXE2_CC_TIMELY_THIGH_DEFAULT; + } +} + +void sxe2_rdma_init_cc_params(struct sxe2_rdma_pci_f *rdma_func) +{ + struct sxe2_rdma_cc_dcqcn_params *dcqcn_params = + &rdma_func->cc_params.dcqcn_params; + struct sxe2_rdma_cc_timely_params *timely_params = + &rdma_func->cc_params.timely_params; + dcqcn_params->k = SXE2_CC_DCQCN_K_VAL; + dcqcn_params->bc = SXE2_CC_DCQCN_BC_VAL; + dcqcn_params->tc = SXE2_CC_DCQCN_TC_VAL; + dcqcn_params->g = SXE2_CC_DCQCN_G_VAL; + dcqcn_params->rt = SXE2_CC_DCQCN_RT_VAL; + dcqcn_params->rc = SXE2_CC_DCQCN_RC_VAL; + dcqcn_params->alpha = SXE2_CC_DCQCN_ALPHA_VAL; + dcqcn_params->rreduce_next_node_info = + SXE2_CC_DCQCN_RREDUCE_NEXT_NODE_INFO_VAL; + dcqcn_params->decrease_rate_valid = + SXE2_CC_DCQCN_DECREASE_RATE_VALID_VAL; + dcqcn_params->t_next_node_info = SXE2_CC_DCQCN_T_NEXT_NODE_INFO_VAL; + dcqcn_params->byte_counter = SXE2_CC_DCQCN_BYTE_COUNTER_VAL; + timely_params->pre_rtt = SXE2_CC_TIMELY_PRE_RTT_VAL; + timely_params->beta = SXE2_CC_TIMELY_BETA_VAL; + 
timely_params->alpha = SXE2_CC_TIMELY_ALPHA_VAL;
+	timely_params->rtt_diff = SXE2_CC_TIMELY_RTT_DIFF_VAL;
+}
+
+int sxe2_rdma_save_msix_info(struct sxe2_rdma_pci_f *rdma_func)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev;
+	struct sxe2_rdma_qvlist_info *sxe2_qvlist;
+	struct sxe2_rdma_qv_info *sxe2_qvinfo;
+	struct msix_entry *pmsix;
+	u16 ceq_idx;
+	u32 i;
+	u32 size;
+
+	if (!rdma_func->msix_count) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"probe:no msi-x vector reserved for rdma\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG("rdma_func->msix_count : %d\n",
+			       rdma_func->msix_count);
+	size = (u32)(sizeof(struct sxe2_rdma_msix_vector) *
+		     rdma_func->msix_count);
+	size += sizeof(*sxe2_qvlist);
+	/* the qvlist header is assumed to embed one qv_info entry, so
+	 * reserve space for the remaining msix_count - 1 entries
+	 */
+	size += sizeof(*sxe2_qvinfo) * (rdma_func->msix_count - 1);
+	rdma_func->sxe2_msixtbl = kzalloc(size, GFP_KERNEL);
+	if (!rdma_func->sxe2_msixtbl) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"probe:alloc msix table for rdma err ret=%d\n", ret);
+		goto end;
+	}
+	rdma_func->sxe2_qvlist =
+		(struct sxe2_rdma_qvlist_info
+			 *)(&rdma_func->sxe2_msixtbl[rdma_func->msix_count]);
+	sxe2_qvlist = rdma_func->sxe2_qvlist;
+	sxe2_qvinfo = sxe2_qvlist->qv_info;
+	sxe2_qvlist->num_vectors = rdma_func->msix_count;
+
+	if (rdma_func->msix_count <= num_online_cpus())
+		rdma_func->msix_shared = true;
+	else if (rdma_func->msix_count > num_online_cpus() + 1)
+		rdma_func->msix_count = num_online_cpus() + 1;
+
+	pmsix = rdma_func->msix_entries;
+	for (i = 0, ceq_idx = 0; i < rdma_func->msix_count;
+	     i++, sxe2_qvinfo++) {
+		DRV_RDMA_LOG_DEV_DEBUG(
+			"rdma_func->msix_entries[%d] vector: %d entry:%d\n", i,
+			pmsix->vector, pmsix->entry);
+		rdma_func->sxe2_msixtbl[i].idx = pmsix->entry;
+		rdma_func->sxe2_msixtbl[i].irq = pmsix->vector;
+		rdma_func->sxe2_msixtbl[i].cpu_affinity = ceq_idx;
+		if (!i) {
+			sxe2_qvinfo->aeq_idx = 0;
+			if (rdma_func->msix_shared)
+				sxe2_qvinfo->ceq_idx = ceq_idx++;
+			else
+				sxe2_qvinfo->ceq_idx = SXE2_Q_INVALID_IDX;
+		} else {
+			sxe2_qvinfo->aeq_idx = SXE2_Q_INVALID_IDX;
+			sxe2_qvinfo->ceq_idx = ceq_idx++;
+		}
+		sxe2_qvinfo->itr_idx = SXE2_IDX_NOITR;
+		sxe2_qvinfo->v_idx = rdma_func->sxe2_msixtbl[i].idx;
+		pmsix++;
+	}
+end:
+	return ret;
+}
+
+void sxe2_rdma_disassociate_ucontext(struct ib_ucontext *ibctx)
+{
+	(void)ibctx;
+}
+
+#ifdef HAVE_NO_IB_DEVICE_OPS
+static const struct ib_device
+#else
+static const struct ib_device_ops
+#endif
+	sxe2_rdma_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_SXE2,
+	.uverbs_abi_ver = 1,
+	.alloc_ucontext = sxe2_rdma_kalloc_ucontext,
+	.dealloc_ucontext = sxe2_rdma_kdealloc_ucontext,
+	.query_device = sxe2_rdma_kquery_device,
+	.query_port = sxe2_rdma_kquery_port,
+	.query_gid = sxe2_rdma_kquery_gid,
+	.get_link_layer = sxe2_rdma_kget_link_layer,
+	.query_pkey = sxe2_query_pkey,
+	.get_dev_fw_str = sxe2_rdma_kget_dev_fw_str,
+#ifndef CREATE_AH_NOT_SUPPORT
+	.create_user_ah = sxe2_kcreate_ah,
+#endif
+	.create_ah = sxe2_kcreate_ah,
+	.query_ah = sxe2_kquery_ah,
+	.destroy_ah = sxe2_kdestroy_ah,
+	.alloc_mr = sxe2_kalloc_mr,
+	.reg_user_mr = sxe2_kreg_user_mr,
+	.rereg_user_mr = sxe2_krereg_user_mr,
+	.get_dma_mr = sxe2_kget_dma_mr,
+	.dereg_mr = sxe2_kdereg_mr,
+#ifndef REG_USER_MR_DMABUF_VER_1
+	.reg_user_mr_dmabuf = sxe2_kreg_user_mr_dmabuf,
+#endif
+	.poll_cq = sxe2_kpoll_cq,
+	.post_recv = sxe2_kpost_recv,
+	.post_send = sxe2_kpost_send,
+	.post_srq_recv = sxe2_kpost_srq_recv,
+	.req_notify_cq = sxe2_kreq_notify_cq,
+	.map_mr_sg = sxe2_kmap_mr_sg,
+	.get_port_immutable = sxe2_kget_port_immutable,
+
.create_qp = sxe2_kcreate_qp, + .destroy_qp = sxe2_kdestroy_qp, + .modify_qp = sxe2_kmodify_qp, + .query_qp = sxe2_kquery_qp, + .create_srq = sxe2_kcreate_srq, + .modify_srq = sxe2_kmodify_srq, + .query_srq = sxe2_kquery_srq, + .destroy_srq = sxe2_kdestroy_srq, +#ifdef ALLOC_HW_STATS_V1 + .alloc_hw_stats = sxe2_kalloc_hw_port_stats, +#else + .alloc_hw_port_stats = sxe2_kalloc_hw_port_stats, +#endif + .get_hw_stats = sxe2_kget_hw_stats, + .mmap = sxe2_kmmap, +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + .mmap_free = sxe2_kmmap_free, +#endif + .alloc_pd = sxe2_kalloc_pd, + .dealloc_pd = sxe2_kdealloc_pd, + .create_cq = sxe2_kcreate_cq, + .modify_cq = sxe2_kmodify_cq, + .destroy_cq = sxe2_kdestroy_cq, + .modify_port = sxe2_rdma_kmodify_port, + .get_netdev = sxe2_rdma_kget_net_dev, + .disassociate_ucontext = sxe2_rdma_disassociate_ucontext, + .attach_mcast = sxe2_kattach_mcast, + .detach_mcast = sxe2_kdetach_mcast, + +#ifndef GLOBAL_WM_MEM_NOT_SUPPORT + INIT_RDMA_OBJ_SIZE(ib_mw, sxe2_mr, ibmw), +#endif +#ifndef GLOBAL_QP_MEM_NOT_SUPPORT + INIT_RDMA_OBJ_SIZE(ib_qp, sxe2_rdma_qp, ibqp), +#endif +#ifndef HAVE_NO_DEFINE_STRUCT + INIT_RDMA_OBJ_SIZE(ib_ah, sxe2_ah, ibah), + INIT_RDMA_OBJ_SIZE(ib_cq, sxe2_rdma_cq, ibcq), + INIT_RDMA_OBJ_SIZE(ib_pd, sxe2_rdma_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_srq, sxe2_rdma_srq, ibsrq), + INIT_RDMA_OBJ_SIZE(ib_ucontext, sxe2_rdma_kcontext, ibucontext), +#endif +}; + +static void sxe2_add_handler(struct sxe2_rdma_handler *hdl) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&sxe2_handler_lock, flags); + list_add(&hdl->list, &sxe2_handlers); + spin_unlock_irqrestore(&sxe2_handler_lock, flags); +} + +static void sxe2_del_handler(struct sxe2_rdma_handler *hdl) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&sxe2_handler_lock, flags); + if (!list_empty(&hdl->list)) + list_del(&hdl->list); + spin_unlock_irqrestore(&sxe2_handler_lock, flags); +} + +static unsigned int sxe2_calc_mem_rsrc_size(struct sxe2_rdma_pci_f *rf) +{ + unsigned int rsrc_size; + + rsrc_size = (unsigned int)(sizeof(unsigned long) * + BITS_TO_LONGS(rf->max_qp)); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qsets); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_dbs); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); + + rsrc_size += sizeof(struct sxe2_rdma_qp **) * rf->max_qp; + rsrc_size += sizeof(struct sxe2_rdma_cq **) * rf->max_cq; + rsrc_size += sizeof(struct sxe2_rdma_srq **) * rf->max_srq; + + return rsrc_size; +} + +static void sxe2_rdma_set_hw_rsrc(struct sxe2_rdma_pci_f *rf) +{ + rf->allocated_qps = (void *)rf->mem_rsrc; + rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; + rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; + rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)]; + rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; + rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; + rf->allocated_qset = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; + rf->allocated_dbs = &rf->allocated_qset[BITS_TO_LONGS(rf->max_qsets)]; + rf->qp_table = + (struct sxe2_rdma_qp * + *)(&rf->allocated_dbs[BITS_TO_LONGS(rf->max_dbs)]); + rf->cq_table = (struct sxe2_rdma_cq **)(&rf->qp_table[rf->max_qp]); + rf->srq_table = (struct 
sxe2_rdma_srq **)(&rf->cq_table[rf->max_cq]); + + spin_lock_init(&rf->rsrc_lock); + spin_lock_init(&rf->qptable_lock); + spin_lock_init(&rf->cqtable_lock); + spin_lock_init(&rf->srqtable_lock); + spin_lock_init(&rf->qh_list_lock); +} + +static int sxe2_init_hw_rsrc(struct sxe2_rdma_device *rdma_dev) +{ + unsigned int rsrc_size; + unsigned int mrdrvbits = 0; + int ret = 0; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + int count_order = 0; + + rf->bar_db_addr = (phys_addr_t)(pci_resource_start(rf->pcidev, 0) + + rf->ctx_dev.rcms_info->db_bar_addr); + + rf->max_cqe = rf->ctx_dev.hw_attrs.uk_attrs.max_hw_cq_size; + rf->max_qp = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].cnt; + rf->max_mr = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_MR].cnt; + rf->max_cq = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].cnt; + rf->max_pd = rf->ctx_dev.hw_attrs.max_hw_pds; + rf->max_ah = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_AH].cnt; + rf->max_dbs = rf->ctx_dev.rcms_info->max_db_page_num; + rf->max_srq = rf->ctx_dev.rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt; + rf->max_cc_qp_cnt = rf->ctx_dev.rcms_info->max_cc_qp_cnt; + if (!rf->ftype) + rf->max_qsets = SXE2_PF_MAX_QSET_CNT; + else + rf->max_qsets = SXE2_VF_MAX_QSET_CNT; + + rsrc_size = sxe2_calc_mem_rsrc_size(rf); + + rf->mem_rsrc = vzalloc(rsrc_size); + if (!rf->mem_rsrc) { + ret = -ENOMEM; + goto end; + } + + sxe2_rdma_set_hw_rsrc(rf); + + set_bit(0, rf->allocated_mrs); + set_bit(0, rf->allocated_qps); + set_bit(0, rf->allocated_cqs); + set_bit(0, rf->allocated_pds); + set_bit(0, rf->allocated_ahs); + set_bit(1, rf->allocated_qps); + + set_bit(1, rf->allocated_dbs); + + count_order = get_count_order(rf->max_mr); + mrdrvbits = 24 - max(count_order, 14); + rf->mr_stagmask = ~(u32)(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); + atomic_set(&rf->cc_refcount.cc_qp_refcount, 0); + mutex_init(&rf->cc_refcount.refcount_lock); +end: + return ret; +} + +static int sxe2_init_rsrc_wq(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + + rdma_dev->cleanup_wq = alloc_workqueue( + "sxe2rdma-cleanup-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + if (!rdma_dev->cleanup_wq) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("cleanup-wq alloc failed. 
ret:%d\n", ret);
+		goto end;
+	}
+
+	rdma_dev->rdma_func->used_pds =
+		(u32)find_first_zero_bit(rdma_dev->rdma_func->allocated_pds,
+					 rdma_dev->rdma_func->max_pd);
+	rdma_dev->rdma_func->used_qps =
+		(u32)find_first_zero_bit(rdma_dev->rdma_func->allocated_qps,
+					 rdma_dev->rdma_func->max_qp);
+	rdma_dev->rdma_func->used_cqs =
+		(u32)find_first_zero_bit(rdma_dev->rdma_func->allocated_cqs,
+					 rdma_dev->rdma_func->max_cq);
+	rdma_dev->rdma_func->used_mrs =
+		(u32)find_first_zero_bit(rdma_dev->rdma_func->allocated_mrs,
+					 rdma_dev->rdma_func->max_mr);
+	rdma_dev->rdma_func->used_srqs =
+		(u32)find_first_zero_bit(rdma_dev->rdma_func->allocated_srqs,
+					 rdma_dev->rdma_func->max_srq);
+
+	init_waitqueue_head(&rdma_dev->suspend_wq);
+
+end:
+	return ret;
+}
+
+static int sxe2_rdma_wait_fw_ready(struct sxe2_rdma_ctx_dev *dev)
+{
+	int ret = SXE2_OK;
+	return ret;
+}
+
+static struct sxe2_rdma_vchnl_if sxe2_vchnl_if_pf = {
+	.vchnl_recv = sxe2_vchnl_recv_pf,
+};
+
+static struct sxe2_rdma_vchnl_if sxe2_vchnl_if_vf = {
+	.vchnl_recv = sxe2_vchnl_recv_vf,
+};
+
+static int sxe2_rdma_vchnl_init(struct sxe2_rdma_device *rdma_dev,
+				struct aux_core_dev_info *cdev_info,
+				u8 *rdma_ver)
+{
+	int ret = SXE2_OK;
+	struct sxe2_vchnl_init_info virt_info;
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	u8 gen = cdev_info->rdma_caps.gen;
+
+	rdma_func->vchnl_wq =
+		alloc_ordered_workqueue("sxe2rdma-virtchnl-wq", 0);
+	if (!rdma_func->vchnl_wq) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_ERROR_BDF("probe:alloc vchnl wq err ret=%d\n", ret);
+		goto end;
+	}
+
+	mutex_init(&rdma_func->ctx_dev.vchnl_mutex);
+	virt_info.hw_rev = !gen ? SXE2_RDMA_GEN_1 : gen;
+	if (cdev_info->ftype)
+		virt_info.privileged = false;
+	else
+		virt_info.privileged = true;
+
+	virt_info.vchnl_if =
+		virt_info.privileged ? &sxe2_vchnl_if_pf : &sxe2_vchnl_if_vf;
+	virt_info.vchnl_wq = rdma_func->vchnl_wq;
+	ret = sxe2_vchnl_ctx_init(&rdma_func->ctx_dev, &virt_info);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_ERROR_BDF("probe:vchnl ctx init err ret=%d\n", ret);
+		goto vchnl_ctx_init_err;
+	}
+	*rdma_ver = rdma_func->ctx_dev.hw_attrs.uk_attrs.hw_rev;
+	goto end;
+
+vchnl_ctx_init_err:
+	destroy_workqueue(rdma_func->vchnl_wq);
+end:
+	return ret;
+}
+
+int sxe2_rdma_init_ctx_dev(struct sxe2_rdma_ctx_dev *dev,
+			   struct sxe2_rdma_device_init_info *info)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+
+	INIT_LIST_HEAD(&dev->mq_cmd_head);
+	mutex_init(&dev->lag_mutex);
+	dev->rcms_fn_id = info->rcms_fn_id;
+	dev->num_vfs = info->max_vfs;
+	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+	dev->fpm_query_buf = info->fpm_query_buf;
+	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+	dev->fpm_commit_buf = info->fpm_commit_buf;
+	dev->hw = info->hw;
+	dev->hw->hw_addr = info->bar0;
+	dev->hw_attrs.min_hw_qp_id = SXE2_MIN_IW_QP_ID;
+	dev->hw_attrs.min_hw_srq_id = SXE2_MIN_IW_SRQ_ID;
+	dev->hw_attrs.min_hw_aeq_size = SXE2_MIN_AEQ_ENTRIES;
+	dev->hw_attrs.max_hw_aeq_size = SXE2_MAX_AEQ_ENTRIES;
+	dev->hw_attrs.min_hw_ceq_size = SXE2_MIN_CEQ_ENTRIES;
+	dev->hw_attrs.max_hw_ceq_size = SXE2_MAX_CEQ_ENTRIES;
+	dev->hw_attrs.uk_attrs.min_hw_cq_size = SXE2_MIN_CQ_SIZE;
+	dev->hw_attrs.uk_attrs.max_hw_cq_size = SXE2_MAX_CQ_SIZE;
+	dev->hw_attrs.max_mr_size = SXE2_MAX_MR_SIZE;
+	dev->hw_attrs.max_hw_outbound_msg_size = SXE2_MAX_OUTBOUND_MSG_SIZE;
+	dev->hw_attrs.max_hw_inbound_msg_size = SXE2_MAX_INBOUND_MSG_SIZE;
+	dev->hw_attrs.uk_attrs.max_hw_inline = SXE2_MAX_INLINE_DATA_SIZE;
+	dev->hw_attrs.max_hw_wqes = SXE2_MAX_WQ_ENTRIES;
+	dev->hw_attrs.max_qp_wr = SXE2_MAX_QP_WRS(SXE2_MAX_QUANTA_PER_WR);
+	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = SXE2_QP_SW_MAX_RQ_QUANTA;
+	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = SXE2_QP_SW_MAX_WQ_QUANTA;
+	dev->hw_attrs.max_hw_pds = SXE2_MAX_PDS;
+	dev->hw_attrs.max_hw_ena_vf_count = SXE2_MAX_PE_ENA_VF_COUNT;
+	dev->hw_attrs.max_pe_ready_count = 14;
+	dev->hw_attrs.max_done_count = SXE2_DONE_COUNT;
+	dev->hw_attrs.max_sleep_count = SXE2_SLEEP_COUNT;
+	dev->hw_attrs.max_mq_compl_wait_time_ms = SXE2_MQ_COMPL_WAIT_TIME_MS;
+	dev->hw_attrs.uk_attrs.max_hw_srq_quanta = SXE2_SRQ_SW_MAX_SRQ_QUANTA;
+	dev->hw_attrs.uk_attrs.max_hw_srq_wr = SXE2_MAX_SRQ_WRS;
+	if (!dev->privileged) {
+		ret = sxe2_vchnl_req_get_rcms_fcn(dev);
+		if (ret != SXE2_OK) {
+			DRV_RDMA_LOG_DEV_ERR(
+				"probe:vchnl get rcms fcn err ret=%d\n", ret);
+			goto end;
+		}
+	}
+	spin_lock_init(&dev->vc_dev_lock);
+	sxe2_rdma_init_hw(dev);
+
+	if (dev->privileged) {
+		if (sxe2_rdma_wait_fw_ready(dev)) {
+			ret = -ETIMEDOUT;
+			DRV_RDMA_LOG_DEV_ERR(
+				"probe:fw is not ready err ret=%d\n", ret);
+			goto end;
+		}
+	}
+
+end:
+	return ret;
+}
+
+int sxe2_rdma_initialize_dev(struct sxe2_rdma_pci_f *rdma_func)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev;
+	struct sxe2_rdma_device_init_info info = {};
+	struct sxe2_rdma_dma_mem mem;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+	u32 size;
+
+	size = sizeof(struct sxe2_pbl_pble_rsrc) +
+	       (sizeof(struct sxe2_rcms_obj_info) * SXE2_RCMS_OBJ_MAX);
+	rdma_func->rcms_info_mem = kzalloc(size, GFP_KERNEL);
+	if (!rdma_func->rcms_info_mem) {
+		DRV_RDMA_LOG_DEV_ERR("probe:alloc rcms pbl mem err\n");
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	rdma_func->pble_rsrc =
+		(struct sxe2_pbl_pble_rsrc *)rdma_func->rcms_info_mem;
+	dev->rcms_info = &rdma_func->hw.rcms;
+	dev->rcms_info->rcms_obj =
+		(struct sxe2_rcms_obj_info *)(rdma_func->pble_rsrc + 1);
+	ret = sxe2_kget_aligned_mem(rdma_func, &mem, SXE2_QUERY_FPM_BUF_SIZE,
+				    SXE2_FPM_QUERY_BUF_ALIGNMENT_M);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"probe:query buffer aligned mem err ret=%d\n", ret);
+		goto error;
+	}
+	info.fpm_query_buf_pa = mem.pa;
+	info.fpm_query_buf = mem.va;
+
+	ret = sxe2_kget_aligned_mem(rdma_func, &mem, SXE2_COMMIT_FPM_BUF_SIZE,
+				    SXE2_FPM_COMMIT_BUF_ALIGNMENT_M);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"probe:commit buffer aligned mem err ret=%d\n", ret);
+		goto error;
+	}
+	info.fpm_commit_buf_pa = mem.pa;
+	info.fpm_commit_buf = mem.va;
+	info.bar0 = rdma_func->hw.hw_addr;
+	info.rcms_fn_id = rdma_func->pf_id;
+	info.max_vfs = rdma_func->max_rdma_vfs;
+	info.hw = &rdma_func->hw;
+	ret = sxe2_rdma_init_ctx_dev(&rdma_func->ctx_dev, &info);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("probe:init ctx dev err ret=%d\n", ret);
+		goto error;
+	}
+	if (!rdma_func->ctx_dev.privileged) {
+		ret = sxe2_vchnl_req_get_vlan_parsing_cfg(
+			&rdma_func->ctx_dev, &rdma_func->vlan_parse_en);
+		if (ret != SXE2_OK) {
+			DRV_RDMA_LOG_DEV_ERR(
+				"probe:vf vchnl vlan parsing err ret=%d\n",
+				ret);
+			goto error;
+		}
+	} else {
+		rdma_func->vlan_parse_en = 1;
+	}
+	goto end;
+error:
+	kfree(rdma_func->rcms_info_mem);
+	rdma_func->rcms_info_mem = NULL;
+end:
+	return ret;
+}
+
+static void sxe2_destroy_rsrc_wq(struct sxe2_rdma_device *rdma_dev)
+{
+	if (rdma_dev->cleanup_wq)
+		destroy_workqueue(rdma_dev->cleanup_wq);
+}
+
+static void sxe2_rdma_init_device(struct sxe2_rdma_device *rdma_dev)
+{
+	rdma_dev->ibdev.node_type = RDMA_NODE_IB_CA;
+#ifdef UVERBS_CMD_MASK
+	kc_set_rdma_uverbs_cmd_mask(&rdma_dev->ibdev);
+#endif
+	addrconf_addr_eui48((u8 *)&rdma_dev->ibdev.node_guid,
+			    rdma_dev->netdev->dev_addr);
+	rdma_dev->ibdev.phys_port_cnt = 1;
+	rdma_dev->ibdev.num_comp_vectors = (int)rdma_dev->rdma_func->ceqs_count;
+	rdma_dev->ibdev.dev.parent = &rdma_dev->rdma_func->pcidev->dev;
+
+#ifdef HAVE_NO_IB_DEVICE_OPS
+	sxe2_set_device_ops(&rdma_dev->ibdev);
+#else
+	ib_set_device_ops(&rdma_dev->ibdev, &sxe2_rdma_dev_ops);
+#endif
+}
+
+static void sxe2_port_ibevent(struct sxe2_rdma_device *rdma_dev)
+{
+	struct ib_event event;
+
+	event.device = &rdma_dev->ibdev;
+	event.element.port_num = 1;
+	event.event =
+		rdma_dev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+	ib_dispatch_event(&event);
+}
+
+static void sxe2_set_ib_devname(struct sxe2_rdma_device *rdma_dev)
+{
+	const char *name = rdma_dev->lag_mode ?
"sxe2rdma_bond%d" : "sxe2rdma%d"; + + strscpy(rdma_dev->ib_devname, name, sizeof(rdma_dev->ib_devname)); +} + +static int sxe2_ib_register_device(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + + sxe2_set_ib_devname(rdma_dev); + + sxe2_rdma_init_device(rdma_dev); +#ifdef NETDEV_SET_NOT_SUPPORT + dev_hold(rdma_dev->netdev); +#else + ret = ib_device_set_netdev(&rdma_dev->ibdev, rdma_dev->netdev, 1); +#endif + if (ret != 0) + goto end; + + strscpy(rdma_dev->ibdev.name, rdma_dev->ib_devname, + sizeof(rdma_dev->ibdev.name)); + + dma_set_max_seg_size(rdma_dev->rdma_func->hw.device, UINT_MAX); +#ifdef REGISTER_DEV_NEED_2_PARAMS + ret = ib_register_device(&rdma_dev->ibdev, rdma_dev->ib_devname); +#elif defined REGISTER_DEV_NEED_CHAR_PARAM + ret = ib_register_device(&rdma_dev->ibdev, NULL); +#else + ret = ib_register_device(&rdma_dev->ibdev, rdma_dev->ib_devname, + rdma_dev->rdma_func->hw.device); +#endif + if (ret != 0) + goto end; + + rdma_dev->iw_status = 1; + sxe2_port_ibevent(rdma_dev); + +end: + return ret; +} + +void sxe2_rdma_set_qos_info(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2p) +{ + u8 i; + u8 index; + + for (index = 0; index < QOS_MAX_QSET_NUM_PER_USER_PRI; index++) { + vsi->qos_rel_bw[index] = l2p[index].vsi_rel_bw; + vsi->qos_prio_type[index] = l2p[index].vsi_prio_type; + vsi->dscp_mode[index] = l2p[index].dscp_mode; + if (l2p[index].dscp_mode) + memcpy(vsi->dscp_map[index], l2p[index].dscp_map, + sizeof(vsi->dscp_map[index])); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + vsi->tc_print_warning[i] = true; + + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + vsi->qos[i].qset[index].traffic_class = l2p[index].up2tc[i]; + vsi->qos[i].rel_bw[index] = + l2p[index].tc_info[vsi->qos[i].qset[index].traffic_class].rel_bw; + vsi->qos[i].prio_type[index] = + l2p[index].tc_info[vsi->qos[i].qset[index].traffic_class].prio_type; + vsi->qos[i].valid = false; + } + } + +} + +static int sxe2_rdma_fill_device_info(struct sxe2_rdma_device *rdma_dev, + struct aux_core_dev_info *cdev_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + rdma_func->ctx_dev.hw = &rdma_func->hw; + rdma_func->rdma_dev = rdma_dev; + rdma_func->cdev = cdev_info; + rdma_func->hw.hw_addr = cdev_info->hw_addr; + rdma_func->pcidev = cdev_info->pdev; + rdma_func->hw.device = &rdma_func->pcidev->dev; + rdma_func->ftype = cdev_info->ftype; + rdma_func->msix_count = cdev_info->msix_count; + rdma_func->msix_entries = cdev_info->msix_entries; + rdma_func->vfid_base = cdev_info->vfid_base; + rdma_func->ack_mode = 0; + rdma_func->scqe_break_moderation_en = 0; + rdma_func->log_ack_req_freq = 8; + rdma_func->aeq_pble_en = false; + rdma_func->hygon_cpu_en = hygon_cpu_en; + rdma_func->app_mod_all_flush = FUNCTION_ENABLE; + rdma_func->en_rem_endpoint_trk = FUNCTION_DISABLE; + rdma_func->oi = FUNCTION_DISABLE; + rdma_func->pf_cnt = cdev_info->pf_cnt; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + rdma_func->inject_qos.apply_qset_err_code = 0; + rdma_func->inject_qos.qp_bind_qset_err_code = 0; + rdma_func->inject_qos.release_qset_err_code = 0; + rdma_func->inject_sleep_time = inject_sleep_time; +#endif + ret = sxe2_rdma_vchnl_init(rdma_dev, cdev_info, &rdma_func->rdma_ver); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("probe:virtual channel init err ret=%d\n", + ret); + goto end; + } + rdma_func->pf_id = cdev_info->pf_id; + if (!cdev_info->ftype) { + rdma_func->gen_ops.register_qsets = sxe2_qos_lan_register_qsets; + rdma_func->gen_ops.unregister_qsets = + 
sxe2_qos_lan_unregister_qsets; + } + + rdma_func->default_vsi.vsi_idx = cdev_info->vport_id; + rdma_func->protocol_used = SXE2_ROCE_PROTOCOL_ONLY; + rdma_func->rsrc_profile = SXE2_RCMS_PROFILE_DEFAULT; + + rdma_func->gen_ops.request_reset = sxe2_rdma_request_reset; + sxe2_rdma_set_func_user_cfg_params(rdma_func); + sxe2_rdma_init_cc_params(rdma_func); + mutex_init(&rdma_dev->ah_tbl_lock); + + rdma_dev->lag_mode = cdev_info->bond_mode; + DRV_RDMA_LOG_DEV_DEBUG("get lag_mode %d\n", rdma_dev->lag_mode); + if (rdma_dev->lag_mode) { + rcu_read_lock(); + rdma_dev->netdev = + netdev_master_upper_dev_get_rcu(cdev_info->netdev); + rcu_read_unlock(); + } + + if (!rdma_dev->netdev) { + rdma_dev->netdev = cdev_info->netdev; + DRV_RDMA_LOG_DEV_DEBUG( + "get netdev from cdev: rdma_pf_bitmap %#x\n", + cdev_info->rdma_pf_bitmap); + } + rdma_dev->vsi_num = cdev_info->vport_id; + rdma_dev->roce_cwnd = SXE2_ROCE_CWND_DEFAULT; + rdma_dev->roce_ackcreds = SXE2_ROCE_ACKCREDS_DEFAULT; + rdma_dev->rcv_wnd = SXE2_CM_DEFAULT_RCV_WND_SCALED; + rdma_dev->rcv_wscale = SXE2_CM_DEFAULT_RCV_WND_SCALE; + rdma_dev->kernel_llwqe_mode = 0; + rdma_dev->roce_dcqcn_en = rdma_dev->rdma_func->cc_params.dcqcn_enable; + rdma_dev->roce_mode = true; + +end: + return ret; +} + +static void sxe2_init_dflt_pkey(struct sxe2_rdma_ctx_dev *dev, u32 pf_id) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + union sxe2_rdma_cfg_pkey_bar cfg_pkey_bar; + u32 __iomem *addr; + int i; + + cfg_pkey_bar.bar_val = SXE2_PEKY_REG_INVALID_VAL; + cfg_pkey_bar.pkey_bar.pkey = SXE2_DEFAULT_PKEY_VAL; + cfg_pkey_bar.pkey_bar.pkey_vld = true; + for (i = 0; i < SXE2_MAX_PORT_CNT; i++) { + cfg_pkey_bar.pkey_bar.pkey_port = (u32)i; + addr = SXE2_PKEY_TBLE_BAR_ADDR(dev->hw_regs[RDMA_CONFIG_PKEY], + i); + SXE2_BAR_WRITE_32(cfg_pkey_bar.bar_val, addr); + } + + for (i = SXE2_MAX_PORT_CNT; i < SXE2_MAX_PKEY_CNT; i++) { + cfg_pkey_bar.pkey_bar.pkey_port = pf_id; + addr = SXE2_PKEY_TBLE_BAR_ADDR(dev->hw_regs[RDMA_CONFIG_PKEY], + i); + SXE2_BAR_WRITE_32(cfg_pkey_bar.bar_val, addr); + } + DRV_RDMA_LOG_DEV_DEBUG("probe:config pkey port val=%u\n", + cfg_pkey_bar.bar_val); +} + +static void sxe2_invalid_dflt_pkey(struct sxe2_rdma_ctx_dev *dev) +{ + union sxe2_rdma_cfg_pkey_bar cfg_pkey_bar; + u32 __iomem *addr; + int i; + + cfg_pkey_bar.bar_val = SXE2_PEKY_REG_INVALID_VAL; + + for (i = 0; i < SXE2_MAX_PKEY_CNT; i++) { + addr = SXE2_PKEY_TBLE_BAR_ADDR(dev->hw_regs[RDMA_CONFIG_PKEY], + i); + SXE2_BAR_WRITE_32(cfg_pkey_bar.bar_val, addr); + } +} + +bool sxe2_get_hw_rsrc_clean_flag(struct sxe2_rdma_ctx_dev *dev) +{ + union sxe2_rdma_cfg_pkey_bar cfg_pkey_bar; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + bool ret = false; + + cfg_pkey_bar.bar_val = SXE2_BAR_READ_32(dev->hw_regs[RDMA_CONFIG_PKEY]); + if (cfg_pkey_bar.bar_val == SXE2_BAR_REG_AUTO_RESP_VAL) { + DRV_RDMA_LOG_DEV_DEBUG( + "probe:pcie start automated response hw rsrc clean\n"); + ret = true; + } else if (!dev->privileged && + cfg_pkey_bar.bar_val == SXE2_BAR_REG_INVALID_VAL) { + DRV_RDMA_LOG_DEV_DEBUG("probe:hw rsrc clean\n"); + ret = true; + } + if (cfg_pkey_bar.pkey_bar.pkey_vld == false) { + DRV_RDMA_LOG_DEV_DEBUG("probe:hw rsrc clean\n"); + ret = true; + } else { + DRV_RDMA_LOG_DEV_DEBUG("probe:hw rsrc not clean\n"); + } + + if (ret && !rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEV_WARN("func reset status=[%d] and resource dead!\n", + rdma_dev->rdma_func->reset); + rdma_dev->rdma_func->reset = true; + } + + return ret; +} + +int sxe2_rdma_setup_init_state(struct sxe2_rdma_device *rdma_dev) 
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+
+	ret = sxe2_rdma_save_msix_info(rdma_func);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("probe:save msix info err ret=%d\n", ret);
+		goto end;
+	}
+	rdma_func->obj_mem.size = SXE2_RDMA_MCQ_HW_PAGE_SIZE;
+	rdma_func->obj_mem.va =
+		dma_alloc_coherent(rdma_func->hw.device,
+				   rdma_func->obj_mem.size,
+				   &rdma_func->obj_mem.pa, GFP_KERNEL);
+	if (!rdma_func->obj_mem.va) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("probe:alloc obj mem err ret=%d\n", ret);
+		goto clean_msixtbl;
+	}
+
+	rdma_func->obj_next = rdma_func->obj_mem;
+	ret = sxe2_rdma_initialize_dev(rdma_func);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("probe:initialize dev err ret=%d\n", ret);
+		goto clean_obj_mem;
+	}
+	sxe2_init_dflt_pkey(&rdma_func->ctx_dev, rdma_func->pf_id);
+	goto end;
+
+clean_obj_mem:
+	dma_free_coherent(rdma_func->hw.device, rdma_func->obj_mem.size,
+			  rdma_func->obj_mem.va, rdma_func->obj_mem.pa);
+	rdma_func->obj_mem.va = NULL;
+clean_msixtbl:
+	kfree(rdma_func->sxe2_msixtbl);
+	rdma_func->sxe2_msixtbl = NULL;
+end:
+	return ret;
+}
+
+static int sxe2_rdma_init_vsi_ctx(struct sxe2_rdma_device *rdma_dev)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	struct aux_core_dev_info *cdev_info =
+		(struct aux_core_dev_info *)rdma_func->cdev;
+	struct sxe2_rdma_l2params l2params[QOS_MAX_QSET_NUM_PER_USER_PRI] = {0};
+	struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev;
+	struct sxe2_rdma_ctx_vsi *vsi = &rdma_dev->vsi;
+	u8 i;
+
+	l2params[0].mtu = (u16)rdma_dev->netdev->mtu;
+	sxe2_rdma_fill_qos_info(l2params, cdev_info->qos_info);
+	vsi->dev = dev;
+	vsi->back_vsi = rdma_dev;
+	vsi->register_qsets = rdma_func->gen_ops.register_qsets;
+	vsi->unregister_qsets = rdma_func->gen_ops.unregister_qsets;
+	vsi->mtu = l2params[0].mtu;
+	vsi->exception_lan_q = 2;
+	vsi->vsi_idx = rdma_dev->vsi_num;
+	vsi->lag_aa = rdma_dev->lag_mode == SXE2_LAG_ACTIVE_ACTIVE;
+	vsi->lag_backup = rdma_dev->lag_mode == SXE2_LAG_ACTIVE_PASSIVE;
+	atomic_set(&vsi->port1_qp_cnt, 0);
+	atomic_set(&vsi->port2_qp_cnt, 0);
+
+	if (vsi->lag_aa && !rdma_func->ftype)
+		rdma_func->max_qsets = SXE2_PF_MAX_QSET_CNT;
+	else
+		rdma_func->max_qsets = SXE2_PF_MAX_QSET_CNT_NO_LAG_AA;
+
+	vsi->vm_vf_type = rdma_func->ftype ?
SXE2_VF_TYPE : SXE2_PF_TYPE; + sxe2_rdma_set_qos_info(vsi, l2params); + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + mutex_init(&vsi->qos[i].qos_mutex); + INIT_LIST_HEAD(&vsi->qos[i].qset[0].qp_list); + INIT_LIST_HEAD(&vsi->qos[i].qset[1].qp_list); + } + + ret = drv_rdma_debug_qos_add(rdma_dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: failed adding qos to debug file system ret=%d\n", + ret); + } +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + ret = drv_rdma_qos_err_code_inject_add(rdma_dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: failed adding qos inject errcode to debug file system ret=%d\n", + ret); + } +#endif + + return ret; +} + +static void sxe2_rdma_del_vsi_ctx(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_ctx_vsi *vsi = &rdma_dev->vsi; + u8 i; + + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) + mutex_destroy(&vsi->qos[i].qos_mutex); +} + +static void sxe2_rdma_del_init_mem(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + sxe2_invalid_dflt_pkey(&rdma_func->ctx_dev); + if (!rdma_func->ctx_dev.privileged) + sxe2_vchnl_req_put_rcms_fcn(&rdma_func->ctx_dev); + + if (rdma_func->mem_rsrc != NULL) { + vfree(rdma_func->mem_rsrc); + rdma_func->mem_rsrc = NULL; + } + + if (rdma_func->obj_mem.va != NULL) { + dma_free_coherent(rdma_func->hw.device, rdma_func->obj_mem.size, + rdma_func->obj_mem.va, rdma_func->obj_mem.pa); + rdma_func->obj_mem.va = NULL; + } + if (rdma_func->ceqlist != NULL) { + kfree(rdma_func->ceqlist); + rdma_func->ceqlist = NULL; + } + if (rdma_func->sxe2_msixtbl != NULL) { + kfree(rdma_func->sxe2_msixtbl); + rdma_func->sxe2_msixtbl = NULL; + } + if (rdma_func->rcms_info_mem != NULL) { + kfree(rdma_func->rcms_info_mem); + rdma_func->rcms_info_mem = NULL; + } +} +static void sxe2_ib_unregister_device(struct sxe2_rdma_device *rdma_dev) +{ + rdma_dev->iw_status = 0; + DRV_RDMA_LOG_DEV_DEBUG("remove:start unregister device\n"); + sxe2_port_ibevent(rdma_dev); + + ib_unregister_device(&rdma_dev->ibdev); +#ifdef NETDEV_SET_NOT_SUPPORT + dev_put(rdma_dev->netdev); +#endif + DRV_RDMA_LOG_DEV_DEBUG("remove:unregister device finish\n"); +} + +static void sxe2_gid_change_event(struct ib_device *ibdev) +{ + struct ib_event ib_event; + + ib_event.event = IB_EVENT_GID_CHANGE; + ib_event.device = ibdev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); +} + +static int sxe2_khandle_inetaddr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *real_dev, *netdev = ifa->ifa_dev->dev; + struct sxe2_rdma_device *rdma_dev; + u32 local_ipaddr[4] = {}; + int ret; + + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + + rdma_dev = container_of(notifier, struct sxe2_rdma_device, + nb_inetaddr_event); + if (rdma_dev->netdev != real_dev) { + ret = NOTIFY_DONE; + goto end; + } + + local_ipaddr[0] = ntohl(ifa->ifa_address); + DRV_RDMA_LOG_DEV_INFO( + "DEV: netdev %s event %lu local_ip=%pI4 MAC=%pM\n", + netdev_name(netdev), event, &local_ipaddr, netdev->dev_addr); + switch (event) { + case NETDEV_DOWN: + sxe2_gid_change_event(&rdma_dev->ibdev); + break; + case NETDEV_UP: + case NETDEV_CHANGEADDR: + sxe2_gid_change_event(&rdma_dev->ibdev); + break; + default: + break; + } + + ret = NOTIFY_DONE; + +end: + return ret; +} + +static int sxe2_khandle_inet6addr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr 
*ifa = (struct inet6_ifaddr *)ptr;
+	struct net_device *real_dev = NULL;
+	struct net_device *netdev = NULL;
+	struct sxe2_rdma_device *rdma_dev;
+	u32 local_ipaddr6[4];
+	int ret;
+
+	netdev = ifa->idev->dev;
+	real_dev = rdma_vlan_dev_real_dev(netdev);
+	if (!real_dev)
+		real_dev = netdev;
+
+	rdma_dev = container_of(notifier, struct sxe2_rdma_device,
+				nb_inet6addr_event);
+	if (rdma_dev->netdev != real_dev) {
+		ret = NOTIFY_DONE;
+		goto end;
+	}
+
+	sxe2_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+	DRV_RDMA_LOG_DEV_INFO(
+		"DEV: netdev %s event %lu local_ip=%pI6 MAC=%pM\n",
+		netdev_name(netdev), event, local_ipaddr6, netdev->dev_addr);
+
+	switch (event) {
+	case NETDEV_DOWN:
+		sxe2_gid_change_event(&rdma_dev->ibdev);
+		break;
+	case NETDEV_UP:
+	case NETDEV_CHANGEADDR:
+		sxe2_gid_change_event(&rdma_dev->ibdev);
+		break;
+	default:
+		break;
+	}
+
+	ret = NOTIFY_DONE;
+
+end:
+	return ret;
+}
+
+static int sxe2_khandle_net_event(struct notifier_block *notifier,
+				  unsigned long event, void *ptr)
+{
+	struct neighbour *neigh = (struct neighbour *)ptr;
+	struct net_device *real_dev = NULL;
+	struct net_device *netdev = (struct net_device *)neigh->dev;
+	struct sxe2_rdma_device *rdma_dev = NULL;
+	__be32 *p;
+	u32 local_ipaddr[4] = {};
+	int ret;
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		real_dev = rdma_vlan_dev_real_dev(netdev);
+		if (!real_dev)
+			real_dev = netdev;
+
+		rdma_dev = container_of(notifier, struct sxe2_rdma_device,
+					nb_net_event);
+		if (rdma_dev->netdev != real_dev) {
+			ret = NOTIFY_DONE;
+			goto end;
+		}
+
+		p = (__be32 *)neigh->primary_key;
+		if (neigh->tbl->family == AF_INET6)
+			sxe2_copy_ip_ntohl(local_ipaddr, p);
+		else
+			local_ipaddr[0] = ntohl(*p);
+
+		DRV_RDMA_LOG_DEV_INFO(
+			"DEV: netdev %s state %d local_ip=%pI4 MAC=%pM\n",
+			netdev_name(rdma_dev->netdev), neigh->nud_state,
+			local_ipaddr, neigh->ha);
+		break;
+	default:
+		break;
+	}
+
+	ret = NOTIFY_DONE;
+
+end:
+	return ret;
+}
+
+static int sxe2_khandle_netdevice_event(struct notifier_block *notifier,
+					unsigned long event, void *ptr)
+{
+	struct sxe2_rdma_device *rdma_dev = NULL;
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	int ret;
+
+	rdma_dev = container_of(notifier, struct sxe2_rdma_device,
+				nb_netdevice_event);
+	if (rdma_dev->netdev != netdev) {
+		ret = NOTIFY_DONE;
+		goto end;
+	}
+
+	rdma_dev->iw_status = 1;
+	switch (event) {
+	case NETDEV_DOWN:
+		rdma_dev->iw_status = 0;
+		DRV_RDMA_LOG_DEV_INFO("DEV: netdev %s event %lu status %u\n",
+				      netdev_name(rdma_dev->netdev), event,
+				      rdma_dev->iw_status);
+		fallthrough;
+	case NETDEV_UP:
+		DRV_RDMA_LOG_DEV_INFO("DEV: netdev %s event %lu status %u\n",
+				      netdev_name(rdma_dev->netdev), event,
+				      rdma_dev->iw_status);
+		sxe2_port_ibevent(rdma_dev);
+		break;
+	default:
+		break;
+	}
+
+	ret = NOTIFY_DONE;
+
+end:
+	return ret;
+}
+
+void sxe2_kunregister_notifiers(struct sxe2_rdma_device *rdma_dev)
+{
+	int ret;
+
+	ret = unregister_netdevice_notifier(&rdma_dev->nb_netdevice_event);
+	if (ret)
+		DRV_RDMA_LOG_DEV_ERR("unregister_netdevice_notifier failed, ret %d\n", ret);
+	ret = unregister_netevent_notifier(&rdma_dev->nb_net_event);
+	if (ret)
+		DRV_RDMA_LOG_DEV_ERR("unregister_netevent_notifier failed, ret %d\n", ret);
+	ret = unregister_inet6addr_notifier(&rdma_dev->nb_inet6addr_event);
+	if (ret)
+		DRV_RDMA_LOG_DEV_ERR("unregister_inet6addr_notifier failed, ret %d\n", ret);
+	ret = unregister_inetaddr_notifier(&rdma_dev->nb_inetaddr_event);
+	if (ret)
+		DRV_RDMA_LOG_DEV_ERR("unregister_inetaddr_notifier failed, ret %d\n",
ret); +} + +static int sxe2_kregister_notifiers(struct sxe2_rdma_device *rdma_dev) +{ + int ret; + int tmp_ret; + + rdma_dev->nb_netdevice_event.notifier_call = + sxe2_khandle_netdevice_event; + ret = register_netdevice_notifier(&rdma_dev->nb_netdevice_event); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "register_netdevice_notifier failed, ret %d\n", ret); + return ret; + } + + rdma_dev->nb_net_event.notifier_call = sxe2_khandle_net_event; + ret = register_netevent_notifier(&rdma_dev->nb_net_event); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "register_netevent_notifier failed, ret %d\n", ret); + goto netevent_error; + } + + rdma_dev->nb_inet6addr_event.notifier_call = + sxe2_khandle_inet6addr_event; + ret = register_inet6addr_notifier(&rdma_dev->nb_inet6addr_event); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "register_inet6addr_notifier failed, ret %d\n", ret); + goto inet6addr_error; + } + + rdma_dev->nb_inetaddr_event.notifier_call = sxe2_khandle_inetaddr_event; + ret = register_inetaddr_notifier(&rdma_dev->nb_inetaddr_event); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "register_inetaddr_notifier failed, ret %d\n", ret); + goto inetaddr_error; + } + + return 0; + +inetaddr_error: + tmp_ret = unregister_inet6addr_notifier(&rdma_dev->nb_inet6addr_event); + if (tmp_ret) + DRV_RDMA_LOG_DEV_ERR("unregister_inet6addr_notifier failed, ret %d\n", tmp_ret); +inet6addr_error: + tmp_ret = unregister_netevent_notifier(&rdma_dev->nb_net_event); + if (tmp_ret) + DRV_RDMA_LOG_DEV_ERR("unregister_netevent_notifier failed, ret %d\n", tmp_ret); +netevent_error: + tmp_ret = unregister_netdevice_notifier(&rdma_dev->nb_netdevice_event); + if (tmp_ret) + DRV_RDMA_LOG_DEV_ERR("unregister_netdevice_notifier failed, ret %d\n", tmp_ret); + return ret; +} + +static int sxe2_pf_func_table_init(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct sxe2_pf_func_table_init_info info; + + info.pf_id = cpu_to_le32(rdma_dev->rdma_func->pf_id); + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_PF_FUNC_TABLE_INIT, + (u8 *)&info, (u16)sizeof(info), + NULL, 0); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "probe:aq send pf func table init err ret=%d\n", ret); + goto end; + } + +end: + return ret; +} + +static const struct sxe2_rdma_profile drv_profile = { + STAGE_CREATE(SXE2_RDMA_STAGE_SETUP_INITINFO, sxe2_rdma_setup_init_state, + sxe2_rdma_del_init_mem), + STAGE_CREATE(SXE2_RDMA_STAGE_DEBUG, sxe2_rdma_dbg_pf_init, + sxe2_rdma_dgb_pf_exit), + STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_MQ, sxe2_kcreate_mq, + sxe2_kdestroy_mq), + STAGE_CREATE(SXE2_RDMA_STAGE_GET_FEATURES, sxe2_kget_rdma_features, + NULL), + STAGE_CREATE(SXE2_RDMA_STAGE_RCMS_SETUP, sxe2_rcms_setup, + sxe2_rcms_exit), + STAGE_CREATE(SXE2_RDMA_STAGE_PBLE, sxe2_pbl_init, sxe2_pbl_exit), + STAGE_CREATE(SXE2_RDMA_STAGE_HW_RSRC, sxe2_init_hw_rsrc, NULL), + STAGE_CREATE(SXE2_RDMA_STAGE_DB_INIT, sxe2_kinit_doorbell, + sxe2_kfree_doorbell), + STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_MCQ, sxe2_create_mcq, + sxe2_destroy_mcq), + STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_MCEQ, sxe2_setup_mceq, + sxe2_del_mceq), + STAGE_CREATE(SXE2_RDMA_STAGE_MQ_HDL, sxe2_kinit_mq_handler, + NULL), + STAGE_CREATE(SXE2_RDMA_STAGE_SET_ATTR, sxe2_kset_attr_from_fragcnt, + NULL), + STAGE_CREATE(SXE2_RDMA_STAGE_VSI, sxe2_rdma_init_vsi_ctx, + sxe2_rdma_del_vsi_ctx), + STAGE_CREATE(SXE2_RDMA_STAGE_VSI_STATS, sxe2_kinit_vsi_stats, + sxe2_kfree_vsi_stats), + STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_CEQS, sxe2_setup_ceqs, + sxe2_del_ceqs), + 
STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_AEQ, sxe2_setup_aeq, sxe2_del_aeq),
+	STAGE_CREATE(SXE2_RDMA_STAGE_RCRC_WQ, sxe2_init_rsrc_wq,
+		     sxe2_destroy_rsrc_wq),
+};
+
+static int sxe2_drv_add(struct sxe2_rdma_device *rdma_dev,
+			const struct sxe2_rdma_profile *profile)
+{
+	int ret = 0;
+	int i;
+
+	rdma_dev->profile = profile;
+
+	for (i = 0; i < SXE2_RDMA_STAGE_MAX; i++) {
+		if (profile->stage[i].init) {
+			ret = profile->stage[i].init(rdma_dev);
+			if (ret) {
+				DRV_RDMA_LOG_DEV_ERR(
+					"SXE2 DRV add fail in stage:%d, ret %d\n",
+					i, ret);
+				goto err_out;
+			}
+		}
+	}
+
+	ret = drv_rdma_debug_cc_add(rdma_dev);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_ERROR_BDF(
+			"ERR: failed adding cc to debug file system ret=%d\n",
+			ret);
+		goto end;
+	}
+	ret = drv_rdma_debug_common_add(rdma_dev);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"ERR: failed adding common to debug file system ret=%d\n",
+			ret);
+		goto end;
+	}
+	goto end;
+err_out:
+	while (i) {
+		i--;
+		if (profile->stage[i].cleanup)
+			profile->stage[i].cleanup(rdma_dev);
+	}
+
+end:
+	return ret;
+}
+
+static void sxe2_drv_remove(struct sxe2_rdma_device *rdma_dev,
+			    const struct sxe2_rdma_profile *profile, int stage)
+{
+	DRV_RDMA_LOG_DEV_DEBUG("remove: remove stage start\n");
+	while (stage) {
+		stage--;
+		if (profile->stage[stage].cleanup)
+			profile->stage[stage].cleanup(rdma_dev);
+	}
+}
+
+#ifdef SXE2_CFG_DEBUG
+static void sxe2_rdma_probe_init_reset_info(struct sxe2_rdma_device *rdma_dev,
+					    struct aux_core_dev_info *cdev_info)
+{
+	int i;
+
+	rdma_dev->reset_func_info = NULL;
+	mutex_lock(&g_reset_debug.reset_debug_mutex);
+	if (!cdev_info->ftype) {
+		rdma_dev->reset_func_info = &(
+			g_reset_debug.pf_info[cdev_info->pf_id].pf_reset_info);
+		if (!rdma_dev->reset_func_info->valid) {
+			memcpy(rdma_dev->reset_func_info->bdf, rdma_dev->bdf,
+			       MAX_BDF_SIZE);
+			rdma_dev->reset_func_info->valid = true;
+			rdma_dev->reset_func_info->reset_cnt = 0;
+			rdma_dev->reset_func_info->reset_info_idx = 0;
+		}
+		DRV_RDMA_LOG_DEV_DEBUG(
+			"probe:pf reset cnt=%u reset info idx=%u bdf=%s reset info ptr=%p\n",
+			rdma_dev->reset_func_info->reset_cnt,
+			rdma_dev->reset_func_info->reset_info_idx,
+			rdma_dev->reset_func_info->bdf,
+			rdma_dev->reset_func_info);
+	} else {
+		for (i = 0; i < MAX_VF_FUNC_CNT; i++) {
+			if (g_reset_debug.pf_info[cdev_info->pf_id]
+				    .vf_info[i]
+				    .valid &&
+			    (strcmp(g_reset_debug.pf_info[cdev_info->pf_id]
+					    .vf_info[i]
+					    .bdf,
+				    rdma_dev->bdf) == 0)) {
+				rdma_dev->reset_func_info =
+					&g_reset_debug.pf_info[cdev_info->pf_id]
+						 .vf_info[i];
+				DRV_RDMA_LOG_DEV_DEBUG(
+					"probe:vf reset info pf id=%u vf idx=%u\n",
+					cdev_info->pf_id, i);
+				DRV_RDMA_LOG_DEV_DEBUG(
+					"probe:vf reset cnt=%u reset info\n"
+					"\tidx=%u bdf=%s reset info ptr=%p\n",
+					rdma_dev->reset_func_info->reset_cnt,
+					rdma_dev->reset_func_info
+						->reset_info_idx,
+					rdma_dev->reset_func_info->bdf,
+					rdma_dev->reset_func_info);
+				break;
+			}
+		}
+
+		if (!rdma_dev->reset_func_info) {
+			for (i = 0; i < MAX_VF_FUNC_CNT; i++) {
+				if (!g_reset_debug.pf_info[cdev_info->pf_id]
+					     .vf_info[i]
+					     .valid) {
+					rdma_dev->reset_func_info =
+						&g_reset_debug
+							 .pf_info[cdev_info->pf_id]
+							 .vf_info[i];
+					memcpy(rdma_dev->reset_func_info->bdf,
+					       rdma_dev->bdf, MAX_BDF_SIZE);
+					rdma_dev->reset_func_info->valid = true;
+					rdma_dev->reset_func_info->reset_cnt =
+						0;
+					rdma_dev->reset_func_info
+						->reset_info_idx = 0;
+					DRV_RDMA_LOG_DEV_DEBUG(
+						"probe:vf reset info pf id=%u vf idx=%u\n",
+						cdev_info->pf_id, i);
+					DRV_RDMA_LOG_DEV_DEBUG(
+						"probe:vf reset cnt=%u reset info idx=%u\n"
+						"\tbdf=%s reset info ptr=%p\n",
+						rdma_dev->reset_func_info
+							->reset_cnt,
+						rdma_dev->reset_func_info
+							->reset_info_idx,
+						rdma_dev->reset_func_info->bdf,
+						rdma_dev->reset_func_info);
+					break;
+				}
+			}
+		}
+	}
+	mutex_unlock(&g_reset_debug.reset_debug_mutex);
+}
+#endif
+
+static int sxe2_rdma_probe_init(struct auxiliary_device *adev,
+				struct aux_core_dev_info *cdev_info,
+				struct sxe2_rdma_device **rdma_dev_o)
+{
+	int ret = 0;
+#ifdef NEED_ONE_PARAM_ALLOC_DEVICE
+	struct ib_device ibdev;
+#endif
+
+	struct sxe2_rdma_device *rdma_dev;
+	u32 value = 0;
+	u16 major_value = 0;
+	u16 minor_value = 0;
+	u32 __iomem *firmware_version;
+
+	if (!cdev_info) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_ERROR("cdev info is null\n");
+		goto end;
+	}
+
+	if (!cdev_info->ftype) {
+		firmware_version = (u32 __iomem *)(cdev_info->hw_addr + SXE2_TOP_REG_COMPAT_OFFSET);
+		value = SXE2_BAR_READ_32(firmware_version);
+		major_value = RS_32_1(value, 16) & 0xffff;
+		minor_value = value & 0xffff;
+
+		if (major_value != SXE2_FW_COMP_MAJOR_VER) {
+			DRV_RDMA_LOG_ERROR("firmware major version %d mismatch, rdma %d\n",
+					   major_value,
+					   SXE2_FW_COMP_MAJOR_VER);
+			ret = -EINVAL;
+			goto end;
+		}
+
+		if (minor_value != SXE2_FW_COMP_MINOR_VER) {
+			DRV_RDMA_LOG_WARN("firmware minor version %d mismatch, rdma %d\n",
+					  minor_value,
+					  SXE2_FW_COMP_MINOR_VER);
+		}
+	}
+
+	if (strcmp(cdev_info->drv_ver, SXE2_VERSION) != 0) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_ERROR("RoCE cannot be enabled because of a driver version mismatch\n");
+		DRV_RDMA_LOG_ERROR("sxe driver version [%s]\n"
+				   "\tRoCE driver version [%s], please ensure the versions match!\n",
+				   cdev_info->drv_ver, SXE2_VERSION);
+		goto end;
+	}
+
+	DRV_RDMA_LOG_INFO(
+		"probe: cdev_info:%p, pdev bus number:%d,\n"
+		"netdev:%s\n",
+		cdev_info, cdev_info->pdev->bus->number,
+		netdev_name(cdev_info->netdev));
+
+#ifdef NEED_ONE_PARAM_ALLOC_DEVICE
+	rdma_dev = sxe2_ib_alloc_device(sxe2_rdma_device, ibdev);
+#else
+	rdma_dev = ib_alloc_device(sxe2_rdma_device, ibdev);
+#endif
+	if (!rdma_dev) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_ERROR("rdma device alloc failed. ret:%d\n", ret);
+		goto end;
+	}
+	snprintf(rdma_dev->bdf, sizeof(rdma_dev->bdf), "%02x:%02x.%x",
+		 cdev_info->pdev->bus->number, PCI_SLOT(cdev_info->pdev->devfn),
+		 PCI_FUNC(cdev_info->pdev->devfn));
+
+	if (!cdev_info->ftype) {
+		rdma_dev->fw_ver.major = major_value;
+		rdma_dev->fw_ver.minor = minor_value;
+	}
+	rdma_dev->aux_dev = adev;
+#ifdef SXE2_CFG_DEBUG
+	sxe2_rdma_probe_init_reset_info(rdma_dev, cdev_info);
+#endif
+	rdma_dev->rdma_func =
+		kzalloc(sizeof(struct sxe2_rdma_pci_f), GFP_KERNEL);
+	if (!rdma_dev->rdma_func) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("rdma device rf alloc failed. ret:%d\n",
+				     ret);
+		goto err_rf;
+	}
+	ret = sxe2_rdma_fill_device_info(rdma_dev, cdev_info);
+	if (ret != 0) {
+		DRV_RDMA_LOG_DEV_ERR("rdma device info fill err. 
ret:%d\n", + ret); + goto err_fill_devinfo; + } + *rdma_dev_o = rdma_dev; + goto end; +err_fill_devinfo: + kfree(rdma_dev->rdma_func); +err_rf: + ib_dealloc_device(&rdma_dev->ibdev); +end: + return ret; +} + +static int sxe2_rdma_probe_notify(struct aux_core_dev_info *cdev_info, u32 status) +{ + struct sxe2_rdma_notify_status_info rdma_status; + int ret = 0; + + rdma_status.rdma_status = status; + ret = sxe2_rdma_adminq_send( + cdev_info, SXE2_CMD_RDMA_NOTIFY_STATUS, + (u8 *)&rdma_status, (u16)sizeof(rdma_status), + NULL, 0); + return ret; +} + +static int sxe2_rdma_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct sxe2_auxiliary_device *aux_adev = + container_of(adev, struct sxe2_auxiliary_device, adev); + struct aux_core_dev_info *cdev_info = aux_adev->cdev_info; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_handler *hdl; + int ret = 0; + (void)id; + DRV_RDMA_LOG_INFO("rdma probe start\n"); + ret = sxe2_rdma_probe_init(adev, cdev_info, &rdma_dev); + if (ret) { + DRV_RDMA_LOG_INFO("rdma probe sxe2_rdma_probe_init error\n"); + goto end; + } + if (!rdma_dev->rdma_func->ftype) { + ret = sxe2_pf_func_table_init(rdma_dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pf %u func table init err ret:%d\n", + rdma_dev->rdma_func->pf_id, ret); + goto err_alloc_hdl; + } + } + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "rdma device handler alloc failed. ret:%d\n", ret); + goto err_alloc_hdl; + } + hdl->dev = rdma_dev; + rdma_dev->hdl = hdl; + sxe2_add_handler(hdl); + + atomic_set(&rdma_dev->rdma_func->aeq_created, 0); +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + ret = INJECT_INIT(rdma_dev->rdma_func); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF("inject init fail, ret %d\n", ret); + goto err_add; + } + ret = sxe2_drv_inject_reg(rdma_dev->rdma_func); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF("core inject reg fail, ret %d", ret); + goto err_inject_reg; + } +#endif + + ret = sxe2_drv_add(rdma_dev, &drv_profile); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rdma device add failed. ret:%d\n", ret); +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + goto err_inject_reg; +#else + goto err_add; +#endif + } + rdma_dev->vsi.lag_port_bitmap = cdev_info->rdma_pf_bitmap; + DRV_RDMA_LOG_DEV_DEBUG("rdma_pf_bitmap = 0x%x\n", + cdev_info->rdma_pf_bitmap); + + ret = sxe2_ib_register_device(rdma_dev); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("rdma device register failed. 
ret:%d\n", + ret); + goto err_reg_device; + } + ret = sxe2_kregister_notifiers(rdma_dev); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR( + "SXE2 DRV register notifiers failed, ret %d.\n", ret); + goto err_reg_notifiers; + } + + auxiliary_set_drvdata(adev, rdma_dev); +#ifdef SXE2_SUPPORT_CONFIGFS + ret = sxe2_rdma_create_configfs_subdir(rdma_dev->bdf, rdma_dev); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "create configfs subdir %s failed, ret %d\n", + rdma_dev->bdf, ret); + goto err_probe_notify; + } +#endif + if (rdma_dev->rdma_func->ctx_dev.privileged) { + ret = sxe2_rdma_probe_notify(cdev_info, SXE2_RDMA_PROBE); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("notify rdma probe failed, ret %d\n", ret); + goto err_remove_configfs; + } + cdev_info->ops->notify_rdma_load(cdev_info, true); + } + + DRV_RDMA_LOG_DEV_INFO("rdma probe success\n"); + goto end; +err_remove_configfs: +#ifdef SXE2_SUPPORT_CONFIGFS + sxe2_rdma_remove_configfs_subdir(rdma_dev->bdf); +err_probe_notify: +#endif + sxe2_kunregister_notifiers(rdma_dev); +err_reg_notifiers: + sxe2_ib_unregister_device(rdma_dev); +err_reg_device: + sxe2_drv_remove(rdma_dev, &drv_profile, SXE2_RDMA_STAGE_MAX); +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +err_inject_reg: + INJECT_UNINIT(rdma_dev->rdma_func); +#endif +err_add: + sxe2_del_handler(rdma_dev->hdl); + kfree(hdl); +err_alloc_hdl: + destroy_workqueue(rdma_dev->rdma_func->vchnl_wq); + kfree(rdma_dev->rdma_func); + ib_dealloc_device(&rdma_dev->ibdev); +end: + return ret; +} + +#ifdef HAVE_AUXILIARY_DRIVER_INT_REMOVE +static int sxe2_rdma_remove(struct auxiliary_device *adev) +#else +static void sxe2_rdma_remove(struct auxiliary_device *adev) +#endif +{ + struct sxe2_rdma_device *rdma_dev = auxiliary_get_drvdata(adev); + struct sxe2_auxiliary_device *aux_adev = + container_of(adev, struct sxe2_auxiliary_device, adev); + struct aux_core_dev_info *cdev_info = aux_adev->cdev_info; + + if (!rdma_dev) { + DRV_RDMA_LOG_ERROR( + "auxiliary device is not exist, skips shca_drv_ib_remove()\n"); + goto end; + } + DRV_RDMA_LOG_DEV_INFO("rdma remove start\n"); +#ifdef SXE2_SUPPORT_CONFIGFS + sxe2_rdma_remove_configfs_subdir(rdma_dev->bdf); +#endif + if (rdma_dev->rdma_func->ctx_dev.privileged) { + cdev_info->ops->notify_rdma_load(cdev_info, false); + sxe2_rdma_probe_notify(cdev_info, SXE2_RDMA_REMOVE); + sxe2_rdma_free_all_vf_rsrc(&rdma_dev->rdma_func->ctx_dev); + } + sxe2_kunregister_notifiers(rdma_dev); + + sxe2_ib_unregister_device(rdma_dev); + sxe2_drv_remove(rdma_dev, rdma_dev->profile, SXE2_RDMA_STAGE_MAX); + sxe2_del_handler(rdma_dev->hdl); + kfree(rdma_dev->hdl); + rdma_dev->hdl = NULL; + if (rdma_dev->rdma_func->vchnl_wq) + destroy_workqueue(rdma_dev->rdma_func->vchnl_wq); +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + INJECT_UNINIT(rdma_dev->rdma_func); +#endif + kfree(rdma_dev->rdma_func); + rdma_dev->rdma_func = NULL; + ib_dealloc_device(&rdma_dev->ibdev); + +end: +#ifdef HAVE_AUXILIARY_DRIVER_INT_REMOVE + return 0; +#else + return; +#endif +} + +static const struct auxiliary_device_id sxe2_auxiliary_id_table[] = { + { + .name = "sxe2.roce", + }, + { + .name = "sxe2vf.roce", + }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, sxe2_auxiliary_id_table); + +struct sxe2_auxiliary_drv sxe2_auxiliary_drv = { + .adrv = { + .id_table = sxe2_auxiliary_id_table, + .probe = sxe2_rdma_probe, + .remove = sxe2_rdma_remove, + }, + .aux_ops.event_handler = sxe2_aux_event_handler, + .aux_ops.vc_receive = sxe2_vchnl_receive, +}; + +struct mutex func_lock; + +static int __init sxe2_drv_init(void) 
+{ + int ret; + + mutex_init(&func_lock); + +#ifndef SXE2_CFG_RELEASE + ret = sxe2_log_init(false); + if (ret < 0) { + DRV_RDMA_LOG_PR_ERR("sxe2 log init fail.(err:%d)\n", ret); + goto destroy_lock; + } +#endif + + DRV_RDMA_LOG_INFO("%s start, version[%s], commit_id[%s],\n" + "\tbranch[%s], build_time[%s]\n", __func__, + SXE2_VERSION, SXE2_COMMIT_ID, + SXE2_BRANCH, SXE2_BUILD_TIME); + ret = sxe2_dbg_and_configs_init(); + if (ret) { + DRV_RDMA_LOG_ERROR("dbg and configs init failed. ret:%d\n", + ret); +#ifndef SXE2_CFG_RELEASE + goto free_log; +#else + goto destroy_lock; +#endif + } + + ret = auxiliary_driver_register(&sxe2_auxiliary_drv.adrv); + if (ret) { + DRV_RDMA_LOG_ERROR("driver register failed. ret:%d\n", ret); + goto dbg_config_exit; + } + goto end; + +dbg_config_exit: + sxe2_dbg_and_configs_exit(); +#ifndef SXE2_CFG_RELEASE +free_log: + sxe2_log_exit(); +#endif +destroy_lock: + mutex_destroy(&func_lock); +end: + return ret; +} + +static void __exit sxe2_drv_cleanup(void) +{ + auxiliary_driver_unregister(&sxe2_auxiliary_drv.adrv); + + sxe2_dbg_and_configs_exit(); + +#ifndef SXE2_CFG_RELEASE + sxe2_log_exit(); +#endif + mutex_destroy(&func_lock); +} + +MODULE_INFO(build_time, SXE2_BUILD_TIME); +MODULE_INFO(branch, SXE2_BRANCH); +MODULE_INFO(arch, SXE2_DRV_ARCH); +MODULE_INFO(commit_id, SXE2_COMMIT_ID); +MODULE_AUTHOR("SXE2"); +MODULE_DESCRIPTION("SXE2 RDMA Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(SXE2_VERSION); +MODULE_ALIAS("auxiliary:sxe2.rdma"); + +module_init(sxe2_drv_init); +module_exit(sxe2_drv_cleanup); diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.h new file mode 100644 index 0000000000000000000000000000000000000000..123ad5d8b09b0b6ac16897053965f200028312d3 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_main.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: sxe2_drv_main.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_DRV_MAIN_H__
+#define __SXE2_DRV_MAIN_H__
+
+#include "sxe2_compat.h"
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+#include "sxe2_drv_rdma_common.h"
+
+extern u32 g_sxe2_rdma_dmesg_level;
+extern struct list_head sxe2_handlers;
+extern spinlock_t sxe2_handler_lock;
+#define SXE2_RDMA_MCQ_HW_PAGE_SIZE 8192
+#define SXE2_SQ_RSVD 8
+#define SXE2_CPU_ID_GET_VENDOR_ID (0x0)
+#define SXE2_VENDOR_ID_SIZE (0x13)
+#define SXE2_MAX_PKEY_CNT (32)
+#define SXE2_DEFAULT_PKEY_VAL (0xFFFF)
+#define SXE2_PEKY_REG_INVALID_VAL (0x0)
+#define SXE2_BAR_REG_AUTO_RESP_VAL (0xFFFFFFFF)
+#define SXE2_BAR_REG_INVALID_VAL (0xDEADBEEF)
+#define SXE2_MAX_PORT_CNT (4)
+#define SXE2_LIMITS_SEL_DEFAULT 3
+#define SXE2_LIMITS_SEL_MAX 7
+
+#define SXE2_FRAGCNT_LIMIT_DEFAULT 6
+#define SXE2_FRAGCNT_LIMIT_MIN 2
+#define SXE2_FRAGCNT_LIMIT_MAX 13
+
+#define SXE2_RCMS_MODE_2M 1
+#define SXE2_RCMS_MODE_4K 2
+
+#define SXE2_PKEY_TBLE_BAR_ADDR(base_addr, idx) (base_addr + idx)
+
+#define SXE2_MAX_QP_WRS(max_quanta_per_wr) \
+	((SXE2_QP_SW_MAX_WQ_QUANTA - SXE2_SQ_RSVD) / (max_quanta_per_wr))
+#define SXE2_CC_DCQCN_T_DEFAULT 120
+#define SXE2_CC_DCQCN_T_MAX 0xFFFF
+#define SXE2_CC_DCQCN_T_MIN 1
+
+#define SXE2_CC_DCQCN_B_DEFAULT 131072
+#define SXE2_CC_DCQCN_B_MAX 0xFFFFFF
+#define SXE2_CC_DCQCN_B_MIN 0
+
+#define SXE2_CC_DCQCN_F_DEFAULT 5
+#define SXE2_CC_DCQCN_F_MAX 15
+#define SXE2_CC_DCQCN_F_MIN 0
+
+#define SXE2_CC_DCQCN_TIMELY_RAI_DEFAULT 18
+#define SXE2_CC_DCQCN_TIMELY_RAI_MAX 100
+#define SXE2_CC_DCQCN_TIMELY_RAI_MIN 2
+
+#define SXE2_CC_DCQCN_RHAI_DEFAULT 34
+#define SXE2_CC_DCQCN_RHAI_MAX 100
+#define SXE2_CC_DCQCN_RHAI_MIN 2
+
+#define SXE2_CC_DCQCN_RREDUCE_MPERIOD_DEFAULT 60
+#define SXE2_CC_DCQCN_RREDUCE_MPERIOD_MAX 0xFFFF
+#define SXE2_CC_DCQCN_RREDUCE_MPERIOD_MIN 1
+
+#define SXE2_CC_DCQCN_MIN_DEC_FACTOR_DEFAULT 2
+#define SXE2_CC_DCQCN_MIN_DEC_FACTOR_MAX 100
+#define SXE2_CC_DCQCN_MIN_DEC_FACTOR_MIN 2
+
+#define SXE2_CC_DCQCN_MIN_RATE_DEFAULT 2
+#define SXE2_CC_DCQCN_MIN_RATE_MAX 100
+#define SXE2_CC_DCQCN_MIN_RATE_MIN 2
+#define SXE2_CC_DCQCN_K_VAL 1280
+#define SXE2_CC_DCQCN_BC_VAL 0
+#define SXE2_CC_DCQCN_TC_VAL 0
+#define SXE2_CC_DCQCN_G_VAL 62
+#define SXE2_CC_DCQCN_RT_VAL 0
+#define SXE2_CC_DCQCN_RC_VAL 49
+#define SXE2_CC_DCQCN_ALPHA_VAL 0
+#define SXE2_CC_DCQCN_RREDUCE_NEXT_NODE_INFO_VAL 0
+#define SXE2_CC_DCQCN_DECREASE_RATE_VALID_VAL 1
+#define SXE2_CC_DCQCN_T_NEXT_NODE_INFO_VAL 0
+#define SXE2_CC_DCQCN_BYTE_COUNTER_VAL 0
+#define SXE2_CC_TIMELY_MIN_RTT_DEFAULT 500
+#define SXE2_CC_TIMELY_MIN_RTT_MAX 0xFFFF
+#define SXE2_CC_TIMELY_MIN_RTT_MIN 1
+
+#define SXE2_CC_TIMELY_TLOW_DEFAULT 300
+#define SXE2_CC_TIMELY_TLOW_MAX 0xFFFF
+#define SXE2_CC_TIMELY_TLOW_MIN 1
+
+#define SXE2_CC_TIMELY_THIGH_DEFAULT 1000
+#define SXE2_CC_TIMELY_THIGH_MAX 0xFFFF
+#define SXE2_CC_TIMELY_THIGH_MIN 1
+#define SXE2_CC_TIMELY_PRE_RTT_VAL 0
+#define SXE2_CC_TIMELY_BETA_VAL 620
+#define SXE2_CC_TIMELY_ALPHA_VAL 500
+#define SXE2_CC_TIMELY_RTT_DIFF_VAL 0
+#define RDMA_DRIVER_SXE2 20
+struct sxe2_rdma_device_init_info {
+	u64 fpm_query_buf_pa;
+	u64 fpm_commit_buf_pa;
+	__le32 *fpm_query_buf;
+	__le32 *fpm_commit_buf;
+	struct sxe2_rdma_hw *hw;
+	void __iomem *bar0;
+	u16 max_vfs;
+	u16 rcms_fn_id;
+};
+
+struct sxe2_pf_func_table_init_info {
+	u32 pf_id;
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+	.stage[_stage] = { .init = _init, .cleanup = _cleanup }
+
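+/*
+ * Illustrative sketch (added commentary, not vendor code): STAGE_CREATE()
+ * emits one designated initializer for the stage[] table of
+ * struct sxe2_rdma_profile below, so a driver profile can be assembled as,
+ * for example:
+ *
+ *	static const struct sxe2_rdma_profile drv_profile = {
+ *		STAGE_CREATE(SXE2_RDMA_STAGE_PBLE, pble_init, pble_cleanup),
+ *		STAGE_CREATE(SXE2_RDMA_STAGE_CREATE_AEQ, aeq_init, aeq_cleanup),
+ *	};
+ *
+ * pble_init/pble_cleanup/aeq_init/aeq_cleanup are hypothetical callbacks
+ * matching the sxe2_rdma_stage function-pointer types. Based on the probe
+ * path, sxe2_drv_add() presumably runs stage[].init in enum order and
+ * sxe2_drv_remove() runs stage[].cleanup in reverse, bounded by the stage
+ * index it is passed (SXE2_RDMA_STAGE_MAX for a fully initialized device).
+ */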
+enum sxe2_rdma_stages { + SXE2_RDMA_STAGE_SETUP_INITINFO, + SXE2_RDMA_STAGE_DEBUG, + SXE2_RDMA_STAGE_CREATE_MQ, + SXE2_RDMA_STAGE_GET_FEATURES, + SXE2_RDMA_STAGE_RCMS_SETUP, + SXE2_RDMA_STAGE_PBLE, + SXE2_RDMA_STAGE_HW_RSRC, + SXE2_RDMA_STAGE_DB_INIT, + SXE2_RDMA_STAGE_CREATE_MCQ, + SXE2_RDMA_STAGE_CREATE_MCEQ, + SXE2_RDMA_STAGE_MQ_HDL, + SXE2_RDMA_STAGE_SET_ATTR, + SXE2_RDMA_STAGE_VSI, + SXE2_RDMA_STAGE_VSI_STATS, + SXE2_RDMA_STAGE_CREATE_CEQS, + SXE2_RDMA_STAGE_CREATE_AEQ, + SXE2_RDMA_STAGE_RCRC_WQ, + SXE2_RDMA_STAGE_MAX, +}; + +union sxe2_rdma_cfg_pkey_bar { + struct cfg_pkey_bar { + u32 pkey : 16; + u32 pkey_port : 2; + u32 pkey_vld : 1; + u32 rsv : 13; + } pkey_bar; + u32 bar_val; +}; + +struct sxe2_rdma_stage { + int (*init)(struct sxe2_rdma_device *dev); + void (*cleanup)(struct sxe2_rdma_device *dev); +}; + +struct sxe2_rdma_profile { + struct sxe2_rdma_stage stage[SXE2_RDMA_STAGE_MAX]; +}; + +enum sxe2_rdma_status { + SXE2_RDMA_PROBE, + SXE2_RDMA_REMOVE, + SXE2_RDMA_MAX, +}; +struct sxe2_rdma_notify_status_info { + u32 rdma_status; +}; + +void sxe2_rdma_set_qos_info(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2p); + +bool sxe2_rdma_get_cpu_vendor(struct sxe2_rdma_device *rdma_dev); + +#ifdef RDMA_AUX_GET_SET_DRV_DATA +static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev) +{ + return dev_get_drvdata(&auxdev->dev); +} + +static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, + void *data) +{ + dev_set_drvdata(&auxdev->dev, data); +} +#endif + +void sxe2_rdma_update_qos_info(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2p); + +void sxe2_rdma_qos_move_qset(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2params); + +void sxe2_rdma_free_one_vf(struct sxe2_rdma_vchnl_dev *vc_dev); + +#ifdef SXE2_SUPPORT_CONFIGFS +int sxe2_configfs_init(void); +void sxe2_configfs_exit(void); +#endif +void sxe2_rdma_cc_dcqcn_set_params(struct sxe2_rdma_pci_f *rdma_func); + +void sxe2_rdma_cc_timely_set_params(struct sxe2_rdma_pci_f *rdma_func); + +void sxe2_rdma_init_cc_params(struct sxe2_rdma_pci_f *rdma_func); + +void +sxe2_rdma_set_func_user_cfg_params(struct sxe2_rdma_pci_f *rdma_func); + +int sxe2_rdma_save_msix_info(struct sxe2_rdma_pci_f *rdma_func); + +int sxe2_rdma_init_ctx_dev(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_device_init_info *info); + +int sxe2_rdma_initialize_dev(struct sxe2_rdma_pci_f *rdma_func); + +int sxe2_rdma_setup_init_state(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kunregister_notifiers(struct sxe2_rdma_device *rdma_dev); +void sxe2_rdma_disassociate_ucontext(struct ib_ucontext *ibctx); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.c new file mode 100644 index 0000000000000000000000000000000000000000..0e9449409a8fe3d833e12b349193d87dccd52de0 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_mc.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sxe2_drv_aux.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_mc.h" + +#define SXE2_RDMA_MC_VALID_TRUE (1) +#define SXE2_RDMA_MC_VALID_FALSE (0) + +static void mcast_kget_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac) +{ + struct net_device *ip_dev = NULL; + struct in6_addr laddr6; + + sxe2_copy_ip_ntohl(laddr6.in6_u.u6_addr32, addr); + if (vlan_id) + *vlan_id = 0xFFFF; + + if (mac) + eth_zero_addr(mac); + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + if (mac) + ether_addr_copy(mac, ip_dev->dev_addr); + break; + } + } + rcu_read_unlock(); +} + +static u16 mcast_kget_vlan_ipv4(u32 *addr) +{ + struct net_device *netdev; + u16 vlan_id = 0xFFFF; + + netdev = ip_dev_find(&init_net, htonl(addr[0])); + if (netdev) { + vlan_id = rdma_vlan_dev_vlan_id(netdev); + dev_put(netdev); + } + + return vlan_id; +} + +static int mcast_kdetach_qp(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mcast_cmd_info *detach_info) +{ + struct sxe2_detach_mcast_cmd_resp recv_msg = { 0 }; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + int ret = 0; + int i = 0; + + if (rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEV_INFO("reset is set, mcast detach qp skip\n"); + goto end; + } + + detach_info->vlan_id = cpu_to_le16(detach_info->vlan_id); + for (i = 0; i < SXE2_RDMA_DRV_MC_IP_LEN; i++) { + detach_info->dest_ip_addr[i] = + cpu_to_le32(detach_info->dest_ip_addr[i]); + } + detach_info->qpn = cpu_to_le32(detach_info->qpn); + detach_info->vsi_index = cpu_to_le16(detach_info->vsi_index); + detach_info->pf_id = cpu_to_le16(detach_info->pf_id); + detach_info->vf_id = cpu_to_le16(detach_info->vf_id); + + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_QP_DETACH_MC, + (u8 *)detach_info, + (u16)sizeof(*detach_info), + (u8 *)(&recv_msg), + (u16)sizeof(recv_msg)); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("Send msg:detach mcast failed, ret %d\n", + ret); + goto end; + } + + if (recv_msg.detach_flag == SXE2_MCAST_DETACH_LAST_QP) { + ret = cdev_info->ops->rdma_drv_config( + cdev_info, RDMA_MAC_RULE_DELETE, + detach_info->dest_mac_addr); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "Send msg:delete mac rule failed, ret %d\n", + ret); + } + } else if (recv_msg.detach_flag != SXE2_MCAST_DETACH_NOT_LAST_QP) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "Send msg:detach mcast recv_msg %d, ret %d\n", + recv_msg.detach_flag, ret); + } + + if (detach_info->ipv4_valid) { + DRV_RDMA_LOG_DEV_INFO( + "DETACH MCAST:MAC %pM, VLAN_ID %#x, IP %pI4, IPv4 valid %u,\n" + "\tVLAN valid %u, QPN %#x, VSI Index %#x, recv_msg %d\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + detach_info->dest_mac_addr, detach_info->vlan_id, + detach_info->dest_ip_addr, detach_info->ipv4_valid, + detach_info->vlan_valid, detach_info->qpn, + detach_info->vsi_index, recv_msg.detach_flag, + detach_info->pf_id, detach_info->pf_valid, + detach_info->vf_id); + } else { + DRV_RDMA_LOG_DEV_INFO( + "DETACH MCAST:MAC %pM, VLAN_ID %#x, IP %pI6, IPv4 valid %u,\n" + "\tVLAN valid %u, QPN %#x, VSI Index %#x, recv_msg %d\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + detach_info->dest_mac_addr, detach_info->vlan_id, + detach_info->dest_ip_addr, detach_info->ipv4_valid, + detach_info->vlan_valid, detach_info->qpn, + detach_info->vsi_index, 
recv_msg.detach_flag, + detach_info->pf_id, detach_info->pf_valid, + detach_info->vf_id); + } + +end: + return ret; +} + +static int mcast_kattach_qp(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mcast_cmd_info *attach_info) +{ + struct sxe2_acttach_mcast_cmd_resp recv_msg = { 0 }; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + int ret = 0; + int detach_ret = 0; + int i = 0; + + if (rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEV_INFO("reset is set, mcast attach qp skip\n"); + ret = -EBUSY; + goto end; + } + + attach_info->vlan_id = cpu_to_le16(attach_info->vlan_id); + for (i = 0; i < SXE2_RDMA_DRV_MC_IP_LEN; i++) { + attach_info->dest_ip_addr[i] = + cpu_to_le32(attach_info->dest_ip_addr[i]); + } + attach_info->qpn = cpu_to_le32(attach_info->qpn); + attach_info->vsi_index = cpu_to_le16(attach_info->vsi_index); + attach_info->pf_id = cpu_to_le16(attach_info->pf_id); + attach_info->vf_id = cpu_to_le16(attach_info->vf_id); + + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_QP_ATTACH_MC, + (u8 *)attach_info, + (u16)sizeof(*attach_info), + (u8 *)(&recv_msg), + (u16)sizeof(recv_msg)); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("Send msg:attach mcast failed, ret %d\n", + ret); + goto end; + } + + if (recv_msg.attach_flag == SXE2_MCAST_ATTACH_FIRST_QP) { + ret = cdev_info->ops->rdma_drv_config( + cdev_info, RDMA_MAC_RULE_ADD, + attach_info->dest_mac_addr); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "Send msg:add mac rule failed, ret %d\n", ret); + goto detach_qp; + } + } else if (recv_msg.attach_flag != SXE2_MCAST_ATTACH_NOT_FIRST_QP) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "Send msg:attach mcast recv_msg %d, ret %d\n", + recv_msg.attach_flag, ret); + goto detach_qp; + } + + if (attach_info->ipv4_valid) { + DRV_RDMA_LOG_DEV_INFO( + "ATTACH MCAST:MAC %pM, VLAN_ID %#x, IP %pI4, IPv4 valid %d,\n" + "\tVLAN valid %d, QPN %#x, VSI Index %#x, recv_msg %d\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + attach_info->dest_mac_addr, attach_info->vlan_id, + attach_info->dest_ip_addr, attach_info->ipv4_valid, + attach_info->vlan_valid, attach_info->qpn, + attach_info->vsi_index, recv_msg.attach_flag, + attach_info->pf_id, attach_info->pf_valid, + attach_info->vf_id); + } else { + DRV_RDMA_LOG_DEV_INFO( + "ATTACH MCAST:MAC %pM, VLAN_ID %#x, IP %pI6, IPv4 valid %d,\n" + "\tVLAN valid %d, QPN %#x, VSI Index %#x, recv_msg %d\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + attach_info->dest_mac_addr, attach_info->vlan_id, + attach_info->dest_ip_addr, attach_info->ipv4_valid, + attach_info->vlan_valid, attach_info->qpn, + attach_info->vsi_index, recv_msg.attach_flag, + attach_info->pf_id, attach_info->pf_valid, + attach_info->vf_id); + } + goto end; + +detach_qp: + detach_ret = mcast_kdetach_qp(rdma_dev, attach_info); + if (detach_ret) { + DRV_RDMA_LOG_DEV_ERR("Detach mcast failed, ret %d\n", + detach_ret); + } +end: + return ret; +} + +int sxe2_kattach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) +{ + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_mcast_cmd_info attach_info = {}; + union sxe2_sockaddr sgid_addr; + int ret = 0; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); + + if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { + sxe2_copy_ip_ntohl( + attach_info.dest_ip_addr, + sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); + mcast_kget_vlan_mac_ipv6(attach_info.dest_ip_addr, + &attach_info.vlan_id, NULL); + attach_info.ipv4_valid = 
SXE2_RDMA_MC_VALID_FALSE; + mcast_kfill_mac_v6(&attach_info.dest_ip_addr[3], + attach_info.dest_mac_addr); + } else { + attach_info.dest_ip_addr[0] = + ntohl(sgid_addr.saddr_in.sin_addr.s_addr); + attach_info.ipv4_valid = SXE2_RDMA_MC_VALID_TRUE; + attach_info.vlan_id = + mcast_kget_vlan_ipv4(attach_info.dest_ip_addr); + mcast_kfill_mac_v4(attach_info.dest_ip_addr, + attach_info.dest_mac_addr); + } + + if (attach_info.vlan_id < VLAN_N_VID) + attach_info.vlan_valid = SXE2_RDMA_MC_VALID_TRUE; + + attach_info.qpn = qp->ibqp.qp_num; + attach_info.vsi_index = rdma_dev->vsi.vsi_idx; + attach_info.pf_id = (u16)rdma_dev->rdma_func->pf_id; + attach_info.pf_valid = (u8)dev->privileged; + attach_info.vf_id = dev->rcms_info->pmf_index; + + DRV_RDMA_LOG_DEV_DEBUG( + "ATTACH INFO:MAC %pM, VLAN_ID %#x, IP %pI4, IPv4 valid %u,\n" + "\tVLAN valid %u, QPN %#x, VSI Index %#x\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + attach_info.dest_mac_addr, attach_info.vlan_id, + attach_info.dest_ip_addr, attach_info.ipv4_valid, + attach_info.vlan_valid, attach_info.qpn, attach_info.vsi_index, + attach_info.pf_id, attach_info.pf_valid, attach_info.vf_id); + + ret = mcast_kattach_qp(rdma_dev, &attach_info); + + return ret; +} + +int sxe2_kdetach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) +{ + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_mcast_cmd_info detach_info = {}; + union sxe2_sockaddr sgid_addr; + int ret = 0; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); + if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { + sxe2_copy_ip_ntohl( + detach_info.dest_ip_addr, + sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); + mcast_kget_vlan_mac_ipv6(detach_info.dest_ip_addr, + &detach_info.vlan_id, NULL); + detach_info.ipv4_valid = SXE2_RDMA_MC_VALID_FALSE; + mcast_kfill_mac_v6(&detach_info.dest_ip_addr[3], + detach_info.dest_mac_addr); + } else { + detach_info.dest_ip_addr[0] = + ntohl(sgid_addr.saddr_in.sin_addr.s_addr); + detach_info.ipv4_valid = SXE2_RDMA_MC_VALID_TRUE; + detach_info.vlan_id = + mcast_kget_vlan_ipv4(detach_info.dest_ip_addr); + mcast_kfill_mac_v4(detach_info.dest_ip_addr, + detach_info.dest_mac_addr); + } + + if (detach_info.vlan_id < VLAN_N_VID) + detach_info.vlan_valid = SXE2_RDMA_MC_VALID_TRUE; + + detach_info.qpn = qp->ibqp.qp_num; + detach_info.vsi_index = rdma_dev->vsi.vsi_idx; + detach_info.pf_id = (u16)rdma_dev->rdma_func->pf_id; + detach_info.pf_valid = (u8)dev->privileged; + detach_info.vf_id = dev->rcms_info->pmf_index; + + DRV_RDMA_LOG_DEV_DEBUG( + "DETACH INFO:MAC %pM, VLAN_ID %#x, IP %pI4, IPv4 valid %d\n" + "\tVLAN valid %d, QPN %#x, VSI Index %#x\n" + "\tpf_id %#x, pf_valid %u, vf_id %#x\n", + detach_info.dest_mac_addr, detach_info.vlan_id, + detach_info.dest_ip_addr, detach_info.ipv4_valid, + detach_info.vlan_valid, detach_info.qpn, detach_info.vsi_index, + detach_info.pf_id, detach_info.pf_valid, detach_info.vf_id); + + ret = mcast_kdetach_qp(rdma_dev, &detach_info); + + return ret; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.h new file mode 100644 index 0000000000000000000000000000000000000000..75555c5ce998bd578fccbf8e6e9200a1a4b19fc0 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mc.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_mc.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_MC_H__ +#define __SXE2_DRV_MC_H__ + +#include +#include "sxe2_drv_rdma_common.h" + +#define SXE2_RDMA_DRV_MC_ETH_ALEN (6) +#define SXE2_RDMA_DRV_MC_IP_LEN (4) + +#pragma pack(4) +struct sxe2_mcast_cmd_info { + u8 dest_mac_addr[SXE2_RDMA_DRV_MC_ETH_ALEN]; + u8 rsv0[2]; + u16 vlan_id; + u16 pf_id; + u32 dest_ip_addr[SXE2_RDMA_DRV_MC_IP_LEN]; + u8 ipv4_valid; + u8 vlan_valid; + u8 pf_valid; + u8 rsv2[1]; + u32 qpn; + u16 vsi_index; + u16 vf_id; +}; +#pragma pack() + +enum sxe2_mcast_type { + SXE2_MCAST_ATTACH_FIRST_QP = 1, + SXE2_MCAST_ATTACH_NOT_FIRST_QP = + 2, + SXE2_MCAST_DETACH_LAST_QP = 3, + SXE2_MCAST_DETACH_NOT_LAST_QP = + 4, +}; + +struct sxe2_acttach_mcast_cmd_resp { + u8 attach_flag; +}; + +struct sxe2_detach_mcast_cmd_resp { + u8 detach_flag; +}; + +static inline void mcast_kfill_mac_v6(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac6[ETH_ALEN] = {}; + + mac6[0] = 0x33; + mac6[1] = 0x33; + mac6[2] = ip[3]; + mac6[3] = ip[2]; + mac6[4] = ip[1]; + mac6[5] = ip[0]; + + ether_addr_copy(mac, mac6); +} + +static inline void mcast_kfill_mac_v4(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac4[ETH_ALEN] = {}; + + mac4[0] = 0x01; + mac4[1] = 0x00; + mac4[2] = 0x5E; + mac4[3] = ip[2] & 0x7F; + mac4[4] = ip[1]; + mac4[5] = ip[0]; + + ether_addr_copy(mac, mac4); +} + +int sxe2_kattach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid); + +int sxe2_kdetach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.c new file mode 100644 index 0000000000000000000000000000000000000000..dac2e88c3213f8f45f3d02f99ed44168a477b063 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.c @@ -0,0 +1,1594 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_mq.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_stats.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_cq.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_srq.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_mq_debugfs.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_main.h" + +#define SXE2_MQ_WQE_OPCODE_OFFSET (24) +#define SXE2_MQ_DB_VLD_VALUE (1) +#define SXE2_MQ_DB_VLD_SHIFT (11) +#define SXE2_MQ_WQE_OPCODE_BITS GENMASK_ULL(37, 32) +#define SXE2_MQ_WQE_VALID_BITS BIT_ULL_MASK(63) +#define MQ_WQE_MEMSET_NUM_0 (0) +#define MQ_WQE_MEMSET_NUM_3 (3) +#define MQ_WQE_MEMSET_NUM_4 (4) +#define MQ_WQE_MEMSET_BYTES_24 (24) +#define MQ_WQE_MEMSET_BYTES_32 (32) +#define MQ_TIMEOUT_THRESHOLD (500) +#define MCQ_DB_NOTE_OFFSET_0 (0) +#define MCQ_DB_NOTE_OFFSET_4 (4) +#define MCQ_DB_NOTE_CMDSN_MASK (4) +#define MCQ_DB_NOTE_CMD_VALUE (0) +#define MQC_64B_CACHE_LINE_EN (1) +#define MQC_64B_CACHE_LINE_NO_EN (0) + +static const char *const sxe2_mq_cmd_names[SXE2_MQ_OP_MAX] = { + [SXE2_MQ_OP_CREATE_QP] = "Create QP Cmd", + [SXE2_MQ_OP_MODIFY_QP] = "Modify QP Cmd", + [SXE2_MQ_OP_DESTROY_QP] = "Destroy QP Cmd", + [SXE2_MQ_OP_CREATE_CQ] = "Create CQ Cmd", + [SXE2_MQ_OP_MODIFY_CQ] = "Modify CQ Cmd", + [SXE2_MQ_OP_DESTROY_CQ] = "Destroy CQ Cmd", + [SXE2_MQ_OP_ALLOC_MR_key] = "Allocate MR key Cmd", + [SXE2_MQ_OP_REG_MR] = "Register MR Cmd", + [SXE2_MQ_OP_QUERY_MR_key] = "Query MR key Cmd", + [SXE2_MQ_OP_DEALLOC_MR_key] = "Deallocate MR key Cmd", + [SXE2_MQ_OP_MANAGE_PBLE_BP] = "Manage Function PBLE Backing Pages Cmd", + [SXE2_MQ_OP_QUERY_QP] = "Query QP Cmd", + [SXE2_MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE] = + "Manage RCMS PM Function Table Cmd", + [SXE2_MQ_OP_CREATE_CEQ] = "Create CEQ Cmd", + [SXE2_MQ_OP_DESTROY_CEQ] = "Destroy CEQ Cmd", + [SXE2_MQ_OP_CREATE_AEQ] = "Create AEQ Cmd", + [SXE2_MQ_OP_DESTROY_AEQ] = "Destroy AEQ Cmd", + [SXE2_MQ_OP_CREATE_ADDR_HANDLE] = "Create Address Handle Cmd", + [SXE2_MQ_OP_MODIFY_ADDR_HANDLE] = "Modify Address Handle Cmd", + [SXE2_MQ_OP_DESTROY_ADDR_HANDLE] = "Destroy Address Handle Cmd", + [SXE2_MQ_OP_UPDATE_FPT] = "Update FPTs Cmd", + [SXE2_MQ_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd", + [SXE2_MQ_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd", + [SXE2_MQ_OP_NOP] = "NOP Cmd", + [SXE2_MQ_OP_GATHER_STATS] = "Gather Statistics Cmd", + [SXE2_MQ_OP_CREATE_SRQ] = "Create SRQ Cmd", + [SXE2_MQ_OP_MODIFY_SRQ] = "Modify SRQ Cmd", + [SXE2_MQ_OP_DESTROY_SRQ] = "Destroy SRQ Cmd", + [SXE2_MQ_OP_DEREGISTER_MR] = "Deregister MR Cmd", + [SXE2_MQ_OP_MODIFY_CEQ] = "Modify CEQ Cmd", + [SXE2_MQ_OP_QUERY_CEQ] = "Query CEQ Cmd", + [SXE2_MQ_OP_MODIFY_AEQ] = "Modify AEQ Cmd", + [SXE2_MQ_OP_QUERY_AEQ] = "Query AEQ Cmd", + [SXE2_MQ_OP_QUERY_CQ] = "Query CQ Cmd", + [SXE2_MQ_OP_QUERY_SRQ] = "Query SRQ Cmd", + [SXE2_MQ_OP_QUERY_MR] = "Query MR Cmd", +}; + +static const struct sxe2_mq_ctx_err_code sxe2_mq_ctx_err_list[] = { + { 0x1, "Duplicate Create" }, + { 0x2, "DMA" }, + { 0x3, "Func Not Allocated" }, + { 0x4, "SQ Size Illegal" }, +}; + +static const struct sxe2_mq_err_info sxe2_mq_cqe_err_list[] = { + { 0xF000, 0x0000, "Cache Address Translation Error" }, + { 0xF000, 0x0001, "Cache Read RCMS space, but PCIe return Error" }, + { 0xF001, 0x0004, "Context SRAM multi-bit Error" }, + { 0xF002, 0x0001, "WQE QPN/CQN/SRQN/EQN Out of Range" }, + { 0xF002, 0x0002, "QPC:SRQN Out of Range" }, + { 0xF002, 
0x0004, "QPC.Send_CQN Out of Range" }, + { 0xF002, 0x0008, "QPC.Receive_CQN Out of Range" }, + { 0xF002, 0x0010, "CQC.CEQN Out of Range" }, + { 0xF003, 0x0001, "QPC/CQC/SRQC/EQC.SW State used rsv value" }, + { 0xF003, 0x0002, "QP Type used rsv value" }, + { 0xF003, 0x0004, "QPC.pmtu used rsv value" }, + { 0xF003, 0x0008, "QPC/CQC/SRQC/EQC.log_size used rsv value" }, + { 0xF003, 0x0010, "QPC.rq_type used rsv value" }, + { 0xF003, 0x0020, "QPC.log_page_size used rsv value" }, + { 0xFFFF, 0x8003, "Invalid Next QP State" }, + { 0xFFFF, 0x800C, "FPT Index Out of Range" }, + { 0xFFFF, 0x800D, "SPT Index Out of Range" }, + { 0xFFFF, 0x800E, "SPT Page Boundary Exceeded" }, + { 0xFFFF, 0x800F, "FPT Boundary Exceeded" }, + { 0xFFFF, 0x8010, "Invalid Function Type" }, + { 0xFFFF, 0x8013, "AH ID Out of Range" }, + { 0xFFFF, 0x8014, "Invalid MRTE Index" }, + { 0xFFFF, 0x8019, "Invalid Access Rights" }, + { 0xFFFF, 0x8020, "Invalid Flag on Reg MR" }, + { 0xFFFF, 0x8021, "Invalid PBL/Host Page" }, + { 0xFFFF, 0x8023, "Invalid RoCE QPID" }, + { 0xFFFF, 0x8025, "Commit RCMS Value Check Error" }, + { 0xFFFF, 0x8026, "VF Num Overflow" }, + { 0xFFFF, 0x8027, "Illegal RCMS Profile Type" }, + { 0xFFFF, 0x8028, "PCIe Unsupported Request" }, + { 0xFFFF, 0x8029, "Update FPT WQE Check Error" }, + { 0xFFFF, 0x802A, "VF Bind Conflict" }, + { 0xFFFF, 0x8030, "MQC Not Created" }, + { 0xFFFF, 0x8031, "SQ Base Access Error" }, + { 0xFFFF, 0x8032, "MQ Context Buf multi-bit ECC error" }, + { 0xFFFF, 0x8033, "MQ WQE Buf multi-bit ECC error" }, + { 0xFFFF, 0x8034, "QP Destroy Abort error" }, + { 0xFFFF, 0x80EE, "Unsupported Opcode" }, +}; + +static const struct sxe2_mq_err_info sxe2_mq_cqe_not_err_list[] = { + { 0x0000, 0x0001, "STag Invalid" }, +}; + +__le64 *sxe2_kget_next_mq_wqe_idx(struct sxe2_mq_ctx *mq, u64 scratch, + u32 *wqe_idx) +{ + __le64 *wqe = NULL; + int ret_code; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + + if (SXE2_RING_FULL_ERR(mq->mq_ring)) { + DRV_RDMA_LOG_DEV_ERR( + "WQE: MQ is full, head 0x%x tail 0x%x size 0x%x\n", + mq->mq_ring.head, mq->mq_ring.tail, mq->mq_ring.size); + goto end; + } + + SXE2_ATOMIC_RING_MOVE_HEAD(mq->mq_ring, *wqe_idx, ret_code); + if (ret_code) + goto end; + + mq->requested_ops++; + if (!*wqe_idx) + mq->polarity = !mq->polarity; + + wqe = mq->mq_buf_va[*wqe_idx].elem; + mq->scratch_array[*wqe_idx] = scratch; + + memset(&wqe[MQ_WQE_MEMSET_NUM_0], 0, MQ_WQE_MEMSET_BYTES_24); + memset(&wqe[MQ_WQE_MEMSET_NUM_4], 0, MQ_WQE_MEMSET_BYTES_32); + wqe[MQ_WQE_MEMSET_NUM_3] &= SXE2_MQ_WQE_VALID_BITS; + +end: + return wqe; +} + +void sxe2_kpost_mq(struct sxe2_mq_ctx *mq) +{ + struct mq_wqe_nop *wqe = NULL; + u32 head = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + + SXE2_BAR_WRITE_32(((u32)(SXE2_MQ_DB_VLD_VALUE << SXE2_MQ_DB_VLD_SHIFT) | + SXE2_RING_CURRENT_HEAD(mq->mq_ring)), + mq->dev->hw_regs[MQ_DB]); + + if (mq->mq_ring.head > 0) + head = mq->mq_ring.head - 1; + else + head = mq->mq_ring.size - 1; + wqe = (struct mq_wqe_nop *)(mq->mq_buf_va[head].elem); + mq->dev->mq_post_stats[wqe->op]++; + + DRV_RDMA_LOG_DEV_DEBUG( + "WQE: MQ head %#x, tail %#x, opcode %#x, MQ size %#x\n", + mq->mq_ring.head, mq->mq_ring.tail, wqe->op, mq->mq_ring.size); +} + +struct sxe2_mq_request *sxe2_kalloc_and_get_mq_request(struct sxe2_mq *mq, + bool wait) +{ + struct sxe2_mq_request *mq_request = NULL; + unsigned long flags = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->mq.dev); + + spin_lock_irqsave(&mq->req_lock, flags); + if (!list_empty(&mq->mq_avail_reqs)) { + 
mq_request = list_entry(mq->mq_avail_reqs.next, + struct sxe2_mq_request, list); + list_del_init(&mq_request->list); + } + spin_unlock_irqrestore(&mq->req_lock, flags); + + if (!mq_request) { + mq_request = kzalloc(sizeof(*mq_request), GFP_ATOMIC); + if (mq_request) { + mq_request->dynamic = true; + if (wait) + init_waitqueue_head(&mq_request->waitq); + } + } + if (!mq_request) { + DRV_RDMA_LOG_DEV_ERR("ERR: MQ Request Fail: No Memory"); + goto end; + } + + mq_request->waiting = wait; + refcount_set(&mq_request->refcnt, 1); + memset(&mq_request->cmpl_info, 0, sizeof(mq_request->cmpl_info)); + +end: + return mq_request; +} + +static bool mq_kquery_ring_full(struct sxe2_mq_ctx *mq) +{ + return SXE2_RING_FULL_ERR(mq->mq_ring); +} + +int sxe2_knop(struct sxe2_mq_ctx *mq, u64 scratch, bool post_mq, u8 wait_type) +{ + __le64 *wqe = NULL; + struct mq_wqe_nop *nop_wqe = NULL; + u32 tail, val, error = 0; + struct sxe2_rdma_device *rdma_dev; + int ret_code = 0; + + rdma_dev = to_rdmadev(mq->dev); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get next wqe failed, ret (%d)\n", + ret_code); + goto end; + } + + nop_wqe = (struct mq_wqe_nop *)wqe; + nop_wqe->op = SXE2_MQ_OP_NOP; + nop_wqe->wqe_valid = mq->polarity; + + sxe2_kget_mq_reg_info(mq, &val, &tail, &error); + + if (post_mq) { + sxe2_kpost_mq(mq); + if (wait_type == SXE2_MQ_WAIT_POLL_REGS) { + ret_code = sxe2_kpoll_mq_registers( + mq, tail, mq->dev->hw_attrs.max_done_count); + } else if (wait_type == SXE2_MQ_WAIT_POLL_CQ) { + ret_code = sxe2_kpoll_mcq(mq, SXE2_MQ_OP_NOP, NULL); + } + } + +end: + return ret_code; +} + +int mq_kexec_cmd(struct sxe2_rdma_ctx_dev *dev, struct mq_cmds_info *pcmdinfo) +{ + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_dma_mem val_mem; + + switch (pcmdinfo->mq_cmd) { + case MQ_OP_CREATE_QP: + ret_code = sxe2_ctx_qp_create(pcmdinfo->in.u.qp_create.qp, + &pcmdinfo->in.u.qp_create.info, + pcmdinfo->in.u.qp_create.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_MODIFY_QP: + ret_code = sxe2_ctx_qp_modify(pcmdinfo->in.u.qp_modify.qp, + &pcmdinfo->in.u.qp_modify.info, + pcmdinfo->in.u.qp_modify.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_DESTROY_QP: + ret_code = + sxe2_ctx_qp_destroy(pcmdinfo->in.u.qp_destroy.qp, + &pcmdinfo->in.u.qp_destroy.info, + pcmdinfo->in.u.qp_destroy.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_CREATE_CQ: + ret_code = sxe2_drv_cq_create( + pcmdinfo->in.u.cq_create.cq, + pcmdinfo->in.u.cq_create.scratch, + pcmdinfo->in.u.cq_create.check_overflow, + pcmdinfo->post_mq); + break; + case MQ_OP_MODIFY_CQ: + ret_code = sxe2_drv_cq_modify(pcmdinfo->in.u.cq_modify.cq, + pcmdinfo->in.u.cq_modify.cqc, + pcmdinfo->in.u.cq_modify.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_DESTROY_CQ: + ret_code = + sxe2_drv_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, + pcmdinfo->in.u.cq_destroy.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_ALLOC_MR_KEY: + case MQ_OP_REG_MR: + case MQ_OP_QUERY_MR: + case MQ_OP_DEALLOC_MR_KEY: + case MQ_OP_DEREGISTER_MR: + ret_code = sxe2_hw_set_mq_wqe(dev, pcmdinfo); + break; + case MQ_OP_CREATE_ADDR_HANDLE: + case MQ_OP_MODIFY_ADDR_HANDLE: + case MQ_OP_DESTROY_ADDR_HANDLE: + ret_code = sxe2_ah_set_mq_wqe(dev, pcmdinfo); + break; + case MQ_OP_QUERY_MR_KEY: + break; + case MQ_OP_MANAGE_PBLE_BP: + ret_code = sxe2_pbl_manage_pble_cp_cmd( + pcmdinfo->in.u.manage_pble_bp.mq, + &pcmdinfo->in.u.manage_pble_bp.info, + pcmdinfo->in.u.manage_pble_bp.scratch, true); + + 
break; + case MQ_OP_QUERY_QP: + ret_code = sxe2_ctx_qp_query(pcmdinfo->in.u.qp_query.qp, + &pcmdinfo->in.u.qp_query.info, + pcmdinfo->in.u.qp_query.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE: + ret_code = sxe2_vchnl_manage_rcms_pm_func_table( + pcmdinfo->in.u.manage_rcms_pm.dev->mq, + &pcmdinfo->in.u.manage_rcms_pm.info, + pcmdinfo->in.u.manage_rcms_pm.scratch, true); + break; + case MQ_OP_CREATE_CEQ: + ret_code = sxe2_drv_ceq_create(pcmdinfo->in.u.ceq_ops.ceq, + pcmdinfo->in.u.ceq_ops.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_DESTROY_CEQ: + ret_code = sxe2_drv_ceq_destroy(pcmdinfo->in.u.ceq_ops.ceq, + pcmdinfo->in.u.ceq_ops.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_CREATE_AEQ: + ret_code = sxe2_drv_aeq_create(pcmdinfo->in.u.aeq_ops.aeq, + pcmdinfo->in.u.aeq_ops.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_DESTROY_AEQ: + ret_code = sxe2_drv_aeq_destroy(pcmdinfo->in.u.aeq_ops.aeq, + pcmdinfo->in.u.aeq_ops.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_UPDATE_FPT: + ret_code = sxe2_rcms_update_pe_fptes( + pcmdinfo->in.u.update_pe_fptes.dev, + &pcmdinfo->in.u.update_pe_fptes.info, + pcmdinfo->in.u.update_pe_fptes.scratch); + break; + case MQ_OP_QUERY_FPM_VAL: + val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa; + val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va; + ret_code = sxe2_rcms_query_fpm_val_cmd( + pcmdinfo->in.u.query_fpm_val.mq, + pcmdinfo->in.u.query_fpm_val.scratch, + pcmdinfo->in.u.query_fpm_val.rcms_fn_id, &val_mem, true, + SXE2_MQ_WAIT_EVENT); + + break; + case MQ_OP_COMMIT_FPM_VAL: + val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa; + val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va; + ret_code = sxe2_rcms_commit_fpm_val_cmd( + pcmdinfo->in.u.commit_fpm_val.mq, + pcmdinfo->in.u.commit_fpm_val.scratch, + pcmdinfo->in.u.commit_fpm_val.rcms_fn_id, &val_mem, + true, SXE2_MQ_WAIT_EVENT); + break; + case MQ_OP_NOP: + ret_code = + sxe2_knop(pcmdinfo->in.u.nop.mq, + pcmdinfo->in.u.nop.scratch, pcmdinfo->post_mq, + (u8)pcmdinfo->in.u.nop.wait_type); + break; + case MQ_OP_GATHER_STATS: + ret_code = + sxe2_kgather_stats(pcmdinfo->in.u.stats_gather.mq, + &pcmdinfo->in.u.stats_gather.info, + pcmdinfo->in.u.stats_gather.scratch); + break; + case MQ_OP_CREATE_SRQ: + ret_code = + sxe2_kcreate_srq_ctx(pcmdinfo->in.u.srq_create.srq, + pcmdinfo->in.u.srq_create.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_MODIFY_SRQ: + ret_code = + sxe2_kmodify_srq_ctx(pcmdinfo->in.u.srq_modify.srq, + pcmdinfo->in.u.srq_modify.srqc, + pcmdinfo->in.u.srq_modify.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_DESTROY_SRQ: + ret_code = sxe2_kdestroy_srq_ctx( + pcmdinfo->in.u.srq_destroy.srq, + pcmdinfo->in.u.srq_destroy.scratch, pcmdinfo->post_mq); + break; + case MQ_OP_MODIFY_CEQ: + ret_code = + sxe2_drv_ceq_modify(pcmdinfo->in.u.ceq_modify.ceq, + pcmdinfo->in.u.ceq_modify.ceqc, + pcmdinfo->in.u.ceq_modify.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_QUERY_CEQ: + ret_code = sxe2_drv_ceq_query(pcmdinfo->in.u.ceq_query.ceq, + pcmdinfo->in.u.ceq_query.query_pa, + pcmdinfo->in.u.ceq_query.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_MODIFY_AEQ: + ret_code = + sxe2_drv_aeq_modify(pcmdinfo->in.u.aeq_modify.aeq, + pcmdinfo->in.u.aeq_modify.aeqc, + pcmdinfo->in.u.aeq_modify.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_QUERY_AEQ: + ret_code = sxe2_drv_aeq_query(pcmdinfo->in.u.aeq_query.aeq, + pcmdinfo->in.u.aeq_query.query_pa, + pcmdinfo->in.u.aeq_query.scratch, + pcmdinfo->post_mq); + break; + case 
MQ_OP_QUERY_CQ: + ret_code = sxe2_drv_cq_query(pcmdinfo->in.u.cq_query.cq, + pcmdinfo->in.u.cq_query.query_pa, + pcmdinfo->in.u.cq_query.scratch, + pcmdinfo->post_mq); + break; + case MQ_OP_QUERY_SRQ: + ret_code = + sxe2_kquery_srq_ctx(pcmdinfo->in.u.srq_query.srq, + pcmdinfo->in.u.srq_query.query_pa, + pcmdinfo->in.u.srq_query.scratch, + pcmdinfo->post_mq); + break; + default: + ret_code = -EOPNOTSUPP; + break; + } + + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR( + "%s process cmd [opcode : %d] [ret : %d]\n", __func__, + pcmdinfo->mq_cmd, ret_code); + } + + return ret_code; +} + +bool mq_kcheck_cqe_err(struct sxe2_rdma_ctx_dev *dev, u8 mq_cmd, bool error, + u16 maj_err_code, u16 min_err_code) +{ + size_t i = 0; + u8 min_err_l, min_err_h; + bool ret_code = true; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + for (i = 0; i < ARRAY_SIZE(sxe2_mq_cqe_err_list); ++i) { + if (maj_err_code == sxe2_mq_cqe_err_list[i].maj && + min_err_code == sxe2_mq_cqe_err_list[i].min && + error == true) { + if (sxe2_mq_cmd_names[mq_cmd]) { + DRV_RDMA_LOG_DEV_ERR( + "MQ: [%s Error][%s] maj=%#04X min=%#04X\n", + sxe2_mq_cqe_err_list[i].desc, + sxe2_mq_cmd_names[mq_cmd], maj_err_code, + min_err_code); + } else { + DRV_RDMA_LOG_DEV_ERR( + "MQ: [%s Error][Opcode Err] maj=%#04X min=%#04X\n", + sxe2_mq_cqe_err_list[i].desc, + maj_err_code, min_err_code); + } + ret_code = false; + goto end; + } + } + + for (i = 0; i < ARRAY_SIZE(sxe2_mq_cqe_not_err_list); ++i) { + if (maj_err_code == sxe2_mq_cqe_not_err_list[i].maj && + min_err_code == sxe2_mq_cqe_not_err_list[i].min && + error == false) { + if (sxe2_mq_cmd_names[mq_cmd]) { + DRV_RDMA_LOG_DEV_INFO( + "MQ: [%s NotError][%s] maj=%#04X min=%#04X\n", + sxe2_mq_cqe_not_err_list[i].desc, + sxe2_mq_cmd_names[mq_cmd], maj_err_code, + min_err_code); + } else { + DRV_RDMA_LOG_DEV_INFO( + "MQ: [%s NotError][Opcode Err] maj=%#04X min=%#04X\n", + sxe2_mq_cqe_not_err_list[i].desc, + maj_err_code, min_err_code); + } + ret_code = false; + goto end; + } + } + + if (maj_err_code == 0x0000) { + min_err_l = (u8)(min_err_code & 0xFF); + min_err_h = (u8)((min_err_code >> 8) & 0xFF); + if ((min_err_l == 0x00) && (min_err_h != 0x00) && + (error == false)) { + if (sxe2_mq_cmd_names[mq_cmd]) { + DRV_RDMA_LOG_DEV_INFO("MQ: [STag Valid][%s] maj=%#04X\n" + "\tmin=%#04X, STag key is %#02X\n", + sxe2_mq_cmd_names[mq_cmd], maj_err_code, + min_err_code, min_err_h); + } else { + DRV_RDMA_LOG_DEV_INFO("MQ: [STag Valid][Opcode Err]\n" + "\tmaj=%#04X min=%#04X, STag key is %#02X\n", + maj_err_code, min_err_code, min_err_h); + } + ret_code = false; + } + } + +end: + return ret_code; +} + +int mq_kget_mcqe_info(struct sxe2_rdma_ctx_cq *mcq, + struct sxe2_mcq_cqe_info *info) +{ + u64 temp; + struct mcq_cqe *cqe = NULL; + struct sxe2_mq_ctx *mq; + u32 wqe_idx; + u8 polarity = 0; + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mcq->dev); + + cqe = (struct mcq_cqe *)SXE2_GET_CURRENT_CQ_ELEM(&mcq->cq_uk); + + polarity = (u8)(cqe->CQE_valid); + if (polarity != mcq->cq_uk.polarity) { + ret_code = -ENOENT; + goto end; + } + + dma_rmb(); + + mq = (struct sxe2_mq_ctx *)(unsigned long)(cqe->MQ_completion_context); + + info->error = (bool)cqe->error; + info->maj_err_code = (u16)cqe->major_error_code; + info->min_err_code = (u16)cqe->minor_error_code; + + wqe_idx = (u32)cqe->MQ_desc_index; + info->scratch = mq->scratch_array[wqe_idx]; + + info->op_ret_val = (u32)cqe->operation_return_value; + + get_64bit_val(mq->mq_buf_va[wqe_idx].elem, SXE2_MQ_WQE_OPCODE_OFFSET, + &temp); + info->op_code = 
(u8)FIELD_GET(SXE2_MQ_WQE_OPCODE_BITS, temp); + info->mq = mq; + mq->dev->mq_cmd_stats[info->op_code]++; + + SXE2_RING_MOVE_HEAD(mcq->cq_uk.cq_ring, ret_code); + if (!SXE2_RING_CURRENT_HEAD(mcq->cq_uk.cq_ring)) + mcq->cq_uk.polarity ^= 1; + + SXE2_RING_MOVE_TAIL(mcq->cq_uk.cq_ring); + set_32bit_val(mcq->cq_uk.doorbell_note, MCQ_DB_NOTE_OFFSET_0, + SXE2_RING_CURRENT_HEAD(mcq->cq_uk.cq_ring)); + + dma_wmb(); + + SXE2_RING_MOVE_TAIL(mq->mq_ring); + atomic64_inc(&mq->completed_ops); + + if (mq_kcheck_cqe_err(mcq->dev, info->op_code, info->error, + info->maj_err_code, info->min_err_code) && + (info->error == true)) { + DRV_RDMA_LOG_DEV_ERR( + "Not Match Standard errcode:mq opcode = %#x, error = %d,\n" + "\tmaj_err_code = %#04X, min_err_code = %#04X\n", + info->op_code, info->error, info->maj_err_code, + info->min_err_code); + } + +end: + return ret_code; +} + +static void mq_kcomplete_mq_request(struct sxe2_mq *mq, + struct sxe2_mq_request *mq_request) +{ + WRITE_ONCE(mq_request->request_done, true); + + if (mq_request->waiting) + wake_up(&mq_request->waitq); + else if (mq_request->callback_fcn) + mq_request->callback_fcn(mq_request); + + sxe2_kput_mq_request(mq, mq_request); +} + +static void *mq_kremove_cmd_head(struct sxe2_rdma_ctx_dev *dev) +{ + struct list_head *entry = NULL; + struct list_head *list = &dev->mq_cmd_head; + + if (list_empty(list)) + goto end; + + entry = list->next; + list_del(entry); + +end: + return entry; +} + +int mq_kprocess_remaining_cmd(struct sxe2_rdma_ctx_dev *dev) +{ + int ret_code = 0; + struct mq_cmds_info *pcmdinfo; + unsigned long flags = 0; + + spin_lock_irqsave(&dev->mq_lock, flags); + while (!list_empty(&dev->mq_cmd_head) && + !mq_kquery_ring_full(dev->mq)) { + pcmdinfo = (struct mq_cmds_info *)mq_kremove_cmd_head(dev); + ret_code = mq_kexec_cmd(dev, pcmdinfo); + if (ret_code) + break; + } + spin_unlock_irqrestore(&dev->mq_lock, flags); + return ret_code; +} + +void mq_karm_mcq(struct sxe2_rdma_ctx_cq *mcq) +{ + u32 temp_val = 0; + u32 arm_ci = 0; + u8 ori_cmd_sn = 0; + u8 cmd_sn = 0; + u64 doorbell = 0; + + get_32bit_val(mcq->cq_uk.doorbell_note, MCQ_DB_NOTE_OFFSET_4, + &temp_val); + arm_ci = + (u32)(SXE2_RING_CURRENT_HEAD(mcq->cq_uk.cq_ring) & INVALID_U32); + ori_cmd_sn = (u8)FIELD_GET(SXE2_CQ_DBNOTE_CMDSN, temp_val); + cmd_sn = (++ori_cmd_sn) % MCQ_DB_NOTE_CMDSN_MASK; + temp_val = FIELD_PREP(SXE2_CQ_DBNOTE_ARMCI, arm_ci) | + FIELD_PREP(SXE2_CQ_DBNOTE_CMDSN, cmd_sn) | + FIELD_PREP(SXE2_CQ_DBNOTE_CMD, MCQ_DB_NOTE_CMD_VALUE); + set_32bit_val(mcq->cq_uk.doorbell_note, MCQ_DB_NOTE_OFFSET_4, temp_val); + + dma_wmb(); + + doorbell = + (((u64)temp_val) << MQ_BITS_PER_INT) | ((u64)mcq->cq_uk.cq_id); + set_64bit_val(mcq->cq_uk.cqe_alloc_db, 0, doorbell); +} + +void sxe2_khandler_mcqe(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_ctx_cq *mcq, bool flag) +{ + struct sxe2_mq_request *mq_request; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + u32 cqe_count = 0; + struct sxe2_mcq_cqe_info info; + unsigned long flags = 0; + int ret_code; + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + +#ifdef SXE2_CFG_DEBUG + if (rdma_dev->rdma_func->mq.mcqe_ignore) + return; +#endif + + do { + memset(&info, 0, sizeof(info)); + spin_lock_irqsave(&rdma_func->mq.cmpl_lock, flags); + ret_code = mq_kget_mcqe_info(mcq, &info); + spin_unlock_irqrestore(&rdma_func->mq.cmpl_lock, flags); + if (ret_code) + break; + mq_request = (struct sxe2_mq_request *)(uintptr_t)info.scratch; + if (mq_request) { + mq_request->cmpl_info.maj_err_code = info.maj_err_code; + 
mq_request->cmpl_info.min_err_code = info.min_err_code; + mq_request->cmpl_info.op_ret_val = info.op_ret_val; + mq_request->cmpl_info.error = info.error; + mq_kcomplete_mq_request(&rdma_func->mq, mq_request); + } + if (mq_request && mq_request->waiting == false && info.error && + (!rdma_func->reset)) { + if ((info.maj_err_code == MQ_CRIERR_MAJ_ERRCODE) && + ((info.min_err_code == MQ_CRIERR_MQC_NOT_CREATED) || + (info.min_err_code == MQ_CRIERR_MQ_BASE_ERR) || + (info.min_err_code == MQ_CRIERR_MQC_ECC_ERR) || + ((info.min_err_code == + MQ_CRIERR_QP_DESTROY_ABORT)))) { + DRV_RDMA_LOG_DEV_ERR( + "Critical Err:Request Reset,\n" + "\tmaj_err %#04X, min_err %#04X, async mode\n", + info.maj_err_code, info.min_err_code); + rdma_func->reset = true; + rdma_func->gen_ops.request_reset(rdma_func); + } + } + cqe_count++; + } while (1); + + if (cqe_count) + mq_kprocess_remaining_cmd(dev); + + if (flag) + mq_karm_mcq(dev->mcq); +} + +void mq_kcheck_progress(struct sxe2_mq_timeout *timeout, + struct sxe2_rdma_ctx_dev *dev) +{ + u64 completed_ops = atomic64_read(&dev->mq->completed_ops); + + if (timeout->cmpl_mq_cmds != completed_ops) { + timeout->cmpl_mq_cmds = completed_ops; + timeout->count = 0; + } else if (timeout->cmpl_mq_cmds != dev->mq->requested_ops) { + timeout->count++; + } +} + +int sxe2_kwait_event(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_mq_request *mq_request) +{ + struct sxe2_mq_timeout mq_timeout = {}; + bool mq_error = false; + u16 maj_err = 0; + u16 min_err = 0; + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + int wait_time_ms = (int)rdma_func->ctx_dev.hw_attrs + .max_mq_compl_wait_time_ms; + struct mq_cmds_info *info = &mq_request->info; + bool hw_rsrc_clean = false; + + mq_timeout.cmpl_mq_cmds = + (u64)atomic64_read(&rdma_func->ctx_dev.mq->completed_ops); + do { + hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(&rdma_func->ctx_dev); + if (rdma_func->reset && hw_rsrc_clean) { + ret_code = (info->destroy ? 
0 : -EBUSY); + goto end; + } + if (rdma_func->reset && !hw_rsrc_clean && !info->destroy) { + ret_code = -EBUSY; + goto end; + } + + sxe2_khandler_mcqe(rdma_func, &rdma_func->mcq.ctx_cq, false); + if (wait_event_timeout(mq_request->waitq, + READ_ONCE(mq_request->request_done), + msecs_to_jiffies(wait_time_ms))) { + break; + } + + mq_kcheck_progress(&mq_timeout, &rdma_func->ctx_dev); + + if (mq_timeout.count < MQ_TIMEOUT_THRESHOLD) + continue; + if (!rdma_func->reset) { + DRV_RDMA_LOG_DEV_ERR("Timeout Handle:Request Reset\n"); + rdma_func->reset = true; + rdma_func->gen_ops.request_reset(rdma_func); + } + ret_code = -ETIMEDOUT; + goto end; + } while (1); + + if (mq_request->cmpl_info.error) + mq_error = mq_request->cmpl_info.error; + if (mq_error) { + maj_err = mq_request->cmpl_info.maj_err_code; + min_err = mq_request->cmpl_info.min_err_code; + ret_code = -EIO; + if ((maj_err == MQ_CRIERR_MAJ_ERRCODE) && + ((min_err == MQ_CRIERR_MQC_NOT_CREATED) || + (min_err == MQ_CRIERR_MQ_BASE_ERR) || + (min_err == MQ_CRIERR_MQC_ECC_ERR) || + ((min_err == MQ_CRIERR_QP_DESTROY_ABORT)))) { + if (!rdma_func->reset) { + DRV_RDMA_LOG_DEV_ERR( + "Critical Err:Request Reset\n" + "\t, maj_err %#04X, min_err %#04X\n", + maj_err, min_err); + rdma_func->reset = true; + rdma_func->gen_ops.request_reset(rdma_func); + } + } + } + +end: + return ret_code; +} + +void mq_kfree_mq_request(struct sxe2_mq *mq, struct sxe2_mq_request *mq_request) +{ + unsigned long flags = 0; + + if (mq_request->dynamic) { + kfree(mq_request); + } else { + WRITE_ONCE(mq_request->request_done, false); + mq_request->callback_fcn = NULL; + mq_request->waiting = false; + + spin_lock_irqsave(&mq->req_lock, flags); + list_add_tail(&mq_request->list, &mq->mq_avail_reqs); + spin_unlock_irqrestore(&mq->req_lock, flags); + } + wake_up(&mq->remove_wq); +} + +void sxe2_kput_mq_request(struct sxe2_mq *mq, + struct sxe2_mq_request *mq_request) +{ + if (refcount_dec_and_test(&mq_request->refcnt)) + mq_kfree_mq_request(mq, mq_request); +} + +int sxe2_khandle_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_mq_request *mq_request) +{ + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct mq_cmds_info *info = &mq_request->info; + int ret_code = 0; + bool put_mq_request = true; + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + bool hw_rsrc_clean = false; + unsigned long flags = 0; + + hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(dev); + if (rdma_func->reset && hw_rsrc_clean) { + ret_code = (info->destroy ? 
0 : -EBUSY); + goto end; + } + if (rdma_func->reset && !hw_rsrc_clean && !info->destroy) { + ret_code = -EBUSY; + goto end; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[info->mq_cmd] && + !info->destroy) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + info->mq_cmd); + ret_code = -EBUSY; + goto end; + } +#endif + + sxe2_kget_mq_request(mq_request); + + spin_lock_irqsave(&dev->mq_lock, flags); + if (list_empty(&dev->mq_cmd_head) && !mq_kquery_ring_full(dev->mq)) + ret_code = mq_kexec_cmd(dev, info); + else { + DRV_RDMA_LOG_DEV_INFO( + "MQ list is full or cmd_list empty:buf %d, list %d\n", + mq_kquery_ring_full(dev->mq), + list_empty(&dev->mq_cmd_head)); + list_add_tail(&info->mq_cmd_entry, &dev->mq_cmd_head); + } + spin_unlock_irqrestore(&dev->mq_lock, flags); + if (ret_code) + goto err; + + if (mq_request->waiting) { + put_mq_request = false; + ret_code = sxe2_kwait_event(rdma_func, mq_request); + if (ret_code) + goto err; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[info->mq_cmd] && info->destroy && + !ret_code) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + info->mq_cmd); + ret_code = -EBUSY; + goto end; + } +#endif + + goto end; + +err: + if (put_mq_request) + sxe2_kput_mq_request(&rdma_func->mq, mq_request); +end: + return ret_code; +} + +int sxe2_kpoll_mq_registers(struct sxe2_mq_ctx *mq, u32 tail, u32 count) +{ + u32 i = 0; + u32 newtail, error, val; + u32 err_code; + u16 maj_err_code, min_err_code; + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + struct mq_wqe_nop *wqe = NULL; + u32 cmd_tail = 0; + + while (i++ < count) { + sxe2_kget_mq_reg_info(mq, &val, &newtail, &error); + + if (newtail != tail) { + SXE2_RING_MOVE_TAIL(mq->mq_ring); + atomic64_inc(&mq->completed_ops); + if (mq->mq_ring.tail > 0) + cmd_tail = mq->mq_ring.tail - 1; + else + cmd_tail = mq->mq_ring.size - 1; + + wqe = (struct mq_wqe_nop *)(mq->mq_buf_va[cmd_tail] + .elem); + mq->dev->mq_cmd_stats[wqe->op]++; + + err_code = + SXE2_BAR_READ_32(mq->dev->hw_regs[MQ_ERRCODES]); + min_err_code = (u16)(err_code & 0xFFFF); + maj_err_code = (u16)((err_code >> 16) & 0xFFFF); + if (mq_kcheck_cqe_err(mq->dev, (u8)wqe->op, (bool)error, + maj_err_code, min_err_code) && + (error == true)) { + DRV_RDMA_LOG_DEV_ERR( + "Not Match Standard errcode:mq opcode = %#x, error = %d,\n" + "\tmaj_err_code = %#04X, min_err_code = %#04X\n", + (u8)wqe->op, error, maj_err_code, + min_err_code); + } + if (error) + ret_code = -EIO; + goto end; + } + udelay(mq->dev->hw_attrs.max_sleep_count); + } + + ret_code = -ETIMEDOUT; + DRV_RDMA_LOG_DEV_ERR("MQ: MQ Poll Reg timeout, ret_code %d\n", + ret_code); + +end: + return ret_code; +} + +int sxe2_kpoll_mcq(struct sxe2_mq_ctx *mq, u8 op_code, + struct sxe2_mcq_cqe_info *cmpl_info) +{ + struct sxe2_mcq_cqe_info info = {}; + struct sxe2_rdma_ctx_cq *mcq; + int ret_code = 0; + u32 cnt = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + unsigned long flags = 0; + + mcq = mq->dev->mcq; + while (1) { + if (cnt++ > mq->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + goto end; + } + + spin_lock_irqsave(&rdma_dev->rdma_func->mq.cmpl_lock, flags); + ret_code = mq_kget_mcqe_info(mcq, &info); + spin_unlock_irqrestore(&rdma_dev->rdma_func->mq.cmpl_lock, flags); + if (ret_code) { + udelay(mq->dev->hw_attrs.max_sleep_count); + continue; + } + + if (info.error) + ret_code = -EIO; + + if (op_code == 
info.op_code) + goto cmpl_end; + + ret_code = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "WQE: opcode mismatch for my op code %#x, returned opcode %#x\n", + op_code, info.op_code); + } + +cmpl_end: + if (cmpl_info) + memcpy(cmpl_info, &info, sizeof(*cmpl_info)); +end: + return ret_code; +} + +void sxe2_kwork_mq_cmpl(struct work_struct *work) +{ + struct sxe2_rdma_pci_f *rdma_func = + container_of(work, struct sxe2_rdma_pci_f, mq_cmpl_work); + struct sxe2_rdma_ctx_cq *cq = &rdma_func->mcq.ctx_cq; + + sxe2_khandler_mcqe(rdma_func, cq, true); +} + +int sxe2_mq_kexec_nop_op(struct sxe2_rdma_device *rdma_dev, bool post, u32 wait) +{ + int ret_code; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + bool wait_type; + + if (wait == (u32)SXE2_MQ_WAIT_CQE) { + wait_type = true; + } else if (wait == (u32)SXE2_MQ_WAIT_EVENT) { + wait_type = false; + } else { + ret_code = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("inv wait_type param %d, ret %d\n", wait, + ret_code); + goto end; + } + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, wait_type); + if (!mq_request) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("Get mq_request failed, ret %d\n", + ret_code); + goto end; + } + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_NOP; + + if (post) + mq_info->post_mq = 1; + else + mq_info->post_mq = 0; + + mq_info->in.u.nop.wait_type = wait; + mq_info->in.u.nop.mq = &rdma_func->mq.mq; + mq_info->in.u.nop.scratch = (uintptr_t)mq_request; + ret_code = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret_code) + DRV_RDMA_LOG_DEV_ERR("MQ NOP cmd failed, ret %d\n", ret_code); + +end: + return ret_code; +} + +static int mq_kinit_context(struct sxe2_mq_ctx *mq, + struct sxe2_mq_init_info *info) +{ + u8 hw_mq_size; + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + + if (info->mqe_count > SXE2_MQE_COUNT_2048 || + info->mqe_count < SXE2_MQE_COUNT_4 || + ((info->mqe_count & (info->mqe_count - 1)))) { + ret_code = -EINVAL; + goto end; + } + + hw_mq_size = + sxe2_kget_encoded_wqe_size(info->mqe_count, SXE2_QUEUE_TYPE_MQ); + + mq->size = sizeof(*mq); + mq->mqe_count = info->mqe_count; + mq->hw_mq_size = hw_mq_size; + mq->mq_buf_va = info->mq_buf_va; + mq->mq_ctx_va = info->mq_ctx_va; + mq->mq_buf_pa = info->mq_buf_pa; + mq->mq_ctx_pa = info->mq_ctx_pa; + mq->dev = info->dev; + mq->protocol_used = info->protocol_used; + mq->struct_ver = info->struct_ver; + mq->hw_maj_ver = info->hw_maj_ver; + mq->hw_min_ver = info->hw_min_ver; + mq->scratch_array = info->scratch_array; + mq->polarity = 0; + mq->ceqs_per_vf = info->ceqs_per_vf; + mq->rocev2_rto_policy = info->rocev2_rto_policy; + mq->ena_vf_count = info->ena_vf_count; + mq->rcms_profile = info->rcms_profile; + mq->en_rem_endpoint_trk = info->en_rem_endpoint_trk; + + info->dev->mq = mq; + + SXE2_RING_INIT(mq->mq_ring, mq->mqe_count); + mq->requested_ops = 0; + atomic64_set(&mq->completed_ops, 0); + INIT_LIST_HEAD(&mq->dev->mq_cmd_head); + + DRV_RDMA_LOG_DEV_INFO( + "WQE: mq_size[%04d] hw_mq_size[%04d] mq_base[%p]\n" + "\tmq_pa[%pK] mq[%p] polarity[x%04x]\n", + mq->mqe_count, mq->hw_mq_size, mq->mq_buf_va, + (u64 *)(uintptr_t)mq->mq_buf_pa, mq, mq->polarity); + +end: + return ret_code; +} + +static void mq_mqc_init(struct sxe2_mq_ctx *mq, struct sxe2_mqc *mqc, + struct sxe2_rdma_device *rdma_dev) +{ + memset(mqc, 0, sizeof(*mqc)); + mqc->RoCEv2_RTO_policy = mq->rocev2_rto_policy; + mqc->protocol_used = 
mq->protocol_used; + mqc->SQ_size = mq->hw_mq_size; + mqc->struct_version = mq->struct_ver; + mqc->num_CEQs_per_VF = mq->ceqs_per_vf; + + mqc->SQ_base = (mq->mq_buf_pa >> SXE2_MQC_SQ_BASE_OFFSET); + + DRV_RDMA_LOG_DEBUG_BDF("Create MQ:mq_buf_pa %#llx, mqc.SQ_base %#llx\n", + mq->mq_buf_pa, mqc->SQ_base); + + mqc->rcms_profile_type = mq->rcms_profile; + mqc->PE_enabled_vf_count = mq->ena_vf_count; + mqc->remote_endpoint_trk_en = mq->en_rem_endpoint_trk; + +#ifndef SXE2_NATIVE_CPUID_NOT_SUPPORT + if (rdma_dev->rdma_func->hygon_cpu_en == (u8)SXE2_RDMA_HYGON_DEFAULT) { + if (true == sxe2_rdma_get_cpu_vendor(rdma_dev)) { + mqc->cache_line_64byte_en = MQC_64B_CACHE_LINE_EN; + rdma_dev->cache_line_64_en = true; + } else { + mqc->cache_line_64byte_en = MQC_64B_CACHE_LINE_NO_EN; + rdma_dev->cache_line_64_en = false; + } + } else if (rdma_dev->rdma_func->hygon_cpu_en == + (u8)SXE2_RDMA_HYGON_FORCE_ENABLE) { + mqc->cache_line_64byte_en = MQC_64B_CACHE_LINE_EN; + rdma_dev->cache_line_64_en = true; + } else if (rdma_dev->rdma_func->hygon_cpu_en == + (u8)SXE2_RDMA_HYGON_FORCE_DISABLE) { + mqc->cache_line_64byte_en = MQC_64B_CACHE_LINE_NO_EN; + rdma_dev->cache_line_64_en = false; + } +#else + mqc->cache_line_64byte_en = MQC_64B_CACHE_LINE_NO_EN; + rdma_dev->cache_line_64_en = false; +#endif + DRV_RDMA_LOG_DEBUG_BDF( + "MQC HYGON CPU:cfg_param:%#x, mqc.cache_line_64byte_en:%#llx,\n" + "\trdma_dev->cache_line_64_en %d\n", + rdma_dev->rdma_func->hygon_cpu_en, mqc->cache_line_64byte_en, + rdma_dev->cache_line_64_en); + + mqc->QP_completion_context = (uintptr_t)mq; + mqc->hw_minor_version = mq->hw_min_ver; + mqc->hw_major_version = mq->hw_maj_ver; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_size", + &rdma_dev->rdma_func->mq.err_val, mqc, mq); + + INJECT_START(rdma_dev->rdma_func, "mq_base", + &rdma_dev->rdma_func->mq.err_val, mqc, mq); + + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_size"); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_base"); +#endif + + memcpy(mq->mq_ctx_va, mqc, sizeof(*mqc)); + DRV_RDMA_LOG_INFO_BDF( + "mqc : ena_fine_grained_timers %#x, RoCEv2_RTO_policy %#x\n" + "mqc : protocol_used %#x, SQ_size %#x, struct_version %#x\n" + "mqc : num_CEQs_per_VF %#x, SQ_base %#llx, rcms_profile_type %#x\n" + "mqc : remote_endpoint_trk_en %#x, rdpa_assist %#x,\n" + "cache_line_64byte_en %#x, PE_enabled_vf_count %#x\n" + "mqc : QP_completion_context %#llx, hw_minor_ver %#x, hw_major_ver\n" + "%#x\n" + "mqc : dcqcn_min_rate %#x, dcqcn_min_dec_factor %#x, dcqcn_T %#x\n" + "mqc : cc_hai_factor %#x, cc_rai_factor %#x, dcqcn_B %#x, dcqcn_F %#x\n" + "mqc : cc_cfg_valid %#x, dcqcn_rreduce_mperiod %#x\n", + mqc->enable_fine_grained_timers, mqc->RoCEv2_RTO_policy, + mqc->protocol_used, mqc->SQ_size, mqc->struct_version, + mqc->num_CEQs_per_VF, mqc->SQ_base, mqc->rcms_profile_type, + mqc->remote_endpoint_trk_en, mqc->rdpa_assist, + mqc->cache_line_64byte_en, mqc->PE_enabled_vf_count, + mqc->QP_completion_context, mqc->hw_minor_version, + mqc->hw_major_version, mqc->dcqcn_min_rate, + mqc->dcqcn_min_dec_factor, mqc->dcqcn_T, mqc->cc_hai_factor, + mqc->cc_rai_factor, mqc->dcqcn_B, mqc->dcqcn_F, + mqc->cc_cfg_valid, mqc->dcqcn_rreduce_mperiod); +} + +int mq_kcreate_context(struct sxe2_mq_ctx *mq) +{ + u32 cnt = 0, p1, p2, val = 0; + u32 err_bit, err_code = 0; + u32 mqc_vld = SXE2_MQC_ADDR_VLD_SET; + size_t i = 0; + struct sxe2_mqc mqc; + int ret_code = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + u32 mqc_cnt = 0; + +#if defined(SXE2_CFG_DEBUG) && 
defined(SXE2_SUPPORT_INJECT)
+	INJECT_START(rdma_dev->rdma_func, "mq_creat",
+		     &rdma_dev->rdma_func->mq.err_val);
+
+	if (rdma_dev->rdma_func->mq.err_val.mqc_ignore_vld) {
+		DRV_RDMA_LOG_DEV_DEBUG(
+			"MQ DEBUGFS:dbg_mqc_ignore_vld:%#x\n",
+			rdma_dev->rdma_func->mq.err_val.mqc_ignore_vld);
+		return 0;
+	}
+#endif
+
+	mq->fptebuf.size = ALIGN(SXE2_UPDATE_FPT_BUFF_SIZE * mq->mqe_count,
+				 SXE2_FPT_BUF_ALIGNMENT);
+	mq->fptebuf.va =
+		dma_alloc_coherent(mq->dev->hw->device, mq->fptebuf.size,
+				   &mq->fptebuf.pa, GFP_KERNEL);
+	if (!mq->fptebuf.va) {
+		ret_code = -ENOMEM;
+		goto end;
+	}
+	memset(mq->fptebuf.va, 0, mq->fptebuf.size);
+	spin_lock_init(&mq->dev->mq_lock);
+	mq_mqc_init(mq, &mqc, rdma_dev);
+	print_hex_dump_debug("WQE: MQ_HOST_CTX WQE", DUMP_PREFIX_OFFSET,
+			     SXE2_PRINT_HEX_BYTE_PER_ROW,
+			     SXE2_PRINT_HEX_BREAK_PER_BYTE, mq->mq_ctx_va,
+			     SXE2_MQ_CTX_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8,
+			     false);
+#ifdef SXE2_CFG_DEBUG
+	DATA_DUMP(mq->mq_ctx_va, sizeof(mqc), "create mqc");
+#endif
+	p1 = (u32)(mq->mq_ctx_pa >> MQ_BITS_PER_INT);
+	p2 = (u32)(mq->mq_ctx_pa);
+
+	for (mqc_cnt = 0; mqc_cnt < SXE2_MQ_CTX_SIZE; mqc_cnt++) {
+		DRV_RDMA_LOG_DEV_DEBUG("Create MQ:mq->mq_ctx_va[%#x]:%#llx\n",
+				       mqc_cnt, mq->mq_ctx_va[mqc_cnt]);
+	}
+	DRV_RDMA_LOG_DEV_DEBUG("Create MQ:va %p, pa %#llx, p1 %#x, p2 %#x\n",
+			       mq->mq_ctx_va, mq->mq_ctx_pa, p1, p2);
+
+#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT)
+	INJECT_START(rdma_dev->rdma_func, "mq_dma_err",
+		     &rdma_dev->rdma_func->mq.err_val, &p1, &p2, mq);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_dma_err");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_duplicate_creat", &p1, &p2, mq);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_duplicate_creat");
+#endif
+
+	SXE2_BAR_WRITE_32(p1, mq->dev->hw_regs[MQC_ADDR_HIGH]);
+	SXE2_BAR_WRITE_32(p2, mq->dev->hw_regs[MQC_ADDR_LOW]);
+	SXE2_BAR_WRITE_32(mqc_vld, mq->dev->hw_regs[MQC_ADDR_VLD]);
+
+	do {
+		if (cnt++ > mq->dev->hw_attrs.max_done_count) {
+			ret_code = -ETIMEDOUT;
+			DRV_RDMA_LOG_DEV_ERR(
+				"Create MQ:wait reg timeout, ret %d\n",
+				ret_code);
+			goto err;
+		}
+		udelay(mq->dev->hw_attrs.max_sleep_count);
+		val = SXE2_BAR_READ_32(mq->dev->hw_regs[MQ_STATUS]);
+	} while (!val);
+
+	err_bit = FIELD_GET(SXE2_RDMA_MQ_STATUS_ERR, val);
+	if (err_bit) {
+		/* any error bit means creation failed, even when the error
+		 * code is not in the known list; do not fall through to the
+		 * success path
+		 */
+		ret_code = -EOPNOTSUPP;
+		err_code = FIELD_GET(SXE2_RDMA_MQ_STATUS_ERRCODE, val);
+		for (i = 0; i < ARRAY_SIZE(sxe2_mq_ctx_err_list); ++i) {
+			if (err_code == sxe2_mq_ctx_err_list[i].err) {
+				DRV_RDMA_LOG_DEV_ERR(
+					"Create MQ: [%s Error] err=%#x\n",
+					sxe2_mq_ctx_err_list[i].desc, err_code);
+				goto err;
+			}
+		}
+		DRV_RDMA_LOG_DEV_ERR("Create MQ: [Unknown Error] err=%#x\n",
+				     err_code);
+		goto err;
+	}
+	mq->process_mq_fpt = sxe2_rcms_update_fptes_cmd;
+
+	goto end;
+
+err:
+	dma_free_coherent(mq->dev->hw->device, mq->fptebuf.size, mq->fptebuf.va,
+			  mq->fptebuf.pa);
+	mq->fptebuf.va = NULL;
+end:
+	return ret_code;
+}
+
+static int mq_kdestroy_context(struct sxe2_mq_ctx *mq, bool free_hwmq)
+{
+	u32 cnt = 0, val = 0;
+	u32 mqc_vld = SXE2_MQC_ADDR_VLD_SET;
+	int ret_code = 0;
+
+	if (free_hwmq) {
+		SXE2_BAR_WRITE_32(0, mq->dev->hw_regs[MQC_ADDR_HIGH]);
+		SXE2_BAR_WRITE_32(0, mq->dev->hw_regs[MQC_ADDR_LOW]);
+		SXE2_BAR_WRITE_32(mqc_vld, mq->dev->hw_regs[MQC_ADDR_VLD]);
+		do {
+			if (cnt++ > mq->dev->hw_attrs.max_done_count) {
+				ret_code = -ETIMEDOUT;
+				break;
+			}
+			udelay(mq->dev->hw_attrs.max_sleep_count);
+			val = SXE2_BAR_READ_32(mq->dev->hw_regs[MQ_STATUS]);
+		} while (!(FIELD_GET(SXE2_RDMA_MQ_STATUS_DONE, val)));
+	}
+
+	dma_free_coherent(mq->dev->hw->device, mq->fptebuf.size, mq->fptebuf.va,
+			  mq->fptebuf.pa);
+	mq->fptebuf.va = NULL;
+	return
ret_code; +} + +static int mq_info_init(struct sxe2_mq_init_info *mq_init_info, + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_pci_f *rdma_func, u32 mqe_count, + struct sxe2_rdma_dma_mem mem) +{ + int ret_code = 0; + struct sxe2_mq *mq = &rdma_func->mq; + + mq_init_info->dev = dev; + mq_init_info->mqe_count = mqe_count; + mq_init_info->mq_buf_va = mq->mq_buf.va; + mq_init_info->mq_buf_pa = mq->mq_buf.pa; + mq_init_info->mq_ctx_pa = mem.pa; + mq_init_info->mq_ctx_va = mem.va; + + mq_init_info->rcms_profile = rdma_func->rsrc_profile; + mq_init_info->ena_vf_count = rdma_func->max_rdma_vfs; + mq_init_info->scratch_array = mq->scratch_array; + mq_init_info->protocol_used = rdma_func->protocol_used; + mq_init_info->en_rem_endpoint_trk = rdma_func->en_rem_endpoint_trk; + mq_init_info->hw_maj_ver = SXE2_HW_MAJVER_GEN_1; + + ret_code = mq_kinit_context(dev->mq, mq_init_info); + if (ret_code) + DRV_RDMA_LOG_ERROR("ERR: mq init status %d\n", ret_code); + + return ret_code; +} + +int sxe2_kcreate_mq(struct sxe2_rdma_device *rdma_dev) +{ + u32 mqe_count = SXE2_MQE_COUNT_2048; + struct sxe2_rdma_dma_mem mem; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_mq_init_info mq_init_info = {}; + struct sxe2_mq *mq = &rdma_func->mq; + u32 i = 0; + int ret_code = 0; + bool free_hwmq = true; + bool hw_rsrc_clean = false; + int err_ret_code = 0; + + mq->mq_requests = + kcalloc(mqe_count, sizeof(*mq->mq_requests), GFP_KERNEL); + if (!mq->mq_requests) { + ret_code = -ENOMEM; + goto end; + } + + mq->scratch_array = + kcalloc(mqe_count, sizeof(*mq->scratch_array), GFP_KERNEL); + if (!mq->scratch_array) { + ret_code = -ENOMEM; + goto err_scratch; + } + + dev->mq = &mq->mq; + dev->mq->dev = dev; + + mq->mq_buf.size = ALIGN(sizeof(struct sxe2_mq_wqe) * mqe_count, + SXE2_MQ_ALIGNMENT); + mq->mq_buf.va = dma_alloc_coherent(dev->hw->device, mq->mq_buf.size, + &mq->mq_buf.pa, GFP_KERNEL); + if (!mq->mq_buf.va) { + ret_code = -ENOMEM; + goto err_mq; + } + memset(mq->mq_buf.va, 0, mq->mq_buf.size); + + ret_code = sxe2_kget_aligned_mem(rdma_func, &mem, + sizeof(struct sxe2_mq_context), + SXE2_HOST_CTX_ALIGNMENT_M); + if (ret_code) + goto err_ctx; + + dev->mq->mq_ctx_pa = mem.pa; + dev->mq->mq_ctx_va = mem.va; + DRV_RDMA_LOG_DEBUG_BDF( + "CREATE MQ:mq_buf.va %p, mq_buf.pa %#llx mq_ctx.va %p, mq_ctx.pa %#llx\n", + (u64 *)mq->mq_buf.va, (u64)mq->mq_buf.pa, (u64 *)mem.va, + (u64)mem.pa); + + ret_code = mq_info_init(&mq_init_info, dev, rdma_func, mqe_count, mem); + if (ret_code) + goto err_ctx; + + spin_lock_init(&mq->req_lock); + spin_lock_init(&mq->cmpl_lock); + + ret_code = mq_kcreate_context(dev->mq); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("ERR: mq create failed - ret_code %d\n", + ret_code); + goto err_ctx; + } + + INIT_LIST_HEAD(&mq->mq_avail_reqs); + + for (i = 0; i < mqe_count; i++) { + init_waitqueue_head(&mq->mq_requests[i].waitq); + list_add_tail(&mq->mq_requests[i].list, &mq->mq_avail_reqs); + } + init_waitqueue_head(&mq->remove_wq); + + ret_code = sxe2_kadd_mq_rsc_debugfs(rdma_dev, mq); + if (unlikely(ret_code)) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: failed adding MQ to debug file system, ret %d\n", + ret_code); + hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(dev); + if (rdma_func->reset && hw_rsrc_clean) + free_hwmq = false; + + err_ret_code = mq_kdestroy_context(dev->mq, free_hwmq); + if (unlikely(err_ret_code)) + DRV_RDMA_LOG_DEV_ERR( + "ERR: debugfs err, and destroy MQ failed %d\n", + err_ret_code); + goto err_ctx; + } + + goto end; + 
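+	/* error unwind: release in the reverse order of allocation
+	 * (MQ ring buffer, then scratch array, then request pool)
+	 */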
+err_ctx:
+	dma_free_coherent(dev->hw->device, mq->mq_buf.size, mq->mq_buf.va,
+			  mq->mq_buf.pa);
+	mq->mq_buf.va = NULL;
+err_mq:
+	kfree(mq->scratch_array);
+	mq->scratch_array = NULL;
+err_scratch:
+	kfree(mq->mq_requests);
+	mq->mq_requests = NULL;
+end:
+	return ret_code;
+}
+
+static void mq_kfree_pending_mq_request(struct sxe2_mq *mq,
+					struct sxe2_mq_request *mq_request)
+{
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->mq.dev);
+
+	mq_request->cmpl_info.error = true;
+	WRITE_ONCE(mq_request->request_done, true);
+
+	if (mq_request->waiting)
+		wake_up(&mq_request->waitq);
+
+	/* wait up to 1000 jiffies for the last reference to drop */
+	wait_event_timeout(mq->remove_wq,
+			   refcount_read(&mq_request->refcnt) == 1, 1000);
+	sxe2_kput_mq_request(mq, mq_request);
+
+	DRV_RDMA_LOG_DEV_DEBUG("DESTROY MQ:free mq_request opcode %#x\n",
+			       mq_request->info.mq_cmd);
+}
+
+static void mq_kcleanup_pending_op(struct sxe2_rdma_pci_f *rdma_func)
+{
+	struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev;
+	struct sxe2_mq *mq = &rdma_func->mq;
+	struct sxe2_mq_request *mq_request = NULL;
+	struct mq_cmds_info *pcmdinfo = NULL;
+	u32 i, pending_work, wqe_idx;
+
+	pending_work = SXE2_RING_USED_QUANTA(mq->mq.mq_ring);
+	wqe_idx = SXE2_RING_CURRENT_TAIL(mq->mq.mq_ring);
+
+	for (i = 0; i < pending_work; i++) {
+		mq_request = (struct sxe2_mq_request *)(uintptr_t)
+				     mq->scratch_array[wqe_idx];
+		if (mq_request)
+			mq_kfree_pending_mq_request(mq, mq_request);
+		wqe_idx = (wqe_idx + 1) % SXE2_RING_SIZE(mq->mq.mq_ring);
+	}
+
+	while (!list_empty(&dev->mq_cmd_head)) {
+		pcmdinfo = (struct mq_cmds_info *)mq_kremove_cmd_head(dev);
+		if (!pcmdinfo)
+			break;
+		mq_request =
+			container_of(pcmdinfo, struct sxe2_mq_request, info);
+		mq_kfree_pending_mq_request(mq, mq_request);
+	}
+}
+
+void sxe2_kdestroy_mq(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev;
+	struct sxe2_mq *mq = &rdma_func->mq;
+	bool free_hwmq = true;
+	int ret_code = 0;
+	bool hw_rsrc_clean = false;
+
+	hw_rsrc_clean = sxe2_get_hw_rsrc_clean_flag(dev);
+	if (rdma_func->reset && hw_rsrc_clean)
+		free_hwmq = false;
+
+	sxe2_kremove_mq_rsc_debugfs(rdma_dev, mq);
+
+	ret_code = mq_kdestroy_context(dev->mq, free_hwmq);
+	if (ret_code)
+		DRV_RDMA_LOG_DEV_ERR("ERR: Destroy MQ failed %d\n", ret_code);
+	mq_kcleanup_pending_op(rdma_func);
+
+	dma_free_coherent(dev->hw->device, mq->mq_buf.size, mq->mq_buf.va,
+			  mq->mq_buf.pa);
+	mq->mq_buf.va = NULL;
+
+	kfree(mq->scratch_array);
+	mq->scratch_array = NULL;
+	kfree(mq->mq_requests);
+	mq->mq_requests = NULL;
+
+	DRV_RDMA_LOG_DEV_INFO("Destroy MQ over\n");
+}
+
+void sxe2_kuninit_mq_handler(struct sxe2_rdma_device *rdma_dev)
+{
+	if (rdma_dev->rdma_func->mq_cmpl_wq) {
+		destroy_workqueue(rdma_dev->rdma_func->mq_cmpl_wq);
+		rdma_dev->rdma_func->mq_cmpl_wq = NULL;
+	}
+}
+
+int sxe2_kinit_mq_handler(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	int ret_code = 0;
+
+	rdma_func->mq_cmpl_wq =
+		alloc_ordered_workqueue("mq_cmpl_wq", WQ_HIGHPRI | WQ_UNBOUND);
+	if (!rdma_func->mq_cmpl_wq) {
+		ret_code = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("Alloc MQ Cmpl Wq fail, ret_code %d\n",
+				     ret_code);
+		goto end;
+	}
+
+	INIT_WORK(&rdma_func->mq_cmpl_work, sxe2_kwork_mq_cmpl);
+
+	mq_karm_mcq(rdma_func->ctx_dev.mcq);
+
+end:
+	return ret_code;
+}
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e84b1b6f55256abc36936be4a16a7cd49aa9329
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_mq.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef SXE2_DRV_MQ_H
+#define SXE2_DRV_MQ_H
+
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_hw.h"
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
+
+#define MQ_BITS_PER_INT (32)
+#define SXE2_MQC_SQ_BASE_OFFSET (9)
+#define SXE2_MQC_ADDR_VLD_SET (1)
+
+#define SXE2_WQEALLOC_WQE_DESC_INDEX \
+	GENMASK(31, 20)
+#define SXE2_MQTAIL_WQTAIL GENMASK(10, 0)
+#define SXE2_MQTAIL_MQ_OP_ERR BIT(11)
+#define SXE2_MQERRCODES_MQ_MINOR_CODE GENMASK(15, 0)
+#define SXE2_MQERRCODES_MQ_MAJOR_CODE GENMASK(31, 16)
+
+#define SXE2_RDMA_MQ_STATUS_DONE BIT(0)
+#define SXE2_RDMA_MQ_STATUS_ERR BIT(31)
+#define SXE2_RDMA_MQ_STATUS_ERRCODE GENMASK(30, 27)
+
+#define SXE2_MQE_COUNT_4 4
+#define SXE2_MQE_COUNT_2048 2048
+#define SXE2_UPDATE_FPT_BUFF_SIZE 512
+
+enum sxe2_mq_shifts {
+	SXE2_MQ_STATUS_DONE_S,
+	SXE2_MQ_STATUS_ERR_S,
+	SXE2_MQ_MAX_SHIFTS,
+};
+
+enum sxe2_mq_masks {
+	SXE2_MQ_STATUS_DONE_M,
+	SXE2_MQ_STATUS_ERR_M,
+	SXE2_MQ_MAX_MASKS,
+};
+
+#pragma pack(1)
+struct mq_wqe_nop {
+	u64 rsv0[3];
+	u64 rsv1 : 32;
+	u64 op : 6;
+	u64 rsv2 : 25;
+	u64 wqe_valid : 1;
+	u64 rsv3[4];
+};
+#pragma pack()
+
+struct sxe2_mq_context {
+	__le64 buf[SXE2_MQ_CTX_SIZE];
+};
+
+struct sxe2_mq_wqe {
+	__le64 buf[SXE2_MQ_WQE_SIZE];
+};
+
+struct sxe2_mq_quanta {
+	__le64 elem[SXE2_MQ_WQE_SIZE];
+};
+
+struct sxe2_mq_init_info {
+	u64 mq_ctx_pa;
+	u64 mq_buf_pa;
+	struct sxe2_rdma_ctx_dev *dev;
+	struct sxe2_mq_quanta *mq_buf_va;
+	__le64 *mq_ctx_va;
+	u64 *scratch_array;
+	u32 mqe_count;
+	u16 hw_maj_ver;
+	u16 hw_min_ver;
+	u8 struct_ver;
+	u8 rcms_profile;
+	u8 ena_vf_count;
+	u8 ceqs_per_vf;
+	bool rocev2_rto_policy : 1;
+	bool en_rem_endpoint_trk : 1;
+	enum sxe2_protocol_used protocol_used;
+};
+
+struct sxe2_mq_ctx_err_code {
+	u32 err;
+	const char *desc;
+};
+
+struct sxe2_mq_err_info {
+	u16 maj;
+	u16 min;
+	const char *desc;
+};
+
+struct sxe2_mq_cmpl_info {
+	u32 op_ret_val;
+	u16 maj_err_code;
+	u16 min_err_code;
+	bool error;
+	u8 op_code;
+};
+
+struct sxe2_mcq_cqe_info {
+	struct sxe2_mq_ctx *mq;
+	u64 scratch;
+	u32 op_ret_val;
+	u16 maj_err_code;
+	u16 min_err_code;
+	u8 op_code;
+	bool error : 1;
+};
+
+struct sxe2_mq_timeout {
+	u64 cmpl_mq_cmds;
+	u32 count;
+};
+
+struct mq_cmds_info {
+	struct list_head mq_cmd_entry;
+	u8 mq_cmd;
+	u8 post_mq;
+	struct mq_info in;
+	bool destroy;
+};
+
+struct sxe2_mq_request {
+	struct mq_cmds_info info;
+	wait_queue_head_t waitq;
+	struct list_head list;
+	refcount_t refcnt;
+	void (*callback_fcn)(struct sxe2_mq_request *mq_request);
+	void *param;
+	struct sxe2_mq_cmpl_info cmpl_info;
+	bool request_done;
+	bool waiting : 1;
+	bool dynamic : 1;
+};
+
+__le64 *sxe2_kget_next_mq_wqe_idx(struct sxe2_mq_ctx *mq, u64 scratch,
+				  u32 *wqe_idx);
+
+static inline __le64 *sxe2_kget_next_mq_wqe(struct sxe2_mq_ctx *mq, u64 scratch)
+{
+	u32 wqe_idx;
+
+	return sxe2_kget_next_mq_wqe_idx(mq, scratch, &wqe_idx);
+}
+
+static inline void sxe2_kget_mq_request(struct sxe2_mq_request *mq_request)
+{
+	refcount_inc(&mq_request->refcnt);
+}
+
+static inline void sxe2_kget_mq_reg_info(struct sxe2_mq_ctx *mq, u32 *val,
+					 u32 *tail, u32 *error)
+{
+	*val = SXE2_BAR_READ_32(mq->dev->hw_regs[MQ_WQE_DONE]);
+	*tail =
FIELD_GET(SXE2_MQTAIL_WQTAIL, *val); + *error = FIELD_GET(SXE2_MQTAIL_MQ_OP_ERR, *val); +} + +void sxe2_kpost_mq(struct sxe2_mq_ctx *mq); + +struct sxe2_mq_request *sxe2_kalloc_and_get_mq_request(struct sxe2_mq *mq, + bool wait); + +int sxe2_knop(struct sxe2_mq_ctx *mq, u64 scratch, bool post_sq, u8 wait_type); + +int mq_kexec_cmd(struct sxe2_rdma_ctx_dev *dev, struct mq_cmds_info *pcmdinfo); + +void sxe2_khandler_mcqe(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_ctx_cq *mcq, bool flag); + +int sxe2_kwait_event(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_mq_request *mq_request); + +void sxe2_kput_mq_request(struct sxe2_mq *mq, + struct sxe2_mq_request *mq_request); + +int sxe2_khandle_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_mq_request *mq_request); + +int sxe2_kpoll_mq_registers(struct sxe2_mq_ctx *mq, u32 tail, u32 count); + +int sxe2_kpoll_mcq(struct sxe2_mq_ctx *mq, u8 op_code, + struct sxe2_mcq_cqe_info *cmpl_info); + +void sxe2_kwork_mq_cmpl(struct work_struct *work); + +int sxe2_mq_kexec_nop_op(struct sxe2_rdma_device *rdma_dev, bool post, + u32 wait); +int mq_kcreate_context(struct sxe2_mq_ctx *mq); + +int sxe2_kcreate_mq(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kdestroy_mq(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kuninit_mq_handler(struct sxe2_rdma_device *rdma_dev); + +int sxe2_kinit_mq_handler(struct sxe2_rdma_device *rdma_dev); +int sxe2_hw_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, + struct mq_cmds_info *pcmdinfo); +int sxe2_ah_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, + struct mq_cmds_info *pcmdinfo); +bool mq_kcheck_cqe_err(struct sxe2_rdma_ctx_dev *dev, u8 mq_cmd, bool error, + u16 maj_err_code, u16 min_err_code); +int mq_kget_mcqe_info(struct sxe2_rdma_ctx_cq *mcq, + struct sxe2_mcq_cqe_info *info); +int mq_kprocess_remaining_cmd(struct sxe2_rdma_ctx_dev *dev); +void mq_karm_mcq(struct sxe2_rdma_ctx_cq *mcq); +void mq_kcheck_progress(struct sxe2_mq_timeout *timeout, + struct sxe2_rdma_ctx_dev *dev); +void mq_kfree_mq_request(struct sxe2_mq *mq, + struct sxe2_mq_request *mq_request); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..b4466290c0b902393a24cb947ad5becee8dbb1fa --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.c @@ -0,0 +1,2435 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: sxe2_drv_mq_debugfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "sxe2_compat.h"
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+#include "sxe2_drv_mq.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_debugfs.h"
+#include "sxe2_drv_rdma_virtchnl.h"
+#include "sxe2_drv_mq_debugfs.h"
+#include "sxe2_drv_rdma_inject.h"
+
+#define MQ_DEBUGFS_WRITE_BUF_MAX_LEN (64)
+#define INJECT_CMD_LEN 32
+
+#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG)
+enum {
+	MQ_SIZE_DEBUGFS,
+	MQ_BASE_DEBUGFS,
+};
+
+static char *mq_fields[] = {
+	[MQ_SIZE_DEBUGFS] = "mq_size",
+	[MQ_BASE_DEBUGFS] = "mq_base",
+};
+
+char *rcms_type_fields[] = {
+	[SXE2_RCMS_OBJ_QP] = "qp",	 [SXE2_RCMS_OBJ_CQ] = "cq",
+	[SXE2_RCMS_OBJ_SRQ] = "srq",	 [SXE2_RCMS_OBJ_MR] = "mr",
+	[SXE2_RCMS_OBJ_RESP] = "resp", [SXE2_RCMS_OBJ_IRRL] = "irrl",
+	[SXE2_RCMS_OBJ_SSNT] = "ssnt", [SXE2_RCMS_OBJ_AH] = "ah",
+};
+
+int size_rcms_type = ARRAY_SIZE(rcms_type_fields);
+
+char *mq_rsc_wqe_err_fields[] = {
+	[MQ_WQE_ERR_DEBUGFS_CLR] = "no",
+	[QPN_DEBUGFS] = "qpn",
+	[CQN_DEBUGFS] = "cqn",
+	[SRQN_DEBUGFS] = "srqn",
+	[CEQN_DEBUGFS] = "ceqn",
+	[AEQN_DEBUGFS] = "aeqn",
+	[QPC_SRQN_DEBUGFS] = "qpc_srqn",
+	[QPC_SEND_CQN_DEBUGFS] = "qpc_send_cqn",
+	[QPC_RECV_CQN_DEBUGFS] = "qpc_recv_cqn",
+	[CQC_CEQN_DEBUGFS] = "cqc_ceqn",
+	[QPC_SW_STATE_DEBUGFS] = "qpc_state",
+	[CQC_SW_STATE_DEBUGFS] = "cqc_state",
+	[SRQC_SW_STATE_DEBUGFS] = "srqc_state",
+	[CEQC_SW_STATE_DEBUGFS] = "ceqc_state",
+	[AEQC_SW_STATE_DEBUGFS] = "aeqc_state",
+	[QPC_TYPE_DEBUGFS] = "qpc_type",
+	[QPC_PMTU_DEBUGFS] = "qpc_pmtu",
+	[QPC_SQ_SIZE_DEBUGFS] = "qpc_sq_size",
+	[QPC_RQ_SIZE_DEBUGFS] = "qpc_rq_size",
+	[CQC_SIZE_DEBUGFS] = "cqc_size",
+	[SRQC_SIZE_DEBUGFS] = "srqc_size",
+	[CEQC_SIZE_DEBUGFS] = "ceqc_size",
+	[AEQC_SIZE_DEBUGFS] = "aeqc_size",
+	[QPC_RQ_TYPE_DEBUGFS] = "qpc_rq_type",
+	[QPC_PAGE_SIZE_DEBUGFS] = "qpc_page_size",
+	[QPC_SW_STATE_JUMP_DEBUGFS] = "qpc_jump_state",
+	[FPTE_CNT_DEBUGFS] = "fpte_cnt",
+	[AH_ID_DEBUGFS] = "ah_id",
+	[MR_ID_DEBUGFS] = "mr_id",
+	[MR_ACCESS_RIGHT_DEBUGFS] = "mr_access_right",
+	[MR_TYPE_DEBUGFS] = "mr_type",
+	[MR_PAGE_SIZE_DEBUGFS] = "mr_page_size",
+	[QP_CTX_PA_DEBUGFS] = "qp_ctx_pa",
+	[QP_CREATE_OP_DEBUGFS] = "qp_create_op",
+};
+
+int mq_rsc_wqe_size = ARRAY_SIZE(mq_rsc_wqe_err_fields);
+
+char *mq_mng_pbl_wqe_err_fields = "opcode";
+
+#endif
+
+/* per-opcode stats table, printed in opcode order; the label strings keep
+ * the "0xNN.SXE2_MQ_OP_*" prefix used by the original dump format
+ */
+static const struct {
+	u32 op;
+	const char *label;
+} mq_op_stat_names[] = {
+	{ SXE2_MQ_OP_CREATE_QP, "0x00.SXE2_MQ_OP_CREATE_QP" },
+	{ SXE2_MQ_OP_MODIFY_QP, "0x01.SXE2_MQ_OP_MODIFY_QP" },
+	{ SXE2_MQ_OP_DESTROY_QP, "0x02.SXE2_MQ_OP_DESTROY_QP" },
+	{ SXE2_MQ_OP_CREATE_CQ, "0x03.SXE2_MQ_OP_CREATE_CQ" },
+	{ SXE2_MQ_OP_MODIFY_CQ, "0x04.SXE2_MQ_OP_MODIFY_CQ" },
+	{ SXE2_MQ_OP_DESTROY_CQ, "0x05.SXE2_MQ_OP_DESTROY_CQ" },
+	{ SXE2_MQ_OP_ALLOC_MR_key, "0x09.SXE2_MQ_OP_ALLOC_MR_key" },
+	{ SXE2_MQ_OP_REG_MR, "0x0a.SXE2_MQ_OP_REG_MR" },
+	{ SXE2_MQ_OP_QUERY_MR_key, "0x0b.SXE2_MQ_OP_QUERY_MR_key" },
+	{ SXE2_MQ_OP_DEALLOC_MR_key, "0x0d.SXE2_MQ_OP_DEALLOC_MR_key" },
+	{ SXE2_MQ_OP_MANAGE_PBLE_BP, "0x10.SXE2_MQ_OP_MANAGE_PBLE_BP" },
+	{ SXE2_MQ_OP_QUERY_QP, "0x13.SXE2_MQ_OP_QUERY_QP" },
+	{ SXE2_MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE,
+	  "0x15.SXE2_MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE" },
+	{ SXE2_MQ_OP_CREATE_CEQ, "0x16.SXE2_MQ_OP_CREATE_CEQ" },
+	{ SXE2_MQ_OP_DESTROY_CEQ, "0x18.SXE2_MQ_OP_DESTROY_CEQ" },
+	{ SXE2_MQ_OP_CREATE_AEQ, "0x19.SXE2_MQ_OP_CREATE_AEQ" },
+	{ SXE2_MQ_OP_DESTROY_AEQ, "0x1b.SXE2_MQ_OP_DESTROY_AEQ" },
+	{ SXE2_MQ_OP_CREATE_ADDR_HANDLE, "0x1c.SXE2_MQ_OP_CREATE_ADDR_HANDLE" },
+	{ SXE2_MQ_OP_MODIFY_ADDR_HANDLE, "0x1d.SXE2_MQ_OP_MODIFY_ADDR_HANDLE" },
+	{ SXE2_MQ_OP_DESTROY_ADDR_HANDLE,
+	  "0x1e.SXE2_MQ_OP_DESTROY_ADDR_HANDLE" },
+	{ SXE2_MQ_OP_UPDATE_FPT, "0x1f.SXE2_MQ_OP_UPDATE_FPT" },
+	{ SXE2_MQ_OP_QUERY_FPM_VAL, "0x20.SXE2_MQ_OP_QUERY_FPM_VAL" },
+	{ SXE2_MQ_OP_COMMIT_FPM_VAL, "0x21.SXE2_MQ_OP_COMMIT_FPM_VAL" },
+	{ SXE2_MQ_OP_NOP, "0x24.SXE2_MQ_OP_NOP" },
+	{ SXE2_MQ_OP_GATHER_STATS, "0x2e.SXE2_MQ_OP_GATHER_STATS" },
+	{ SXE2_MQ_OP_CREATE_SRQ, "0x30.SXE2_MQ_OP_CREATE_SRQ" },
+	{ SXE2_MQ_OP_MODIFY_SRQ, "0x31.SXE2_MQ_OP_MODIFY_SRQ" },
+	{ SXE2_MQ_OP_DESTROY_SRQ, "0x32.SXE2_MQ_OP_DESTROY_SRQ" },
+	{ SXE2_MQ_OP_DEREGISTER_MR, "0x33.SXE2_MQ_OP_DEREGISTER_MR" },
+	{ SXE2_MQ_OP_MODIFY_CEQ, "0x34.SXE2_MQ_OP_MODIFY_CEQ" },
+	{ SXE2_MQ_OP_QUERY_CEQ, "0x35.SXE2_MQ_OP_QUERY_CEQ" },
+	{ SXE2_MQ_OP_MODIFY_AEQ, "0x36.SXE2_MQ_OP_MODIFY_AEQ" },
+	{ SXE2_MQ_OP_QUERY_AEQ, "0x37.SXE2_MQ_OP_QUERY_AEQ" },
+	{ SXE2_MQ_OP_QUERY_CQ, "0x38.SXE2_MQ_OP_QUERY_CQ" },
+	{ SXE2_MQ_OP_QUERY_SRQ, "0x39.SXE2_MQ_OP_QUERY_SRQ" },
+	{ SXE2_MQ_OP_QUERY_MR, "0x3b.SXE2_MQ_OP_QUERY_MR" },
+};
+
+static void mq_cmd_print(char *rsp_end, size_t *len_total,
+			 struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_ctx_dev *ctx_dev = &rdma_dev->rdma_func->ctx_dev;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(mq_op_stat_names); i++) {
+		*len_total += dbg_vsnprintf(
+			rsp_end, *len_total, "%s %#llx %#llx\n",
+			mq_op_stat_names[i].label,
+			ctx_dev->mq_post_stats[mq_op_stat_names[i].op],
+			ctx_dev->mq_cmd_stats[mq_op_stat_names[i].op]);
+	}
+}
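+
+/*
+ * Read handler behind sxe2_rdma_mq_status_fops: dumps MQ/MCQ ring state,
+ * the pending WQEs and the per-opcode post/handle counters listed above.
+ */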
+static ssize_t mq_kget_status_dbg_read_op(struct file *filp, char __user *buf,
+					  size_t count, loff_t *off)
+{
+	ssize_t ret = 0;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_mq *mq;
+	struct sxe2_rdma_mcq *mcq;
+	struct mq_wqe_nop *wqe = NULL;
+	u32 pending_index = 0;
+	u32 pending_count = 0;
+	u32 pending_i = 0;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"MQ DEBUGFS:find dev struct from private_data failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	mq = &rdma_dev->rdma_func->mq;
+	mcq = &rdma_dev->rdma_func->mcq;
+
+	rsp = kzalloc(SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:mq status rsp kzalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	len_total += dbg_vsnprintf(rsp_end, len_total, "mq:status:\n");
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "MQ head %#x, tail %#x, size %#x\n",
+				   mq->mq.mq_ring.head, mq->mq.mq_ring.tail,
+				   mq->mq.mq_ring.size);
+
+	pending_count = (u32)SXE2_RING_USED_QUANTA(mq->mq.mq_ring);
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "MQ pending WQE count %#x\n", pending_count);
+	/* walk the ring from the current tail; the index is advanced once
+	 * per iteration so each pending WQE is printed exactly once
+	 */
+	pending_index = mq->mq.mq_ring.tail;
+	for (pending_i = 0; pending_i < pending_count; pending_i++) {
+		wqe = (struct mq_wqe_nop *)(mq->mq.mq_buf_va[pending_index]
+						    .elem);
+		len_total += dbg_vsnprintf(rsp_end, len_total,
+					   "MQ pending WQE %#x:opcode %#x\n",
+					   pending_index, wqe->op);
+		pending_index = (pending_index + 1) % mq->mq.mq_ring.size;
+	}
+
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "MCQ head %#x, tail %#x, size %#x\n",
+				   mcq->ctx_cq.cq_uk.cq_ring.head,
+				   mcq->ctx_cq.cq_uk.cq_ring.tail,
+				   mcq->ctx_cq.cq_uk.cq_ring.size);
+
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "---Opcode---postcount---handlecount---\n");
+	mq_cmd_print(rsp_end, &len_total, rdma_dev);
+
+	ret = simple_read_from_buffer(buf, count, off, rsp, len_total);
+	if (ret < 0)
+		DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zd\n", ret);
+
+	kfree(rsp);
+	rsp = NULL;
+
+end:
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_mq_status_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = mq_kget_status_dbg_read_op,
+};
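+
+/*
+ * "mq_gen_nop" debugfs file: reading returns the usage help text,
+ * writing posts MQ NOP commands with the selected wait type.
+ */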
+static ssize_t mq_khandle_nop_cmd_dbg_read_op(struct file *filp,
+					      char __user *buf, size_t count,
+					      loff_t *off)
+{
+	ssize_t ret = 0;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	struct sxe2_rdma_device *rdma_dev;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"MQ DEBUGFS:find dev struct from private_data failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	rsp = kzalloc(SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:mq status rsp kzalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	len_total += dbg_vsnprintf(
+		rsp_end, len_total,
+		"CMD HELP:\n"
+		"Please echo 'post' 'wait' 'count' > mq_gen_nop\n"
+		"post:0-1,0 won't post cmd, 1 post cmd right now\n"
+		"wait:1-4,1 pollreg,2 pollcqe,3 waitevent,4 waitcqe\n"
+		"count:mq nop cmd counts, decimal data\n");
+	len_total += dbg_vsnprintf(rsp_end, len_total,
+				   "ex:echo 1 3 10 > mq_gen_nop\n");
+
+	ret = simple_read_from_buffer(buf, count, off, rsp, len_total);
+	if (ret < 0)
+		DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zd\n", ret);
+
+	kfree(rsp);
+	rsp = NULL;
+
+end:
+	return ret;
+}
+
+static ssize_t mq_khandle_nop_cmd_dbg_write_op(struct file *filp,
+					       const char __user *buf,
+					       size_t count, loff_t *off)
+{
+	ssize_t ret = 0;
+	char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 };
+	struct sxe2_rdma_device *rdma_dev;
+	u32 op_wait = 0;
+	u32 op_count = 0;
+	u32 op_post = 0;
+	bool op_post_param = false;
+	u32 i;
+
+	if (*off != 0)
+		goto end;
+
+	if (!count || count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) {
+		ret = -ENOSPC;
+		DRV_RDMA_LOG_ERROR(
+			"MQ DEBUGFS:Cmd empty or exceeded length limit, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %zd\n",
+				   ret);
+		goto end;
+	}
+
+	if (copy_from_user(cmd, buf, count)) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:Cmd copy from user failed, ret %zd\n", ret);
+		goto end;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd);
+
+	cmd[count - 1] = '\0';
+	ret = sscanf(cmd, "%u %u %u", &op_post, &op_wait, &op_count);
+	if (ret != DEBUG_PARA_CONT3) {
+		ret = -ENODATA;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:Please input 'post wait count', three params, ret %zd\n",
+			ret);
+		goto end;
+	}
+	if (op_post > 1) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:post_type err,please input 0/1\n");
+		goto end;
+	}
+	if ((op_wait == 0) || (op_wait > SXE2_MQ_WAIT_CQE)) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:wait_type err,please input 1-4\n");
+		goto end;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:input post %u, wait %u, count %u\n",
+			       op_post, op_wait, op_count);
+
+	op_post_param = (op_post == 1);
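+
+	/* wait types 3 (event) and 4 (cqe) go through the mq_request path;
+	 * types 1 (poll regs) and 2 (poll cq) call sxe2_knop() and poll the
+	 * hardware directly
+	 */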
%u,\n" + "\twait_type %u, now_count %u, ret %zd\n", + op_post_param, op_wait, i, ret); + goto end; + } + } + } else { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:op_wait err,please input 1-4, ret %zd\n", + ret); + } + +end: + ret = ret ? ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_nop_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_khandle_nop_cmd_dbg_read_op, + .write = mq_khandle_nop_cmd_dbg_write_op, +}; + +static ssize_t mq_kignore_mcqe_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + if (rdma_dev->rdma_func->mq.mcqe_ignore) + len_total += + dbg_vsnprintf(rsp_end, len_total, "Now ignore MCQE\n"); + else + len_total += dbg_vsnprintf(rsp_end, len_total, + "Now not ignore MCQE\n"); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo '0/1' > mcqe_ignore\n" + "1:all mcqe will be ignored, 0:all mcqe will be handled\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kignore_mcqe_dbg_write_op(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %zd\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %zd\n", + ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %zd\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_INFO("MQ DEBUGFS:Request command :%s\n", cmd); + + cmd[count - 1] = '\0'; + if (strcmp(cmd, "1") == 0) { + rdma_dev->rdma_func->mq.mcqe_ignore = true; + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:from now,all mcqe will be ignored\n"); + } else if (strcmp(cmd, "0") == 0) { + rdma_dev->rdma_func->mq.mcqe_ignore = false; + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:from now,all mcqe will be processed\n"); + } else { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd is not 1, please retry, ret %zd\n", + ret); + } + +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_ignore_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kignore_mcqe_dbg_read_op, + .write = mq_kignore_mcqe_dbg_write_op, +}; + +static u64 mq_kread_debugfs_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + int ret; + struct sxe2_mqc mqc; + struct sxe2_mq *mq; + size_t len = 0; + + memset(&mqc, 0, sizeof(mqc)); + + if (!rdma_dev || !data) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:RDMA Dev/Data is NULL, ret %d\n", + ret); + goto end; + } + + if (type == SXE2_DBG_RSC_MQ) { + mq = (struct sxe2_mq *)data; + memcpy(&mqc, mq->mq.mq_ctx_va, sizeof(mqc)); + } + + len += dbg_vsnprintf(buf, len, "mq:context:\n"); + len += dbg_vsnprintf(buf, len, "enable_fine_grained_timers:%#x\n", + (u32)mqc.enable_fine_grained_timers); + len += dbg_vsnprintf(buf, len, "disable_FPDU_packing:%#x\n", + (u32)mqc.disable_FPDU_packing); + len += dbg_vsnprintf(buf, len, "RoCEv2_RTO_policy:%#x\n", + (u32)mqc.RoCEv2_RTO_policy); + len += dbg_vsnprintf(buf, len, "protocol_used:%#x\n", + (u32)mqc.protocol_used); + len += dbg_vsnprintf(buf, len, "struct_version:%#x\n", + (u32)mqc.struct_version); + len += dbg_vsnprintf(buf, len, "num_CEQs_per_VF:%#x\n", + (u32)mqc.num_CEQs_per_VF); + len += dbg_vsnprintf(buf, len, "SQ_base:%#llx\n", (u64)mqc.SQ_base); + len += dbg_vsnprintf(buf, len, "rcms_profile_type:%#x\n", + (u32)mqc.rcms_profile_type); + len += dbg_vsnprintf(buf, len, "remote_endpoint_trk_en:%#x\n", + (u32)mqc.remote_endpoint_trk_en); + len += dbg_vsnprintf(buf, len, "rdpa_assist:%#x\n", + (u32)mqc.rdpa_assist); + len += dbg_vsnprintf(buf, len, "cache_line_64byte_en:%#x\n", + (u32)mqc.cache_line_64byte_en); + len += dbg_vsnprintf(buf, len, "PE_enabled_vf_count:%#x\n", + (u32)mqc.PE_enabled_vf_count); + len += dbg_vsnprintf(buf, len, "QP_completion_context:%#llx\n", + (u64)mqc.QP_completion_context); + len += dbg_vsnprintf(buf, len, "hw_minor_version:%#x\n", + (u32)mqc.hw_minor_version); + len += dbg_vsnprintf(buf, len, "hw_major_version:%#x\n", + (u32)mqc.hw_major_version); + len += dbg_vsnprintf(buf, len, "dcqcn_min_rate:%#x\n", + (u32)mqc.dcqcn_min_rate); + len += dbg_vsnprintf(buf, len, "dcqcn_min_dec_factor:%#x\n", + (u32)mqc.dcqcn_min_dec_factor); + len += dbg_vsnprintf(buf, len, "dcqcn_T:%#x\n", (u32)mqc.dcqcn_T); + len += dbg_vsnprintf(buf, len, "cc_hai_factor:%#x\n", + (u32)mqc.cc_hai_factor); + len += dbg_vsnprintf(buf, len, "cc_rai_factor:%#x\n", + (u32)mqc.cc_rai_factor); + len += dbg_vsnprintf(buf, len, "dcqcn_B:%#x\n", (u32)mqc.dcqcn_B); + len += dbg_vsnprintf(buf, len, "dcqcn_F:%#x\n", (u32)mqc.dcqcn_F); + len += dbg_vsnprintf(buf, len, "cc_cfg_valid:%#x\n", + (u32)mqc.cc_cfg_valid); + len += dbg_vsnprintf(buf, len, "dcqcn_rreduce_mperiod:%#x\n", + (u32)mqc.dcqcn_rreduce_mperiod); + +end: + return len; +} + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +void sxe2_kadd_mq_nop_debugfs_files(struct sxe2_rdma_device *rdma_dev) +{ + debugfs_create_file("mq_gen_nop", SXE2_DEBUG_FILE_ONLY_WRITE, + rdma_dev->hdl->mq_debugfs, rdma_dev, + &sxe2_rdma_mq_nop_fops); + debugfs_create_file("mcqe_ignore", SXE2_DEBUG_FILE_ONLY_WRITE, + rdma_dev->hdl->mq_debugfs, rdma_dev, + &sxe2_rdma_mq_ignore_fops); +} + +static ssize_t mq_kcreate_dup_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if 
(*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'count' 'first_ignore_hw' > dup_create\n" + "count:creation counts, decimal data\n" + "first_ignore_hw:0/1, if ignore hw in first creation\n"); + len_total += + dbg_vsnprintf(rsp_end, len_total, "ex:echo 2 0 > dup_create\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kcreate_dup_dbg_write_op(struct file *filp, + const char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u64 op_count = 1; + u64 if_ignore = 0; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %ld\n", + ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Please input 'count' 'first_ignore_hw'\n"); + goto end; + } + + ret = kstrtoull(argv[0], 10, &op_count); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get op_count param value failed, ret (%ld)\n", + ret); + goto end; + } + + ret = kstrtoull(argv[1], 10, &if_ignore); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get if_ignore param value failed, ret (%ld)\n", + ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:count %llu, if_notify %llu\n", + op_count, if_ignore); + snprintf(cmd_buf, sizeof(cmd_buf), "-u %llu", if_ignore); + if (op_count == 1 && if_ignore == 1) { + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_creat", cmd_buf); + ret = sxe2_kcreate_mq(rdma_dev); + if (ret == 0) { + DRV_RDMA_LOG_DEV_DEBUG( + "MQ DEBUGFS:create mq, ret %ld\n", ret); + } + } + + if (op_count == 2 && if_ignore == 0) { + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_duplicate_creat", + cmd_buf); + } + + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_creat"); + +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_dup_create_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kcreate_dup_dbg_read_op, + .write = mq_kcreate_dup_dbg_write_op, +}; + +static ssize_t mq_kcreate_ctx_addr_dbg_read_op(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:mqc_addr vld %d, mqc_addr %#llx\n\n", + rdma_dev->rdma_func->mq.err_val.mqc_addr_vld, + rdma_dev->rdma_func->mq.err_val.mqc_addr); + + len_total += dbg_vsnprintf(rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'addr' > 0x2_mqc_addr\n" + "addr:Hex data\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex:echo 0xFFFFFFFF > 0x2_mqc_addr\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kcreate_ctx_addr_dbg_write_op(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u64 mqc_addr = 0; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT1) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + ret = kstrtoull(argv[0], 16, &mqc_addr); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + snprintf(cmd_buf, sizeof(cmd_buf), "-u %llx", mqc_addr); + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_dma_err", cmd_buf); + DRV_RDMA_LOG_DEBUG_BDF("MQ DEBUGFS:addr %#llx\n", mqc_addr); + +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_ctx_addr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kcreate_ctx_addr_dbg_read_op, + .write = mq_kcreate_ctx_addr_dbg_write_op, +}; + +static ssize_t mq_kcreate_ctx_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:mq_size vld:%d,\n" + "\tmq_size:%#llx, mq_base vld:%d, mq_base:%#llx\n\n", + rdma_dev->rdma_func->mq.err_val.mqc_size_vld, + rdma_dev->rdma_func->mq.err_val.mqc_size, + rdma_dev->rdma_func->mq.err_val.mqc_base_vld, + rdma_dev->rdma_func->mq.err_val.mqc_base); + + len_total += dbg_vsnprintf(rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'field' 'val' > mqc_err\n" + "field:mq_size/mq_base, val:Hex data\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex1:echo mq_size 0xFFFFFFFF > mqc_err\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex2:echo mq_base 0x58FFFFBA > mqc_err\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kcreate_ctx_dbg_write_op(struct file *filp, + const char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + u64 temp_value; + size_t i; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(mq_fields); i++) { + if (!strncmp(argv[0], mq_fields[i], strlen(mq_fields[i]))) + break; + } + + ret = kstrtoull(argv[1], 16, &temp_value); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + snprintf(cmd_buf, sizeof(cmd_buf), "-u %llx", temp_value); + + switch (i) { + case MQ_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_size", cmd_buf); + break; + case MQ_BASE_DEBUGFS: + 
INJECT_ACTIVE(rdma_dev->rdma_func, "mq_base", cmd_buf); + break; + default: + DRV_RDMA_LOG_DEV_WARN("MQ DEBUGFS:invalid index %zu\n", i); + goto end; + } + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:modify mq i:%zu, type:%s, temp_value:%#llx\n", i, + mq_fields[i], temp_value); + +end: + ret = ret ? ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_ctx_err_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kcreate_ctx_dbg_read_op, + .write = mq_kcreate_ctx_dbg_write_op, +}; + +static void sxe2_kadd_mq_err_debugfs_files(struct sxe2_rdma_device *rdma_dev) +{ + debugfs_create_file("dup_create", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->mq_err_debugfs, rdma_dev, + &sxe2_rdma_mq_dup_create_fops); + debugfs_create_file("0x2_mqc_addr", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->mq_err_debugfs, rdma_dev, + &sxe2_rdma_mq_ctx_addr_fops); + debugfs_create_file("mqc_err", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->mq_err_debugfs, rdma_dev, + &sxe2_rdma_mq_ctx_err_fops); +} + +static ssize_t mq_kupdate_fpte_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:update fptes:rsc_type:%u, temp_value:%u\n\n", + rdma_dev->rdma_func->mq.err_cqe_val.fpte_rsc_type, + rdma_dev->rdma_func->mq.err_cqe_val.fpte_err_type); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'rsc_type' 'err_type' > rcms_ctx_err\n" + "rsc_type:qp/cq/srq/mr/resp/irrl/ssnt/ah\n" + "err_type:decimal data,1:fpte invalid,2:addr\n" + "all 0 in fpte,3:addr all F in fpte\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex:echo qp 2 > rcms_ctx_err\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kupdate_fpte_dbg_write_op(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + u64 temp_value; + size_t i; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = 
split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(rcms_type_fields); i++) { + if (!strncmp(argv[0], rcms_type_fields[i], + strlen(rcms_type_fields[i]))) { + break; + } + } + + ret = kstrtoull(argv[1], 10, &temp_value); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + snprintf(cmd_buf, sizeof(cmd_buf), "-u %u", (u32)temp_value); + + switch (i) { + case SXE2_RCMS_OBJ_QP: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_ctx_err_qp", cmd_buf); + break; + + case SXE2_RCMS_OBJ_SSNT: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_ctx_err_ssnt", + cmd_buf); + break; + + default: + DRV_RDMA_LOG_DEV_WARN("MQ DEBUGFS:invalid index %zu\n", i); + break; + } + + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:update fptes:rsc_type:%u, temp_value:%u\n", + rdma_dev->rdma_func->mq.err_cqe_val.fpte_rsc_type, + rdma_dev->rdma_func->mq.err_cqe_val.fpte_err_type); + +end: + ret = ret ? ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_fpte_err_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kupdate_fpte_dbg_read_op, + .write = mq_kupdate_fpte_dbg_write_op, +}; + +static ssize_t mq_kinject_wqe_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + size_t i; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:rsrc wqe inject:err_type:%u, temp_value:%u\n\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'err_type' 'val' > rsc_wqe_err' > mqc_err\n" + "err_type:"); + for (i = 0; i < ARRAY_SIZE(mq_rsc_wqe_err_fields); i++) { + if (i == ARRAY_SIZE(mq_rsc_wqe_err_fields) - 1) { + len_total += dbg_vsnprintf(rsp_end, len_total, "%s\n", + mq_rsc_wqe_err_fields[i]); + } else { + len_total += dbg_vsnprintf(rsp_end, len_total, "%s/", + mq_rsc_wqe_err_fields[i]); + } + } + len_total += dbg_vsnprintf(rsp_end, len_total, + "val:hex data,inject value\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex:echo qpn 0xFFFF > rsc_wqe_err\n"); + len_total += dbg_vsnprintf( + rsp_end, len_total, + "if clr injection,please echo 0 0 > rsc_wqe_err\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kinject_wqe_dbg_write_op(struct file *filp, + const char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + u64 temp_value; + size_t i; + char 
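/* cmd_buf below carries the "-u <value>" argument string that is handed to
 * INJECT_ACTIVE() for the selected inject point. */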
cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(mq_rsc_wqe_err_fields); i++) { + if (!strncmp(argv[0], mq_rsc_wqe_err_fields[i], + strlen(mq_rsc_wqe_err_fields[i]))) { + break; + } + } + + ret = kstrtoull(argv[1], 16, &temp_value); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + snprintf(cmd_buf, sizeof(cmd_buf), "-u %x", (u32)temp_value); + switch (i) { + case 0: + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type = (u32)i; + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val = + temp_value; + break; + case QPN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpn", cmd_buf); + break; + case CQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqn", cmd_buf); + break; + case SRQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srqn", cmd_buf); + break; + case CEQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceqn", cmd_buf); + break; + case QPC_SRQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_srqn", + cmd_buf); + break; + case QPC_SEND_CQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_scqn", + cmd_buf); + break; + case QPC_RECV_CQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rcqn", + cmd_buf); + break; + case CQC_CEQN_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqcn", cmd_buf); + break; + case QPC_SW_STATE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_swState", + cmd_buf); + break; + case CQC_SW_STATE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_swState", + cmd_buf); + break; + case SRQC_SW_STATE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_swState", + cmd_buf); + break; + case CEQC_SW_STATE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_swState", + cmd_buf); + break; + case QPC_TYPE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_type", + cmd_buf); + break; + case QPC_PMTU_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pmtu", + cmd_buf); + break; + case QPC_SQ_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_sqSize", + cmd_buf); + break; + case QPC_RQ_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqSize", + cmd_buf); + break; + case CQC_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cq_size", + cmd_buf); + break; + case SRQC_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_size", + cmd_buf); + break; + case CEQC_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ceq_size", + cmd_buf); + break; + case AEQC_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_cqn", 
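/* note: this AEQC_SIZE_DEBUGFS case drives the "mq_rcs_wqe_cqn" inject
 * point rather than a dedicated aeq-size string. */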
cmd_buf); + break; + case QPC_RQ_TYPE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqType", + cmd_buf); + break; + case QPC_PAGE_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pageSize", + cmd_buf); + break; + case QPC_SW_STATE_JUMP_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, + "mq_rcs_wqe_qpc_swState_jump", cmd_buf); + break; + case FPTE_CNT_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_fpte", cmd_buf); + break; + case AH_ID_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ah", cmd_buf); + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_ah_query", + cmd_buf); + break; + case MR_ID_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_reg", + cmd_buf); + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_dereg", + cmd_buf); + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_query", + cmd_buf); + break; + case MR_ACCESS_RIGHT_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_accessRight", + cmd_buf); + break; + case MR_TYPE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_type", + cmd_buf); + break; + case MR_PAGE_SIZE_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_pageSize", + cmd_buf); + break; + case QP_CTX_PA_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx", cmd_buf); + break; + case QP_CREATE_OP_DEBUGFS: + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_createOp", + cmd_buf); + break; + default: + DRV_RDMA_LOG_DEV_WARN("MQ DEBUGFS:invalid index %zu\n", i); + break; + } + + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:rsrc wqe inject:err_type:%u, temp_value:%llu\n", + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_val); + +end: + ret = ret ? ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_rsc_wqe_err_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kinject_wqe_dbg_read_op, + .write = mq_kinject_wqe_dbg_write_op, +}; + +static ssize_t mq_kcommit_rcms_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:commit rcms:err_vld %d, rsc_type:%u, temp_value:%u\n\n", + rdma_dev->rdma_func->mq.err_cqe_val.commit_wqe_err_vld, + rdma_dev->rdma_func->mq.err_cqe_val.commit_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.commit_wqe_err_val); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'rsc_type' 'err_val' > rcms_commit_err\n" + "rsc_type:qp/cq/srq/ah/mr\n" + "err_val:hex data\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex:echo qp 0x300 > rcms_commit_err\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kcommit_rcms_dbg_write_op(struct file *filp, + const 
char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + u64 temp_value; + size_t i; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(rcms_type_fields); i++) { + if (!strncmp(argv[0], rcms_type_fields[i], + strlen(rcms_type_fields[i]))) { + break; + } + } + + ret = kstrtoull(argv[1], 16, &temp_value); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + snprintf(cmd_buf, sizeof(cmd_buf), "-u %x", (u32)temp_value); + switch (i) { + case SXE2_RCMS_OBJ_QP: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_commit_err_qp", + cmd_buf); + break; + case SXE2_RCMS_OBJ_CQ: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_commit_err_cq", + cmd_buf); + break; + case SXE2_RCMS_OBJ_SRQ: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_commit_err_srq", + cmd_buf); + break; + case SXE2_RCMS_OBJ_MR: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_commit_err_mr", + cmd_buf); + break; + case SXE2_RCMS_OBJ_AH: + INJECT_ACTIVE(rdma_dev->rdma_func, "rcms_commit_err_ah", + cmd_buf); + break; + + default: + DRV_RDMA_LOG_DEV_WARN("MQ DEBUGFS:invalid index %zu\n", i); + break; + } + + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:commit rcms:rsc_type:%u, temp_value:%u\n", + rdma_dev->rdma_func->mq.err_cqe_val.commit_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.commit_wqe_err_val); + +end: + ret = ret ? 
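/* debugfs write convention used throughout this file: keep a negative error
 * code if one was set, otherwise report the whole count as consumed. */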
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_commit_err_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kcommit_rcms_dbg_read_op, + .write = mq_kcommit_rcms_dbg_write_op, +}; + +static ssize_t mq_kmanage_pbl_dbg_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "NOW STATE---MQ DEBUGFS:manage pbl:err_vld:%d, err_type:%#x, err_value:%#llx\n\n", + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_vld, + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_val); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'err_type' 'err_val' > vf_manage_pbl_err\n" + "err_type:no|spte_cnt|first_spte_index|fpte_index|opcode\n" + "err_val:hex data\n"); + len_total += + dbg_vsnprintf(rsp_end, len_total, + "ex:echo spte_cnt 0xFF > vf_manage_pbl_err\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kmanage_pbl_dbg_write_op(struct file *filp, + const char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + u64 temp_value; + bool flag = false; + char cmd_buf[INJECT_CMD_LEN] = { 0 }; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Invalid param nums\n"); + goto end; + } + + if (!strncmp(argv[0], mq_mng_pbl_wqe_err_fields, + strlen(mq_mng_pbl_wqe_err_fields))) { + flag = true; + } + + ret = kstrtoull(argv[1], 16, &temp_value); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get param value failed, ret (%ld)\n", ret); + goto end; + } + + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_vld = flag; + + if (flag) { + snprintf(cmd_buf, sizeof(cmd_buf), "-u %llx", (u64)temp_value); + INJECT_ACTIVE(rdma_dev->rdma_func, "mq_pbl_err", cmd_buf); + } else { + DRV_RDMA_LOG_DEV_WARN("MQ 
DEBUGFS:invalid\n"); + goto end; + } + DRV_RDMA_LOG_DEV_INFO( + "MQ DEBUGFS:manage pbl:err_type:%#x, err_value:%#llx\n", + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_type, + rdma_dev->rdma_func->mq.err_cqe_val.manage_pbl_wqe_err_val); + +end: + ret = ret ? ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_manage_pbl_err_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kmanage_pbl_dbg_read_op, + .write = mq_kmanage_pbl_dbg_write_op, +}; + +static ssize_t mq_kmanage_func_tbl_dbg_read_op(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf(rsp_end, len_total, + "CMD HELP:\n" + "Please echo 'vf_id' > manage_func_tbl\n" + "vf_id:hex data\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "ex:echo 0xA > manage_func_tbl\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_kmanage_func_tbl_dbg_write_op(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u64 vf_id = 0; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rcms_fcn_info rcms_fcn_info = {}; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT1) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:Please input 'vf_id'\n"); + goto end; + } + + ret = kstrtoull(argv[0], 16, &vf_id); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:get vf_id param value failed, ret (%ld)\n", + ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:vf_id %#llx\n", vf_id); + + rcms_fcn_info.protocol_used = (u8)SXE2_ROCE_PROTOCOL_ONLY; + rcms_fcn_info.vf_id = (u32)vf_id; + ret = sxe2_vchnl_manage_rcms_pm_func_table(&rdma_dev->rdma_func->mq.mq, + &rcms_fcn_info, 0, 1); + if (ret == 0) { + DRV_RDMA_LOG_DEV_DEBUG( + "MQ DEBUGFS:manage func table, cmd post, ret %ld\n", + ret); + } else { + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:manage func table, cmd post err, ret %ld\n", + ret); + } + +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations sxe2_rdma_mq_manage_fun_tbl_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mq_kmanage_func_tbl_dbg_read_op, + .write = mq_kmanage_func_tbl_dbg_write_op, +}; + +static ssize_t mq_handle_op_failed_read_op(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + int i; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:find dev struct from private_data failed, ret %zu\n", + ret); + goto end; + } + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + len_total += + dbg_vsnprintf(rsp_end, len_total, "Inject op failed opcode:\n"); + for (i = 0; i < MQ_MAX_OPS; i++) { + if (rdma_dev->rdma_func->mq.ops_failed[i]) { + len_total += + dbg_vsnprintf(rsp_end, len_total, "%u ", i); + } + } + len_total += dbg_vsnprintf(rsp_end, len_total, "\n"); + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static ssize_t mq_handle_op_failed_write_op(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[MQ_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + bool en; + u64 opcode; + int i; + u64 temp_value; + + if (*off != 0) + goto end; + + if (count >= MQ_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "MQ DEBUGFS:Cmd exceeded length limit, ret %d\n", ret); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("MQ DEBUGFS:dev find failed, ret %d\n", ret); + goto end; + } + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Cmd copy from user failed, ret %ld\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end; + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:Please input 'en opcode' or 'en'\n"); + goto end; + } + + ret = kstrtoull(argv[0], MQ_STR_TO_LL_AUTO_BASE, &temp_value); + if (ret) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:get en param value failed\n"); + goto end; + } + en = (temp_value != 0) ? 
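/* argv[0] is the enable flag: any nonzero value switches injection on for
 * the opcode(s) selected by argv[1]. */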
true : false;
+	ret = kstrtoull(argv[1], MQ_STR_TO_LL_AUTO_BASE, &opcode);
+	if (ret) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:get opcode param value failed\n");
+		goto end;
+	}
+
+	if (opcode != MQ_INJECT_ALL_OPCODE_CTRL_VAL && opcode >= MQ_MAX_OPS) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:invalid opcode param %llu, please input 0xFF or 0-36\n",
+			opcode);
+		goto end;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG("MQ DEBUGFS:en %u opcode %llu\n", en, opcode);
+	if (opcode == MQ_INJECT_ALL_OPCODE_CTRL_VAL) {
+		for (i = 0; i < MQ_MAX_OPS; i++)
+			rdma_dev->rdma_func->mq.ops_failed[i] = en;
+	} else {
+		rdma_dev->rdma_func->mq.ops_failed[opcode] = en;
+	}
+
+end:
+	ret = ret ? ret : (ssize_t)count;
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_op_failed_err_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = mq_handle_op_failed_read_op,
+	.write = mq_handle_op_failed_write_op,
+};
+
+static void
+sxe2_kadd_mq_err_cqe_debugfs_files(struct sxe2_rdma_device *rdma_dev)
+{
+	debugfs_create_file("rcms_ctx_err", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_err_cqe_debugfs, rdma_dev,
+			    &sxe2_rdma_mq_fpte_err_fops);
+	debugfs_create_file("rsc_wqe_err", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_err_cqe_debugfs, rdma_dev,
+			    &sxe2_rdma_mq_rsc_wqe_err_fops);
+	debugfs_create_file("rcms_commit_err", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_err_cqe_debugfs, rdma_dev,
+			    &sxe2_rdma_mq_commit_err_fops);
+	debugfs_create_file("vf_manage_pbl_err", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_err_cqe_debugfs, rdma_dev,
+			    &sxe2_rdma_mq_manage_pbl_err_fops);
+	debugfs_create_file("manage_func_tbl", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_err_cqe_debugfs, rdma_dev,
+			    &sxe2_rdma_mq_manage_fun_tbl_fops);
+}
+
+static void
+sxe2_kadd_mq_op_failed_debugfs_files(struct sxe2_rdma_device *rdma_dev)
+{
+	debugfs_create_file("op_failed", SXE2_DEBUG_FILE_READ_WRITE,
+			    rdma_dev->hdl->mq_op_failed_debugfs, rdma_dev,
+			    &sxe2_rdma_op_failed_err_fops);
+}
+
+int sxe2_kadd_mq_err_debugfs(struct sxe2_rdma_device *rdma_dev)
+{
+	int ret = 0;
+
+	if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:debugfs root dir not exist, ret (%d)\n",
+			ret);
+		goto end;
+	}
+
+	if (!rdma_dev->hdl->mq_debugfs) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:mq debugfs dir not exist, ret (%d)\n", ret);
+		goto end;
+	}
+
+	rdma_dev->hdl->mq_err_debugfs =
+		debugfs_create_dir("MQ_ERR", rdma_dev->hdl->mq_debugfs);
+	if (!rdma_dev->hdl->mq_err_debugfs) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:create mq err dir failed, ret %d\n", ret);
+		goto end;
+	}
+	sxe2_kadd_mq_err_debugfs_files(rdma_dev);
+
+	rdma_dev->hdl->mq_err_cqe_debugfs =
+		debugfs_create_dir("MQ_ERR_CQE", rdma_dev->hdl->mq_debugfs);
+	if (!rdma_dev->hdl->mq_err_cqe_debugfs) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:create mq err cqe dir failed, ret %d\n",
+			ret);
+		goto remove_mq_err_debug;
+	}
+	sxe2_kadd_mq_err_cqe_debugfs_files(rdma_dev);
+
+	rdma_dev->hdl->mq_op_failed_debugfs =
+		debugfs_create_dir("MQ_OP_FAILED", rdma_dev->hdl->mq_debugfs);
+	if (!rdma_dev->hdl->mq_op_failed_debugfs) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"MQ DEBUGFS:create mq op failed dir failed, ret %d\n",
+			ret);
+		goto remove_mq_err_cqe_debug;
+	}
+	sxe2_kadd_mq_op_failed_debugfs_files(rdma_dev);
+	goto end;
+
+remove_mq_err_cqe_debug:
+	debugfs_remove_recursive(rdma_dev->hdl->mq_err_cqe_debugfs);
+	rdma_dev->hdl->mq_err_cqe_debugfs = NULL;
+
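/* the two cleanup labels intentionally fall through: undoing the
 * MQ_ERR_CQE dir continues into undoing the MQ_ERR dir below. */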
+remove_mq_err_debug: + debugfs_remove_recursive(rdma_dev->hdl->mq_err_debugfs); + rdma_dev->hdl->mq_err_debugfs = NULL; +end: + return ret; +} + +void sxe2_kremove_mq_err_debugfs(struct sxe2_rdma_device *rdma_dev) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("debugfs root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->mq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("mq debugfs dir not exist\n"); + goto end; + } + + debugfs_remove_recursive(rdma_dev->hdl->mq_err_cqe_debugfs); + rdma_dev->hdl->mq_err_cqe_debugfs = NULL; + + debugfs_remove_recursive(rdma_dev->hdl->mq_err_debugfs); + rdma_dev->hdl->mq_err_debugfs = NULL; + + debugfs_remove_recursive(rdma_dev->hdl->mq_op_failed_debugfs); + rdma_dev->hdl->mq_op_failed_debugfs = NULL; + +end: + return; +} +#endif + +int sxe2_kadd_mq_rsc_debugfs(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mq *mq) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:debugfs root dir not exist, ret (%d)\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->mq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq debugfs dir not exist, ret (%d)\n", ret); + goto end; + } + + mq->dbg_node = + drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_MQ, + rdma_dev->hdl->mq_debugfs, + mq_kread_debugfs_field, NULL, 0, mq); + if (!mq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq debug res tree add failed, ret %d\n", + ret); + goto end; + } + + debugfs_create_file("mq_status", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->mq_debugfs, rdma_dev, + &sxe2_rdma_mq_status_fops); + +end: + return ret; +} + +void sxe2_kremove_mq_rsc_debugfs(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mq *mq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("debugfs root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->mq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("mq debugfs dir not exist\n"); + goto end; + } + + if (mq->dbg_node) { + drv_rdma_rm_res_tree(mq->dbg_node); + mq->dbg_node = NULL; + } + +end: + return; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..e00cd63df9bc33ba432327eed75e4931028eae54 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mq_debugfs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_mq_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_MQ_DEBUGFS_H__ +#define __SXE2_DRV_MQ_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +extern char *rcms_type_fields[]; +extern int size_rcms_type; +extern int mq_rsc_wqe_size; +extern char *mq_rsc_wqe_err_fields[]; +extern char *mq_mng_pbl_wqe_err_fields; +#endif + +#define MQ_INJECT_ALL_OPCODE_CTRL_VAL 0xFF +#define MQ_STR_TO_LL_AUTO_BASE 0 + +int sxe2_kadd_mq_rsc_debugfs(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mq *mq); + +void sxe2_kremove_mq_rsc_debugfs(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mq *mq); + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +void sxe2_kadd_mq_nop_debugfs_files(struct sxe2_rdma_device *rdma_dev); + +int sxe2_kadd_mq_err_debugfs(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kremove_mq_err_debugfs(struct sxe2_rdma_device *rdma_dev); +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.c new file mode 100644 index 0000000000000000000000000000000000000000..bec1f6222023378747e6473960300b16b046f95e --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.c @@ -0,0 +1,1263 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_mr.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include +#include +#include + +#include "sxe2-abi.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_main.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_mr_debugfs.h" + +static void sxe2_print_reg_mr_wqe_info(struct sxe2_rdma_device *rdma_dev, + void *wqe_info) +{ + union sxe2_reg_mr_info *p_reg_mr_info; + + p_reg_mr_info = (union sxe2_reg_mr_info *)wqe_info; + DRV_RDMA_LOG_DEBUG_BDF( + "\nREG_MR:\n" + "pbl_mode : %d\n" + "access_right : %#x\n" + "va_based_flag : %d\n" + "access_mode : %d\n" + "mr_type : %d\n" + "mr_key : %d\n" + "pd : %d\n" + "is_len64 : %d\n" + "start_addr : %llx\n" + "mr_idx : 0x%x\n" + "op : %d\n" + "wqe_valid : %d\n" + "len : %llu\n" + "pbl_index : %llx\n" + "log_entity_size : %d\n", + p_reg_mr_info->field.pbl_mode, + p_reg_mr_info->field.access_right, + p_reg_mr_info->field.va_based_flag, + p_reg_mr_info->field.access_mode, p_reg_mr_info->field.mr_type, + p_reg_mr_info->field.mr_key, p_reg_mr_info->field.pd, + p_reg_mr_info->field.is_len64, p_reg_mr_info->field.start_addr, + p_reg_mr_info->field.mr_idx, p_reg_mr_info->field.op, + p_reg_mr_info->field.wqe_valid, p_reg_mr_info->field.len, + p_reg_mr_info->field.pbl_index, + p_reg_mr_info->field.log_entity_size); + +} + +static void sxe2_print_ah_wqe_info(struct sxe2_rdma_device *rdma_dev, + void *wqe_info) +{ + union sxe2_ah_info *p_ah_info; + u32 srcip[4]; + u32 dstip[4]; + + p_ah_info = (union sxe2_ah_info *)wqe_info; + DRV_RDMA_LOG_DEBUG_BDF( + "\nCREATE/DESCTROY AH:\n" + "dest_mac : %x:%x:%x:%x:%x:%x\n" + "vlan_tag : %#x\n" + "tc_tos : %d\n" + "pd_idx : %d\n" + "flow_label : %d\n" + "hop_ttl : %d\n" + "arp_index : %d\n" + "ah_idx : %#x\n" + "op : %d\n" + "ipv4_valid : %d\n" + "insert_vlan_tag : %d\n" + "do_lpbk : %d\n" + "wqe_valid : %d\n", + p_ah_info->field.dest_mac[5], p_ah_info->field.dest_mac[4], + p_ah_info->field.dest_mac[3], p_ah_info->field.dest_mac[2], + p_ah_info->field.dest_mac[1], 
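/* dest_mac[] is printed [5]..[0]; the field appears to hold the MAC with
 * the low byte first (assumption based on this print order). */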
p_ah_info->field.dest_mac[0], + p_ah_info->field.vlan_tag, p_ah_info->field.tc_tos, + p_ah_info->field.pd_idx, p_ah_info->field.flow_label, + p_ah_info->field.hop_ttl, p_ah_info->field.arp_index, + p_ah_info->field.ah_idx, p_ah_info->field.op, + p_ah_info->field.ipv4_valid, p_ah_info->field.insert_vlan_tag, + p_ah_info->field.do_lpbk, p_ah_info->field.wqe_valid); + if (p_ah_info->field.ipv4_valid) { + srcip[0] = htonl(p_ah_info->field.src_ip_addr[0]); + dstip[0] = htonl(p_ah_info->field.dest_ip_addr[0]); + DRV_RDMA_LOG_DEBUG_BDF("dest_ip_addr : %pI4\n" + "src_ip_addr : %pI4\n", + dstip, srcip); + } else { + sxe2_copy_ip_htonl(srcip, p_ah_info->field.src_ip_addr); + sxe2_copy_ip_htonl(dstip, p_ah_info->field.dest_ip_addr); + DRV_RDMA_LOG_DEBUG_BDF("dest_ip_addr : %pI6\n" + "src_ip_addr : %pI6\n", + dstip, srcip); + } + +} + +void sxe2_print_wqe_info(struct sxe2_rdma_ctx_dev *dev, void *wqe_info, + u8 mq_cmd) +{ + struct sxe2_rdma_device *rdma_dev; + union sxe2_dereg_mr_info *p_dereg_mr_info; + union sxe2_alloc_key_info *p_alloc_key_info; + union sxe2_dalloc_key_info *p_dalloc_key_info; + union sxe2_query_mr_info *p_query_mr_info; + + rdma_dev = to_rdmadev(dev); + + switch (mq_cmd) { + case MQ_OP_REG_MR: + sxe2_print_reg_mr_wqe_info(rdma_dev, wqe_info); + break; + case MQ_OP_QUERY_MR: + p_query_mr_info = (union sxe2_query_mr_info *)wqe_info; + DRV_RDMA_LOG_DEV_DEBUG("\nQUERY_MR:\n" + "buf_addr : %#llx\n" + "mr_index : %u\n" + "op : %d\n" + "wqe_valid : %d\n", + p_query_mr_info->field.buf_addr, + p_query_mr_info->field.mr_index, + p_query_mr_info->field.op, + p_query_mr_info->field.wqe_valid); + break; + case MQ_OP_DEREGISTER_MR: + p_dereg_mr_info = (union sxe2_dereg_mr_info *)wqe_info; + DRV_RDMA_LOG_DEV_DEBUG("\nDEREG_MR:\n" + "pd : %d\n" + "mr_idx : %#x\n" + "op : %d\n" + "wqe_valid : %d\n", + p_dereg_mr_info->field.pd, + p_dereg_mr_info->field.mr_idx, + p_dereg_mr_info->field.op, + p_dereg_mr_info->field.wqe_valid); + break; + case MQ_OP_ALLOC_MR_KEY: + p_alloc_key_info = (union sxe2_alloc_key_info *)wqe_info; + DRV_RDMA_LOG_DEV_DEBUG("\nALLOC_MR/MW_KEY:\n" + "pbl_mode : %d\n" + "mw_type : %d\n" + "access_mode : %d\n" + "mr_type : %d\n" + "pd : %d\n" + "mr_idx : %#x\n" + "op : %d\n" + "wqe_valid : %d\n" + "len : %llx\n" + "pbl_idx : %llx\n" + "log_entity_size : %d\n", + p_alloc_key_info->field.pbl_mode, + p_alloc_key_info->field.mw_type, + p_alloc_key_info->field.access_mode, + p_alloc_key_info->field.mr_type, + p_alloc_key_info->field.pd, + p_alloc_key_info->field.mr_idx, + p_alloc_key_info->field.op, + p_alloc_key_info->field.wqe_valid, + p_alloc_key_info->field.len, + p_alloc_key_info->field.pbl_idx, + p_alloc_key_info->field.log_entity_size); + break; + case MQ_OP_DEALLOC_MR_KEY: + p_dalloc_key_info = (union sxe2_dalloc_key_info *)wqe_info; + DRV_RDMA_LOG_DEV_DEBUG("\nREG_MR:\n" + "mr_type : %d\n" + "pd : %d\n" + "mr_idx : %#x\n" + "op : %d\n" + "wqe_valid : %d\n", + p_dalloc_key_info->field.mr_type, + p_dalloc_key_info->field.pd, + p_dalloc_key_info->field.mr_idx, + p_dalloc_key_info->field.op, + p_dalloc_key_info->field.wqe_valid); + break; + case MQ_OP_CREATE_ADDR_HANDLE: + case MQ_OP_MODIFY_ADDR_HANDLE: + case MQ_OP_DESTROY_ADDR_HANDLE: + sxe2_print_ah_wqe_info(rdma_dev, wqe_info); + break; + default: + DRV_RDMA_LOG_DEV_ERR("MR: post unknown type mq wqe\n"); + } +} + +void sxe2_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, __le64 *wqe, void *wqe_info) +{ + u32 offset; + int i; + struct sxe2_mq_wqe *mq_wqe; + struct sxe2_rdma_device *rdma_dev; + + mq_wqe = (struct sxe2_mq_wqe 
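/* wqe_info here is the raw SXE2_MQ_WQE_SIZE x u64 image of the WQE,
 * copied word by word into the ring below via set_64bit_val(). */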
*)wqe_info; + rdma_dev = to_rdmadev(dev); + + offset = 0; + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) { + set_64bit_val(wqe, offset, mq_wqe->buf[i]); + offset += sizeof(u64); + } + + DRV_RDMA_LOG_DEV_DEBUG("\n"); + DRV_RDMA_LOG_DEV_DEBUG("---------------POST MQ INFO:---------------\n"); + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + DRV_RDMA_LOG_DEV_DEBUG("info[%d]-----%#llx\n", i, + mq_wqe->buf[i]); +} + +int sxe2_hw_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, + struct mq_cmds_info *pcmdinfo) +{ + __le64 *wqe; + void *wqe_info; + u64 scratch; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_device *rdma_dev; + + mq = dev->mq; + rdma_dev = to_rdmadev(dev); + + switch (pcmdinfo->mq_cmd) { + case MQ_OP_REG_MR: + scratch = pcmdinfo->in.u.reg_mr.scratch; + pcmdinfo->in.u.reg_mr.info.field.wqe_valid = mq->polarity; + wqe_info = (void *)&pcmdinfo->in.u.reg_mr.info; + break; + case MQ_OP_DEREGISTER_MR: + scratch = pcmdinfo->in.u.dereg_mr.scratch; + pcmdinfo->in.u.dereg_mr.info.field.wqe_valid = mq->polarity; + wqe_info = (void *)&pcmdinfo->in.u.dereg_mr.info; + break; + case MQ_OP_QUERY_MR: + scratch = pcmdinfo->in.u.query_mr.scratch; + pcmdinfo->in.u.query_mr.info.field.op = SXE2_MQ_OP_QUERY_MR; + pcmdinfo->in.u.query_mr.info.field.wqe_valid = mq->polarity; + wqe_info = (void *)&pcmdinfo->in.u.query_mr.info; + break; + case MQ_OP_ALLOC_MR_KEY: + scratch = pcmdinfo->in.u.alloc_key.scratch; + pcmdinfo->in.u.alloc_key.info.field.wqe_valid = mq->polarity; + wqe_info = (void *)&pcmdinfo->in.u.alloc_key.info; + break; + case MQ_OP_DEALLOC_MR_KEY: + scratch = pcmdinfo->in.u.dalloc_key.scratch; + pcmdinfo->in.u.dalloc_key.info.field.wqe_valid = mq->polarity; + wqe_info = (void *)&pcmdinfo->in.u.dalloc_key.info; + break; + default: + return -EOPNOTSUPP; + } + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR("MR:get next mq wqe failed.\n"); + return -ENOMEM; + } + + sxe2_print_wqe_info(dev, wqe_info, pcmdinfo->mq_cmd); + + sxe2_set_mq_wqe(dev, wqe, wqe_info); + + if (pcmdinfo->post_mq) + sxe2_kpost_mq(mq); + + return 0; +} + +static void sxe2_mr_info_init(union sxe2_reg_mr_info *mr_info, + struct sxe2_mr *vendor_mr, int access) +{ + struct sxe2_pbl_pble_alloc_info *palloc = &vendor_mr->pble_alloc; + struct ib_pd *pd = vendor_mr->ibmr.pd; + struct sxe2_rdma_pd *vendor_pd = ibpd_to_vendor_pd(pd); + + mr_info->field.mr_idx = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + mr_info->field.mr_key = (u8)vendor_mr->stag; + mr_info->field.len = vendor_mr->len; + mr_info->field.mr_type = vendor_mr->is_mw; + mr_info->field.access_right = sxe2_get_mr_access(access); + mr_info->field.pd = vendor_pd->pd_ctx.pd_id; + if (access & IB_ZERO_BASED) { + mr_info->field.va_based_flag = SXE2_ADDR_TYPE_ZERO_BASED; + mr_info->field.start_addr = + vendor_mr->ibmr.iova & (vendor_mr->page_size - 1); + } else { + mr_info->field.va_based_flag = SXE2_ADDR_TYPE_VA_BASED; + } + mr_info->field.log_entity_size = ilog2(vendor_mr->page_size); + if (palloc->pbl_mode.mode == MR_ACCESS_MODE_PHY) { + mr_info->field.access_mode = SXE2_MR_ACCESS_MODE_PA; + mr_info->field.start_addr = + vendor_mr->pgaddrmem + + (vendor_mr->ibmr.iova & (vendor_mr->page_size - 1)); + } else { + mr_info->field.access_mode = SXE2_MR_ACCESS_MODE_VA; + if (mr_info->field.va_based_flag == SXE2_ADDR_TYPE_VA_BASED) + mr_info->field.start_addr = vendor_mr->ibmr.iova; + mr_info->field.pbl_index = palloc->pbl_index; + } + if (vendor_mr->is_len64) + mr_info->field.is_len64 = 1; + mr_info->field.pbl_mode = palloc->pbl_mode.mode; + mr_info->field.op = 
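/* single-page MRs took the PA path above (direct start_addr); multi-page
 * MRs use VA mode with a pbl_index. The REG_MR opcode is stamped last. */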
SXE2_MQ_OP_REG_MR; + +} +static int sxe2_post_regmr_mqinfo(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mr *vendor_mr, int access) +{ + union sxe2_reg_mr_info *mr_info; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *cmd_info; + int ret; + + mq_request = + sxe2_kalloc_and_get_mq_request(&rdma_dev->rdma_func->mq, true); + if (!mq_request) + return -ENOMEM; + + cmd_info = &mq_request->info; + mr_info = &cmd_info->in.u.reg_mr.info; + memset(mr_info, 0, sizeof(*mr_info)); + + sxe2_mr_info_init(mr_info, vendor_mr, access); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_pageSize", + &rdma_dev->rdma_func->mq.err_cqe_val, mr_info, vendor_mr); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_pageSize"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_type", + &rdma_dev->rdma_func->mq.err_cqe_val, mr_info, vendor_mr); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_type"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_accessRight", + &rdma_dev->rdma_func->mq.err_cqe_val, mr_info); + if (rdma_dev->rdma_func->mq.err_cqe_val.rsc_wqe_err_type == + MR_ACCESS_RIGHT_DEBUGFS) + DRV_RDMA_LOG_DEBUG_BDF( + "MQ DEBUGFS:inject ori mr_access_right %#x\n", + sxe2_get_mr_access(access)); + + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_accessRight"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_reg", + &rdma_dev->rdma_func->mq.err_cqe_val, mr_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_reg"); + +#endif + + cmd_info->mq_cmd = MQ_OP_REG_MR; + cmd_info->post_mq = 1; + cmd_info->in.u.reg_mr.ctx_dev = &rdma_dev->rdma_func->ctx_dev; + cmd_info->in.u.reg_mr.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_dev->rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_dev->rdma_func->mq, mq_request); + if (!ret) + vendor_mr->is_hwreg = 1; + + return ret; +} + +static int sxe2_post_deregmr_mqinfo(struct ib_mr *ib_mr) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ib_mr->device); + struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ib_mr); + struct sxe2_rdma_pd *vendor_pd = ibpd_to_vendor_pd(ib_mr->pd); + union sxe2_dereg_mr_info *mr_info; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *cmd_info; + int ret; + + if (!vendor_mr->is_hwreg) + return 0; + + mq_request = + sxe2_kalloc_and_get_mq_request(&rdma_dev->rdma_func->mq, true); + if (!mq_request) + return -ENOMEM; + + cmd_info = &mq_request->info; + mr_info = &cmd_info->in.u.dereg_mr.info; + memset(mr_info, 0, sizeof(*mr_info)); + mr_info->field.pd = vendor_pd->pd_ctx.pd_id; + mr_info->field.mr_idx = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + mr_info->field.op = SXE2_MQ_OP_DEREGISTER_MR; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_dereg", + &rdma_dev->rdma_func->mq.err_cqe_val, mr_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_dereg"); +#endif + + cmd_info->mq_cmd = MQ_OP_DEREGISTER_MR; + cmd_info->post_mq = 1; + cmd_info->in.u.dereg_mr.ctx_dev = &rdma_dev->rdma_func->ctx_dev; + cmd_info->in.u.dereg_mr.scratch = (uintptr_t)mq_request; + cmd_info->destroy = true; + ret = sxe2_khandle_mq_cmd(rdma_dev->rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_dev->rdma_func->mq, mq_request); + + if (!ret) + vendor_mr->is_hwreg = 0; + + return ret; +} + +static int sxe2_post_alloc_mr_mqinfo(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mr *vendor_mr) +{ + union sxe2_alloc_key_info *info; + struct sxe2_rdma_pd *iwpd = 
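/* builds and posts an MQ_OP_ALLOC_MR_KEY work request for this MR */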
ibpd_to_vendor_pd(vendor_mr->ibmr.pd); + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *cmd_info; + int ret; + + mq_request = + sxe2_kalloc_and_get_mq_request(&rdma_dev->rdma_func->mq, true); + if (!mq_request) + return -ENOMEM; + + cmd_info = &mq_request->info; + info = &cmd_info->in.u.alloc_key.info; + memset(info, 0, sizeof(*info)); + + info->field.mr_idx = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + info->field.pd = iwpd->pd_ctx.pd_id; + info->field.mr_type = SXE2_MR_TYPE_MR; + info->field.len = vendor_mr->len; + info->field.log_entity_size = ilog2(vendor_mr->page_size); + info->field.pbl_idx = vendor_mr->pble_alloc.pbl_index; + info->field.pbl_mode = vendor_mr->pble_alloc.pbl_mode.mode; + info->field.access_mode = SXE2_MR_ACCESS_MODE_VA; + info->field.op = SXE2_MQ_OP_ALLOC_MR_key; + + cmd_info->mq_cmd = MQ_OP_ALLOC_MR_KEY; + cmd_info->post_mq = 1; + cmd_info->in.u.alloc_key.ctx_dev = &rdma_dev->rdma_func->ctx_dev; + cmd_info->in.u.alloc_key.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_dev->rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_dev->rdma_func->mq, mq_request); + + return ret; +} + +bool sxe2_check_mr_contiguous(struct sxe2_mr *vendor_mr) +{ + struct ib_umem *region = vendor_mr->region; + struct ib_block_iter biter; + bool first = true; + u64 last_addr = 0; + u64 curr_addr = 0; + + rdma_umem_for_each_dma_block(region, &biter, vendor_mr->page_size) { + if (first) { + last_addr = rdma_block_iter_dma_address(&biter); + first = false; + } else { + curr_addr = rdma_block_iter_dma_address(&biter); + if (last_addr + vendor_mr->page_size != curr_addr) + return false; + last_addr = curr_addr; + } + } + + return true; +} + +static void sxe2_get_mr_first_pgaddrs(struct sxe2_mr *vendor_mr, u64 *addr) +{ + struct ib_umem *region = vendor_mr->region; + struct ib_block_iter biter; + + rdma_umem_for_each_dma_block(region, &biter, vendor_mr->page_size) { + *addr = rdma_block_iter_dma_address(&biter); + break; + } +} + +static int sxe2_set_pble_pgaddrs(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_mr *vendor_mr) +{ + struct ib_umem *region = vendor_mr->region; + struct sxe2_pbl_pble_alloc_info *palloc = &vendor_mr->pble_alloc; + struct ib_block_iter biter; + u32 pbl_cnt = 0; + u64 page_addr; + u64 pble_liner_index; + int ret = SXE2_OK; + + pble_liner_index = vendor_mr->pble_alloc.pble_info.liner_addr; + rdma_umem_for_each_dma_block(region, &biter, vendor_mr->page_size) { + page_addr = rdma_block_iter_dma_address(&biter); + ret = sxe2_pbl_set_pble(pble_rsrc, pble_liner_index, page_addr, + palloc->mr_first_page_flags); + if (ret != SXE2_OK) + break; + if (++pbl_cnt == palloc->needed_pble_cnt) + break; + if (!palloc->mr_first_page_flags) + pble_liner_index += sizeof(u64); + else + pble_liner_index += SXE2_FPTE_LINER_ADDR; + } + + return ret; +} + +static void sxe2_free_stag(struct sxe2_rdma_device *rdma_dev, u32 stag) +{ + u32 stag_idx; + + stag_idx = (stag & rdma_dev->rdma_func->mr_stagmask) >> + SXE2_CQPSQ_STAG_IDX_S; + sxe2_kfree_rsrc(rdma_dev->rdma_func, rdma_dev->rdma_func->allocated_mrs, + stag_idx); + + DRV_RDMA_LOG_DEV_DEBUG( + "FREE_MR_STAG:stag 0x%x bit_index %u mask 0x%x\n", stag, + stag_idx, rdma_dev->rdma_func->mr_stagmask); +} + +u32 sxe2_create_stag(struct sxe2_rdma_device *rdma_dev) +{ + u32 stag; + u32 stag_index = 0; + u32 next_stag_index; + u32 random; + u8 consumer_key; + int ret; + + get_random_bytes(&random, sizeof(random)); + consumer_key = (u8)random; + + next_stag_index = (random & rdma_dev->rdma_func->mr_stagmask) >> 8; + 
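/* randomize the search start so stag indexes spread across the bitmap;
 * the stag built below is (index << SXE2_CQPSQ_STAG_IDX_S) | consumer_key. */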
next_stag_index %= rdma_dev->rdma_func->max_mr; + + ret = sxe2_kalloc_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_mrs, + rdma_dev->rdma_func->max_mr, &stag_index, + &next_stag_index); + if (ret) { + DRV_RDMA_LOG_DEV_DEBUG("MR: kalloc stag_index failed\n"); + return 0; + } + + stag = stag_index << SXE2_CQPSQ_STAG_IDX_S; + stag |= (u32)consumer_key; + + DRV_RDMA_LOG_DEV_DEBUG( + "CREATE_MR_STAG:stag 0x%x bit_index %u mr_key %d mask 0x%x\n", + stag, stag_index, consumer_key, + rdma_dev->rdma_func->mr_stagmask); + + return stag; +} + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ +static struct sxe2_mr *sxe2_alloc_mr(struct ib_umem *region, struct ib_pd *pd, + u64 virt, u64 start) +#else +static struct sxe2_mr *sxe2_alloc_mr(struct ib_umem *region, struct ib_pd *pd, + u64 virt) +#endif +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(pd->device); + struct sxe2_mr *vendor_mr = NULL; + unsigned long pgsz_bitmap; + + vendor_mr = kzalloc(sizeof(*vendor_mr), GFP_KERNEL); + if (!vendor_mr) { + DRV_RDMA_LOG_DEV_ERR("MR: kzalloc mr mem failed\n"); + return ERR_PTR(-ENOMEM); + } + + vendor_mr->region = region; + vendor_mr->ibmr.pd = pd; + vendor_mr->ibmr.device = pd->device; + vendor_mr->ibmr.iova = virt; + + pgsz_bitmap = rdma_dev->rdma_func->ctx_dev.hw_attrs.page_size_cap; + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + vendor_mr->page_size = + sxe2_set_best_pagesz(start, region, pgsz_bitmap); +#else + vendor_mr->page_size = + ib_umem_find_best_pgsz(region, pgsz_bitmap, virt); +#endif + if (unlikely(!vendor_mr->page_size)) { + DRV_RDMA_LOG_DEV_ERR("MR:0 page size error\n"); + kfree(vendor_mr); + return ERR_PTR(-EOPNOTSUPP); + } + + vendor_mr->page_msk = ~(vendor_mr->page_size - 1); + + vendor_mr->len = region->length; + vendor_mr->is_mw = SXE2_MR_TYPE_MR; +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT + vendor_mr->page_cnt = + sxe2_ib_umem_num_dma_blocks(region, vendor_mr->page_size, virt); +#else + vendor_mr->page_cnt = + (u32)ib_umem_num_dma_blocks(region, vendor_mr->page_size); +#endif + return vendor_mr; +} + +static int sxe2_register_mr(struct sxe2_mr *vendor_mr, int access, + bool create_stag) +{ + struct sxe2_rdma_device *rdma_dev = + ibdev_to_rdmadev(vendor_mr->ibmr.device); + struct sxe2_pbl_pble_alloc_info *pbl_alloc = &vendor_mr->pble_alloc; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + u32 stag = 0; + int ret; + + if (vendor_mr->page_cnt == 1) { + sxe2_get_mr_first_pgaddrs(vendor_mr, &vendor_mr->pgaddrmem); + pbl_alloc->pbl_mode.mode = MR_ACCESS_MODE_PHY; + } else { + if ((rdma_func->app_mod_all_flush) && + (vendor_mr->page_size >= SXE2_HW_PAGE_SIZE_1G)) { + pbl_alloc->mr_first_page_flags = true; + } + ret = sxe2_pbl_get_pble(rdma_dev->rdma_func->pble_rsrc, + pbl_alloc, vendor_mr->page_cnt, + PBL_OBJ_MR); + if (ret) { + DRV_RDMA_LOG_DEV_DEBUG( + "MR:sxe2_pbl_get_pble error, err(%d)\n", ret); + return ret; + } + vendor_mr->alloced_pble = SXE2_MR_ALLOCED_PBLE; + ret = sxe2_set_pble_pgaddrs(rdma_dev->rdma_func->pble_rsrc, + vendor_mr); + if (ret) { + DRV_RDMA_LOG_DEV_DEBUG( + "MR:sxe2_set_pble_pgaddrs error, err(%d)\n", + ret); + goto err_case; + } + } + + if (create_stag) { + stag = sxe2_create_stag(rdma_dev); + if (!stag) { + ret = -ENOMEM; + goto err_case; + } + vendor_mr->alloced_key = SXE2_MR_ALLOCED_KEY; + vendor_mr->stag = stag; + vendor_mr->ibmr.rkey = stag; + vendor_mr->ibmr.lkey = stag; + } + + vendor_mr->access = access; + + ret = sxe2_post_regmr_mqinfo(rdma_dev, vendor_mr, access); + if (ret) + goto err_case; + + return 0; + +err_case: + if 
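/* error unwind: drop the stag first, then the pble backing */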
(vendor_mr->alloced_key == SXE2_MR_ALLOCED_KEY) { + sxe2_free_stag(rdma_dev, vendor_mr->stag); + vendor_mr->alloced_key = SXE2_MR_NOTALLOCED_KEY; + } + + if (vendor_mr->alloced_pble == SXE2_MR_ALLOCED_PBLE) { + sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc, + vendor_mr->pble_alloc.pbl_index, + vendor_mr->page_cnt, + vendor_mr->pble_alloc.mr_first_page_flags); + vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE; + } + + return ret; +} + +static struct ib_mr *sxe2_rereg_mr_trans(struct sxe2_mr *vendor_mr, u64 start, + u64 len, u64 virt, + struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = + ibdev_to_rdmadev(vendor_mr->ibmr.device); + struct ib_pd *pd = vendor_mr->ibmr.pd; + struct ib_umem *region; + int ret; +#ifdef IB_UMEM_GET_V3 + +#endif + +#ifdef IB_UMEM_GET_V2 + region = ib_umem_get(udata, start, len, vendor_mr->access); +#elif defined(IB_UMEM_GET_V1) + region = ib_umem_get(udata, start, len, vendor_mr->access, 0); +#elif defined IB_UMEM_GET_V3 + + region = ib_umem_get(pd->uobject->context, start, len, vendor_mr->access, 0); +#else + region = ib_umem_get(pd->device, start, len, vendor_mr->access); +#endif + if (IS_ERR(region)) { + DRV_RDMA_LOG_DEV_DEBUG("mr umem ib_umem_get failed\n"); + return (struct ib_mr *)region; + } + + vendor_mr->region = region; + vendor_mr->ibmr.iova = virt; + vendor_mr->ibmr.pd = pd; + vendor_mr->page_size = PAGE_SIZE; + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + vendor_mr->page_size = + sxe2_set_best_pagesz(start, region, + rdma_dev->rdma_func->ctx_dev.hw_attrs.page_size_cap); +#else + vendor_mr->page_size = ib_umem_find_best_pgsz( + region, rdma_dev->rdma_func->ctx_dev.hw_attrs.page_size_cap, + virt); +#endif + if (unlikely(!vendor_mr->page_size)) { + ret = -EOPNOTSUPP; + goto ret; + } + + vendor_mr->len = region->length; +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT + vendor_mr->page_cnt = + sxe2_ib_umem_num_dma_blocks(region, vendor_mr->page_size, virt); +#else + vendor_mr->page_cnt = + (u32)ib_umem_num_dma_blocks(region, vendor_mr->page_size); +#endif + ret = sxe2_register_mr(vendor_mr, vendor_mr->access, false); + if (ret) + goto ret; + + return &vendor_mr->ibmr; + +ret: + ib_umem_release(region); + return ERR_PTR(ret); +} + +static int sxe2_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ibmr); + struct sxe2_rdma_device *rdma_dev = + ibdev_to_rdmadev(vendor_mr->ibmr.device); + struct sxe2_pbl_pble_alloc_info *palloc = &vendor_mr->pble_alloc; + u64 pble_liner_index; + + if (unlikely(vendor_mr->npages == vendor_mr->page_cnt)) { + DRV_RDMA_LOG_DEV_ERR( + "MR: map mr (%d)page exceed mr max pagecnt %d.\n", + vendor_mr->npages, vendor_mr->page_cnt); + return -ENOMEM; + } + + pble_liner_index = palloc->pbl_index; + + pble_liner_index += sizeof(u64) * vendor_mr->npages; + sxe2_pbl_set_pble(rdma_dev->rdma_func->pble_rsrc, pble_liner_index, + addr, false); + + vendor_mr->npages++; + return 0; +} + +static struct ib_mr *sxe2_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, + int access, u64 *iova_start) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(pd->device); + struct sxe2_pbl_pble_alloc_info *palloc; + struct sxe2_mr *vendor_mr; + u32 stag; + int ret; + + DRV_RDMA_LOG_DEV_DEBUG("mr:kreg phys mr enter.\n"); + + vendor_mr = kzalloc(sizeof(*vendor_mr), GFP_KERNEL); + if (!vendor_mr) { + DRV_RDMA_LOG_DEV_ERR("MR: kzalloc mr buf failed.\n"); + return ERR_PTR(-ENOMEM); + } + + palloc = &vendor_mr->pble_alloc; + + vendor_mr->ibmr.pd = pd; + vendor_mr->ibmr.device = pd->device; + + stag = 
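/* sxe2_create_stag() returns 0 when index allocation fails (checked below) */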
sxe2_create_stag(rdma_dev); + if (!stag) { + DRV_RDMA_LOG_DEV_ERR("MR: alloc mr idx failed.\n"); + ret = -ENOMEM; + goto err; + } + + vendor_mr->stag = stag; + vendor_mr->ibmr.iova = *iova_start; + vendor_mr->ibmr.rkey = stag; + vendor_mr->ibmr.lkey = stag; + vendor_mr->is_mw = SXE2_MR_TYPE_MR; + vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE; + vendor_mr->alloced_key = SXE2_MR_ALLOCED_KEY; + vendor_mr->page_cnt = 1; + vendor_mr->pgaddrmem = addr; + vendor_mr->access = access; + vendor_mr->len = size; + vendor_mr->is_len64 = 1; + vendor_mr->page_size = PAGE_SIZE; + vendor_mr->page_msk = ~(PAGE_SIZE - 1); + palloc->pbl_mode.mode = MR_ACCESS_MODE_PHY; + ret = sxe2_post_regmr_mqinfo(rdma_dev, vendor_mr, access); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("MR: post regmr failed, ret %d.\n", ret); + sxe2_free_stag(rdma_dev, stag); + vendor_mr->alloced_key = SXE2_MR_NOTALLOCED_KEY; + goto err; + } + + vendor_mr->is_hwreg = 1; + + (void)sxe2_debbugfs_mr_add(rdma_dev, vendor_mr); + + return &vendor_mr->ibmr; + +err: + kfree(vendor_mr); + + return ERR_PTR(ret); +} + +struct ib_mr *sxe2_kreg_user_mr(struct ib_pd *pd, u64 start, u64 len, u64 virt, + int access, struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(pd->device); + struct ib_umem *region; + struct sxe2_mr *vendor_mr; + int ret; +#ifdef IB_UMEM_GET_V3 + +#endif + + SXE2_UNUSED_PARA(udata); + + DRV_RDMA_LOG_DEV_DEBUG( + "mr:kreg user mr enter. start:%#llx, length:%#llx, virt:%#llx,acc:%#x\n", + start, len, virt, access); + + if (len > rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size) { + DRV_RDMA_LOG_DEV_ERR( + "MR: mr len (%#llx) exceed max size 0x%llx.\n", len, + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size); + return ERR_PTR(-EINVAL); + } +#ifdef IB_UMEM_GET_V2 + region = ib_umem_get(udata, start, len, access); +#elif defined(IB_UMEM_GET_V1) + region = ib_umem_get(udata, start, len, access, 0); +#elif defined IB_UMEM_GET_V3 + + region = ib_umem_get(pd->uobject->context, start, len, access, 0); +#else + region = ib_umem_get(pd->device, start, len, access); +#endif + if (IS_ERR(region)) { + DRV_RDMA_LOG_DEV_ERR("MR: ib umem get failed.\n"); + return (struct ib_mr *)region; + } + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + vendor_mr = sxe2_alloc_mr(region, pd, virt, start); +#else + vendor_mr = sxe2_alloc_mr(region, pd, virt); +#endif + if (IS_ERR(vendor_mr)) { + ib_umem_release(region); + DRV_RDMA_LOG_DEV_ERR("MR: alloc mr mem failed.\n"); + return (struct ib_mr *)vendor_mr; + } + + ret = sxe2_register_mr(vendor_mr, access, true); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("MR: register mr failed, ret %d.\n", ret); + goto error; + } + + (void)sxe2_debbugfs_mr_add(rdma_dev, vendor_mr); + + return &vendor_mr->ibmr; + +error: + ib_umem_release(region); + kfree(vendor_mr); + + return ERR_PTR(ret); +} + +#ifdef DEREG_ME_V1 +int sxe2_kdereg_mr(struct ib_mr *ib_mr) +#else +int sxe2_kdereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) +#endif +{ + struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ib_mr); + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ib_mr->device); + struct sxe2_pbl_pble_alloc_info *pbl_alloc = &vendor_mr->pble_alloc; + int ret; + + ret = sxe2_post_deregmr_mqinfo(ib_mr); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("MR: post deregister mr failed, ret %d.\n", + ret); + return ret; + } + + if (vendor_mr->alloced_key == SXE2_MR_ALLOCED_KEY) { + sxe2_free_stag(rdma_dev, vendor_mr->stag); + vendor_mr->alloced_key = SXE2_MR_NOTALLOCED_KEY; + } + + if (vendor_mr->alloced_pble == SXE2_MR_ALLOCED_PBLE) { + 
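/* return the pble range backing this MR and clear its alloc flag */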
sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc,
+				   pbl_alloc->pbl_index, vendor_mr->page_cnt,
+				   pbl_alloc->mr_first_page_flags);
+		vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE;
+	}
+
+	if (vendor_mr->region)
+		ib_umem_release(vendor_mr->region);
+
+	sxe2_debugfs_mr_remove(rdma_dev, vendor_mr);
+
+	kfree(vendor_mr);
+
+	return 0;
+}
+
+#ifdef REREG_MR_VER_1
+int sxe2_krereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
+			u64 virt, int new_access, struct ib_pd *new_pd,
+			struct ib_udata *udata)
+{
+	struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ib_mr->device);
+	struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ib_mr);
+	struct sxe2_pbl_pble_alloc_info *pbl_alloc = &vendor_mr->pble_alloc;
+	int ret;
+
+	DRV_RDMA_LOG_DEBUG_BDF("MR: rereg mr enter chgflag 0x%x addr %llu\n"
+			       "\tlen %#llx acc 0x%x.\n",
+			       flags, start, len, new_access);
+
+	if (len > rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size) {
+		DRV_RDMA_LOG_ERROR_BDF(
+			"MR: mr len (0x%llx) exceed max size 0x%llx.\n", len,
+			rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size);
+		return -EINVAL;
+	}
+
+	if (flags &
+	    ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) {
+		DRV_RDMA_LOG_ERROR_BDF("MR: unsupported flags 0x%x.\n",
+				       flags & ~(IB_MR_REREG_TRANS |
+						 IB_MR_REREG_PD |
+						 IB_MR_REREG_ACCESS));
+		return -EOPNOTSUPP;
+	}
+
+	ret = sxe2_post_deregmr_mqinfo(ib_mr);
+	if (ret) {
+		DRV_RDMA_LOG_ERROR_BDF(
+			"MR: post deregister mr failed, ret %d.\n", ret);
+		return ret;
+	}
+
+	if (flags & IB_MR_REREG_ACCESS)
+		vendor_mr->access = new_access;
+
+	if (flags & IB_MR_REREG_PD) {
+		vendor_mr->ibmr.pd = new_pd;
+		vendor_mr->ibmr.device = new_pd->device;
+	}
+
+	if (flags & IB_MR_REREG_TRANS) {
+		if (vendor_mr->alloced_pble == SXE2_MR_ALLOCED_PBLE) {
+			sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc,
+					   pbl_alloc->pbl_index,
+					   vendor_mr->page_cnt,
+					   pbl_alloc->mr_first_page_flags);
+			vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE;
+		}
+
+		if (vendor_mr->region) {
+			ib_umem_release(vendor_mr->region);
+			vendor_mr->region = NULL;
+		}
+
+		ib_mr = sxe2_rereg_mr_trans(vendor_mr, start, len, virt, udata);
+		if (IS_ERR(ib_mr)) {
+			DRV_RDMA_LOG_ERROR_BDF("MR: post rereg mr failed.\n");
+			return PTR_ERR(ib_mr);
+		}
+	} else {
+		ret = sxe2_post_regmr_mqinfo(rdma_dev, vendor_mr,
+					     vendor_mr->access);
+		if (ret) {
+			DRV_RDMA_LOG_ERROR_BDF(
+				"MR: post reg mr failed, ret %d.\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+#else
+struct ib_mr *sxe2_krereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+				  u64 len, u64 virt, int new_access,
+				  struct ib_pd *new_pd, struct ib_udata *udata)
+{
+	struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(ib_mr->device);
+	struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ib_mr);
+	struct sxe2_pbl_pble_alloc_info *pbl_alloc = &vendor_mr->pble_alloc;
+	int ret;
+
+	DRV_RDMA_LOG_DEV_DEBUG("MR: rereg mr enter chgflag 0x%x addr %llu\n"
+			       "\tlen %#llx acc 0x%x.\n",
+			       flags, start, len, new_access);
+
+	if (len > rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"MR: mr len (0x%llx) exceed max size 0x%llx.\n", len,
+			rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (flags &
+	    ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) {
+		DRV_RDMA_LOG_DEV_ERR("MR: unsupported flags 0x%x.\n",
+				     (flags &
+				      ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD |
+					IB_MR_REREG_ACCESS)));
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	ret = sxe2_post_deregmr_mqinfo(ib_mr);
+	if (ret) {
+		DRV_RDMA_LOG_DEV_ERR("MR: post deregister mr failed, ret
%d.\n", + ret); + return ERR_PTR(ret); + } + + if (flags & IB_MR_REREG_ACCESS) + vendor_mr->access = new_access; + + if (flags & IB_MR_REREG_PD) { + vendor_mr->ibmr.pd = new_pd; + vendor_mr->ibmr.device = new_pd->device; + } + + if (flags & IB_MR_REREG_TRANS) { + if (vendor_mr->alloced_pble == SXE2_MR_ALLOCED_PBLE) { + sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc, + pbl_alloc->pbl_index, + vendor_mr->page_cnt, + pbl_alloc->mr_first_page_flags); + vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE; + } + + if (vendor_mr->region) { + ib_umem_release(vendor_mr->region); + vendor_mr->region = NULL; + } + + ib_mr = sxe2_rereg_mr_trans(vendor_mr, start, len, virt, udata); + if (IS_ERR(ib_mr)) { + DRV_RDMA_LOG_DEV_ERR("MR: post rereg mr failed.\n"); + return ib_mr; + } + } else { + ret = sxe2_post_regmr_mqinfo(rdma_dev, vendor_mr, + vendor_mr->access); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "MR: post reg mr failed, ret %d.\n", ret); + return ERR_PTR(ret); + } + } + + return NULL; +} +#endif + +#ifndef REG_USER_MR_DMABUF_VER_1 +struct ib_mr *sxe2_kreg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 len, + u64 virt, int fd, int access, + struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(pd->device); + struct ib_umem_dmabuf *umem_dmabuf; + struct sxe2_mr *vedor_mr; + long ret; + + DRV_RDMA_LOG_DEV_DEBUG("mr:kreg user mr dmabuf enter.\n"); + + if (len > rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size) { + DRV_RDMA_LOG_DEV_ERR( + "MR: mr len (%#llx) exceed max size %#llx.\n", len, + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_mr_size); + return ERR_PTR(-EINVAL); + } + + umem_dmabuf = + ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access); + if (IS_ERR(umem_dmabuf)) { + ret = PTR_ERR(umem_dmabuf); + DRV_RDMA_LOG_DEV_ERR("MR: ib umem dmabuf pin failed [%ld].\n", + ret); + return ERR_PTR(ret); + } +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + vedor_mr = sxe2_alloc_mr(&umem_dmabuf->umem, pd, virt, start); +#else + vedor_mr = sxe2_alloc_mr(&umem_dmabuf->umem, pd, virt); +#endif + if (IS_ERR(vedor_mr)) { + ret = PTR_ERR(vedor_mr); + DRV_RDMA_LOG_DEV_ERR("MR: alloc mr mem failed [%ld].\n", ret); + goto error_release; + } + + ret = sxe2_register_mr(vedor_mr, access, true); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("MR: register mr failed, ret %ld.\n", ret); + goto err_iwmr; + } + + return &vedor_mr->ibmr; + +err_iwmr: + kfree(vedor_mr); + +error_release: + ib_umem_release(&umem_dmabuf->umem); + + return ERR_PTR(ret); +} +#endif + +#ifdef RDMA_ALLOC_MR_VER_1 +struct ib_mr *sxe2_kalloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata) +#else +struct ib_mr *sxe2_kalloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +#endif + +{ + struct sxe2_rdma_device *rdma_dev = ibdev_to_rdmadev(pd->device); + struct sxe2_pbl_pble_alloc_info *palloc; + struct sxe2_mr *vendor_mr; + int ret; + u32 stag; + int err_code = -ENOMEM; + + vendor_mr = kzalloc(sizeof(*vendor_mr), GFP_KERNEL); + if (!vendor_mr) { + DRV_RDMA_LOG_DEV_ERR("MR: kzalloc mr buf failed.\n"); + return ERR_PTR(-ENOMEM); + } + + stag = sxe2_create_stag(rdma_dev); + if (!stag) { + DRV_RDMA_LOG_DEV_ERR("MR: mr idx alloc failed.\n"); + err_code = -ENOMEM; + goto error; + } + vendor_mr->alloced_key = SXE2_MR_ALLOCED_KEY; + vendor_mr->is_mw = SXE2_MR_TYPE_MR; + vendor_mr->stag = stag; + vendor_mr->ibmr.rkey = stag; + vendor_mr->ibmr.lkey = stag; + vendor_mr->ibmr.pd = pd; + vendor_mr->ibmr.device = pd->device; + + palloc = &vendor_mr->pble_alloc; + vendor_mr->page_cnt = 
max_num_sg; + vendor_mr->page_size = PAGE_SIZE; + vendor_mr->len = max_num_sg * PAGE_SIZE; + + ret = sxe2_pbl_get_pble(rdma_dev->rdma_func->pble_rsrc, palloc, + vendor_mr->page_cnt, PBL_OBJ_MR); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("MR: get pble failed, ret %d\n", ret); + err_code = -ENOMEM; + goto error; + } + vendor_mr->alloced_pble = SXE2_MR_ALLOCED_PBLE; + + err_code = sxe2_post_alloc_mr_mqinfo(rdma_dev, vendor_mr); + if (err_code) { + DRV_RDMA_LOG_DEV_ERR("MR: post alloc mr failed, ret %d\n", + err_code); + goto error; + } + + (void)sxe2_debbugfs_mr_add(rdma_dev, vendor_mr); + vendor_mr->is_hwreg = 1; + + return &vendor_mr->ibmr; +error: + if (vendor_mr->alloced_key == SXE2_MR_ALLOCED_KEY) { + sxe2_free_stag(rdma_dev, vendor_mr->stag); + vendor_mr->alloced_key = SXE2_MR_NOTALLOCED_KEY; + } + + if (vendor_mr->alloced_pble == SXE2_MR_ALLOCED_PBLE) { + sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc, + vendor_mr->pble_alloc.pbl_index, + vendor_mr->page_cnt, + vendor_mr->pble_alloc.mr_first_page_flags); + vendor_mr->alloced_pble = SXE2_MR_NOTALLOCED_PBLE; + } + + kfree(vendor_mr); + + return ERR_PTR(err_code); +} + +int sxe2_kmap_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset) +{ + struct sxe2_mr *vendor_mr = ibmr_to_vendor_mr(ibmr); + + vendor_mr->npages = 0; + + return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, sxe2_set_page); +} + +struct ib_mr *sxe2_kget_dma_mr(struct ib_pd *pd, int acc) +{ + u64 kva = 0; + + return sxe2_reg_phys_mr(pd, 0, (u64)-1, acc, &kva); +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.h new file mode 100644 index 0000000000000000000000000000000000000000..38a3602b813c083e4ac62fcfb165fe74d5669064 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_mr.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_MR_H__ +#define __SXE2_DRV_MR_H__ + +#include "sxe2_compat.h" + +#define SXE2_HW_PAGE_SIZE 4096 +#define SXE2_HW_PAGE_SIZE_1G (1 << 30) +#define SXE2_CQPSQ_STAG_IDX_S 8 +#define SXE2_MR_NOTALLOCED_PBLE 0 +#define SXE2_MR_ALLOCED_PBLE 1 +#define SXE2_MR_NOTALLOCED_KEY 0 +#define SXE2_MR_ALLOCED_KEY 1 +#define SXE2_MR_TYPE_MR 0 +#define SXE2_IP_ADDR_LEN 4 +#define SXE2_QPS_PER_PUSH_PAGE 16 +#define SXE2_FPTE_LINER_ADDR (1 << 21) + +#define SXE2_UNUSED_PARA(para) ((void)(para)) + +enum sxe2_mr_mode { + MR_ACCESS_MODE_PHY = 0, + MR_ACCESS_MODE_FIRST = MR_TABLE_FIRST_MODE, + MR_ACCESS_MODE_SECOND = MR_TABLE_SECOND_MODE, + MR_ACCESS_MODE_THREE = MR_TABLE_THIRD_MODE, +}; + +static inline struct sxe2_rdma_device *ibdev_to_rdmadev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct sxe2_rdma_device, ibdev); +} + +static inline struct sxe2_rdma_pd *ibpd_to_vendor_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct sxe2_rdma_pd, ibpd); +} + +static inline u8 sxe2_get_mr_access(int access) +{ + u8 hw_access = 0; + + hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? + SXE2_ACCESS_FLAGS_LOCALWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? + SXE2_ACCESS_FLAGS_REMOTEWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_READ) ? + SXE2_ACCESS_FLAGS_REMOTEREAD : + 0; + hw_access |= (access & IB_ACCESS_MW_BIND) ? 
+ SXE2_ACCESS_FLAGS_BIND_WINDOW : + 0; + hw_access |= SXE2_ACCESS_FLAGS_LOCALREAD; + + return hw_access; +} +struct ib_mr *sxe2_kreg_user_mr(struct ib_pd *pd, u64 start, u64 len, u64 virt, + int access, struct ib_udata *udata); +#ifdef DEREG_ME_V1 +int sxe2_kdereg_mr(struct ib_mr *ib_mr); +#else +int sxe2_kdereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); +#endif +#ifdef RDMA_ALLOC_MR_VER_1 +struct ib_mr *sxe2_kalloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata); +#else +struct ib_mr *sxe2_kalloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +#endif +int sxe2_kmap_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset); +struct ib_mr *sxe2_kget_dma_mr(struct ib_pd *pd, int acc); +#ifdef REREG_MR_VER_1 +int sxe2_krereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, + u64 virt, int new_access, struct ib_pd *new_pd, + struct ib_udata *udata); + +#else +struct ib_mr *sxe2_krereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 len, u64 virt, int new_access, + struct ib_pd *new_pd, struct ib_udata *udata); +#endif + +#ifndef REG_USER_MR_DMABUF_VER_1 +struct ib_mr *sxe2_kreg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 len, + u64 virt, int fd, int access, + struct ib_udata *udata); +#endif + +u32 sxe2_create_stag(struct sxe2_rdma_device *rdma_dev); +bool sxe2_check_mr_contiguous(struct sxe2_mr *vendor_mr); +void sxe2_print_wqe_info(struct sxe2_rdma_ctx_dev *dev, void *wqe_info, + u8 mq_cmd); +void sxe2_set_mq_wqe(struct sxe2_rdma_ctx_dev *dev, __le64 *wqe, + void *wqe_info); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..982bbe54e7cec1fa799472de0196abf885be4224 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_mr_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rcms_debugfs.h" +#include "sxe2_drv_mr_debugfs.h" + +enum { MR_KEY, MR_PD, MR_ACCESS_RIGHT }; + +#ifdef SXE2_CFG_DEBUG +static char *mr_fields[] = { + [MR_KEY] = "mr_key", + [MR_PD] = "pd", + [MR_ACCESS_RIGHT] = "access_right" +}; +#endif +static int sxe2_drv_mr_query_op(struct sxe2_rdma_device *rdma_dev, u32 mr_index, + dma_addr_t pa) +{ + int ret; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("get mq_request failed, ret (%d)\n", ret); + goto end; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_query", + &rdma_dev->rdma_func->mq.err_cqe_val, &mr_index); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_mr_id_query"); +#endif + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_QUERY_MR; + mq_info->post_mq = 1; + mq_info->in.u.query_mr.ctx_dev = &rdma_func->ctx_dev; + mq_info->in.u.query_mr.scratch = (uintptr_t)mq_request; + mq_info->in.u.query_mr.info.field.buf_addr = pa; + mq_info->in.u.query_mr.info.field.mr_index = mr_index; + + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret != 0) + DRV_RDMA_LOG_DEV_ERR("handle query mr failed, ret (%d)\n", ret); + +end: + return ret; +} + +static u64 sxe2_debugfs_mr_read(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + int ret; + int i; + union sxe2_hw_mrc *mr_ctx; + struct sxe2_mr *vendor_mr; + size_t len = 0; + u32 mr_index; + struct sxe2_rdma_dma_mem query_mr; + struct sxe2_rdma_ctx_dev *dev_ctx; + + vendor_mr = (struct sxe2_mr *)data; + mr_index = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + + memset(&query_mr, 0, sizeof(query_mr)); + query_mr.size = sizeof(union sxe2_hw_mrc); + query_mr.va = dma_alloc_coherent(dev_ctx->hw->device, query_mr.size, + &query_mr.pa, GFP_KERNEL); + if (!query_mr.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("query mr ctx alloc failed. 
ret:%d\n", + ret); + goto end; + } + + ret = sxe2_drv_mr_query_op(rdma_dev, mr_index, query_mr.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query mr failed, ret (%d)\n", ret); + goto free_ctx; + } + + mr_ctx = (union sxe2_hw_mrc *)query_mr.va; + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + mr_ctx->buf[i] = le64_to_cpu(mr_ctx->buf[i]); + + len += dbg_vsnprintf(buf, len, "MRINFO:%u\n", mr_index); + len += dbg_vsnprintf(buf, len, + "----------------------------------------\n"); + len += dbg_vsnprintf(buf, len, "pbl_mode: %d\n", + mr_ctx->field.pbl_mode); + len += dbg_vsnprintf(buf, len, "access_rights: %d\n", + mr_ctx->field.access_rights); + len += dbg_vsnprintf(buf, len, "va_based_flag: %d\n", + mr_ctx->field.va_based_flag); + len += dbg_vsnprintf(buf, len, "mw_type: %d\n", + mr_ctx->field.mw_type); + len += dbg_vsnprintf(buf, len, "access_mode: %d\n", + mr_ctx->field.access_mode); + len += dbg_vsnprintf(buf, len, "mrct_type: %d\n", + mr_ctx->field.mrct_type); + len += dbg_vsnprintf(buf, len, "free: %d\n", + mr_ctx->field.free); + len += dbg_vsnprintf(buf, len, "mr_key: %d\n", + mr_ctx->field.mr_key); + len += dbg_vsnprintf(buf, len, "qpn: %d\n", + mr_ctx->field.qpn); + len += dbg_vsnprintf(buf, len, "mw_bind_num: %d\n", + mr_ctx->field.mw_bind_num); + len += dbg_vsnprintf(buf, len, "pd: %d\n", + mr_ctx->field.pd); + len += dbg_vsnprintf(buf, len, "is_len64: %d\n", + mr_ctx->field.is_len64); + len += dbg_vsnprintf(buf, len, "start_addr: %#llx\n", + mr_ctx->field.start_addr); + len += dbg_vsnprintf(buf, len, "len: %llu\n", + mr_ctx->field.len); + len += dbg_vsnprintf(buf, len, "parent_mr_stag: %u\n", + mr_ctx->field.parent_mr_stag); + len += dbg_vsnprintf(buf, len, "ref_tag: %u\n", + mr_ctx->field.ref_tag); + len += dbg_vsnprintf(buf, len, "dif_pbl_index: %#llx\n", + (u64)mr_ctx->field.dif_pbl_index); + len += dbg_vsnprintf(buf, len, "dif_offset: %u\n", + mr_ctx->field.dif_offset); + len += dbg_vsnprintf(buf, len, "data_offset: %u\n", + mr_ctx->field.data_offset); + len += dbg_vsnprintf(buf, len, "sge_type: %d\n", + mr_ctx->field.sge_type); + len += dbg_vsnprintf(buf, len, "pbl_index: %#llx\n", + (u64)mr_ctx->field.pbl_index); + len += dbg_vsnprintf(buf, len, "log_entity_size: %d\n", + mr_ctx->field.log_entity_size); + len += dbg_vsnprintf(buf, len, "pfvf_id: %d\n", + mr_ctx->field.pfvf_id); + len += dbg_vsnprintf(buf, len, "app_tag: %d\n", + mr_ctx->field.app_tag); + len += dbg_vsnprintf(buf, len, "dif_mode: %d\n", + mr_ctx->field.dif_mode); + len += dbg_vsnprintf(buf, len, "block_size: %d\n", + mr_ctx->field.block_size); + len += dbg_vsnprintf(buf, len, + "----------------------------------------\n"); + +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_mr.size, query_mr.va, + query_mr.pa); + query_mr.va = NULL; + +end: + return len; +} + +#ifdef SXE2_CFG_DEBUG + +static int sxe2_mr_ctx_modify(union sxe2_hw_mrc *mr_ctx, int field, u64 value) +{ + int ret = 0; + + switch (field) { + case MR_KEY: + mr_ctx->field.mr_key = value; + break; + case MR_PD: + mr_ctx->field.pd = value; + break; + case MR_ACCESS_RIGHT: + mr_ctx->field.access_rights = value; + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_WARN("invalid index %d, ret %d\n", field, ret); + } + + return ret; +} +#endif +static int sxe2_debugfs_mr_write(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ +#ifdef SXE2_CFG_DEBUG + u32 i; + int ret; + int argc; + u64 new_value; + u32 mr_index; + union sxe2_hw_mrc *mr_ctx; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_mr 
*vendor_mr = (struct sxe2_mr *)data; + struct sxe2_rdma_dma_mem query_mr; + struct sxe2_rdma_ctx_dev *dev_ctx; + void *va_addr; + bool find_field = false; + + mr_index = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + memset(&query_mr, 0, sizeof(query_mr)); + query_mr.size = sizeof(union sxe2_hw_mrc); + query_mr.va = dma_alloc_coherent(dev_ctx->hw->device, query_mr.size, + &query_mr.pa, GFP_KERNEL); + if (!query_mr.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("query mr ctx alloc failed. ret:%d\n", + ret); + goto end; + } + + ret = sxe2_drv_mr_query_op(rdma_dev, mr_index, query_mr.pa); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query mr failed, ret (%d)\n", ret); + goto free_ctx; + } + + mr_ctx = (union sxe2_hw_mrc *)query_mr.va; + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + mr_ctx->buf[i] = le64_to_cpu(mr_ctx->buf[i]); + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(buf, &argc, argv); + if (ret) + goto free_ctx; + + DRV_RDMA_LOG_DEV_DEBUG("argv:%s\n", argv[0]); + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto free_ctx; + } + + for (i = 0; i < ARRAY_SIZE(mr_fields); i++) { + if (!strncmp(argv[0], mr_fields[i], strlen(mr_fields[i])) && + (strlen(mr_fields[i]) == strlen(argv[0]))) { + find_field = true; + break; + } + } + if (!find_field) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("unsupport change mr field %s\n", argv[0]); + goto free_ctx; + } + + ret = kstrtoull(argv[1], 10, &new_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret); + goto free_ctx; + } + + DRV_RDMA_LOG_DEV_INFO("modify mr field %s new value %llx\n", + mr_fields[i], new_value); + + if (sxe2_mr_ctx_modify(mr_ctx, i, new_value)) + goto free_ctx; + + for (i = 0; i < SXE2_MQ_WQE_SIZE; i++) + mr_ctx->buf[i] = cpu_to_le64(mr_ctx->buf[i]); + + ret = sxe2_rcms_num_to_ctx_va_pointer(rdma_dev, SXE2_RCMS_OBJ_MR, + mr_index, &va_addr); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("query mr va addr failed, ret (%d)\n", + ret); + goto end; + } + memcpy(va_addr, mr_ctx, sizeof(*mr_ctx)); + +free_ctx: + dma_free_coherent(dev_ctx->hw->device, query_mr.size, query_mr.va, + query_mr.pa); + query_mr.va = NULL; +end: + return ret; +#else + return 0; +#endif +} + +int sxe2_debbugfs_mr_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mr *vendor_mr) +{ + int ret = 0; + u32 mr_idx; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->mr_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("mr debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + mr_idx = vendor_mr->stag >> SXE2_CQPSQ_STAG_IDX_S; + + vendor_mr->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_MR, + rdma_dev->hdl->mr_debugfs, + sxe2_debugfs_mr_read, + sxe2_debugfs_mr_write, + (int)mr_idx, vendor_mr); + if (!vendor_mr->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} + +void sxe2_debugfs_mr_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_mr *vendor_mr) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->mr_debugfs) { + DRV_RDMA_LOG_DEV_ERR("mr debugfs dir not exist\n"); + goto end; + } + + if (vendor_mr->dbg_node) { + drv_rdma_rm_res_tree(vendor_mr->dbg_node); + vendor_mr->dbg_node = NULL; + } + +end: 
+	return;
+}
+
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb72c1ebff2a3297adccc5123be4111b288700b5
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_mr_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_mr_debugfs.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_DRV_MR_DEBUGFS_H__
+#define __SXE2_DRV_MR_DEBUGFS_H__
+
+#include "sxe2_drv_rdma_common.h"
+
+int sxe2_debbugfs_mr_add(struct sxe2_rdma_device *rdma_dev,
+			 struct sxe2_mr *vendor_mr);
+void sxe2_debugfs_mr_remove(struct sxe2_rdma_device *rdma_dev,
+			    struct sxe2_mr *vendor_mr);
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.c
new file mode 100644
index 0000000000000000000000000000000000000000..df8f7d3daa282fba3bc3fc4a0a568da2a97b8688
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_pd.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#include
+
+#include "sxe2_drv_pd.h"
+#include "sxe2-abi.h"
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_rdma_log.h"
+
+#ifdef ALLOC_PD_V1
+struct ib_pd *sxe2_kalloc_pd(struct ib_device *ibdev, struct ib_ucontext *ibucontext,
+			     struct ib_udata *udata)
+{
+	int ret;
+	u32 pd_id;
+	struct sxe2_rdma_pd *pd = NULL;
+	struct sxe2_alloc_pd_resp resp;
+	struct sxe2_rdma_device *rdma_dev = to_dev(ibdev);
+	struct sxe2_rdma_kcontext *ucontext;
+	struct sxe2_rdma_ctx_pd *pd_ctx;
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("pd kzalloc failed\n");
+		goto end;
+	}
+
+	ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_pds,
+			       rdma_func->max_pd, &pd_id, &rdma_func->next_pd);
+	if (ret != 0) {
+		DRV_RDMA_LOG_DEV_ERR("drv pd alloc failed , ret (%d)\n", ret);
+		goto free_pd;
+	}
+
+	pd_ctx = &pd->pd_ctx;
+	if (udata) {
+		ucontext = to_rdma_kcontext(ibucontext);
+		pd_ctx->pd_id = pd_id;
+		pd_ctx->abi_ver = ucontext->abi_ver;
+		pd_ctx->dev = &rdma_func->ctx_dev;
+
+		resp.pd_id = pd_id;
+		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+			DRV_RDMA_LOG_DEV_ERR("ib_copy_to_udata fail\n");
+			ret = -EFAULT;
+			sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_pds,
+					pd_id);
+			goto free_pd;
+		}
+	} else {
+		pd_ctx->pd_id = pd_id;
+		pd_ctx->abi_ver = 0;
+		pd_ctx->dev = &rdma_func->ctx_dev;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG("pd alloc resp.pdn:%u\n", pd_id);
+	goto end;
+
+free_pd:
+	kfree(pd);
+end:
+	return ret ?
ERR_PTR(ret) : &pd->ibpd; +} +#else +int sxe2_kalloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + int ret; + u32 pd_id; + struct sxe2_rdma_pd *pd = to_kpd(ibpd); + struct sxe2_alloc_pd_resp resp; + struct sxe2_rdma_device *rdma_dev = to_dev(ibpd->device); + struct sxe2_rdma_kcontext *ucontext; + struct sxe2_rdma_ctx_pd *pd_ctx; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; +#ifdef HAVE_NO_RDMA_UDATA_TO_DRV_CONTEXT + struct ib_ucontext *ibucontext; +#endif + ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_pds, + rdma_func->max_pd, &pd_id, &rdma_func->next_pd); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("drv pd alloc failed , ret (%d)\n", ret); + goto end; + } + + pd_ctx = &pd->pd_ctx; + if (udata) { +#ifdef HAVE_NO_RDMA_UDATA_TO_DRV_CONTEXT + ibucontext = rdma_device_to_drv_context(ibpd->device); + ucontext = to_rdma_kcontext(ibucontext); +#else + ucontext = rdma_udata_to_drv_context( + udata, struct sxe2_rdma_kcontext, ibucontext); +#endif + pd_ctx->pd_id = pd_id; + pd_ctx->abi_ver = ucontext->abi_ver; + pd_ctx->dev = &rdma_func->ctx_dev; + + resp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + DRV_RDMA_LOG_DEV_ERR("ib_copy_to_udata fail\n"); + ret = -EFAULT; + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_pds, + pd_id); + goto end; + } + } else { + pd_ctx->pd_id = pd_id; + pd_ctx->abi_ver = 0; + pd_ctx->dev = &rdma_func->ctx_dev; + } + + DRV_RDMA_LOG_DEV_DEBUG("pd alloc resp.pdn:%u\n", pd_id); + +end: + return ret; +} +#endif + +#ifdef DEALLOC_PD_VER_3 +void sxe2_kdealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +#elif defined DEALLOC_PD_VER_4 +int sxe2_kdealloc_pd(struct ib_pd *ibpd) +#else +int sxe2_kdealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +#endif + +{ + struct sxe2_rdma_pd *drv_pd = to_kpd(ibpd); + struct sxe2_rdma_device *rdma_dev = to_dev(ibpd->device); +#ifndef DEALLOC_PD_VER_4 + (void)udata; +#endif + DRV_RDMA_LOG_DEV_DEBUG("pd:%d dealloc\n", drv_pd->pd_ctx.pd_id); + + sxe2_kfree_rsrc(rdma_dev->rdma_func, rdma_dev->rdma_func->allocated_pds, + drv_pd->pd_ctx.pd_id); +#ifdef DEALLOC_PD_VER_3 + return; +#else + return 0; +#endif +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.h new file mode 100644 index 0000000000000000000000000000000000000000..5a85f4d419727938c2582d9d45492f56b4bd351f --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_pd.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_pd.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_RDMA_PD_H__ +#define __SXE2_DRV_RDMA_PD_H__ + +#include "sxe2_compat.h" + +#ifdef ALLOC_PD_V1 +struct ib_pd *sxe2_kalloc_pd(struct ib_device *ibdev, struct ib_ucontext *ibucontext, + struct ib_udata *udata); +#else +int sxe2_kalloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +#endif + +#ifdef DEALLOC_PD_VER_3 +void sxe2_kdealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#elif defined DEALLOC_PD_VER_4 +int sxe2_kdealloc_pd(struct ib_pd *pd); +#else +int sxe2_kdealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..1f83e1040e78286de38879063554880dadfe05c9 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_qos_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2_compat.h" +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_qos_debugfs.h" +#include "sxe2_drv_rdma_common.h" + +static struct sxe2_rdma_ctx_qp *sxe2_get_qp_from_entry(struct list_head *entry) +{ + struct sxe2_rdma_ctx_qp *qp = NULL; + + if (entry == NULL) + return NULL; + + qp = container_of(entry, struct sxe2_rdma_ctx_qp, list); + + return qp; +} + +static struct sxe2_rdma_ctx_qp * +sxe2_qos_get_qp_from_list(struct list_head *head, struct sxe2_rdma_ctx_qp *qp) +{ + struct list_head *last_entry; + struct list_head *entry = NULL; + + if (list_empty(head)) + return NULL; + + if (!qp) { + entry = (head)->next; + } else { + last_entry = &qp->list; + entry = (last_entry)->next; + if (entry == head) + return NULL; + } + + return sxe2_get_qp_from_entry(entry); +} + +static ssize_t drv_rdma_qos_info_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_vsi *vsi_ctx; + struct sxe2_rdma_ctx_qp *qp = NULL; + int i; + int j; + int qset_idx_cnt = 0; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "qos debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + vsi_ctx = &rdma_dev->vsi; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "qos debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf(rsp_end, len_total, "qos info:\n"); + + if (vsi_ctx->lag_aa) + qset_idx_cnt = SXE2_QSET_PER_USER_PRI_BOND; + else + qset_idx_cnt = SXE2_QSET_PER_USER_PRI; + for (j = 0; j < qset_idx_cnt; j++) { + len_total += dbg_vsnprintf(rsp_end, len_total, "dscp mode:%u\n", + vsi_ctx->dscp_mode[j]); + } + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + mutex_lock(&vsi_ctx->qos[i].qos_mutex); + if (!vsi_ctx->qos[i].valid) { + len_total += + dbg_vsnprintf(rsp_end, len_total, + "user pri %u is invalid\n", i); + mutex_unlock(&vsi_ctx->qos[i].qos_mutex); + continue; + } + 
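/* Walk each qset of this priority under its mutex and list the QPs bound to it. */
+		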
len_total += dbg_vsnprintf( + rsp_end, len_total, + "user pri=%u:qp cnt=%u\n", + i, vsi_ctx->qos[i].qp_cnt); + for (j = 0; j < qset_idx_cnt; j++) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "user pri=%u:tc=%u pri type=%u rel bw=%u qp cnt=%u\n", + i, vsi_ctx->qos[i].qset[j].traffic_class, + vsi_ctx->qos[i].prio_type[j], vsi_ctx->qos[i].rel_bw[j], + vsi_ctx->qos[i].qset[j].qset_qp_cnt); + + len_total += dbg_vsnprintf( + rsp_end, len_total, + "qset idx %u:qset num=%u qset id=%u teid=%u qset bind qp cnt=%u\n", + j, vsi_ctx->qos[i].qset[j].qset_num, + vsi_ctx->qos[i].qset[j].qset_id, + vsi_ctx->qos[i].qset[j].teid, + vsi_ctx->qos[i].qset[j].qset_qp_cnt); + qp = sxe2_qos_get_qp_from_list( + &vsi_ctx->qos[i].qset[j].qp_list, qp); + if (qp) { + len_total += dbg_vsnprintf(rsp_end, len_total, + "qpn: "); + } + while (qp) { + len_total += + dbg_vsnprintf(rsp_end, len_total, "%u ", + qp->qp_common.qpn); + qp = sxe2_qos_get_qp_from_list( + &vsi_ctx->qos[i].qset[j].qp_list, qp); + } + len_total += dbg_vsnprintf(rsp_end, len_total, "\n"); + } + mutex_unlock(&vsi_ctx->qos[i].qos_mutex); + } + + ret = simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("qos debugfs:simple read error %zu\n", + ret); + } + + kfree(rsp); + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_qos_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_qos_info_read, +}; + +int drv_rdma_debug_qos_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos debugfs:debugfs root dir not exist ret=%d\n", ret); + goto end; + } + + if (!rdma_dev->hdl->qos_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("qos debugfs:dir not exist ret=%d\n", ret); + goto end; + } + + debugfs_create_file("qos_info", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->qos_debugfs, rdma_dev, + &sxe2_rdma_qos_info_fops); +end: + return ret; +} + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + +enum { OQS_INJECT_APPLY_QSET_ERR_CODE, + QOS_INJECT_RELEASE_QSET_ERR_CODE, + QOS_INJECT_QP_BIND_QSET_ERR_CODE, + +}; + +enum { MAX_INJECT_APPLY_QSET_ERR_CODE = 2, + MAX_INJECT_RELEASE_QSET_ERR_CODE = 2, + MAX_INJECT_QP_BIND_QSET_ERR_CODE = 8, +}; + +static char *qos_inject_fields[] = { + [OQS_INJECT_APPLY_QSET_ERR_CODE] = "apply_qset_err_code", + [QOS_INJECT_RELEASE_QSET_ERR_CODE] = "release_qset_err_code", + [QOS_INJECT_QP_BIND_QSET_ERR_CODE] = "qp_bind_qset_err_code", +}; + +STATIC ssize_t drv_qset_errcode_inject_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char *rsp = NULL; + char *rsp_end; + size_t len = 0; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "MQ DEBUGFS:mq status rsp kmalloc failed, ret %zu\n", + ret); + goto end; + } + rsp_end = rsp; + + len += dbg_vsnprintf(rsp_end, len, "qset errcodes inject info\n"); + len += dbg_vsnprintf( + rsp_end, len, "apply_qset_err_code: %d\n", + rdma_dev->rdma_func->inject_qos.apply_qset_err_code); + len += dbg_vsnprintf( + rsp_end, len, "release_qset_err_code: %d\n", + rdma_dev->rdma_func->inject_qos.release_qset_err_code); + len += dbg_vsnprintf( + rsp_end, len, "qp_bind_qset_err_code: %d\n", + rdma_dev->rdma_func->inject_qos.qp_bind_qset_err_code); + + ret = simple_read_from_buffer(buf, count, pos, rsp, len); + 
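/* simple_read_from_buffer() bounds the copy to @count and advances *pos. */
+	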
if (ret < 0) + DRV_RDMA_LOG_DEV_ERR("MQ DEBUGFS:simple read error %zu\n", ret); + + kfree(rsp); + +end: + return ret; +} + +STATIC ssize_t drv_qset_errcode_inject_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char in_buf[64] = { 0 }; + size_t i; + u64 temp_value; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto end; + } + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(in_buf, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(qos_inject_fields); i++) { + if (!strncmp(argv[0], qos_inject_fields[i], + strlen(qos_inject_fields[i]))) { + break; + } + } + + ret = kstrtoull(argv[1], 10, &temp_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%zd)\n", + ret); + goto end; + } + + DRV_RDMA_LOG_DEV_INFO("inject field i:%zu, to temp_value:%llx\n", i, + temp_value); + + switch (i) { + case OQS_INJECT_APPLY_QSET_ERR_CODE: + if (temp_value > MAX_INJECT_APPLY_QSET_ERR_CODE) { + DRV_RDMA_LOG_DEV_DEBUG("exceed max val,set default\n"); + temp_value = MAX_INJECT_APPLY_QSET_ERR_CODE; + } + rdma_dev->rdma_func->inject_qos.apply_qset_err_code = + (u8)temp_value; + break; + case QOS_INJECT_RELEASE_QSET_ERR_CODE: + if (temp_value > MAX_INJECT_RELEASE_QSET_ERR_CODE) { + DRV_RDMA_LOG_DEV_DEBUG("exceed max val,set default\n"); + temp_value = MAX_INJECT_RELEASE_QSET_ERR_CODE; + } + rdma_dev->rdma_func->inject_qos.release_qset_err_code = + (u8)temp_value; + break; + case QOS_INJECT_QP_BIND_QSET_ERR_CODE: + if (temp_value > MAX_INJECT_QP_BIND_QSET_ERR_CODE) { + DRV_RDMA_LOG_DEV_DEBUG("exceed max val,set default\n"); + temp_value = MAX_INJECT_QP_BIND_QSET_ERR_CODE; + } + rdma_dev->rdma_func->inject_qos.qp_bind_qset_err_code = + (u8)temp_value; + break; + default: + DRV_RDMA_LOG_DEV_WARN("invalid index %zu\n", i); + ret = -EINVAL; + goto end; + } + + *pos = (loff_t)count; + ret = (ssize_t)count; + +end: + return ret; +} + +static const struct file_operations qset_errcode_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_qset_errcode_inject_read, + .write = drv_qset_errcode_inject_write, +}; + +int drv_rdma_qos_err_code_inject_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + struct sxe2_rdma_handler *hdl = rdma_dev->hdl; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!hdl->qos_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("qos debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + debugfs_create_file("qset_errcode_inject", SXE2_DEBUG_FILE_READ_WRITE, + hdl->qos_debugfs, rdma_dev, + &qset_errcode_inject_fops); + +end: + return ret; +} + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..86101c5a9956e7a11afb3c7cafbca9fbc2f114cc --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qos_debugfs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: sxe2_drv_qos_debugfs.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_DRV_QOS_DEBUGFS_H__
+#define __SXE2_DRV_QOS_DEBUGFS_H__
+
+#include "sxe2_drv_rdma_common.h"
+
+#define SXE2_OK 0
+
+int drv_rdma_debug_qos_add(struct sxe2_rdma_device *rdma_dev);
+
+#ifdef SXE2_SUPPORT_INJECT
+int drv_rdma_qos_err_code_inject_add(struct sxe2_rdma_device *rdma_dev);
+#endif
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.c
new file mode 100644
index 0000000000000000000000000000000000000000..f6c612ea7fe1a062e18d627aaf3715ed5658bfaf
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.c
@@ -0,0 +1,3100 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_qp.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "sxe2_compat.h"
+#include "sxe2-abi.h"
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_mq.h"
+#include "sxe2_drv_rdma_pble.h"
+#include "sxe2_drv_main.h"
+#include "sxe2_drv_qp.h"
+#include "sxe2_drv_cq.h"
+#include "sxe2_drv_rdma_qos.h"
+#include "sxe2_drv_rdma_rcms.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_db.h"
+#include "sxe2_drv_rdma_device_port.h"
+#include "sxe2_drv_aux.h"
+#include "sxe2_drv_rdma_virtchnl.h"
+
+void sxe2_qp_add_ref(struct ib_qp *ibqp)
+{
+	struct sxe2_rdma_qp *qp = to_qp(ibqp);
+
+	refcount_inc(&qp->refcnt);
+}
+
+void sxe2_qp_rem_ref(struct ib_qp *ibqp)
+{
+	struct sxe2_rdma_qp *qp = to_qp(ibqp);
+	struct sxe2_rdma_device *dev = qp->dev;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&dev->rdma_func->qptable_lock, flags);
+	if (!refcount_dec_and_test(&qp->refcnt)) {
+		spin_unlock_irqrestore(&dev->rdma_func->qptable_lock, flags);
+		goto end;
+	}
+	dev->rdma_func->qp_table[qp->ibqp.qp_num] = NULL;
+	spin_unlock_irqrestore(&dev->rdma_func->qptable_lock, flags);
+	complete(&qp->free_qp);
+end:
+	return;
+}
+
+static int sxe2_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+				  struct sxe2_rdma_device *rdma_dev,
+				  struct ib_udata *udata)
+{
+	struct sxe2_rdma_ctx_dev *ctx_dev = &rdma_dev->rdma_func->ctx_dev;
+	struct sxe2_common_attrs *common_attrs = &ctx_dev->hw_attrs.uk_attrs;
+	int ret = 0;
+
+	if (init_attr->create_flags) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"create flags illegal, init_attr->create_flags(%d)\n",
+			init_attr->create_flags);
+		ret = -EOPNOTSUPP;
+		goto end;
+	}
+
+	if (!udata) {
+		if (init_attr->cap.max_inline_data >
+			    common_attrs->max_hw_inline ||
+		    init_attr->cap.max_send_sge >
+			    common_attrs->max_hw_wq_frags ||
+		    init_attr->cap.max_send_wr >
+			    common_attrs->max_hw_wq_quanta ||
+		    init_attr->cap.max_recv_wr >
+			    common_attrs->max_hw_rq_quanta ||
+		    init_attr->cap.max_recv_sge >
+			    common_attrs->max_hw_wq_frags) {
+			ret = -EINVAL;
+			DRV_RDMA_LOG_DEV_ERR(
+				"qp request caps illegal, max_inline_data(%d) max_send_sge(%d)\n"
+				"\tmax_send_wr(%d) max_recv_wr(%d) max_recv_sge(%d)\n"
+				"\tdev hw_attrs: max_hw_inline(%d), max_hw_wq_frags(%d),\n"
+				"\tmax_hw_wq_quanta(%d), max_hw_rq_quanta(%d),\n",
+				init_attr->cap.max_inline_data,
+				init_attr->cap.max_send_sge,
+				init_attr->cap.max_send_wr,
+				init_attr->cap.max_recv_wr,
+				init_attr->cap.max_recv_sge,
+				common_attrs->max_hw_inline,
+				common_attrs->max_hw_wq_frags,
+				common_attrs->max_hw_wq_quanta,
+				common_attrs->max_hw_rq_quanta);
+			goto end;
+		}
+	}
+
+	if
(init_attr->qp_type != IB_QPT_RC && + init_attr->qp_type != IB_QPT_UD && + init_attr->qp_type != IB_QPT_GSI) { + DRV_RDMA_LOG_DEV_ERR("qp type illegal, qp_type(%d)", + init_attr->qp_type); + ret = -EOPNOTSUPP; + } +end: + return ret; +} + +static void sxe2_calc_sq_wqe_shift(u32 max_sq_sge, u32 max_inline_data, + u8 *wqe_shift) +{ + *wqe_shift = WQE_SIZE_32BYTE; + + if (max_sq_sge > MAX_SGE_SIZE_1 || + max_inline_data > MAX_INLINE_DATA_8) { + if (max_sq_sge < MAX_SGE_SIZE_4 && + max_inline_data <= MAX_INLINE_DATA_39) { + *wqe_shift = WQE_SIZE_64BYTE; + } else if (max_sq_sge < MAX_SGE_SIZE_8 && + max_inline_data <= MAX_INLINE_DATA_101) { + *wqe_shift = WQE_SIZE_128BYTE; + } else { + *wqe_shift = WQE_SIZE_256BYTE; + } + } +} +static void sxe2_calc_rq_wqe_shift(u32 max_rq_sge, u8 *wqe_shift) +{ + *wqe_shift = WQE_SIZE_32BYTE; + + if (max_rq_sge > MAX_SGE_SIZE_1) { + if (max_rq_sge < MAX_SGE_SIZE_4) + *wqe_shift = WQE_SIZE_64BYTE; + else if (max_rq_sge < MAX_SGE_SIZE_8) + *wqe_shift = WQE_SIZE_128BYTE; + else + *wqe_shift = WQE_SIZE_256BYTE; + } +} +static int sxe2_calc_sqdepth(struct sxe2_rdma_device *rdma_dev, + struct sxe2_common_attrs *common_attrs, u32 max_wr, + u8 wqe_shift, u32 *sq_depth) +{ + int ret = 0; + u32 depth; + + depth = sxe2_round_up_pow_2((max_wr << wqe_shift) + SQ_RSV_SIZE); + + depth = max_t(u32, depth, (u32)MIN_SQ_SIZE); + *sq_depth = depth; + if (*sq_depth > common_attrs->max_hw_wq_quanta) { + DRV_RDMA_LOG_DEV_ERR("sq depth illegal, sq_depth(%d)\n", depth); + ret = -EINVAL; + } + return ret; +} +static int sxe2_calc_rqdepth(struct sxe2_rdma_device *rdma_dev, + struct sxe2_common_attrs *common_attrs, u32 max_wr, + u8 wqe_shift, u32 *rq_depth) +{ + int ret = 0; + u32 depth; + + depth = sxe2_round_up_pow_2((max_wr << wqe_shift) + RQ_RSV_SIZE); + + depth = max_t(u32, depth, (u32)MIN_RQ_SIZE); + *rq_depth = depth; + if (*rq_depth > common_attrs->max_hw_rq_quanta) { + DRV_RDMA_LOG_DEV_ERR("rq depth illegal, rq_depth(%d)\n", depth); + ret = -EINVAL; + } + return ret; +} +static int sxe2_calc_sq_depth_shift(struct sxe2_rdma_device *rdma_dev, + struct sxe2_qp_common_init_info *init_info, + u32 *sq_depth, u8 *sq_shift) +{ + int ret = 0; + + sxe2_calc_sq_wqe_shift(init_info->max_sq_sge_cnt + 1, + init_info->max_inline_data, sq_shift); + ret = sxe2_calc_sqdepth(rdma_dev, init_info->common_attrs, + init_info->sq_size, *sq_shift, sq_depth); + return ret; +} + +static int sxe2_calc_rq_depth_shift(struct sxe2_rdma_device *rdma_dev, + struct sxe2_qp_common_init_info *init_info, + u32 *rq_depth, u8 *rq_shift) +{ + int ret = 0; + + if ((init_info->srq) || (init_info->rq_size == 0)) { + *rq_depth = 0; + *rq_shift = 0; + ret = 0; + goto end; + } + + sxe2_calc_rq_wqe_shift(init_info->max_rq_sge_cnt, rq_shift); + ret = sxe2_calc_rqdepth(rdma_dev, init_info->common_attrs, + init_info->rq_size, *rq_shift, rq_depth); +end: + return ret; +} +#ifdef NEED_RDMA_UMEM_BLOCK_ITER_NEXT +void kc__rdma_block_iter_start(struct kc_ib_block_iter *biter, + struct scatterlist *sglist, unsigned int nents, + unsigned long pgsz) +{ + memset(biter, 0, sizeof(*biter)); + biter->__sg = sglist; + biter->__sg_nents = nents; + + biter->__pg_bit = __fls(pgsz); +} + +bool kc__rdma_block_iter_next(struct kc_ib_block_iter *biter) +{ + unsigned int block_offset; + unsigned int sg_delta; + + if (!biter->__sg_nents || !biter->__sg) + return false; + + biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; + block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); + sg_delta = BIT_ULL(biter->__pg_bit) - 
block_offset; + + if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { + biter->__sg_advance += sg_delta; + } else { + biter->__sg_advance = 0; + biter->__sg = sg_next(biter->__sg); + biter->__sg_nents--; + } + + return true; +} +#endif + +static int sxe2_set_qp_pble(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp, u32 page_num, + unsigned long page_size) +{ + struct sxe2_pbl_pble_alloc_info *alloc_info = NULL; + struct sxe2_pbl_pble_rsrc *pble_rsrc = rdma_dev->rdma_func->pble_rsrc; + struct ib_block_iter biter; + u64 pble_liner_index; + u64 pgaddr; + int ret = 0; + + if (page_num == 1) { + rdma_umem_for_each_dma_block(qp->qp_umem, &biter, page_size) { + qp->pbl_pointer = rdma_block_iter_dma_address(&biter); + } + qp->wqe_access_mod = SXE2_QP_WQE_ACCESS_MOD_0; + } else { + alloc_info = kzalloc(sizeof(*alloc_info), GFP_KERNEL); + if (!alloc_info) + goto end; + ret = sxe2_pbl_get_pble(pble_rsrc, alloc_info, (u32)page_num, + PBL_OBJ_QP); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_pbl_get_pble failed, ret(%d)\n", ret); + goto free_alloc_info; + } + qp->pble_alloc_info = alloc_info; + qp->pbl_pointer = alloc_info->pbl_index; + pble_liner_index = alloc_info->pble_info.liner_addr; + qp->wqe_access_mod = SXE2_QP_WQE_ACCESS_MOD_1; + rdma_umem_for_each_dma_block(qp->qp_umem, &biter, page_size) { + pgaddr = rdma_block_iter_dma_address(&biter); + ret = sxe2_pbl_set_pble(pble_rsrc, pble_liner_index, + pgaddr, false); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_pbl_set_pble failed, ret(%d)\n", + ret); + goto free_pble; + } + pble_liner_index += sizeof(u64); + } + } + goto end; +free_pble: + sxe2_pbl_free_pble(pble_rsrc, alloc_info->pble_info.liner_addr, + page_num, false); +free_alloc_info: + kfree(alloc_info); +end: + return ret; +} +static int sxe2_setup_umode_qp(struct ib_udata *udata, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp, + struct sxe2_qp_init_info *info, + struct ib_qp_init_attr *init_attr) +{ + struct sxe2_qp_common_init_info *common_info = + &info->qp_common_init_info; + struct sxe2_create_qp_req req = {}; + size_t buf_size; + unsigned long page_size; + u32 page_num; + int ret = 0; +#ifdef IB_UMEM_GET_V3 +#endif + + ret = ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("copy fail len %zu , ret (%d)\n", + udata->inlen, ret); + goto end; + } + qp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + qp->user_mod = 1; + common_info->sq_depth = req.sq_depth; + common_info->rq_depth = req.rq_depth; + common_info->sq_shift = req.sq_shift; + common_info->rq_shift = req.rq_shift; + + DRV_RDMA_LOG_DEV_INFO("rq depth %d shift:%d\n", req.rq_depth, + req.rq_shift); + qp->max_send_wr = + (common_info->sq_depth - SQ_RSV_SIZE) >> common_info->sq_shift; + if (common_info->rq_depth != 0) { + qp->max_recv_wr = (common_info->rq_depth - RQ_RSV_SIZE) >> + common_info->rq_shift; + } else { + qp->max_recv_wr = common_info->rq_depth; + } + common_info->sq_size = common_info->sq_depth >> common_info->sq_shift; + common_info->rq_size = common_info->rq_depth >> common_info->rq_shift; + buf_size = (common_info->rq_depth + common_info->sq_depth) * + QP_QUANTA_SIZE; +#ifdef IB_UMEM_GET_V2 + qp->qp_umem = ib_umem_get(udata, req.user_wqe_bufs, buf_size, + IB_ACCESS_LOCAL_WRITE); +#elif defined(IB_UMEM_GET_V1) + qp->qp_umem = ib_umem_get(udata, req.user_wqe_bufs, buf_size, + IB_ACCESS_LOCAL_WRITE, 0); +#elif defined(IB_UMEM_GET_V3) + qp->qp_umem = ib_umem_get(qp->pd->ibpd.uobject->context, req.user_wqe_bufs, + buf_size, 
IB_ACCESS_LOCAL_WRITE, 0);
+#else
+	qp->qp_umem = ib_umem_get(&rdma_dev->ibdev, req.user_wqe_bufs, buf_size,
+				  IB_ACCESS_LOCAL_WRITE);
+#endif
+	if (IS_ERR(qp->qp_umem)) {
+		DRV_RDMA_LOG_DEV_ERR("qp umem ib_umem_get failed ret %ld\n",
+				     PTR_ERR(qp->qp_umem));
+		ret = PTR_ERR(qp->qp_umem);
+		goto end;
+	}
+#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ
+	page_size = sxe2_set_best_pagesz(req.user_wqe_bufs, qp->qp_umem,
+					 GENMASK(21, PAGE_SHIFT));
+#else
+	page_size = ib_umem_find_best_pgsz(qp->qp_umem, GENMASK(21, PAGE_SHIFT), 0);
+#endif
+	qp->log_page_size = order_base_2(page_size);
+#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT
+	page_num = (u32)sxe2_ib_umem_num_dma_blocks(qp->qp_umem, page_size, 0);
+#else
+	page_num = (u32)ib_umem_num_dma_blocks(qp->qp_umem, page_size);
+#endif
+	ret = sxe2_set_qp_pble(rdma_dev, qp, page_num, page_size);
+	if (ret) {
+		DRV_RDMA_LOG_DEV_ERR("sxe2_set_qp_pble failed, ret(%d)\n", ret);
+		goto free_umem;
+	}
+#ifdef IB_UMEM_GET_V2
+	qp->db_note_umem = ib_umem_get(udata, req.doorbell_note, SZ_4K,
+				       IB_ACCESS_LOCAL_WRITE);
+#elif defined(IB_UMEM_GET_V1)
+	qp->db_note_umem = ib_umem_get(udata, req.doorbell_note, SZ_4K,
+				       IB_ACCESS_LOCAL_WRITE, 0);
+#elif defined(IB_UMEM_GET_V3)
+	qp->db_note_umem = ib_umem_get(qp->pd->ibpd.uobject->context, req.doorbell_note,
+				       SZ_4K, IB_ACCESS_LOCAL_WRITE, 0);
+#else
+	qp->db_note_umem = ib_umem_get(&rdma_dev->ibdev, req.doorbell_note,
+				       SZ_4K, IB_ACCESS_LOCAL_WRITE);
+#endif
+
+	if (IS_ERR(qp->db_note_umem)) {
+		ret = PTR_ERR(qp->db_note_umem);
+		DRV_RDMA_LOG_DEV_ERR("dbnote umem ib_umem_get failed\n");
+		goto free_pble;
+	}
+	goto end;
+free_pble:
+	if (qp->wqe_access_mod == SXE2_QP_WQE_ACCESS_MOD_1) {
+		sxe2_pbl_free_pble(rdma_dev->rdma_func->pble_rsrc,
+				   qp->pble_alloc_info->pble_info.liner_addr,
+				   page_num, false);
+		kfree(qp->pble_alloc_info);
+		qp->pble_alloc_info = NULL;
+	}
+free_umem:
+	ib_umem_release(qp->qp_umem);
+end:
+	return ret;
+}
+static int sxe2_setup_kmode_qp(struct sxe2_rdma_ctx_dev *dev,
+			       struct sxe2_rdma_qp *qp,
+			       struct sxe2_qp_init_info *info,
+			       struct ib_qp_init_attr *init_attr)
+{
+	struct sxe2_rdma_dma_mem *mem = &qp->kqp.dma_mem;
+	struct sxe2_rdma_device *rdma_dev = qp->dev;
+	int ret = 0;
+	struct sxe2_qp_common_init_info *common_info =
+		&info->qp_common_init_info;
+
+	ret = sxe2_calc_sq_depth_shift(rdma_dev, common_info,
+				       &common_info->sq_depth,
+				       &common_info->sq_shift);
+	if (ret) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"sxe2_calc_sq_depth_shift failed, ret(%d)\n", ret);
+		goto end;
+	}
+	ret = sxe2_calc_rq_depth_shift(rdma_dev, common_info,
+				       &common_info->rq_depth,
+				       &common_info->rq_shift);
+	if (ret) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"sxe2_calc_rq_depth_shift failed, ret(%d)\n", ret);
+		goto end;
+	}
+	qp->kqp.sq_wrid_mem = kcalloc(common_info->sq_depth,
+				      sizeof(*qp->kqp.sq_wrid_mem), GFP_KERNEL);
+	if (!qp->kqp.sq_wrid_mem) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("kcalloc sq wrid mem failed\n");
+		goto end;
+	}
+	common_info->sq_wrtrk_array = qp->kqp.sq_wrid_mem;
+
+	if (common_info->rq_depth != 0) {
+		qp->kqp.rq_wrid_mem =
+			kcalloc(common_info->rq_depth,
+				sizeof(*qp->kqp.rq_wrid_mem), GFP_KERNEL);
+		if (!qp->kqp.rq_wrid_mem) {
+			ret = -ENOMEM;
+			DRV_RDMA_LOG_DEV_ERR("kcalloc rq wrid mem failed\n");
+			goto free_sq_wrid;
+		}
+		common_info->rq_wrid_array = qp->kqp.rq_wrid_mem;
+	}
+
+	mem->size = (common_info->sq_depth + common_info->rq_depth) *
+		    QP_QUANTA_SIZE;
+	mem->va = dma_alloc_coherent(dev->hw->device, mem->size, &mem->pa,
+				     GFP_KERNEL);
+	if (!mem->va) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR("qp buffer dma_alloc_coherent failed\n");
+		goto free_rq_wrid;
+	}
+	
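/* dma_alloc_coherent() returns zeroed memory on current kernels;
+	 * the explicit memset below also covers older ones.
+	 */
+	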
memset(mem->va, 0, mem->size); + + qp->log_page_size = order_base_2(PAGE_SIZE); + common_info->rq = mem->va; + info->rq_pa = mem->pa; + common_info->sq = &common_info->rq[common_info->rq_depth]; + info->sq_pa = mem->pa + (common_info->rq_depth * QP_QUANTA_SIZE); + + common_info->sq_size = common_info->sq_depth >> common_info->sq_shift; + common_info->rq_size = common_info->rq_depth >> common_info->rq_shift; + qp->max_send_wr = + (common_info->sq_depth - SQ_RSV_SIZE) >> common_info->sq_shift; + if (common_info->rq_depth != 0) { + qp->max_recv_wr = (common_info->rq_depth - RQ_RSV_SIZE) >> + common_info->rq_shift; + } else { + qp->max_recv_wr = common_info->rq_depth; + } + init_attr->cap.max_send_wr = qp->max_send_wr; + init_attr->cap.max_recv_wr = qp->max_recv_wr; + + common_info->doorbell_note = dma_alloc_coherent( + dev->hw->device, SZ_4K, &info->shadow_area_pa, GFP_KERNEL); + if (!common_info->doorbell_note) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("shadow area dma_alloc_coherent failed\n"); + goto free_qp_dma_buf; + } + memset(common_info->doorbell_note, 0, SZ_4K); + goto end; +free_qp_dma_buf: + dma_free_coherent(dev->hw->device, mem->size, mem->va, mem->pa); + mem->va = NULL; +free_rq_wrid: + kfree(qp->kqp.rq_wrid_mem); + qp->kqp.rq_wrid_mem = NULL; +free_sq_wrid: + kfree(qp->kqp.sq_wrid_mem); + qp->kqp.sq_wrid_mem = NULL; +end: + return ret; +} +static void sxe2_common_qp_init(struct sxe2_qp_common *qp, + struct sxe2_qp_common_init_info *info) +{ + u32 sq_ring_size; + + qp->qp_caps = info->qp_caps; + qp->sq_base = info->sq; + qp->rq_base = info->rq; + qp->qp_type = info->qp_type; + qp->doorbell_note = info->doorbell_note; + qp->sq_wrtrk_array = info->sq_wrtrk_array; + qp->rq_wrid_array = info->rq_wrid_array; + qp->qp_db_no_llwqe = info->qp_db_no_llwqe; + qp->qpn = info->qpn; + qp->sq_size = info->sq_size; + qp->llwqe_mode = false; + qp->max_sq_sge_cnt = info->max_sq_sge_cnt; + sq_ring_size = qp->sq_size << info->sq_shift; + SXE2_RING_INIT(qp->sq_ring, sq_ring_size); + SXE2_RING_INIT(qp->initial_ring, sq_ring_size); + qp->swqe_polarity = 0; + qp->rwqe_polarity = 0; + qp->rq_size = info->rq_size; + qp->max_rq_sge_cnt = info->max_rq_sge_cnt; + qp->max_inline_data = info->max_inline_data; + qp->rq_wqe_size = info->rq_shift; + SXE2_RING_INIT(qp->rq_ring, qp->rq_size); + qp->rq_wqe_size_multiplier = 1 << info->rq_shift; + qp->srq = info->srq; + qp->common_attrs = info->common_attrs; +} + +static void sxe2_rdma_ctx_qp_init(struct sxe2_rdma_ctx_qp *qp, + struct sxe2_qp_init_info *info) +{ + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + qp->dev = info->pd->dev; + qp->vsi = info->vsi; + qp->sq_pa = info->sq_pa; + qp->rq_pa = info->rq_pa; + qp->hw_host_ctx = info->host_ctx; + qp->hw_host_ctx_pa = info->host_ctx_pa; + qp->shadow_area_va = info->qp_common_init_info.doorbell_note; + qp->shadow_area_pa = info->shadow_area_pa; + qp->pd = info->pd; + info->qp_common_init_info.qp_db_no_llwqe = + rdma_qp->dev->rdma_func->db->map; + + sxe2_common_qp_init(&qp->qp_common, &info->qp_common_init_info); + qp->hw_sq_size = sxe2_kget_encoded_wqe_size(qp->qp_common.sq_ring.size, + SXE2_QUEUE_TYPE_SQ_RQ); + qp->hw_rq_size = sxe2_kget_encoded_wqe_size( + qp->qp_common.rq_size * qp->qp_common.rq_wqe_size_multiplier, + SXE2_QUEUE_TYPE_SQ_RQ); + qp->qp_state = IB_QPS_RESET; +} +static void sxe2_free_qp_buf(struct sxe2_rdma_qp *qp, + struct sxe2_rdma_ctx_dev *dev, + struct ib_udata *udata) +{ + struct sxe2_rdma_device *device = to_rdmadev(dev); + + if (udata) { + 
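/* User-mode QP: unpin the WQE and doorbell umems and return any
+		 * multi-page PBLE backing; kernel QPs free coherent DMA below.
+		 */
+		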
ib_umem_release(qp->qp_umem); + if (qp->wqe_access_mod == SXE2_QP_WQE_ACCESS_MOD_1) { + sxe2_pbl_free_pble( + device->rdma_func->pble_rsrc, + qp->pble_alloc_info->pble_info.liner_addr, + qp->pble_alloc_info->needed_pble_cnt, false); + kfree(qp->pble_alloc_info); + qp->pble_alloc_info = NULL; + } + ib_umem_release(qp->db_note_umem); + } else { + dma_free_coherent(dev->hw->device, qp->kqp.dma_mem.size, + qp->kqp.dma_mem.va, qp->kqp.dma_mem.pa); + qp->kqp.dma_mem.va = NULL; + dma_free_coherent(dev->hw->device, SZ_4K, + qp->qp_ctx.shadow_area_va, + qp->qp_ctx.shadow_area_pa); + qp->qp_ctx.shadow_area_va = NULL; + kfree(qp->kqp.rq_wrid_mem); + qp->kqp.rq_wrid_mem = NULL; + kfree(qp->kqp.sq_wrid_mem); + qp->kqp.sq_wrid_mem = NULL; + } +}; +static void ether_addr_to_lmac_hmac(const u8 *eth_add, u32 *lmac, u16 *hmac) +{ + int i; + + for (i = 0, *hmac = 0; i < 2; i++) + *hmac = (u16)((u32)(*hmac << 8) | eth_add[i]); + for (*lmac = 0; i < ETH_ALEN; i++) + *lmac = *lmac << 8 | eth_add[i]; +} + +static void sxe2_qpc_fill_tph(struct sxe2_rdma_device *rdma_dev, + bool tph_enable, struct sxe2_qpc *qpc) +{ + u32 st_mode = 0; + s32 cpu_id = 0; + + if (sxe2_drv_core_is_tph_enable(rdma_dev, tph_enable, &st_mode)) { + if (st_mode != MODE_NO_ST) + cpu_id = TPH_GET_CPU(); + qpc->sq_tph_en = true; + qpc->sq_tph_value = (cpu_id & TPH_CPUID_MASK) | PH_HWDR << 8; + qpc->rq_tph_en = true; + qpc->rq_tph_value = (cpu_id & TPH_CPUID_MASK) | PH_DWHR << 8; + qpc->xmit_tph_en = true; + qpc->rcv_tph_en = true; + } else { + qpc->sq_tph_en = false; + qpc->sq_tph_value = 0; + qpc->rq_tph_en = false; + qpc->rq_tph_value = 0; + qpc->xmit_tph_en = false; + qpc->rcv_tph_en = false; + } +} + +static void sxe2_ctx_qp_setctx_roce(struct sxe2_rdma_qp *qp, u64 *ctx_va) +{ + struct sxe2_qpc *qpc = (struct sxe2_qpc *)ctx_va; + struct sxe2_rdma_device *rdma_dev = qp->dev; + u32 lmac = 0; + u16 hmac = 0; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + + qpc->ack_timeout = qp->ctx_info.udp_info->ack_timeout; + qpc->retry_mode = qp->ctx_info.retry_mode; + qpc->credit_en = qp->ctx_info.credit_en; + qpc->comm_est = 1; + qpc->rqe_lwm = qp->ctx_info.rqe_lwm; + qpc->retry_resp_op_sel = qp->ctx_info.retry_resp_op_sel; + qpc->log_rtm = qp->ctx_info.log_rtm; + qpc->qp_state = qp->qp_ctx.qp_state; + qpc->dqpn = qp->ctx_info.roce_info->dest_qp; + if (qp->qp_ctx.qp_common.qp_type == IB_QPT_RC) { + qpc->qp_type = SXE2_QP_TYPE_RDMA_RC; + } else { + qpc->qp_type = SXE2_QP_TYPE_RDMA_UD; + if (qp->gsi_flag == true) + qpc->qp_type = SXE2_QP_TYPE_RDMA_QP1; + } + qpc->llwqe_page_index = qp->ctx_info.llwqe_page_index; + qpc->llwqe_mode_enable = qp->ctx_info.llwqe_mod_enable; + qpc->log_msg_max = qp->ctx_info.log_msg_max; + qpc->pmtu = qp->ctx_info.udp_info->pmtu; + qpc->page_offset = qp->qp_ctx.qp_buff_page_offset; + qpc->log_rq_size = qp->qp_ctx.hw_rq_size; + qpc->log_sq_size = qp->qp_ctx.hw_sq_size; + qpc->cqn_snd = qp->ctx_info.send_cq_num; + qpc->cqn_rcv = qp->ctx_info.rcv_cq_num; + qpc->log_rq_stride = qp->qp_ctx.qp_common.rq_wqe_size; + qpc->log_page_size = qp->log_page_size; + qpc->min_rnr_nak = qp->ctx_info.udp_info->min_rnr_timer; + qpc->fl = qp->ctx_info.fl; + if (qp->qp_ctx.qp_common.srq) + qpc->srqn = (__u64)qp->qp_ctx.qp_common.srq->srq_id; + qpc->bucket_type = qp->ctx_info.qp_bucket_type; + qpc->stat_rate = qp->ctx_info.roce_info->state_rate; + + if (qp->max_recv_wr == 0) { + qpc->rq_type = qp->qp_ctx.qp_common.srq ? 
SXE2_SRQ : SXE2_NO_RQ; + qpc->log_rq_size = 1; + } else { + qpc->rq_type = SXE2_REGULAR_RQ; + } + + qpc->ipv4 = qp->ctx_info.udp_info->ipv4; + qpc->pd = qp->ctx_info.roce_info->pd_id; + qpc->log_rra_max = qp->ctx_info.roce_info->log_rra_max; + qpc->log_sra_max = qp->ctx_info.roce_info->log_sra_max; + qpc->send_start_psn = qp->ctx_info.udp_info->sq_psn; + qpc->rcv_start_psn = qp->ctx_info.udp_info->rq_psn; + qpc->tmo_retry = qp->ctx_info.udp_info->retry_cnt; + qpc->udpriv_cqenable = qp->ctx_info.roce_info->udprivcq_en; + qpc->rnr_retry = qp->ctx_info.udp_info->rnr_retry; + qpc->ack_mode = qp->ctx_info.ack_mode; + qpc->pkey_index = qp->ctx_info.roce_info->p_key; + qpc->log_ack_req_freq = qp->ctx_info.log_ack_req_freq; + qpc->hop_limit = qp->ctx_info.udp_info->ttl; + qpc->q_key = qp->ctx_info.roce_info->qkey; + qpc->src_port_num = qp->ctx_info.udp_info->src_port; + qpc->dest_port_num = qp->ctx_info.udp_info->dst_port; + ether_addr_to_lmac_hmac(qp->roce_info.mac_addr, &lmac, &hmac); + qpc->smac_low = lmac; + qpc->smac_high = hmac; + ether_addr_to_lmac_hmac(qp->roce_info.dest_mac_addr, &lmac, &hmac); + qpc->rmac_low = lmac; + qpc->rmac_high = hmac; + + qpc->qp_completion_contex = qp->ctx_info.qp_compl_ctx; + sxe2_qpc_fill_tph(rdma_dev, true, qpc); + qpc->dest_ipaddr0 = qp->ctx_info.udp_info->dest_ip_addr[0]; + qpc->dest_ipaddr1 = qp->ctx_info.udp_info->dest_ip_addr[1]; + qpc->dest_ipaddr2 = qp->ctx_info.udp_info->dest_ip_addr[2]; + qpc->dest_ipaddr3 = qp->ctx_info.udp_info->dest_ip_addr[3]; + + qpc->local_ipaddr_0 = qp->ctx_info.udp_info->local_ipaddr[0]; + qpc->local_ipaddr_1 = qp->ctx_info.udp_info->local_ipaddr[1]; + qpc->local_ipaddr_2 = qp->ctx_info.udp_info->local_ipaddr[2]; + qpc->local_ipaddr_3 = qp->ctx_info.udp_info->local_ipaddr[3]; + + qpc->flow_label = qp->ctx_info.udp_info->flow_label; + + if (rf->cc_params.dcqcn_enable && qpc->qp_type == SXE2_QP_TYPE_RDMA_RC) { + qpc->cnp_ecn = rdma_dev->rdma_func->cc_params.cnp_ecn; + qpc->ecn = rdma_dev->rdma_func->cc_params.ecn; + } else { + qpc->cnp_ecn = SXE2_QP_CC_CNP_ECN_DISABLE; + qpc->ecn = qp->udp_info.ecn; + } + DRV_RDMA_LOG_DEBUG_BDF("QP CC: qpc cnp_ecn=%u ecn=%u\n", qpc->cnp_ecn, + qpc->ecn); + qpc->dscp = qp->udp_info.dscp; + qpc->cnp_dscp = qp->udp_info.dscp; + +#ifdef HAVE_IB_UMEM_SG_HEAD + qpc->dbr_addr = qp->user_mod ? + sg_dma_address(qp->db_note_umem->sg_head.sgl) : + qp->qp_ctx.shadow_area_pa; +#else + qpc->dbr_addr = + qp->user_mod ? + sg_dma_address(qp->db_note_umem->sgt_append.sgt.sgl) : + qp->qp_ctx.shadow_area_pa; +#endif + qpc->pbl_pointer = qp->user_mod ? qp->pbl_pointer : qp->kqp.dma_mem.pa; + qpc->wq_acces_mode = qp->wqe_access_mod; + qpc->use_statistics_intance = qp->ctx_info.stats_idx_valid; + qpc->statistics_instance_index = qp->ctx_info.stats_idx; + qpc->insert_vlan_tag = qp->ctx_info.udp_info->insert_vlan_tag; + qpc->vlan_tag = qp->ctx_info.udp_info->vlan_tag; + qpc->src_vsi = qp->ctx_info.src_vsi; + qpc->dispatch_min_unit = qp->ctx_info.dispatch_min_unit; + qpc->dispatch_mode = qp->ctx_info.dispatch_mode; + + if (qp->qp_ctx.flush_sq) + qp->qp_ctx.sq_flush_polarity = !qp->qp_ctx.sq_flush_polarity; + qpc->sq_flush_flag = qp->qp_ctx.sq_flush_polarity; + qpc->rq_flush_flag = (qp->qp_ctx.flush_rq ? 
1 : 0);
+
+#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT)
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_srqn",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_srqn");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rcqn",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rcqn");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_type",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_type");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pmtu",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pmtu");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_sqSize",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_sqSize");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqType",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqType");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pageSize",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_pageSize");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_swState",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_swState");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_scqn",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_scqn");
+
+	INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqSize",
+		     &rdma_dev->rdma_func->mq.err_cqe_val, qpc, qp);
+	INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_rqSize");
+
+#endif
+}
+
+static void sxe2_print_qpc_info(struct sxe2_rdma_device *rdma_dev,
+				struct sxe2_qpc *qpc)
+{
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"\nack_timeout(%llu) retry_mode(%llu) credit_en(%llu)\n"
+		"\trqe_lwm(%llu)\n"
+		"\tretry_resp_op_sel(%llu) log_rtm(%llu) qp_state(%llu) dqpn(%#llx)\n"
+		"\tqp_type(%llu) llwqe_page_index(%llu)\n"
+		"\tllwqe_mode_enable(%llu) log_msg_max(%llu) pmtu(%llu)\n"
+		"\tpage_offset(%llu) log_rq_size(%llu)\n"
+		"\tlog_sq_size(%llu) cqn_snd(%#llx) log_rq_stride(%llu)\n"
+		"\tlog_page_size(%llu) cqn_rcv(%#llx)\n"
+		"\tmin_rnr_nak(%llu) fl(%llu) srqn(%#llx) bucket_type(%llu)\n"
+		"\tstat_rate(%llu) rq_type(%llu)\n"
+		"\tipv4(%llu) pd(%llu) log_rra_max(%llu) log_sra_max(%llu)\n"
+		"\tsend_start_psn(%llu)\n"
+		"\ttmo_retry(%llu) udpriv_cqenable(%llu) rnr_retry(%llu)\n"
+		"\trcv_start_psn(%llu) ack_mode(%llu)\n"
+		"\tpkey_index(%llu) rq_flush_flag(%llu) sq_flush_flag(%llu)\n"
+		"\tlog_ack_req_freq(%llu) hop_limit(%llu) q_key(%#llx) smac_low(%llu)\n"
+		"\tsmac_high(%llu) src_port_num(%llu) rmac_low(%llu) rmac_high(%llu)\n"
+		"\tdest_port_num(%llu)\n"
+		"\tqp_completion_contex(%#llx) sq_tph_value(%llu) rq_tph_value(%llu)\n"
+		"\tsq_tph_en(%llu) rq_tph_en(%llu)\n"
+		"\txmit_tph_en(%llu) rcv_tph_en(%llu) cnp_dscp(%llu) cnp_ecn(%llu)\n"
+		"\tflow_label(%llu)\n"
+		"\tdscp(%llu) ecn(%llu) dest_ipaddr3(%llu) dest_ipaddr2(%llu)\n"
+		"\tdest_ipaddr1(%llu)\n"
+		"\tdest_ipaddr0(%llu) local_ipaddr_3(%llu) local_ipaddr_2(%llu)\n"
+		"\tlocal_ipaddr_1(%llu) local_ipaddr_0(%llu)\n"
+		"\tdbr_addr(%#llx) pbl_pointer(%#llx) tpid_index(%llu)\n"
+		"\twq_acces_mode(%llu) use_statistics_intance(%llu)\n"
+		"\tstatistics_instance_index(%llu) insert_vlan_tag(%llu) vlan_tag(%llu)\n"
+
"\tsrc_vsi(%llu)\n" + "\tdispatch_min_unit(%llu) dispatch_mode(%llu)\n", + (u64)qpc->ack_timeout, (u64)qpc->retry_mode, + (u64)qpc->credit_en, (u64)qpc->rqe_lwm, + (u64)qpc->retry_resp_op_sel, (u64)qpc->log_rtm, + (u64)qpc->qp_state, (u64)qpc->dqpn, (u64)qpc->qp_type, + (u64)qpc->llwqe_page_index, (u64)qpc->llwqe_mode_enable, + (u64)qpc->log_msg_max, (u64)qpc->pmtu, (u64)qpc->page_offset, + (u64)qpc->log_rq_size, (u64)qpc->log_sq_size, (u64)qpc->cqn_snd, + (u64)qpc->log_rq_stride, (u64)qpc->log_page_size, + (u64)qpc->cqn_rcv, (u64)qpc->min_rnr_nak, (u64)qpc->fl, + (u64)qpc->srqn, (u64)qpc->bucket_type, (u64)qpc->stat_rate, + (u64)qpc->rq_type, (u64)qpc->ipv4, (u64)qpc->pd, + (u64)qpc->log_rra_max, (u64)qpc->log_sra_max, + (u64)qpc->send_start_psn, (u64)qpc->tmo_retry, + (u64)qpc->udpriv_cqenable, (u64)qpc->rnr_retry, + (u64)qpc->rcv_start_psn, (u64)qpc->ack_mode, + (u64)qpc->pkey_index, (u64)qpc->rq_flush_flag, + (u64)qpc->sq_flush_flag, (u64)qpc->log_ack_req_freq, + (u64)qpc->hop_limit, (u64)qpc->q_key, (u64)qpc->smac_low, + (u64)qpc->smac_high, (u64)qpc->src_port_num, (u64)qpc->rmac_low, + (u64)qpc->rmac_high, (u64)qpc->dest_port_num, + (u64)qpc->qp_completion_contex, (u64)qpc->sq_tph_value, + (u64)qpc->rq_tph_value, (u64)qpc->sq_tph_en, + (u64)qpc->rq_tph_en, (u64)qpc->xmit_tph_en, + (u64)qpc->rcv_tph_en, (u64)qpc->cnp_dscp, (u64)qpc->cnp_ecn, + (u64)qpc->flow_label, (u64)qpc->dscp, (u64)qpc->ecn, + (u64)qpc->dest_ipaddr3, (u64)qpc->dest_ipaddr2, + (u64)qpc->dest_ipaddr1, (u64)qpc->dest_ipaddr0, + (u64)qpc->local_ipaddr_3, (u64)qpc->local_ipaddr_2, + (u64)qpc->local_ipaddr_1, (u64)qpc->local_ipaddr_0, + (u64)qpc->dbr_addr, (u64)qpc->pbl_pointer, (u64)qpc->tpid_index, + (u64)qpc->wq_acces_mode, (u64)qpc->use_statistics_intance, + (u64)qpc->statistics_instance_index, (u64)qpc->insert_vlan_tag, + (u64)qpc->vlan_tag, (u64)qpc->src_vsi, + (u64)qpc->dispatch_min_unit, (u64)qpc->dispatch_mode); +} + +static void sxe2_fill_and_set_qpctx_info(struct sxe2_rdma_qp *qp, + struct sxe2_qp_host_ctx_info *ctx_info) +{ + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_roce_offload_info *roce_info; + struct sxe2_udp_offload_info *udp_info; + u32 ib_mtu = (u32)sxe2_iboe_get_mtu(rdma_dev->vsi.mtu); + + udp_info = &qp->udp_info; + udp_info->pmtu = min_t(u32, (u32)IB_MTU_4096, ib_mtu); + + if (qp->qp_ctx.qp_common.qp_type == IB_QPT_RC) { + udp_info->retry_cnt = 2; + udp_info->rnr_retry = 2; + } + udp_info->src_port = 0xc000; + udp_info->dst_port = ROCE_V2_UDP_DPORT; + roce_info = &qp->roce_info; + ether_addr_copy(roce_info->mac_addr, rdma_dev->netdev->dev_addr); + if (!qp->user_mod) { + roce_info->udprivcq_en = true; +#ifdef SXE2_CFG_DEBUG + if (rdma_dev->rdma_func->UDPriv_CQEnable) + roce_info->udprivcq_en = false; +#endif + } + + roce_info->log_rra_max = (u16)fls(dev->hw_attrs.max_rra - 1); + roce_info->log_sra_max = (u16)fls(dev->hw_attrs.max_sra - 1); + ctx_info->roce_info = &qp->roce_info; + ctx_info->udp_info = &qp->udp_info; + if (qp->qp_ctx.qp_common.qp_type == IB_QPT_RC) { + ctx_info->retry_mode = SXE2_RETRY_MODE_GO_BACK_N; + ctx_info->credit_en = 1; + ctx_info->retry_resp_op_sel = 1; + ctx_info->ack_mode = rdma_dev->rdma_func->ack_mode; + ctx_info->log_ack_req_freq = + rdma_dev->rdma_func->log_ack_req_freq; + ctx_info->log_rtm = 1; + } + ctx_info->qp_bucket_type = SXE2_QP_BUCKET_128K; + ctx_info->dispatch_min_unit = 54; + ctx_info->dispatch_mode = 1; + ctx_info->udp_info->min_rnr_timer = SXE2_RDMA_MIN_RNR_NAK_DEFAULT; + 
sxe2_ctx_qp_setctx_roce(qp, qp->host_ctx.va); + DRV_RDMA_LOG_DEV_DEBUG("create qp%#x qpc print:\n", qp->ibqp.qp_num); + sxe2_print_qpc_info(rdma_dev, qp->host_ctx.va); +} + +static int sxe2_mq_create_qp_cmd(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_pci_f *rf = qp->dev->rdma_func; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_create_qp_info *qp_info; + struct sxe2_rdma_device *rdma_dev = qp->dev; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kalloc_and_get_mq_request failed\n"); + goto end; + } + mq_info = &mq_request->info; + qp_info = &mq_request->info.in.u.qp_create.info; + memset(qp_info, 0, sizeof(*qp_info)); + + mq_info->mq_cmd = MQ_OP_CREATE_QP; + mq_info->post_mq = 1; + mq_info->in.u.qp_create.qp = &qp->qp_ctx; + mq_info->in.u.qp_create.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); +end: + return ret; +} +int sxe2_mq_modify_qp_cmd(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_pci_f *rf = qp->dev->rdma_func; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_modify_qp_info *qp_info; + struct sxe2_rdma_device *rdma_dev = qp->dev; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kalloc_and_get_mq_request failed\n"); + goto end; + } + mq_info = &mq_request->info; + qp_info = &mq_request->info.in.u.qp_modify.info; + memset(qp_info, 0, sizeof(*qp_info)); + + mq_info->mq_cmd = MQ_OP_MODIFY_QP; + mq_info->post_mq = 1; + mq_info->in.u.qp_modify.qp = &qp->qp_ctx; + mq_info->in.u.qp_modify.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); +end: + return ret; +} +static int sxe2_mq_destroy_qp_cmd(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_pci_f *rf = qp->dev->rdma_func; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_destroy_qp_info *qp_info; + struct sxe2_rdma_device *rdma_dev = qp->dev; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kalloc_and_get_mq_request failed\n"); + goto end; + } + mq_info = &mq_request->info; + qp_info = &mq_request->info.in.u.qp_destroy.info; + memset(qp_info, 0, sizeof(*qp_info)); + + mq_info->mq_cmd = MQ_OP_DESTROY_QP; + mq_info->post_mq = 1; + mq_info->in.u.qp_destroy.qp = &qp->qp_ctx; + mq_info->in.u.qp_destroy.scratch = (uintptr_t)mq_request; + mq_info->destroy = true; + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); +end: + return ret; +} + +int sxe2_mq_query_qp_cmd(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_pci_f *rf = qp->dev->rdma_func; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_query_qp_info *qp_info; + struct sxe2_rdma_device *rdma_dev = qp->dev; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rf->mq, true); + + if (!mq_request) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kalloc_and_get_mq_request failed\n"); + goto end; + } + mq_info = &mq_request->info; + qp_info = &mq_request->info.in.u.qp_query.info; + memset(qp_info, 0, sizeof(*qp_info)); + + mq_info->mq_cmd = MQ_OP_QUERY_QP; + mq_info->post_mq = 1; + mq_info->in.u.qp_query.qp = &qp->qp_ctx; + mq_info->in.u.qp_query.scratch = 
(uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rf, mq_request); + sxe2_kput_mq_request(&rf->mq, mq_request); +end: + return ret; +} + +void sxe2_ctx_qp_fill_cc_wqe(struct sxe2_rdma_ctx_qp *qp, + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe) +{ + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + struct sxe2_rdma_ctx_dev *ctx_dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_rdma_cc_dcqcn_params *dcqcn_params = + &rdma_dev->rdma_func->cc_params.dcqcn_params; + struct sxe2_rdma_cc_timely_params *timely_params = + &rdma_dev->rdma_func->cc_params.timely_params; + + create_cc_qp_wqe->qp_id = qp->qp_common.qpn; + create_cc_qp_wqe->op = SXE2_MQ_OP_CREATE_QP; + create_cc_qp_wqe->qp_context_address = qp->hw_host_ctx_pa; + create_cc_qp_wqe->sw_cc_enable = rdma_qp->sw_cc_enable; + create_cc_qp_wqe->dcqcn_rc_l = dcqcn_params->rc & DCQCN_RC_L_MASK; + create_cc_qp_wqe->dcqcn_rc_h = dcqcn_params->rc >> DCQCN_RC_H_SHIFT; + if (ctx_dev->privileged) + create_cc_qp_wqe->dcqcn_func_id = rdma_dev->rdma_func->pf_id; + else + create_cc_qp_wqe->dcqcn_func_id = ctx_dev->rcms_info->pmf_index; + DRV_RDMA_LOG_DEBUG_BDF("qp cc:func id :%u\n", + create_cc_qp_wqe->dcqcn_func_id); + if (rdma_qp->sw_cc_enable == SXE2_QP_ENABLE_DCQCN || + rdma_qp->sw_cc_enable == SXE2_QP_ENABLE_DCQCN_TIMLEY) { + DRV_RDMA_LOG_DEBUG_BDF( + "qp cc:dcqcn params:\n" + "T interval : %u\n" + "B : %u\n" + "rai : %u\n" + "rhai : %u\n" + "rreduce mperiod : %u\n" + "min dec factor : %u\n" + "min rate : %u\n" + "K : %u\n" + "BC : %u\n" + "TC : %u\n" + "g : %u\n" + "alpha : %u\n", + dcqcn_params->t_interval, dcqcn_params->b, + dcqcn_params->rai_factor, dcqcn_params->rhai_factor, + dcqcn_params->rreduce_mperiod, + dcqcn_params->min_dec_factor, dcqcn_params->min_rate, + dcqcn_params->k, dcqcn_params->bc, dcqcn_params->tc, + dcqcn_params->g, dcqcn_params->alpha); + + create_cc_qp_wqe->dcqcn_b_l = dcqcn_params->b & DCQCN_B_L_MASK; + create_cc_qp_wqe->dcqcn_b_h = + dcqcn_params->b >> DCQCN_B_H_SHIFT; + create_cc_qp_wqe->dcqcn_bc = dcqcn_params->bc; + create_cc_qp_wqe->dcqcn_t_interval_h = + dcqcn_params->t_interval >> DCQCN_T_INTERVAL_H_SHIFT; + create_cc_qp_wqe->dcqcn_g = dcqcn_params->g; + create_cc_qp_wqe->dcqcn_rhai_h = + SXE2_USER_PARM_TO_HW_VAL(dcqcn_params->rhai_factor) >> + DCQCN_RHAI_H_SHIFT; + create_cc_qp_wqe->dcqcn_rai = + SXE2_USER_PARM_TO_HW_VAL(dcqcn_params->rai_factor); + create_cc_qp_wqe->dcqcn_f = dcqcn_params->f; + create_cc_qp_wqe->dcqcn_t_interval_l = + dcqcn_params->t_interval & DCQCN_T_INTERVAL_L_MASK; + create_cc_qp_wqe->dcqcn_rreduce_mperiod_h = + dcqcn_params->rreduce_mperiod >> + DCQCN_RREDUCE_MPERIOD_H_SHIFT; + create_cc_qp_wqe->dcqcn_k = dcqcn_params->k; + create_cc_qp_wqe->dcqcn_rhai_l = + SXE2_USER_PARM_TO_HW_VAL(dcqcn_params->rhai_factor) & + DCQCN_RHAI_L_MASK; + create_cc_qp_wqe->dcqcn_min_dec_factor = + SXE2_USER_PARM_TO_HW_VAL(dcqcn_params->min_dec_factor); + create_cc_qp_wqe->dcqcn_rreduce_next_node_info = + dcqcn_params->rreduce_next_node_info; + create_cc_qp_wqe->dcqcn_rreduce_mperiod_l = + dcqcn_params->rreduce_mperiod & + DCQCN_RREDUCE_MPERIOD_L_MASK; + create_cc_qp_wqe->dcqcn_alpha = dcqcn_params->alpha; + create_cc_qp_wqe->dcqcn_min_rate = + SXE2_USER_PARM_TO_HW_VAL(dcqcn_params->min_rate); + create_cc_qp_wqe->dcqcn_rt_h = + dcqcn_params->rt >> DCQCN_RT_H_SHIFT; + create_cc_qp_wqe->dcqcn_t_next_node_info = + dcqcn_params->t_next_node_info; + create_cc_qp_wqe->dcqcn_rt_l = + dcqcn_params->rt & DCQCN_RT_L_MASK; + 
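		/*
+		 * DCQCN parameters wider than their WQE fields (b, rt,
+		 * t_interval, rhai, rreduce_mperiod) are carried as _l/_h
+		 * halves, packed with the masks and shifts of the
+		 * create-CC-QP WQE layout.
+		 */
+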
create_cc_qp_wqe->dcqcn_t_counter = dcqcn_params->tc; + create_cc_qp_wqe->dcqcn_byte_counter = + dcqcn_params->byte_counter; + create_cc_qp_wqe->dcqcn_decrease_rate_valid = + dcqcn_params->decrease_rate_valid; + } + if (rdma_qp->sw_cc_enable == SXE2_QP_ENABLE_TIMELY || + rdma_qp->sw_cc_enable == SXE2_QP_ENABLE_DCQCN_TIMLEY) { + DRV_RDMA_LOG_DEBUG_BDF( + "qp cc:timely params:\n" + "min rtt : %u\n" + "tlow : %u\n" + "thigh : %u\n" + "rai : %u\n" + "beta : %u\n" + "alpha : %u\n", + timely_params->min_rtt, timely_params->tlow, + timely_params->thigh, timely_params->rai_factor, + timely_params->beta, timely_params->alpha); + + if (rdma_qp->sw_cc_enable == SXE2_QP_ENABLE_TIMELY) { + create_cc_qp_wqe->dcqcn_rai = SXE2_USER_PARM_TO_HW_VAL( + timely_params->rai_factor); + create_cc_qp_wqe->dcqcn_b_l = + SXE2_QP_DCQCN_MAX_B & DCQCN_B_L_MASK; + create_cc_qp_wqe->dcqcn_b_h = + SXE2_QP_DCQCN_MAX_B >> DCQCN_B_H_SHIFT; + } + create_cc_qp_wqe->timely_min_rtt_h = + timely_params->min_rtt >> TIMELY_MIN_RTT_H_SHIFT; + create_cc_qp_wqe->timely_alpha = timely_params->alpha; + create_cc_qp_wqe->timely_thigh_h = + timely_params->thigh >> TIMELY_THIGH_H_SHIFT; + create_cc_qp_wqe->timely_tlow = timely_params->tlow; + create_cc_qp_wqe->timely_min_rtt_l = + timely_params->min_rtt & TIMELY_MIN_RTT_L_MASK; + create_cc_qp_wqe->timely_pre_rtt_h = + timely_params->pre_rtt >> TIMELY_PRE_RTT_H_SHIFT; + create_cc_qp_wqe->timely_beta = timely_params->beta; + create_cc_qp_wqe->timely_thigh_l = + timely_params->thigh & TIMELY_THIGH_L_MASK; + create_cc_qp_wqe->timely_rtt_diff = timely_params->rtt_diff; + create_cc_qp_wqe->timely_pre_rtt_l = + timely_params->pre_rtt & TIMELY_PRE_RTT_L_MASK; + } +} + +int sxe2_ctx_qp_create(struct sxe2_rdma_ctx_qp *qp, + struct sxe2_create_qp_info *info, u64 scratch, + bool post_sq) +{ + struct sxe2_mq_ctx *mq; + void *mq_wqe; + struct qp_mq_wqe *create_normal_qp_wqe = NULL; + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe = NULL; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + int ret = 0; + + mq = qp->dev->mq; + + if (qp->qp_common.qpn < mq->dev->hw_attrs.min_hw_qp_id || + qp->qp_common.qpn > + (mq->dev->rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].max_cnt - + 1)) { + DRV_RDMA_LOG_DEV_ERR("qpn illegal, qpn(%d)\n", + qp->qp_common.qpn); + ret = -EINVAL; + goto end; + } + mq_wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!mq_wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kget_next_mq_wqe fail\n"); + goto end; + } + + memset(mq_wqe, 0, sizeof(struct qp_mq_wqe)); + if (!rdma_qp->sw_cc_enable) { + create_normal_qp_wqe = (struct qp_mq_wqe *)mq_wqe; + create_normal_qp_wqe->qpn = qp->qp_common.qpn; + create_normal_qp_wqe->op = SXE2_MQ_OP_CREATE_QP; + create_normal_qp_wqe->qp_context_address = qp->hw_host_ctx_pa; + create_normal_qp_wqe->sw_cc_enable = rdma_qp->sw_cc_enable; + } else { + sxe2_ctx_qp_fill_cc_wqe( + qp, (struct qp_mq_create_cc_qp_wqe *)mq_wqe); + create_cc_qp_wqe = (struct qp_mq_create_cc_qp_wqe *)mq_wqe; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (!rdma_qp->sw_cc_enable) { + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpn", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_normal_qp_wqe, qp); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_normal_qp_wqe, qp); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx"); + + INJECT_START(rdma_dev->rdma_func, 
"mq_rcs_wqe_createOp", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_normal_qp_wqe, qp); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_createOp"); + } else { + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpn", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_cc_qp_wqe, qp); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_cc_qp_wqe, qp, SXE2_MQ_OP_CREATE_QP); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_createOp", + &rdma_dev->rdma_func->mq.err_cqe_val, + create_cc_qp_wqe, qp, SXE2_MQ_OP_CREATE_QP); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_createOp"); + } + +#endif + + if (post_sq) + sxe2_kpost_mq(mq); + +end: + return ret; +} +int sxe2_ctx_qp_modify(struct sxe2_rdma_ctx_qp *qp, + struct sxe2_modify_qp_info *info, u64 scratch, + bool post_sq) +{ + struct sxe2_mq_ctx *mq; + struct qp_mq_wqe *modify_qp_wqe; + int ret = 0; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + + mq = qp->dev->mq; + + if (qp->qp_common.qpn < mq->dev->hw_attrs.min_hw_qp_id || + qp->qp_common.qpn > + (mq->dev->rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].max_cnt - + 1)) { + DRV_RDMA_LOG_DEV_ERR("qpn illegal, qpn(%d)\n", + qp->qp_common.qpn); + ret = -EINVAL; + goto end; + } + + modify_qp_wqe = (struct qp_mq_wqe *)sxe2_kget_next_mq_wqe(mq, scratch); + if (!modify_qp_wqe) { + DRV_RDMA_LOG_DEV_ERR("sxe2_kget_next_mq_wqe fail\n"); + ret = -ENOMEM; + goto end; + } + memset(modify_qp_wqe, 0, sizeof(struct qp_mq_wqe)); + modify_qp_wqe->qpn = qp->qp_common.qpn; + modify_qp_wqe->op = SXE2_MQ_OP_MODIFY_QP; + modify_qp_wqe->qp_context_address = qp->hw_host_ctx_pa; + modify_qp_wqe->modify_qp_mode = true; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpn", + &rdma_dev->rdma_func->mq.err_cqe_val, modify_qp_wqe, qp, + SXE2_MQ_OP_MODIFY_QP); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx", + &rdma_dev->rdma_func->mq.err_cqe_val, modify_qp_wqe, qp, + SXE2_MQ_OP_MODIFY_QP); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpctx"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); +end: + return ret; +} +int sxe2_ctx_qp_destroy(struct sxe2_rdma_ctx_qp *qp, + struct sxe2_destroy_qp_info *info, u64 scratch, + bool post_sq) +{ + struct sxe2_mq_ctx *mq; + struct qp_mq_wqe *destroy_qp_wqe; + int ret = 0; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + + mq = qp->dev->mq; + + destroy_qp_wqe = (struct qp_mq_wqe *)sxe2_kget_next_mq_wqe(mq, scratch); + if (!destroy_qp_wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kget_next_mq_wqe fail\n"); + goto end; + } + memset(destroy_qp_wqe, 0, sizeof(struct qp_mq_wqe)); + destroy_qp_wqe->qpn = qp->qp_common.qpn; + destroy_qp_wqe->op = SXE2_MQ_OP_DESTROY_QP; + destroy_qp_wqe->qp_context_address = qp->hw_host_ctx_pa; + + if (post_sq) + sxe2_kpost_mq(mq); +end: + return ret; +} +int sxe2_ctx_qp_query(struct sxe2_rdma_ctx_qp *qp, + struct sxe2_query_qp_info *info, u64 scratch, + bool post_sq) +{ + struct sxe2_mq_ctx *mq; + struct qp_mq_wqe *query_qp_wqe; + int ret = 0; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct sxe2_rdma_device *rdma_dev = rdma_qp->dev; + + mq = 
qp->dev->mq; + + query_qp_wqe = (struct qp_mq_wqe *)sxe2_kget_next_mq_wqe(mq, scratch); + if (!query_qp_wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("sxe2_kget_next_mq_wqe fail\n"); + goto end; + } + memset(query_qp_wqe, 0, sizeof(struct qp_mq_wqe)); + query_qp_wqe->qpn = qp->qp_common.qpn; + query_qp_wqe->op = SXE2_MQ_OP_QUERY_QP; + query_qp_wqe->qp_context_address = qp->hw_host_ctx_pa; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpn", + &rdma_dev->rdma_func->mq.err_cqe_val, query_qp_wqe, qp, + SXE2_MQ_OP_QUERY_QP); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpn"); +#endif + + if (post_sq) + sxe2_kpost_mq(mq); +end: + return ret; +} + +int sxe2_qp_info_init(struct ib_qp_init_attr *init_attr, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_dev *dev, struct sxe2_rdma_pd *pd, + u32 qpn, struct sxe2_qp_init_info *init_info) +{ + int ret = 0; + struct sxe2_common_attrs *common_attrs = &dev->hw_attrs.uk_attrs; + + if (init_attr->srq) + init_info->qp_common_init_info.srq = + &(to_srq(init_attr->srq)->srq_ctx.srq_drv); + + init_info->vsi = &rdma_dev->vsi; + init_info->qp_common_init_info.common_attrs = common_attrs; + init_info->qp_common_init_info.sq_size = init_attr->cap.max_send_wr; + init_info->qp_common_init_info.rq_size = init_attr->cap.max_recv_wr; + init_info->qp_common_init_info.max_sq_sge_cnt = + init_attr->cap.max_send_sge; + init_info->qp_common_init_info.max_rq_sge_cnt = + init_attr->cap.max_recv_sge; + init_info->qp_common_init_info.max_inline_data = + init_attr->cap.max_inline_data; + init_info->pd = &pd->pd_ctx; + init_info->qp_common_init_info.qpn = qpn; + init_info->host_ctx = + dma_alloc_coherent(dev->hw->device, + ALIGN(SXE2_QP_CTX_SIZE, 256), + &init_info->host_ctx_pa, GFP_KERNEL); + if (!init_info->host_ctx) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("host ctx dma_alloc_coherent failed\n"); + goto end; + } + memset(init_info->host_ctx, 0, ALIGN(SXE2_QP_CTX_SIZE, 256)); + + DRV_RDMA_LOG_DEV_DEBUG( + "create qp%#x:qp_type(%d) max_inline_data(%d) max_send_sge(%d)\n" + "\tmax_recv_sge(%d)\n" + "\tmax_send_wr(%d) max_recv_wr(%d) qpc_va %p qpc_pa %#llx\n", + qpn, init_attr->qp_type, init_attr->cap.max_inline_data, + init_attr->cap.max_send_sge, init_attr->cap.max_recv_sge, + init_attr->cap.max_send_wr, init_attr->cap.max_recv_wr, + init_info->host_ctx, init_info->host_ctx_pa); +end: + return ret; +} + +int sxe2_rdma_qp_init(struct ib_qp_init_attr *init_attr, struct ib_udata *udata, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_pd *pd, u32 qpn, + struct sxe2_qp_init_info *init_info, + struct sxe2_rdma_qp *qp) +{ + int ret = 0; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_rdma_ctx_qp *qp_ctx = &qp->qp_ctx; + + qp_ctx->qp_common.back_qp = qp; + qp_ctx->qp_common.lock = &qp->lock; + qp_ctx->qp_common.rd_fence_rate = rdma_dev->rd_fence_rate; + qp->dev = rdma_dev; + qp->pd = pd; + qp->ibqp.qp_num = qpn; + qp_ctx = &qp->qp_ctx; + qp->send_cq = to_scq(init_attr->send_cq); + qp->recv_cq = to_scq(init_attr->recv_cq); + qp->host_ctx.va = init_info->host_ctx; + qp->host_ctx.pa = init_info->host_ctx_pa; + qp->host_ctx.size = ALIGN(SXE2_QP_CTX_SIZE, 256); + qp->ctx_info.qp_compl_ctx = (uintptr_t)qp_ctx; + + if (udata) { + ret = sxe2_setup_umode_qp(udata, rdma_dev, qp, init_info, + init_attr); + if (ret) + goto end; + } else { + INIT_DELAYED_WORK(&qp->dwork_flush, sxe2_flush_wqe_worker); + ret = sxe2_setup_kmode_qp(dev, qp, 
init_info, init_attr); + if (ret) + goto end; + if (rdma_dev->kernel_llwqe_mode == 0) { + qp->qp_ctx.qp_common.push_db = NULL; + qp->qp_ctx.qp_common.push_wqe = NULL; + qp->qp_ctx.qp_common.llwqe = NULL; + DRV_RDMA_LOG_DEV_DEBUG( + "QPC: kernel llwqe disable, llwqe NULL\n"); + } else { + qp->qp_ctx.qp_common.push_db = rf->llwqe.db_addr; + qp->qp_ctx.qp_common.push_wqe = rf->llwqe.wqe_addr; + qp->qp_ctx.qp_common.llwqe = &rf->llwqe; + } + } + + if (init_attr->qp_type == IB_QPT_RC) { + init_info->qp_common_init_info.qp_type = IB_QPT_RC; + init_info->qp_common_init_info.qp_caps = + SXE2_SEND_WITH_IMM | SXE2_WRITE_WITH_IMM; + } else { + init_info->qp_common_init_info.qp_type = IB_QPT_UD; + init_info->qp_common_init_info.qp_caps = SXE2_SEND_WITH_IMM; + + if (init_attr->qp_type == IB_QPT_GSI) + qp->gsi_flag = true; + else + qp->gsi_flag = false; + } + + qp->sw_cc_enable = SXE2_QP_DISABLE_SW_CC; + if ((rf->cc_params.dcqcn_enable || rf->cc_params.timely_enable) && + init_attr->qp_type == IB_QPT_RC) { + mutex_lock(&rf->cc_refcount.refcount_lock); + if ((u32)atomic_read(&rf->cc_refcount.cc_qp_refcount) < + rf->max_cc_qp_cnt) { + atomic_inc(&rf->cc_refcount.cc_qp_refcount); + if (rf->cc_params.dcqcn_enable && + rf->cc_params.timely_enable) { + qp->sw_cc_enable = SXE2_QP_ENABLE_DCQCN_TIMLEY; + } else { + qp->sw_cc_enable = + (rf->cc_params.dcqcn_enable == true) ? + SXE2_QP_ENABLE_DCQCN : + SXE2_QP_ENABLE_TIMELY; + } + + DRV_RDMA_LOG_DEBUG_BDF( + "create cc qp refcount=%u enable algorithm=%u pf id=%u\n\n", + atomic_read(&rf->cc_refcount.cc_qp_refcount), + qp->sw_cc_enable, rf->pf_id); + } else { + DRV_RDMA_LOG_DEBUG_BDF( + "failed to create cc qp refcount=%u max cc qp cnt=%u\n", + atomic_read(&rf->cc_refcount.cc_qp_refcount), + rf->max_cc_qp_cnt); + } + mutex_unlock(&rf->cc_refcount.refcount_lock); + } + +end: + return ret; +} + +int sxe2_qp_qset_init_and_resp(struct ib_udata *udata, + struct sxe2_rdma_device *rdma_dev, u32 qpn, + struct sxe2_rdma_ctx_qp *qp_ctx, + struct sxe2_rdma_qp *qp) +{ + int ret = 0; + struct sxe2_create_qp_resp uresp = {}; + struct sxe2_qpc *qpc; + + if (rdma_dev->vsi.lag_aa) + ret = sxe2_qos_register_qset_bond(&rdma_dev->vsi, 0); + else + ret = sxe2_qos_register_qset(&rdma_dev->vsi, 0); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_qos_register_qset failed , rdma_dev->vsi.lag_aa(%d) err(%d)\n", + rdma_dev->vsi.lag_aa, ret); + goto end; + } + if (rdma_dev->vsi.lag_aa) { + if (atomic_read(&rdma_dev->vsi.port1_qp_cnt) <= + atomic_read(&rdma_dev->vsi.port2_qp_cnt)) { + qp->qp_ctx.qset_idx = QOS_QSET_IDX_0; + } else { + qp->qp_ctx.qset_idx = QOS_QSET_IDX_1; + } + ret = sxe2_qos_qp_add_qos_bond(&rdma_dev->vsi, qp_ctx); + } else { + ret = sxe2_qos_qp_add_qos(&rdma_dev->vsi, qp_ctx); + } + if (ret) { + DRV_RDMA_LOG_DEV_ERR("sxe2_qos_qp_add_qos failed , err(%d)\n", + ret); + goto qp_unregist_qset; + } + + if (udata) { + uresp.qpn = qpn; + uresp.qp_caps = qp->qp_ctx.qp_common.qp_caps; + ret = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "ib_copy_to_udata failed , err(%d)\n", ret); + goto qp_remove_qos; + } + } + + if (qp->sw_cc_enable) { + ret = sxe2_mq_query_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF("query qp failed , err(%d)\n", + ret); + goto qp_remove_qos; + } + qpc = (struct sxe2_qpc *)qp->host_ctx.va; + qp->sw_cc_idx = qpc->sw_cc_index; + DRV_RDMA_LOG_DEV_DEBUG("create cc qp enable %u cc qp index %u\n", + qpc->sw_cc_enable, qpc->sw_cc_index); + } + +end: + return ret; +qp_remove_qos: + 
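	/* Unwind in reverse order: detach QoS first, then drop the qset. */
+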
(void)sxe2_qos_qp_rem_qos(&rdma_dev->vsi, qp_ctx); +qp_unregist_qset: + if (rdma_dev->vsi.lag_aa) + sxe2_qos_unregister_qset_bond(&rdma_dev->vsi, 0); + else + sxe2_qos_unregister_qset(&rdma_dev->vsi, 0); + goto end; +} + +#ifdef CREATE_QP_VER_1 +struct ib_qp *sxe2_kcreate_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct sxe2_rdma_pd *pd = to_pd(ibpd); + struct sxe2_rdma_device *rdma_dev = to_dev(ibpd->device); + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_qp *qp = NULL; + u32 qpn = 0; + int ret; + struct sxe2_rdma_ctx_qp *qp_ctx = NULL; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_qp_init_info init_info = {}; + struct sxe2_qp_host_ctx_info *ctx_info; + + ret = sxe2_validate_qp_attrs(init_attr, rdma_dev, udata); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_validate_qp_attrs failed , err(%d)\n", ret); + goto end; + } + if (udata && (udata->inlen < sizeof(struct sxe2_create_qp_req) || + udata->outlen < sizeof(struct sxe2_create_qp_resp))) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "udata illegal! , udata->inlen(%zu) udata->outlen(%zu)\n", + udata->inlen, udata->outlen); + goto end; + } + if (init_attr->qp_type == IB_QPT_GSI) { + qpn = 1; + } else { + ret = sxe2_kalloc_rsrc(rf, rf->allocated_qps, rf->max_qp, &qpn, + &rf->next_qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_kalloc_rsrc failed , err(%d)\n", ret); + goto end; + } + } + + ret = sxe2_qp_info_init(init_attr, rdma_dev, dev, pd, qpn, &init_info); + if (ret) + goto free_qpn; + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("qp kzalloc failed\n"); + goto free_host_ctx; + } + + qp_ctx = &qp->qp_ctx; + + ret = sxe2_rdma_qp_init(init_attr, udata, rdma_dev, pd, qpn, &init_info, + qp); + if (ret) + goto free_qp; + sxe2_rdma_ctx_qp_init(qp_ctx, &init_info); + ctx_info = &qp->ctx_info; + ctx_info->send_cq_num = qp->send_cq->cq_ctx.cq_uk.cq_id; + ctx_info->rcv_cq_num = qp->recv_cq->cq_ctx.cq_uk.cq_id; + ctx_info->src_vsi = rdma_dev->vsi.vsi_idx; + sxe2_fill_and_set_qpctx_info(qp, ctx_info); + ret = sxe2_mq_create_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("sxe2_mq_create_qp_cmd failed , err(%d)\n", + ret); + goto free_qp_buf; + } + ret = drv_rdma_debug_qp_add(rdma_dev, qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("drv_rdma_debug_qp_add failed , err(%d)\n", + ret); + goto qp_destroy_cmd; + } + refcount_set(&qp->refcnt, 1); + spin_lock_init(&qp->lock); + qp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; + rf->qp_table[qpn] = qp; + qp->ibqp_state = qp_ctx->qp_state; + + ret = sxe2_qp_qset_init_and_resp(udata, rdma_dev, qpn, qp_ctx, qp); + if (ret) + goto qp_debugfs_remove; + init_completion(&qp->free_qp); + goto end; + +qp_debugfs_remove: + drv_rdma_debug_qp_remove(rdma_dev, qp); +qp_destroy_cmd: + sxe2_mq_destroy_qp_cmd(qp); +free_qp_buf: + if (qp->sw_cc_enable != SXE2_QP_DISABLE_SW_CC) { + mutex_lock(&rf->cc_refcount.refcount_lock); + atomic_dec(&rf->cc_refcount.cc_qp_refcount); + mutex_unlock(&rf->cc_refcount.refcount_lock); + } + sxe2_free_qp_buf(qp, dev, udata); +free_qp: + kfree(qp); +free_host_ctx: + dma_free_coherent(dev->hw->device, ALIGN(SXE2_QP_CTX_SIZE, 256), + init_info.host_ctx, init_info.host_ctx_pa); + init_info.host_ctx = NULL; +free_qpn: + if (init_attr->qp_type != IB_QPT_GSI) + sxe2_kfree_rsrc(rf, rf->allocated_qps, qpn); +end: + return ret ? 
ERR_PTR(ret) : &qp->ibqp; +} + +#else +int sxe2_kcreate_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ib_pd *ibpd = ibqp->pd; + struct sxe2_rdma_pd *pd = to_pd(ibpd); + struct sxe2_rdma_device *rdma_dev = to_dev(ibpd->device); + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + struct sxe2_rdma_qp *qp = to_qp(ibqp); + u32 qpn = 0; + int ret = 0; + struct sxe2_rdma_ctx_qp *qp_ctx = &qp->qp_ctx; + struct sxe2_rdma_ctx_dev *dev = &rf->ctx_dev; + struct sxe2_qp_init_info init_info = {}; + struct sxe2_qp_host_ctx_info *ctx_info; + + ret = sxe2_validate_qp_attrs(init_attr, rdma_dev, udata); + if (ret) + goto end; + if (udata && (udata->inlen < sizeof(struct sxe2_create_qp_req) || + udata->outlen < sizeof(struct sxe2_create_qp_resp))) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "udata illegal! , udata->inlen(%zu) udata->outlen(%zu)\n", + udata->inlen, udata->outlen); + goto end; + } + if (init_attr->qp_type == IB_QPT_GSI) { + qpn = 1; + } else { + ret = sxe2_kalloc_rsrc(rf, rf->allocated_qps, rf->max_qp, &qpn, + &rf->next_qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_kalloc_rsrc failed , err(%d)\n", ret); + goto end; + } + } + + ret = sxe2_qp_info_init(init_attr, rdma_dev, dev, pd, qpn, &init_info); + if (ret) + goto free_qpn; + + ret = sxe2_rdma_qp_init(init_attr, udata, rdma_dev, pd, qpn, &init_info, + qp); + if (ret) + goto free_host_ctx; + + sxe2_rdma_ctx_qp_init(qp_ctx, &init_info); + + ctx_info = &qp->ctx_info; + ctx_info->send_cq_num = qp->send_cq->cq_ctx.cq_uk.cq_id; + ctx_info->rcv_cq_num = qp->recv_cq->cq_ctx.cq_uk.cq_id; + ctx_info->src_vsi = rdma_dev->vsi.vsi_idx; + sxe2_fill_and_set_qpctx_info(qp, ctx_info); + ret = sxe2_mq_create_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("sxe2_mq_create_qp_cmd failed , err(%d)\n", + ret); + goto free_qp_buf; + } + ret = drv_rdma_debug_qp_add(rdma_dev, qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("drv_rdma_debug_qp_add failed , err(%d)\n", + ret); + goto qp_destroy_cmd; + } + refcount_set(&qp->refcnt, 1); + spin_lock_init(&qp->lock); + qp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
1 : 0; + rf->qp_table[qpn] = qp; + qp->ibqp_state = qp_ctx->qp_state; + + ret = sxe2_qp_qset_init_and_resp(udata, rdma_dev, qpn, qp_ctx, qp); + if (ret) + goto qp_debugfs_remove; + + init_completion(&qp->free_qp); + goto end; + +qp_debugfs_remove: + drv_rdma_debug_qp_remove(rdma_dev, qp); +qp_destroy_cmd: + sxe2_mq_destroy_qp_cmd(qp); +free_qp_buf: + if (qp->sw_cc_enable != SXE2_QP_DISABLE_SW_CC) { + mutex_lock(&rf->cc_refcount.refcount_lock); + atomic_dec(&rf->cc_refcount.cc_qp_refcount); + mutex_unlock(&rf->cc_refcount.refcount_lock); + } + sxe2_free_qp_buf(qp, dev, udata); +free_host_ctx: + dma_free_coherent(dev->hw->device, ALIGN(SXE2_QP_CTX_SIZE, 256), + init_info.host_ctx, init_info.host_ctx_pa); + init_info.host_ctx = NULL; +free_qpn: + if (init_attr->qp_type != IB_QPT_GSI) + sxe2_kfree_rsrc(rf, rf->allocated_qps, qpn); +end: + return ret; +} +#endif + +#ifdef QUERY_PKEY_V1 +int sxe2_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +#else +int sxe2_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) +#endif +{ + int ret = 0; + + if (index >= SXE2_PKEY_TBL_SZ) { + ret = -EINVAL; + goto end; + } + *pkey = SXE2_DEFAULT_PKEY; +end: + return ret; +} + +static u16 sxe2_rdma_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03FFF; + u32 fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + + return (u16)(fl_low | 0xc000); +} +static u32 sxe2_rdma_calc_flow_label(u32 local_qpn, u32 remote_qpn) +{ + u64 fl = (u64)local_qpn * remote_qpn; + + fl ^= fl >> 20; + fl ^= fl >> 40; + + return (u32)(fl & 0x000FFFFF); +} +static u16 sxe2_rdma_get_udp_sport(u32 flow_label, u32 local_qpn, + u32 remote_qpn) +{ + if (!flow_label) + flow_label = sxe2_rdma_calc_flow_label(local_qpn, remote_qpn); + + return sxe2_rdma_flow_label_to_udp_sport(flow_label); +} +static u8 sxe2_roce_get_vlan_prio(struct net_device __rcu *ndev_rcu, u8 prio) +{ + struct net_device *ndev; + + rcu_read_lock(); + ndev = rcu_dereference(ndev_rcu); + if (!ndev) + goto end; + if (is_vlan_dev(ndev)) { + u16 vlan_qos = (u16)vlan_dev_get_egress_qos_mask(ndev, prio); + + prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + } +end: + rcu_read_unlock(); + return prio; +} +static int sxe2_set_roce_cm_info(struct sxe2_rdma_qp *qp, + struct ib_qp_attr *attr, u16 *vlan_id) +{ + const struct ib_gid_attr *sgid_attr; + int ret = 0; + struct sxe2_av *av = &qp->roce_ah.av; + + sgid_attr = attr->ah_attr.grh.sgid_attr; + if (sgid_attr->ndev) { +#ifdef HAVE_NO_FIELD_GID + *vlan_id = rdma_vlan_dev_vlan_id(sgid_attr->ndev); + ether_addr_copy(qp->ctx_info.roce_info->mac_addr, + sgid_attr->ndev->dev_addr); +#else + ret = rdma_read_gid_l2_fields(sgid_attr, vlan_id, + qp->ctx_info.roce_info->mac_addr); +#endif + if (ret) + goto end; + } + av->net_type = rdma_gid_attr_network_type(sgid_attr); + rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); + qp->ctx_info.user_pri = + sxe2_roce_get_vlan_prio(sgid_attr->ndev, qp->ctx_info.user_pri); + qp->qp_ctx.user_pri = qp->ctx_info.user_pri; +end: + return ret; +} + +static int sxe2_wait_for_suspend(struct sxe2_rdma_qp *qp) +{ + int ret = 0; + + if (!wait_event_timeout(qp->dev->suspend_wq, !qp->suspend_pending, + msecs_to_jiffies(SXE2_EVENT_TIMEOUT_MS))) { + qp->suspend_pending = false; + ret = -EBUSY; + } + return ret; +} + +void sxe2_sched_qp_flush_work(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_device *rdma_dev = qp->dev; + + if (qp->qp_ctx.qp_common.destroy_pending) { + DRV_RDMA_LOG_DEV_ERR("qp:%d is destroying\n", qp->ibqp.qp_num); + goto end; + } + + 
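	/*
+	 * Hold a QP reference across the delayed flush work; when
+	 * mod_delayed_work() reports the work was already queued, the
+	 * earlier reference is still held, so release this one.
+	 */
+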
sxe2_qp_add_ref(&qp->ibqp); + if (mod_delayed_work(qp->dev->cleanup_wq, &qp->dwork_flush, + msecs_to_jiffies(SXE2_FLUSH_DELAY_MS))) { + sxe2_qp_rem_ref(&qp->ibqp); + } + +end: + return; +} + +void sxe2_flush_wqe_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct sxe2_rdma_qp *rdma_qp = + container_of(dwork, struct sxe2_rdma_qp, dwork_flush); + + sxe2_generate_flush_completions(rdma_qp); + + sxe2_qp_rem_ref(&rdma_qp->ibqp); +} + +static int drv_rel_rate_to_stat_rate(u32 rel_rate) +{ + u8 state_rate = 0; + + if (rel_rate > 0 && rel_rate <= SXE2_RATE_2_5_GBPS) + state_rate = SXE2_IB_RATE_TAG_1; + else if (rel_rate > SXE2_RATE_2_5_GBPS && rel_rate <= SXE2_RATE_10_GBPS) + state_rate = SXE2_IB_RATE_TAG_2; + else if (rel_rate > SXE2_RATE_10_GBPS && rel_rate <= SXE2_RATE_25_GBPS) + state_rate = SXE2_IB_RATE_TAG_3; + else if (rel_rate > SXE2_RATE_25_GBPS && rel_rate <= SXE2_RATE_40_GBPS) + state_rate = SXE2_IB_RATE_TAG_4; + else if (rel_rate > SXE2_RATE_40_GBPS && rel_rate <= SXE2_RATE_50_GBPS) + state_rate = SXE2_IB_RATE_TAG_5; + else if (rel_rate > SXE2_RATE_50_GBPS && rel_rate <= SXE2_RATE_60_GBPS) + state_rate = SXE2_IB_RATE_TAG_6; + else if (rel_rate > SXE2_RATE_60_GBPS && rel_rate <= SXE2_RATE_80_GBPS) + state_rate = SXE2_IB_RATE_TAG_7; + else if (rel_rate > SXE2_RATE_80_GBPS && rel_rate <= SXE2_RATE_100_GBPS) + state_rate = SXE2_IB_RATE_TAG_8; + else if (rel_rate > SXE2_RATE_100_GBPS) + state_rate = SXE2_IB_RATE_TAG_8; + + return state_rate; +} + +static int drv_ib_rate_to_sxe2(struct sxe2_rdma_qp *qp, u8 rate) +{ + int ret = 0; + u32 rel_rate = 0; + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct aux_core_dev_info *cdev_info = + (struct aux_core_dev_info *)(qp->dev->rdma_func->cdev); + u32 port_active_speed; + u32 ib_rate_to_real_rate[STATA_RATE_MAX_CNT]; + + ib_rate_to_real_rate[SXE2_IB_RATE_2_5_GBPS] = SXE2_RATE_2_5_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_5_GBPS] = SXE2_RATE_5_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_10_GBPS] = SXE2_RATE_10_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_20_GBPS] = SXE2_RATE_20_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_30_GBPS] = SXE2_RATE_30_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_40_GBPS] = SXE2_RATE_40_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_60_GBPS] = SXE2_RATE_60_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_80_GBPS] = SXE2_RATE_80_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_120_GBPS] = SXE2_RATE_120_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_14_GBPS] = SXE2_RATE_14_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_56_GBPS] = SXE2_RATE_56_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_112_GBPS] = SXE2_RATE_112_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_168_GBPS] = SXE2_RATE_168_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_25_GBPS] = SXE2_RATE_25_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_100_GBPS] = SXE2_RATE_100_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_200_GBPS] = SXE2_RATE_200_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_300_GBPS] = SXE2_RATE_300_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_28_GBPS] = SXE2_RATE_28_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_50_GBPS] = SXE2_RATE_50_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_400_GBPS] = SXE2_RATE_400_GBPS; + ib_rate_to_real_rate[SXE2_IB_RATE_600_GBPS] = SXE2_RATE_600_GBPS; + if (rdma_dev->rdma_func->ctx_dev.privileged) { + port_active_speed = cdev_info->ops->rdma_get_link_speed(cdev_info); + } else { + ret = sxe2_vchnl_req_get_port_acitve_speed( + &rdma_dev->rdma_func->ctx_dev, &port_active_speed); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("qp:vchnl get port active speed err 
ret=%d\n", ret); + goto end; + } + } + if (port_active_speed == SXE2_LINK_SPEED_UNKNOWN) { + ret = -EPERM; + DRV_RDMA_LOG_DEV_ERR("qp:port active speed is unknown\n"); + goto end; + } + if (rate == IB_RATE_PORT_CURRENT) { + rel_rate = port_active_speed; + DRV_RDMA_LOG_DEV_DEBUG("qp:rate %u rel rate %u\n", rate, rel_rate); + } else if (rate < SXE2_IB_RATE_2_5_GBPS || rate > SXE2_IB_RATE_600_GBPS) { + ret = -EINVAL; + goto end; + } else { + rel_rate = ib_rate_to_real_rate[rate]; + if (rel_rate > port_active_speed) + rel_rate = port_active_speed; + } + qp->ctx_info.roce_info->state_rate = drv_rel_rate_to_stat_rate(rel_rate); + DRV_RDMA_LOG_DEV_DEBUG("qp:user rate %u rel_rate %u state_rate:%u\n", + rate, rel_rate, qp->ctx_info.roce_info->state_rate); +end: + return ret; +} + +static int sxe2_rdma_encoding_rd_atomic(u8 max_rd_atomic, + struct sxe2_rdma_qp *qp, bool is_sra) +{ + struct sxe2_rdma_device *rdma_dev = qp->dev; + u16 log_val = 0; + int ret = 0; + + if (max_rd_atomic > SXE2_MAX_RD_ATOMIC) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "max_rd_atomic must be [0,64], now is %u\n", + max_rd_atomic); + goto end; + } else if (max_rd_atomic == SXE2_MIN_LOG_RD_ATOMIC) { + log_val = SXE2_MIN_LOG_RD_ATOMIC; + } else { + log_val = (u16)fls((u32)(max_rd_atomic - 1)); + } + + if (is_sra) + qp->ctx_info.roce_info->log_sra_max = log_val; + else + qp->ctx_info.roce_info->log_rra_max = log_val; + +end: + return ret; +} + +static u8 sxe2_rdma_decoding_rd_atomic(u16 log_rd_atomic) +{ + u8 cal_val = 0; + + if (log_rd_atomic <= SXE2_ENCODING_READ_3) + cal_val = SXE2_MIN_RD_ATOMIC; + else if (log_rd_atomic >= SXE2_ENCODING_READ_7) + cal_val = SXE2_MAX_RD_ATOMIC; + else + cal_val = (1 << log_rd_atomic); + return cal_val; +} + +static void sxe2_flush_wqes(struct sxe2_rdma_qp *qp, u32 flush_op) +{ + struct sxe2_rdma_pci_f *rdma_func = qp->dev->rdma_func; + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_qpc *qpc; + unsigned long flags; + + if (!(flush_op & SXE2_FLUSH_SQ) && !(flush_op & SXE2_FLUSH_RQ)) { + DRV_RDMA_LOG_DEV_DEBUG("SQ/RQ flush_op not valid\n"); + goto end; + } + + if (flush_op & SXE2_REFLUSH) { + if (qp->qp_ctx.flush_sq || qp->qp_ctx.flush_rq) { + qp->qp_ctx.flush_sq = false; + qp->qp_ctx.flush_rq = false; + } else { + DRV_RDMA_LOG_DEV_ERR( + "reflush, but last sq/rq flush flag not valid\n"); + goto end; + } + if (flush_op & SXE2_FLUSH_SQ) { + if (sxe2_mq_query_qp_cmd(qp)) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_mq_query_qp_cmd failed\n"); + goto end; + } + qpc = (struct sxe2_qpc *)qp->host_ctx.va; + if (qpc->sq_flush_flag != qpc->txeng_sq_flush_flag) { + DRV_RDMA_LOG_DEV_WARN( + "reflush, but hw not finish last sq flush\n"); + } else { + qp->qp_ctx.flush_sq = true; + } + } + if (flush_op & SXE2_FLUSH_RQ) + qp->qp_ctx.flush_rq = true; + } else { + if (flush_op & SXE2_FLUSH_SQ) + qp->qp_ctx.flush_sq = true; + if (flush_op & SXE2_FLUSH_RQ) + qp->qp_ctx.flush_rq = true; + } + + if (qp->qp_ctx.flush_sq || qp->qp_ctx.flush_rq) { + spin_lock_irqsave(&qp->lock, flags); + qp->flush_issued = true; + spin_unlock_irqrestore(&qp->lock, flags); + } + + DRV_RDMA_LOG_DEV_DEBUG( + "flush out:qp %#x, op %u, sq_flush: %u, rq_flush: %u, flush_issued: %u\n", + qp->qp_ctx.qp_common.qpn, flush_op, qp->qp_ctx.flush_sq, + qp->qp_ctx.flush_rq, qp->flush_issued); +end: + return; +} + +static int sxe2_check_modify_qp_attrs(struct ib_qp_attr *attr, int attr_mask, + struct sxe2_rdma_device *rdma_dev, + struct ib_udata *udata) +{ + int ret = 0; + + if (udata && (udata->inlen < sizeof(struct 
sxe2_modify_qp_req) || + udata->outlen < sizeof(struct sxe2_modify_qp_resp))) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "udata illegal! , udata->inlen(%zu) udata->outlen(%zu)\n", + udata->inlen, udata->outlen); + goto end; + } + + if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) { + ret = -EOPNOTSUPP; + DRV_RDMA_LOG_DEV_ERR("attr_mask illegal, attr_mask(%d)\n", + attr_mask); + goto end; + } + +end: + return ret; +} + +static int sxe2_modify_qp_roce_info_set(struct ib_qp *ibqp, + struct ib_qp_attr *attr, int attr_mask) +{ + int ret = 0; + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_roce_offload_info *roce_info = &qp->roce_info; + + if (attr_mask & IB_QP_DEST_QPN) + roce_info->dest_qp = attr->dest_qp_num; + if (attr_mask & IB_QP_PKEY_INDEX) { + ret = sxe2_query_pkey(ibqp->device, 0, attr->pkey_index, + &roce_info->p_key); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_query_pkey fail, pkey_index(%u) ret(%d)\n", + attr->pkey_index, ret); + goto end; + } + roce_info->p_key = attr->pkey_index; + } + if (attr_mask & IB_QP_QKEY) + roce_info->qkey = attr->qkey; +end: + return ret; +} + +static void sxe2_modify_qp_ctx_info_set(struct sxe2_rdma_qp *qp, + struct sxe2_rdma_pd *pd, + struct sxe2_qp_host_ctx_info *ctx_info) +{ + if (qp->qp_ctx.qp_common.qp_type == IB_QPT_UD) { + if (qp->gsi_flag) + ctx_info->log_msg_max = SXE2_LOG_MSG_8; + else + ctx_info->log_msg_max = SXE2_LOG_MSG_12; + } else { + ctx_info->log_msg_max = SXE2_LOG_MSG_31; + } + ctx_info->roce_info->pd_id = pd->pd_ctx.pd_id; +} + +static int sxe2_modify_qp_qset_and_vlan_set(struct ib_qp *ibqp, + struct ib_qp_attr *attr, + int attr_mask) +{ + int ret = 0; + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_qp_host_ctx_info *ctx_info = &qp->ctx_info; + struct sxe2_udp_offload_info *udp_info = &qp->udp_info; + u16 vlan_id = VLAN_N_VID; + + ret = sxe2_set_roce_cm_info(qp, attr, &vlan_id); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("sxe2_set_roce_cm_info fail, ret(%d)\n", + ret); + goto end; + } + if (rdma_dev->vsi.lag_aa) + ret = sxe2_qos_register_qset_bond(&rdma_dev->vsi, + ctx_info->user_pri); + else + ret = sxe2_qos_register_qset(&rdma_dev->vsi, + ctx_info->user_pri); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_qos_register_qset fail, aa(%d), ret(%d)\n", + rdma_dev->vsi.lag_aa, ret); + goto end; + } + qp->qp_ctx.user_pri = ctx_info->user_pri; + if (rdma_dev->vsi.lag_aa) + ret = sxe2_qos_qp_add_qos_bond(&rdma_dev->vsi, &qp->qp_ctx); + else + ret = sxe2_qos_qp_add_qos(&rdma_dev->vsi, &qp->qp_ctx); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_qos_qp_add_qos fail, aa(%d), ret(%d)\n", + rdma_dev->vsi.lag_aa, ret); + goto end; + } + if (vlan_id >= VLAN_N_VID && rdma_dev->dcb_vlan_mode) + vlan_id = 0; + if (vlan_id < VLAN_N_VID) { + udp_info->insert_vlan_tag = true; + udp_info->vlan_tag = vlan_id | ctx_info->user_pri + << VLAN_PRIO_SHIFT; + } else { + udp_info->insert_vlan_tag = false; + } + +end: + return ret; +} + +static int sxe2_rdma_gid_to_ip(struct sxe2_av *av, struct ib_qp_attr *attr, + struct sxe2_rdma_qp *qp) +{ + int ret = 0; + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_udp_offload_info *udp_info = &qp->udp_info; + + rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid); + if (av->net_type == RDMA_NETWORK_IPV6) { + __be32 *daddr = + av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32; + __be32 *saddr = + av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32; + sxe2_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr); 
+ sxe2_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr); + udp_info->ipv4 = false; + } else if (av->net_type == RDMA_NETWORK_IPV4) { + __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; + __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; + + udp_info->ipv4 = true; + udp_info->dest_ip_addr[0] = 0; + udp_info->dest_ip_addr[1] = 0; + udp_info->dest_ip_addr[2] = 0; + udp_info->dest_ip_addr[3] = ntohl(daddr); + udp_info->local_ipaddr[0] = 0; + udp_info->local_ipaddr[1] = 0; + udp_info->local_ipaddr[2] = 0; + udp_info->local_ipaddr[3] = ntohl(saddr); + } else { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("net_type illegal, av->net_type(%d)\n", + av->net_type); + goto end; + } + +end: + return ret; +} + +static int sxe2_modify_qp_udp_info_set(struct ib_qp *ibqp, + struct ib_qp_attr *attr, int attr_mask) +{ + int ret = 0; + u32 active_mtu = 0; + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_qp_host_ctx_info *ctx_info = &qp->ctx_info; + struct sxe2_udp_offload_info *udp_info = &qp->udp_info; + struct sxe2_roce_offload_info *roce_info = &qp->roce_info; + struct sxe2_av *av = &qp->roce_ah.av; + u32 ib_mtu = 0; + bool cur_dscp_mode = 0; + + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || + attr->path_mtu > IB_MTU_4096) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("path_mtu illegal, path_mtu(%d)\n", + attr->path_mtu); + goto end; + } + ib_mtu = (u32)sxe2_iboe_get_mtu((int)rdma_dev->netdev->mtu); + active_mtu = min_t(u32, (u32)IB_MTU_4096, ib_mtu); + udp_info->pmtu = min_t(u32, active_mtu, (u32)attr->path_mtu); + } + + if (attr_mask & IB_QP_SQ_PSN) + udp_info->sq_psn = attr->sq_psn; + if (attr_mask & IB_QP_RQ_PSN) + udp_info->rq_psn = attr->rq_psn; + if (attr_mask & IB_QP_RNR_RETRY) + udp_info->rnr_retry = attr->rnr_retry; + if (attr_mask & IB_QP_MIN_RNR_TIMER) + udp_info->min_rnr_timer = attr->min_rnr_timer; + if (attr_mask & IB_QP_RETRY_CNT) + udp_info->retry_cnt = attr->retry_cnt; + + if (attr_mask & IB_QP_TIMEOUT) { + udp_info->ack_timeout = + (attr->timeout > SXE2_MAX_ACK_TIMEOUT_VAL) ? + SXE2_MAX_ACK_TIMEOUT_VAL : + attr->timeout; + } else { + udp_info->ack_timeout = + (rdma_dev->roce_rtomin > SXE2_MAX_ACK_TIMEOUT_VAL) ? 
+				SXE2_MAX_ACK_TIMEOUT_VAL :
+				rdma_dev->roce_rtomin;
+	}
+
+	if (attr_mask & IB_QP_AV) {
+		ether_addr_copy(roce_info->dest_mac_addr,
+				ah_attr_to_dmac(attr->ah_attr));
+		memset(&qp->roce_ah, 0, sizeof(qp->roce_ah));
+		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+			udp_info->ttl = attr->ah_attr.grh.hop_limit;
+			udp_info->flow_label = attr->ah_attr.grh.flow_label;
+			udp_info->dscp = attr->ah_attr.grh.traffic_class >> 2;
+			udp_info->ecn = attr->ah_attr.grh.traffic_class & 0x3;
+			udp_info->src_port =
+				sxe2_rdma_get_udp_sport(udp_info->flow_label,
+							ibqp->qp_num,
+							roce_info->dest_qp);
+			ret = sxe2_qos_qp_rem_qos(&rdma_dev->vsi, &qp->qp_ctx);
+			if (ret) {
+				DRV_RDMA_LOG_DEV_ERR(
+					"sxe2_qos_qp_rem_qos fail, ret(%d)\n",
+					ret);
+				goto end;
+			}
+			if (rdma_dev->vsi.lag_aa)
+				sxe2_qos_unregister_qset_bond(
+					&rdma_dev->vsi, qp->qp_ctx.user_pri);
+			else
+				sxe2_qos_unregister_qset(&rdma_dev->vsi,
+							 qp->qp_ctx.user_pri);
+			if (rdma_dev->vsi.lag_aa)
+				cur_dscp_mode = qp->qp_ctx.vsi->dscp_mode[qp->qp_ctx.qset_idx];
+			else
+				cur_dscp_mode = qp->qp_ctx.vsi->dscp_mode[QOS_QSET_IDX_0];
+
+			if (rdma_dev->vsi.lag_aa && cur_dscp_mode) {
+				ctx_info->user_pri =
+					qp->qp_ctx.vsi->dscp_map[qp->qp_ctx.qset_idx][udp_info->dscp];
+			} else if (cur_dscp_mode) {
+				ctx_info->user_pri =
+					qp->qp_ctx.vsi->dscp_map[QOS_QSET_IDX_0][udp_info->dscp];
+			} else {
+				ctx_info->user_pri = (u8)rt_tos2priority(
+					attr->ah_attr.grh.traffic_class);
+			}
+			if (ctx_info->user_pri >= SXE2_MAX_USER_PRIORITY) {
+				DRV_RDMA_LOG_DEV_ERR("user pri error, val(%d)\n",
+						     ctx_info->user_pri);
+				ctx_info->user_pri = 0;
+			}
+		}
+
+		ret = drv_ib_rate_to_sxe2(qp,
+					  rdma_ah_get_static_rate(&attr->ah_attr));
+		if (ret)
+			goto end;
+
+		ret = sxe2_modify_qp_qset_and_vlan_set(ibqp, attr, attr_mask);
+		if (ret)
+			goto end;
+
+		av->attrs = attr->ah_attr;
+		ret = sxe2_rdma_gid_to_ip(av, attr, qp);
+		if (ret)
+			goto end;
+	}
+end:
+	return ret;
+}
+
+static int sxe2_qp_state_info_set(struct sxe2_rdma_qp *qp,
+				  struct ib_qp_attr *attr, int attr_mask,
+				  struct ib_udata *udata, u8 *issue_modify_qp)
+{
+	int ret = 0;
+	struct sxe2_rdma_device *rdma_dev = qp->dev;
+	unsigned long flags;
+
+#ifdef NEED_LINK_LAYER
+	if (!ib_modify_qp_is_ok(qp->ibqp_state, attr->qp_state,
+				qp->ibqp.qp_type, attr_mask, IB_LINK_LAYER_ETHERNET)) {
+#else
+	if (!ib_modify_qp_is_ok(qp->ibqp_state, attr->qp_state,
+				qp->ibqp.qp_type, attr_mask)) {
+#endif
+
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"modify qp illegal, cur_state(%d)\n"
+			"\tnext_state(%d) qp_type(%d) attr_mask(%d)\n",
+			qp->ibqp_state, attr->qp_state, qp->ibqp.qp_type,
+			attr_mask);
+		goto end;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"modify qp%#x cur_state(%d) next_state(%d) qp_type(%d) attr_mask(%d)\n",
+		qp->ibqp.qp_num, qp->ibqp_state, attr->qp_state,
+		qp->ibqp.qp_type, attr_mask);
+	switch (attr->qp_state) {
+	case IB_QPS_INIT:
+		*issue_modify_qp = 1;
+		break;
+	case IB_QPS_RTR:
+		*issue_modify_qp = 1;
+		break;
+	case IB_QPS_RTS:
+		*issue_modify_qp = 1;
+		break;
+	case IB_QPS_SQD:
+		if (qp->ibqp_state == IB_QPS_SQD) {
+			DRV_RDMA_LOG_DEV_DEBUG("QP trans from SQD TO SQD\n");
+			goto end;
+		}
+
+		*issue_modify_qp = 1;
+		qp->suspend_pending = true;
+		break;
+	case IB_QPS_ERR:
+		*issue_modify_qp = 1;
+		break;
+	case IB_QPS_RESET:
+		*issue_modify_qp = 1;
+		spin_lock_irqsave(&qp->lock, flags);
+		qp->flush_issued = 0;
+		spin_unlock_irqrestore(&qp->lock, flags);
+		qp->qp_ctx.flush_rq = 0;
+		qp->qp_ctx.flush_sq = 0;
+		qp->qp_ctx.sq_flush_polarity = 0;
+		if (!udata) {
+			if (qp->send_cq) {
+				sxe2_clean_cqes(qp, qp->send_cq, SQ_CQ);
+				if 
((qp->recv_cq) && + (qp->recv_cq != qp->send_cq)) + sxe2_clean_cqes(qp, qp->recv_cq, RQ_CQ); + } + qp->qp_ctx.qp_common.rq_ring.head = 0; + qp->qp_ctx.qp_common.rq_ring.tail = 0; + qp->qp_ctx.qp_common.sq_ring.head = 0; + qp->qp_ctx.qp_common.sq_ring.tail = 0; + qp->qp_ctx.qp_common.initial_ring.head = 0; + qp->qp_ctx.qp_common.initial_ring.tail = 0; + qp->qp_ctx.qp_common.rwqe_polarity = 0; + qp->qp_ctx.qp_common.swqe_polarity = 0; + qp->qp_ctx.qp_common.llwqe_mode = false; + memset(qp->kqp.dma_mem.va, 0, qp->kqp.dma_mem.size); + } + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "next qp state illegal, attr->qp_state(%d)\n", + attr->qp_state); + goto end; + } +end: + return ret; +} + +static int sxe2_qp_state_change(struct sxe2_rdma_qp *qp, + struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata, u8 *issue_modify_qp, + struct sxe2_modify_qp_resp *uresp) +{ + int ret = 0; + int tmp_ret = 0; + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_qp_host_ctx_info *ctx_info = &qp->ctx_info; + struct sxe2_modify_qp_req ucmd; + u32 db_page_id = 0; + u8 new_llwqe_page_alloc = false; + + if (attr_mask & IB_QP_STATE) { + ret = sxe2_qp_state_info_set(qp, attr, attr_mask, udata, + issue_modify_qp); + if (ret) + goto end; + } + ctx_info->send_cq_num = qp->send_cq->cq_ctx.cq_uk.cq_id; + ctx_info->rcv_cq_num = qp->recv_cq->cq_ctx.cq_uk.cq_id; + qp->qp_ctx.qp_state = attr->qp_state; + + if (udata) { + memset(&ucmd, 0, sizeof(ucmd)); + if (ib_copy_from_udata(&ucmd, udata, udata->inlen)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR("copy fail len %zu , ret (%d)\n", + udata->inlen, ret); + goto end; + } + db_page_id = ucmd.llwqe_page_index; + new_llwqe_page_alloc = ucmd.new_page_alloc; + ctx_info->llwqe_mod_enable = ucmd.llwqe_enable; + if (attr->qp_state == IB_QPS_RTS) { + if (new_llwqe_page_alloc) { + tmp_ret = db_kalloc_llwqe_mmap_entry( + rdma_dev, udata, qp, + &uresp->db_page_id, &uresp->db_mmap_size, + &uresp->db_mmap_offset); + if (tmp_ret) { + uresp->db_page_id = 0; + uresp->db_mmap_size = 0; + uresp->db_mmap_offset = 0; + DRV_RDMA_LOG_DEV_WARN( + "db_kalloc_llwqe_mmap_entry failed.\n"); + } + if (!db_page_id) { + ctx_info->llwqe_mod_enable = true; + db_page_id = uresp->db_page_id; + } + } + if (ctx_info->llwqe_mod_enable) { + if (rdma_dev->rdma_func->ctx_dev.privileged) { + ctx_info->llwqe_page_index = + (SXE2_PF_DB_PAGE_BAR_OFFSET( + db_page_id) >> + SXE2_RDMA_DB_PAGE_SHIFT); + } else { + ctx_info->llwqe_page_index = + (SXE2_VF_DB_PAGE_BAR_OFFSET( + db_page_id) >> + SXE2_RDMA_DB_PAGE_SHIFT); + } + } + } + if (qp->qp_ctx.qp_state == IB_QPS_ERR) { + sxe2_flush_wqes(qp, + (ucmd.sq_flush ? SXE2_FLUSH_SQ : 0) | + (ucmd.rq_flush ? 
SXE2_FLUSH_RQ : + 0) | + SXE2_REFLUSH); + } + } else if (!qp->user_mod) { + if (rdma_dev->kernel_llwqe_mode == 0) { + ctx_info->llwqe_mod_enable = false; + ctx_info->llwqe_page_index = 0; + qp->qp_ctx.qp_common.llwqe_mode = false; + } else { + ctx_info->llwqe_mod_enable = true; + qp->qp_ctx.qp_common.llwqe_mode = true; + if (rdma_dev->rdma_func->ctx_dev.privileged) { + ctx_info->llwqe_page_index = + (SXE2_PF_DB_PAGE_BAR_OFFSET( + qp->qp_ctx.qp_common.llwqe + ->index) >> + SXE2_RDMA_DB_PAGE_SHIFT); + } else { + ctx_info->llwqe_page_index = + (SXE2_VF_DB_PAGE_BAR_OFFSET( + qp->qp_ctx.qp_common.llwqe + ->index) >> + SXE2_RDMA_DB_PAGE_SHIFT); + } + } + } + if ((attr_mask & IB_QP_STATE) && *issue_modify_qp && + qp->qp_ctx.qp_state == IB_QPS_ERR && !qp->flush_issued) { + if ((udata && !ucmd.sq_flush && !ucmd.rq_flush) || !udata) + sxe2_flush_wqes(qp, SXE2_FLUSH_SQ | SXE2_FLUSH_RQ); + else + DRV_RDMA_LOG_DEV_DEBUG("data no flush condition\n"); + } +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_swState_jump", + &rdma_dev->rdma_func->mq.err_cqe_val, attr, qp); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_qpc_swState_jump"); +#endif + sxe2_ctx_qp_setctx_roce(qp, qp->host_ctx.va); + DRV_RDMA_LOG_DEV_DEBUG("modify qp%#x qpc print:\n", qp->ibqp.qp_num); + sxe2_print_qpc_info(rdma_dev, qp->host_ctx.va); +end: + return ret; +} + +int sxe2_kmodify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata) +{ + struct sxe2_rdma_pd *pd = to_pd(ibqp->pd); + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_qp_host_ctx_info *ctx_info = &qp->ctx_info; + u8 issue_modify_qp = 0; + int ret = 0; + struct sxe2_modify_qp_resp uresp = {0}; + unsigned long flags; + + ret = sxe2_check_modify_qp_attrs(attr, attr_mask, rdma_dev, udata); + if (ret) + goto end; + + ret = sxe2_modify_qp_roce_info_set(ibqp, attr, attr_mask); + if (ret) + goto end; + + sxe2_modify_qp_ctx_info_set(qp, pd, ctx_info); + + ret = sxe2_modify_qp_udp_info_set(ibqp, attr, attr_mask); + if (ret) + goto end; + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + ret = sxe2_rdma_encoding_rd_atomic(attr->max_rd_atomic, qp, + true); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("max_rd_atomic must be [0,64]\n"); + goto end; + } + } + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + ret = sxe2_rdma_encoding_rd_atomic(attr->max_dest_rd_atomic, qp, + false); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "max_dest_rd_atomic must be [0,64]\n"); + goto end; + } + } + + ret = sxe2_qp_state_change(qp, attr, attr_mask, udata, + &issue_modify_qp, &uresp); + if (ret) + goto end; + + if (attr_mask & IB_QP_STATE) { + if (issue_modify_qp) { + ret = sxe2_mq_modify_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_mq_modify_qp_cmd fail\n"); + goto end; + } + if (attr->qp_state == IB_QPS_SQD) { + ret = sxe2_wait_for_suspend(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "sxe2_wait_for_suspend fail, ret(%d)\n", + ret); + goto end; + } + } + spin_lock_irqsave(&qp->lock, flags); + qp->ibqp_state = attr->qp_state; + spin_unlock_irqrestore(&qp->lock, flags); + + if (qp->ibqp_state == IB_QPS_ERR && !qp->user_mod) + sxe2_sched_qp_flush_work(qp); + } else { + spin_lock_irqsave(&qp->lock, flags); + qp->ibqp_state = attr->qp_state; + spin_unlock_irqrestore(&qp->lock, flags); + } + + if (udata) { + uresp.rd_fence_rate = rdma_dev->rd_fence_rate; + ret = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), + udata->outlen)); + if (ret) { + 
DRV_RDMA_LOG_DEV_ERR( + "ib_copy_to_udata failed , err(%d)\n", + ret); + goto end; + } + } + } + +end: + return ret; +} + +static u8 drv_sxe2_rate_to_ib(struct sxe2_rdma_qp *qp) +{ + u8 state_rate = 0; + + switch (qp->ctx_info.roce_info->state_rate) { + case SXE2_IB_RATE_TAG_0: + state_rate = IB_RATE_PORT_CURRENT; + break; + case SXE2_IB_RATE_TAG_1: + state_rate = IB_RATE_2_5_GBPS; + break; + case SXE2_IB_RATE_TAG_2: + state_rate = IB_RATE_10_GBPS; + break; + case SXE2_IB_RATE_TAG_3: + state_rate = IB_RATE_25_GBPS; + break; + case SXE2_IB_RATE_TAG_4: + state_rate = IB_RATE_40_GBPS; + break; +#ifndef NOT_SURPORT_50_GBPS + case SXE2_IB_RATE_TAG_5: + state_rate = IB_RATE_50_GBPS; + break; +#else + case SXE2_IB_RATE_TAG_5: + state_rate = IB_RATE_56_GBPS; + break; +#endif + + case SXE2_IB_RATE_TAG_6: + state_rate = IB_RATE_60_GBPS; + break; + case SXE2_IB_RATE_TAG_7: + state_rate = IB_RATE_80_GBPS; + break; + case SXE2_IB_RATE_TAG_8: + state_rate = IB_RATE_100_GBPS; + break; + default: + break; + } + + return state_rate; +} + +int sxe2_kquery_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int qp_attr_mask, struct ib_qp_init_attr *init_attr) +{ + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_ctx_qp *qp_ctx = &qp->qp_ctx; + + memset(attr, 0, sizeof(*attr)); + memset(init_attr, 0, sizeof(*init_attr)); + + attr->qp_state = qp->ibqp_state; + attr->cur_qp_state = qp->ibqp_state; + attr->cap.max_send_wr = qp->max_send_wr; + attr->cap.max_recv_wr = qp->max_recv_wr; + attr->cap.max_inline_data = qp_ctx->qp_common.max_inline_data; + attr->cap.max_send_sge = qp_ctx->qp_common.max_sq_sge_cnt; + attr->cap.max_recv_sge = qp_ctx->qp_common.max_rq_sge_cnt; + + attr->qp_access_flags = + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); + + attr->port_num = 1; + attr->path_mtu = qp->udp_info.pmtu; + attr->qkey = qp->roce_info.qkey; + attr->rq_psn = qp->udp_info.rq_psn; + attr->sq_psn = qp->udp_info.sq_psn; + attr->dest_qp_num = qp->roce_info.dest_qp; + attr->pkey_index = qp->roce_info.p_key; + + attr->retry_cnt = qp->udp_info.retry_cnt; + attr->rnr_retry = qp->udp_info.rnr_retry; + attr->min_rnr_timer = qp->udp_info.min_rnr_timer; + + attr->max_rd_atomic = + sxe2_rdma_decoding_rd_atomic(qp->roce_info.log_sra_max); + attr->max_dest_rd_atomic = + sxe2_rdma_decoding_rd_atomic(qp->roce_info.log_rra_max); + + memcpy(&attr->ah_attr, &qp->roce_ah.av.attrs, sizeof(attr->ah_attr)); + attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; + attr->ah_attr.static_rate = drv_sxe2_rate_to_ib(qp); + + attr->timeout = qp->udp_info.ack_timeout; + init_attr->qp_type = qp->ibqp.qp_type; + + init_attr->event_handler = qp->ibqp.event_handler; + init_attr->qp_context = qp->ibqp.qp_context; + init_attr->send_cq = qp->ibqp.send_cq; + init_attr->recv_cq = qp->ibqp.recv_cq; + init_attr->srq = qp->ibqp.srq; + init_attr->cap = attr->cap; + + return 0; +} + +static int sxe2_destroy_cc_qp(struct sxe2_rdma_device *rdma_dev, u32 cc_qp_idx) +{ + int ret = SXE2_OK; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct sxe2_destroy_cc_qp_info info; + + if (cc_qp_idx >= SXE2_MAX_CC_QP_IDX) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR_BDF("qp:destroy cc qp idx err idx=%u\n", + cc_qp_idx); + goto end; + } + DRV_RDMA_LOG_DEBUG_BDF("qp:destroy cc qp idx=%u\n", cc_qp_idx); + info.cc_qp_idx = cpu_to_le32(cc_qp_idx); + + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_DESTROY_CC_QP, + (u8 *)&info, (u16)sizeof(info), + NULL, 0); + if (ret) { + DRV_RDMA_LOG_ERROR_BDF("qp:aq send destroy cc qp err 
ret=%d\n", + ret); + goto end; + } + +end: + return ret; +} + + #ifdef DESTROY_QP_V1 +int sxe2_kdestroy_qp(struct ib_qp *ibqp) + #else +int sxe2_kdestroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +#endif +{ + struct sxe2_rdma_qp *qp = to_qp(ibqp); + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct sxe2_rdma_pci_f *rf = rdma_dev->rdma_func; + int ret = 0; +#ifndef HAVE_NO_REF_COUNT + DRV_RDMA_LOG_DEV_DEBUG("qp:kdestroy qp start, ibdev refcount=%u\n", + refcount_read(&rdma_dev->ibdev.refcount)); +#endif + if (qp->qp_ctx.qp_common.destroy_pending) + goto free_rsrc; + qp->qp_ctx.qp_common.destroy_pending = true; + + if (!qp->user_mod) { + if (qp->send_cq) { + sxe2_clean_cqes(qp, qp->send_cq, SQ_CQ); + if ((qp->recv_cq) && (qp->recv_cq != qp->send_cq)) + sxe2_clean_cqes(qp, qp->recv_cq, RQ_CQ); + } + } + + sxe2_qp_rem_ref(&qp->ibqp); + wait_for_completion(&qp->free_qp); + + if (qp->sw_cc_enable) { + DRV_RDMA_LOG_DEBUG_BDF("qp:destroy cc qp idx %u\n", + qp->sw_cc_idx); + ret = sxe2_destroy_cc_qp(rdma_dev, qp->sw_cc_idx); + if (ret) + DRV_RDMA_LOG_ERROR_BDF("qp:%d destroy cc qp failed\n", + qp->qp_ctx.qp_common.qpn); + } + + ret = sxe2_mq_destroy_qp_cmd(qp); + if (ret) + DRV_RDMA_LOG_DEV_ERR("qp:%d destroy failed\n", + qp->qp_ctx.qp_common.qpn); + +free_rsrc: + if (qp->sw_cc_enable) { + mutex_lock(&rf->cc_refcount.refcount_lock); + atomic_dec(&rf->cc_refcount.cc_qp_refcount); + mutex_unlock(&rf->cc_refcount.refcount_lock); + } + drv_rdma_debug_qp_remove(rdma_dev, qp); + if (qp->qp_ctx.vsi) { + (void)sxe2_qos_qp_rem_qos(&rdma_dev->vsi, &qp->qp_ctx); + if (rdma_dev->vsi.lag_aa) + sxe2_qos_unregister_qset_bond(&rdma_dev->vsi, + qp->qp_ctx.user_pri); + else + sxe2_qos_unregister_qset(&rdma_dev->vsi, + qp->qp_ctx.user_pri); + } +#ifndef DESTROY_QP_V1 + sxe2_free_qp_buf(qp, &rdma_dev->rdma_func->ctx_dev, udata); +#endif + if (qp->ibqp.qp_type != IB_QPT_GSI) { + sxe2_kfree_rsrc(rdma_dev->rdma_func, + rdma_dev->rdma_func->allocated_qps, + qp->ibqp.qp_num); + } + + if (qp->qp_ctx.hw_host_ctx != NULL) { + dma_free_coherent(rdma_dev->rdma_func->ctx_dev.hw->device, + ALIGN(SXE2_QP_CTX_SIZE, 256), + qp->qp_ctx.hw_host_ctx, + qp->qp_ctx.hw_host_ctx_pa); + qp->qp_ctx.hw_host_ctx = NULL; + } +#ifdef CREATE_QP_VER_1 + kfree(qp); +#endif + return 0; +} + +void sxe2_ib_qp_event(struct sxe2_rdma_qp *qp, enum sxe2_qp_event_type event) +{ + struct ib_event ibevent; + + if (!qp->ibqp.event_handler) + goto out; + + switch (event) { + case SXE2_QP_EVENT_CATASTROPHIC: + ibevent.event = IB_EVENT_QP_FATAL; + break; + case SXE2_QP_EVENT_ACCESS_ERR: + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + break; + case SXE2_QP_EVENT_REQ_ERR: + ibevent.event = IB_EVENT_QP_REQ_ERR; + break; + case SXE2_QP_EVENT_COMM_EST: + ibevent.event = IB_EVENT_COMM_EST; + break; + case SXE2_QP_EVENT_QP_LASTWQE_REACHED: + ibevent.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + default: + break; + } + + ibevent.device = qp->ibqp.device; + ibevent.element.qp = &qp->ibqp; + qp->ibqp.event_handler(&ibevent, qp->ibqp.qp_context); + +out: + return; +} + +static void sxe2_qp_cm_disconn_true(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_qp *qp_ctx = &qp->qp_ctx; + unsigned long flags = 0; + struct ib_qp_attr attr; + + rdma_dev = qp->dev; + spin_lock_irqsave(&qp->lock, flags); + + if ((qp->flush_issued) || (qp_ctx->qp_common.destroy_pending)) { + spin_unlock_irqrestore(&qp->lock, flags); + goto out; + } + + spin_unlock_irqrestore(&qp->lock, flags); + attr.qp_state = IB_QPS_ERR; + sxe2_kmodify_qp(&qp->ibqp, 
&attr, IB_QP_STATE, NULL); + sxe2_ib_qp_event(qp, qp_ctx->event_type); + +out: + return; +} + +static void sxe2_qp_disconnect_worker(struct work_struct *work) +{ + struct qp_disconn_work *dwork = + container_of(work, struct qp_disconn_work, work); + struct sxe2_rdma_qp *qp = dwork->qp; + + kfree(dwork); + sxe2_qp_cm_disconn_true(qp); + sxe2_qp_rem_ref(&qp->ibqp); +} + +void sxe2_cm_disconn(struct sxe2_rdma_qp *qp) +{ + struct sxe2_rdma_device *rdma_dev = qp->dev; + struct qp_disconn_work *work; + unsigned long flags = 0; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + DRV_RDMA_LOG_DEV_ERR("qp disconn work alloc failed.\n"); + goto out; + } + + spin_lock_irqsave(&rdma_dev->rdma_func->qptable_lock, flags); + if (!rdma_dev->rdma_func->qp_table[qp->ibqp.qp_num]) { + spin_unlock_irqrestore(&rdma_dev->rdma_func->qptable_lock, + flags); + DRV_RDMA_LOG_DEV_WARN("qp:%d is already freed.\n", + qp->ibqp.qp_num); + kfree(work); + goto out; + } + + sxe2_qp_add_ref(&qp->ibqp); + spin_unlock_irqrestore(&rdma_dev->rdma_func->qptable_lock, flags); + + work->qp = qp; + INIT_WORK(&work->work, sxe2_qp_disconnect_worker); + queue_work(rdma_dev->cleanup_wq, &work->work); + +out: + return; +} + +static int sxe2_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) +{ + int ret = 0; + + switch (frag_cnt) { + case MAX_SGE_SIZE_0: + case MAX_SGE_SIZE_1: + *wqe_size = RQE_SIZE_32BYTE; + break; + case MAX_SGE_SIZE_2: + case MAX_SGE_SIZE_3: + *wqe_size = RQE_SIZE_64BYTE; + break; + case MAX_SGE_SIZE_4: + case MAX_SGE_SIZE_5: + case MAX_SGE_SIZE_6: + case MAX_SGE_SIZE_7: + *wqe_size = RQE_SIZE_128BYTE; + break; + case MAX_SGE_SIZE_8: + case MAX_SGE_SIZE_9: + case MAX_SGE_SIZE_10: + case MAX_SGE_SIZE_11: + case MAX_SGE_SIZE_12: + case MAX_SGE_SIZE_13: + case MAX_SGE_SIZE_14: + *wqe_size = RQE_SIZE_256BYTE; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int sxe2_kset_attr_from_fragcnt(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *ctx_dev = &rdma_dev->rdma_func->ctx_dev; + u8 max_fragcnt = rdma_dev->rdma_func->fragcnt_limit; + u32 max_inline; + u16 max_wqesz, max_quanta_per_wr; + + ret = sxe2_fragcnt_to_wqesize_rq(max_fragcnt + 1, &max_wqesz); + if (ret) + goto end; + + max_inline = ctx_dev->hw_attrs.uk_attrs.max_hw_inline; + switch (max_wqesz) { + case RQE_SIZE_32BYTE: + if (max_inline > MAX_INLINE_DATA_8) + max_inline = MAX_INLINE_DATA_8; + break; + case RQE_SIZE_64BYTE: + if (max_inline > MAX_INLINE_DATA_39) + max_inline = MAX_INLINE_DATA_39; + break; + case RQE_SIZE_128BYTE: + if (max_inline > MAX_INLINE_DATA_101) + max_inline = MAX_INLINE_DATA_101; + break; + case RQE_SIZE_256BYTE: + default: + break; + } + + max_quanta_per_wr = (max_wqesz / QP_QUANTA_SIZE); + + ctx_dev->hw_attrs.uk_attrs.max_hw_wq_frags = max_fragcnt; + ctx_dev->hw_attrs.uk_attrs.max_hw_read_sges = max_fragcnt; + ctx_dev->hw_attrs.uk_attrs.max_hw_inline = max_inline; + ctx_dev->hw_attrs.max_qp_wr = SXE2_MAX_QP_WRS(max_quanta_per_wr); + +end: + return ret; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.h new file mode 100644 index 0000000000000000000000000000000000000000..6271388767ea1e04a83f173e31603190cca14bc0 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp.h @@ -0,0 +1,325 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
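+ *
+ * Declares the kernel-verbs QP entry points (create/modify/query/destroy),
+ * the QP context (QPC) constants, rate and congestion-control encodings,
+ * and the init/info structures shared by the sxe2 RDMA QP implementation.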
+ * + * @file: sxe2_drv_qp.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DRV_QP_H__ +#define __SXE2_DRV_QP_H__ + +#include "sxe2_compat.h" + +#define MIN_SQ_SIZE (8) +#define MIN_RQ_SIZE (8) +#define SQ_RSV_SIZE (8) +#define RQ_RSV_SIZE (8) +#define QP_QUANTA_SIZE (32) +#define SXE2_DEFAULT_PKEY (0xFFFF) +#define SXE2_INVALID_RD_ATOMIC (256) +#define SXE2_MIN_LOG_RD_ATOMIC (0) +#define SXE2_MAX_RD_ATOMIC (64) +#define SXE2_MIN_RD_ATOMIC (16) +#define SXE2_EVENT_TIMEOUT_MS (5000) +#define SXE2_QP_CTX_SIZE (512) +#define SXE2_FLUSH_DELAY_MS (20) +#define SXE2_QP_TYPE_RDMA_RC (3) +#define SXE2_QP_TYPE_RDMA_UD (4) +#define SXE2_QP_TYPE_RDMA_QP1 (7) +#define SXE2_QP_CC_CNP_ECN_ENABLE (1) +#define SXE2_QP_CC_CNP_ECN_DISABLE (0) +#define SXE2_QP_CC_ECN_ENABLE (1) +#define SXE2_QP_CC_ECN_DISABLE (0) +#define SXE2_QP_DCQCN_MAX_B (0xFFFFFF) +#define SXE2_MAX_CC_QP_IDX (4096) + +#define SXE2_USER_PARM_TO_HW_VAL(val) (val / 2 - 1) +#define SXE2_PF_DB_PAGE_BAR_OFFSET(idx) (0x3F0000 + (idx) * 0x1000) +#define SXE2_VF_DB_PAGE_BAR_OFFSET(idx) (0x010000 + (idx) * 0x1000) + +#ifndef ether_addr_copy +#define ether_addr_copy(mac_addr, new_mac_addr) memcpy(mac_addr, new_mac_addr, ETH_ALEN) +#endif + +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) + +#define SXE2_RETRY_MODE_GO_BACK_0 0 +#define SXE2_RETRY_MODE_GO_BACK_N 1 + +enum sxe2_rdma_flush_op { + SXE2_FLUSH_SQ = BIT(0), + SXE2_FLUSH_RQ = BIT(1), + SXE2_REFLUSH = BIT(2), +}; + +enum sxe2_rdma_encoding_read_max { + SXE2_ENCODING_READ_0 = 0, + SXE2_ENCODING_READ_1 = 1, + SXE2_ENCODING_READ_2 = 2, + SXE2_ENCODING_READ_3 = 3, + SXE2_ENCODING_READ_4 = 4, + SXE2_ENCODING_READ_5 = 5, + SXE2_ENCODING_READ_6 = 6, + SXE2_ENCODING_READ_7 = 7, + SXE2_ENCODING_READ_8 = 8, +}; + +#define SXE2_QP_DISABLE_SW_CC 0 +#define SXE2_QP_ENABLE_DCQCN 1 +#define SXE2_QP_ENABLE_TIMELY 2 +#define SXE2_QP_ENABLE_DCQCN_TIMLEY 3 + +#define DCQCN_B_L_MASK 0x3FFF +#define DCQCN_B_H_SHIFT 14 +#define DCQCN_T_INTERVAL_L_MASK 0xF +#define DCQCN_T_INTERVAL_H_SHIFT 4 +#define DCQCN_RHAI_L_MASK 0xFF +#define DCQCN_RHAI_H_SHIFT 8 +#define DCQCN_RREDUCE_MPERIOD_L_MASK 0xFF +#define DCQCN_RREDUCE_MPERIOD_H_SHIFT 8 +#define DCQCN_RC_L_MASK 0xFFFF +#define DCQCN_RC_H_SHIFT 16 +#define DCQCN_RT_L_MASK 0xF +#define DCQCN_RT_H_SHIFT 4 + +#define TIMELY_MIN_RTT_L_MASK 0xF +#define TIMELY_MIN_RTT_H_SHIFT 4 +#define TIMELY_THIGH_L_MASK 0xF +#define TIMELY_THIGH_H_SHIFT 4 +#define TIMELY_PRE_RTT_L_MASK 0xFF +#define TIMELY_PRE_RTT_H_SHIFT 8 + +#define STATA_RATE_MAX_CNT 30 +enum sxe2_stat_rate_rel_val { + SXE2_RATE_2_5_GBPS = 2500, + SXE2_RATE_5_GBPS = 5000, + SXE2_RATE_10_GBPS = 10000, + SXE2_RATE_20_GBPS = 20000, + SXE2_RATE_30_GBPS = 30000, + SXE2_RATE_40_GBPS = 40000, + SXE2_RATE_60_GBPS = 60000, + SXE2_RATE_80_GBPS = 80000, + SXE2_RATE_120_GBPS = 120000, + SXE2_RATE_14_GBPS = 14000, + SXE2_RATE_56_GBPS = 56000, + SXE2_RATE_112_GBPS = 112000, + SXE2_RATE_168_GBPS = 168000, + SXE2_RATE_25_GBPS = 25000, + SXE2_RATE_100_GBPS = 100000, + SXE2_RATE_200_GBPS = 200000, + SXE2_RATE_300_GBPS = 300000, + SXE2_RATE_28_GBPS = 28000, + SXE2_RATE_50_GBPS = 50000, + SXE2_RATE_400_GBPS = 400000, + SXE2_RATE_600_GBPS = 600000, +}; + +enum sxe2_ib_rate { + SXE2_IB_RATE_PORT_CURRENT = 0, + SXE2_IB_RATE_2_5_GBPS = 2, + SXE2_IB_RATE_5_GBPS = 5, + SXE2_IB_RATE_10_GBPS = 3, + SXE2_IB_RATE_20_GBPS = 6, + SXE2_IB_RATE_30_GBPS = 4, + SXE2_IB_RATE_40_GBPS = 7, + SXE2_IB_RATE_60_GBPS = 8, + SXE2_IB_RATE_80_GBPS = 9, + SXE2_IB_RATE_120_GBPS = 10, + SXE2_IB_RATE_14_GBPS = 11, + 
SXE2_IB_RATE_56_GBPS = 12, + SXE2_IB_RATE_112_GBPS = 13, + SXE2_IB_RATE_168_GBPS = 14, + SXE2_IB_RATE_25_GBPS = 15, + SXE2_IB_RATE_100_GBPS = 16, + SXE2_IB_RATE_200_GBPS = 17, + SXE2_IB_RATE_300_GBPS = 18, + SXE2_IB_RATE_28_GBPS = 19, + SXE2_IB_RATE_50_GBPS = 20, + SXE2_IB_RATE_400_GBPS = 21, + SXE2_IB_RATE_600_GBPS = 22, +}; + +enum sxe2_link_get_speed { + SXE2_LINK_SPEED_UNKNOWN = 0, + SXE2_LINK_SPEED_10G = 10000, + SXE2_LINK_SPEED_25G = 25000, + SXE2_LINK_SPEED_50G = 50000, + SXE2_LINK_SPEED_100G = 100000, + SXE2_LINK_SPEED_AUTO = 200000, +}; + +enum sxe2_qp_bucket_type { + SXE2_QP_BUCKET_4K = 0, + SXE2_QP_BUCKET_8K = 1, + SXE2_QP_BUCKET_16K = 2, + SXE2_QP_BUCKET_32K = 3, + SXE2_QP_BUCKET_64K = 4, + SXE2_QP_BUCKET_128K = 5, +}; + +enum sxe2_log_msg { + SXE2_LOG_MSG_8 = 8, + SXE2_LOG_MSG_12 = 12, + SXE2_LOG_MSG_31 = 31, +}; + +enum sxe2_rq_type { + SXE2_REGULAR_RQ = 0, + SXE2_SRQ = 1, + SXE2_NO_RQ = 3, +}; + +enum inline_data_size { + MAX_INLINE_DATA_8 = 8, + MAX_INLINE_DATA_39 = 39, + MAX_INLINE_DATA_101 = 101, +}; + +enum sge_size { + MAX_SGE_SIZE_0 = 0, + MAX_SGE_SIZE_1, + MAX_SGE_SIZE_2, + MAX_SGE_SIZE_3, + MAX_SGE_SIZE_4, + MAX_SGE_SIZE_5, + MAX_SGE_SIZE_6, + MAX_SGE_SIZE_7, + MAX_SGE_SIZE_8, + MAX_SGE_SIZE_9, + MAX_SGE_SIZE_10, + MAX_SGE_SIZE_11, + MAX_SGE_SIZE_12, + MAX_SGE_SIZE_13, + MAX_SGE_SIZE_14, +}; + +enum wqe_size_shift { + WQE_SIZE_32BYTE = 0, + WQE_SIZE_64BYTE, + WQE_SIZE_128BYTE, + WQE_SIZE_256BYTE, +}; + +enum rqe_size { + RQE_SIZE_32BYTE = 32, + RQE_SIZE_64BYTE = 64, + RQE_SIZE_128BYTE = 128, + RQE_SIZE_256BYTE = 256, +}; + +enum sxe2_ib_rate_tag { + SXE2_IB_RATE_TAG_0 = 0, + SXE2_IB_RATE_TAG_1 = 1, + SXE2_IB_RATE_TAG_2 = 2, + SXE2_IB_RATE_TAG_3 = 3, + SXE2_IB_RATE_TAG_4 = 4, + SXE2_IB_RATE_TAG_5 = 5, + SXE2_IB_RATE_TAG_6 = 6, + SXE2_IB_RATE_TAG_7 = 7, + SXE2_IB_RATE_TAG_8 = 8, +}; + +struct sxe2_destroy_cc_qp_info { + u32 cc_qp_idx; +}; + +struct sxe2_qp_common_init_info { + struct sxe2_qp_quanta *sq; + struct sxe2_qp_quanta *rq; + struct sxe2_srq_drv *srq; + struct sxe2_common_attrs *common_attrs; + __u32 *qp_db_no_llwqe; + __u32 *doorbell_note; + struct sxe2_sq_common_wr_trk_info *sq_wrtrk_array; + __u64 *rq_wrid_array; + __u32 qpn; + __u32 qp_caps; + __u32 sq_size; + __u32 rq_size; + __u32 max_sq_sge_cnt; + __u32 max_rq_sge_cnt; + __u32 max_inline_data; + __u32 sq_depth; + __u32 rq_depth; + __u8 qp_type; + __u8 sq_shift; + __u8 rq_shift; + bool legacy_mode; +}; + +struct sxe2_qp_init_info { + struct sxe2_qp_common_init_info qp_common_init_info; + struct sxe2_rdma_ctx_pd *pd; + struct sxe2_rdma_ctx_vsi *vsi; + __u64 *host_ctx; + __u64 host_ctx_pa; + __u64 sq_pa; + __u64 rq_pa; + __u64 shadow_area_pa; +}; + +struct qp_disconn_work { + struct work_struct work; + struct sxe2_rdma_qp *qp; +}; + +int drv_rdma_debug_qp_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp); +void drv_rdma_debug_qp_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp); + +#ifdef CREATE_QP_VER_1 +struct ib_qp *sxe2_kcreate_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +#else +int sxe2_kcreate_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +#endif +int sxe2_kmodify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int sxe2_kquery_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int qp_attr_mask, struct ib_qp_init_attr *init_attr); +#ifdef DESTROY_QP_V1 +int sxe2_kdestroy_qp(struct ib_qp *ibqp); +#else +int sxe2_kdestroy_qp(struct ib_qp *ibqp, 
struct ib_udata *udata); +#endif +int sxe2_ctx_qp_create(struct sxe2_rdma_ctx_qp *qp, struct sxe2_create_qp_info *info, + u64 scratch, bool post_sq); +int sxe2_ctx_qp_modify(struct sxe2_rdma_ctx_qp *qp, struct sxe2_modify_qp_info *info, + u64 scratch, bool post_sq); +int sxe2_ctx_qp_destroy(struct sxe2_rdma_ctx_qp *qp, struct sxe2_destroy_qp_info *info, + u64 scratch, bool post_sq); +int sxe2_ctx_qp_query(struct sxe2_rdma_ctx_qp *qp, struct sxe2_query_qp_info *info, + u64 scratch, bool post_sq); +int sxe2_mq_query_qp_cmd(struct sxe2_rdma_qp *qp); +int sxe2_mq_modify_qp_cmd(struct sxe2_rdma_qp *qp); + +void sxe2_cm_disconn(struct sxe2_rdma_qp *qp); + +void sxe2_ib_qp_event(struct sxe2_rdma_qp *qp, enum sxe2_qp_event_type event); + +int sxe2_kset_attr_from_fragcnt(struct sxe2_rdma_device *rdma_dev); + +void sxe2_ctx_qp_fill_cc_wqe(struct sxe2_rdma_ctx_qp *qp, + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe); +int sxe2_qp_info_init(struct ib_qp_init_attr *init_attr, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_dev *dev, struct sxe2_rdma_pd *pd, + u32 qpn, struct sxe2_qp_init_info *init_info); + +int sxe2_rdma_qp_init(struct ib_qp_init_attr *init_attr, struct ib_udata *udata, + struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_pd *pd, u32 qpn, + struct sxe2_qp_init_info *init_info, + struct sxe2_rdma_qp *qp); +int sxe2_qp_qset_init_and_resp(struct ib_udata *udata, + struct sxe2_rdma_device *rdma_dev, u32 qpn, + struct sxe2_rdma_ctx_qp *qp_ctx, + struct sxe2_rdma_qp *qp); + +#endif + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..7044eeca3c166c9a0a3fbf2db4d43011129446f7 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
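+ *
+ * Debugfs accessors for the hardware QP context (QPC). The read path
+ * queries the QPC through the management queue and renders it segment by
+ * segment (SW_CFG, WQE_PROC, TX_ENG, RX_INI, RX_TGT, RX_PKT_PROC and
+ * HW_ID); the write path patches one named field at a time, parsing the
+ * input as "<field_name> <decimal_value>" (for example "qp_state 3") and
+ * matching the name against g_qp_fields.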
+ *
+ * @file: sxe2_drv_qp_debugfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_qp.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_debugfs.h"
+#include "sxe2_drv_qp_debugfs.h"
+
+enum qpc_e {
+    ACK_TIMEOUT = 0,
+    RETRY_MODE,
+    CREDIT_EN,
+    COMM_EST,
+    RQE_LWM,
+    RETRY_RESP_OP_SEL,
+    LOG_RTM,
+    QP_STATE,
+    DQPN,
+    QP_TYPE,
+    LLWQE_PAGE_INDEX,
+    LLWQE_MODE_ENABLE,
+    LOG_MSG_MAX,
+    PMTU,
+    QP_PAGE_OFFSET,
+    LOG_RQ_SIZE,
+    LOG_SQ_SIZE,
+    CQN_SND,
+    LOG_RQ_STRIDE,
+    LOG_PAGE_SIZE,
+    CQN_RCV,
+    MIN_RNR_NAK,
+    FL,
+    SRQN,
+    BUCKET_TYPE,
+    STAT_RATE,
+    RQ_TYPE,
+    IPV4,
+    PD,
+    LOG_RRA_MAX,
+    LOG_SRA_MAX,
+    SEND_START_PSN,
+    TMO_RETRY,
+    UDPRIV_CQENABLE,
+    RNR_RETRY,
+    RCV_START_PSN,
+    ACK_MODE,
+    PKEY_INDEX,
+    SQ_FLUSH_FLAG,
+    RQ_FLUSH_FLAG,
+    LOG_ACK_REQ_FREQ,
+    HOP_LIMIT,
+    Q_KEY,
+    SMAC_LOW,
+    SMAC_HIGH,
+    SRC_PORT_NUM,
+    RMAC_LOW,
+    RMAC_HIGH,
+    DEST_PORT_NUM,
+    QP_CTX,
+    SQ_TPH_VAL,
+    RQ_TPH_VAL,
+    SQ_TPH_EN,
+    RQ_TPH_EN,
+    XMIT_TPH_EN,
+    RCV_TPH_EN,
+    CNP_DSCP,
+    CNP_ECN,
+    FLOW_LABEL,
+    DSCP,
+    ECN,
+    DEST_IP3,
+    DEST_IP2,
+    DEST_IP1,
+    DEST_IP0,
+    LOCAL_IP3,
+    LOCAL_IP2,
+    LOCAL_IP1,
+    LOCAL_IP0,
+    DBR_ADDR,
+    PBL_POINTER,
+    TPID_IDX,
+    WQ_ACCESS_MODE,
+    USE_STATS,
+    STATS_INDEX,
+    INSERT_VLAN_TAG,
+    VLAN_TAG,
+    SRC_VSI,
+    DISPATCH_MIN_UNIT,
+    DISPATCH_MODE,
+};
+
+#ifdef SXE2_CFG_DEBUG
+static char *g_qp_fields[] = {
+    [ACK_TIMEOUT] = "ack_timeout",
+    [RETRY_MODE] = "retry_mode",
+    [CREDIT_EN] = "credit_en",
+    [COMM_EST] = "comm_est",
+    [RQE_LWM] = "rqe_lwm",
+    [QP_STATE] = "qp_state",
+    [DQPN] = "dqpn",
+    [QP_TYPE] = "qp_type",
+    [LLWQE_PAGE_INDEX] = "llwqe_page_index",
+    [LLWQE_MODE_ENABLE] = "llwqe_mode_enable",
+    [RETRY_RESP_OP_SEL] = "retry_resp_op_sel",
+    [LOG_RTM] = "log_rtm",
+    [LOG_MSG_MAX] = "log_msg_max",
+    [PMTU] = "pmtu",
+    [QP_PAGE_OFFSET] = "page_offset",
+    [LOG_RQ_SIZE] = "log_rq_size",
+    [LOG_SQ_SIZE] = "log_sq_size",
+    [CQN_SND] = "cqn_snd",
+    [LOG_RQ_STRIDE] = "log_rq_stride",
+    [LOG_PAGE_SIZE] = "log_page_size",
+    [CQN_RCV] = "cqn_rcv",
+    [MIN_RNR_NAK] = "min_rnr_nak",
+    [FL] = "fl",
+    [SRQN] = "srqn",
+    [BUCKET_TYPE] = "bucket_type",
+    [STAT_RATE] = "stat_rate",
+    [RQ_TYPE] = "rq_type",
+    [IPV4] = "ipv4",
+    [PD] = "pd",
+    [LOG_RRA_MAX] = "log_rra_max",
+    [LOG_SRA_MAX] = "log_sra_max",
+    [SEND_START_PSN] = "send_start_psn",
+    [TMO_RETRY] = "tmo_retry",
+    [UDPRIV_CQENABLE] = "udpriv_cqenable",
+    [RNR_RETRY] = "rnr_retry",
+    [RCV_START_PSN] = "rcv_start_psn",
+    [ACK_MODE] = "ack_mode",
+    [PKEY_INDEX] = "pkey_index",
+    [SQ_FLUSH_FLAG] = "sq_flush_flag",
+    [RQ_FLUSH_FLAG] = "rq_flush_flag",
+    [LOG_ACK_REQ_FREQ] = "log_ack_req_freq",
+    [HOP_LIMIT] = "hop_limit",
+    [Q_KEY] = "q_key",
+    [SMAC_LOW] = "smac_low",
+    [SMAC_HIGH] = "smac_high",
+    [SRC_PORT_NUM] = "src_port_num",
+    [RMAC_LOW] = "rmac_low",
+    [RMAC_HIGH] = "rmac_high",
+    [DEST_PORT_NUM] = "dest_port_num",
+    [QP_CTX] = "qp_completion_context",
+    [SQ_TPH_VAL] = "sq_tph_value",
+    [RQ_TPH_VAL] = "rq_tph_value",
+    [SQ_TPH_EN] = "sq_tph_en",
+    [RQ_TPH_EN] = "rq_tph_en",
+    [XMIT_TPH_EN] = "xmit_tph_en",
+    [RCV_TPH_EN] = "rcv_tph_en",
+    [CNP_DSCP] = "cnp_dscp",
+    [CNP_ECN] = "cnp_ecn",
+    [FLOW_LABEL] = "flow_label",
+    [DSCP] = "dscp",
+    [ECN] = "ecn",
+    [DEST_IP3] = "dest_ip_addr_3",
+    [DEST_IP2] = "dest_ip_addr_2",
+    [DEST_IP1] = "dest_ip_addr_1",
+    [DEST_IP0] = "dest_ip_addr_0",
+    [LOCAL_IP3] = "local_ip_addr_3",
+    [LOCAL_IP2] = "local_ip_addr_2",
+    [LOCAL_IP1] = "local_ip_addr_1",
+    [LOCAL_IP0] = "local_ip_addr_0",
+    [DBR_ADDR] = "dbr_addr",
+    [PBL_POINTER] = 
"pbl_pointer", + [TPID_IDX] = "tpid_index", + [WQ_ACCESS_MODE] = "wq_access_mode", + [USE_STATS] = "use_statistics_instance", + [STATS_INDEX] = "statistics_instance_index", + [INSERT_VLAN_TAG] = "insert_vlan_tag", + [VLAN_TAG] = "vlan_tag", + [SRC_VSI] = "src_vsi", + [DISPATCH_MIN_UNIT] = "dispatch_min_unit", + [DISPATCH_MODE] = "dispatch_mode", +}; +#endif + +void drv_rdma_qp_read_field_seg0(char *buf, size_t *len, struct sxe2_qpc *qpc) +{ + *len += dbg_vsnprintf(buf, *len, "SEG0(SW_CFG)\n"); + *len += dbg_vsnprintf(buf, *len, + "ack_timeout: %llu\n", + qpc->ack_timeout); + *len += dbg_vsnprintf(buf, *len, + "retry_mode: %llu\n", + qpc->retry_mode); + *len += dbg_vsnprintf(buf, *len, + "credit_en: %llu\n", + qpc->credit_en); + *len += dbg_vsnprintf(buf, *len, + "comm_est: %llu\n", + qpc->comm_est); + *len += dbg_vsnprintf(buf, *len, + "rqe_lwm: %llu\n", + qpc->rqe_lwm); + *len += dbg_vsnprintf(buf, *len, + "qp_state: %llu\n", + qpc->qp_state); + *len += dbg_vsnprintf(buf, *len, "dqpn: %llu\n", + qpc->dqpn); + *len += dbg_vsnprintf(buf, *len, + "qp_type: %llu\n", + qpc->qp_type); + *len += dbg_vsnprintf(buf, *len, + "llwqe_page_index: %llu\n", + qpc->llwqe_page_index); + *len += dbg_vsnprintf(buf, *len, + "llwqe_mode_enable: %llu\n", + qpc->llwqe_mode_enable); + *len += dbg_vsnprintf(buf, *len, + "retry_resp_op_sel: %llu\n", + qpc->retry_resp_op_sel); + *len += dbg_vsnprintf(buf, *len, + "log_rtm: %llu\n", + qpc->log_rtm); + *len += dbg_vsnprintf(buf, *len, + "log_msg_max: %llu\n", + qpc->log_msg_max); + *len += dbg_vsnprintf(buf, *len, "pmtu: %llu\n", + qpc->pmtu); + *len += dbg_vsnprintf(buf, *len, + "page_offset: %llu\n", + qpc->page_offset); + *len += dbg_vsnprintf(buf, *len, + "log_rq_size: %llu\n", + qpc->log_rq_size); + *len += dbg_vsnprintf(buf, *len, + "log_sq_size: %llu\n", + qpc->log_sq_size); + *len += dbg_vsnprintf(buf, *len, + "cqn_snd: %llu\n", + qpc->cqn_snd); + *len += dbg_vsnprintf(buf, *len, + "log_rq_stride: %llu\n", + qpc->log_rq_stride); + *len += dbg_vsnprintf(buf, *len, + "log_page_size: %llu\n", + qpc->log_page_size); + *len += dbg_vsnprintf(buf, *len, + "cqn_rcv: %llu\n", + qpc->cqn_rcv); + *len += dbg_vsnprintf(buf, *len, + "min_rnr_nak: %llu\n", + qpc->min_rnr_nak); + *len += dbg_vsnprintf(buf, *len, "fl: %llu\n", + qpc->fl); + *len += dbg_vsnprintf(buf, *len, "srqn: %llu\n", + qpc->srqn); + *len += dbg_vsnprintf(buf, *len, + "bucket_type: %llu\n", + qpc->bucket_type); + *len += dbg_vsnprintf(buf, *len, + "stat_rate: %llu\n", + qpc->stat_rate); + *len += dbg_vsnprintf(buf, *len, + "rq_type: %llu\n", + qpc->rq_type); + *len += dbg_vsnprintf(buf, *len, "ipv4: %llu\n", + qpc->ipv4); + *len += dbg_vsnprintf(buf, *len, "pd: %llu\n", + qpc->pd); + *len += dbg_vsnprintf(buf, *len, + "log_rra_max: %llu\n", + qpc->log_rra_max); + *len += dbg_vsnprintf(buf, *len, + "log_sra_max: %llu\n", + qpc->log_sra_max); + *len += dbg_vsnprintf(buf, *len, + "send_start_psn: %llu\n", + qpc->send_start_psn); + *len += dbg_vsnprintf(buf, *len, + "tmo_retry: %llu\n", + qpc->tmo_retry); + *len += dbg_vsnprintf(buf, *len, + "udpriv_cqenable: %llu\n", + qpc->udpriv_cqenable); + *len += dbg_vsnprintf(buf, *len, + "rnr_retry: %llu\n", + qpc->rnr_retry); + *len += dbg_vsnprintf(buf, *len, + "rcv_start_psn: %llu\n", + qpc->rcv_start_psn); + *len += dbg_vsnprintf(buf, *len, + "ack_mode: %llu\n", + qpc->ack_mode); + *len += dbg_vsnprintf(buf, *len, + "pkey_index: %llu\n", + qpc->pkey_index); + *len += dbg_vsnprintf(buf, *len, + "rq_flush_flag: %llu\n", + qpc->rq_flush_flag); + *len += 
dbg_vsnprintf(buf, *len, + "sq_flush_flag: %llu\n", + qpc->sq_flush_flag); + *len += dbg_vsnprintf(buf, *len, + "log_ack_req_freq: %llu\n", + qpc->log_ack_req_freq); + *len += dbg_vsnprintf(buf, *len, + "hop_limit: %llu\n", + qpc->hop_limit); + *len += dbg_vsnprintf(buf, *len, + "q_key: %llu\n", + qpc->q_key); + *len += dbg_vsnprintf(buf, *len, + "smac_low: %llu\n", + qpc->smac_low); + *len += dbg_vsnprintf(buf, *len, + "smac_high: %llu\n", + qpc->smac_high); + *len += dbg_vsnprintf(buf, *len, + "src_port_num: %llu\n", + qpc->src_port_num); + *len += dbg_vsnprintf(buf, *len, + "rmac_low: %llu\n", + qpc->rmac_low); + *len += dbg_vsnprintf(buf, *len, + "rmac_high: %llu\n", + qpc->rmac_high); + *len += dbg_vsnprintf(buf, *len, + "dest_port_num: %llu\n", + qpc->dest_port_num); +} + +void drv_rdma_qp_read_field_seg1(char *buf, size_t *len, struct sxe2_qpc *qpc) +{ + *len += dbg_vsnprintf(buf, *len, "SEG1(SW_CFG)\n"); + *len += dbg_vsnprintf( + buf, *len, + "qp_completion_context: %llu\n", + qpc->qp_completion_contex); + *len += dbg_vsnprintf(buf, *len, + "sq_tph_value: %llu\n", + qpc->sq_tph_value); + *len += dbg_vsnprintf(buf, *len, + "rq_tph_value: %llu\n", + qpc->rq_tph_value); + *len += dbg_vsnprintf(buf, *len, + "sq_tph_en: %llu\n", + qpc->sq_tph_en); + *len += dbg_vsnprintf(buf, *len, + "rq_tph_en: %llu\n", + qpc->rq_tph_en); + *len += dbg_vsnprintf(buf, *len, + "xmit_tph_en: %llu\n", + qpc->xmit_tph_en); + *len += dbg_vsnprintf(buf, *len, + "rcv_tph_en: %llu\n", + qpc->rcv_tph_en); + *len += dbg_vsnprintf(buf, *len, + "cnp_dscp: %llu\n", + qpc->cnp_dscp); + *len += dbg_vsnprintf(buf, *len, + "cnp_ecn: %llu\n", + qpc->cnp_ecn); + *len += dbg_vsnprintf(buf, *len, + "flow_label: %llu\n", + qpc->flow_label); + *len += dbg_vsnprintf(buf, *len, "dscp: %llu\n", + qpc->dscp); + *len += dbg_vsnprintf(buf, *len, "ecn: %llu\n", + qpc->ecn); + *len += dbg_vsnprintf(buf, *len, + "dest_ip_addr_3: %llu\n", + qpc->dest_ipaddr3); + *len += dbg_vsnprintf(buf, *len, + "dest_ip_addr_2: %llu\n", + qpc->dest_ipaddr2); + *len += dbg_vsnprintf(buf, *len, + "dest_ip_addr_1: %llu\n", + qpc->dest_ipaddr1); + *len += dbg_vsnprintf(buf, *len, + "dest_ip_addr_0: %llu\n", + qpc->dest_ipaddr0); + *len += dbg_vsnprintf(buf, *len, + "local_ip_addr_3: %llu\n", + qpc->local_ipaddr_3); + *len += dbg_vsnprintf(buf, *len, + "local_ip_addr_2: %llu\n", + qpc->local_ipaddr_2); + *len += dbg_vsnprintf(buf, *len, + "local_ip_addr_1: %llu\n", + qpc->local_ipaddr_1); + *len += dbg_vsnprintf(buf, *len, + "local_ip_addr_0: %llu\n", + qpc->local_ipaddr_0); + *len += dbg_vsnprintf(buf, *len, + "dbr_addr: %llu\n", + qpc->dbr_addr); + *len += dbg_vsnprintf(buf, *len, + "pbl_pointer: %llu\n", + qpc->pbl_pointer); + *len += dbg_vsnprintf(buf, *len, + "tpid_index: %llu\n", + qpc->tpid_index); + *len += dbg_vsnprintf(buf, *len, + "wq_access_mode: %llu\n", + qpc->wq_acces_mode); + *len += dbg_vsnprintf( + buf, *len, + "use_statistics_instance: %llu\n", + qpc->use_statistics_intance); + *len += dbg_vsnprintf( + buf, *len, + "statistics_instance_index: %llu\n", + qpc->statistics_instance_index); + *len += dbg_vsnprintf(buf, *len, + "insert_vlan_tag: %llu\n", + qpc->insert_vlan_tag); + *len += dbg_vsnprintf(buf, *len, + "vlan_tag: %llu\n", + qpc->vlan_tag); + *len += dbg_vsnprintf(buf, *len, + "src_vsi: %llu\n", + qpc->src_vsi); + *len += dbg_vsnprintf(buf, *len, + "dispatch_min_unit: %llu\n", + qpc->dispatch_min_unit); + *len += dbg_vsnprintf(buf, *len, + "dispatch_mode: %llu\n", + qpc->dispatch_mode); +} + +void drv_rdma_qp_read_field_seg2(char 
*buf, size_t *len, struct sxe2_qpc *qpc) +{ + *len += dbg_vsnprintf(buf, *len, "SEG2(WQE_PROC)\n"); + *len += dbg_vsnprintf(buf, *len, + "hw_sq_wqebb_counter: %llu\n", + qpc->hw_sq_wqebb_counter); + *len += dbg_vsnprintf(buf, *len, + "txw_sq_retry_end_ptr: %llu\n", + qpc->txw_sq_retry_end_ptr); + *len += dbg_vsnprintf(buf, *len, + "txw_sq_retry_end_psn: %llu\n", + qpc->txw_sq_retry_end_psn); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa1_low: %llu\n", + qpc->wqe_page_pa1_low); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa1_high: %llu\n", + qpc->wqe_page_pa1_high); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa2_low: %llu\n", + qpc->wqe_page_pa2_low); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa2_high: %llu\n", + qpc->wqe_page_pa2_high); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa_vld: %llu\n", + qpc->wqe_page_pa_vld); + *len += dbg_vsnprintf(buf, *len, + "wqe_page_pa_sel: %llu\n", + qpc->wqe_page_pa_sel); + *len += dbg_vsnprintf(buf, *len, + "txw_resp_retry_flag: %llu\n", + qpc->txw_resp_retry_flag); + *len += dbg_vsnprintf(buf, *len, + "wqe_send_bk_vld: %llu\n", + qpc->wqe_send_bk_vld); + *len += dbg_vsnprintf(buf, *len, + "wqe_send_sge_addr: %llu\n", + qpc->wqe_send_sge_addr); + *len += dbg_vsnprintf(buf, *len, + "txw_resp_occupty: %llu\n", + qpc->txw_resp_occupty); + *len += dbg_vsnprintf(buf, *len, + "wqe_polarity: %llu\n", + qpc->wqe_polarity); + *len += dbg_vsnprintf(buf, *len, + "wqe_rx_sqretry: %llu\n", + qpc->wqe_rx_sqretry); + *len += dbg_vsnprintf(buf, *len, + "txw_sq_occupty: %llu\n", + qpc->txw_sq_occupty); + *len += dbg_vsnprintf(buf, *len, + "txw_resp_break_all: %llu\n", + qpc->txw_resp_break_all); + *len += dbg_vsnprintf(buf, *len, + "txw_ll_type: %llu\n", + qpc->txw_ll_type); + *len += dbg_vsnprintf(buf, *len, + "txw_resp_go_back_n: %llu\n", + qpc->txw_resp_go_back_n); + *len += dbg_vsnprintf(buf, *len, + "wqe_rx_ack_retry: %llu\n", + qpc->wqe_rx_ack_retry); + *len += dbg_vsnprintf(buf, *len, + "wqe_tx_eng_retry: %llu\n", + qpc->wqe_tx_eng_retry); + *len += dbg_vsnprintf(buf, *len, + "wqe_resp_bk_valid: %llu\n", + qpc->wqe_resp_bk_valid); + *len += dbg_vsnprintf(buf, *len, + "txw_sq_retry_flag: %llu\n", + qpc->txw_sq_retry_flag); + *len += dbg_vsnprintf( + buf, *len, + "txw_nxt_need_irrl_num: %llu\n", + qpc->txw_nxt_need_irrl_num); + *len += dbg_vsnprintf(buf, *len, + "txw_ll_wqe_drop: %llu\n", + qpc->txw_ll_wqe_drop); + *len += dbg_vsnprintf(buf, *len, + "txw_sq_retry_type: %llu\n", + qpc->txw_sq_retry_type); + *len += dbg_vsnprintf( + buf, *len, + "txw_sq_retry_start_psn_low: %llu\n", + qpc->txw_sq_retry_start_psn_low); + *len += dbg_vsnprintf( + buf, *len, + "txw_sq_retry_start_psn: %llu\n", + qpc->txw_sq_retry_start_psn); + *len += dbg_vsnprintf( + buf, *len, + "txw_resp_retry_finish_flag: %llu\n", + qpc->txw_resp_retry_finish_flag); + *len += dbg_vsnprintf(buf, *len, + "txw_send_try_flag: %llu\n", + qpc->txw_send_try_flag); + *len += dbg_vsnprintf(buf, *len, + "wqe_proc_qp_state: %llu\n", + qpc->wqe_proc_qp_state); + *len += dbg_vsnprintf( + buf, *len, + "stat_rate_already_init: %llu\n", + qpc->stat_rate_already_init); + *len += dbg_vsnprintf( + buf, *len, + "last_wqe_already_sent: %llu\n", + qpc->last_wqe_already_sent); + *len += dbg_vsnprintf(buf, *len, + "wqe_nak_retry_cnt: %llu\n", + qpc->wqe_nak_retry_cnt); + *len += dbg_vsnprintf(buf, *len, + "wqe_rnr_retry_cnt: %llu\n", + qpc->wqe_rnr_retry_cnt); + *len += dbg_vsnprintf(buf, *len, + "wqe_send_offest: %llu\n", + qpc->wqe_send_offest); + *len += dbg_vsnprintf(buf, *len, + 
"txw_resp_retry_cnt: %llu\n", + qpc->txw_resp_retry_cnt); + *len += dbg_vsnprintf( + buf, *len, + "txw_resp_retry_end_ptr: %llu\n", + qpc->txw_resp_retry_end_ptr); + *len += dbg_vsnprintf( + buf, *len, + "txw_resp_retry_finish_back_ptr: %llu\n", + qpc->txw_resp_retry_finish_back_ptr); + *len += dbg_vsnprintf( + buf, *len, + "txw_resp_retry_exit_ptr: %llu\n", + qpc->txw_resp_retry_exit_ptr); + *len += dbg_vsnprintf(buf, *len, + "wqe_ssn_send: %llu\n", + qpc->wqe_ssn_send); + *len += dbg_vsnprintf( + buf, *len, + "wqe_ssn_nocredit_cnt_low: %llu\n", + qpc->wqe_ssn_nocredit_cnt_low); + *len += dbg_vsnprintf( + buf, *len, + "wqe_ssn_nocredit_cnt_high: %llu\n", + qpc->wqe_ssn_nocredit_cnt_high); + *len += dbg_vsnprintf( + buf, *len, + "wqe_next_send_psn_low: %llu\n", + qpc->wqe_next_send_psn_low); + *len += dbg_vsnprintf( + buf, *len, + "wqe_next_send_psn_high: %llu\n", + qpc->wqe_next_send_psn_high); + *len += dbg_vsnprintf(buf, *len, + "respcache_msn: %llu\n", + qpc->respcache_msn); + *len += dbg_vsnprintf(buf, *len, + "respcache_credit: %llu\n", + qpc->respcache_credit); + *len += dbg_vsnprintf(buf, *len, + "wqe_credit_check_en: %llu\n", + qpc->wqe_credit_check_en); + *len += dbg_vsnprintf( + buf, *len, + "wqe_outsdanding_check_en: %llu\n", + qpc->wqe_outsdanding_check_en); + *len += dbg_vsnprintf(buf, *len, + "sq_stop: %llu\n", + qpc->sq_stop); + *len += dbg_vsnprintf(buf, *len, + "respcache_psn: %llu\n", + qpc->respcache_psn); + *len += dbg_vsnprintf(buf, *len, + "respcache_r_va_low: %llu\n", + qpc->respcache_r_va_low); + *len += dbg_vsnprintf(buf, *len, + "respcache_r_va_high: %llu\n", + qpc->respcache_r_va_high); + *len += dbg_vsnprintf(buf, *len, + "respcache_r_key: %llu\n", + qpc->respcache_r_key); + *len += dbg_vsnprintf(buf, *len, + "respcache_dma_len: %llu\n", + qpc->respcache_dma_len); + *len += dbg_vsnprintf(buf, *len, + "mcrt_busy: %llu\n", + qpc->mcrt_busy); + *len += dbg_vsnprintf(buf, *len, + "txw_rtr2rts_flag: %llu\n", + qpc->txw_rtr2rts_flag); + *len += dbg_vsnprintf( + buf, *len, + "wqe_resp_first_rd_flag: %llu\n", + qpc->wqe_resp_first_rd_flag); + *len += dbg_vsnprintf(buf, *len, + "wqe_resp_rd_ptr_out: %llu\n", + qpc->wqe_resp_rd_ptr_out); + *len += dbg_vsnprintf(buf, *len, + "wqe_resp_rd_ptr_sel: %llu\n", + qpc->wqe_resp_rd_ptr_sel); + *len += dbg_vsnprintf(buf, *len, + "wqe_resp_rd_ptr_in: %llu\n", + qpc->wqe_resp_rd_ptr_in); + *len += dbg_vsnprintf(buf, *len, + "txw_wqe_irrl_wptr: %llu\n", + qpc->txw_wqe_irrl_wptr); + *len += dbg_vsnprintf(buf, *len, + "wqe_psn_check_en: %llu\n", + qpc->wqe_psn_check_en); + *len += dbg_vsnprintf(buf, *len, + "wqe_nop_check_en: %llu\n", + qpc->wqe_nop_check_en); + *len += dbg_vsnprintf( + buf, *len, + "wqe_read_fence_check_en: %llu\n", + qpc->wqe_read_fence_check_en); + *len += dbg_vsnprintf(buf, *len, + "resp_newest_psn: %llu\n", + qpc->resp_newest_psn); + *len += dbg_vsnprintf( + buf, *len, + "wqe_local_fence_check_en: %llu\n", + qpc->wqe_local_fence_check_en); + *len += dbg_vsnprintf(buf, *len, + "txw_chx_st: %llu\n", + qpc->txw_chx_st); + *len += dbg_vsnprintf(buf, *len, + "txw_resp_err: %llu\n", + qpc->txw_resp_err); + *len += dbg_vsnprintf(buf, *len, + "wqe_proc_cnp_flag: %llu\n", + qpc->wqe_proc_cnp_flag); + *len += dbg_vsnprintf(buf, *len, + "mcrt_wqe_idx: %llu\n", + qpc->mcrt_wqe_idx); + *len += dbg_vsnprintf(buf, *len, + "txw_rate_bucket_low: %llu\n", + qpc->txw_rate_bucket_low); + *len += dbg_vsnprintf(buf, *len, + "txw_nxt_wqe_psn: %llu\n", + qpc->txw_nxt_wqe_psn); + *len += dbg_vsnprintf(buf, *len, + "txw_rate_bucket_high: 
%llu\n",
+                  qpc->txw_rate_bucket_high);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txw_nak_flag:                   %llu\n",
+                  qpc->txw_nak_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txw_token_num:                  %llu\n",
+                  qpc->txw_token_num);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txw_sq_retry_end_psn:           %llu\n",
+                  qpc->txw_sq_retry_end_psn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txw_rate_timestamp:             %llu\n",
+                  qpc->txw_rate_timestamp);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txw_wqe_start_psn:              %llu\n",
+                  qpc->txw_wqe_start_psn);
+}
+
+void drv_rdma_qp_read_field_seg3(char *buf, size_t *len, struct sxe2_qpc *qpc)
+{
+    *len += dbg_vsnprintf(buf, *len, "SEG3(TX_ENG)\n");
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_qpc_rtr2rts_flag:         %llu\n",
+        qpc->txeng_qpc_rtr2rts_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_req_err_flag:             %llu\n",
+                  qpc->txeng_req_err_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_resp_err_flag:            %llu\n",
+                  qpc->txeng_resp_err_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_flush_err_retry_flag:     %llu\n",
+        qpc->txeng_flush_err_retry_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_flush_err_retry_wqe_idx:  %llu\n",
+        qpc->txeng_flush_err_retry_wqe_idx);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_wqe_start_flag:           %llu\n",
+                  qpc->txeng_wqe_start_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_qp_state:                 %llu\n",
+                  qpc->txeng_qp_state);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_ok_req_ssn:               %llu\n",
+                  qpc->txeng_ok_req_ssn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_sq_err_syndrom:           %llu\n",
+                  qpc->txeng_sq_err_syndrom);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_tx2rx_retry_end_psn:      %llu\n",
+        qpc->txeng_tx2rx_retry_end_psn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_resp_err_psn_l:           %llu\n",
+                  qpc->txeng_resp_err_psn_l);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_resp_wait_nak_flag:       %llu\n",
+        qpc->txeng_resp_wait_nak_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_syn_sch2rxi_retry_phase_tag: %llu\n",
+        qpc->txeng_syn_sch2rxi_retry_phase_tag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_tx2rx_retry_flag:         %llu\n",
+        qpc->txeng_sq_flush_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_send_req_next_ssn:        %llu\n",
+        qpc->txeng_send_req_next_ssn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_token_num_l:              %llu\n",
+                  qpc->txeng_token_num_l);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_npsn:                     %llu\n",
+                  qpc->txeng_npsn);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_wr_totptr:        %llu\n",
+        qpc->txeng_ssnt_tx_wr_totptr);
+
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_first_wr_flag:    %llu\n",
+                  qpc->txeng_irrl_tx_first_wr_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_first_wr_sel:     %llu\n",
+                  qpc->txeng_irrl_tx_first_wr_sel);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_first_wr_inptr:   %llu\n",
+                  qpc->txeng_irrl_tx_first_wr_inptr);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_wr_sel:           %llu\n",
+                  qpc->txeng_irrl_tx_wr_sel);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_wr_inptr:         %llu\n",
+                  qpc->txeng_irrl_tx_wr_inptr);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_tx_wr_outptr:        %llu\n",
+                  qpc->txeng_irrl_tx_wr_outptr);
+
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_first_wr_in_flag: %llu\n",
+        qpc->txeng_ssnt_tx_first_wr_in_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_first_wr_inptr:   %llu\n",
+        qpc->txeng_ssnt_tx_first_wr_inptr);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_rd2wr_flag:       %llu\n",
+        qpc->txeng_ssnt_tx_rd2wr_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_wr_inptr:         %llu\n",
+        qpc->txeng_ssnt_tx_wr_inptr);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_ssnt_tx_wr_outptr:        %llu\n",
+        qpc->txeng_ssnt_tx_wr_outptr);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_pa_0_31:             %llu\n",
+                  qpc->txeng_irrl_pa_0_31);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_pa_32_63:            %llu\n",
+                  qpc->txeng_irrl_pa_32_63);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_pa_64_95:            %llu\n",
+                  qpc->txeng_irrl_pa_64_95);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_irrl_pa_96_127:           %llu\n",
+                  qpc->txeng_irrl_pa_96_127);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_ssnt_pa_0_31:             %llu\n",
+                  qpc->txeng_ssnt_pa_0_31);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_ssnt_pa_32_63:            %llu\n",
+                  qpc->txeng_ssnt_pa_32_63);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_ssnt_pa_64_95:            %llu\n",
+                  qpc->txeng_ssnt_pa_64_95);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_ssnt_pa_96_127:           %llu\n",
+                  qpc->txeng_ssnt_pa_96_127);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_resp_err_psn_h:           %llu\n",
+                  qpc->txeng_resp_err_psn_h);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_newest_ack_req_psn:       %llu\n",
+        qpc->txeng_newest_ack_req_psn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_token_num_m:              %llu\n",
+                  qpc->txeng_token_num_m);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_newest_unack_psn:         %llu\n",
+        qpc->txeng_newest_unack_psn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_token_num_h:              %llu\n",
+                  qpc->txeng_token_num_h);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "txeng_oldest_unack_psn:         %llu\n",
+        qpc->txeng_oldest_unack_psn);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_tmo_log_rtm:              %llu\n",
+                  qpc->txeng_tmo_log_rtm);
+    *len += dbg_vsnprintf(buf, *len,
+                  "txeng_tmo_stamp_h:              %llu\n",
+                  qpc->txeng_tmo_stamp_h);
+}
+
+void drv_rdma_qp_read_field_seg4(char *buf, size_t *len, struct sxe2_qpc *qpc)
+{
+    *len += dbg_vsnprintf(buf, *len, "SEG4(RX_INI)\n");
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_newest_unack_psn:           %llu\n",
+                  qpc->rxi_newest_unack_psn);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_qp_error_cqe_syndrome:      %llu\n",
+        qpc->rxi_qp_error_cqe_syndrome);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_qp_state:                   %llu\n",
+                  qpc->rxi_qp_state);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_req_rnr_retry_flag:         %llu\n",
+        qpc->rxi_req_rnr_retry_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_resp_rnr_nak_tmr:           %llu\n",
+                  qpc->rxi_resp_rnr_nak_tmr);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_retring_flag:               %llu\n",
+                  qpc->rxi_retring_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_retry_phase_tag:            %llu\n",
+                  qpc->rxi_retry_phase_tag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_ssnt_wqe_sge_ce:            %llu\n",
+                  qpc->rxi_ssnt_wqe_sge_ce);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_ssnt_in_wr_ptr_from_rx:     %llu\n",
+        qpc->rxi_ssnt_in_wr_ptr_from_rx);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_ssnt_out_rd_ptr:            %llu\n",
+                  qpc->rxi_ssnt_out_rd_ptr);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_ssnt_ll_wqe_drop:           %llu\n",
+                  qpc->rxi_ssnt_ll_wqe_drop);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_ssnt_wqe_data_length:       %llu\n",
+        qpc->rxi_ssnt_wqe_data_length);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_ssnt_wqe_start_psn:         %llu\n",
+        qpc->rxi_ssnt_wqe_start_psn);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_ssnt_rx_rd2wr_flag:         %llu\n",
+        qpc->rxi_ssnt_rx_rd2wr_flag);
+    *len += dbg_vsnprintf(
+        buf, *len,
+        "rxi_ssnt_rx_first_rd_flag:      %llu\n",
+        qpc->rxi_ssnt_rx_first_rd_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_ssnt_info_flag:             %llu\n",
+                  qpc->rxi_ssnt_info_flag);
+    *len += dbg_vsnprintf(buf, *len,
+                  "rxi_credit:                     %llu\n",
+                  qpc->rxi_credit); 
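+    /*
+     * The remaining RX_INI fields below follow the same convention as the
+     * rest of these dump helpers: as used here, dbg_vsnprintf() appends one
+     * "name: value" line at offset *len and returns the number of bytes it
+     * added, which the caller accumulates in *len.
+     */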
+ *len += dbg_vsnprintf( + buf, *len, + "rxi_ssnt_unack_wqe_ssn: %llu\n", + qpc->rxi_ssnt_unack_wqe_ssn); + *len += dbg_vsnprintf( + buf, *len, + "rxi_ssnt_total_rd_ptr: %llu\n", + qpc->rxi_ssnt_total_rd_ptr); + *len += dbg_vsnprintf(buf, *len, + "rxi_ssnt_wqe_wrid: %llu\n", + qpc->rxi_ssnt_wqe_wrid); + *len += dbg_vsnprintf(buf, *len, + "rxi_ssnt_in_rd_ptr: %llu\n", + qpc->rxi_ssnt_in_rd_ptr); + *len += dbg_vsnprintf(buf, *len, + "rxi_ssnt_opcode: %llu\n", + qpc->rxi_ssnt_opcode); + *len += dbg_vsnprintf(buf, *len, + "rxi_resp_msn: %llu\n", + qpc->rxi_resp_msn); + *len += dbg_vsnprintf(buf, *len, + "rxi_irrl_wr_outptr: %llu\n", + qpc->rxi_irrl_wr_outptr); + *len += dbg_vsnprintf(buf, *len, + "rxi_ack_time_stamp: %llu\n", + qpc->rxi_ack_time_stamp); + *len += dbg_vsnprintf(buf, *len, + "rxi_ae_code: %llu\n", + qpc->rxi_ae_code); + *len += dbg_vsnprintf( + buf, *len, + "rxeng_dif_breack_point: %llu\n", + qpc->rxeng_dif_breack_point); + *len += dbg_vsnprintf(buf, *len, + "rx_dif_flag: %llu\n", + qpc->rx_dif_flag); + *len += dbg_vsnprintf(buf, *len, + "rxeng_dif_ref_tag: %llu\n", + qpc->rxeng_dif_ref_tag); + *len += dbg_vsnprintf(buf, *len, + "rxeng_dif_crc_seed: %llu\n", + qpc->rxeng_dif_crc_seed); + *len += dbg_vsnprintf( + buf, *len, + "rxeng_dif_check_speed: %llu\n", + qpc->rxeng_dif_check_speed); + *len += dbg_vsnprintf(buf, *len, + "sw_cc_enable: %llu\n", + qpc->sw_cc_enable); + *len += dbg_vsnprintf(buf, *len, + "sw_cc_index: %llu\n", + qpc->sw_cc_index); +} + +void drv_rdma_qp_read_field_seg5(struct sxe2_rdma_device *rdma_dev, int qpn, + struct sxe2_qpc *qpc) +{ + DRV_RDMA_LOG_DEV_DEBUG("qp:%#x qpc SEG5(RX_TGT)\n", qpn); + DRV_RDMA_LOG_DEV_DEBUG("rxt_qp_state: %u\n", + qpc->rxt_qp_state); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_pa_type: %u\n", + qpc->rxt_resp_pa_type); + DRV_RDMA_LOG_DEV_DEBUG("rxt_newest_credit: %u\n", + qpc->rxt_newest_credit); + DRV_RDMA_LOG_DEV_DEBUG("rx_cnp_flag: %u\n", + qpc->rx_cnp_flag); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_retry_wr_cnt: %u\n", + qpc->rxt_resp_retry_wr_cnt); + DRV_RDMA_LOG_DEV_DEBUG("rxt_ack_type: %u\n", + qpc->rxt_ack_type); + DRV_RDMA_LOG_DEV_DEBUG("rxt_nak_flag: %u\n", + qpc->rxt_nak_flag); + DRV_RDMA_LOG_DEV_DEBUG("rxt_ack_retry_flag: %u\n", + qpc->rxt_ack_retry_flag); + DRV_RDMA_LOG_DEV_DEBUG("rxt_ack_syndrome: %u\n", + qpc->rxt_ack_syndrome); + DRV_RDMA_LOG_DEV_DEBUG("rxt_ack_msn: %u\n", + qpc->rxt_ack_msn); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_paddr_96_127: %u\n", + qpc->rxt_resp_paddr_96_127); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_paddr_64_95: %u\n", + qpc->rxt_resp_paddr_64_95); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_paddr_32_63: %u\n", + qpc->rxt_resp_paddr_32_63); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_paddr_0_31: %u\n", + qpc->rxt_resp_paddr_0_31); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_rd_flag: %u\n", + qpc->rxt_resp_rd_flag); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_boundary_done: %u\n", + qpc->rxt_resp_boundary_done); + DRV_RDMA_LOG_DEV_DEBUG( + "rxt_resp_first_wr_ptr_in: %u\n", + qpc->rxt_resp_first_wr_ptr_in); + DRV_RDMA_LOG_DEV_DEBUG( + "rxt_resp_first_wr_ptr_sel: %u\n", + qpc->rxt_resp_first_wr_ptr_sel); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_first_wr_flag: %u\n", + qpc->rxt_resp_first_wr_flag); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_wr_ptr_in: %u\n", + qpc->rxt_resp_wr_ptr_in); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_wr_ptr_out: %u\n", + qpc->rxt_resp_wr_ptr_out); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_wr_ptr_sel: %u\n", + qpc->rxt_resp_wr_ptr_sel); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_pa_vld: %u\n", + qpc->rxt_resp_pa_vld); + DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_newest_psn: 
%u\n",
+                   qpc->rxt_resp_newest_psn);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rxt_resp_retry_start_ptr:       %u\n",
+        qpc->rxt_resp_retry_start_ptr);
+    DRV_RDMA_LOG_DEV_DEBUG("rxt_rmsm:                       %u\n",
+                   qpc->rxt_rmsm);
+    DRV_RDMA_LOG_DEV_DEBUG("rxt_resp_retry_end_ptr:         %u\n",
+                   qpc->rxt_resp_retry_end_ptr);
+    DRV_RDMA_LOG_DEV_DEBUG("rx_cnp_disp_timing:             %u\n",
+                   qpc->rx_cnp_disp_timing);
+}
+
+void drv_rdma_qp_read_field_seg6(struct sxe2_rdma_device *rdma_dev, int qpn,
+                 struct sxe2_qpc *qpc)
+{
+    DRV_RDMA_LOG_DEV_DEBUG("qp:%#x qpc SEG6(RX_PKT_PROC)\n", qpn);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_rd_ptr_in:         %u\n",
+                   qpc->rpp_ini_irrl_rd_ptr_in);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_rd_ptr_out:        %u\n",
+        qpc->rpp_ini_irrl_rd_ptr_out);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_rd_ptr_sel:        %u\n",
+        qpc->rpp_ini_irrl_rd_ptr_sel);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_first_rd_flag:     %u\n",
+        qpc->rpp_ini_irrl_first_rd_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_delete_en:         %u\n",
+                   qpc->rpp_ini_irrl_delete_en);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_delete_done:       %u\n",
+        qpc->rpp_ini_irrl_delete_done);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_sge_va_low:        %u\n",
+        qpc->rpp_ini_irrl_sge_va_low);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_sge_va_high:       %u\n",
+        qpc->rpp_ini_irrl_sge_va_high);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_sge_lkey:          %u\n",
+                   qpc->rpp_ini_irrl_sge_lkey);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_sge_len:           %u\n",
+                   qpc->rpp_ini_irrl_sge_len);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_irrl_sge_totallen:      %u\n",
+        qpc->rpp_ini_irrl_sge_totallen);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_sge_spsn:          %u\n",
+                   qpc->rpp_ini_irrl_sge_spsn);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_sge_ssn:           %u\n",
+                   qpc->rpp_ini_irrl_sge_ssn);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_rcv_messlen:            %u\n",
+                   qpc->rpp_ini_rcv_messlen);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_unack_psn:              %u\n",
+                   qpc->rpp_ini_unack_psn);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_irrl_info_flag:         %u\n",
+                   qpc->rpp_ini_irrl_info_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_qpsts_err_flag:         %u\n",
+                   qpc->rpp_ini_qpsts_err_flag);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_read_retry_flag:        %u\n",
+        qpc->rpp_ini_read_retry_flag);
+    DRV_RDMA_LOG_DEV_DEBUG(
+        "rpp_ini_write_retry_flag:       %u\n",
+        qpc->rpp_ini_write_retry_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_cur_ssn:                %u\n",
+                   qpc->rpp_ini_cur_ssn);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sge_va_low:             %u\n",
+                   qpc->rpp_tgt_sge_va_low);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sge_va_high:            %u\n",
+                   qpc->rpp_tgt_sge_va_high);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sge_key:                %u\n",
+                   qpc->rpp_tgt_sge_key);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sge_len:                %u\n",
+                   qpc->rpp_tgt_sge_len);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_wqe_err:                    %u\n",
+                   qpc->rpp_wqe_err);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_read_ots_ptr:           %u\n",
+                   qpc->rpp_tgt_read_ots_ptr);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_rnr_flag:               %u\n",
+                   qpc->rpp_tgt_rnr_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_nak_flag:               %u\n",
+                   qpc->rpp_tgt_nak_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_rtr_aeq_flag:           %u\n",
+                   qpc->rpp_tgt_rtr_aeq_flag);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sge_num:                %u\n",
+                   qpc->rpp_tgt_sge_num);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_sel:            %u\n",
+                   qpc->rpp_tgt_page_pa_sel);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_vld:            %u\n",
+                   qpc->rpp_tgt_page_pa_vld);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_1_low:          %u\n",
+                   qpc->rpp_tgt_page_pa_1_low);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_1_high:         %u\n",
+                   qpc->rpp_tgt_page_pa_1_high);
+    DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_rq_ud_len_err:          %u\n",
+                   qpc->rpp_tgt_rq_ud_len_err);
+    
DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_l: %u\n", + qpc->rpp_tgt_page_pa_l); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_page_pa_h: %u\n", + qpc->rpp_tgt_page_pa_h); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_qp_error: %u\n", + qpc->rpp_tgt_qp_error); + DRV_RDMA_LOG_DEV_DEBUG("rpp_wqe_vld: %u\n", + qpc->rpp_wqe_vld); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_bth_opcode: %u\n", + qpc->rpp_tgt_bth_opcode); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_epsn: %u\n", + qpc->rpp_tgt_epsn); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_rcv_messlen: %u\n", + qpc->rpp_tgt_rcv_messlen); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_sw_counter: %u\n", + qpc->rpp_tgt_sw_counter); + DRV_RDMA_LOG_DEV_DEBUG("rpp_tgt_hw_counter: %u\n", + qpc->rpp_tgt_hw_counter); + DRV_RDMA_LOG_DEV_DEBUG("rpp_ini_pkt_psn: %u\n", + qpc->rpp_ini_pkt_psn); +} + +void drv_rdma_qp_read_field_seg7(struct sxe2_rdma_device *rdma_dev, int qpn, + struct sxe2_qpc *qpc) +{ + DRV_RDMA_LOG_DEV_DEBUG("qp:%#x qpc SEG7(HW_ID)\n", qpn); + DRV_RDMA_LOG_DEV_DEBUG("pf_id: %u\n", qpc->pf_id); + DRV_RDMA_LOG_DEV_DEBUG("vf_relative_id: %u\n", + qpc->vf_relative_id); + DRV_RDMA_LOG_DEV_DEBUG("vf_id: %u\n", qpc->vf_id); + DRV_RDMA_LOG_DEV_DEBUG("vf_valid: %u\n", + qpc->vf_valid); + DRV_RDMA_LOG_DEV_DEBUG("sqpn_8_17: %u\n", + qpc->sqpn_8_17); + DRV_RDMA_LOG_DEV_DEBUG("hw_cqn_snd: %u\n", + qpc->hw_cqn_snd); + DRV_RDMA_LOG_DEV_DEBUG("sqpn_0_7: %u\n", + qpc->sqpn_0_7); + DRV_RDMA_LOG_DEV_DEBUG("hw_cqn_rcv: %u\n", + qpc->hw_cqn_rcv); + DRV_RDMA_LOG_DEV_DEBUG("hw_srqn: %u\n", + qpc->hw_srqn); + DRV_RDMA_LOG_DEV_DEBUG("sw_cc_enable: %u\n", + qpc->sw_cc_enable); + DRV_RDMA_LOG_DEV_DEBUG("sw_cc_index: %u\n", + qpc->sw_cc_index); +} + +u64 drv_rdma_qp_read_field(struct sxe2_rdma_device *rdma_dev, void *data, + enum drv_rdma_dbg_rsc_type type, char *buf) +{ + struct sxe2_rdma_qp *qp; + struct sxe2_rdma_ctx_qp *qp_ctx; + int qpn; + int ret = 0; + struct sxe2_qpc *qpc; + size_t len = 0; + + qp = (struct sxe2_rdma_qp *)data; + qpn = qp->ibqp.qp_num; + qp_ctx = &qp->qp_ctx; + + ret = sxe2_mq_query_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("sxe2_mq_query_qp_cmd failed , ret(%d)\n", + ret); + goto end; + } + + qpc = (struct sxe2_qpc *)qp->host_ctx.va; + + len += dbg_vsnprintf(buf, len, "qp:%#x context:\n\n", qpn); + drv_rdma_qp_read_field_seg0(buf, &len, qpc); + + drv_rdma_qp_read_field_seg1(buf, &len, qpc); + + drv_rdma_qp_read_field_seg2(buf, &len, qpc); + + drv_rdma_qp_read_field_seg3(buf, &len, qpc); + + drv_rdma_qp_read_field_seg4(buf, &len, qpc); + + drv_rdma_qp_read_field_seg5(rdma_dev, qpn, qpc); + + drv_rdma_qp_read_field_seg6(rdma_dev, qpn, qpc); + + drv_rdma_qp_read_field_seg7(rdma_dev, qpn, qpc); + +end: + return len; +} +static int drv_rdma_qp_write_field(struct sxe2_rdma_device *rdma_dev, + void *data, enum drv_rdma_dbg_rsc_type type, + char *buf) +{ +#ifdef SXE2_CFG_DEBUG + u32 i; + int ret; + u64 temp_value; + int argc; + char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_qp *qp; + struct sxe2_qpc *qpc; + + qp = (struct sxe2_rdma_qp *)data; + qpc = (struct sxe2_qpc *)qp->host_ctx.va; + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(buf, &argc, argv); + if (ret) + goto end; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("invalid param nums\n"); + goto end; + } + + for (i = 0; i < ARRAY_SIZE(g_qp_fields); i++) { + if (!strncmp(argv[0], g_qp_fields[i], strlen(g_qp_fields[i])) && + (strlen(g_qp_fields[i]) == strlen(argv[0]))) { + break; + } + } + + ret = kstrtoull(argv[1], 10, &temp_value); + if (ret != 0) { + 
DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%d)\n", ret); + goto end; + } + + DRV_RDMA_LOG_DEV_INFO("query qp i:%d, temp_value:%llx\n", i, + temp_value); + + switch (i) { + case ACK_TIMEOUT: + qpc->ack_timeout = temp_value; + break; + case RETRY_MODE: + qpc->retry_mode = temp_value; + break; + case CREDIT_EN: + qpc->credit_en = temp_value; + break; + case COMM_EST: + qpc->comm_est = temp_value; + break; + case RQE_LWM: + qpc->rqe_lwm = temp_value; + break; + case QP_STATE: + qpc->qp_state = temp_value; + break; + case DQPN: + qpc->dqpn = temp_value; + break; + case QP_TYPE: + qpc->qp_type = temp_value; + break; + case LLWQE_PAGE_INDEX: + qpc->llwqe_page_index = temp_value; + break; + case LLWQE_MODE_ENABLE: + qpc->llwqe_mode_enable = temp_value; + break; + case RETRY_RESP_OP_SEL: + qpc->retry_resp_op_sel = temp_value; + break; + case LOG_RTM: + qpc->log_rtm = temp_value; + break; + case LOG_MSG_MAX: + qpc->log_msg_max = temp_value; + break; + case PMTU: + qpc->pmtu = temp_value; + break; + case QP_PAGE_OFFSET: + qpc->page_offset = temp_value; + break; + case LOG_RQ_SIZE: + qpc->log_rq_size = temp_value; + break; + case LOG_SQ_SIZE: + qpc->log_sq_size = temp_value; + break; + case CQN_SND: + qpc->cqn_snd = temp_value; + break; + case LOG_RQ_STRIDE: + qpc->log_rq_stride = temp_value; + break; + case LOG_PAGE_SIZE: + qpc->log_page_size = temp_value; + break; + case CQN_RCV: + qpc->cqn_rcv = temp_value; + break; + case MIN_RNR_NAK: + qpc->min_rnr_nak = temp_value; + break; + case FL: + qpc->fl = temp_value; + break; + case SRQN: + qpc->srqn = temp_value; + break; + case BUCKET_TYPE: + qpc->bucket_type = temp_value; + break; + case STAT_RATE: + qpc->stat_rate = temp_value; + break; + case RQ_TYPE: + qpc->rq_type = temp_value; + break; + case IPV4: + qpc->ipv4 = temp_value; + break; + case PD: + qpc->pd = temp_value; + break; + case LOG_RRA_MAX: + qpc->log_rra_max = temp_value; + break; + case LOG_SRA_MAX: + qpc->log_sra_max = temp_value; + break; + case SEND_START_PSN: + qpc->send_start_psn = temp_value; + break; + case TMO_RETRY: + qpc->tmo_retry = temp_value; + break; + case UDPRIV_CQENABLE: + qpc->udpriv_cqenable = temp_value; + break; + case RNR_RETRY: + qpc->rnr_retry = temp_value; + break; + case RCV_START_PSN: + qpc->rcv_start_psn = temp_value; + break; + case ACK_MODE: + qpc->ack_mode = temp_value; + break; + case PKEY_INDEX: + qpc->pkey_index = temp_value; + break; + case SQ_FLUSH_FLAG: + qpc->sq_flush_flag = temp_value; + break; + case RQ_FLUSH_FLAG: + qpc->rq_flush_flag = temp_value; + break; + case LOG_ACK_REQ_FREQ: + qpc->log_ack_req_freq = temp_value; + break; + case HOP_LIMIT: + qpc->hop_limit = temp_value; + break; + case Q_KEY: + qpc->q_key = temp_value; + break; + case SMAC_LOW: + qpc->smac_low = temp_value; + break; + case SMAC_HIGH: + qpc->smac_high = temp_value; + break; + case SRC_PORT_NUM: + qpc->src_port_num = temp_value; + break; + case RMAC_LOW: + qpc->rmac_low = temp_value; + break; + case RMAC_HIGH: + qpc->rmac_high = temp_value; + break; + case DEST_PORT_NUM: + qpc->dest_port_num = temp_value; + break; + case QP_CTX: + qpc->qp_completion_contex = temp_value; + break; + case SQ_TPH_VAL: + qpc->sq_tph_value = temp_value; + break; + case RQ_TPH_VAL: + qpc->rq_tph_value = temp_value; + break; + case SQ_TPH_EN: + qpc->sq_tph_en = temp_value; + break; + case RQ_TPH_EN: + qpc->rq_tph_en = temp_value; + break; + case XMIT_TPH_EN: + qpc->xmit_tph_en = temp_value; + break; + case RCV_TPH_EN: + qpc->rcv_tph_en = temp_value; + break; + case CNP_DSCP: + qpc->cnp_dscp = 
temp_value; + break; + case CNP_ECN: + qpc->cnp_ecn = temp_value; + break; + case FLOW_LABEL: + qpc->flow_label = temp_value; + break; + case DSCP: + qpc->dscp = temp_value; + break; + case ECN: + qpc->ecn = temp_value; + break; + case DEST_IP3: + qpc->dest_ipaddr3 = temp_value; + break; + case DEST_IP2: + qpc->dest_ipaddr2 = temp_value; + break; + case DEST_IP1: + qpc->dest_ipaddr1 = temp_value; + break; + case DEST_IP0: + qpc->dest_ipaddr0 = temp_value; + break; + case LOCAL_IP3: + qpc->local_ipaddr_3 = temp_value; + break; + case LOCAL_IP2: + qpc->local_ipaddr_2 = temp_value; + break; + case LOCAL_IP1: + qpc->local_ipaddr_1 = temp_value; + break; + case LOCAL_IP0: + qpc->local_ipaddr_0 = temp_value; + break; + case DBR_ADDR: + qpc->dbr_addr = temp_value; + break; + case PBL_POINTER: + qpc->pbl_pointer = temp_value; + break; + case TPID_IDX: + qpc->tpid_index = temp_value; + break; + case WQ_ACCESS_MODE: + qpc->wq_acces_mode = temp_value; + break; + case USE_STATS: + qpc->use_statistics_intance = temp_value; + break; + case STATS_INDEX: + qpc->statistics_instance_index = temp_value; + break; + case INSERT_VLAN_TAG: + qpc->insert_vlan_tag = temp_value; + break; + case VLAN_TAG: + qpc->vlan_tag = temp_value; + break; + case SRC_VSI: + qpc->src_vsi = temp_value; + break; + case DISPATCH_MIN_UNIT: + qpc->dispatch_min_unit = temp_value; + break; + case DISPATCH_MODE: + qpc->dispatch_mode = temp_value; + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_DEV_WARN("invalid index %d, ret %d\n", i, ret); + goto end; + } + + ret = sxe2_mq_modify_qp_cmd(qp); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("modify qp ctx:%s failed, ret (%d)\n", + argv[0], ret); + } +end: + return ret; +#else + return 0; +#endif +} +int drv_rdma_debug_qp_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!rdma_dev->hdl->qp_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("qp debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + qp->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_QP, + rdma_dev->hdl->qp_debugfs, + drv_rdma_qp_read_field, + drv_rdma_qp_write_field, + (int)qp->ibqp.qp_num, qp); + if (!qp->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} +void drv_rdma_debug_qp_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR("root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->qp_debugfs) { + DRV_RDMA_LOG_DEV_ERR("qp debugfs dir not exist\n"); + goto end; + } + + if (qp->dbg_node) { + drv_rdma_rm_res_tree(qp->dbg_node); + qp->dbg_node = NULL; + } + +end: + return; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..e1593402387d21a5c4348c8e8c4845fb101b7896 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_qp_debugfs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: sxe2_drv_qp_debugfs.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_DRV_QP_DEBUGFS_H__
+#define __SXE2_DRV_QP_DEBUGFS_H__
+
+#include "sxe2_drv_rdma_common.h"
+void drv_rdma_qp_read_field_seg0(char *buf, size_t *len, struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg1(char *buf, size_t *len, struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg2(char *buf, size_t *len, struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg3(char *buf, size_t *len, struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg4(char *buf, size_t *len, struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg5(struct sxe2_rdma_device *rdma_dev, int qpn,
+				 struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg6(struct sxe2_rdma_device *rdma_dev, int qpn,
+				 struct sxe2_qpc *qpc);
+void drv_rdma_qp_read_field_seg7(struct sxe2_rdma_device *rdma_dev, int qpn,
+				 struct sxe2_qpc *qpc);
+u64 drv_rdma_qp_read_field(struct sxe2_rdma_device *rdma_dev, void *data,
+			   enum drv_rdma_dbg_rsc_type type, char *buf);
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..1ae7f497da5873825fbaf18ca90d367fc123c8b4
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.c
@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_rcms_debugfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/debugfs.h>
+#include "sxe2_compat.h"
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_debugfs.h"
+#include "sxe2_drv_rdma_rcms.h"
+#include "sxe2_drv_rcms_debugfs.h"
+
+u64 sxe2_rcms_num_to_liner_addr(struct sxe2_rcms_info *rcms_info, u32 obj_type,
+				u32 obj_num)
+{
+	u64 liner_addr;
+
+	liner_addr = rcms_info->rcms_obj[obj_type].base +
+		     obj_num * rcms_info->rcms_obj[obj_type].size;
+	return liner_addr;
+}
+
+int sxe2_rcms_num_to_ctx_va_pointer(struct sxe2_rdma_device *rdma_dev,
+				    u32 obj_type, u32 obj_num,
+				    void **va_pointer)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rcms_info *rcms_info =
+		rdma_dev->rdma_func->ctx_dev.rcms_info;
+	u64 liner_addr;
+	u32 fpte_idx;
+	u32 spte_idx;
+	struct sxe2_rcms_fpt_entry *fpte;
+	struct sxe2_rcms_spt_entry *spte;
+	u8 *byte_pointer;
+	u32 cp_offset;
+
+	if (obj_type >= SXE2_RCMS_OBJ_MAX) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:num to ctx va input obj type err obj type=%u\n",
+			obj_type);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (obj_type == SXE2_RCMS_OBJ_RESP || obj_type == SXE2_RCMS_OBJ_SSNT ||
+	    obj_type == SXE2_RCMS_OBJ_IRRL ||
+	    obj_type == SXE2_RCMS_OBJ_ACK_TIMEOUT) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:num to ctx va input obj type err obj type=%u\n",
+			obj_type);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (obj_num >= rcms_info->rcms_obj[obj_type].cnt) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:num to ctx va input obj num err obj num=%u max num=%u\n",
+			obj_num, rcms_info->rcms_obj[obj_type].cnt - 1);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	liner_addr = sxe2_rcms_num_to_liner_addr(rcms_info, obj_type, obj_num);
+	fpte_idx = FPT_INDEX_GET(liner_addr);
+	fpte = &(rcms_info->fpt.fpte[fpte_idx]);
+
+	if (!fpte || !fpte->valid) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:num to ctx va fpte valid err or fpte is null fpte idx=%u\n",
+			fpte_idx);
+		ret = -EINVAL;
+		goto end;
+	}
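+
+	/*
+	 * Two-level context lookup: a FIRST-type FPT entry maps the object's
+	 * context page directly, while a SECOND-type entry points to an SPT
+	 * whose entries map the individual 4K context pages.
+	 */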
+ + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + cp_offset = FIST_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(fpte->u.cp.page_addr.va); + byte_pointer += cp_offset; + DRV_RDMA_LOG_DEV_DEBUG( + "rcms debugfs:obj %u num %u\n" + "\tliner addr=%#llx fpte idx=%u entry type=%u va=%p\n", + obj_type, obj_num, liner_addr, fpte_idx, + fpte->entry_type, byte_pointer); + *va_pointer = (void *)byte_pointer; + } else { + spte_idx = LINER_ADDR_TO_REL_SPT_IDX(liner_addr); + spte = &(fpte->u.spt.spte[spte_idx]); + if (!spte->valid) { + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:num to ctx va spte valid err spte idx=%u\n", + spte_idx); + ret = -EINVAL; + goto end; + } else { + cp_offset = SECOND_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(spte->cp.page_addr.va); + byte_pointer += cp_offset; + DRV_RDMA_LOG_DEV_DEBUG( + "rcms debugfs:obj %u num %u\n" + "\tliner addr=%#llx fpte idx=%u spte idx=%u va=%p\n", + obj_type, obj_num, liner_addr, fpte_idx, + spte_idx, byte_pointer); + *va_pointer = (void *)byte_pointer; + } + } + +end: + return ret; +} + +static ssize_t drv_rdma_rcms_info_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + int i; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rcms_info *rcms_info; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + dev = &rdma_dev->rdma_func->ctx_dev; + rcms_info = dev->rcms_info; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf(rsp_end, len_total, "rcms info:\n"); + len_total += + dbg_vsnprintf(rsp_end, len_total, + "privileged=%u rcms fn id=%u ctx init mode=%u\n", + dev->privileged, rcms_info->rcms_fn_id, + rdma_dev->rdma_func->rcms_mode.ctx_mode); + len_total += dbg_vsnprintf(rsp_end, len_total, "first fpte index=%u\n", + rcms_info->first_fpte_index); + len_total += + dbg_vsnprintf(rsp_end, len_total, "ctx max fpte index=%u\n", + rcms_info->max_fpte_index); + len_total += dbg_vsnprintf(rsp_end, len_total, "func max fpte cnt=%u\n", + rcms_info->max_fpte_cnt); + len_total += + dbg_vsnprintf(rsp_end, len_total, "ctx fpte needed cnt=%u\n", + rcms_info->fpte_needed); + len_total += dbg_vsnprintf(rsp_end, len_total, "1GB fpte cnt=%u\n", + rcms_info->first_page_fpte); + len_total += dbg_vsnprintf(rsp_end, len_total, "max ceqs=%u\n", + rcms_info->max_ceqs); + len_total += dbg_vsnprintf(rsp_end, len_total, "max db page num=%u\n", + rcms_info->max_db_page_num); + len_total += dbg_vsnprintf(rsp_end, len_total, "db bar addr=0x%x\n", + rcms_info->db_bar_addr); + len_total += dbg_vsnprintf(rsp_end, len_total, "max cc qp=%u\n", + rcms_info->max_cc_qp_cnt); + len_total += dbg_vsnprintf(rsp_end, len_total, "create mode=%u\n", + rcms_info->create_mode); + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "obj %u max cnt=%u cnt=%u size=%u liner addr base=0x%llx\n", + i, rcms_info->rcms_obj[i].max_cnt, + rcms_info->rcms_obj[i].cnt, rcms_info->rcms_obj[i].size, + rcms_info->rcms_obj[i].base); + } + + ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, + 
(size_t)len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zu\n", + ret); + } + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_rcms_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_rcms_info_read, +}; + +static ssize_t drv_rdma_pbl_info_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_pbl_pble_rsrc *pble_rsrc; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + pble_rsrc = rdma_dev->rdma_func->pble_rsrc; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + + len_total += dbg_vsnprintf(rsp_end, len_total, "pbl info:\n"); + len_total += dbg_vsnprintf(rsp_end, len_total, + "privileged=%u pbl init mode=%u\n", + pble_rsrc->dev->privileged, + rdma_dev->rdma_func->rcms_mode.pbl_mode); + len_total += + dbg_vsnprintf(rsp_end, len_total, "alloced normal pble=%u\n", + pble_rsrc->allocated_pbles); + len_total += dbg_vsnprintf(rsp_end, len_total, + "unallocated normal pble=%u\n", + pble_rsrc->unallocated_pble); + len_total += dbg_vsnprintf(rsp_end, len_total, "pble base addr=%#llx\n", + pble_rsrc->pble_base_addr); + len_total += dbg_vsnprintf(rsp_end, len_total, + "alloc pble base addr=%#llx\n", + pble_rsrc->alloc_pble_base_addr); + len_total += + dbg_vsnprintf(rsp_end, len_total, "second type fpte cnt=%u\n", + pble_rsrc->second_type_fpte_cnt); + len_total += + dbg_vsnprintf(rsp_end, len_total, "third type fpte cnt=%u\n", + pble_rsrc->third_type_fpte_cnt); + if (pble_rsrc->first_page_en) { + len_total += dbg_vsnprintf( + rsp_end, len_total, "1GB fpte cnt=%u\n", + pble_rsrc->first_page_bitmap.max_fpte_cnt); + len_total += dbg_vsnprintf( + rsp_end, len_total, "1GB fpte start fpte idx=%u\n", + pble_rsrc->first_page_bitmap.first_fpte_idx); + len_total += + dbg_vsnprintf(rsp_end, len_total, + "allocated 1GB fpte cnt=%u\n", + pble_rsrc->allocated_first_type_fpte_cnt); + len_total += dbg_vsnprintf( + rsp_end, len_total, "unallocated 1GB fpte cnt=%u\n", + pble_rsrc->unallocated_first_type_fpte_cnt); + } else { + len_total += dbg_vsnprintf(rsp_end, len_total, + "not support 1 GB page\n"); + } + ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zu\n", + ret); + } + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_pbl_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_pbl_info_read, +}; + +#ifdef SXE2_CFG_DEBUG +static ssize_t drv_rdma_rcms_read_fpte_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[RCMS_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u32 fpte_idx; + u32 fpte_cnt; + struct sxe2_rcms_info *rcms_info; + + if (*off != 0) + goto end; + + if (count >= RCMS_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct 
sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("rcms debugfs:dev find failed err\n");
+		goto end;
+	}
+	rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info;
+
+	if (copy_from_user(cmd, buf, count)) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:Cmd copy from user failed err\n");
+		goto end;
+	}
+	ret = sscanf(cmd, "%u:%u", &fpte_idx, &fpte_cnt);
+	if (ret != 2) {
+		ret = -ENODATA;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:analyze cmd err please input fpte_idx:cnt\n");
+		goto end;
+	}
+
+	DRV_RDMA_LOG_DEV_DEBUG("rcms debugfs:input fpte idx=%u fpte cnt=%u\n",
+			       fpte_idx, fpte_cnt);
+	if (fpte_idx < rcms_info->first_fpte_index ||
+	    fpte_idx > rcms_info->max_fpte_index) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input fpte idx err func idx range %u - %u\n",
+			rcms_info->first_fpte_index,
+			rcms_info->max_fpte_index);
+		ret = -EINVAL;
+		goto end;
+	}
+	rcms_info->read_fpte_input.fpte_idx = fpte_idx;
+
+	if (fpte_cnt > rcms_info->fpte_needed) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input fpte cnt err func fpte needed=%u\n",
+			rcms_info->fpte_needed);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	if (fpte_cnt > RCMS_DEBUGFS_READ_FPTE_MAX_CNT) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input fpte cnt support cnt=%u\n",
+			RCMS_DEBUGFS_READ_FPTE_MAX_CNT);
+		fpte_cnt = RCMS_DEBUGFS_READ_FPTE_MAX_CNT;
+	}
+	rcms_info->read_fpte_input.fpte_cnt = fpte_cnt;
+
+	ret = (ssize_t)count;
+	*off = (loff_t)count;
+
+end:
+	return ret;
+}
+
+static ssize_t drv_rdma_rcms_read_fpte_read(struct file *filp, char __user *buf,
+					    size_t count, loff_t *off)
+{
+	ssize_t ret = SXE2_OK;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	u32 i;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rcms_info *rcms_info;
+	u32 fpte_idx;
+	u32 fpte_cnt;
+	struct sxe2_rcms_fpt_entry *fpte;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"rcms debugfs:find dev struct from private_data failed err\n");
+		goto end;
+	}
+
+	rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info;
+	fpte_idx = rcms_info->read_fpte_input.fpte_idx;
+	fpte_cnt = rcms_info->read_fpte_input.fpte_cnt;
+
+	rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE,
+		      GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:rcms info rsp kmalloc failed err\n");
+		goto end;
+	}
+	rsp_end = rsp;
+	for (i = 0; i < fpte_cnt; i++) {
+		len_total += dbg_vsnprintf(rsp_end, len_total,
+					   "fpte idx %u info:\n", fpte_idx);
+		fpte = &(rcms_info->fpt.fpte[fpte_idx]);
+		if (!fpte || !fpte->valid) {
+			len_total += dbg_vsnprintf(
+				rsp_end, len_total,
+				"fpte is invalid or ptr is null\n");
+		} else {
+			len_total += dbg_vsnprintf(rsp_end, len_total,
+						   "fpte entry type=%u\n",
+						   fpte->entry_type);
+			if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_SECOND) {
+				len_total += dbg_vsnprintf(
+					rsp_end, len_total,
+					"spt page pa=%#llx\n",
+					fpte->u.spt.spt_page_addr.pa);
+				len_total += dbg_vsnprintf(rsp_end, len_total,
+							   "spt use cnt=%u\n",
+							   fpte->u.spt.use_cnt);
+			} else {
+				len_total +=
+					dbg_vsnprintf(rsp_end, len_total,
+						      "cp page pa=%#llx\n",
+						      fpte->u.cp.page_addr.pa);
+				len_total += dbg_vsnprintf(rsp_end, len_total,
+							   "cp use cnt=%u\n",
+							   fpte->u.cp.use_cnt);
+			}
+		}
+		len_total += dbg_vsnprintf(rsp_end, len_total,
+					   "---------------------\n");
+		fpte_idx++;
+	}
+
+	ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total);
+	if (ret < 0) {
+		DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zd\n",
+				     ret);
+	}
+
+	kfree(rsp);
+	rsp = NULL;
+
+end:
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_rcms_read_fpte_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = drv_rdma_rcms_read_fpte_read,
+	.write = drv_rdma_rcms_read_fpte_write,
+};
+
+static ssize_t drv_rdma_rcms_read_spte_write(struct file *filp,
+					     const char __user *buf,
+					     size_t count, loff_t *off)
+{
+	ssize_t ret = SXE2_OK;
+	char cmd[RCMS_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 };
+	struct sxe2_rdma_device *rdma_dev;
+	u32 fpte_idx;
+	u32 spte_idx;
+	u32 spte_cnt;
+	struct sxe2_rcms_info *rcms_info;
+
+	if (*off != 0)
+		goto end;
+
+	if (count >= RCMS_DEBUGFS_WRITE_BUF_MAX_LEN) {
+		ret = -ENOSPC;
+		DRV_RDMA_LOG_ERROR(
+			"rcms debugfs:cmd exceeded length limit err\n");
+		goto end;
+	}
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("rcms debugfs:dev find failed err\n");
+		goto end;
+	}
+	rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info;
+
+	if (copy_from_user(cmd, buf, count)) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:Cmd copy from user failed err\n");
+		goto end;
+	}
+	ret = sscanf(cmd, "%u:%u:%u", &fpte_idx, &spte_idx, &spte_cnt);
+	if (ret != 3) {
+		ret = -ENODATA;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:analyze cmd err please input fpte_idx:spte_idx:spte_cnt\n");
+		goto end;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"rcms debugfs:input fpte idx=%u spte idx=%u spte cnt=%u\n",
+		fpte_idx, spte_idx, spte_cnt);
+	if (fpte_idx < rcms_info->first_fpte_index ||
+	    fpte_idx > rcms_info->max_fpte_index) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input fpte idx err func idx range %u - %u\n",
+			rcms_info->first_fpte_index,
+			rcms_info->max_fpte_index);
+		ret = -EINVAL;
+		goto end;
+	}
+	rcms_info->read_spte_input.fpte_idx = fpte_idx;
+	if (spte_idx > RCMS_DEBUGFS_SPTE_MAX_IDX) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input spte idx err func idx range 0 - 511\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	rcms_info->read_spte_input.spte_idx = spte_idx;
+	if (spte_cnt > RCMS_DEBUGFS_READ_SPTE_MAXCNT) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input spte cnt support cnt=%u\n",
+			RCMS_DEBUGFS_READ_SPTE_MAXCNT);
+		spte_cnt = RCMS_DEBUGFS_READ_SPTE_MAXCNT;
+	}
+
+	rcms_info->read_spte_input.spte_cnt = spte_cnt;
+
+	ret = (ssize_t)count;
+	*off = (loff_t)count;
+
+end:
+	return ret;
+}
+
+static ssize_t drv_rdma_rcms_read_spte_read(struct file *filp, char __user *buf,
+					    size_t count, loff_t *off)
+{
+	ssize_t ret = SXE2_OK;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	u32 i;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rcms_info *rcms_info;
+	u32 fpte_idx;
+	u32 spte_idx;
+	u32 spte_cnt;
+	struct sxe2_rcms_fpt_entry *fpte;
+	struct sxe2_rcms_spt_entry *spte;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"rcms debugfs:find dev struct from private_data failed err\n");
+		goto end;
+	}
+
+	rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info;
+	fpte_idx = rcms_info->read_spte_input.fpte_idx;
+	spte_idx = rcms_info->read_spte_input.spte_idx;
+	spte_cnt = rcms_info->read_spte_input.spte_cnt;
+
+	rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE,
+		      GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:rcms info rsp kmalloc failed err\n");
+		goto end;
+	}
+	rsp_end = rsp;
+	fpte = &(rcms_info->fpt.fpte[fpte_idx]);
+	if (!fpte || !fpte->valid ||
+	    fpte->entry_type != SXE2_RCMS_FPT_TYPE_SECOND) {
+		len_total += dbg_vsnprintf(
+			rsp_end, len_total,
+			"fpte ptr is null or fpte is invalid or entry type is first type\n");
+	} else {
+		for (i = 0; i < spte_cnt; i++) {
+			len_total +=
+				dbg_vsnprintf(rsp_end, len_total,
+					      "fpte idx %u spte idx %u info:\n",
+					      fpte_idx, spte_idx);
+			spte = &(fpte->u.spt.spte[spte_idx]);
+			if (!spte->valid) {
+				len_total += dbg_vsnprintf(rsp_end, len_total,
+							   "spte is invalid\n");
+			} else {
+				len_total +=
+					dbg_vsnprintf(rsp_end, len_total,
+						      "cp page pa=%#llx\n",
+						      spte->cp.page_addr.pa);
+				len_total +=
+					dbg_vsnprintf(rsp_end, len_total,
+						      "cp page use cnt=%u\n",
+						      spte->cp.use_cnt);
+			}
+			len_total += dbg_vsnprintf(rsp_end, len_total,
+						   "---------------------\n");
+			spte_idx++;
+		}
+	}
+	ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total);
+	if (ret < 0) {
+		DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zd\n",
+				     ret);
+	}
+
+	kfree(rsp);
+	rsp = NULL;
+
+end:
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_rcms_read_spte_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = drv_rdma_rcms_read_spte_read,
+	.write = drv_rdma_rcms_read_spte_write,
+};
+
+static ssize_t drv_rdma_rcms_read_liner_addr_write(struct file *filp,
+						   const char __user *buf,
+						   size_t count, loff_t *off)
+{
+	ssize_t ret = SXE2_OK;
+	char cmd[RCMS_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 };
+	struct sxe2_rdma_device *rdma_dev;
+	u64 liner_addr;
+	u32 size;
+	u32 fpte_idx;
+	struct sxe2_rcms_info *rcms_info;
+
+	if (*off != 0)
+		goto end;
+
+	if (count >= RCMS_DEBUGFS_WRITE_BUF_MAX_LEN) {
+		ret = -ENOSPC;
+		DRV_RDMA_LOG_ERROR(
+			"rcms debugfs:cmd exceeded length limit err\n");
+		goto end;
+	}
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("rcms debugfs:dev find failed err\n");
+		goto end;
+	}
+	rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info;
+
+	if (copy_from_user(cmd, buf, count)) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:cmd copy from user failed err\n");
+		goto end;
+	}
+
+	ret = sscanf(cmd, "%llx:%u", &liner_addr, &size);
+	if (ret != 2) {
+		ret = -ENODATA;
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:analyze cmd err please input liner_addr:size e.g. 0xFF:1024\n");
+		goto end;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG("rcms debugfs:input liner addr=%#llx size=%u\n",
+			       liner_addr, size);
+	fpte_idx = FPT_INDEX_GET(liner_addr);
+
+	if (fpte_idx < rcms_info->first_fpte_index ||
+	    fpte_idx > rcms_info->max_fpte_index) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input liner addr idx err fpte idx=%u func idx range %u - %u\n",
+			fpte_idx, rcms_info->first_fpte_index,
+			rcms_info->max_fpte_index);
+		ret = -EINVAL;
+		goto end;
+	}
+	rcms_info->read_liner_addr_input.liner_addr = liner_addr;
+	if (size > RCMS_DEBUGFS_READ_LINER_ADDR_MAX_SIZE) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"rcms debugfs:input liner addr size support %uB\n",
+			RCMS_DEBUGFS_READ_LINER_ADDR_MAX_SIZE);
+		size = RCMS_DEBUGFS_READ_LINER_ADDR_MAX_SIZE;
+	}
+
+	rcms_info->read_liner_addr_input.size = size;
+
+	ret = (ssize_t)count;
+	*off = (loff_t)count;
+
+end:
+	return ret;
+}
+
+static void drv_rdma_rcms_print_liner_addr(struct sxe2_rdma_device *rdma_dev,
+					   u64 liner_addr,
+					   struct sxe2_rcms_fpt_entry *fpte,
+					   u32 *len_total, char *rsp_end)
+{
+	u32 i, j;
+	u8 *byte_pointer;
+	u32 cp_offset;
+	u64 size;
+	u32 fpte_idx;
+	u32 spte_idx;
+	struct sxe2_rcms_spt_entry *spte;
+
+	size = rdma_dev->rdma_func->ctx_dev.rcms_info->read_liner_addr_input
+		       .size;
+	fpte_idx = FPT_INDEX_GET(liner_addr);
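+	/* the dump below is clamped at the page boundary (2M or 4K) */
+	if 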
(fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + cp_offset = FIST_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(fpte->u.cp.page_addr.va); + byte_pointer += cp_offset; + *len_total += + dbg_vsnprintf(rsp_end, *len_total, + "cp offset=%x liner addr to pa=%#llx\n", + cp_offset, + fpte->u.cp.page_addr.pa + cp_offset); + size = (RCMS_DEBUGFS_2M_PAGE_MAX_OFFSET - cp_offset) <= size ? + (RCMS_DEBUGFS_2M_PAGE_MAX_OFFSET - cp_offset) : + size; + for (i = 0, j = 1; i < size; i++) { + *len_total += dbg_vsnprintf(rsp_end, *len_total, "%#x ", + *byte_pointer); + byte_pointer++; + j++; + if ((j % 32 == 0) || i == size - 1) { + *len_total += dbg_vsnprintf(rsp_end, *len_total, + "\n"); + j = 1; + } + } + } else { + spte_idx = LINER_ADDR_TO_REL_SPT_IDX(liner_addr); + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + spte = &(fpte->u.spt.spte[spte_idx]); + if (!spte->valid) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to spte idx=%u is invalid\n", + spte_idx); + goto end; + } else { + cp_offset = SECOND_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(spte->cp.page_addr.va); + byte_pointer += cp_offset; + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to spte idx=%u cp offset=%u liner to pa=%#llx\n", + spte_idx, cp_offset, + spte->cp.page_addr.pa + cp_offset); + size = (RCMS_DEBUGFS_4K_PAGE_MAX_OFFSET - cp_offset) <= + size ? + (RCMS_DEBUGFS_4K_PAGE_MAX_OFFSET - + cp_offset) : + size; + for (i = 0, j = 1; i < size; i++) { + *len_total += + dbg_vsnprintf(rsp_end, *len_total, + "%x ", *byte_pointer); + byte_pointer++; + j++; + if ((j % 32 == 0) || i == size - 1) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, "\n"); + j = 1; + } + } + } + } + +end: + return; +} + +static ssize_t drv_rdma_rcms_read_liner_addr_read(struct file *filp, + char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + u32 len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rcms_info *rcms_info; + u32 fpte_idx; + u64 liner_addr; + struct sxe2_rcms_fpt_entry *fpte; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info; + liner_addr = rcms_info->read_liner_addr_input.liner_addr; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + fpte_idx = FPT_INDEX_GET(liner_addr); + len_total += dbg_vsnprintf(rsp_end, len_total, "liner addr %#llx:\n", + liner_addr); + fpte = &(rcms_info->fpt.fpte[fpte_idx]); + if (!fpte || !fpte->valid) { + len_total += + dbg_vsnprintf(rsp_end, len_total, + "fpte is invalid or fpte ptr is null\n"); + goto show_buffer; + } + + drv_rdma_rcms_print_liner_addr(rdma_dev, liner_addr, fpte, &len_total, + rsp_end); + +show_buffer: + ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zu\n", + ret); + } + + kfree(rsp); + rsp = NULL; +end: + return ret; +} + +static const struct 
file_operations sxe2_rdma_rcms_read_liner_addr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_rcms_read_liner_addr_read, + .write = drv_rdma_rcms_read_liner_addr_write, +}; + +static ssize_t drv_rdma_rcms_read_obj_ctx_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[RCMS_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u32 obj_type; + u32 obj_num; + struct sxe2_rcms_info *rcms_info; + + if (*off != 0) + goto end; + + if (count >= RCMS_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("rcms debugfs:dev find failed err\n"); + goto end; + } + rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info; + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:cmd copy from user failed err\n"); + goto end; + } + + ret = sscanf(cmd, "%u:%u", &obj_type, &obj_num); + if (ret != 2) { + ret = -ENODATA; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:analyze cmd err please input liner_addr=0xFF size=1024\n"); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("rcms debugfs:input obj type=%u num=%u\n", + obj_type, obj_num); + + if (obj_type >= SXE2_RCMS_OBJ_MAX) { + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:intput obj type=%u err max obj type=%u\n", + obj_type, SXE2_RCMS_OBJ_MAX); + ret = -EINVAL; + goto end; + } + rcms_info->read_obj_ctx_input.obj_type = obj_type; + if (obj_num >= rcms_info->rcms_obj[obj_type].cnt) { + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:input obj %u obj num %u err support max cnt=%u\n", + obj_type, obj_num, rcms_info->rcms_obj[obj_type].cnt); + ret = -EINVAL; + goto end; + } + + rcms_info->read_obj_ctx_input.obj_num = obj_num; + + ret = (ssize_t)count; + *off = (loff_t)count; + +end: + return ret; +} + +static void drv_rdma_rcms_print_obj_ctx(struct sxe2_rdma_device *rdma_dev, + u32 obj_type, u64 liner_addr, + struct sxe2_rcms_fpt_entry *fpte, + u32 *len_total, char *rsp_end) +{ + u32 spte_idx; + u8 *byte_pointer; + struct sxe2_rcms_spt_entry *spte; + u32 fpte_idx; + u32 cp_offset; + u64 i, j; + u64 size; + + size = rdma_dev->rdma_func->ctx_dev.rcms_info->rcms_obj[obj_type].size; + fpte_idx = FPT_INDEX_GET(liner_addr); + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + cp_offset = FIST_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(fpte->u.cp.page_addr.va); + byte_pointer += cp_offset; + *len_total += + dbg_vsnprintf(rsp_end, *len_total, + "cp offset=%x liner addr to pa=%#llx\n", + cp_offset, + fpte->u.cp.page_addr.pa + cp_offset); + if (obj_type != SXE2_RCMS_OBJ_RESP && + obj_type != SXE2_RCMS_OBJ_SSNT && + obj_type != SXE2_RCMS_OBJ_IRRL && + obj_type != SXE2_RCMS_OBJ_ACK_TIMEOUT) { + for (i = 0, j = 1; i < size; i++) { + *len_total += + dbg_vsnprintf(rsp_end, *len_total, + "%#x ", *byte_pointer); + byte_pointer++; + j++; + if ((j % 32 == 0) || i == size - 1) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, "\n"); + j = 1; + } + } + } + } else { + spte_idx = LINER_ADDR_TO_REL_SPT_IDX(liner_addr); + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + spte = &(fpte->u.spt.spte[spte_idx]); + if (!spte->valid) { + 
*len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to spte idx=%u is invalid\n", + spte_idx); + goto end; + } else { + cp_offset = SECOND_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + byte_pointer = (u8 *)(spte->cp.page_addr.va); + byte_pointer += cp_offset; + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "liner addr to spte idx=%u cp offset=%u liner to pa=%#llx\n", + spte_idx, cp_offset, + spte->cp.page_addr.pa + cp_offset); + if (obj_type != SXE2_RCMS_OBJ_RESP && + obj_type != SXE2_RCMS_OBJ_SSNT && + obj_type != SXE2_RCMS_OBJ_IRRL && + obj_type != SXE2_RCMS_OBJ_ACK_TIMEOUT) { + for (i = 0, j = 1; i < size; i++) { + *len_total += + dbg_vsnprintf(rsp_end, + *len_total, "%x ", + *byte_pointer); + byte_pointer++; + j++; + if ((j % 32 == 0) || i == size - 1) { + *len_total += dbg_vsnprintf( + rsp_end, *len_total, + "\n"); + j = 1; + } + } + } + } + } +end: + return; +} + +static ssize_t drv_rdma_rcms_read_obj_ctx_read(struct file *filp, + char __user *buf, size_t count, + loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rcms_info *rcms_info; + u32 fpte_idx; + u64 liner_addr; + u32 obj_type; + u32 obj_num; + struct sxe2_rcms_fpt_entry *fpte; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info; + + obj_type = rcms_info->read_obj_ctx_input.obj_type; + obj_num = rcms_info->read_obj_ctx_input.obj_num; + + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + liner_addr = sxe2_rcms_num_to_liner_addr(rcms_info, obj_type, obj_num); + fpte_idx = FPT_INDEX_GET(liner_addr); + len_total += dbg_vsnprintf(rsp_end, len_total, + "obj %u num %u liner addr %#llx:\n", + obj_type, obj_num, liner_addr); + fpte = &(rcms_info->fpt.fpte[fpte_idx]); + if (!fpte || !fpte->valid) { + len_total += + dbg_vsnprintf(rsp_end, len_total, + "fpte is invalid or fpte ptr is null\n"); + goto show_buffer; + } + + drv_rdma_rcms_print_obj_ctx(rdma_dev, obj_type, liner_addr, fpte, + (u32 *)&len_total, rsp_end); + +show_buffer: + ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zu\n", + ret); + } + + kfree(rsp); + rsp = NULL; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_rcms_read_obj_ctx_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_rcms_read_obj_ctx_read, + .write = drv_rdma_rcms_read_obj_ctx_write, +}; + +static ssize_t drv_rdma_rcms_num_to_liner_addr_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + char cmd[RCMS_DEBUGFS_WRITE_BUF_MAX_LEN] = { 0 }; + struct sxe2_rdma_device *rdma_dev; + u32 obj_type; + u32 obj_num; + struct sxe2_rcms_info *rcms_info; + + if (*off != 0) + goto end; + + if (count >= RCMS_DEBUGFS_WRITE_BUF_MAX_LEN) { + ret = -ENOSPC; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:cmd exceeded length limit err\n"); + goto end; + } + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("rcms debugfs:dev find failed err\n"); + goto end; + } + 
rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info; + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:cmd copy from user failed err\n"); + goto end; + } + + ret = sscanf(cmd, "%u:%u", &obj_type, &obj_num); + if (ret != 2) { + ret = -ENODATA; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:analyze cmd err please input liner_addr=0xFF size=1024\n"); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("rcms debugfs:input obj type=%u num=%u\n", + obj_type, obj_num); + + if (obj_type >= SXE2_RCMS_OBJ_MAX) { + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:intput obj type=%u err max obj type=%u\n", + obj_type, SXE2_RCMS_OBJ_MAX); + ret = -EINVAL; + goto end; + } + rcms_info->num_to_la_input.obj_type = obj_type; + if (obj_num >= rcms_info->rcms_obj[obj_type].cnt) { + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:input obj %u obj num %u err support max cnt=%u\n", + obj_type, obj_num, rcms_info->rcms_obj[obj_type].cnt); + ret = -EINVAL; + goto end; + } + + rcms_info->num_to_la_input.obj_num = obj_num; + + ret = (ssize_t)count; + *off = (loff_t)count; + +end: + return ret; +} + +static ssize_t drv_rdma_rcms_num_to_liner_addr_read(struct file *filp, + char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = SXE2_OK; + size_t len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rcms_info *rcms_info; + u32 fpte_idx; + u32 spte_idx; + u64 liner_addr; + u32 obj_type; + u32 obj_num; + u32 cp_offset; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_spt_entry *spte; + + if (*off != 0) + goto end; + + rdma_dev = (struct sxe2_rdma_device *)filp->private_data; + if (!rdma_dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "rcms debugfs:find dev struct from private_data failed err\n"); + goto end; + } + + rcms_info = rdma_dev->rdma_func->ctx_dev.rcms_info; + obj_type = rcms_info->num_to_la_input.obj_type; + obj_num = rcms_info->num_to_la_input.obj_num; + rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:rcms info rsp kmalloc failed err\n"); + goto end; + } + rsp_end = rsp; + liner_addr = sxe2_rcms_num_to_liner_addr(rcms_info, obj_type, obj_num); + fpte_idx = FPT_INDEX_GET(liner_addr); + len_total += dbg_vsnprintf(rsp_end, len_total, + "obj %u num %u liner addr %#llx:\n", + obj_type, obj_num, liner_addr); + fpte = &(rcms_info->fpt.fpte[fpte_idx]); + if (!fpte || !fpte->valid) { + len_total += + dbg_vsnprintf(rsp_end, len_total, + "fpte is invalid or fpte ptr is null\n"); + goto show_buffer; + } + + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + cp_offset = FIST_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + len_total += + dbg_vsnprintf(rsp_end, len_total, + "cp offset=%x liner addr to pa=%#llx\n", + cp_offset, + fpte->u.cp.page_addr.pa + cp_offset); + } else { + spte_idx = LINER_ADDR_TO_REL_SPT_IDX(liner_addr); + len_total += dbg_vsnprintf( + rsp_end, len_total, + "liner addr to fpte idx=%u entry type=%u\n", fpte_idx, + fpte->entry_type); + spte = &(fpte->u.spt.spte[spte_idx]); + if (!spte->valid) { + len_total += dbg_vsnprintf( + rsp_end, len_total, + "liner addr to spte idx=%u is invalid\n", + spte_idx); + goto show_buffer; + } else { + cp_offset = SECOND_PAGE_TABLE_CP_OFFSET_GET(liner_addr); + len_total += dbg_vsnprintf( + rsp_end, len_total, + "liner addr to spte idx=%u cp offset=%u liner to pa=%#llx\n", + spte_idx, 
cp_offset, + spte->cp.page_addr.pa + cp_offset); + } + } +show_buffer: + ret = (ssize_t)simple_read_from_buffer(buf, count, off, rsp, len_total); + if (ret < 0) { + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:simple read error %zu\n", + ret); + } + kfree(rsp); + rsp = NULL; +end: + return ret; +} + +static const struct file_operations sxe2_rdma_rcms_num_to_liner_addr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_rcms_num_to_liner_addr_read, + .write = drv_rdma_rcms_num_to_liner_addr_write, +}; +#endif + +int drv_rdma_debug_rcms_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rcms_info *rcms_info) +{ + int ret = SXE2_OK; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "rcms debugfs:debugfs root dir not exist ret=%d\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->rcms_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("rcms debugfs:dir not exist ret=%d\n", + ret); + goto end; + } + + debugfs_create_file("rcms_info", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_info_fops); + debugfs_create_file("pbl_info", SXE2_DEBUG_FILE_ONLY_READ, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_pbl_info_fops); +#ifdef SXE2_CFG_DEBUG + debugfs_create_file("rcms_read_fpte", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_read_fpte_fops); + debugfs_create_file("rcms_read_spte", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_read_spte_fops); + debugfs_create_file("rcms_read_liner_addr", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_read_liner_addr_fops); + debugfs_create_file("rcms_read_obj_ctx", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_read_obj_ctx_fops); + debugfs_create_file("rcms_num_to_liner_addr", + SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->rcms_debugfs, rdma_dev, + &sxe2_rdma_rcms_num_to_liner_addr_fops); +#endif + +end: + return ret; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..3ea788b77928bec66d47fb868cc833a356d7c72f --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rcms_debugfs.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rcms_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_RCMS_DEBUGFS_H__ +#define __SXE2_DRV_RCMS_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +#define SXE2_OK 0 +#define RCMS_DEBUGFS_WRITE_BUF_MAX_LEN 64 +#define RCMS_DEBUGFS_READ_FPTE_MAX_CNT 10 +#define RCMS_DEBUGFS_SPTE_MAX_IDX 511 +#define RCMS_DEBUGFS_SPTE_MAX_CNT 512 +#define RCMS_DEBUGFS_READ_SPTE_MAXCNT 10 +#define RCMS_DEBUGFS_READ_LINER_ADDR_MAX_SIZE 512 +#define RCMS_DEBUGFS_2M_PAGE_MAX_OFFSET 0x1FFFFF +#define RCMS_DEBUGFS_4K_PAGE_MAX_OFFSET 0xFFF + +int drv_rdma_debug_rcms_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rcms_info *rcms_info); + +int sxe2_rcms_num_to_ctx_va_pointer(struct sxe2_rdma_device *rdma_dev, + u32 obj_type, u32 obj_num, + void **va_pointer); + +u64 sxe2_rcms_num_to_liner_addr(struct sxe2_rcms_info *rcms_info, u32 obj_type, + u32 obj_num); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_common.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_common.h new file mode 100644 index 0000000000000000000000000000000000000000..8643fa95c615c5e1a3db2b0a6f35915a142969df --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_common.h @@ -0,0 +1,2608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_common.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_RDMA_COMMON_H__ +#define __SXE2_DRV_RDMA_COMMON_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_rdma_ifc.h" + +#include "sxe2_drv_rdma_inject.h" + +#include "sxe2_drv_aux.h" +#include "sxe2_cmd.h" + +#define SXE2_MQ_WAIT_POLL_REGS 1 +#define SXE2_MQ_WAIT_POLL_CQ 2 +#define SXE2_MQ_WAIT_EVENT 3 +#define SXE2_MQ_WAIT_CQE 4 + +#define SXE2_RDMA_MIN_RNR_NAK_DEFAULT 2 + +#define SQ_CQ 1 +#define RQ_CQ 2 + +#define SXE2_PRINT_HEX_BYTE_PER_ROW 16 +#define SXE2_PRINT_HEX_BREAK_PER_BYTE \ + 8 +#define SXE2_PRINT_HEX_MUL_BYTE_8 8 + +#define SXE2_CQ_DBNOTE_ARMCI GENMASK_ULL(23, 0) +#define SXE2_CQ_DBNOTE_CMD BIT_ULL(31) +#define SXE2_CQ_DBNOTE_CMDSN GENMASK_ULL(30, 29) + +#define SXE2_RDMA_IRQ_NAME_STR_LEN 64 +#define SXE2_RDMA_VCHNL_MAX_MSG_SIZE 512 +#define IB_DEVICE_NAME_MAX 64 +#define SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES 35 +#define SXE2_RCMS_VF_MAX_UPDATE_FPTE_ENTRIES 35 + +#define SXE2_RDMA_DB_PAGE_SHIFT (12) +#define SXE2_RDMA_DB_PAGE_SIZE (4096) + +#define MQ_CRIERR_MAJ_ERRCODE (0xFFFF) + +#define MQ_CRIERR_MQC_NOT_CREATED (0x8030) +#define MQ_CRIERR_MQ_BASE_ERR (0x8031) +#define MQ_CRIERR_MQC_ECC_ERR (0x8032) +#define MQ_CRIERR_QP_DESTROY_ABORT (0x8034) + +#define SXE2_LAG_PRIMARY_IDX (0) +#define SXE2_LAG_SECONDARY_IDX (1) + +#define OFFSET_TPH_CONTROL (8) +#define OFFSET_TPH_CAPABILITY (4) + +#define OFFSET_TPHENABLE_IN_TPH_CONTROL \ + (8) +#define TPH_GET_CPU() raw_smp_processor_id() + +#define SXE2_KERNEL_TPH_EN_DEFAULT (1) + +#define TPH_CPUID_MASK 0xFF + +#define PCI_EXP_DEVCAP2_TPH_COMP_MASK (0x1000) +#define PCI_EXP_DEVCAP2_TPH_COMP_SHIFT (12) + +#define PCI_EXP_EXT_TPH_REQ_ST_DEVICE_MODE_MASK (0x2) + +#define PCI_EXP_EXT_TPH_REQ_ENABLE_MASK (0x100) + +#define SXE2_MAX_ACK_TIMEOUT_VAL 28 + +enum { PH_BI_DIRECTIONAL = 0x0, + PH_DXDX = 0x1, + PH_DWHR = 0x2, + PH_HWDR = 0x2, +}; + +enum { MODE_NO_ST = 0x0, + MODE_INTERRUPT_VECTOR = 0x1, + 
MODE_DEVICE_SPECIFIC = 0x2, +}; + +enum sxe2_rdma_vers { + SXE2_RDMA_GEN_RSVD = 0, + SXE2_RDMA_GEN_1 = 1, + SXE2_RDMA_GEN_MAX = SXE2_RDMA_GEN_1, +}; + +enum sxe2_cmpl_status { + SXE2_COMPL_STATUS_SUCCESS = 0, + SXE2_COMPL_STATUS_FLUSHED, + SXE2_COMPL_STATUS_INVALID_WQE, + SXE2_COMPL_STATUS_QP_CATASTROPHIC, + SXE2_COMPL_STATUS_REMOTE_TERMINATION, + SXE2_COMPL_STATUS_INVALID_STAG, + SXE2_COMPL_STATUS_BASE_BOUND_VIOLATION, + SXE2_COMPL_STATUS_ACCESS_VIOLATION, + SXE2_COMPL_STATUS_INVALID_PD_ID, + SXE2_COMPL_STATUS_WRAP_ERROR, + SXE2_COMPL_STATUS_STAG_INVALID_PDID, + SXE2_COMPL_STATUS_RDMA_READ_ZERO_ORD, + SXE2_COMPL_STATUS_QP_NOT_PRIVLEDGED, + SXE2_COMPL_STATUS_STAG_NOT_INVALID, + SXE2_COMPL_STATUS_INVALID_PHYS_BUF_SIZE, + SXE2_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY, + SXE2_COMPL_STATUS_INVALID_FBO, + SXE2_COMPL_STATUS_INVALID_LEN, + SXE2_COMPL_STATUS_INVALID_ACCESS, + SXE2_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG, + SXE2_COMPL_STATUS_INVALID_VIRT_ADDRESS, + SXE2_COMPL_STATUS_INVALID_REGION, + SXE2_COMPL_STATUS_INVALID_WINDOW, + SXE2_COMPL_STATUS_INVALID_TOTAL_LEN, + SXE2_COMPL_STATUS_UNKNOWN, +}; + +enum sxe2_arm_type { + SXE2_CQ_ARM_NEXT = 0, + SXE2_CQ_ARM_SOLICITED = 1, +}; + +enum sxe2_qp_caps { + SXE2_WRITE_WITH_IMM = 1, + SXE2_SEND_WITH_IMM = 2, + SXE2_ROCE = 4, + SXE2_PUSH_MODE = 8, +}; + +struct sxe2_rcms_update_fpt_entry { + u64 cmd; + u64 data; +}; +struct sxe2_rcms_update_fptes_info { + u32 cnt; + u16 rcms_fn_id; + struct sxe2_rcms_update_fpt_entry + entry[SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES]; +}; + +struct sxe2_rcms_vf_update_fptes_info { + u32 cnt; + bool set; + struct sxe2_rcms_update_fpt_entry + entry[SXE2_RCMS_VF_MAX_UPDATE_FPTE_ENTRIES]; +}; + +enum sxe2_rcms_init_mode { + SXE2_RCMS_FIRST_INIT_MODE = + 1, + SXE2_RCMS_SECOND_INIT_MODE = + 2, + SXE2_RCMS_INIT_MODE_MAX, +}; + +enum sxe2_rdma_feature_type { + SXE2_RDMA_HW_MQ_MAJOR_VERSION = + 0, + SXE2_RDMA_HW_MQ_MINOR_VERSION = + 1, + SXE2_RDMA_HW_MODEL_VERSION_USED = + 2, + SXE2_RDMA_ENDPT_TRK_EN = 3, + SXE2_RDMA_QSETS_MAX_NUMBER = 4, + SXE2_RDMA_FW_MAIN_VERSION = 5, + SXE2_RDMA_FW_SUB_VERSION = 6, + SXE2_RDMA_FW_FIX_VERSION = 7, + SXE2_RDMA_FW_BUILD_NUMBER = 8, + SXE2_RDMA_MAX_FEATURES = 9, +}; + +enum sxe2_rdma_hygon_en_type { + SXE2_RDMA_HYGON_DEFAULT = 0, + SXE2_RDMA_HYGON_FORCE_ENABLE = 1, + SXE2_RDMA_HYGON_FORCE_DISABLE = 2, +}; + +union sxe2_sockaddr { + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; +}; + +struct sxe2_av { + struct rdma_ah_attr attrs; + union sxe2_sockaddr sgid_addr; + union sxe2_sockaddr dgid_addr; + u8 net_type; +}; + +#define SXE2_HW_MAJVER_GEN_1 0 + +#define SXE2_GET_CURRENT_CQ_ELEM(_cq) \ + (((struct sxe2_cqe \ + *)((_cq)->cq_base))[SXE2_RING_CURRENT_HEAD((_cq)->cq_ring)] \ + .buf) + +#define SXE2_RING_INIT(_ring, _size) \ + { \ + (_ring).head = 0; \ + (_ring).tail = 0; \ + (_ring).size = (_size); \ + } +#define SXE2_RING_SIZE(_ring) ((_ring).size) +#define SXE2_RING_CURRENT_HEAD(_ring) ((_ring).head) +#define SXE2_RING_CURRENT_TAIL(_ring) ((_ring).tail) + +#define SXE2_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + u32 size; \ + size = (_ring).size; \ + if (!SXE2_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOMEM; \ + } \ + } +#define SXE2_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + u32 size; \ + size = (_ring).size; \ + if ((SXE2_RING_USED_QUANTA(_ring) + (_count)) < size) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOMEM; \ + } \ + } +#define 
SXE2_SQ_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + u32 size; \ + size = (_ring).size; \ + if (!SXE2_SQ_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOMEM; \ + } \ + } +#define SXE2_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + u32 size; \ + size = (_ring).size; \ + if ((SXE2_RING_USED_QUANTA(_ring) + (_count)) < (size - 1)) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOMEM; \ + } \ + } +#define SXE2_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \ + (_ring).head = ((_ring).head + (_count)) % (_ring).size + +#define SXE2_RING_MOVE_TAIL(_ring) \ + (_ring).tail = ((_ring).tail + 1) % (_ring).size + +#define SXE2_RING_MOVE_HEAD_NOCHECK(_ring) \ + (_ring).head = ((_ring).head + 1) % (_ring).size + +#define SXE2_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \ + (_ring).tail = ((_ring).tail + (_count)) % (_ring).size + +#define SXE2_RING_SET_TAIL(_ring, _pos) (_ring).tail = (_pos) % (_ring).size + +#define SXE2_RING_FULL_ERR(_ring) \ + ((SXE2_RING_USED_QUANTA(_ring) == ((_ring).size - 1))) + +#define SXE2_SQ_RING_FULL_ERR(_ring) \ + ((SXE2_RING_USED_QUANTA(_ring) == ((_ring).size - 1))) + +#define SXE2_RING_MORE_WORK(_ring) ((SXE2_RING_USED_QUANTA(_ring) != 0)) + +#define SXE2_RING_MORE_WORK_PAD(_ring) ((SXE2_RING_USED_QUANTA_PAD(_ring) != 0)) + +#define SXE2_RING_USED_QUANTA(_ring) \ + ((((_ring).head + (_ring).size - (_ring).tail) % (_ring).size)) + +#define SXE2_RING_USED_QUANTA_PAD(_ring) \ + ((((_ring).head + (_ring).size - (_ring).tail - 1) % (_ring).size)) + +#define SXE2_RING_FREE_QUANTA(_ring) \ + (((_ring).size - SXE2_RING_USED_QUANTA(_ring) - 1)) + +#define SXE2_SQ_RING_FREE_QUANTA(_ring) \ + (((_ring).size - SXE2_RING_USED_QUANTA(_ring) - 1)) + +#define SXE2_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \ + { \ + index = SXE2_RING_CURRENT_HEAD(_ring); \ + SXE2_RING_MOVE_HEAD(_ring, _retcode); \ + } + +#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits)) +#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits)) +#define LS_32_1(val, bits) ((u32)((val) << (bits))) +#define RS_32_1(val, bits) ((u32)((val) >> (bits))) + +#define SXE2_RDMA_MAX_STATS_16 0xffffULL +#define SXE2_RDMA_MAX_STATS_24 0xffffffULL +#define SXE2_RDMA_MAX_STATS_32 0xffffffffULL +#define SXE2_RDMA_MAX_STATS_48 0xffffffffffffULL +#define SXE2_RDMA_MAX_STATS_56 0xffffffffffffffULL +#define SXE2_RDMA_MAX_STATS_64 0xffffffffffffffffULL + +enum sxe2_sw_mq_op { + MQ_OP_CREATE_QP = 0, + MQ_OP_MODIFY_QP = 1, + MQ_OP_DESTROY_QP = 2, + MQ_OP_CREATE_CQ = 3, + MQ_OP_MODIFY_CQ = 4, + MQ_OP_DESTROY_CQ = 5, + MQ_OP_ALLOC_MR_KEY = 6, + MQ_OP_ALLOC_MW = 7, + MQ_OP_REG_MR = 8, + MQ_OP_QUERY_MR_KEY = 9, + MQ_OP_DEALLOC_MR_KEY = 10, + MQ_OP_MANAGE_PBLE_BP = 11, + MQ_OP_QUERY_QP = 12, + MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE = 13, + MQ_OP_CREATE_CEQ = 14, + MQ_OP_DESTROY_CEQ = 15, + MQ_OP_CREATE_AEQ = 16, + MQ_OP_DESTROY_AEQ = 17, + MQ_OP_CREATE_ADDR_HANDLE = 18, + MQ_OP_MODIFY_ADDR_HANDLE = 19, + MQ_OP_DESTROY_ADDR_HANDLE = 20, + MQ_OP_UPDATE_FPT = 21, + MQ_OP_QUERY_FPM_VAL = 22, + MQ_OP_COMMIT_FPM_VAL = 23, + MQ_OP_NOP = 24, + MQ_OP_GATHER_STATS = 25, + MQ_OP_CREATE_SRQ = 26, + MQ_OP_MODIFY_SRQ = 27, + MQ_OP_DESTROY_SRQ = 28, + MQ_OP_DEREGISTER_MR = 29, + MQ_OP_MODIFY_CEQ = 30, + MQ_OP_QUERY_CEQ = 31, + MQ_OP_MODIFY_AEQ = 32, + MQ_OP_QUERY_AEQ = 33, + MQ_OP_QUERY_CQ = 34, + MQ_OP_QUERY_SRQ = 35, + MQ_OP_QUERY_MR = 36, + MQ_MAX_OPS, +}; + +enum sxe2_alignment { + SXE2_MQ_ALIGNMENT = 
0x200, + SXE2_AEQ_ALIGNMENT = 0x100, + SXE2_CEQ_ALIGNMENT = 0x100, + SXE2_MCQ_ALIGNMENT = 0x100, + SXE2_FPT_BUF_ALIGNMENT = 0x200, + SXE2_FEATURE_BUF_ALIGNMENT = 0x10, +}; + +enum sxe2_queue_type { + SXE2_QUEUE_TYPE_SQ_RQ = 0, + SXE2_QUEUE_TYPE_MQ, + SXE2_QUEUE_TYPE_SRQ, +}; + +struct sxe2_ring { + __u32 head; + __u32 tail; + __u32 size; +}; + +enum sxe2_pbl_obj_type { + PBL_OBJ_QP = 0, + PBL_OBJ_SRQ = 1, + PBL_OBJ_CQ = 2, + PBL_OBJ_EQ = 3, + PBL_OBJ_MR = 4, +}; + +enum sxe2_addressing_type { + SXE2_ADDR_TYPE_ZERO_BASED = 0, + SXE2_ADDR_TYPE_VA_BASED = 1, +}; + +enum sxe2_qp_event_type { + SXE2_QP_EVENT_CATASTROPHIC, + SXE2_QP_EVENT_ACCESS_ERR, + SXE2_QP_EVENT_REQ_ERR, + SXE2_QP_EVENT_COMM_EST, + SXE2_QP_EVENT_QP_LASTWQE_REACHED, +}; + +enum sxe2_pbl_init_mode { + SXE2_PBL_SECOND_INIT_MODE = 1, + SXE2_PBL_THIRD_INIT_MODE = 2, + SXE2_PBL_INIT_MODE_MAX, +}; + +struct sxe2_common_attrs { + u64 feature_flags; + u32 max_hw_wq_frags; + u32 max_hw_read_sges; + u32 max_hw_inline; + u32 max_hw_rq_quanta; + u32 max_hw_wq_quanta; + u32 min_hw_cq_size; + u32 max_hw_cq_size; + u16 max_hw_push_len; + u16 max_hw_sq_chunk; + u16 min_hw_wq_size; + u8 hw_rev; + u8 rsv; + u32 max_hw_srq_quanta; + u32 max_hw_srq_wr; +}; + +enum drv_rdma_dbg_rsc_type { + SXE2_DBG_RSC_QP, + SXE2_DBG_RSC_AEQ, + SXE2_DBG_RSC_CEQ, + SXE2_DBG_RSC_MCQ, + SXE2_DBG_RSC_CQ, + SXE2_DBG_RSC_MR, + SXE2_DBG_RSC_SRQ, + SXE2_DBG_RSC_MQ, + SXE2_DBG_RSC_RCMS, + SXE2_DBG_RSC_AH, + SXE2_DBG_RSC_MAX, +}; + +struct sxe2_rdma_device; + +typedef u64 (*sxe2_drv_rdma_debugfs_read)(struct sxe2_rdma_device *dev, + void *data, + enum drv_rdma_dbg_rsc_type type, + char *buf); +typedef int (*sxe2_drv_rdma_debugfs_write)(struct sxe2_rdma_device *dev, + void *data, + enum drv_rdma_dbg_rsc_type type, + char *buf); + +struct sxe2_rdma_debugfs_handle { + sxe2_drv_rdma_debugfs_read read_func; + sxe2_drv_rdma_debugfs_write write_func; +}; + +struct sxe2_rdma_rsc_debug { + struct sxe2_rdma_device *dev; + void *object; + enum drv_rdma_dbg_rsc_type type; + struct dentry *root; + struct sxe2_rdma_debugfs_handle func_tab; +}; + +struct sxe2_rdma_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +struct sxe2_rdma_virt_mem { + void *va; + u32 size; +} __packed; + +struct sxe2_rcms_cp { + enum sxe2_rcms_fpt_entry_type entry_type; + struct sxe2_rdma_dma_mem page_addr; + u32 fpt_spt_index; + u32 use_cnt; +}; + +struct sxe2_rcms_spt_entry { + struct sxe2_rcms_cp cp; + u32 fpt_index; + bool valid; +}; + +struct sxe2_rcms_spt { + struct sxe2_rdma_dma_mem spt_page_addr; + struct sxe2_rcms_spt_entry *spte; + struct sxe2_rdma_virt_mem spte_virt_mem; + u32 use_cnt; + u32 fpt_index; +}; + +struct sxe2_rcms_fpt_entry { + enum sxe2_rcms_fpt_entry_type entry_type; + bool valid; + union { + struct sxe2_rcms_spt spt; + struct sxe2_rcms_cp cp; + } u; +}; + +struct sxe2_rcms_fpt { + struct sxe2_rdma_virt_mem addr; + u32 fpte_cnt; + u32 use_cnt; + struct sxe2_rcms_fpt_entry *fpte; +}; + +enum sxe2_rcms_creat_table_mode { + FIRST_PAGE_TABLE = 1, + SECOND_PAGE_TABLE = 2, +}; + +struct sxe2_rcms_read_fpte_debugfs_input { + u32 fpte_idx; + u32 fpte_cnt; + u32 spte_idx; + u64 liner_base; +}; + +struct sxe2_rcms_read_spte_debugfs_input { + u32 fpte_idx; + u32 spte_idx; + u32 spte_cnt; +}; + +struct sxe2_rcms_read_liner_addr_input { + u64 liner_addr; + u32 size; +}; + +struct sxe2_rcms_read_obj_ctx_input { + u32 obj_type; + u32 obj_num; +}; + +struct sxe2_rcms_num_to_liner_addr_input { + u32 obj_type; + u32 obj_num; +}; + +struct sxe2_rcms_info { + u16 rcms_fn_id; + u32 
first_fpte_index;
+	u32 max_fpte_index;
+	u32 max_fpte_cnt;
+	u32 max_cc_qp_cnt;
+	u32 fpte_needed;
+	u32 first_page_fpte;
+	u8 irrl_ost_num;
+	u8 ssnt_ost_num;
+	u8 resp_ost_num;
+	u32 max_ceqs;
+	u32 max_db_page_num;
+	u32 db_bar_addr;
+	enum sxe2_rcms_creat_table_mode create_mode;
+	struct sxe2_rcms_obj_info *rcms_obj;
+	struct sxe2_rcms_fpt fpt;
+	u16 fpte_indexes[SXE2_RCMS_MAX_FPT_COUNT];
+	u16 pmf_index;
+	u32 pf_max_ceqs;
+#ifdef SXE2_CFG_DEBUG
+	struct sxe2_rcms_read_fpte_debugfs_input read_fpte_input;
+	struct sxe2_rcms_read_spte_debugfs_input read_spte_input;
+	struct sxe2_rcms_read_liner_addr_input read_liner_addr_input;
+	struct sxe2_rcms_read_obj_ctx_input read_obj_ctx_input;
+	struct sxe2_rcms_num_to_liner_addr_input num_to_la_input;
+#endif
+};
+
+struct sxe2_rdma_hw {
+	u8 __iomem *hw_addr;
+	u8 __iomem *priv_hw_addr;
+	struct device *device;
+	struct sxe2_rcms_info rcms;
+};
+
+#define SXE2_ROCE_CWND_DEFAULT 0x400
+#define SXE2_ROCE_ACKCREDS_DEFAULT 0x1E
+#define SXE2_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
+#define SXE2_CM_DEFAULT_RCV_WND_SCALE 2
+#define SXE2_DEFAULT_UP_UP_MAP 0x0706050403020100ULL
+#define SXE2_Q_INVALID_IDX 0xffff
+#undef SXE2_MAX_USER_PRIORITY
+#define SXE2_MAX_USER_PRIORITY 8
+#define SXE2_DSCP_NUM_VAL 64
+#define SXE2_AUX_DSCP_PFC_MODE 0x1
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#define SXE2_QSET_PER_USER_PRI 1
+#define SXE2_QSET_PER_USER_PRI_BOND 2
+
+enum sxe2_rdma_obj_mem_alignment {
+	SXE2_AEQ_ALIGNMENT_M = (256 - 1),
+	SXE2_CEQ_ALIGNMENT_M = (256 - 1),
+	SXE2_CQ0_ALIGNMENT_M = (256 - 1),
+	SXE2_HOST_CTX_ALIGNMENT_M = (4 - 1),
+	SXE2_SHADOW_AREA_M = (128 - 1),
+	SXE2_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1),
+	SXE2_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1),
+};
+
+enum sxe2_dyn_idx_t {
+	SXE2_IDX_ITR0 = 0,
+	SXE2_IDX_ITR1 = 1,
+	SXE2_IDX_ITR2 = 2,
+	SXE2_IDX_NOITR = 3,
+};
+
+enum sxe2_protocol_used {
+	SXE2_ROCE_PROTOCOL_ONLY = 2,
+};
+
+enum sxe2_mq_rcms_profile {
+	SXE2_RCMS_PROFILE_DEFAULT = 1,
+	SXE2_RCMS_PROFILE_FAVOR_VF = 2,
+	SXE2_RCMS_PROFILE_EQUAL = 3,
+};
+
+struct sxe2_rdma_qos_tc_info {
+	u64 tc_ctx;
+	u8 rel_bw;
+	u8 prio_type;
+	u8 egress_virt_up;
+	u8 ingress_virt_up;
+};
+
+struct sxe2_rdma_l2params {
+	struct sxe2_rdma_qos_tc_info tc_info[SXE2_MAX_USER_PRIORITY];
+	u32 num_apps;
+	u16 qs_handle_list[SXE2_MAX_USER_PRIORITY];
+	u16 mtu;
+	u8 up2tc[SXE2_MAX_USER_PRIORITY];
+	u8 dscp_map[SXE2_DSCP_NUM_VAL];
+	u8 num_tc;
+	u8 vsi_rel_bw;
+	u8 vsi_prio_type;
+	bool mtu_changed : 1;
+	bool tc_changed : 1;
+	bool dscp_mode : 1;
+};
+
+#define SXE2_MCQ_SIZE (4096)
+#define SXE2_DB_NOTE_M (128 - 1)
+#define INVALID_U32 0xFFFFFFFF
+#define SXE2_IRQ_NAME_STR_LEN (64)
+#define SXE2_MAX_UESER_PRIORITY 8
+#define SXE2_LE_32_TO_HOST(value) ((u32)le32_to_cpu(value))
+#define SXE2_HOST_32_TO_LE(value) ((u32)cpu_to_le32(value))
+
+#define SXE2_BAR_READ_32(addr) SXE2_LE_32_TO_HOST(readl(addr))
+#define SXE2_BAR_WRITE_32(val, addr) writel(SXE2_HOST_32_TO_LE(val), addr)
+
+struct sxe2_rdma_hw_attrs {
+	struct sxe2_common_attrs uk_attrs;
+	u64 max_hw_outbound_msg_size;
+	u64 max_hw_inbound_msg_size;
+	u64 max_mr_size;
+	u64 page_size_cap;
+	u32 min_hw_qp_id;
+	u32 min_hw_aeq_size;
+	u32 max_hw_aeq_size;
+	u32 min_hw_ceq_size;
+	u32 max_hw_ceq_size;
+	u32 max_hw_device_pages;
+	u32 max_hw_vf_fpm_id;
+	u32 first_hw_vf_fpm_id;
+	u32 max_rra;
+	int max_sra;
+	u32 max_hw_wqes;
+	u32 max_hw_pds;
+	u32 max_hw_ena_vf_count;
+	u32 max_qp_wr;
+	u32 max_pe_ready_count;
+	u32 max_done_count;
+
u32 max_sleep_count; + u32 max_mq_compl_wait_time_ms; + u32 min_hw_srq_id; + u16 max_stat_inst; + u16 max_stat_idx; +}; + +enum init_completion_state { + INVALID_STATE = 0, + INITIAL_STATE, + MQ_CREATED, + HMC_OBJS_CREATED, + HW_RSRC_INITIALIZED, + CCQ_CREATED, + CEQ0_CREATED, + ILQ_CREATED, + IEQ_CREATED, + REM_ENDPOINT_TRK_CREATED, + CEQS_CREATED, + PBLE_CHUNK_MEM, + AEQ_CREATED, + IP_ADDR_REGISTERED, +}; + +struct sxe2_rdma_vchnl_rdma_caps { + u8 hw_rev; + u16 cqp_timeout_s; + u16 cqp_def_timeout_s; + u16 max_hw_push_len; +}; + +enum sxe2_rdma_vm_vf_type { + SXE2_VF_TYPE = 0, + SXE2_VM_TYPE, + SXE2_PF_TYPE, +}; + +struct sxe2_rdma_config_check { + bool config_ok : 1; + bool lfc_set : 1; + bool pfc_set : 1; + u8 traffic_class; + u16 qs_handle; +}; + +struct sxe2_rdma_qset { + u32 qset_num; + u16 qset_id; + u16 teid; + u32 qset_qp_cnt; + u16 vsi_index; + u8 traffic_class; + u8 user_pri; + struct list_head qp_list; + u8 pf_id; + u8 active_port; + bool register_flag : 1; +}; + +struct sxe2_rdma_qos { + struct mutex qos_mutex; + struct sxe2_rdma_qset qset[SXE2_QSET_PER_USER_PRI_BOND]; + u32 teid[SXE2_QSET_PER_USER_PRI_BOND]; + u8 rel_bw[SXE2_QSET_PER_USER_PRI_BOND]; + u8 prio_type[SXE2_QSET_PER_USER_PRI_BOND]; + u32 qp_cnt; + bool valid : 1; +}; + +struct sxe2_rdma_hw_stat_map { + u16 byteoff; + u8 bitoff; + u64 bitmask; +}; + +struct sxe2_rdma_dev_hw_stats { + u64 stats_val[SXE2_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct sxe2_rdma_stats_gather_info { + bool use_rdma_fcn_index : 1; + bool use_stats_inst : 1; + u16 rcms_fcn_index; + u16 stats_inst_index; + struct sxe2_rdma_dma_mem stats_buff_mem; + void *gather_stats_va; +}; + +struct sxe2_rdma_vsi_pestat { + struct sxe2_rdma_hw *hw; + struct sxe2_rdma_dev_hw_stats hw_stats; + struct sxe2_rdma_stats_gather_info + gather_info; + struct timer_list stats_timer; + u32 timer_delay; + struct sxe2_rdma_ctx_vsi *vsi; + struct work_struct work; + struct workqueue_struct *stats_wq; + struct mutex stats_lock; +}; + +struct sxe2_rdma_ctx_vsi { + u16 vsi_idx; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_vchnl_dev *vc_dev; + void *back_vsi; + u32 exception_lan_q; + u16 mtu; + u16 vf_id; + enum sxe2_rdma_vm_vf_type vm_vf_type; + bool tc_change_pending : 1; + bool mtu_change_pending : 1; + bool failover_pending : 1; + struct sxe2_rdma_vsi_pestat *pestat; + atomic_t qp_suspend_reqs; + int (*register_qsets)(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + void (*unregister_qsets)(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + struct sxe2_rdma_config_check cfg_check[1]; + bool tc_print_warning[SXE2_MAX_USER_PRIORITY]; + u8 qos_rel_bw[SXE2_QSET_PER_USER_PRI_BOND]; + u8 qos_prio_type[SXE2_QSET_PER_USER_PRI_BOND]; + u16 stats_idx; + u8 dscp_map[SXE2_QSET_PER_USER_PRI_BOND][SXE2_DSCP_NUM_VAL]; + struct sxe2_rdma_qos qos[SXE2_MAX_USER_PRIORITY]; + bool dscp_mode[SXE2_QSET_PER_USER_PRI_BOND]; + bool lag_aa : 1; + bool lag_backup : 1; + u8 lag_ports[2]; + u8 lag_port_bitmap; + + u32 primary_port_node_ids + [SXE2_MAX_USER_PRIORITY]; + u32 secondary_port_node_ids + [SXE2_MAX_USER_PRIORITY]; + atomic_t port1_qp_cnt; + atomic_t port2_qp_cnt; + bool primary_port_migrated; + bool secondary_port_migrated; +}; + +struct sxe2_rdma_ctx_dev { + struct list_head mq_cmd_head; + spinlock_t mq_lock; + struct sxe2_rdma_dma_mem vf_fpm_query_buf[SXE2_MAX_PE_ENA_VF_COUNT]; + u64 fpm_query_buf_pa; + u64 fpm_commit_buf_pa; + __le32 *fpm_query_buf; + __le32 *fpm_commit_buf; + 
struct sxe2_rdma_hw *hw; + u32 __iomem *wqe_alloc_db; + u32 __iomem *hw_regs[SXE2_MAX_BAR_REGS]; + u32 ceq_itr; + const struct sxe2_rdma_hw_stat_map *hw_stats_map; + u64 hw_masks[10]; + u8 hw_shifts[10]; + u32 feature_info[SXE2_RDMA_MAX_FEATURES]; + u32 fw_ver; + u64 mq_post_stats[SXE2_MQ_OP_MAX]; + u64 mq_cmd_stats[SXE2_MQ_OP_MAX]; + struct sxe2_rdma_hw_attrs hw_attrs; + struct sxe2_rcms_info *rcms_info; + struct sxe2_rdma_vchnl_if *vchnl_if; + struct sxe2_rdma_vchnl_rdma_caps vc_caps; + u8 vc_recv_buf[SXE2_VCHNL_MAX_MSG_SIZE]; + u16 vc_recv_len; + struct sxe2_rdma_vchnl_dev *vc_dev[SXE2_MAX_PE_ENA_VF_COUNT]; + spinlock_t vc_dev_lock; + struct workqueue_struct *vchnl_wq; + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_ctx_aeq *aeq; + struct sxe2_rdma_ctx_ceq *ceq[SXE2_RDMA_CEQ_MAX_COUNT]; + struct sxe2_rdma_ctx_cq *mcq; + const struct sxe2_rdma_irq_ops *irq_ops; + struct aux_ver_info fw_version; + u16 num_vfs; + u16 rcms_fn_id; + u8 vf_id; + bool privileged : 1; + bool vchnl_up : 1; + bool ceq_valid : 1; + bool double_vlan_en : 1; + struct mutex vchnl_mutex; + struct sxe2_rdma_dma_mem vf_gather_stats_buf[SXE2_MAX_PE_ENA_VF_COUNT]; + u8 pf_cnt; + struct mutex lag_mutex; +}; + +struct sxe2_rdma_irq_ops { + void (*sxe2_rdma_cfg_aeq)(struct sxe2_rdma_ctx_dev *dev, u32 idx, + bool enable); + void (*sxe2_rdma_cfg_ceq)(struct sxe2_rdma_ctx_dev *dev, u32 ceq_id, + u32 idx, bool enable); + void (*sxe2_rdma_dis_irq)(struct sxe2_rdma_ctx_dev *dev, u32 idx); + void (*sxe2_rdma_en_irq)(struct sxe2_rdma_ctx_dev *dev, u32 idx); +}; + +struct sxe2_rdma_vchnl_if { + int (*vchnl_recv)(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id); +}; + +struct sxe2_rdma_handler { + struct list_head list; + struct sxe2_rdma_device *dev; +#ifdef SXE2_CFG_DEBUG + struct dentry *db_debugfs; + struct dentry *stats_debugfs; +#endif + struct dentry *sxe2_rdma_dbg_dentry; + struct dentry *qp_debugfs; + struct dentry *cq_debugfs; + struct dentry *eq_debugfs; + struct dentry *ceq_debugfs; + struct dentry *aeq_debugfs; + struct dentry *srq_debugfs; + struct dentry *mq_debugfs; + struct dentry *rcms_debugfs; + struct dentry *mr_debugfs; +#ifdef SXE2_CFG_DEBUG + struct dentry *ah_debugfs; +#endif + struct dentry *aeq_codes_err_debugfs; + struct dentry *mq_err_debugfs; + struct dentry *mq_err_cqe_debugfs; + struct dentry *qos_debugfs; + struct dentry *mq_op_failed_debugfs; + + struct dentry *cc_debugfs; + struct dentry *common_debugfs; + struct list_head ucontext_list; + spinlock_t uctx_list_lock; + bool shared_res_created; +}; + +enum sxe2_cmpl_notify { + SXE2_RDMA_CQ_COMPL_EVENT = 0, + SXE2_RDMA_CQ_COMPL_SOLICITED = 1, +}; + +struct sxe2_rdma_cq_uk { + __le64 *cqe_alloc_db; + struct sxe2_cqe *cq_base; + __le32 *doorbell_note; + __u32 cq_id; + u32 arm_sn; + __u32 ncqe; + struct sxe2_ring cq_ring; + __u8 polarity; +}; + +struct sxe2_rdma_ctx_cq { + struct sxe2_rdma_cq_uk cq_uk; + u64 cq_pa; + u64 db_pa; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_ctx_vsi *vsi; + void *back_cq; + u8 cq_type; + struct drv_rdma_soft_cqc cqc; +}; + +struct sxe2_rdma_ctx_ceq { + u32 size; + u32 cons_index; + u32 __iomem *doorbell; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_eqe *ceqe_base; + struct sxe2_eqe_hygon *ceqe_hygon_base; + u32 ceq_id; + struct sxe2_ring ceq_ring; + u8 polarity; + struct sxe2_rdma_ctx_vsi *vsi; + struct drv_rdma_soft_eqc eqc; +}; + +enum sxe2_pbl_qp_srq_mode { + QP_SRQ_PA_FIRST_MODE = 0, + QP_SRQ_PA_SECOND_MODE = 1, +}; + +struct sxe2_pbl_pble_info { + u64 liner_addr; + u32 pble_idx; +}; + 
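+/*
+ * Illustration only (hypothetical helpers, not used anywhere in this
+ * driver): the producer/consumer convention behind struct sxe2_ring and
+ * the SXE2_RING_* macros defined above. One quantum is always kept free
+ * so that head == tail unambiguously means "empty"; SXE2_RING_MOVE_HEAD()
+ * hands back -ENOMEM through _retcode once only that reserved quantum
+ * remains, and SXE2_RING_MOVE_TAIL() retires one completed quantum on
+ * the consumer side.
+ */
+static inline int sxe2_ring_example_produce(struct sxe2_ring *ring, u32 *slot)
+{
+	int ret;
+
+	*slot = SXE2_RING_CURRENT_HEAD(*ring);	/* index the producer fills */
+	SXE2_RING_MOVE_HEAD(*ring, ret);	/* claim it; -ENOMEM when full */
+	return ret;
+}
+
+static inline void sxe2_ring_example_consume(struct sxe2_ring *ring)
+{
+	/* one entry completed; SXE2_RING_FREE_QUANTA() grows by one */
+	SXE2_RING_MOVE_TAIL(*ring);
+}
+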
+struct sxe2_pbl_pble_alloc_info { + u32 total_pble_cnt; + u32 needed_pble_cnt; + u64 pbl_index; + bool mr_first_page_flags; + union { + enum sxe2_pbl_qp_srq_mode qp_srq_mode; + enum sxe2_pbl_cq_eq_mode cq_eq_mode; + enum sxe2_pbl_mr_mode mr_mode; + u32 mode; + } pbl_mode; + struct sxe2_pbl_pble_info pble_info; +}; + +struct sxe2_ceq_pble_buf { + void *buf; + dma_addr_t map; +}; + +struct sxe2_rdma_ceq { + struct sxe2_rdma_ctx_ceq ctx_ceq; + struct sxe2_rdma_dma_mem mem; + u32 irq; + u32 msix_idx; + struct sxe2_rdma_pci_f *rf; + struct tasklet_struct dpc_tasklet; + spinlock_t ce_lock; + struct sxe2_rdma_rsc_debug *dbg_node; + + struct sxe2_pbl_pble_alloc_info palloc; + bool pble_map; + struct sxe2_ceq_pble_buf *pble_buf; +}; + +struct sxe2_pbl_buddy { + unsigned long **bits; + u32 *num_free; + u32 max_order; + spinlock_t buddy_lock; + struct sxe2_rdma_ctx_dev *dev; +}; + +struct sxe2_pbl_first_page_bitmap { + unsigned long *fpte_bits; + spinlock_t bitmap_lock; + u32 max_fpte_cnt; + u32 first_fpte_idx; + struct sxe2_rdma_ctx_dev *dev; +}; + +enum sxe2_pbl_create_mode { + PBL_FIRST_PAGE_TABLE = 1, + PBL_SECOND_PAGE_TABLE = 2, + PBL_THIRD_PAGE_TABLE = 3, +}; + +struct sxe2_pbl_pble_rsrc { + enum sxe2_pbl_create_mode init_mode; + u32 unallocated_pble; + u32 allocated_pbles; + struct mutex pble_mutex_lock; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_pbl_buddy buddy; + bool first_page_en; + struct sxe2_pbl_first_page_bitmap first_page_bitmap; + u16 fpte_indexes[SXE2_RCMS_MAX_FPT_COUNT]; + u32 spte_indexes[SXE2_RCMS_SPT_ENTRY_CNT]; + u32 add_fpte_cnt; + u32 add_spte_cnt; + u64 pble_base_addr; + u64 alloc_pble_base_addr; + u32 unallocated_first_type_fpte_cnt; + u32 allocated_first_type_fpte_cnt; + u32 second_type_fpte_cnt; + u32 third_type_fpte_cnt; +}; + +struct sxe2_rdma_ctx_aeq { + u32 size; + u32 cons_index; + u32 __iomem *doorbell; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_eqe *aeqe_base; + struct sxe2_eqe_hygon *aeqe_hygon_base; + void *pbl_list; + struct sxe2_ring aeq_ring; + u32 irq; + u32 msix_idx; + u8 polarity; + bool virtual_map : 1; + struct sxe2_rdma_ctx_vsi *vsi; + struct drv_rdma_soft_eqc eqc; +}; + +struct sxe2_aeq_pble_buf { + void *buf; + dma_addr_t map; +}; + +struct sxe2_rdma_aeq { + struct sxe2_rdma_ctx_aeq ctx_aeq; + struct sxe2_rdma_dma_mem mem; + struct sxe2_pbl_pble_alloc_info palloc; + bool pble_map; + struct sxe2_aeq_pble_buf *pble_buf; + struct sxe2_rdma_rsc_debug *dbg_node; + struct dentry *debugfs_cnt; +}; + +struct sxe2_sq_common_wr_trk_info { + __u64 wrid; + __u32 wr_len; + __u16 quanta; + __u8 reserved[2]; +}; + +struct sxe2_qp_quanta { + __le64 elem[SXE2_WQE_SIZE]; +}; + +struct sxe2_cqe { + __le64 buf[SXE2_CQE_SIZE]; +}; +union sxe2_ah_info { + struct { + u16 rsv1; + u8 dest_mac[ETH_ALEN]; + + u64 vlan_tag : 16; + u64 rsv2 : 16; + u64 tc_tos : 8; + u64 rsv3 : 6; + u64 pd_idx : 18; + + u64 flow_label : 20; + u64 rsv4 : 12; + u64 hop_ttl : 8; + u64 rsv5 : 8; + u64 arp_index : 16; + + u64 ah_idx : 17; + u64 rsv6 : 15; + u64 op : 6; + u64 rsv7 : 4; + u64 rsv8 : 17; + u64 ipv4_valid : 1; + u64 insert_vlan_tag : 1; + u64 rsv9 : 1; + u64 do_lpbk : 1; + u64 wqe_valid : 1; + u32 dest_ip_addr[4]; + u32 src_ip_addr[4]; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +struct sxe2_ctx_ah { + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_mq_request *mq_request; + union sxe2_ah_info ah_info; + bool ah_valid : 1; +}; + +struct sxe2_ah { + struct ib_ah ibah; + struct sxe2_ctx_ah ctx_ah; + struct sxe2_av av; + struct hlist_node list; + refcount_t refcnt; + struct sxe2_ah 
*parent_ah; +#ifdef SXE2_CFG_DEBUG + struct sxe2_rdma_rsc_debug *dbg_node; +#endif +}; + +struct sxe2_srq_drv { + struct sxe2_qp_quanta *srq_base; + struct sxe2_common_attrs *common_attrs; + __le64 *db_note; + struct sxe2_ring srq_ring; + u32 srq_id; + u32 srq_size; + u32 max_srq_frag_cnt; + u8 srq_polarity; + u8 wqe_size; + u8 wqe_size_multiplier; + u32 srq_buf_size; +}; + +struct sxe2_umode_srq { + struct ib_umem *srq_umem; + struct ib_umem *db_note_umem; + u8 wqe_access_mod; + u64 pbl_pointer; + struct sxe2_pbl_pble_alloc_info *pble_alloc_info; +}; + +struct sxe2_kmode_srq { + struct sxe2_rdma_dma_mem srq_buf_mem; + struct sxe2_rdma_dma_mem + srq_db_note_mem; + u64 *srq_wrid_array; + u8 *srqe_array; +}; + +struct sxe2_rdma_srq_ctx { + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_ctx_vsi *vsi; + struct sxe2_rdma_ctx_pd *pd; + struct sxe2_srq_drv srq_drv; + struct sxe2_umode_srq usrq_rsc; + struct sxe2_kmode_srq ksrq_rsc; + u64 srq_pa; + u64 db_note_pa; + u32 hw_srq_size; + u16 srq_limit; + bool user_mode : 1; + u32 log_page_size; + u64 srq_cmpl_ctx; +}; + +struct sxe2_rdma_srq { + struct ib_srq ibsrq; + struct sxe2_rdma_srq_ctx srq_ctx; + struct completion free_srq; + refcount_t refcnt; + spinlock_t lock; + u32 srq_id; + u32 max_wr; + u32 max_sge; + struct sxe2_rdma_rsc_debug *dbg_node; + struct sxe2_rdma_pd *pd; +}; + +struct sxe2_io_info { + u32 total_sqe_cnt; + u32 total_rqe_cnt; + u32 finished_sqe_cnt; + u32 finished_rqe_cnt; + u32 finished_rqe_insrq_cnt; + u32 flushed_sq_cnt; + u32 flushed_rq_cnt; + u32 cleaned_sq_cnt; + u32 cleaned_rq_cnt; + u32 cleaned_flushsq_cnt; + u32 cleaned_flushrq_cnt; + u32 total_signal_cnt; + u32 send_cnt; + u32 send_inv_cnt; + u32 read_cnt; + u32 write_cnt; + u32 local_inv_cnt; + u32 bind_mw_cnt; + u32 fast_regmr_cnt; + u64 last_send_sqwrid; + u64 last_rcvd_sqwrid; + u64 last_send_rqwrid; + u64 last_rcvd_rqwrid; +}; + +struct sxe2_qp_common { + struct sxe2_qp_quanta *sq_base; + struct sxe2_qp_quanta *rq_base; + struct sxe2_srq_drv *srq; + struct sxe2_common_attrs *common_attrs; + u32 __iomem *qp_db_no_llwqe; + struct sxe2_sq_common_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + __le32 *doorbell_note; + __le32 *push_db; + __le64 *push_wqe; + struct sxe2_llwqe *llwqe; + struct sxe2_ring sq_ring; + struct sxe2_ring rq_ring; + struct sxe2_ring initial_ring; + u32 qpn; + u32 qp_caps; + u32 sq_size; + u32 rq_size; + u32 max_sq_sge_cnt; + u32 max_rq_sge_cnt; + u32 max_inline_data; + u8 qp_type; + u8 swqe_polarity; + u8 rwqe_polarity; + u8 rq_wqe_size; + u8 rq_wqe_size_multiplier; + u8 rd_fence_rate; + u16 ord_cnt; + bool llwqe_mode : 1; + bool push_dropped : 1; + void *back_qp; + bool destroy_pending : 1; + spinlock_t *lock; + u8 dbg_rq_flushed; + struct sxe2_io_info statistics; +}; + +struct sxe2_rdma_ctx_qp { + struct sxe2_qp_common qp_common; + u64 sq_pa; + u64 rq_pa; + u64 hw_host_ctx_pa; + u32 *shadow_area_va; + u64 shadow_area_pa; + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rdma_ctx_vsi *vsi; + struct sxe2_rdma_ctx_pd *pd; + __le64 *hw_host_ctx; + u64 qp_compl_ctx; + u32 llwqe_page_index; + u16 push_offset; + u8 qp_state; + u8 hw_sq_size; + u8 hw_rq_size; + u8 qp_buff_page_offset; + bool on_qoslist : 1; + bool flush_sq : 1; + bool flush_rq : 1; + enum sxe2_qp_event_type event_type; + u8 user_pri; + struct list_head list; + u8 sq_flush_polarity; + u8 qset_idx; +}; +struct sxe2_roce_offload_info { + u16 p_key; + u32 err_rq_idx; + u32 qkey; + u32 dest_qp; + u8 err_rq_idx_valid; + u32 pd_id; + u16 log_rra_max; + u16 log_sra_max; + bool is_qp1 : 
1; + bool udprivcq_en : 1; + bool flush_mr : 1; + bool fw_cc_enable : 1; + bool use_stats_inst : 1; + bool bind_en : 1; + u8 loacl_ack_timeout; + u16 t_high; + u16 t_low; + u8 last_byte_sentl; + u8 state_rate; + u8 mac_addr[ETH_ALEN]; + u8 dest_mac_addr[ETH_ALEN]; +}; +struct sxe2_udp_offload_info { + bool ipv4 : 1; + bool insert_vlan_tag : 1; + u8 ttl; + u8 dscp; + u8 ecn; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr[4]; + u32 pmtu; + u16 vlan_tag; + u32 flow_label; + u8 udp_state; + u32 sq_psn; + u32 rq_psn; + u32 local_ipaddr[4]; + u8 retry_cnt; + u8 rnr_retry; + u8 min_rnr_timer; + u8 ack_timeout; +}; +struct sxe2_qp_host_ctx_info { + u64 qp_compl_ctx; + struct sxe2_udp_offload_info *udp_info; + struct sxe2_roce_offload_info *roce_info; + u32 send_cq_num; + u32 rcv_cq_num; + u32 srqn; + u16 stats_idx; + bool srq_valid : 1; + bool stats_idx_valid : 1; + u8 user_pri; + u32 llwqe_page_index; + u8 llwqe_mod_enable; + u8 log_msg_max; + u8 fl; + u32 src_vsi; + u8 retry_mode; + u8 credit_en; + u16 rqe_lwm; + u8 retry_resp_op_sel; + u8 log_rtm; + u8 ack_mode; + u8 log_ack_req_freq; + u16 dispatch_min_unit; + u8 dispatch_mode; + u8 qp_bucket_type; +}; +struct sxe2_qp_kmode { + struct sxe2_rdma_dma_mem dma_mem; + struct sxe2_sq_common_wr_trk_info *sq_wrid_mem; + u64 *rq_wrid_mem; +}; + +enum sxe2_qp_wqe_access_mod { + SXE2_QP_WQE_ACCESS_MOD_0 = 0x0, + SXE2_QP_WQE_ACCESS_MOD_1 = 0x1, +}; + +struct sxe2_rdma_qp { + struct ib_qp ibqp; + struct sxe2_rdma_ctx_qp qp_ctx; + struct sxe2_rdma_device *dev; + struct sxe2_rdma_cq *send_cq; + struct sxe2_rdma_cq *recv_cq; + struct sxe2_rdma_pd *pd; + struct sxe2_qp_host_ctx_info ctx_info; + struct sxe2_roce_offload_info roce_info; + struct sxe2_udp_offload_info + udp_info; + struct sxe2_ah roce_ah; + refcount_t refcnt; + struct delayed_work dwork_flush; + enum ib_qp_state ibqp_state; + u32 max_send_wr; + u32 max_recv_wr; + spinlock_t lock; + u8 sig_all : 1; + struct sxe2_qp_kmode kqp; + struct sxe2_rdma_dma_mem host_ctx; + struct completion free_qp; + struct ib_umem *qp_umem; + struct ib_umem *db_note_umem; + u64 pbl_pointer; + u8 log_page_size; + u8 wqe_access_mod; + struct sxe2_pbl_pble_alloc_info *pble_alloc_info; + u8 suspend_pending : 1; + bool user_mod : 1; + u8 flush_issued : 1; + struct sxe2_rdma_rsc_debug *dbg_node; + u8 gsi_flag : 1; + u8 state_rate_other_flag : 1; + u8 sw_cc_enable; + u32 sw_cc_idx; + bool alloc_db_page; +}; + +struct sxe2_cqe_info { + __u64 wr_id; + __u32 bytes; + union { + struct { + __u64 payload_len : 32; + __u64 packet_seq : 24; + __u64 rsvd1 : 8; + __u64 qpc; + __u64 l_r_key : 32; + __u64 qp_id : 18; + __u64 rsvd2 : 14; + __u64 minor_err : 16; + __u64 major_err : 16; + __u64 wq_desc_idx : 15; + __u64 rsvd3 : 3; + __u64 extended_cqe : 1; + __u64 push_dropped : 1; + __u64 ipv4 : 1; + __u64 stag_or_lrkey : 1; + __u64 solicited_evt : 1; + __u64 error : 1; + __u64 op : 6; + __u64 qp_type : 1; + __u64 rsvd4 : 1; + __u64 imme_data : 32; + __u64 srqn : 18; + __u64 is_srq : 1; + __u64 rsvd5 : 13; + __u64 cqe_timestamp; + __u64 ud_smac : 48; + __u64 ud_vlan_tag : 16; + __u64 ud_src_qpn : 24; + __u64 rsvd6 : 8; + __u64 rsvd7 : 6; + __u64 vsi_index : 10; + __u64 rsvd8 : 12; + __u64 vlan_tag_flag : 1; + __u64 ud_smac_valid : 1; + __u64 imm_data_flag : 1; + __u64 cqe_valid : 1; + } field; + __u64 buf[SXE2_CQE_SIZE]; + } info; +}; + +struct sxe2_flushed_cqe { + struct list_head list; + struct sxe2_cqe_info cqeinfo; +}; + +struct sxe2_rdma_cq { + struct ib_cq ibcq; + struct sxe2_rdma_ctx_cq cq_ctx; + u16 cq_head; + u32 cq_num; + bool 
user_mode; + atomic_t armed; + enum sxe2_arm_type arm_type; + struct sxe2_rdma_dma_mem kmem; + struct sxe2_rdma_dma_mem kmem_db; + struct ib_umem * + cq_umem; + struct ib_umem * + db_umem; + struct completion free_cq; + refcount_t refcnt; + spinlock_t lock; + struct sxe2_pbl_pble_alloc_info palloc; + struct sxe2_cqe_info cur_cqe; + struct list_head cmpl_generated; + struct sxe2_rdma_rsc_debug *dbg_node; +}; + +struct sxe2_cq_cmpl_gen { + struct list_head list; + struct sxe2_cqe_info cur_cqe; +}; + +struct sxe2_rdma_ctx_pd { + struct sxe2_rdma_ctx_dev *dev; + u32 pd_id; + int abi_ver; +}; + +struct sxe2_rdma_pd { + struct ib_pd ibpd; + struct sxe2_rdma_ctx_pd pd_ctx; +}; + +struct sxe2_rdma_mcq { + struct sxe2_rdma_ctx_cq ctx_cq; + struct sxe2_rdma_dma_mem mem_cq; + struct sxe2_rdma_dma_mem mem_db_note; + struct sxe2_rdma_rsc_debug *dbg_node; +}; + +struct sxe2_rdma_msix_vector { + u32 idx; + u32 irq; + u32 cpu_affinity; + u32 ceq_id; + u32 ceq_abs_id; + cpumask_t mask; + char name[SXE2_RDMA_IRQ_NAME_STR_LEN]; +}; + +struct sxe2_rdma_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct sxe2_rdma_qvlist_info { + u32 num_vectors; + struct sxe2_rdma_qv_info qv_info[]; +}; + +struct sxe2_rdma_vchnl_dev { + struct sxe2_rdma_ctx_dev *pf_dev; + struct sxe2_rdma_ctx_vsi *vf_vsi; + u8 *rcms_info_mem; + u8 vchnl_msg_buf[SXE2_RDMA_VCHNL_MAX_MSG_SIZE]; + struct sxe2_rcms_info rcms_info; + u64 fpm_query_buf_pa; + u64 *fpm_query_buf; + refcount_t refcnt; + u16 pmf_index; + u16 vf_id; + u16 vf_idx; + u8 protocol_used; + bool pf_rcms_initialized : 1; + bool reset_en : 1; + bool port_vlan_en : 1; + bool stats_initialized : 1; + struct sxe2_rdma_stats_gather_info gather_stats_info; + struct sxe2_rdma_dma_mem gather_stats_buf; +}; + +struct sxe2_rdma_gen_ops { + void (*request_reset)(struct sxe2_rdma_pci_f *rf); + int (*register_qsets)(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + void (*unregister_qsets)(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); +}; + +struct sxe2_rdma_cc_dcqcn_params { + u16 t_interval; + u32 b; + u8 f; + u8 rai_factor; + u8 rhai_factor; + u32 rreduce_mperiod; + u8 min_dec_factor; + u8 min_rate; + u16 k; + u8 bc; + u8 tc; + u32 g; + u32 rt; + u32 rc; + u32 alpha; + u16 rreduce_next_node_info; + u16 t_next_node_info; + u32 byte_counter; + u8 decrease_rate_valid; + u8 func_id; +}; + +struct sxe2_rdma_cc_timely_params { + u16 min_rtt; + u16 tlow; + u16 thigh; + u8 rai_factor; + u16 pre_rtt; + u32 beta; + u32 alpha; + u16 rtt_diff; +}; + +struct sxe2_rdma_cc_params { + bool dcqcn_enable; + bool dcqcn_cfg_valid; + struct sxe2_rdma_cc_dcqcn_params dcqcn_params; + bool timely_enable; + bool timely_cfg_valid; + struct sxe2_rdma_cc_timely_params timely_params; + u8 ecn; + u8 cnp_ecn; + u32 cc_qp_idx; +}; + +struct sxe2_cc_refcount { + atomic_t cc_qp_refcount; + struct mutex refcount_lock; +}; + +struct rcms_page_table_mode { + enum sxe2_rcms_init_mode ctx_mode; + enum sxe2_pbl_init_mode pbl_mode; +}; + +struct sxe2_db_llwqe_head { + struct mutex lock; + struct list_head list; +}; + +struct sxe2_llwqe { + void __iomem *wqe_addr; + void __iomem *db_addr; + struct sxe2_db_page *db_page; + bool wc; + u32 index; + spinlock_t lock; +}; + +struct sxe2_db_mmap_entry_head { + struct mutex lock; + struct list_head list; +}; + +struct sxe2_mq_ctx { + u32 size; + u64 mq_buf_pa; + u64 mq_ctx_pa; + struct sxe2_rdma_ctx_dev *dev; + int (*process_mq_fpt)(struct sxe2_rdma_ctx_dev 
*dev, + struct sxe2_rcms_update_fptes_info + *info); + struct sxe2_rdma_dma_mem fptebuf; + struct sxe2_ring mq_ring; + struct sxe2_mq_quanta *mq_buf_va; + __le64 *mq_ctx_va; + u64 *scratch_array; + u64 requested_ops; + atomic64_t completed_ops; + u32 mqe_count; + u32 hw_mq_size; + u16 hw_maj_ver; + u16 hw_min_ver; + u8 struct_ver; + u8 polarity; + u8 rcms_profile; + u8 ena_vf_count; + u8 ceqs_per_vf; + bool rocev2_rto_policy : 1; + bool en_rem_endpoint_trk : 1; + enum sxe2_protocol_used protocol_used; +}; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +enum mq_wqe_err_code { + MQ_WQE_ERR_DEBUGFS_CLR = 0x0, + QPN_DEBUGFS = 0x1, + CQN_DEBUGFS = 0x2, + SRQN_DEBUGFS = 0x3, + CEQN_DEBUGFS = 0x4, + AEQN_DEBUGFS = 0x5, + QPC_SRQN_DEBUGFS = 0x6, + QPC_SEND_CQN_DEBUGFS = 0x7, + QPC_RECV_CQN_DEBUGFS = 0x8, + CQC_CEQN_DEBUGFS = 0x9, + QPC_SW_STATE_DEBUGFS = 0xA, + CQC_SW_STATE_DEBUGFS = 0xB, + SRQC_SW_STATE_DEBUGFS = 0xC, + CEQC_SW_STATE_DEBUGFS = 0xD, + AEQC_SW_STATE_DEBUGFS = 0xE, + QPC_TYPE_DEBUGFS = 0xF, + QPC_PMTU_DEBUGFS = 0x10, + QPC_SQ_SIZE_DEBUGFS = 0x11, + QPC_RQ_SIZE_DEBUGFS = 0x12, + CQC_SIZE_DEBUGFS = 0x13, + SRQC_SIZE_DEBUGFS = 0x14, + CEQC_SIZE_DEBUGFS = 0x15, + AEQC_SIZE_DEBUGFS = 0x16, + QPC_RQ_TYPE_DEBUGFS = 0x17, + QPC_PAGE_SIZE_DEBUGFS = 0x18, + QPC_SW_STATE_JUMP_DEBUGFS = 0x19, + FPTE_CNT_DEBUGFS = 0x1A, + AH_ID_DEBUGFS = 0x1B, + MR_ID_DEBUGFS = 0x1C, + MR_ACCESS_RIGHT_DEBUGFS = 0x1D, + MR_TYPE_DEBUGFS = 0x1E, + MR_PAGE_SIZE_DEBUGFS = 0x1F, + QP_CTX_PA_DEBUGFS = 0x20, + QP_CREATE_OP_DEBUGFS = 0x21, + MQ_DEBUGFS_INJECT_ERR_MAX = 0x22, +}; + +enum { + MQ_MNG_PBL_WQE_ERR_CLR = 0x0, + MQ_MNG_PBL_SPTE_CNT = 0x1, + MQ_MNG_PBL_FIRST_SPTE_IDX = 0x2, + MQ_MNG_PBL_FPTE_IDX = 0x3, + MQ_MNG_PBL_OPCODE = 0x4, +}; + +struct sxe2_mq_err_dbg_val { + bool mqc_addr_vld; + u64 mqc_addr; + bool mqc_size_vld; + u64 mqc_size; + bool mqc_base_vld; + u64 mqc_base; + u32 mqc_ignore_vld; +}; + +struct sxe2_mq_err_mcqe_dbg_val { + u32 fpte_rsc_type; + u32 fpte_err_type; + u32 rsc_wqe_err_type; + u64 rsc_wqe_err_val; + bool commit_wqe_err_vld; + u32 commit_wqe_err_type; + u32 commit_wqe_err_val; + bool manage_pbl_wqe_err_vld; + u32 manage_pbl_wqe_err_type; + u64 manage_pbl_wqe_err_val; +}; +#endif + +struct sxe2_mq { + struct sxe2_mq_ctx mq; + spinlock_t req_lock; + spinlock_t cmpl_lock; + wait_queue_head_t remove_wq; + struct sxe2_rdma_dma_mem mq_buf; + struct sxe2_rdma_dma_mem mq_ctx; + u64 *scratch_array; + struct sxe2_mq_request *mq_requests; + struct list_head mq_avail_reqs; + struct sxe2_rdma_rsc_debug *dbg_node; + bool mcqe_ignore; +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + struct sxe2_mq_err_dbg_val err_val; + struct sxe2_mq_err_mcqe_dbg_val err_cqe_val; + bool ops_failed[MQ_MAX_OPS]; +#endif +}; + +union sxe2_hw_ahc { + struct { + u16 rsv1; + u8 dest_mac[ETH_ALEN]; + + u64 vlan_tag : 16; + u64 rsv2 : 16; + u64 tc_tos : 8; + u64 rsv3 : 6; + u64 pd_idx : 18; + + u64 flow_label : 20; + u64 rsv4 : 12; + u64 hop_ttl : 8; + u64 rsv5 : 8; + u64 rsv10 : 16; + + u64 ah_idx : 17; + u64 rsv6 : 15; + u64 op : 6; + u64 rsv7 : 4; + u64 rsv8 : 17; + u64 ipv4_valid : 1; + u64 insert_vlan_tag : 1; + u64 ah_valid : 1; + u64 do_lpbk : 1; + u64 wqe_valid : 1; + u32 dest_ip_addr[4]; + u32 src_ip_addr[4]; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +union sxe2_alloc_key_info { + struct { + u64 rsv1 : 7; + u64 rsv2 : 1; + u64 pbl_mode : 2; + u64 rsv4 : 5; + u64 rsv5 : 1; + u64 mw_type : 1; + u64 rsv6 : 7; + u64 access_mode : 5; + u64 mr_type : 1; + u64 rsv8 : 1; + u64 rsv9 : 1; + 
u64 rsv10 : 8; + u64 rsv11 : 18; + u64 rsv12 : 6; + + u64 rsv13 : 24; + u64 rsv14 : 8; + u64 pd : 18; + u64 rsv15 : 1; + u64 rsv16 : 1; + u64 rsv17 : 12; + u64 rsv18; + u64 mr_idx : 22; + u64 rsv19 : 10; + u64 op : 6; + u64 rsv20 : 25; + u64 wqe_valid : 1; + u64 rsv21; + u64 len; + u64 pbl_idx; + u64 log_entity_size : 5; + u64 rsv22 : 27; + u64 rsv23 : 32; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +union sxe2_dalloc_key_info { + struct { + u64 rsv1 : 7; + u64 rsv2 : 1; + u64 rsv3 : 2; + u64 rsv4 : 5; + u64 rsv5 : 1; + u64 rsv6 : 1; + u64 rsv7 : 7; + u64 rsv8 : 5; + u64 mr_type : 1; + u64 rsv9 : 1; + u64 rsv10 : 1; + u64 rsv11 : 8; + u64 rsv12 : 18; + u64 rsv13 : 6; + + u64 rsv14 : 24; + u64 rsv15 : 8; + u64 pd : 18; + u64 rsv16 : 1; + u64 rsv17 : 1; + u64 rsv18 : 12; + u64 rsv19; + u64 mr_idx : 22; + u64 rsv20 : 10; + u64 op : 6; + u64 rsv21 : 25; + u64 wqe_valid : 1; + u64 rsv22; + u64 rsv23; + u64 rsv24; + u64 rsv25; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; +struct sxe2_create_qp_info { +}; +struct sxe2_modify_qp_info { +}; +struct sxe2_destroy_qp_info { +}; +struct sxe2_query_qp_info { +}; + +struct sxe2_rcms_fcn_info { + u32 vf_id; + u8 protocol_used; + u8 free_fcn; +}; + +struct sxe2_pbl_manage_pble_info { + u32 fpte_idx; + u16 first_spte_idx; + u16 spte_cnt; + u8 invalidate_spte_cnt; + u64 first_spte_pa; +}; + +struct mq_info { + union { + struct { + struct sxe2_mq_ctx *mq; + struct sxe2_rdma_stats_gather_info info; + u64 scratch; + } stats_gather; + + struct { + struct sxe2_rdma_ctx_cq *cq; + u64 scratch; + bool check_overflow; + } cq_create; + + struct { + struct sxe2_rdma_ctx_cq *cq; + struct sxe2_rdma_cqc *cqc; + u64 scratch; + } cq_modify; + + struct { + struct sxe2_rdma_ctx_cq *cq; + u64 query_pa; + u64 scratch; + } cq_query; + + struct { + struct sxe2_rdma_ctx_cq *cq; + u64 scratch; + } cq_destroy; + + struct { + struct sxe2_rdma_ctx_ceq *ceq; + u64 scratch; + } ceq_ops; + + struct { + struct sxe2_rdma_ctx_ceq *ceq; + u64 query_pa; + u64 scratch; + } ceq_query; + + struct { + struct sxe2_rdma_ctx_ceq *ceq; + struct sxe2_rdma_eqc *ceqc; + u64 scratch; + } ceq_modify; + + struct { + struct sxe2_rdma_ctx_aeq *aeq; + u64 scratch; + } aeq_ops; + + struct { + struct sxe2_rdma_ctx_aeq *aeq; + u64 query_pa; + u64 scratch; + } aeq_query; + + struct { + struct sxe2_rdma_ctx_aeq *aeq; + struct sxe2_rdma_eqc *aeqc; + u64 scratch; + } aeq_modify; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_reg_mr_info info; + u64 scratch; + } reg_mr; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_query_mr_info info; + u64 scratch; + } query_mr; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_dereg_mr_info info; + u64 scratch; + } dereg_mr; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_alloc_key_info info; + u64 scratch; + } alloc_key; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_dalloc_key_info info; + u64 scratch; + } dalloc_key; + + struct { + struct sxe2_rdma_ctx_dev *ctx_dev; + union sxe2_ah_info info; + u64 scratch; + } ah_info; + + struct { + struct sxe2_mq_ctx *mq; + void *fpm_val_va; + u64 fpm_val_pa; + u16 rcms_fn_id; + u64 scratch; + } query_fpm_val; + + struct { + struct sxe2_mq_ctx *mq; + void *fpm_val_va; + u64 fpm_val_pa; + u16 rcms_fn_id; + u64 scratch; + } commit_fpm_val; + + struct { + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rcms_update_fptes_info info; + u64 scratch; + } update_pe_fptes; + struct { + struct sxe2_rdma_ctx_qp *qp; + struct sxe2_create_qp_info info; + u64 scratch; + } 
qp_create; + struct { + struct sxe2_rdma_ctx_qp *qp; + struct sxe2_modify_qp_info info; + u64 scratch; + } qp_modify; + struct { + struct sxe2_rdma_ctx_qp *qp; + struct sxe2_destroy_qp_info info; + u64 scratch; + } qp_destroy; + struct { + struct sxe2_rdma_ctx_qp *qp; + struct sxe2_query_qp_info info; + u64 scratch; + } qp_query; + struct { + struct sxe2_mq_ctx *mq; + u64 scratch; + u32 wait_type; + } nop; + + struct { + struct sxe2_rdma_ctx_dev *dev; + struct sxe2_rcms_fcn_info info; + u64 scratch; + } manage_rcms_pm; + + struct { + struct sxe2_mq_ctx *mq; + struct sxe2_pbl_manage_pble_info info; + u64 scratch; + } manage_pble_bp; + + struct { + struct sxe2_rdma_srq_ctx *srq; + u64 scratch; + } srq_create; + + struct { + struct sxe2_rdma_srq_ctx *srq; + u64 query_pa; + u64 scratch; + } srq_query; + + struct { + struct sxe2_rdma_srq_ctx *srq; + struct sxe2_rdma_srqc *srqc; + u64 scratch; + } srq_modify; + + struct { + struct sxe2_rdma_srq_ctx *srq; + u64 scratch; + } srq_destroy; + } u; +}; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +struct sxe2_rdma_aeq_codes_inject { + u8 cq_sw_status_err : 1; + u8 ceq_sw_status_err : 1; + u8 db_ceqn_err : 1; + u8 ceq_ci_noupdate : 2; + u8 aeq_ci_noupdate : 1; + u8 tmo_fpte_valid_0 : 1; + u8 tmo_fpte_flag : 1; + u8 cq_db_no_update : 1; + u8 srq_limit_flag : 1; + u8 llwqe_flag : 1; +}; +#endif + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +struct sxe2_rdma_qos_inject { + u8 apply_qset_err_code; + u8 release_qset_err_code; + u8 qp_bind_qset_err_code; +}; +#endif + +struct sxe2_rdma_pci_f { + bool reset : 1; + bool rsrc_created : 1; + bool msix_shared : 1; + bool ftype : 1; + u8 rsrc_profile; + u16 max_rdma_vfs; + u8 *rcms_info_mem; + u8 *mem_rsrc; + u8 rdma_ver; + u8 rst_to; + u8 pf_id; + u8 fragcnt_limit; + enum sxe2_protocol_used protocol_used; + bool en_rem_endpoint_trk : 1; + u32 sd_type; + u32 msix_count; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 max_ah; + u32 next_ah; + u32 max_pd; + u32 next_qp; + u32 next_cq; + u32 next_pd; + u32 max_mr_size; + u32 max_cqe; + u32 mr_stagmask; + u32 used_pds; + u32 used_cqs; + u32 used_mrs; + u32 used_qps; + u32 used_srqs; + u32 arp_table_size; + u32 next_arp_index; + u32 ceqs_count; + u32 limits_sel; + u32 max_qsets; + u32 next_qset; + u32 max_srq; + u32 next_srq; + u32 max_dbs; + u32 next_db; + u32 max_cc_qp_cnt; + unsigned long *allocated_qset; + unsigned long *allocated_qps; + unsigned long *allocated_cqs; + unsigned long *allocated_mrs; + unsigned long *allocated_pds; + unsigned long *allocated_ahs; + unsigned long *allocated_arps; + unsigned long *allocated_dbs; + unsigned long *allocated_srqs; + enum init_completion_state init_state; + struct sxe2_rdma_ctx_dev ctx_dev; + struct sxe2_rdma_handler *hdl; + struct pci_dev *pcidev; + void *cdev; + struct sxe2_rdma_hw hw; + struct sxe2_mq mq; + struct sxe2_rdma_mcq mcq; + struct sxe2_rdma_aeq aeq; + struct sxe2_rdma_ceq *ceqlist; + struct sxe2_pbl_pble_rsrc *pble_rsrc; + spinlock_t rsrc_lock; + spinlock_t qptable_lock; + spinlock_t cqtable_lock; + spinlock_t srqtable_lock; + struct sxe2_rdma_qp **qp_table; + struct sxe2_rdma_cq **cq_table; + struct sxe2_rdma_srq **srq_table; + spinlock_t qh_list_lock; + struct sxe2_rdma_msix_vector *sxe2_msixtbl; + struct sxe2_rdma_qvlist_info *sxe2_qvlist; + struct tasklet_struct dpc_tasklet; + struct msix_entry *msix_entries; + struct sxe2_rdma_dma_mem obj_mem; + struct sxe2_rdma_dma_mem obj_next; + atomic64_t push_cnt; + atomic_t vchnl_msgs; + wait_queue_head_t vchnl_waitq; + struct 
workqueue_struct *mq_cmpl_wq; + struct work_struct mq_cmpl_work; + struct workqueue_struct *vchnl_wq; + struct sxe2_rdma_ctx_vsi default_vsi; + void *back_fcn; + struct sxe2_rdma_gen_ops gen_ops; + void (*check_fc)(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *ctx_qp); + struct sxe2_cc_refcount cc_refcount; + struct sxe2_rdma_cc_params cc_params; + struct sxe2_rdma_device *rdma_dev; + u8 vlan_parse_en; + phys_addr_t bar_db_addr; + struct sxe2_db_page *db; + struct sxe2_db_llwqe_head db_head; + struct sxe2_llwqe llwqe; + struct sxe2_db_mmap_entry_head db_mmap_entry_head; + struct rcms_page_table_mode rcms_mode; + u8 ack_mode : 1; + u8 oi : 1; + u8 scqe_break_moderation_en : 1; + u8 log_ack_req_freq : 4; + u8 UDPriv_CQEnable : 1; + u8 aeq_pble_en : 1; + u8 hygon_cpu_en; + u8 app_mod_all_flush; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + struct sxe2_rdma_aeq_codes_inject inject_aeq; +#endif + u16 vfid_base; + u8 pf_cnt; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + u32 inject_sleep_time; + struct sxe2_inject_mem inject_mem; + struct sxe2_inject_debug inject_dbg; + struct sxe2_rdma_qos_inject inject_qos; +#endif + atomic_t aeq_created; +}; + +struct sxe2_b300_test_debugfs { + bool mac_loopback_en; + u32 dest_ip_addr; + u32 src_ip_addr; +}; + +#define MAX_RESET_INFO_CNT 10 +#define MAX_VF_FUNC_CNT 32 +#define MAX_BDF_SIZE 16 +#define MAX_TIME_BUF_SIZE 40 + +enum sxe2_reset_type { + FUNC_REQUEST_RESET = 0, + FUNC_WARNING_RESET = 1, +}; + +struct sxe2_reset_debug_info { + s8 time[40]; + enum sxe2_reset_type reset_type; +}; + +struct sxe2_reset_debug_func_info { + u32 reset_cnt; + s8 bdf[16]; + u8 reset_info_idx; + struct sxe2_reset_debug_info reset_info[MAX_RESET_INFO_CNT]; + bool valid; +}; + +struct sxe2_reset_debug_pf_info { + struct sxe2_reset_debug_func_info pf_reset_info; + struct sxe2_reset_debug_func_info vf_info[32]; +}; + +struct sxe2_reset_debug { + struct mutex reset_debug_mutex; + struct sxe2_reset_debug_pf_info pf_info[8]; +}; + +enum sxe2_lag_type { + SXE2_LAG_NONE, + SXE2_LAG_ACTIVE_PASSIVE, + SXE2_LAG_ACTIVE_ACTIVE +}; + +struct sxe2_rdma_device { + struct ib_device ibdev; + struct sxe2_rdma_pci_f *rdma_func; + struct net_device *netdev; + struct notifier_block nb_netdevice_event; + struct notifier_block nb_net_event; + struct notifier_block nb_inet6addr_event; + struct notifier_block nb_inetaddr_event; + struct auxiliary_device *aux_dev; + struct sxe2_rdma_handler *hdl; + const struct sxe2_rdma_profile *profile; + struct workqueue_struct *cleanup_wq; + struct sxe2_rdma_ctx_vsi vsi; + u32 cm_core; + DECLARE_HASHTABLE(ah_hash_tbl, 8); + struct mutex ah_tbl_lock; + +#ifdef CONFIG_DEBUG_FS + u64 ah_reused; +#endif + u32 ah_list_cnt; + u32 ah_list_hwm; + u32 roce_cwnd; + u32 roce_ackcreds; + u32 vendor_id; + u32 vendor_part_id; + u32 rcv_wnd; + u16 mac_ip_table_idx; + u16 vsi_num; + u8 rcv_wscale; + u8 iw_status; + u8 roce_rtomin; + u8 rd_fence_rate; + bool override_rcv_wnd : 1; + bool override_cwnd : 1; + bool override_ackcreds : 1; + bool override_ooo : 1; + bool override_rd_fence_rate : 1; + bool override_rtomin : 1; +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + u64 up_up_map; + u8 cnp_up_override; + u8 iwarp_rtomin; + u32 ceq_intrl; + bool up_map_en : 1; + bool iwarp_dctcp_en : 1; + bool iwarp_timely_en : 1; + bool iwarp_bolt_en : 1; + bool iwarp_ecn_en : 1; + bool roce_ecn_en : 1; + bool roce_timely_en : 1; + bool roce_no_icrc_en : 1; + bool roce_dctcp_en : 1; + bool kernel_llwqe_mode : 1; + bool roce_enable_tph : 1; +#endif + bool 
roce_mode : 1; + bool roce_dcqcn_en : 1; + bool dcb_vlan_mode : 1; + bool iw_ooo : 1; + bool cache_line_64_en : 1; + enum init_completion_state init_state; + enum sxe2_lag_type lag_mode; + s8 bdf[16]; + wait_queue_head_t suspend_wq; + char ib_devname[IB_DEVICE_NAME_MAX]; + int numa_node; + bool rdma_dump_pcap; +#ifdef SXE2_CFG_DEBUG + struct sxe2_reset_debug_func_info *reset_func_info; +#endif + struct aux_ver_info fw_ver; +}; + +struct sxe2_rdma_kcontext { + struct ib_ucontext ibucontext; + struct sxe2_rdma_device *rdma_dev; +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + struct rdma_user_mmap_entry + *db_mmap_entry; +#else + struct sxe2_user_mmap_entry *db_mmap_entry; + DECLARE_HASHTABLE(mmap_hash_tbl, 6); + spinlock_t mmap_tbl_lock; +#endif + struct list_head cq_reg_mem_list; + spinlock_t cq_reg_mem_list_lock; + struct list_head qp_reg_mem_list; + spinlock_t qp_reg_mem_list_lock; + struct list_head uctx_list; + int abi_ver; + struct list_head vma_list; + struct mutex vma_list_mutex; +}; + +struct sxe2_rdma_up_info { + u8 map[8]; + u8 cnp_up_override; + u16 hmc_fcn_idx; + bool use_vlan : 1; + bool use_cnp_up_override : 1; +}; + +struct sxe2_user_mmap_entry { +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + struct rdma_user_mmap_entry rdma_entry; +#else + struct sxe2_rdma_kcontext *ucontext; + struct hlist_node hlist; + u64 pgoff_key; +#endif + + u8 mmap_flag; + u64 address; + u32 page_idx; +}; + +struct sxe2_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + }; + struct ib_umem *region; + int access; + u8 is_hwreg; + u8 acc_mode; + u8 alloced_pble; + u8 alloced_key; + bool is_mw; + u8 is_len64; + u16 rsv2; + u32 page_cnt; + u64 page_size; + u64 page_msk; + u32 npages; + u32 stag; + u64 len; + u64 pgaddrmem; + struct sxe2_pbl_pble_alloc_info pble_alloc; + struct sxe2_rdma_rsc_debug *dbg_node; +}; + +static inline struct sxe2_ah *ibah_to_vendor_ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct sxe2_ah, ibah); +} + +static inline struct sxe2_mr *ibmr_to_vendor_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct sxe2_mr, ibmr); +} + +static inline struct sxe2_mr *ibmw_to_vendor_mw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct sxe2_mr, ibmw); +} + +static inline struct sxe2_rdma_cq *ibcq_to_vendor_cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct sxe2_rdma_cq, ibcq); +} + +static inline struct sxe2_rdma_pci_f * +ctxdev_to_rf(struct sxe2_rdma_ctx_dev *dev) +{ + return container_of(dev, struct sxe2_rdma_pci_f, ctx_dev); +} + +static inline ulong log_page_size_2_bitmap(u32 log_pgsz_bits, u32 pgsz_shift) +{ + u32 largest_pg_shift = + min_t(ulong, (1ULL << log_pgsz_bits) - 1 + pgsz_shift, + BITS_PER_LONG - 1); + + pgsz_shift = max_t(u32, PAGE_SHIFT, pgsz_shift); + + return GENMASK(largest_pg_shift, pgsz_shift); +} + +#define sxe2_umem_find_best_pgsz(umem, pgsz_width, pgsz_shift, iova) \ + ib_umem_find_best_pgsz( \ + umem, log_page_size_2_bitmap(pgsz_width, pgsz_shift), iova) + +static inline struct sxe2_rdma_pd *to_kpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct sxe2_rdma_pd, ibpd); +} + +static inline struct sxe2_rdma_kcontext * +to_rdma_kcontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct sxe2_rdma_kcontext, ibucontext); +} + +static inline struct sxe2_rdma_device *to_dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct sxe2_rdma_device, ibdev); +} + +static inline struct sxe2_rdma_kcontext * +ibuctxto_kctx(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct sxe2_rdma_kcontext, 
ibucontext); +} + +static inline struct sxe2_rdma_device *to_rdmadev(struct sxe2_rdma_ctx_dev *dev) +{ + return (container_of(dev, struct sxe2_rdma_pci_f, ctx_dev))->rdma_dev; +} +#ifndef RDMA_MMAP_DB_NOT_SUPPORT +static inline struct sxe2_user_mmap_entry * +to_mmap(struct rdma_user_mmap_entry *rdma_entry) +{ + return container_of(rdma_entry, struct sxe2_user_mmap_entry, + rdma_entry); +} +#endif +static inline struct sxe2_rdma_pci_f *to_rdmafunc(struct sxe2_rdma_ctx_dev *dev) +{ + return container_of(dev, struct sxe2_rdma_pci_f, ctx_dev); +} + +int sxe2_kget_aligned_mem(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_dma_mem *memptr, u32 size, u32 mask); + +u8 sxe2_kget_encoded_wqe_size(u32 wqsize, enum sxe2_queue_type queue_type); + +static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val) +{ + wqe_words[byte_index >> 3] = cpu_to_le64(val); +} + +static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val) +{ + wqe_words[byte_index >> 2] = cpu_to_le32(val); +} + +static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val) +{ + *val = le64_to_cpu(wqe_words[byte_index >> 3]); +} + +static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val) +{ + *val = le32_to_cpu(wqe_words[byte_index >> 2]); +} + +static inline void sxe2_write64(void __iomem *dest, __le32 val[2]) +{ +#if BITS_PER_LONG == 64 + __raw_writeq(*(u64 *)val, dest); +#else + __raw_writel((__force u32)val[0], dest); + __raw_writel((__force u32)val[1], dest + 4); +#endif +} + +static inline void sxe2_memcpy_x64(void *dest, const void *src, size_t bytecnt) +{ + __le64 *dst_p = dest; + + const __le64 *src_p = src; + + do { + sxe2_write64(dst_p++, (__le32 *)src_p++); + bytecnt -= sizeof(*dst_p); + } while (bytecnt > 0); +} + +int sxe2_kalloc_rsrc(struct sxe2_rdma_pci_f *rf, unsigned long *rsrc_array, + u32 max_rsrc, u32 *req_rsrc_num, u32 *next); + +void sxe2_kfree_rsrc(struct sxe2_rdma_pci_f *rf, unsigned long *rsrc_array, + u32 rsrc_num); + +int sxe2_ucount_bitmap_zero_bits(unsigned long *bitmap, u32 max); + +u32 sxe2_round_up_pow_2(u32 value); + +void sxe2_copy_ip_ntohl(u32 *dst, __be32 *src); + +void sxe2_copy_ip_htonl(__be32 *dst, u32 *src); + +void sxe2_qp_add_ref(struct ib_qp *ibqp); +void sxe2_qp_rem_ref(struct ib_qp *ibqp); +static inline struct sxe2_rdma_qp *to_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct sxe2_rdma_qp, ibqp); +} +static inline struct sxe2_rdma_pd *to_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct sxe2_rdma_pd, ibpd); +} + +static inline struct sxe2_rdma_srq *to_srq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct sxe2_rdma_srq, ibsrq); +} + +void sxe2_clean_cqes(struct sxe2_rdma_qp *rdma_qp, struct sxe2_rdma_cq *rdma_cq, + int cq_type); + +void sxe2_flush_wqe_worker(struct work_struct *work); + +void sxe2_sched_qp_flush_work(struct sxe2_rdma_qp *rdma_qp); +#ifdef QUERY_PKEY_V1 +int sxe2_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey); +#else +int sxe2_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); +#endif + +void sxe2_sched_qp_flush_work(struct sxe2_rdma_qp *qp); + +void sxe2_generate_flush_completions(struct sxe2_rdma_qp *rdma_qp); + +bool sxe2_get_hw_rsrc_clean_flag(struct sxe2_rdma_ctx_dev *dev); + +bool sxe2_drv_core_is_tph_enable(struct sxe2_rdma_device *rdma_dev, + bool is_user_enable, u32 *st_mode); + +bool check_bridge_tph_is_support(struct sxe2_rdma_device *rdma_dev); + +int pci_dev_set_tph_request_cap(struct sxe2_rdma_device *rdma_dev, bool 
enable);
+int sxe2_rdma_adminq_send(struct aux_core_dev_info *cdev_info,
+			  int opcode,
+			  u8 *msg, u16 len, u8 *recv_msg, u16 recv_len);
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..bf670ecbde9505a71fbc0a02d0cbcfd4413c56ae
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_rdma_configfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/configfs.h>
+#include "sxe2_drv_main.h"
+#include "sxe2_drv_hw.h"
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_configfs.h"
+
+#ifdef SXE2_SUPPORT_CONFIGFS
+
+#define SXE2_MIN_INT_RATE_LIMIT 3968
+#define SXE2_MAX_INT_RATE_LIMIT 250000
+#define SXE2_USECS_PER_SEC 1000000
+#define SXE2_USECS_PER_UNIT 4
+#define SXE2_MAX_SUPPORTED_INT_RATE_INTERVAL 0x3F
+
+#define SXE2_MAX_ITR 8190
+#define SXE2_MAX_RD_FENCE_RATE 255
+
+enum sxe2_configfs_attr_type {
+	SXE2_ATTR_ROCE_TIMELY,
+	SXE2_ATTR_ROCE_DCQCN,
+};
+
+struct sxe2_vsi_grp {
+	struct config_group group;
+	struct sxe2_rdma_device *dev;
+};
+
+static struct config_group *sxe2rdma_group;
+
+void sxe2_rdma_set_irq_rate_limit(struct sxe2_rdma_ctx_dev *dev, u32 idx,
+				  u32 interval)
+{
+	u32 value;
+	u32 rate_limit_reg;
+	u32 credit_max_value_reg;
+
+	if (interval == 0) {
+		value = 0;
+	} else {
+		if (interval > SXE2_MAX_SUPPORTED_INT_RATE_INTERVAL)
+			interval = SXE2_MAX_SUPPORTED_INT_RATE_INTERVAL;
+
+		rate_limit_reg =
+			FIELD_PREP(SXE2_PF_INT_RATE_CREDIT_INTERVAL,
+				   interval);
+		credit_max_value_reg =
+			SXE2_PF_INT_RATE_CREDIT_MAX_VALUE;
+
+		value = rate_limit_reg | credit_max_value_reg |
+			SXE2_PF_INT_RATE_INTRL_ENABLE;
+	}
+	SXE2_BAR_WRITE_32(value, dev->hw_regs[PF_INT_RATE] + idx);
+}
+
+static struct sxe2_rdma_device *sxe2_find_device_by_name(const char *name)
+{
+	struct sxe2_rdma_handler *hdl;
+	struct sxe2_rdma_device *dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sxe2_handler_lock, flags);
+	list_for_each_entry(hdl, &sxe2_handlers, list) {
+		dev = hdl->dev;
+		if (!strcmp(name, dev->ibdev.name)) {
+			spin_unlock_irqrestore(&sxe2_handler_lock, flags);
+			return dev;
+		}
+	}
+	spin_unlock_irqrestore(&sxe2_handler_lock, flags);
+
+	return NULL;
+}
+
+static int sxe2_configfs_set_vsi_attr(struct config_item *item, const char *buf,
+				      enum sxe2_configfs_attr_type attr_type)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	bool enable;
+	int ret = 0;
+
+	if (kstrtobool(buf, &enable)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	switch (attr_type) {
+	case SXE2_ATTR_ROCE_TIMELY:
+		dev->rdma_func->cc_params.timely_enable = enable;
+		break;
+	case SXE2_ATTR_ROCE_DCQCN:
+		dev->rdma_func->cc_params.dcqcn_enable = enable;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+done:
+	return ret;
+}
+
+static ssize_t ceq_intrl_store(struct config_item *item, const char *buf,
+			       size_t count)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *rdma_dev = grp->dev;
+	struct sxe2_rdma_msix_vector *msix_vec;
+	u32 intrl, interval = 0;
+	int i;
+
+	if (kstrtou32(buf, 0, &intrl))
+		return -EINVAL;
+
+	if (intrl && intrl < SXE2_MIN_INT_RATE_LIMIT)
+		intrl = SXE2_MIN_INT_RATE_LIMIT;
+	if (intrl > SXE2_MAX_INT_RATE_LIMIT)
+		intrl = SXE2_MAX_INT_RATE_LIMIT;
+
+	rdma_dev->ceq_intrl = intrl;
+	if (intrl) {
+		interval = (SXE2_USECS_PER_SEC / intrl) /
+			   SXE2_USECS_PER_UNIT;
+
+		DRV_RDMA_LOG_DEV_DEBUG(
+			"CEQ Interrupt rate Limit enabled with interval = %d\n",
+			interval);
+	} else {
+		DRV_RDMA_LOG_DEV_DEBUG("CEQ Interrupt rate Limit disabled\n");
+	}
+
+	if (rdma_dev->rdma_func->msix_shared)
+		msix_vec = &rdma_dev->rdma_func->sxe2_msixtbl[1];
+	else
+		msix_vec = &rdma_dev->rdma_func->sxe2_msixtbl[2];
+
+	for (i = 1; i < rdma_dev->rdma_func->ceqs_count; i++, msix_vec++)
+		sxe2_rdma_set_irq_rate_limit(&rdma_dev->rdma_func->ctx_dev,
+					     msix_vec->idx, interval);
+	return count;
+}
+
+static ssize_t ceq_intrl_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->ceq_intrl);
+
+	return ret;
+}
+
+static ssize_t roce_rtomin_store(struct config_item *item, const char *buf,
+				 size_t count)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	u8 rtomin;
+
+	if (kstrtou8(buf, 0, &rtomin))
+		return -EINVAL;
+
+	if (rtomin > SXE2_MAX_ACK_TIMEOUT_VAL)
+		rtomin = SXE2_MAX_ACK_TIMEOUT_VAL;
+
+	dev->roce_rtomin = rtomin;
+	dev->override_rtomin = true;
+
+	return count;
+}
+
+static ssize_t roce_rtomin_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->roce_rtomin);
+
+	return ret;
+}
+
+static ssize_t kernel_llwqe_mode_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->kernel_llwqe_mode);
+
+	return ret;
+}
+
+static ssize_t kernel_llwqe_mode_store(struct config_item *item,
+				       const char *buf, size_t count)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	bool enable;
+
+	if (kstrtobool(buf, &enable))
+		return -EINVAL;
+
+	dev->kernel_llwqe_mode = enable;
+
+	return count;
+}
+
+static ssize_t roce_timely_enable_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->rdma_func->cc_params.timely_enable);
+
+	return ret;
+}
+
+static ssize_t roce_timely_enable_store(struct config_item *item,
+					const char *buf, size_t count)
+{
+	int ret;
+
+	ret = sxe2_configfs_set_vsi_attr(item, buf, SXE2_ATTR_ROCE_TIMELY);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t roce_dcqcn_enable_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->rdma_func->cc_params.dcqcn_enable);
+
+	return ret;
+}
+
+static ssize_t roce_dcqcn_enable_store(struct config_item *item,
+				       const char *buf, size_t count)
+{
+	int ret;
+
+	ret = sxe2_configfs_set_vsi_attr(item, buf, SXE2_ATTR_ROCE_DCQCN);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t roce_rd_fence_rate_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->rd_fence_rate);
+
+	return ret;
+}
+
+static ssize_t roce_rd_fence_rate_store(struct config_item *item,
+					const char *buf, size_t count)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	u32 rd_fence_rate;
+
+	if (kstrtou32(buf, 0, &rd_fence_rate))
+		return -EINVAL;
+
+	if (rd_fence_rate > SXE2_MAX_RD_FENCE_RATE)
+		rd_fence_rate = SXE2_MAX_RD_FENCE_RATE;
+
+	dev->rd_fence_rate = rd_fence_rate;
+	dev->override_rd_fence_rate = true;
+
+	return count;
+}
+
+static ssize_t roce_enable_tph_show(struct config_item *item, char *buf)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *dev = grp->dev;
+	ssize_t ret;
+
+	ret = sprintf(buf, "%d\n", dev->roce_enable_tph);
+
+	return ret;
+}
+
+static ssize_t roce_enable_tph_store(struct config_item *item,
+				     const char *buf, size_t count)
+{
+	struct sxe2_vsi_grp *grp =
+		container_of(to_config_group(item), struct sxe2_vsi_grp, group);
+	struct sxe2_rdma_device *rdma_dev = grp->dev;
+	bool state;
+
+	if (kstrtobool(buf, &state))
+		return -EINVAL;
+
+	if (check_bridge_tph_is_support(rdma_dev)) {
+		pci_dev_set_tph_request_cap(rdma_dev, state);
+		rdma_dev->roce_enable_tph = state;
+	} else {
+		DRV_RDMA_LOG_DEV_WARN("upstream root port does not support TPH completer\n");
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(, kernel_llwqe_mode);
+CONFIGFS_ATTR(, roce_timely_enable);
+CONFIGFS_ATTR(, roce_dcqcn_enable);
+CONFIGFS_ATTR(, ceq_intrl);
+CONFIGFS_ATTR(, roce_rtomin);
+CONFIGFS_ATTR(, roce_rd_fence_rate);
+CONFIGFS_ATTR(, roce_enable_tph);
+
+static struct configfs_attribute *sxe2_roce_vsi_attrs_pf[] = {
+	&attr_kernel_llwqe_mode,
+	&attr_roce_timely_enable,
+	&attr_roce_dcqcn_enable,
+	&attr_ceq_intrl,
+	&attr_roce_rtomin,
+	&attr_roce_rd_fence_rate,
+	&attr_roce_enable_tph,
+	NULL,
+};
+
+static struct configfs_attribute *sxe2_roce_vsi_attrs_vf[] = {
+	&attr_kernel_llwqe_mode,
+	&attr_roce_timely_enable,
+	&attr_roce_dcqcn_enable,
+	&attr_roce_rtomin,
+	&attr_roce_rd_fence_rate,
+	&attr_roce_enable_tph,
+	NULL,
+};
+
+static void sxe2_release_vsi_grp(struct config_item *item)
+{
+	struct config_group *group =
+		container_of(item, struct config_group, cg_item);
+	struct sxe2_vsi_grp *vsi_grp =
+		container_of(group, struct sxe2_vsi_grp, group);
+
+	kfree(vsi_grp);
+}
+
+static struct configfs_item_operations sxe2_vsi_ops = {
+	.release = sxe2_release_vsi_grp
+};
+
+static struct config_item_type sxe2_roce_vsi_type_pf = {
+	.ct_attrs = sxe2_roce_vsi_attrs_pf,
+	.ct_item_ops = &sxe2_vsi_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type sxe2_roce_vsi_type_vf = {
+	.ct_attrs = sxe2_roce_vsi_attrs_vf,
+	.ct_item_ops = &sxe2_vsi_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+static struct config_group *sxe2_vsi_make_group(struct config_group *group,
+						const char *name)
+{
+	struct sxe2_vsi_grp *vsi_grp;
+	struct sxe2_rdma_device *dev;
+
+	dev = sxe2_find_device_by_name(name);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	vsi_grp = kzalloc(sizeof(*vsi_grp), GFP_KERNEL);
+	if (!vsi_grp)
+		return
ERR_PTR(-ENOMEM); + + vsi_grp->dev = dev; + + config_group_init(&vsi_grp->group); + if (dev->rdma_func->ctx_dev.privileged) { + config_group_init_type_name(&vsi_grp->group, name, + &sxe2_roce_vsi_type_pf); + } else { + config_group_init_type_name(&vsi_grp->group, name, + &sxe2_roce_vsi_type_vf); + } + + return &vsi_grp->group; +} + +static struct configfs_group_operations sxe2_vsi_group_ops = { + .make_group = sxe2_vsi_make_group, +}; + +static struct config_item_type sxe2_subsys_type = { + .ct_group_ops = &sxe2_vsi_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem cfs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "sxe2rdma", + .ci_type = &sxe2_subsys_type, + }, + }, +}; + +int sxe2_configfs_init(void) +{ + int ret; + + config_group_init(&cfs_subsys.su_group); + mutex_init(&cfs_subsys.su_mutex); + ret = configfs_register_subsystem(&cfs_subsys); + if (ret) + goto end; + + sxe2rdma_group = &cfs_subsys.su_group; +end: + return ret; +} + +void sxe2_configfs_exit(void) +{ + configfs_unregister_subsystem(&cfs_subsys); + mutex_destroy(&cfs_subsys.su_mutex); + sxe2rdma_group = NULL; +} + +int sxe2_rdma_create_configfs_subdir(const char *name, struct sxe2_rdma_device *dev) +{ + int ret = 0; + struct sxe2_vsi_grp *vsi_grp; + + if (!name || !*name) { + DRV_RDMA_LOG_ERROR("Invalid directory name\n"); + ret = -EINVAL; + goto end; + } + + if (!sxe2rdma_group) { + DRV_RDMA_LOG_ERROR("Parent directory not initialized\n"); + ret = -ENODEV; + goto end; + } + + vsi_grp = kzalloc(sizeof(*vsi_grp), GFP_KERNEL); + if (!vsi_grp) { + ret = -ENOMEM; + goto end; + } + + vsi_grp->dev = dev; + config_group_init(&vsi_grp->group); + + if (dev->rdma_func->ctx_dev.privileged) { + config_group_init_type_name(&vsi_grp->group, name, + &sxe2_roce_vsi_type_pf); + } else { + config_group_init_type_name(&vsi_grp->group, name, + &sxe2_roce_vsi_type_vf); + } + + ret = configfs_register_group(sxe2rdma_group, &vsi_grp->group); + if (ret) { + DRV_RDMA_LOG_ERROR("Failed to create dir %s: %d\n", name, ret); + kfree(vsi_grp); + } +end: + return ret; +} + +void sxe2_rdma_remove_configfs_subdir(const char *name) +{ + struct config_group *parent_group = sxe2rdma_group; + struct sxe2_vsi_grp *vsi_grp = NULL; + struct config_item *item; + struct config_item *tmp; + + if (!parent_group || !name || !*name) { + DRV_RDMA_LOG_ERROR("Invalid parameters\n"); + return; + } + + mutex_lock(&cfs_subsys.su_mutex); + list_for_each_entry_safe(item, tmp, &parent_group->cg_children, ci_entry) { + if (!item) { + DRV_RDMA_LOG_WARN("NULL item encountered in list\n"); + continue; + } + + if (strcmp(config_item_name(item), name) == 0) { + vsi_grp = container_of(to_config_group(item), struct sxe2_vsi_grp, group); + break; + } + } + mutex_unlock(&cfs_subsys.su_mutex); + + if (vsi_grp) { + configfs_unregister_group(&vsi_grp->group); + kfree(vsi_grp); + + DRV_RDMA_LOG_INFO("Removed directory: %s\n", name); + } else { + DRV_RDMA_LOG_ERROR("Directory %s not found\n", name); + } + +} +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.h new file mode 100644 index 0000000000000000000000000000000000000000..40c949752fb50d0118405c3056b36215e99fba2b --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_configfs.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_configfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_RDMA_CONFIGFS_H__ +#define __SXE2_DRV_RDMA_CONFIGFS_H__ +#ifdef SXE2_SUPPORT_CONFIGFS +int sxe2_rdma_create_configfs_subdir(const char *name, struct sxe2_rdma_device *dev); +void sxe2_rdma_remove_configfs_subdir(const char *name); +int sxe2_configfs_init(void); +void sxe2_configfs_exit(void); +void sxe2_rdma_set_irq_rate_limit(struct sxe2_rdma_ctx_dev *dev, u32 idx, + u32 interval); + +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..807a074f405dc2737fd758500c0679894bd322fe --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.c @@ -0,0 +1,927 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include "sxe2_compat.h" +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif +#include "sxe2_drv_cq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_eq_debugfs.h" +#include "sxe2_drv_mq_debugfs.h" +#include "sxe2_drv_rdma_inject_debugfs.h" + +#define SXE2_DEBUG_RSC_FILE_NAME_LEN (32) + +static struct dentry *sxe2_rdma_dbg_root; +static const char sxe2_rdma_driver_name[] = "sxe2rdma"; + +size_t dbg_vsnprintf(char *buf, size_t len, char *fmt, ...) +{ + size_t cnt = 0; + int ret; + va_list args; + + va_start(args, fmt); + ret = vsnprintf(buf + len, SXE2_DEBUG_DUMP_BUF_SIZE - len, fmt, args); + if (ret >= 0) { + cnt = (size_t)ret; + } else { + DRV_RDMA_LOG_ERROR("vsnprintf format err %d\n", ret); + ; + } + va_end(args); + + return cnt; +} + +int split_command(char *cmd, int *argc, char *argv[]) +{ + int ret = 0; + char *token = NULL; + + cmd[strlen(cmd) - 1] = '\0'; + token = strsep(&cmd, " "); + while (token != NULL) { + if (*argc >= DEBUG_ARGV_COUNT_MAX) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("too many arguments: '%s'\n", token); + goto end; + } + + argv[*argc] = token; + + token = strsep(&cmd, " "); + (*argc)++; + } + +end: + return ret; +} + +STATIC ssize_t drv_sceq_break_moderation_en_read(struct file *filp, + char __user *buf, size_t count, + loff_t *pos) +{ + u32 ret; + u8 sceq_break_moderation_en; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + sceq_break_moderation_en = + rdma_dev->rdma_func->scqe_break_moderation_en; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", + sceq_break_moderation_en); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_sceq_break_moderation_en_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 sceq_break_moderation_en; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &sceq_break_moderation_en); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->scqe_break_moderation_en = + (u8)sceq_break_moderation_en; + DRV_RDMA_LOG_DEV_WARN("set sceq_break_moderation_en to %d\n", + sceq_break_moderation_en); + + *pos = (loff_t)count; + ret = (ssize_t)count; +out: + return 
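+/*
+ * Example, using the directories created by sxe2_rdma_dbg_pf_init()
+ * below (<bdf> is the PCI device name):
+ *
+ *   echo 1 > /sys/kernel/debug/sxe2rdma/<bdf>/CQs/scqe_break_moderation_en
+ *   cat /sys/kernel/debug/sxe2rdma/<bdf>/CQs/scqe_break_moderation_en
+ */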
ret; +} + +static const struct file_operations sceq_break_moderation_en_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_sceq_break_moderation_en_read, + .write = drv_sceq_break_moderation_en_write, +}; + +STATIC ssize_t drv_aeq_pble_en_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + u32 ret; + u8 aeq_pble_en; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + aeq_pble_en = rdma_dev->rdma_func->aeq_pble_en; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", aeq_pble_en); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_aeq_pble_en_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 aeq_pble_en; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &aeq_pble_en); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->aeq_pble_en = (u8)aeq_pble_en; + DRV_RDMA_LOG_DEV_WARN("set aeq_pble_en to %d\n", aeq_pble_en); + + *pos = (loff_t)count; + ret = (ssize_t)count; +out: + return ret; +} + +static const struct file_operations aeq_pble_en_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_aeq_pble_en_read, + .write = drv_aeq_pble_en_write, +}; + +STATIC ssize_t drv_ceq_itr_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + u32 ret; + u8 ceq_itr; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + ceq_itr = (u8)rdma_dev->rdma_func->ctx_dev.ceq_itr; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", ceq_itr); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_ceq_itr_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 ceq_itr; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &ceq_itr); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->ctx_dev.ceq_itr = ceq_itr; + DRV_RDMA_LOG_DEV_WARN("set ceq_itr to %d\n", ceq_itr); + + *pos = (loff_t)count; + ret = (ssize_t)count; +out: + return ret; +} + +static const struct file_operations ceq_itr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_ceq_itr_read, + .write = drv_ceq_itr_write, +}; + +STATIC ssize_t drv_ack_mode_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + u32 ret; + u8 ack_mode; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + ack_mode = rdma_dev->rdma_func->ack_mode; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", ack_mode); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_ack_mode_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 ack_mode; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &ack_mode); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->ack_mode = ack_mode; + DRV_RDMA_LOG_DEV_WARN("set ack_mode to %d\n", ack_mode); + + *pos = (loff_t)count; + ret = 
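+/*
+ * Note on the write handlers in this file: `count` bytes are copied
+ * into an 8-byte stack buffer without an upper bound. A hardened
+ * variant (sketch only, reusing the locals above) would be:
+ *
+ *   if (count >= sizeof(in_buf))
+ *           return -EINVAL;
+ *   if (copy_from_user(in_buf, buf, count))
+ *           return -EFAULT;
+ *   in_buf[count] = '\0';
+ */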
(ssize_t)count; +out: + return ret; +} + +static const struct file_operations ack_mode_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_ack_mode_read, + .write = drv_ack_mode_write, +}; + +STATIC ssize_t drv_log_ack_req_freq_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + u32 ret; + u8 log_ack_req_freq; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + log_ack_req_freq = rdma_dev->rdma_func->log_ack_req_freq; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", log_ack_req_freq); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_log_ack_req_freq_write(struct file *filp, + const char __user *buf, size_t count, + loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 log_ack_req_freq; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &log_ack_req_freq); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->log_ack_req_freq = (u8)log_ack_req_freq; + DRV_RDMA_LOG_DEV_WARN("set log_ack_req_freq to %d\n", log_ack_req_freq); + + *pos = (loff_t)count; + ret = (ssize_t)count; +out: + return ret; +} + +static const struct file_operations log_ack_req_freq_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_log_ack_req_freq_read, + .write = drv_log_ack_req_freq_write, +}; + +STATIC ssize_t drv_UDPriv_CQEnable_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + u32 ret; + u8 UDPriv_CQEnable; + char out_buf[8]; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + UDPriv_CQEnable = rdma_dev->rdma_func->UDPriv_CQEnable; + ret = (u32)snprintf(out_buf, sizeof(out_buf), "%d\n", UDPriv_CQEnable); + return simple_read_from_buffer(buf, count, pos, out_buf, ret); +} + +STATIC ssize_t drv_UDPriv_CQEnable_write(struct file *filp, + const char __user *buf, size_t count, + loff_t *pos) +{ + ssize_t ret; + char in_buf[8] = { 0 }; + u32 UDPriv_CQEnable; + struct sxe2_rdma_device *rdma_dev; + + rdma_dev = filp->private_data; + + if (copy_from_user(in_buf, buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = sscanf(in_buf, "%u", &UDPriv_CQEnable); + if (ret != 1) { + ret = -EINVAL; + goto out; + } + + rdma_dev->rdma_func->UDPriv_CQEnable = UDPriv_CQEnable; + DRV_RDMA_LOG_DEV_WARN("set UDPriv_CQEnable to %d\n", UDPriv_CQEnable); + + *pos = (loff_t)count; + ret = (ssize_t)count; +out: + return ret; +} + +static const struct file_operations UDPriv_CQEnable_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_UDPriv_CQEnable_read, + .write = drv_UDPriv_CQEnable_write, +}; + +STATIC ssize_t drv_rdma_dbg_read(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + u64 field = 0; + char *out_buf = NULL; + struct sxe2_rdma_rsc_debug *dbg_node; + struct sxe2_rdma_device *rdma_dev; + + if (*pos != 0) { + ret = 0; + goto end; + } + + dbg_node = filp->private_data; + rdma_dev = dbg_node->dev; + + if (dbg_node->type >= SXE2_DBG_RSC_MAX) { + ret = 0; + DRV_RDMA_LOG_DEV_ERR("read invalid type %d\n", dbg_node->type); + goto end; + } + + out_buf = kzalloc(SXE2_DEBUG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!out_buf) { + DRV_RDMA_LOG_DEV_ERR("debugfs memory alloc failed.\n"); + ret = -ENOMEM; + goto end; + } + + if (dbg_node->func_tab.read_func) { + field = dbg_node->func_tab.read_func(dbg_node->dev, + dbg_node->object, + dbg_node->type, 
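+/*
+ * Contract: read_func fills out_buf (SXE2_DEBUG_DUMP_BUF_SIZE bytes)
+ * and returns the number of bytes written; a return of 0, or one
+ * that reaches the buffer size, is rejected as a formatting failure
+ * below.
+ */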
out_buf); + if (field == 0 || field >= SXE2_DEBUG_DUMP_BUF_SIZE) { + DRV_RDMA_LOG_DEV_ERR( + "debugfs read failed. field %llu\n", field); + ret = -EFAULT; + kfree(out_buf); + out_buf = NULL; + goto end; + } + } + + ret = simple_read_from_buffer(buf, count, pos, out_buf, field); + kfree(out_buf); + out_buf = NULL; + +end: + return ret; +} + +STATIC ssize_t drv_rdma_dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + ssize_t ret; + char log_buf[64] = { 0 }; + struct sxe2_rdma_rsc_debug *dbg_node; + struct sxe2_rdma_device *rdma_dev; + + (void)filp; + + dbg_node = filp->private_data; + rdma_dev = dbg_node->dev; + + if (count >= sizeof(log_buf)) { + ret = -ENOSPC; + goto end; + } + + if (copy_from_user(log_buf, buf, count)) { + ret = -EFAULT; + goto end; + } + + if (dbg_node->func_tab.write_func) { + ret = dbg_node->func_tab.write_func(dbg_node->dev, + dbg_node->object, + dbg_node->type, log_buf); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR("debugfs write failed.\n"); + ret = -EFAULT; + goto end; + } + } + + *pos = (loff_t)count; + ret = (ssize_t)count; + +end: + return ret; +} + +static const struct file_operations sxe2_rdma_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_rdma_dbg_read, + .write = drv_rdma_dbg_write, +}; + +struct sxe2_rdma_rsc_debug * +drv_rdma_add_res_tree(struct sxe2_rdma_device *rdma_dev, + enum drv_rdma_dbg_rsc_type type, struct dentry *root, + sxe2_drv_rdma_debugfs_read rfunc, + sxe2_drv_rdma_debugfs_write wfunc, int rsn, void *data) +{ + char resn[SXE2_DEBUG_RSC_FILE_NAME_LEN]; + struct sxe2_rdma_rsc_debug *dbg_node = NULL; + + if (type >= SXE2_DBG_RSC_MAX) { + DRV_RDMA_LOG_DEV_ERR("cmr debugfs reg invalid type %d\n", type); + goto end; + } + + dbg_node = kzalloc(sizeof(*dbg_node), GFP_KERNEL); + if (!dbg_node) { + DRV_RDMA_LOG_DEV_ERR("dbg node buf alloc error sz %lu\n", + sizeof(*dbg_node)); + goto end; + } + + dbg_node->dev = rdma_dev; + dbg_node->object = data; + dbg_node->type = type; + dbg_node->func_tab.read_func = rfunc; + dbg_node->func_tab.write_func = wfunc; + + if (type == SXE2_DBG_RSC_AEQ) { + dbg_node->root = root; + goto create_file; + } else { + sprintf(resn, "0x%x", rsn); + + dbg_node->root = debugfs_create_dir(resn, root); + if (!dbg_node->root) { + DRV_RDMA_LOG_DEV_ERR("debugfs create %s dir failed.\n", + resn); + kfree(dbg_node); + dbg_node = NULL; + goto end; + } + } + +create_file: + debugfs_create_file("context", SXE2_DEBUG_FILE_READ_WRITE, + dbg_node->root, dbg_node, &sxe2_rdma_fops); + +end: + return dbg_node; +} + +void drv_rdma_rm_res_tree(struct sxe2_rdma_rsc_debug *dbg) +{ + debugfs_remove_recursive(dbg->root); + dbg->root = NULL; + kfree(dbg); + dbg = NULL; +} + +struct dentry *sxe2_rdma_debugfs_get_dev_root(struct sxe2_rdma_device *rdma_dev) +{ + return rdma_dev->hdl->sxe2_rdma_dbg_dentry; +} + +#ifdef SXE2_CFG_DEBUG +void sxe2_debug_file_creat(struct sxe2_rdma_device *rdma_dev) +{ + debugfs_create_file("ack_mode", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->qp_debugfs, rdma_dev, + &ack_mode_fops); + + debugfs_create_file("log_ack_req_freq", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->qp_debugfs, rdma_dev, + &log_ack_req_freq_fops); + + debugfs_create_file("UDPriv_CQEnable", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->qp_debugfs, rdma_dev, + &UDPriv_CQEnable_fops); + + debugfs_create_file("scqe_break_moderation_en", + SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->cq_debugfs, rdma_dev, + &sceq_break_moderation_en_fops); + + debugfs_create_file("aeq_pble_en", 
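+/*
+ * Resulting debug-build layout under /sys/kernel/debug/sxe2rdma/<bdf>/:
+ *   QPs/{ack_mode,log_ack_req_freq,UDPriv_CQEnable},
+ *   CQs/scqe_break_moderation_en, EQs/aeq/aeq_pble_en, EQs/ceq_itr
+ */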
SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->aeq_debugfs, rdma_dev, + &aeq_pble_en_fops); + + debugfs_create_file("ceq_itr", SXE2_DEBUG_FILE_READ_WRITE, + rdma_dev->hdl->eq_debugfs, rdma_dev, &ceq_itr_fops); +} +#endif + +int sxe2_rdma_dbg_pf_init(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + struct sxe2_rdma_handler *hdl = rdma_dev->hdl; + const char *name = pci_name(hdl->dev->rdma_func->pcidev); + struct dentry *pfile __always_unused; + + DRV_RDMA_LOG_DEV_DEBUG("debugfs pf init start.\n"); + + spin_lock_init(&hdl->uctx_list_lock); + INIT_LIST_HEAD(&hdl->ucontext_list); + + hdl->sxe2_rdma_dbg_dentry = + debugfs_create_dir(name, sxe2_rdma_dbg_root); + if (!hdl->sxe2_rdma_dbg_dentry) { + DRV_RDMA_LOG_DEV_ERR("debugfs create %s dir failed.\n", name); + ret = -ENOMEM; + goto end; + } + + hdl->cq_debugfs = debugfs_create_dir("CQs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->cq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create CQ dir failed.\n"); + ret = -ENOMEM; + goto remove_root; + } + + hdl->qp_debugfs = debugfs_create_dir("QPs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->qp_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create QP dir failed.\n"); + ret = -ENOMEM; + goto remove_cq_debug; + } + + hdl->eq_debugfs = debugfs_create_dir("EQs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->eq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create EQ dir failed.\n"); + ret = -ENOMEM; + goto remove_qp_debug; + } + + hdl->ceq_debugfs = debugfs_create_dir("ceqs", hdl->eq_debugfs); + if (!hdl->ceq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create ceq dir failed.\n"); + ret = -ENOMEM; + goto remove_eq_debug; + } + + hdl->aeq_debugfs = debugfs_create_dir("aeq", hdl->eq_debugfs); + if (!hdl->aeq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create aeq dir failed.\n"); + ret = -ENOMEM; + goto remove_ceq_debug; + } + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + ret = sxe2_rdma_aeq_codes_inject_add(rdma_dev); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("aeq codes inject add failed.\n"); + goto remove_aeq_debug; + } + sxe2_drv_inject_create_debugfs_files(rdma_dev); +#endif + + hdl->mq_debugfs = debugfs_create_dir("MQ", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->mq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create MQ dir failed.\n"); + ret = -ENOMEM; +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + goto remove_aeq_codes_inject_debug; +#else + goto remove_aeq_debug; +#endif + } + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + sxe2_kadd_mq_nop_debugfs_files(rdma_dev); + + ret = sxe2_kadd_mq_err_debugfs(rdma_dev); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("debugfs create MQ Err dirs failed.\n"); + ret = -ENOMEM; + goto remove_mq_debug; + } +#endif + +#ifdef SXE2_CFG_DEBUG + hdl->db_debugfs = debugfs_create_dir("DB", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->db_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create DB dir failed.\n"); + ret = -ENOMEM; +#ifdef SXE2_SUPPORT_INJECT + goto remove_mq_err_debug; +#else + goto remove_mq_debug; +#endif + } +#endif + + hdl->mr_debugfs = debugfs_create_dir("MRs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->mr_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create mr dir failed.\n"); + ret = -ENOMEM; + goto remove_db_debug; + } + + hdl->rcms_debugfs = + debugfs_create_dir("RCMS", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->rcms_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create RCMS dir failed.\n"); + ret = -ENOMEM; + goto remove_mr_debug; + } +#ifdef SXE2_CFG_DEBUG + hdl->ah_debugfs = debugfs_create_dir("AHs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->ah_debugfs) { + 
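+/*
+ * On kernels where debugfs_create_dir() returns ERR_PTR() instead of
+ * NULL on failure, these NULL checks cannot fire; IS_ERR() would be
+ * the stricter test there.
+ */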
DRV_RDMA_LOG_DEV_ERR("debugfs create ah dir failed.\n"); + ret = -ENOMEM; + goto remove_rcms_debug; + } +#endif + hdl->srq_debugfs = + debugfs_create_dir("SRQs", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->srq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create srq dir failed.\n"); + ret = -ENOMEM; +#ifdef SXE2_CFG_DEBUG + goto remove_ah_debug; +#else + goto remove_rcms_debug; +#endif + } + +#ifdef SXE2_CFG_DEBUG + sxe2_debug_file_creat(rdma_dev); +#endif + + hdl->qos_debugfs = debugfs_create_dir("QOS", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->qos_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create QOS dir failed.\n"); + ret = -ENOMEM; + goto remove_srq_debug; + } + +#ifdef SXE2_CFG_DEBUG + hdl->stats_debugfs = + debugfs_create_dir("STATS", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->stats_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create STATS dir failed.\n"); + ret = -ENOMEM; + goto remove_qos_debug; + } +#endif + + hdl->cc_debugfs = debugfs_create_dir("CC", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->cc_debugfs) { + DRV_RDMA_LOG_ERROR_BDF("debugfs create CC dir failed.\n"); + ret = -ENOMEM; + goto remove_stats_debug; + } + + hdl->common_debugfs = + debugfs_create_dir("COMMON", hdl->sxe2_rdma_dbg_dentry); + if (!hdl->common_debugfs) { + DRV_RDMA_LOG_DEV_ERR("debugfs create COMMON dir failed.\n"); + ret = -ENOMEM; + goto remove_cc_debug; + } + + DRV_RDMA_LOG_DEV_DEBUG("debugfs pf init end.\n"); + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + if (rdma_dev->rdma_func->inject_sleep_time) + msleep(rdma_dev->rdma_func->inject_sleep_time); +#endif + + goto end; + +remove_cc_debug: + debugfs_remove_recursive(hdl->cc_debugfs); + hdl->cc_debugfs = NULL; + +remove_stats_debug: +#ifdef SXE2_CFG_DEBUG + debugfs_remove_recursive(hdl->stats_debugfs); + hdl->stats_debugfs = NULL; +#endif + +#ifdef SXE2_CFG_DEBUG +remove_qos_debug: + debugfs_remove_recursive(hdl->qos_debugfs); + hdl->qos_debugfs = NULL; +#endif + +remove_srq_debug: + debugfs_remove_recursive(hdl->srq_debugfs); + hdl->srq_debugfs = NULL; + +#ifdef SXE2_CFG_DEBUG +remove_ah_debug: + debugfs_remove_recursive(hdl->ah_debugfs); + hdl->ah_debugfs = NULL; +#endif + +remove_rcms_debug: + debugfs_remove_recursive(hdl->rcms_debugfs); + hdl->rcms_debugfs = NULL; + +remove_mr_debug: + debugfs_remove_recursive(hdl->mr_debugfs); + hdl->mr_debugfs = NULL; + +remove_db_debug: +#ifdef SXE2_CFG_DEBUG + debugfs_remove_recursive(hdl->db_debugfs); + hdl->db_debugfs = NULL; +#endif + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +remove_mq_err_debug: + sxe2_kremove_mq_err_debugfs(rdma_dev); +#endif + +#ifdef SXE2_CFG_DEBUG +remove_mq_debug: + debugfs_remove_recursive(hdl->mq_debugfs); + hdl->mq_debugfs = NULL; +#endif + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +remove_aeq_codes_inject_debug: + drv_rdma_aeq_codes_inject_del(rdma_dev); +#endif + +remove_aeq_debug: + debugfs_remove_recursive(hdl->aeq_debugfs); + hdl->aeq_debugfs = NULL; + +remove_ceq_debug: + debugfs_remove_recursive(hdl->ceq_debugfs); + hdl->ceq_debugfs = NULL; + +remove_eq_debug: + debugfs_remove_recursive(hdl->eq_debugfs); + hdl->eq_debugfs = NULL; + +remove_qp_debug: + debugfs_remove_recursive(hdl->qp_debugfs); + hdl->qp_debugfs = NULL; + +remove_cq_debug: + debugfs_remove_recursive(hdl->cq_debugfs); + hdl->cq_debugfs = NULL; + +remove_root: + debugfs_remove_recursive(hdl->sxe2_rdma_dbg_dentry); + hdl->sxe2_rdma_dbg_dentry = NULL; + +end: + return ret; +} + +void sxe2_rdma_dgb_pf_exit(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_handler *hdl = 
rdma_dev->hdl; + + DRV_RDMA_LOG_DEV_DEBUG("debugfs remove entries start.\n"); + + debugfs_remove_recursive(hdl->cq_debugfs); + hdl->cq_debugfs = NULL; + + debugfs_remove_recursive(hdl->qp_debugfs); + hdl->qp_debugfs = NULL; + + debugfs_remove_recursive(hdl->ceq_debugfs); + hdl->ceq_debugfs = NULL; + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + drv_rdma_aeq_codes_inject_del(rdma_dev); +#endif + + debugfs_remove_recursive(hdl->aeq_debugfs); + hdl->aeq_debugfs = NULL; + + debugfs_remove_recursive(hdl->eq_debugfs); + hdl->eq_debugfs = NULL; + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + sxe2_kremove_mq_err_debugfs(rdma_dev); +#endif + + debugfs_remove_recursive(hdl->mq_debugfs); + hdl->mq_debugfs = NULL; + + debugfs_remove_recursive(hdl->rcms_debugfs); + hdl->rcms_debugfs = NULL; + +#ifdef SXE2_CFG_DEBUG + debugfs_remove_recursive(hdl->db_debugfs); + hdl->db_debugfs = NULL; +#endif + + debugfs_remove_recursive(hdl->srq_debugfs); + hdl->srq_debugfs = NULL; + + debugfs_remove_recursive(hdl->mr_debugfs); + hdl->mr_debugfs = NULL; +#ifdef SXE2_CFG_DEBUG + debugfs_remove_recursive(hdl->ah_debugfs); + hdl->ah_debugfs = NULL; +#endif + debugfs_remove_recursive(hdl->qos_debugfs); + hdl->qos_debugfs = NULL; + +#ifdef SXE2_CFG_DEBUG + debugfs_remove_recursive(hdl->stats_debugfs); + hdl->stats_debugfs = NULL; +#endif + debugfs_remove_recursive(hdl->cc_debugfs); + hdl->cc_debugfs = NULL; + + debugfs_remove_recursive(hdl->common_debugfs); + hdl->common_debugfs = NULL; +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + sxe2_drv_inject_clean_debug_files(rdma_dev); +#endif + + debugfs_remove_recursive(hdl->sxe2_rdma_dbg_dentry); + hdl->sxe2_rdma_dbg_dentry = NULL; +} + +int sxe2_rdma_dbg_init(void) +{ + sxe2_rdma_dbg_root = debugfs_create_dir(sxe2_rdma_driver_name, NULL); + if (!sxe2_rdma_dbg_root) { + DRV_RDMA_LOG_ERROR("debugfs create sxe2 dir failed.\n"); + return -ENOMEM; + } + return 0; +} + +void sxe2_rdma_dbg_exit(void) +{ + debugfs_remove_recursive(sxe2_rdma_dbg_root); +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..75b068d93210f674181d1a419544c6037934e450 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_debugfs.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_RDMA_DEBUGFS_H__ +#define __SXE2_DRV_RDMA_DEBUGFS_H__ + +#include "sxe2_drv_rdma_common.h" + +#define SXE2_DEBUG_DUMP_BUF_SIZE (8192) +#define SXE2_DEBUG_FILE_ONLY_WRITE (0200) +#define SXE2_DEBUG_FILE_ONLY_READ (0400) +#define SXE2_DEBUG_FILE_READ_WRITE (0600) + +#define DEBUG_ARGV_COUNT_MAX (32) +#define DEBUG_PARA_CONT1 (1) +#define DEBUG_PARA_CONT2 (2) +#define DEBUG_PARA_CONT3 (3) +#define DEBUG_PARA_CONT4 (4) + +size_t dbg_vsnprintf(char *buf, size_t len, char *fmt, ...); +int split_command(char *cmd, int *argc, char *argv[]); + +struct sxe2_rdma_rsc_debug * +drv_rdma_add_res_tree(struct sxe2_rdma_device *dev, + enum drv_rdma_dbg_rsc_type type, struct dentry *root, + sxe2_drv_rdma_debugfs_read rfunc, + sxe2_drv_rdma_debugfs_write wfunc, int rsn, void *data); +void drv_rdma_rm_res_tree(struct sxe2_rdma_rsc_debug *dbg); + +struct dentry * +sxe2_rdma_debugfs_get_dev_root(struct sxe2_rdma_device *rdma_dev); + +int sxe2_rdma_dbg_pf_init(struct sxe2_rdma_device *rdma_dev); +void sxe2_rdma_dgb_pf_exit(struct sxe2_rdma_device *rdma_dev); + +int sxe2_rdma_dbg_init(void); +void sxe2_rdma_dbg_exit(void); + +int drv_rdma_debug_qp_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp); +void drv_rdma_debug_qp_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_qp *qp); +void sxe2_debug_file_creat(struct sxe2_rdma_device *rdma_dev); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.c new file mode 100644 index 0000000000000000000000000000000000000000..8eaccfdcd749cc06e0f694c0032e4bad10a498e9 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_device_port.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_device_port.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_db.h" +#include +#include +#include +#include +#include + +#ifdef NEED_KALLOC_UCONTEXT_V1 +struct ib_ucontext *sxe2_rdma_kalloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_alloc_ucontext_req req = {}; + struct sxe2_alloc_ucontext_resp uresp = {}; + struct sxe2_rdma_kcontext *ucontext = NULL; + struct sxe2_common_attrs *uk_attrs = + &rdma_dev->rdma_func->ctx_dev.hw_attrs.uk_attrs; + struct sxe2_db_ucontext *db_ucontext_entry = NULL; + + DRV_RDMA_LOG_DEV_DEBUG( + "device:kalloc uctx start inlen=%zu outlen=%zu\n", udata->inlen, + udata->outlen); + DRV_RDMA_LOG_DEV_DEBUG("device:inlen min=%zu outlen min=%zu\n", + SXE2_ALLOC_UCTX_MIN_REQ_LEN, + SXE2_ALLOC_UCTX_MIN_RESP_LEN); + if (udata->inlen < SXE2_ALLOC_UCTX_MIN_REQ_LEN || + udata->outlen < SXE2_ALLOC_UCTX_MIN_RESP_LEN) { + DRV_RDMA_LOG_DEV_ERR( + "device:inlen or out len size err inlen=%zu outlen=%zu\n", + udata->inlen, udata->outlen); + ret = -EINVAL; + goto end; + } + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EINVAL; + goto end; + } + if (req.userspace_ver < SXE2_MIN_USER_SPACE_VER || + req.userspace_ver > SXE2_MAX_USER_SPACE_VER) { + DRV_RDMA_LOG_DEV_ERR("device:user space ver %u err\n", + req.userspace_ver); + ret = -EINVAL; + goto end; + } + + ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); + if (!ucontext) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("ucontext kzalloc failed\n"); + goto end; + } + + INIT_LIST_HEAD(&ucontext->vma_list); + mutex_init(&ucontext->vma_list_mutex); + + ucontext->rdma_dev = rdma_dev; + ucontext->ibucontext.device = ibdev; + ucontext->abi_ver = req.userspace_ver; + DRV_RDMA_LOG_DEV_DEBUG("device:rdma dev=%p userspace ver=%u\n", + ucontext->rdma_dev, ucontext->abi_ver); + if (udata->outlen == SXE2_ALLOC_UCTX_MIN_RESP_LEN) { + uresp.max_qps = rdma_dev->rdma_func->max_qp; + uresp.max_pds = + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_hw_pds; + uresp.wq_size = + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_qp_wr * 2; + uresp.kernel_ver = req.userspace_ver; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EFAULT; + goto end; + } + } else { + uresp.kernel_ver = SXE2_RDMA_ABI_VER; + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.hw_rev = uk_attrs->hw_rev; + uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; + uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; + uresp.max_hw_srq_wr = uk_attrs->max_hw_srq_wr; + uresp.comp_mask |= SXE2_ALLOC_UCTX_MIN_HW_WQ_SIZE; + DRV_RDMA_LOG_DEV_DEBUG( + "device:max hw wq frages=%u min hw wq size=%u\n", + uresp.max_hw_wq_frags, uresp.min_hw_wq_size); + strscpy(uresp.bdf, 
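+/*
+ * outlen == SXE2_ALLOC_UCTX_MIN_RESP_LEN selected the legacy response
+ * above (max_qps/max_pds/wq_size only); providers passing the larger
+ * outlen receive this extended layout. Also note that strscpy()
+ * always NUL-terminates, so the "- 1" on the size below is merely
+ * conservative.
+ */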
rdma_dev->bdf, sizeof(uresp.bdf) - 1); + uresp.max_db = (__u32)sxe2_ucount_bitmap_zero_bits( + rdma_func->allocated_dbs, rdma_func->max_dbs); + uresp.is_pf = (__u8)rdma_dev->rdma_func->ftype ? + SXE2_UCTX_IS_VF : + SXE2_UCTX_IS_PF; + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->db_mmap_entry = rdma_user_mmap_entry_add_hash( + ucontext, SXE2_DRV_DB_MMAP_TYPE_NC, &uresp.db_mmap_key); + + if (!ucontext->db_mmap_entry) { + DRV_RDMA_LOG_DEV_ERR( + "device:db insert user mmap entry err\n"); + ret = -ENOMEM; + goto end; + } + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EFAULT; + goto free_mmap_entry; + } + } + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + db_ucontext_entry = kzalloc(sizeof(*db_ucontext_entry), GFP_KERNEL); + if (!db_ucontext_entry) { + DRV_RDMA_LOG_DEV_ERR( + "device:db alloc db ucontext entry failed.\n"); + ret = -ENOMEM; + goto free_mmap_entry; + } + INIT_LIST_HEAD(&db_ucontext_entry->db_pageid_list); + INIT_LIST_HEAD(&db_ucontext_entry->entry_list); + db_ucontext_entry->ibucontext = &ucontext->ibucontext; + mutex_lock(&rdma_func->db_mmap_entry_head.lock); + list_add_tail(&db_ucontext_entry->list, &rdma_func->db_mmap_entry_head.list); + mutex_unlock(&rdma_func->db_mmap_entry_head.lock); + goto end; +free_mmap_entry: + rdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); +end: + return ret ? ERR_PTR(ret) : &ucontext->ibucontext; +} +#else +int sxe2_rdma_kalloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) +{ + int ret = SXE2_OK; + struct ib_device *ibdev = uctx->device; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_alloc_ucontext_req req = {}; + struct sxe2_alloc_ucontext_resp uresp = {}; + struct sxe2_rdma_kcontext *ucontext = ibuctxto_kctx(uctx); + struct sxe2_common_attrs *uk_attrs = + &rdma_dev->rdma_func->ctx_dev.hw_attrs.uk_attrs; + struct sxe2_db_ucontext *db_ucontext_entry = NULL; + + DRV_RDMA_LOG_DEV_DEBUG( + "device:kalloc uctx start inlen=%zu outlen=%zu\n", udata->inlen, + udata->outlen); + DRV_RDMA_LOG_DEV_DEBUG("device:inlen min=%zu outlen min=%zu\n", + SXE2_ALLOC_UCTX_MIN_REQ_LEN, + SXE2_ALLOC_UCTX_MIN_RESP_LEN); + if (udata->inlen < SXE2_ALLOC_UCTX_MIN_REQ_LEN || + udata->outlen < SXE2_ALLOC_UCTX_MIN_RESP_LEN) { + DRV_RDMA_LOG_DEV_ERR( + "device:inlen or out len size err inlen=%zu outlen=%zu\n", + udata->inlen, udata->outlen); + ret = -EINVAL; + goto end; + } + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EINVAL; + goto end; + } + if (req.userspace_ver < SXE2_MIN_USER_SPACE_VER || + req.userspace_ver > SXE2_MAX_USER_SPACE_VER) { + DRV_RDMA_LOG_DEV_ERR("device:user space ver %u err\n", + req.userspace_ver); + ret = -EINVAL; + goto end; + } + ucontext->rdma_dev = rdma_dev; + ucontext->abi_ver = req.userspace_ver; + DRV_RDMA_LOG_DEV_DEBUG("device:rdma dev=%p userspace ver=%u\n", + ucontext->rdma_dev, ucontext->abi_ver); + if (udata->outlen == SXE2_ALLOC_UCTX_MIN_RESP_LEN) { + uresp.max_qps = rdma_dev->rdma_func->max_qp; + uresp.max_pds = + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_hw_pds; + uresp.wq_size = + rdma_dev->rdma_func->ctx_dev.hw_attrs.max_qp_wr * 2; + uresp.kernel_ver = req.userspace_ver; + if (ib_copy_to_udata(udata, 
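+/*
+ * This variant mirrors the NEED_KALLOC_UCONTEXT_V1 implementation
+ * above; only the ucontext allocation (done by the RDMA core here)
+ * and the mmap-entry helpers chosen via RDMA_MMAP_DB_NOT_SUPPORT
+ * differ.
+ */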
&uresp, + min(sizeof(uresp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EFAULT; + goto end; + } + } else { + uresp.kernel_ver = SXE2_RDMA_ABI_VER; + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.hw_rev = uk_attrs->hw_rev; + uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; + uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; + uresp.max_hw_srq_wr = uk_attrs->max_hw_srq_wr; + uresp.comp_mask |= SXE2_ALLOC_UCTX_MIN_HW_WQ_SIZE; + DRV_RDMA_LOG_DEV_DEBUG( + "device:max hw wq frages=%u min hw wq size=%u\n", + uresp.max_hw_wq_frags, uresp.min_hw_wq_size); + strscpy(uresp.bdf, rdma_dev->bdf, sizeof(uresp.bdf) - 1); + uresp.max_db = (__u32)sxe2_ucount_bitmap_zero_bits( + rdma_func->allocated_dbs, rdma_func->max_dbs); + uresp.is_pf = (__u8)rdma_dev->rdma_func->ftype ? + SXE2_UCTX_IS_VF : + SXE2_UCTX_IS_PF; +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + ucontext->db_mmap_entry = sxe2_kinsert_user_mmap_entry( + ucontext, SXE2_DRV_DB_MMAP_TYPE_NC, &uresp.db_mmap_key); +#else + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->db_mmap_entry = rdma_user_mmap_entry_add_hash( + ucontext, SXE2_DRV_DB_MMAP_TYPE_NC, &uresp.db_mmap_key); +#endif + + if (!ucontext->db_mmap_entry) { + DRV_RDMA_LOG_DEV_ERR( + "device:db insert user mmap entry err\n"); + ret = -ENOMEM; + goto end; + } + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + DRV_RDMA_LOG_DEV_ERR("device:copy from udata err\n"); + ret = -EFAULT; + goto free_mmap_entry; + } + } + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + db_ucontext_entry = kzalloc(sizeof(*db_ucontext_entry), GFP_KERNEL); + if (!db_ucontext_entry) { + DRV_RDMA_LOG_DEV_ERR( + "device:db alloc db ucontext entry failed.\n"); + ret = -ENOMEM; + goto free_mmap_entry; + } + INIT_LIST_HEAD(&db_ucontext_entry->db_pageid_list); + INIT_LIST_HEAD(&db_ucontext_entry->entry_list); + db_ucontext_entry->ibucontext = uctx; + mutex_lock(&rdma_func->db_mmap_entry_head.lock); + list_add_tail(&db_ucontext_entry->list, &rdma_func->db_mmap_entry_head.list); + mutex_unlock(&rdma_func->db_mmap_entry_head.lock); + goto end; + +free_mmap_entry: +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); +#else + rdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); +#endif +end: + return ret; +} +#endif + +#ifdef DEVICE_OPS_V1 +int sxe2_rdma_kdealloc_ucontext(struct ib_ucontext *uctx) +#else +void sxe2_rdma_kdealloc_ucontext(struct ib_ucontext *uctx) +#endif +{ + struct sxe2_rdma_kcontext *ucontext = ibuctxto_kctx(uctx); + struct ib_device *ibdev = uctx->device; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_db_ucontext *db_ucontext_entry; + struct sxe2_db_ucontext *uconetxt_next; + struct sxe2_db_mmap_entry *db_mmap_entry; + struct sxe2_db_mmap_entry *entry_next; + struct sxe2_db_page_idx *db_page_idx_entry; + struct sxe2_db_page_idx *idx_entry_next; + + 
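+/*
+ * For every doorbell bookkeeping entry tied to this ucontext: return
+ * its doorbell-page indices to the allocated_dbs bitmap, remove its
+ * user mmap entries, then unlink and free the entry, all under
+ * db_mmap_entry_head.lock.
+ */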
DRV_RDMA_LOG_DEV_DEBUG("device:dealloc uncontext start\n"); + mutex_lock(&rdma_func->db_mmap_entry_head.lock); + + list_for_each_entry_safe(db_ucontext_entry, + uconetxt_next, &rdma_func->db_mmap_entry_head.list, list) { + if (db_ucontext_entry->ibucontext == uctx) { + list_for_each_entry_safe(db_page_idx_entry, idx_entry_next, + &db_ucontext_entry->db_pageid_list, list) { + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_dbs, + db_page_idx_entry->db_page_idx); + list_del(&db_page_idx_entry->list); + kfree(db_page_idx_entry); + db_page_idx_entry = NULL; + } + list_for_each_entry_safe(db_mmap_entry, + entry_next, &db_ucontext_entry->entry_list, list) { +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + rdma_user_mmap_entry_remove(db_mmap_entry->mmap_entry); +#else + rdma_user_mmap_entry_del_hash(db_mmap_entry->mmap_entry); +#endif + list_del(&db_mmap_entry->list); + kfree(db_mmap_entry); + db_mmap_entry = NULL; + } + list_del(&db_ucontext_entry->list); + kfree(db_ucontext_entry); + db_ucontext_entry = NULL; + } + } + mutex_unlock(&rdma_func->db_mmap_entry_head.lock); +#ifndef RDMA_MMAP_DB_NOT_SUPPORT + rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); +#else + rdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); +#endif + DRV_RDMA_LOG_DEV_DEBUG("device:dealloc uncontext finish\n"); +#ifdef DEVICE_OPS_V1 + return 0; +#endif +} + +int sxe2_rdma_kquery_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct pci_dev *pcidev = rdma_dev->rdma_func->pcidev; + struct sxe2_rdma_hw_attrs *hw_attrs = &rdma_func->ctx_dev.hw_attrs; + + if (udata->inlen || udata->outlen) { + DRV_RDMA_LOG_DEV_ERR( + "device:kquery device inlen or out len err\n"); + ret = -EINVAL; + goto end; + } + memset(props, 0, sizeof(*props)); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + rdma_dev->netdev->dev_addr); + props->fw_ver = (u64)rdma_func->ctx_dev.fw_ver; + props->device_cap_flags = + IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_RC_RNR_NAK_GEN; + props->vendor_id = pcidev->vendor; + props->vendor_part_id = pcidev->device; + props->hw_ver = pcidev->revision; + props->page_size_cap = hw_attrs->page_size_cap; + props->max_mr_size = hw_attrs->max_mr_size; + props->max_qp = (s32)(rdma_func->max_qp - SXE2_QP_USED_NUM); + props->max_qp_wr = (s32)hw_attrs->max_qp_wr; + set_max_sge(props, rdma_func); + props->max_cq = (s32)(rdma_func->max_cq - SXE2_CQ_USED_NUM); + props->max_cqe = (s32)(rdma_func->max_cqe - 1); + props->max_mr = (s32)(rdma_func->max_mr - SXE2_MR_USED_NUM); + props->max_mw = 0; + props->max_pd = (s32)(rdma_func->max_pd - SXE2_PD_USED_NUM); + props->max_sge_rd = (s32)hw_attrs->uk_attrs.max_hw_read_sges; + props->max_qp_rd_atom = (s32)hw_attrs->max_rra; + props->max_qp_init_rd_atom = hw_attrs->max_sra; + props->max_pkeys = SXE2_PKEY_TBL_SZ; + props->max_ah = (s32)rdma_func->max_ah - SXE2_AH_USED_NUM; + props->max_mcast_grp = SXE2_MAX_MCGS; + props->max_mcast_qp_attach = SXE2_MAX_QPS_PER_MGN; + props->max_total_mcast_qp_attach = + (SXE2_MAX_MCGS * SXE2_MAX_QPS_PER_MGN); + props->max_fast_reg_page_list_len = SXE2_MAX_PAGES_PER_FMR; + + props->max_srq = (s32)(rdma_func->max_srq - rdma_func->used_srqs); + props->max_srq_wr = SXE2_MAX_SRQ_WRS; + props->max_srq_sge = (s32)hw_attrs->uk_attrs.max_hw_wq_frags; + + props->timestamp_mask = GENMASK(31, 0); + props->hca_core_clock = SXE2_HCA_CORE_CLOCK_KHZ; + + props->cq_caps.max_cq_moderation_count = 
SXE2_MAX_CQ_MODERATION_COUNT; + props->cq_caps.max_cq_moderation_period = SXE2_MAX_CQ_MODERATION_PERIOD; +end: + return ret; +} + +#ifdef QUERY_PORT_V1 +int sxe2_rdma_kquery_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) + +#else +int sxe2_rdma_kquery_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +#endif +{ + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct net_device *netdev = rdma_dev->netdev; + enum ib_mtu ndev_ib_mtu; + + props->max_mtu = IB_MTU_4096; + ndev_ib_mtu = sxe2_iboe_get_mtu((int)netdev->mtu); + props->active_mtu = min(props->max_mtu, ndev_ib_mtu); + + props->lid = 1; + props->lmc = 0; + props->sm_lid = 0; + props->sm_sl = 0; + if (netif_carrier_ok(netdev) && netif_running(netdev)) { + props->state = IB_PORT_ACTIVE; + props->phys_state = SXE2_PORT_PHYS_STATE_LINK_UP; + } else { + props->state = IB_PORT_DOWN; + props->phys_state = SXE2_PORT_PHYS_STATE_DISABLED; + } + ib_get_eth_speed(ibdev, port, &props->active_speed, + &props->active_width); + props->gid_tbl_len = SXE2_GID_TABLE_LEN; + kc_set_props_ip_gid_caps(props); + props->pkey_tbl_len = SXE2_PKEY_TBL_SZ; + props->qkey_viol_cntr = 0; + props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP; + props->max_msg_sz = (u32)rdma_dev->rdma_func->ctx_dev.hw_attrs + .max_hw_outbound_msg_size; + return SXE2_OK; +} + +void sxe2_rdma_kget_dev_fw_str(struct ib_device *ibdev, char *str) +{ + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + + snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u", + dev->feature_info[SXE2_RDMA_FW_MAIN_VERSION], + dev->feature_info[SXE2_RDMA_FW_SUB_VERSION], + dev->feature_info[SXE2_RDMA_FW_FIX_VERSION], + dev->feature_info[SXE2_RDMA_FW_BUILD_NUMBER]); +} + +#ifdef QUERY_GID_ROCE_V1 +int sxe2_rdma_kquery_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +#else +int sxe2_rdma_kquery_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +#endif +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + + memset(gid->raw, 0, sizeof(gid->raw)); + ether_addr_copy(gid->raw, rdma_dev->netdev->dev_addr); + + return ret; +} + +#ifdef GET_LINK_LAYER_V1 +enum rdma_link_layer sxe2_rdma_kget_link_layer(struct ib_device *ibdev, + u8 port_num) +#else +enum rdma_link_layer sxe2_rdma_kget_link_layer(struct ib_device *ibdev, + u32 port_num) +#endif +{ + return IB_LINK_LAYER_ETHERNET; +} + +#ifdef GET_NETDEV_OP_V1 +struct net_device *sxe2_rdma_kget_net_dev(struct ib_device *ibdev, u8 port_num) +#else +struct net_device *sxe2_rdma_kget_net_dev(struct ib_device *ibdev, u32 port_num) +#endif +{ + struct net_device *net_dev = NULL; + struct sxe2_rdma_device *rdma_dev = to_dev(ibdev); + + if (rdma_dev->netdev) { + net_dev = rdma_dev->netdev; + dev_hold(net_dev); + } + + return net_dev; +} + +#ifdef MODIFY_PORT_V1 +int sxe2_rdma_kmodify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +#else +int sxe2_rdma_kmodify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props) +#endif +{ + int ret = SXE2_OK; + + if (port > 1) + ret = -EINVAL; + + return ret; +} +#ifdef ROCE_PORT_IMMUTABLE_V1 +int sxe2_kget_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +#else +int sxe2_kget_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +#endif +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = 
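+/* RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP advertises this port as RoCEv2. */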
RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.h new file mode 100644 index 0000000000000000000000000000000000000000..8e1afa06f85d285ca18507f1faaab9e19fe304ba --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_device_port.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_device_port.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_DEVICE_H +#define SXE2_DRV_DEVICE_H + +#include +#include "sxe2_drv_rdma_common.h" + +#define SXE2_OK 0 +#define SXE2_MAX_USER_SPACE_VER 1 +#define SXE2_MIN_USER_SPACE_VER 1 +#define SXE2_FEATURE_FW_INFO 0 +#define SXE2_MAX_PAGES_PER_FMR 262144 +#define SXE2_MAX_QPS_PER_MGN 16 +#define SXE2_MAX_MCGS 256 +#define SXE2_PKEY_TBL_SZ 1 +#define SXE2_GID_TABLE_LEN 32 +#define SXE2_RDMA_ABI_VER 1 +#define SXE2_UCTX_IS_PF 1 +#define SXE2_UCTX_IS_VF 0 +#define SXE2_MAX_CQ_MODERATION_COUNT 0xFFFF +#define SXE2_MAX_CQ_MODERATION_PERIOD 0xFFF +#define SXE2_RDMA_FW_VER_MAJOR GENMASK_ULL(31, 16) +#define SXE2_RDMA_FW_VER_MINOR GENMASK_ULL(15, 0) +#define SXE2_CQ_USED_NUM 0 +#define SXE2_MR_USED_NUM 1 +#define SXE2_PD_USED_NUM 1 +#define SXE2_AH_USED_NUM 1 +#define SXE2_QP_USED_NUM 1 + +#define set_max_sge(props, rdma_func) \ + do { \ + ((props)->max_send_sge = \ + (s32)(rdma_func) \ + ->ctx_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (s32)(rdma_func) \ + ->ctx_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) + +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define ether_addr_copy(mac_addr, new_mac_addr) \ + memcpy(mac_addr, new_mac_addr, ETH_ALEN) + +enum { SXE2_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 0, +}; + +enum sxe2_port_phys_state { + SXE2_PORT_PHYS_STATE_SLEEP = 1, + SXE2_PORT_PHYS_STATE_POLLING = 2, + SXE2_PORT_PHYS_STATE_DISABLED = 3, + SXE2_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + SXE2_PORT_PHYS_STATE_LINK_UP = 5, + SXE2_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + SXE2_PORT_PHYS_STATE_PHY_TEST = 7, +}; + +struct sxe2_alloc_ucontext_req { + __u32 rsvd32; + __u8 userspace_ver; + __u8 rsvd8[3]; + __aligned_u64 comp_mask; +}; + +struct sxe2_alloc_ucontext_resp { + __u32 max_pds; + __u32 max_qps; + __u32 wq_size; + __u8 kernel_ver; + __u8 rsvd[3]; + __aligned_u64 feature_flags; + __aligned_u64 db_mmap_key; + __u32 max_hw_wq_frags; + __u32 max_hw_read_sges; + __u32 max_hw_inline; + __u32 max_hw_rq_quanta; + __u32 max_hw_wq_quanta; + __u32 min_hw_cq_size; + __u32 max_hw_cq_size; + __u16 max_hw_sq_chunk; + __u8 hw_rev; + __u8 is_pf; + __aligned_u64 comp_mask; + __u16 min_hw_wq_size; + __u32 max_db; + __u8 rsvd3[2]; + __u8 bdf[16]; + __u32 max_hw_srq_quanta; + __u32 max_hw_srq_wr; +}; + +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + +#define SXE2_ALLOC_UCTX_MIN_REQ_LEN \ + offsetofend(struct sxe2_alloc_ucontext_req, rsvd8) +#define SXE2_ALLOC_UCTX_MIN_RESP_LEN \ + offsetofend(struct sxe2_alloc_ucontext_resp, rsvd) + + #ifdef NEED_KALLOC_UCONTEXT_V1 +struct ib_ucontext *sxe2_rdma_kalloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata); + #else +int 
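+/*
+ * The MTU helpers below derate the netdev MTU by the RoCEv2 header
+ * overhead (GRH + UDP + BTH + XRC + AtomicEth + ICRC; 96 bytes with
+ * the usual rdma/ib_pack.h values, which is an assumption here): a
+ * 1500-byte Ethernet MTU leaves 1404 bytes and reports IB_MTU_1024,
+ * while 4192 or more is needed for IB_MTU_4096.
+ */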
sxe2_rdma_kalloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +#endif +#ifdef DEVICE_OPS_V1 +int sxe2_rdma_kdealloc_ucontext(struct ib_ucontext *context); +#else +void sxe2_rdma_kdealloc_ucontext(struct ib_ucontext *context); +#endif +int sxe2_rdma_kquery_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata); +#ifdef QUERY_PORT_V1 +int sxe2_rdma_kquery_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +#else +int sxe2_rdma_kquery_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); +#endif +void sxe2_rdma_kget_dev_fw_str(struct ib_device *ibdev, char *str); + +#ifdef QUERY_GID_ROCE_V1 +int sxe2_rdma_kquery_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +#else +int sxe2_rdma_kquery_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); +#endif +#ifdef GET_LINK_LAYER_V1 +enum rdma_link_layer sxe2_rdma_kget_link_layer(struct ib_device *ibdev, + u8 port_num); +#else +enum rdma_link_layer sxe2_rdma_kget_link_layer(struct ib_device *ibdev, + u32 port_num); +#endif +#ifdef GET_NETDEV_OP_V1 +struct net_device *sxe2_rdma_kget_net_dev(struct ib_device *ibdev, u8 port_num); +#else +struct net_device *sxe2_rdma_kget_net_dev(struct ib_device *ibdev, + u32 port_num); +#endif +#ifdef MODIFY_PORT_V1 +int sxe2_rdma_kmodify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props); +#else +int sxe2_rdma_kmodify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props); +#endif + +static inline enum ib_mtu sxe2_mtu_int_to_enum(int mtu) +{ + enum ib_mtu ib_mtu; + + if (mtu >= 4096) + ib_mtu = IB_MTU_4096; + else if (mtu >= 2048) + ib_mtu = IB_MTU_2048; + else if (mtu >= 1024) + ib_mtu = IB_MTU_1024; + else if (mtu >= 512) + ib_mtu = IB_MTU_512; + else + ib_mtu = IB_MTU_256; + + return ib_mtu; +} + +static inline int sxe2_ib_mtu_enmu_to_int(enum ib_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: + return 256; + case IB_MTU_512: + return 512; + case IB_MTU_1024: + return 1024; + case IB_MTU_2048: + return 2048; + case IB_MTU_4096: + return 4096; + default: + return -1; + } +} + +static inline enum ib_mtu sxe2_iboe_get_mtu(int mtu) +{ + mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + IB_ICRC_BYTES); + + if (mtu >= sxe2_ib_mtu_enmu_to_int(IB_MTU_4096)) + return IB_MTU_4096; + else if (mtu >= sxe2_ib_mtu_enmu_to_int(IB_MTU_2048)) + return IB_MTU_2048; + else if (mtu >= sxe2_ib_mtu_enmu_to_int(IB_MTU_1024)) + return IB_MTU_1024; + else if (mtu >= sxe2_ib_mtu_enmu_to_int(IB_MTU_512)) + return IB_MTU_512; + else + return IB_MTU_256; +} +#ifdef ROCE_PORT_IMMUTABLE_V1 +int sxe2_kget_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable); +#else +int sxe2_kget_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.c new file mode 100644 index 0000000000000000000000000000000000000000..c2ca968b7f7cce0af37654aa7a7faa7048ea9128 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.c @@ -0,0 +1,741 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_inject.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + +#include +#include +#include +#include +#include + +#include "sxe2_drv_rdma_inject.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_log.h" + +#define INJECT_MAX_ARGC 16 +#define INJECT_CMD_LEN 200 +#define INJECT_HASH_TABLE_SIZE 512 +#define INJECT_PARTITION_SIZE (2 * 1024 * 1024) +#define INJECT_HASH_CODE (1315423911) + +struct inject_type_proc { + char *string; + enum inject_type value; +}; + +struct inject_cmd_param_proc { + const char *opt; + s32 (*proc)(const char *data, struct sxe2_injection *param); +}; + +void inject_lock(struct sxe2_injection *injection, unsigned long *flags) +{ + spin_lock_irqsave(&injection->lock, *flags); +} + +void inject_unlock(struct sxe2_injection *injection, unsigned long *flags) +{ + spin_unlock_irqrestore(&injection->lock, *flags); +} + +static s32 inject_init_mem(struct sxe2_rdma_pci_f *dev) +{ + s32 ret = 0; + + struct sxe2_inject_mem_mgr *mm = &dev->inject_mem.inject_mem_mgr; + + mm->base_addr = vzalloc(INJECT_PARTITION_SIZE); + if (mm->base_addr == NULL) { + ret = -EFAULT; + } else { + mm->end_addr = mm->base_addr + INJECT_PARTITION_SIZE; + mm->cursor = mm->base_addr; + } + + return ret; +} + +static void *inject_get_sub_mem(struct sxe2_rdma_pci_f *dev, u32 size) +{ + void *ptr = NULL; + struct sxe2_inject_mem_mgr *mm = &dev->inject_mem.inject_mem_mgr; + + if (mm->cursor + size < mm->end_addr) { + ptr = mm->cursor; + mm->cursor += size; + } + return ptr; +} + +s32 inject_init(struct sxe2_rdma_pci_f *dev) +{ + u32 i = 0; + s32 ret = 0; + struct list_head *g_inject_hash_table; + struct sxe2_inject_mem_mgr *mm; + u64 *usable_mm; + u32 inject_mm_cnt = 0; + + WARN_ON(dev == NULL); + if (dev == NULL) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("dev find fail, ret %d\n", ret); + goto end; + } + + ret = inject_init_mem(dev); + WARN_ON(ret != 0); + if (ret != 0) { + DRV_RDMA_LOG_ERROR("inject init mem fail, ret %d\n", ret); + goto end; + } + + mm = &dev->inject_mem.inject_mem_mgr; + + g_inject_hash_table = (struct list_head *)inject_get_sub_mem( + dev, sizeof(struct list_head) * INJECT_HASH_TABLE_SIZE); + dev->inject_mem.g_inject_hash_table = g_inject_hash_table; + WARN_ON(g_inject_hash_table == NULL); + if (g_inject_hash_table == NULL) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("inject get sub mem fail, ret %d\n", ret); + goto err_hash; + } + + for (; i < INJECT_HASH_TABLE_SIZE; i++) + INIT_LIST_HEAD(&g_inject_hash_table[i]); + + INIT_LIST_HEAD(&dev->inject_mem.g_inject_list); + spin_lock_init(&dev->inject_mem.list_lock); + inject_mm_cnt = (u32)DIV_ROUND_UP((mm->end_addr - mm->cursor), + sizeof(struct sxe2_injection)); + usable_mm = vzalloc(inject_mm_cnt * sizeof(u64)); + DRV_RDMA_LOG_INFO("inject mm cnt %#x, sizeof(u64) %#x, usable mm %#x\n", + (u32)inject_mm_cnt, (u32)sizeof(u64), + (u32)inject_mm_cnt * (u32)sizeof(u64)); + if (usable_mm == NULL) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR("inject init usable mem fail, ret %d\n", + ret); + goto err_hash; + } + mm->usable_array.array_addr = usable_mm; + + spin_lock_init(&mm->usable_array.array_lock); + mm->usable_array.array_addr[0] = 0; + + DRV_RDMA_LOG_INFO( + "inject init mem %#x, usable mem %#llx, each injection mem %#x\n", + (u32)INJECT_PARTITION_SIZE, (u64)inject_mm_cnt * sizeof(u64), + (u32)sizeof(struct sxe2_injection)); + + goto end; + +err_hash: + vfree(mm->base_addr); + mm->base_addr = NULL; + mm->end_addr = NULL; + 
mm->cursor = NULL;
+end:
+	return ret;
+}
+
+static u32 inject_str_hash_code(const char *str)
+{
+	u32 hash = INJECT_HASH_CODE;
+
+	while (*str)
+		hash ^= ((hash << 5) + (hash >> 2) + (*str++));
+
+	return hash;
+}
+
+static void inject_set_name(struct inject_name *injection_name, const char *name)
+{
+	injection_name->name = name;
+	injection_name->hash_code = inject_str_hash_code(name);
+}
+
+static inline struct list_head *
+inject_get_hash_head(struct sxe2_rdma_pci_f *dev, u32 hash_code)
+{
+	struct sxe2_inject_mem *mm = &dev->inject_mem;
+
+	return &mm->g_inject_hash_table[hash_code % INJECT_HASH_TABLE_SIZE];
+}
+
+static struct sxe2_injection *inject_find_inner(struct sxe2_rdma_pci_f *dev,
+						const char *name)
+{
+	struct list_head *pos;
+	struct sxe2_injection *injection;
+	bool found = false;
+	struct list_head *head;
+	struct inject_name injection_name;
+	unsigned long flags = 0;
+
+	/* bail out before list_lock is taken; the unlock below must only
+	 * run on the locked path
+	 */
+	if (strlen(name) > SXE2_INJECT_NAME_MAX_LEN)
+		return NULL;
+
+	inject_set_name(&injection_name, name);
+
+	spin_lock_irqsave(&dev->inject_mem.list_lock, flags);
+	head = inject_get_hash_head(dev, injection_name.hash_code);
+	list_for_each(pos, head) {
+		injection = list_entry(pos, struct sxe2_injection, hash_node);
+		if (injection->inject_name.hash_code !=
+		    injection_name.hash_code) {
+			continue;
+		}
+
+		if (strncmp(injection->inject_name.name, injection_name.name,
+			    strlen(injection_name.name)) == 0) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->inject_mem.list_lock, flags);
+
+	return found ? injection : NULL;
+}
+
+struct sxe2_injection *inject_find(struct sxe2_rdma_pci_f *dev, const char *name)
+{
+	struct sxe2_injection *ret = NULL;
+
+	if (dev == NULL || name == NULL) {
+		DRV_RDMA_LOG_ERROR("dev/name find fail\n");
+		goto end;
+	}
+
+	ret = inject_find_inner(dev, name);
+
+end:
+	return ret;
+}
+EXPORT_SYMBOL(inject_find);
+
+s32 inject_register(struct sxe2_rdma_pci_f *dev, const char *name,
+		    inject_callback callback, s32 mid)
+{
+	s32 ret = 0;
+	struct sxe2_injection *injection;
+	struct list_head *head;
+	struct sxe2_inject_usable_array *usable_arr;
+	u64 *usable_mm;
+	u64 usable_index;
+	unsigned long flags = 0;
+
+	if (dev == NULL || name == NULL || callback == NULL) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"Inject reg param dev/name/callback is null, ret %d\n",
+			ret);
+		goto end;
+	}
+
+	injection = inject_find_inner(dev, name);
+	if (injection != NULL) {
+		DRV_RDMA_LOG_WARN("Name=%s already exists\n", name);
+		goto end;
+	}
+
+	usable_arr = &dev->inject_mem.inject_mem_mgr.usable_array;
+	usable_mm = usable_arr->array_addr;
+	spin_lock_irqsave(&usable_arr->array_lock, flags);
+	usable_index = usable_mm[0];
+
+	if (usable_index) {
+		injection = (struct sxe2_injection *)usable_mm[usable_index];
+		DRV_RDMA_LOG_INFO("usable index %#llx, usable addr %#llx\n",
+				  usable_index, usable_mm[usable_index]);
+		usable_mm[usable_index] = 0;
+		usable_index--;
+		usable_mm[0] = usable_index;
+	} else {
+		injection = inject_get_sub_mem(dev, sizeof(struct sxe2_injection));
+		if (injection == NULL) {
+			spin_unlock_irqrestore(&usable_arr->array_lock, flags);
+			ret = -ENOMEM;
+			DRV_RDMA_LOG_ERROR("inject get sub mem fail, ret %d\n",
+					   ret);
+			goto end;
+		}
+	}
+	spin_unlock_irqrestore(&usable_arr->array_lock, flags);
+
+	memset(injection, 0, sizeof(struct sxe2_injection));
+	injection->alive = 0;
+	inject_set_name(&injection->inject_name, name);
+	injection->callback = callback;
+	injection->mid = mid;
+	injection->type = INJECT_TYPE_BUTT;
+	spin_lock_init(&injection->lock);
+
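+	/* Publish the initialized entry: hash bucket for name lookups,
+	 * global list for iteration, both under list_lock.
+	 */
+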
spin_lock_irqsave(&dev->inject_mem.list_lock, flags); + head = inject_get_hash_head(dev, injection->inject_name.hash_code); + list_add_tail(&injection->hash_node, head); + list_add_tail(&injection->list_node, &dev->inject_mem.g_inject_list); + spin_unlock_irqrestore(&dev->inject_mem.list_lock, flags); + + DRV_RDMA_LOG_INFO("inject %s registered\n", name); + +end: + return ret; +} +EXPORT_SYMBOL(inject_register); + +s32 inject_unregister(struct sxe2_rdma_pci_f *dev, const char *name) +{ + s32 ret = 0; + struct sxe2_injection *injection; + struct sxe2_inject_usable_array *usable_arr; + u64 *usable_mm; + u64 usable_index; + unsigned long flags = 0; + unsigned long flags_array = 0; + unsigned long flags_list = 0; + + if (dev == NULL || name == NULL) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "Inject unreg param dev/name is null, ret %d\n", ret); + goto end; + } + + injection = inject_find_inner(dev, name); + if (injection == NULL) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR("Name=%s is not exist, ret %d\n", name, ret); + goto end; + } + inject_lock(injection, &flags); + usable_arr = &dev->inject_mem.inject_mem_mgr.usable_array; + usable_mm = usable_arr->array_addr; + spin_lock_irqsave(&usable_arr->array_lock, flags_array); + usable_index = usable_mm[0]; + + usable_index++; + usable_mm[usable_index] = (u64)injection; + usable_mm[0] = usable_index; + spin_unlock_irqrestore(&usable_arr->array_lock, flags_array); + + spin_lock_irqsave(&dev->inject_mem.list_lock, flags_list); + list_del(&injection->hash_node); + list_del(&injection->list_node); + spin_unlock_irqrestore(&dev->inject_mem.list_lock, flags_list); + + inject_unlock(injection, &flags); + + memset(injection, 0, sizeof(struct sxe2_injection)); + + DRV_RDMA_LOG_INFO( + "inject %s unregistered, usable index %#llx, usable addr %#llx\n", + name, usable_index, usable_mm[usable_index]); + + goto end; + +end: + return ret; +} +EXPORT_SYMBOL(inject_unregister); + +static void inject_split_string(char *str, s32 *argc, char *argv[], + s32 max_argc) +{ + s32 idx = 0; + bool is_head = false; + + if (*str != ' ' && *str != '\t') + is_head = true; + + while (*str != '\0') { + if (*str == ' ' || *str == '\t') { + is_head = true; + *str = '\0'; + str++; + continue; + } + + if (!is_head) { + str++; + continue; + } + + if (idx < max_argc) { + argv[idx++] = str; + is_head = false; + } + + str++; + } + + if (idx < max_argc) + argv[idx] = NULL; + + *argc = idx; +} + +static inline s32 inject_proc_type(const char *param_val, + struct sxe2_injection *injection) +{ + s32 ret = 0; + + static struct inject_type_proc s_inject_type[] = { + { "pause", INJECT_TYPE_PAUSE }, + { "abort", INJECT_TYPE_ABORT }, + { "reset", INJECT_TYPE_RESET }, + { "callback", INJECT_TYPE_CALLBACK } + }; + + u32 size = ARRAY_SIZE(s_inject_type); + u32 i = 0; + + for (; i < size; i++) { + if (strncmp(param_val, s_inject_type[i].string, + strlen(param_val)) == 0) { + injection->type = s_inject_type[i].value; + goto end; + } + } + + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("Invalid type %s, when active inject %s, ret %d\n", + param_val, injection->inject_name.name, ret); + +end: + return ret; +} + +static inline s32 inject_proc_user_data(const char *param_val, + struct sxe2_injection *injection) +{ + s32 ret = 0; + + strscpy(injection->user_data, param_val, + sizeof(injection->user_data) - 1); + if (injection->user_data[0] == '\0') { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR( + "Invalid userdata %s, when active inject %s, ret %d\n", + param_val, injection->inject_name.name, ret); + } + return ret; +} + 
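+/*
+ * Activation command strings passed to inject_active_intf() are parsed
+ * as option/value pairs:
+ *   -t {pause|abort|reset|callback}  injection type
+ *   -u <string>                      user data (for "pause" this is the
+ *                                    delay in microseconds)
+ *   -a <count>                       alive count (defaults to 1);
+ *                                    decremented on each hit, the point
+ *                                    disarms when it reaches 0
+ * e.g. inject_active_intf(dev, "reset_point", "-t pause -u 100 -a 1"),
+ * where "reset_point" is a hypothetical name used for illustration.
+ */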
+static inline s32 inject_proc_alive(const char *param_val,
+				    struct sxe2_injection *injection)
+{
+	s32 val = 0;
+	s32 ret = 0;
+
+	/* kstrtoint parses into an int; writing a long through a cast
+	 * pointer would corrupt adjacent stack memory on 64-bit
+	 */
+	ret = kstrtoint(param_val, 0, &val);
+	if (ret != 0 || val < 0) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_ERROR(
+			"Invalid alive %s, when active inject %s, ret %d\n",
+			param_val, injection->inject_name.name, ret);
+		goto end;
+	}
+	injection->alive = val;
+
+end:
+	return ret;
+}
+
+static struct inject_cmd_param_proc s_inject_proc[] = {
+	{ "-t", inject_proc_type },
+	{ "-u", inject_proc_user_data },
+	{ "-a", inject_proc_alive }
+};
+
+static struct inject_cmd_param_proc *inject_get_cmd_param_proc(const char *opt)
+{
+	struct inject_cmd_param_proc *ret = NULL;
+
+	u32 size = ARRAY_SIZE(s_inject_proc);
+	u32 i = 0;
+
+	for (; i < size; i++) {
+		if (!strncmp(opt, s_inject_proc[i].opt, strlen(opt))) {
+			ret = &s_inject_proc[i];
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static s32 inject_parse_cmd_param(struct sxe2_injection *injection, s32 argc,
+				  char *argv[])
+{
+	struct inject_cmd_param_proc *param_proc = NULL;
+	s32 ret = 0;
+	s32 i = 0;
+
+	for (; i < argc; i++) {
+		param_proc = inject_get_cmd_param_proc(argv[i]);
+		if (param_proc == NULL) {
+			ret = -EINVAL;
+			DRV_RDMA_LOG_ERROR(
+				"unknown active cmd option %s, ret %d\n",
+				argv[i], ret);
+			goto end;
+		}
+
+		i++;
+		if (i >= argc) {
+			ret = -EINVAL;
+			DRV_RDMA_LOG_ERROR(
+				"active cmd param incomplete, ret %d\n", ret);
+			goto end;
+		}
+
+		ret = param_proc->proc(argv[i], injection);
+		if (ret != 0) {
+			DRV_RDMA_LOG_ERROR(
+				"active injection param fail, ret %d\n", ret);
+			goto end;
+		}
+	}
+
+	ret = 0;
+
+end:
+	return ret;
+}
+
+void inject_fill(struct sxe2_injection *src, struct sxe2_injection *dst)
+{
+	unsigned long flags = 0;
+
+	memcpy(dst->user_data, src->user_data, sizeof(dst->user_data));
+
+	dst->type = src->type == INJECT_TYPE_BUTT ? INJECT_TYPE_CALLBACK :
+						    src->type;
+
+	if (src->alive == 0)
+		src->alive = 1;
+
+	inject_lock(dst, &flags);
+	dst->alive = src->alive;
+	inject_unlock(dst, &flags);
+}
+
+void inject_active_intf(struct sxe2_rdma_pci_f *dev, const char *name,
+			const char *cmd)
+{
+	s32 ret = 0;
+	char cmd_tmp_buff[INJECT_CMD_LEN];
+	s32 argc = 0;
+	char *argv[INJECT_MAX_ARGC];
+	struct sxe2_injection *injection = NULL;
+	struct sxe2_injection tmp;
+
+	if (dev == NULL || name == NULL) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"inject active param dev/name is null, ret %d\n", ret);
+		goto finish;
+	}
+
+	injection = inject_find_inner(dev, name);
+	if (injection == NULL) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_ERROR("Name=%s does not exist, ret %d\n", name,
+				   ret);
+		goto finish;
+	}
+
+	if (injection->alive) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_ERROR("injection %s is already active, ret %d\n",
+				   name, ret);
+		goto finish;
+	}
+
+	memset(&tmp, 0, sizeof(tmp));
+	tmp.inject_name.name = injection->inject_name.name;
+	tmp.type = INJECT_TYPE_BUTT;
+
+	if (cmd == NULL || *cmd == '\0')
+		goto finish;
+
+	(void)snprintf(cmd_tmp_buff, INJECT_CMD_LEN, "%s", cmd);
+
+	inject_split_string(cmd_tmp_buff, &argc, argv, INJECT_MAX_ARGC);
+	ret = inject_parse_cmd_param(&tmp, argc, argv);
+
+finish:
+	if (ret == 0) {
+		inject_fill(&tmp, injection);
+		DRV_RDMA_LOG_INFO(
+			"inject name:%s user_data:%s, alive:%d type:%d mid:%d\n",
+			injection->inject_name.name, injection->user_data,
+			injection->alive, injection->type, injection->mid);
+	} else {
+		DRV_RDMA_LOG_ERROR("inject %s: active failed, ret :%d!\n",
+				   name, ret);
+	}
+}
+EXPORT_SYMBOL(inject_active_intf);
+
+void inject_deactive_intf(struct sxe2_rdma_pci_f *dev, const char *name)
+{
+	struct sxe2_injection *injection;
+	unsigned long flags = 0;
+
+	if (dev == NULL || name == NULL) {
+		DRV_RDMA_LOG_ERROR("inject deactive param dev/name is null\n");
+		goto end;
+	}
+
+	injection = inject_find_inner(dev, name);
+	if (injection != NULL) {
+		inject_lock(injection, &flags);
+		injection->alive = 0;
+		inject_unlock(injection, &flags);
+	}
+
+end:
+	return;
+}
+EXPORT_SYMBOL(inject_deactive_intf);
+
+static s32 inject_read_and_dec_alive(struct sxe2_injection *injection)
+{
+	s32 val = 0;
+	unsigned long flags = 0;
+
+	inject_lock(injection, &flags);
+
+	val = injection->alive;
+	if (injection->alive != 0 && injection->alive != -1)
+		injection->alive--;
+
+	inject_unlock(injection, &flags);
+
+	return val;
+}
+
+bool inject_execute_callback(struct sxe2_rdma_pci_f *dev, const char *name)
+{
+	bool ret = false;
+	s32 micro_sec = 0;
+
+	struct sxe2_injection *injection = inject_find(dev, name);
+
+	if (dev == NULL || injection == NULL ||
+	    inject_read_and_dec_alive(injection) == 0) {
+		ret = false;
+		goto end;
+	}
+
+	switch (injection->type) {
+	case INJECT_TYPE_CALLBACK:
+		ret = true;
+		goto end;
+
+	case INJECT_TYPE_PAUSE:
+		/* user_data carries the delay in microseconds; ignore it
+		 * if it does not parse as a non-negative integer
+		 */
+		if (kstrtoint(injection->user_data, 0, &micro_sec) == 0 &&
+		    micro_sec >= 0)
+			udelay(micro_sec);
+		ret = true;
+		goto end;
+
+	case INJECT_TYPE_ABORT:
+		SXE2_INJECT_BUG();
+		ret = false;
+		goto end;
+
+	case INJECT_TYPE_RESET:
+		ret = true;
+		goto end;
+
+	default:
+		break;
+	}
+
+end:
+	return ret;
+}
+EXPORT_SYMBOL(inject_execute_callback);
+
+s32 inject_count(struct sxe2_rdma_pci_f *dev)
+{
+	struct list_head *pos = NULL;
+	s32 ret = 0;
+	unsigned long flags = 0;
+
+	if (dev == NULL) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("inject count param dev is null, ret %d\n",
+				   ret);
+		goto end;
+	}
+
+	spin_lock_irqsave(&dev->inject_mem.list_lock,
flags); + list_for_each(pos, &dev->inject_mem.g_inject_list) { + ret++; + } + spin_unlock_irqrestore(&dev->inject_mem.list_lock, flags); + +end: + return ret; +} + +void inject_clear_intf(struct sxe2_rdma_pci_f *dev) +{ + struct sxe2_injection *pos = NULL; + struct sxe2_injection *next = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&dev->inject_mem.list_lock, flags); + list_for_each_entry_safe(pos, next, &dev->inject_mem.g_inject_list, + list_node) { + pos->alive = 0; + } + spin_unlock_irqrestore(&dev->inject_mem.list_lock, flags); +} +EXPORT_SYMBOL(inject_clear_intf); + +s32 inject_uninit(struct sxe2_rdma_pci_f *dev) +{ + s32 ret = 0; + struct sxe2_inject_mem_mgr *mm; + + if (dev == NULL) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + NULL, "inject unint param dev is null, ret %d\n", ret); + goto end; + } + + mm = &dev->inject_mem.inject_mem_mgr; + inject_clear_intf(dev); + + vfree(mm->usable_array.array_addr); + mm->usable_array.array_addr = NULL; + vfree(mm->base_addr); + mm->base_addr = NULL; + mm->end_addr = NULL; + mm->cursor = NULL; + +end: + return ret; +} + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.h new file mode 100644 index 0000000000000000000000000000000000000000..686701a63744fe422c369c71721dae5c8c76b591 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_inject.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef _SXE2_DRV_INJECT_H_ +#define _SXE2_DRV_INJECT_H_ + +#include +#include "sxe2_drv_rdma_log.h" + +struct sxe2_rdma_pci_f; + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +enum { SXE2_INJECT_MID_MQ, + SXE2_INJECT_MID_RCMS, +}; + +#define SXE2_INJECT_STRTOL_BASE (0) +#define SXE2_INJECT_USR_DATA_LEN (128) +#define SXE2_INJECT_NAME_MAX_LEN (120) +#define INJECT_CMD_MAX_LEN (200) + +#if defined(SXE2_SUPPORT_DEBUG) && !defined(PCLINT) +#define SXE2_INJECT_BUG() (*(int *)0 = 0) + +#define SXE2_INJECT_BUG_ON(_cond) \ + do { \ + if (unlikely(_cond)) \ + SXE2_INJECT_BUG(); \ + } while (0) +#else +#define SXE2_INJECT_BUG() +#endif + +struct sxe2_inject_usable_array { + u64 *array_addr; + spinlock_t array_lock; +}; + +struct sxe2_inject_mem_mgr { + char *base_addr; + char *end_addr; + char *cursor; + struct sxe2_inject_usable_array + usable_array; +}; + +struct sxe2_inject_mem { + struct sxe2_inject_mem_mgr inject_mem_mgr; + struct list_head *g_inject_hash_table; + struct list_head g_inject_list; + spinlock_t list_lock; +}; + +struct sxe2_inject_debug { + struct dentry *dbg_root; + spinlock_t dbg_lock; + char inject_cmd[INJECT_CMD_MAX_LEN]; +}; + +enum { SXE2_INJECT_MID_CQ, + SXE2_INJECT_MID_RESET, + SXE2_INJECT_MID_EQ, + SXE2_INJECT_MID_QP, + SXE2_INJECT_MID_MR, + SXE2_INJECT_MID_CEQ, +}; + +typedef void (*inject_callback)(char *user_data, ...); + +struct inject_name { + const char *name; + u32 hash_code; +}; + +enum inject_type { + INJECT_TYPE_CALLBACK = 0, + INJECT_TYPE_PAUSE = 1, + INJECT_TYPE_ABORT = 2, + INJECT_TYPE_RESET = 3, + INJECT_TYPE_BUTT +}; + +struct sxe2_injection { + struct list_head hash_node; + struct list_head list_node; + struct inject_name inject_name; + s32 mid; + char user_data[SXE2_INJECT_USR_DATA_LEN]; + s32 alive; + enum inject_type type; + inject_callback callback; + spinlock_t lock; +}; + +void inject_lock(struct sxe2_injection 
*injection, unsigned long *flags); + +void inject_unlock(struct sxe2_injection *injection, unsigned long *flags); + +s32 inject_init(struct sxe2_rdma_pci_f *dev); + +struct sxe2_injection *inject_find(struct sxe2_rdma_pci_f *dev, const char *name); + +s32 inject_register(struct sxe2_rdma_pci_f *dev, const char *name, + inject_callback callback, s32 mid); + +s32 inject_unregister(struct sxe2_rdma_pci_f *dev, const char *name); + +void inject_fill(struct sxe2_injection *src, struct sxe2_injection *dst); + +void inject_active_intf(struct sxe2_rdma_pci_f *dev, const char *name, + const char *cmd); + +void inject_deactive_intf(struct sxe2_rdma_pci_f *dev, const char *name); + +bool inject_execute_callback(struct sxe2_rdma_pci_f *dev, const char *name); + +s32 inject_count(struct sxe2_rdma_pci_f *dev); + +void inject_clear_intf(struct sxe2_rdma_pci_f *dev); + +s32 inject_uninit(struct sxe2_rdma_pci_f *dev); + +#define INJECT_INIT(dev) inject_init(dev) +#define INJECT_UNINIT(dev) inject_uninit(dev) +#define INJECT_REG(dev, name, callback, mid) \ + inject_register(dev, name, (inject_callback)callback, mid) +#define INJECT_UNREG(dev, name) inject_unregister(dev, name) + +#define INJECT_START(dev, name, ...) \ + do { \ + if (inject_execute_callback(dev, name)) { \ + struct sxe2_injection *injection = inject_find(dev, name); \ + DRV_RDMA_LOG_INFO("inject(%s) active, alive dec=%d,\n" \ + "\ttype=%u, user_data=%s\n", \ + name, injection->alive, injection->type, \ + injection->user_data); \ + injection->callback(injection->user_data, __VA_ARGS__); \ + } \ + } while (0) + +#define INJECT_END + +#define INJECT_GET_ALIVE(dev, name) \ + ({ \ + struct sxe2_injection *injection = inject_find(dev, name); \ + injection->alive; \ + }) + +#define INJECT_ACTIVE(dev, name, cmd) inject_active_intf(dev, name, cmd) +#define INJECT_DEACTIVE(dev, name) inject_deactive_intf(dev, name) +#define INJECT_COUNT(dev) inject_count(dev) +#define INJECT_CLEAR(dev) inject_clear_intf(dev) + +#else +typedef void (*inject_callback)(char *user_data, ...); + +static inline s32 inject_init(struct sxe2_rdma_pci_f *dev) +{ + (void)dev; + return 0; +} + +static inline s32 inject_uninit(struct sxe2_rdma_pci_f *dev) +{ + (void)dev; + return 0; +} + +static inline s32 inject_register(struct sxe2_rdma_pci_f *dev, const char *name, + inject_callback callback, s32 mid) +{ + (void)dev; + (void)name; + (void)callback; + (void)mid; + return 0; +} + +static inline s32 inject_unregister(struct sxe2_rdma_pci_f *dev, + const char *name) +{ + (void)dev; + (void)name; + return 0; +} + +#define INJECT_INIT(dev) inject_init(dev) +#define INJECT_UNINIT(dev) inject_uninit(dev) +#define INJECT_REG(dev, name, callback, mid) \ + inject_register(dev, name, (inject_callback)callback, mid) +#define INJECT_UNREG(dev, name) inject_unregister(dev, name) +#define INJECT_START(dev, name, ...) +#define INJECT_END +#define INJECT_GET_ALIVE(dev, name) 0 +#define INJECT_ACTIVE(dev, name, cmd) +#define INJECT_DEACTIVE(dev, name) +#define INJECT_COUNT(dev) 0 +#define INJECT_CLEAR(dev) +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..122ddbba6b505dbc72bd695cd82b12d1c465cd02 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
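+ *
+ * Debugfs front end for the injection framework.  Creates an "inject"
+ * directory under the device debugfs root with four files:
+ *   inject_show     (0600) write a filter ("all", "mid=<n>", "name=<s>",
+ *                          "status=active|inactive"), then read results
+ *   inject_count    (0400) number of registered injection points
+ *   inject_active   (0200) "<name> type=<t> alive=<n> userData=<s>"
+ *   inject_deactive (0200) "<name>"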
+ * + * @file: sxe2_drv_rdma_inject_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + +#include +#include +#include +#include +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_inject.h" +#include "sxe2_drv_rdma_inject_debugfs.h" +#include "sxe2_drv_rdma_common.h" + +#define SXE2_INJECT_DEBUG_FILE_READ_AND_WRITE (0600) +#define SXE2_INJECT_DEBUG_FILE_READ_ONLY (0400) +#define SXE2_INJECT_DEBUG_FILE_WRITE_ONLY (0200) +#define INJECT_ARGV_COUNT_MAX (32) +#define INJECT_CMD_PARA_CONT1 (1) +#define INJECT_CMD_PARA_CONT2 (2) +#define INJECT_CMD_PARA_CONT3 (3) +#define INJECT_CMD_PARA_CONT4 (4) +#define INJECT_SHOW_ATTR (4) + +struct dentry *sxe2_drv_debugfs_get_dev_root(struct sxe2_rdma_device *dev) +{ + return dev->hdl->sxe2_rdma_dbg_dentry; +} +static struct dentry *sxe2_debugfs_create_dir(const char *name, + struct dentry *parent) +{ + if (IS_ERR_OR_NULL(parent) && strncmp(name, DEBUGFS_ROOT_DIR, 4)) + return ERR_PTR(-ENOMEM); + else + return debugfs_create_dir(name, parent); +} + +static struct dentry * +sxe2_debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, + void *data, const struct file_operations *fops) +{ + if (IS_ERR_OR_NULL(parent)) + return ERR_PTR(-ENOMEM); + else + return debugfs_create_file(name, mode, parent, data, fops); +} + +static bool inject_filter_mid(struct sxe2_injection *injection, const char *val) +{ + s32 mid = 0; + bool ret = false; + int count = 0; + + count = kstrtol(val, 0, (long *)&mid); + if (mid < 0) + goto end; + + ret = (bool)(mid == injection->mid); + +end: + return ret; +} + +static bool inject_filter_name(struct sxe2_injection *injection, const char *val) +{ + return (bool)(!strncmp(val, injection->inject_name.name, strlen(val))); +} + +static bool inject_filter_status(struct sxe2_injection *injection, const char *val) +{ + bool ret = false; + + if (!strncmp(val, "active", strlen(val)) && (injection->alive != 0)) { + ret = true; + goto end; + } + if (!strncmp(val, "inactive", strlen(val)) && (injection->alive == 0)) + ret = true; + +end: + return ret; +} + +static void inject_fmt_item(struct sxe2_injection *injection, + struct inject_custom_show_rsp *rsp) +{ + rsp->mid = (u32)injection->mid; + memcpy(rsp->name, injection->inject_name.name, + strlen(injection->inject_name.name)); + rsp->alive = injection->alive; + rsp->type = injection->type; + if (rsp->alive == 0 || injection->user_data[0] == '\0') { + rsp->usr_data[0] = '\0'; + } else { + memcpy(rsp->usr_data, injection->user_data, + strlen(injection->user_data)); + } +} + +static s32 split_command(char *cmd, s32 *argc, char *argv[]) +{ + s32 ret = 0; + char *token = NULL; + + cmd[strlen(cmd) - 1] = '\0'; + token = strsep(&cmd, " "); + while (token != NULL) { + if (*argc >= INJECT_ARGV_COUNT_MAX) { + ret = -EINVAL; + DRV_RDMA_LOG_WARN("too many arguments: '%s'\n", token); + goto end; + } + + argv[*argc] = token; + + token = strsep(&cmd, " "); + (*argc)++; + } + +end: + return ret; +} + +static char *inject_type_n2p(enum inject_type type) +{ + s32 ret = false; + static char *str[INJECT_TYPE_BUTT] = { "callback", "pause", "abort", + "reset" }; + if (type >= INJECT_TYPE_BUTT) + ret = true; + + return ret ? 
"-" : str[type]; +} + +static enum inject_type inject_type_p2n(char *type) +{ + char *str[INJECT_TYPE_BUTT] = { "callback", "pause", "abort", "reset" }; + enum inject_type e = INJECT_TYPE_CALLBACK; + + for (; e < INJECT_TYPE_BUTT; e++) { + if (!strcasecmp(str[e], type)) + break; + } + return e; +} + +static ssize_t sxe2_drv_inject_deactive_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[INJECT_CMD_MAX_LEN] = { 0 }; + s32 argc = 0; + char *argv[INJECT_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_pci_f *dev; + struct sxe2_injection *injection; + size_t field_len = 0; + unsigned long flags = 0; + unsigned long flags_inject = 0; + + if (*off != 0) + goto end; + + if (count > INJECT_CMD_MAX_LEN) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("Cmd exceeded length limit\n"); + goto end; + } + + dev = filp->private_data; + if (!dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("dev find fail\n"); + goto end; + } + spin_lock_irqsave(&dev->inject_dbg.dbg_lock, flags); + + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR("Cmd copy from user fail\n"); + goto end_unlock; + } + DRV_RDMA_LOG_INFO("Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * INJECT_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end_unlock; + + if (argc != INJECT_CMD_PARA_CONT1) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("Inject Inactive invalid param nums\n"); + goto end_unlock; + } + + field_len = strlen(argv[0]); + if (field_len > INJECT_NAME_MAX_LEN - 1) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("InjectActive name exceed max len:%u\n", + INJECT_NAME_MAX_LEN - 1); + goto end_unlock; + } + + injection = inject_find(dev, argv[0]); + if (injection == NULL) { + ret = -ENOENT; + DRV_RDMA_LOG_ERROR("Find inject fail\n"); + goto end_unlock; + } + + inject_lock(injection, &flags_inject); + injection->alive = 0; + inject_unlock(injection, &flags_inject); + +end_unlock: + spin_unlock_irqrestore(&dev->inject_dbg.dbg_lock, flags); + +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations inject_deactive_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = sxe2_drv_inject_deactive_write, +}; + +static ssize_t sxe2_drv_inject_active_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + char cmd[INJECT_CMD_MAX_LEN] = { 0 }; + s32 argc = 0; + char *argv[INJECT_ARGV_COUNT_MAX] = { 0 }; + struct sxe2_rdma_pci_f *dev; + char *attr = NULL; + struct sxe2_injection *injection; + struct sxe2_injection tmp; + size_t field_len = 0; + s32 i = 0; + char *inject_active_in[INJECT_SHOW_ATTR] = { "type=", "alive=", + "userData=" }; + int str_count = 0; + unsigned long flags = 0; + + if (*off != 0) + goto end; + + if (count > INJECT_CMD_MAX_LEN) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("Cmd exceeded length limit\n"); + goto end; + } + dev = filp->private_data; + if (!dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("dev find fail\n"); + goto end; + } + spin_lock_irqsave(&dev->inject_dbg.dbg_lock, flags); + if (copy_from_user(cmd, buf, count)) { + ret = -EFAULT; + DRV_RDMA_LOG_ERROR("Cmd copy from user fail\n"); + goto end_unlock; + } + DRV_RDMA_LOG_INFO("Request command :%s\n", cmd); + + argc = 0; + memset(argv, 0, sizeof(*argv) * INJECT_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) + goto end_unlock; + + if (argc < INJECT_CMD_PARA_CONT1 || argc > INJECT_CMD_PARA_CONT4) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR("InjectActive invalid param nums\n"); + goto end_unlock; + } + field_len = strlen(argv[0]); + if (field_len > INJECT_NAME_MAX_LEN - 1) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("InjectActive name exceed max len:%u\n", + INJECT_NAME_MAX_LEN - 1); + goto end_unlock; + } + memset(&tmp, 0, sizeof(tmp)); + + injection = inject_find(dev, argv[0]); + if (injection == NULL) { + ret = -ENOENT; + DRV_RDMA_LOG_ERROR("Find inject fail\n"); + goto end_unlock; + } + tmp.inject_name.name = injection->inject_name.name; + + for (i = 0; i < argc; i++) { + if (!strncmp(argv[i], inject_active_in[0], + strlen(inject_active_in[0]))) { + attr = strstr(argv[i], inject_active_in[0]) + + strlen(inject_active_in[0]); + tmp.type = inject_type_p2n(attr); + } else if (!strncmp(argv[i], inject_active_in[1], + strlen(inject_active_in[1]))) { + attr = strstr(argv[i], inject_active_in[1]) + + strlen(inject_active_in[1]); + str_count = (s32)kstrtol(attr, 0, (long *)&tmp.alive); + } else if (!strncmp(argv[i], inject_active_in[2], + strlen(inject_active_in[2]))) { + attr = strstr(argv[i], inject_active_in[2]) + + strlen(inject_active_in[2]); + field_len = strlen(attr); + if (field_len > INJECT_USR_DATA_LEN - 1) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR( + "InjectActive User data exceed max len:%u\n", + INJECT_USR_DATA_LEN - 1); + goto end_unlock; + } + memcpy(tmp.user_data, attr, field_len); + } + } + inject_fill(&tmp, injection); + +end_unlock: + spin_unlock_irqrestore(&dev->inject_dbg.dbg_lock, flags); +end: + ret = ret ? 
ret : (ssize_t)count; + return ret; +} + +static const struct file_operations inject_active_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = sxe2_drv_inject_active_write, +}; + +static ssize_t sxe2_drv_inject_count_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + u32 len_total = 0; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_pci_f *dev; + unsigned long flags = 0; + + if (*off != 0) + goto end; + + dev = filp->private_data; + if (!dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("dev find fail\n"); + goto end; + } + spin_lock_irqsave(&dev->inject_dbg.dbg_lock, flags); + + rsp = kzalloc(sizeof(char) * INJECT_SNPRINTF_LEN_MAX, + GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + DRV_RDMA_LOG_ERROR("Inject rsp kmalloc fail\n"); + goto end_unlock; + } + rsp_end = rsp; + + INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "Inject Count: %d\n", + inject_count(dev)); + + ret = simple_read_from_buffer(buf, count, off, rsp, (ssize_t)len_total); + if (ret < 0) + DRV_RDMA_LOG_ERROR("simple read error %zu\n", ret); + + kfree(rsp); + rsp = NULL; + +end_unlock: + spin_unlock_irqrestore(&dev->inject_dbg.dbg_lock, flags); + +end: + return ret; +} + +static const struct file_operations inject_count_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe2_drv_inject_count_read, +}; +int sxe2_drv_inject_show_read_parse_args(char *cmd, char *rsp, + inject_filter_func *filter, + char **attr, u32 *len) +{ + s32 argc = 0; + char *argv[INJECT_ARGV_COUNT_MAX] = { 0 }; + u32 len_total = 0; + int ret = 0; + char *inject_show_in[INJECT_SHOW_ATTR] = { "all", + "mid=", "name=", "status=" }; + + argc = 0; + memset(argv, 0, sizeof(*argv) * INJECT_ARGV_COUNT_MAX); + ret = split_command(cmd, &argc, argv); + if (ret) { + ret = -EINVAL; + goto end; + } + + if (argc != INJECT_CMD_PARA_CONT1) { + INJECT_DEBUG_SNPRINTF(rsp, len_total, + "InjectShow invalid param nums\n"); + *len = len_total; + ret = -EINVAL; + goto end; + } + + if (!strncmp(argv[0], inject_show_in[1], strlen(inject_show_in[1]))) { + *filter = inject_filter_mid; + *attr = strstr(argv[0], inject_show_in[1]) + + strlen(inject_show_in[1]); + } else if (!strncmp(argv[0], inject_show_in[2], + strlen(inject_show_in[2]))) { + *filter = inject_filter_name; + *attr = strstr(argv[0], inject_show_in[2]) + + strlen(inject_show_in[2]); + } else if (!strncmp(argv[0], inject_show_in[3], + strlen(inject_show_in[3]))) { + *filter = inject_filter_status; + *attr = strstr(argv[0], inject_show_in[3]) + + strlen(inject_show_in[3]); + } else if (strncmp(argv[0], inject_show_in[0], + strlen(inject_show_in[0]))) { + INJECT_DEBUG_SNPRINTF(rsp, len_total, + "InjectShow invalid param str %s\n", + argv[0]); + *len = len_total; + ret = -EINVAL; + goto end; + } +end: + return ret; +} +static ssize_t sxe2_drv_inject_show_read(struct file *filp, char __user *buf, + size_t count, loff_t *off) +{ + ssize_t ret = 0; + u32 len_total = 0; + + char *cmd; + char *rsp = NULL; + char *rsp_end; + struct sxe2_rdma_pci_f *dev; + u32 inject_count = 0; + char *attr = NULL; + inject_filter_func filter = NULL; + struct inject_custom_show_rsp inject_rsp; + struct list_head *pos = NULL; + struct sxe2_injection *injection; + unsigned long flags = 0; + + char *inject_info_format[INJECT_MAX_ATTR] = { "MID", "Name", + "Status", "Alive", + "Type", "UserData" }; + + if (*off != 0) + goto end; + + dev = filp->private_data; + if (!dev) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR("dev find fail\n"); + goto end; + } + 
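+	/* dbg_lock serializes against inject_show writers updating the
+	 * stored filter command; list_lock is nested inside it while
+	 * walking g_inject_list below.
+	 */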
spin_lock_irqsave(&dev->inject_dbg.dbg_lock, flags);
+
+	cmd = dev->inject_dbg.inject_cmd;
+	DRV_RDMA_LOG_INFO("Request command :%s\n", cmd);
+
+	rsp = kzalloc(sizeof(char) * INJECT_SNPRINTF_LEN_MAX, GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_ERROR("Inject rsp kmalloc fail\n");
+		goto end_unlock;
+	}
+	rsp_end = rsp;
+
+	ret = sxe2_drv_inject_show_read_parse_args(cmd, rsp_end, &filter, &attr,
+						   &len_total);
+	if (ret)
+		goto end_kfree;
+
+	/* IRQs are already disabled under dbg_lock; take the nested
+	 * list_lock without irqsave so the outer saved flags are not
+	 * clobbered and interrupts are restored correctly on return
+	 */
+	spin_lock(&dev->inject_mem.list_lock);
+	list_for_each(pos, &dev->inject_mem.g_inject_list) {
+		injection = list_entry(pos, struct sxe2_injection, list_node);
+		if (filter == NULL || filter(injection, attr)) {
+			memset(&inject_rsp, 0, sizeof(inject_rsp));
+			inject_fmt_item(injection, &inject_rsp);
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%u:\n",
+					      inject_count);
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%s:\t\t%u\n",
+					      inject_info_format[0],
+					      inject_rsp.mid);
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%s:\t\t%s\n",
+					      inject_info_format[1],
+					      inject_rsp.name);
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%s:\t\t%s\n",
+					      inject_info_format[2],
+					      inject_rsp.alive ? "Active" :
+								 "Inactive");
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%s:\t\t%d\n",
+					      inject_info_format[3],
+					      inject_rsp.alive);
+			INJECT_DEBUG_SNPRINTF(
+				rsp_end, len_total, "%s:\t\t%s\n",
+				inject_info_format[4],
+				inject_rsp.alive ?
+					inject_type_n2p(inject_rsp.type) :
+					"-");
+			INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "%s:\t%s\n",
+					      inject_info_format[5],
+					      inject_rsp.usr_data);
+			inject_count++;
+		}
+	}
+	INJECT_DEBUG_SNPRINTF(rsp_end, len_total, "InjectCounts %u\n",
+			      inject_count);
+	spin_unlock(&dev->inject_mem.list_lock);
+
+end_kfree:
+	ret = simple_read_from_buffer(buf, count, off, rsp, (ssize_t)len_total);
+	if (ret < 0)
+		DRV_RDMA_LOG_WARN("simple read error %zd\n", ret);
+
+	kfree(rsp);
+	rsp = NULL;
+end_unlock:
+	spin_unlock_irqrestore(&dev->inject_dbg.dbg_lock, flags);
+end:
+	return ret;
+}
+
+static ssize_t sxe2_drv_inject_show_write(struct file *filp,
+					  const char __user *buf, size_t count,
+					  loff_t *off)
+{
+	ssize_t ret = 0;
+	char *show_cmd = NULL;
+	struct sxe2_rdma_pci_f *dev = NULL;
+	unsigned long flags = 0;
+
+	if (*off != 0)
+		goto end;
+
+	dev = filp->private_data;
+	if (!dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR("dev find fail\n");
+		goto end;
+	}
+	spin_lock_irqsave(&dev->inject_dbg.dbg_lock, flags);
+
+	show_cmd = dev->inject_dbg.inject_cmd;
+	memset(show_cmd, 0, sizeof(char) * INJECT_CMD_MAX_LEN);
+
+	/* reserve one byte for the NUL terminator; the stored command is
+	 * later treated as a terminated string by split_command()
+	 */
+	if (count >= INJECT_CMD_MAX_LEN) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_ERROR("Cmd exceeded length limit\n");
+		goto end_unlock;
+	}
+
+	if (copy_from_user(show_cmd, buf, count)) {
+		ret = -EFAULT;
+		DRV_RDMA_LOG_ERROR("Cmd copy from user fail\n");
+		goto end_unlock;
+	}
+	DRV_RDMA_LOG_INFO("Request command :%s\n", show_cmd);
+
+end_unlock:
+	spin_unlock_irqrestore(&dev->inject_dbg.dbg_lock, flags);
+	ret = ret ?
ret : (ssize_t)count; + +end: + return ret; +} + +static const struct file_operations inject_show_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe2_drv_inject_show_read, + .write = sxe2_drv_inject_show_write, +}; + +void sxe2_drv_inject_clean_debug_files(struct sxe2_rdma_device *dev) +{ + struct sxe2_inject_debug *dbg_node = NULL; + + dbg_node = &dev->rdma_func->inject_dbg; + debugfs_remove_recursive(dbg_node->dbg_root); + dbg_node->dbg_root = NULL; +} +EXPORT_SYMBOL(sxe2_drv_inject_clean_debug_files); + +void sxe2_drv_inject_create_debugfs_files(struct sxe2_rdma_device *dev) +{ + struct sxe2_inject_debug *dbg_node = NULL; + + dbg_node = &dev->rdma_func->inject_dbg; + dbg_node->dbg_root = sxe2_debugfs_create_dir( + "inject", sxe2_drv_debugfs_get_dev_root(dev)); + if (IS_ERR_OR_NULL(dbg_node->dbg_root)) { + DRV_RDMA_LOG_ERROR("Create debugfs dir fail.\n"); + goto end; + } + + sxe2_debugfs_create_file("inject_show", + SXE2_INJECT_DEBUG_FILE_READ_AND_WRITE, + dbg_node->dbg_root, dev->rdma_func, + &inject_show_fops); + sxe2_debugfs_create_file("inject_count", + SXE2_INJECT_DEBUG_FILE_READ_ONLY, + dbg_node->dbg_root, dev->rdma_func, + &inject_count_fops); + sxe2_debugfs_create_file("inject_active", + SXE2_INJECT_DEBUG_FILE_WRITE_ONLY, + dbg_node->dbg_root, dev->rdma_func, + &inject_active_fops); + sxe2_debugfs_create_file("inject_deactive", + SXE2_INJECT_DEBUG_FILE_WRITE_ONLY, + dbg_node->dbg_root, dev->rdma_func, + &inject_deactive_fops); + + spin_lock_init(&dbg_node->dbg_lock); + +end: + return; +} +EXPORT_SYMBOL(sxe2_drv_inject_create_debugfs_files); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..da1edb56499a8c73518c3be2938929a55640ce95 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_debugfs.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_inject_debugfs.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DRV_RDMA_INJECT_DEBUGFS_H_ +#define _SXE2_DRV_RDMA_INJECT_DEBUGFS_H_ + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +#include +#include "sxe2_drv_rdma_inject.h" +#include "sxe2_drv_rdma_common.h" + +#define DEBUGFS_ROOT_DIR "sxe2" +#define INJECT_NAME_MAX_LEN (120) +#define INJECT_USR_DATA_LEN (64) +#define INJECT_SNPRINTF_LEN_MAX (2 * 1024 * 1024) + +#define INJECT_DEBUG_SNPRINTF(rsp, lens, ...) 
\ + { \ + u32 _injected_len_ = (u32)snprintf( \ + rsp, INJECT_SNPRINTF_LEN_MAX, __VA_ARGS__); \ + lens += _injected_len_; \ + rsp += _injected_len_; \ + } +typedef bool (*inject_filter_func)(struct sxe2_injection *, const char *); + +struct inject_custom_show_rsp { + u32 mid; + s32 alive; + s8 name[INJECT_NAME_MAX_LEN]; + s8 usr_data[INJECT_USR_DATA_LEN]; + enum inject_type type; +}; + +enum inject_attr { + INJECT_ATTR_MID, + INJECT_ATTR_NAME, + INJECT_ATTR_STATUS, + INJECT_ATTR_ALIVE, + INJECT_ATTR_TYPE, + INJECT_ATTR_USERDATA, + INJECT_MAX_ATTR, +}; + +void sxe2_drv_inject_clean_debug_files(struct sxe2_rdma_device *dev); + +void sxe2_drv_inject_create_debugfs_files(struct sxe2_rdma_device *dev); + +struct dentry *sxe2_drv_debugfs_get_dev_root(struct sxe2_rdma_device *dev); + +int sxe2_drv_inject_show_read_parse_args(char *cmd, char *rsp, + inject_filter_func *filter, + char **attr, u32 *len); + +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.c new file mode 100644 index 0000000000000000000000000000000000000000..4cd50a999df876b55661e442b8613c023dc4ee1a --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.c @@ -0,0 +1,1711 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_inject_reg.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) + +#include +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_mq_debugfs.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_mr.h" +#include "sxe2_drv_eq.h" +#include "sxe2_drv_rdma_inject_reg.h" +#define INJECT_AEQ_ADD_CQ_RET (-1) +#define INJECT_CQ_ICM_CTX_DESTROY_RET (-1) +#define INJECT_COMMON_RET (-EFAULT) +#define INJECT_COMMON_ERR_PTR ERR_PTR(INJECT_COMMON_RET) + +static void sxe2_drv_inject_ah_err_idx(char *user_data, + union sxe2_ah_info *ah_info) +{ + int count = 0; + long data = 0; + + count = kstrtol(user_data, 0, &data); + ah_info->field.ah_idx = data; +} + +static void sxe2_drv_inject_mq_creat(char *user_data, + struct sxe2_mq_err_dbg_val *err_val) +{ + int count = 0; + + count = kstrtol(user_data, 0, (long *)&err_val->mqc_ignore_vld); + +} + +static void sxe2_drv_inject_mq_dma_err(char *user_data, + struct sxe2_mq_err_dbg_val *err_val, + u32 *p1, u32 *p2, struct sxe2_mq_ctx *mq) +{ + int count = 0; + + err_val->mqc_addr_vld = true; + count = kstrtol(user_data, 16, (long *)&err_val->mqc_addr); + + if (err_val->mqc_addr_vld) { + *p1 = (u32)(err_val->mqc_addr >> MQ_BITS_PER_INT); + *p2 = (u32)(err_val->mqc_addr); + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:ori addr:%#llx, modify addr:%#llx\n", + mq->mq_ctx_pa, err_val->mqc_addr); + } + +} + +static void sxe2_drv_inject_cq_swstatus_err(char *user_data, struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_cq *cq_ctx) +{ + if (rdma_dev->rdma_func->inject_aeq.cq_sw_status_err) { + cq_ctx->cqc.sw_status = 0; + DRV_RDMA_LOG_DEBUG_BDF("inject cq sw_status 0 err.\n"); + } +} + +static void sxe2_drv_inject_ceq_sw_status_err(char *user_data, + struct sxe2_rdma_device *rdma_dev, struct sxe2_rdma_ctx_ceq *ceq_ctx) +{ + if (rdma_dev->rdma_func->inject_aeq.ceq_sw_status_err) { + ceq_ctx->eqc.sw_status = 0; + DRV_RDMA_LOG_DEBUG_BDF("inject ceq sw_status 0 err.\n"); + } +} + +static void sxe2_drv_inject_db_ceqn_err(char *user_data, + struct sxe2_rdma_device 
*rdma_dev, u32 *ci_eqn) +{ + if (rdma_dev->rdma_func->inject_aeq.db_ceqn_err) { + *ci_eqn = 0xffff; + DRV_RDMA_LOG_DEBUG_BDF("inject db ceqn 0xffff err.\n"); + } +} + +static void sxe2_drv_inject_srq_limit_flag(char *user_data, + struct sxe2_rdma_device *rdma_dev, struct mq_wqe_of_srq *wqe) +{ + if (rdma_dev->rdma_func->inject_aeq.srq_limit_flag) { + wqe->lwm = 4; + DRV_RDMA_LOG_DEV_DEBUG("inject srq limit err. lwm(%d)\n", wqe->lwm); + } +} + +static void sxe2_drv_inject_ceq_ci_noupdate(char *user_data, + struct sxe2_rdma_device *rdma_dev, u32 *ceq_size, u32 ceq_id) +{ + if (rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate == SXE2_INJECT_EQ_CEQ0_VALUE && + ceq_id == 0) { + DRV_RDMA_LOG_DEBUG_BDF("inject ceq0 ci no update err ceq min size16.\n"); + *ceq_size = SXE2_MIN_EQ_SIZE * 4; + } else if (rdma_dev->rdma_func->inject_aeq.ceq_ci_noupdate == SXE2_INJECT_EQ_CEQS_VALUE && + ceq_id != 0) { + DRV_RDMA_LOG_DEBUG_BDF("inject ceq%u ci no update err ceq min size8.\n", ceq_id); + *ceq_size = SXE2_MIN_EQ_SIZE * 2; + } +} + +static void sxe2_drv_inject_mq_duplicate_creat(char *user_data, u32 *p1, + u32 *p2, struct sxe2_mq_ctx *mq) +{ + SXE2_BAR_WRITE_32(*p1, mq->dev->hw_regs[MQC_ADDR_HIGH]); + SXE2_BAR_WRITE_32(*p2, mq->dev->hw_regs[MQC_ADDR_LOW]); + SXE2_BAR_WRITE_32(SXE2_MQC_ADDR_VLD_SET, + mq->dev->hw_regs[MQC_ADDR_VLD]); + udelay(mq->dev->hw_attrs.max_done_count * + mq->dev->hw_attrs.max_sleep_count); + DRV_RDMA_LOG_DEBUG("MQ DEBUGFS:duplicate create\n"); + +} + +static void sxe2_drv_inject_mq_size(char *user_data, + struct sxe2_mq_err_dbg_val *err_val, + struct sxe2_mqc *mqc, + struct sxe2_mq_ctx *mq) +{ + int count = 0; + + err_val->mqc_size_vld = true; + count = kstrtol(user_data, 16, (long *)&err_val->mqc_size); + + if (err_val->mqc_size_vld) { + mqc->SQ_size = err_val->mqc_size & 0xF; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:ori mq_size:%#llx, modify mq_size:%#llx\n", + mq->hw_mq_size, mqc->SQ_size); + } + +} + +static void sxe2_drv_inject_mq_base(char *user_data, + struct sxe2_mq_err_dbg_val *err_val, + struct sxe2_mqc *mqc, + struct sxe2_mq_ctx *mq) +{ + int count = 0; + + err_val->mqc_base_vld = true; + count = kstrtol(user_data, 16, (long *)&err_val->mqc_base); + + if (err_val->mqc_base_vld) { + mqc->SQ_base = err_val->mqc_base; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:ori mq_base:%#llx, modify mq_base:%#llx\n", + (mq->mq_buf_pa >> SXE2_MQC_SQ_BASE_OFFSET), + mqc->SQ_base); + } + +} + +static void +sxe2_drv_inject_rcms_ctx_err_qp(char *user_data, + struct sxe2_rcms_create_obj_info *info, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val) +{ + int count = 0; + + err_cqe_val->fpte_rsc_type = SXE2_RCMS_OBJ_QP; + count = kstrtol(user_data, 10, (long *)&err_cqe_val->fpte_err_type); + + g_inject_fpte_err = 0; + if ((info->rsrc_type == err_cqe_val->fpte_rsc_type) && + (err_cqe_val->fpte_err_type != 0)) { + g_inject_fpte_err = err_cqe_val->fpte_err_type; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:fpte_rsc_type:%#x, fpte_err_type %#x,\n" + "fptes_index[0]:%#x, add_fpt_cnt:%#x\n", + err_cqe_val->fpte_rsc_type, err_cqe_val->fpte_err_type, + info->rcms_info->fpte_indexes[0], info->add_fpte_cnt); + } + +} + +static void +sxe2_drv_inject_rcms_ctx_err_ssnt(char *user_data, + struct sxe2_rcms_create_obj_info *info, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val) +{ + int count = 0; + + err_cqe_val->fpte_rsc_type = SXE2_RCMS_OBJ_SSNT; + count = kstrtol(user_data, 10, (long *)&err_cqe_val->fpte_err_type); + + g_inject_fpte_err = 0; + if ((info->rsrc_type == err_cqe_val->fpte_rsc_type) && + (err_cqe_val->fpte_err_type != 0)) 
{ + g_inject_fpte_err = err_cqe_val->fpte_err_type; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:fpte_rsc_type:%#x, fpte_err_type %#x,\n" + "fptes_index[0]:%#x, add_fpt_cnt:%#x\n", + err_cqe_val->fpte_rsc_type, err_cqe_val->fpte_err_type, + info->rcms_info->fpte_indexes[0], info->add_fpte_cnt); + } + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + void *qp_wqe, struct sxe2_rdma_ctx_qp *qp, u8 opcode) +{ + int count = 0; + struct qp_mq_wqe *normal_qp = NULL; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe = NULL; + + err_cqe_val->rsc_wqe_err_type = QPN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + if (!rdma_qp->sw_cc_enable || opcode != SXE2_MQ_OP_CREATE_QP) { + normal_qp = (struct qp_mq_wqe *)qp_wqe; + normal_qp->qpn = (u64)err_cqe_val->rsc_wqe_err_val; + } else { + create_cc_qp_wqe = (struct qp_mq_create_cc_qp_wqe *)qp_wqe; + create_cc_qp_wqe->qp_id = (u64)err_cqe_val->rsc_wqe_err_val; + } + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#x,\n" + "ori_qpn %#x\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + qp->qp_common.qpn); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpctx( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + void *qp_wqe, struct sxe2_rdma_ctx_qp *qp, u8 opcode) +{ + int count = 0; + struct qp_mq_wqe *normal_qp = NULL; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe = NULL; + + err_cqe_val->rsc_wqe_err_type = QP_CTX_PA_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + if (!rdma_qp->sw_cc_enable || opcode != SXE2_MQ_OP_CREATE_QP) { + normal_qp = (struct qp_mq_wqe *)qp_wqe; + normal_qp->qp_context_address = + (u64)err_cqe_val->rsc_wqe_err_val; + } else { + create_cc_qp_wqe = (struct qp_mq_create_cc_qp_wqe *)qp_wqe; + create_cc_qp_wqe->qp_context_address = + (u64)err_cqe_val->rsc_wqe_err_val; + } + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#x,\n" + "ori_qp_ctx_pa %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + qp->hw_host_ctx_pa); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_createOp( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + void *qp_wqe, struct sxe2_rdma_ctx_qp *qp) +{ + int count = 0; + struct qp_mq_wqe *normal_qp = NULL; + struct sxe2_rdma_qp *rdma_qp = + (struct sxe2_rdma_qp *)qp->qp_common.back_qp; + struct qp_mq_create_cc_qp_wqe *create_cc_qp_wqe = NULL; + + err_cqe_val->rsc_wqe_err_type = QP_CREATE_OP_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + if (!rdma_qp->sw_cc_enable) { + normal_qp = (struct qp_mq_wqe *)qp_wqe; + normal_qp->op = (u64)err_cqe_val->rsc_wqe_err_val; + } else { + create_cc_qp_wqe = (struct qp_mq_create_cc_qp_wqe *)qp_wqe; + create_cc_qp_wqe->op = (u64)err_cqe_val->rsc_wqe_err_val; + } + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#x,\n" + "err_qp_create_op %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u64)SXE2_MQ_OP_CREATE_QP); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_swState( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SW_STATE_DEBUGFS; + count = kstrtol(user_data, 16, (long 
*)&err_cqe_val->rsc_wqe_err_val); + + qpc->qp_state = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori qpc_state 0x0, err qpc_state %#x\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_srqn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SRQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->srqn = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_srqn %#x, err qpc_srqn %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->ctx_info.srqn, (u64)qpc->srqn); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_scqn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SEND_CQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->cqn_snd = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_send_cqn %#x, err qpc_send_cqn %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->ctx_info.send_cq_num, (u64)qpc->cqn_snd); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_rcqn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_RECV_CQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->cqn_rcv = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_recv_cqn %#x, err qpc_recv_cqn %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->ctx_info.rcv_cq_num, (u64)qpc->cqn_rcv); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_type( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_TYPE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_type %#x\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u64)qpc->qp_type); + qpc->qp_type = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG("MQ DEBUGFS:err qpc_type %#llx\n", + (u64)qpc->qp_type); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_pmtu( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_PMTU_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->pmtu = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_pmtu %#x, err qpc_pmtu %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->ctx_info.udp_info->pmtu, (u64)qpc->pmtu); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_sqSize( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp 
*rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SQ_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->log_sq_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_sq_size %#x, err qpc_sq_size %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->qp_ctx.hw_sq_size, (u64)qpc->log_sq_size); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_rqSize( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SQ_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->log_rq_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_sq_size %#x, err qpc_sq_size %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->qp_ctx.hw_rq_size, (u64)qpc->log_rq_size); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_rqType( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_RQ_TYPE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->rq_type = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_rq_type %#x, err qpc_rq_type %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u32)(rdma_qp->qp_ctx.qp_common.srq ? SXE2_SRQ : + SXE2_REGULAR_RQ), + (u64)qpc->rq_type); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_pageSize( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_qpc *qpc, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_PAGE_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + qpc->log_page_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori qpc_page_size %#x, err qpc_page_size %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + rdma_qp->log_page_size, (u64)qpc->log_page_size); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_qpc_swState_jump( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct ib_qp_attr *attr, struct sxe2_rdma_qp *rdma_qp) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = QPC_SW_STATE_JUMP_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + rdma_qp->qp_ctx.qp_state = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori qpc_state %#x, err qpc_state %#x\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u32)attr->qp_state, rdma_qp->qp_ctx.qp_state); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_cqn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_cq_wqe *cq_wqe, struct sxe2_rdma_ctx_cq *cq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = CQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + cq_wqe->cqn = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori cqc_eqn %#llx, err cqc_eqn %#llx\n", + 
err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + cq->cqc.eqn, cq_wqe->eqn); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_cqcn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_cq_wqe *cq_wqe, struct sxe2_rdma_ctx_cq *cq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = CQC_CEQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + cq_wqe->eqn = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori cqc_eqn %#llx, err cqc_eqn %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + cq->cqc.eqn, cq_wqe->eqn); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_cq_swState( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_cq_wqe *cq_wqe, struct sxe2_rdma_ctx_cq *cq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = CQC_SW_STATE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + cq_wqe->sw_status = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori cqc_state %#llx, err cqc_state %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + cq->cqc.sw_status, cq_wqe->sw_status); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_cq_size( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_cq_wqe *cq_wqe, struct sxe2_rdma_ctx_cq *cq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = CQC_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + cq_wqe->log_cq_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori cqc_size %#llx, err cqc_size %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + cq->cqc.log_cq_size, cq_wqe->log_cq_size); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_srqn( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct mq_wqe_of_srq *srq_wqe, struct sxe2_rdma_srq_ctx *srq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = SRQN_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + srq_wqe->srqn = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori srqn %#x, err srqn %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + srq->srq_drv.srq_id, srq_wqe->srqn); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_srq_swState( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct mq_wqe_of_srq *srq_wqe, struct sxe2_rdma_srq_ctx *srq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = SRQC_SW_STATE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + srq_wqe->state = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori srqc_state 0, err srqc_state %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + srq_wqe->state); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_srq_size( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct mq_wqe_of_srq *srq_wqe, struct sxe2_rdma_srq_ctx *srq) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = SRQC_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + srq_wqe->log_srq_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject 
rsc_err_type:%#x, rsc_err_val %#llx\n"
+		"ori srq_size %#lx, err srq_size %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		srq->hw_srq_size, srq_wqe->log_srq_size);
+}
+
+static void sxe2_drv_inject_mq_rsc_wqe_ceqn(
+	char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+	struct sxe2_eq_wqe *eq_wqe, struct sxe2_rdma_ctx_ceq *ceq)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = CEQN_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	eq_wqe->eqn = err_cqe_val->rsc_wqe_err_val;
+	DRV_RDMA_LOG_DEBUG(
+		"MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n"
+		"ori ceqn %#x, err ceqn %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		ceq->ceq_id, eq_wqe->eqn);
+}
+
+static void sxe2_drv_inject_mq_rsc_wqe_ceq_swState(
+	char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+	struct sxe2_eq_wqe *eq_wqe, struct sxe2_rdma_ctx_ceq *ceq)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = CEQC_SW_STATE_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	eq_wqe->sw_status = err_cqe_val->rsc_wqe_err_val;
+	DRV_RDMA_LOG_DEBUG(
+		"MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n"
+		"ori ceqc_state %#llx, err ceqc_state %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		ceq->eqc.sw_status, eq_wqe->sw_status);
+}
+
+static void sxe2_drv_inject_mq_rsc_wqe_ceq_size(
+	char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+	struct sxe2_eq_wqe *eq_wqe, struct sxe2_rdma_ctx_ceq *ceq)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = CEQC_SIZE_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	eq_wqe->log_eq_size = err_cqe_val->rsc_wqe_err_val;
+	DRV_RDMA_LOG_DEBUG(
+		"MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n"
+		"ori ceqc_size %#llx, err ceqc_size %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		ceq->eqc.log_eq_size, eq_wqe->log_eq_size);
+}
+
+static void
+sxe2_drv_inject_mq_rsc_wqe_ah(char *user_data,
+			      struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+			      struct sxe2_ctx_ah *ctx_ah)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = AH_ID_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	ctx_ah->ah_info.field.ah_idx = err_cqe_val->rsc_wqe_err_val;
+	DRV_RDMA_LOG_DEBUG(
+		"MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n"
+		"err ah_id %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		(u64)ctx_ah->ah_info.field.ah_idx);
+}
+
+static void sxe2_drv_inject_mq_rsc_wqe_ah_query(
+	char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+	union sxe2_hw_ahc *ah_ctx)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = AH_ID_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	ah_ctx->field.ah_idx = err_cqe_val->rsc_wqe_err_val;
+	DRV_RDMA_LOG_DEBUG(
+		"MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n"
+		"err ah_id %#llx\n",
+		err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val,
+		(u64)ah_ctx->field.ah_idx);
+}
+
+static void sxe2_drv_inject_mq_rsc_wqe_fpte(
+	char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val,
+	struct sxe2_rcms_update_fpte_wqe *update_fpte_wqe)
+{
+	int count = 0;
+
+	err_cqe_val->rsc_wqe_err_type = FPTE_CNT_DEBUGFS;
+	count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val);
+
+	update_fpte_wqe->fpt_entry_cnt =
err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx\n" + "ori fpte_cnt %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u64)update_fpte_wqe->fpt_entry_cnt); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_id_reg( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + union sxe2_reg_mr_info *mr_info) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_ID_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + mr_info->field.mr_idx = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "err mr_id %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + mr_info->field.mr_idx); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_id_dereg( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + union sxe2_dereg_mr_info *mr_info) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_ID_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + mr_info->field.mr_idx = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "err mr_id %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + mr_info->field.mr_idx); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_id_query( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + u32 *mr_index) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_ID_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + *mr_index = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "err mr_id %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + *mr_index); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_accessRight( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + union sxe2_reg_mr_info *mr_info) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_ACCESS_RIGHT_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + mr_info->field.access_right = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "err mr_access_right %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + mr_info->field.access_right); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_type( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + union sxe2_reg_mr_info *mr_info, struct sxe2_mr *vendor_mr) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_TYPE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + mr_info->field.mr_type = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori mr_type %#llx, err mr_type %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u64)vendor_mr->is_mw, mr_info->field.mr_type); + +} + +static void sxe2_drv_inject_mq_rsc_wqe_mr_pageSize( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + union sxe2_reg_mr_info *mr_info, struct sxe2_mr *vendor_mr) +{ + int count = 0; + + err_cqe_val->rsc_wqe_err_type = MR_PAGE_SIZE_DEBUGFS; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->rsc_wqe_err_val); + + mr_info->field.log_entity_size = err_cqe_val->rsc_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject 
rsc_err_type:%#x, rsc_err_val %#llx,\n" + "ori mr_page_size %#llx, err mr_page_size %#llx\n", + err_cqe_val->rsc_wqe_err_type, err_cqe_val->rsc_wqe_err_val, + (u64)ilog2(vendor_mr->page_size), + mr_info->field.log_entity_size); + +} + +static void sxe2_drv_inject_mq_pbl_err( + char *user_data, struct sxe2_rcms_manage_vf_pble_cp_wqe *manage_cp_wqe, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val) +{ + int count = 0; + + err_cqe_val->manage_pbl_wqe_err_type = MQ_MNG_PBL_OPCODE; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->manage_pbl_wqe_err_val); + + manage_cp_wqe->op = err_cqe_val->manage_pbl_wqe_err_val; + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject rsc_err_type:%#x, rsc_err_val %#llx ori op %#x, err op %#llx\n", + err_cqe_val->manage_pbl_wqe_err_type, + err_cqe_val->manage_pbl_wqe_err_val, + (u32)SXE2_MQ_OP_MANAGE_PBLE_BP, manage_cp_wqe->op); + +} + +static void +sxe2_drv_inject_rcms_commit_err_qp(char *user_data, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_rcms_info *rcms_info) +{ + int count = 0; + + err_cqe_val->commit_wqe_err_type = SXE2_RCMS_OBJ_QP; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->commit_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject commit_wqe_err_type:%#x, commit_wqe_err_val\n" + "%#x, ori qp_cnt %#llx, err qp_cnt %#llx\n", + err_cqe_val->commit_wqe_err_type, + err_cqe_val->commit_wqe_err_val, + rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].cnt, + err_cqe_val->commit_wqe_err_val); + rcms_info->rcms_obj[SXE2_RCMS_OBJ_QP].cnt = + err_cqe_val->commit_wqe_err_val; + +} + +static void +sxe2_drv_inject_rcms_commit_err_cq(char *user_data, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_rcms_info *rcms_info) +{ + int count = 0; + + err_cqe_val->commit_wqe_err_type = SXE2_RCMS_OBJ_CQ; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->commit_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject commit_wqe_err_type:%#x, commit_wqe_err_val %#x,\n" + "ori cq_cnt %#llx, err cq_cnt %#llx\n", + err_cqe_val->commit_wqe_err_type, + err_cqe_val->commit_wqe_err_val, + rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].cnt, + err_cqe_val->commit_wqe_err_val); + rcms_info->rcms_obj[SXE2_RCMS_OBJ_CQ].cnt = + err_cqe_val->commit_wqe_err_val; + +} + +static void sxe2_drv_inject_rcms_commit_err_srq( + char *user_data, struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_rcms_info *rcms_info) +{ + int count = 0; + + err_cqe_val->commit_wqe_err_type = SXE2_RCMS_OBJ_SRQ; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->commit_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject commit_wqe_err_type:%#x, commit_wqe_err_val %#x,\n" + "ori srq_cnt %#llx, err srq_cnt %#llx\n", + err_cqe_val->commit_wqe_err_type, + err_cqe_val->commit_wqe_err_val, + rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt, + err_cqe_val->commit_wqe_err_val); + rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt = + err_cqe_val->commit_wqe_err_val; + +} + +static void +sxe2_drv_inject_rcms_commit_err_mr(char *user_data, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_rcms_info *rcms_info) +{ + int count = 0; + + err_cqe_val->commit_wqe_err_type = SXE2_RCMS_OBJ_MR; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->commit_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject commit_wqe_err_type:%#x, commit_wqe_err_val %#x,\n" + "ori mr_cnt %#llx, err mr_cnt %#llx\n", + err_cqe_val->commit_wqe_err_type, + err_cqe_val->commit_wqe_err_val, + rcms_info->rcms_obj[SXE2_RCMS_OBJ_MR].cnt, + err_cqe_val->commit_wqe_err_val); + 
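/*
+	 * As in the QP/CQ/SRQ commit injectors above: the original object
+	 * count is logged first, then overwritten with the user-supplied
+	 * value, presumably so the next RCMS commit operation carries the
+	 * corrupted count.
+	 */
+	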
rcms_info->rcms_obj[SXE2_RCMS_OBJ_MR].cnt = + err_cqe_val->commit_wqe_err_val; + +} + +static void +sxe2_drv_inject_rcms_commit_err_ah(char *user_data, + struct sxe2_mq_err_mcqe_dbg_val *err_cqe_val, + struct sxe2_rcms_info *rcms_info) +{ + int count = 0; + + err_cqe_val->commit_wqe_err_type = SXE2_RCMS_OBJ_AH; + count = kstrtol(user_data, 16, (long *)&err_cqe_val->commit_wqe_err_val); + + DRV_RDMA_LOG_DEBUG( + "MQ DEBUGFS:inject commit_wqe_err_type:%#x, commit_wqe_err_val %#x,\n" + "ori ah_cnt %#llx, err ah_cnt %#llx\n", + err_cqe_val->commit_wqe_err_type, + err_cqe_val->commit_wqe_err_val, + rcms_info->rcms_obj[SXE2_RCMS_OBJ_AH].cnt, + err_cqe_val->commit_wqe_err_val); + rcms_info->rcms_obj[SXE2_RCMS_OBJ_AH].cnt = + err_cqe_val->commit_wqe_err_val; + +} + +static void +sxe2_drv_inject_llwqe_flag(char *user_data, struct sxe2_rdma_device *rdma_dev, + __le64 *push, __le64 *wqe) +{ + if (rdma_dev->rdma_func->inject_aeq.llwqe_flag) + memcpy(push, wqe, 4); +} + +static void sxe2_drv_inject_aeq_ci_noupdate(char *user_data, + struct sxe2_rdma_device *rdma_dev, u32 *aeq_size) +{ + if (rdma_dev->rdma_func->inject_aeq.aeq_ci_noupdate) { + DRV_RDMA_LOG_DEV_DEBUG( + "inject aeq ci no update err ceq min size8.\n"); + *aeq_size = SXE2_MIN_EQ_SIZE * 2; + } +} + +s32 sxe2_drv_inject_reg(struct sxe2_rdma_pci_f *dev) +{ + s32 ret; + + ret = INJECT_REG(dev, "ah_err_idx", sxe2_drv_inject_ah_err_idx, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("ah_err_idx inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "mq_creat", sxe2_drv_inject_mq_creat, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("mq_creat inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "mq_dma_err", sxe2_drv_inject_mq_dma_err, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("mq_dma_err inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "mq_duplicate_creat", + sxe2_drv_inject_mq_duplicate_creat, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_duplicate_creat inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_size", sxe2_drv_inject_mq_size, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("mq_size inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "mq_base", sxe2_drv_inject_mq_base, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("mq_base inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "rcms_ctx_err_qp", + sxe2_drv_inject_rcms_ctx_err_qp, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("rcms_ctx_err_qp inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "rcms_ctx_err_ssnt", + sxe2_drv_inject_rcms_ctx_err_ssnt, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_ctx_err_ssnt inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpn", sxe2_drv_inject_mq_rsc_wqe_qpn, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_qpn inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpctx", + sxe2_drv_inject_mq_rsc_wqe_qpctx, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_qpctx inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_createOp", + sxe2_drv_inject_mq_rsc_wqe_createOp, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_createOp inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_swState", + sxe2_drv_inject_mq_rsc_wqe_qpc_swState, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_swState inject reg fail, ret %d\n", + 
ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_srqn", + sxe2_drv_inject_mq_rsc_wqe_qpc_srqn, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_srqn inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_scqn", + sxe2_drv_inject_mq_rsc_wqe_qpc_scqn, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_scqn inject reg fail, ret %d\n", ret); + } + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_rcqn", + sxe2_drv_inject_mq_rsc_wqe_qpc_rcqn, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_rcqn inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_type", + sxe2_drv_inject_mq_rsc_wqe_qpc_type, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_type inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_pmtu", + sxe2_drv_inject_mq_rsc_wqe_qpc_pmtu, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_pmtu inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_sqSize", + sxe2_drv_inject_mq_rsc_wqe_qpc_sqSize, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_sqSize inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_rqSize", + sxe2_drv_inject_mq_rsc_wqe_qpc_rqSize, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_rqSize inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_rqType", + sxe2_drv_inject_mq_rsc_wqe_qpc_rqType, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_rqType inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_pageSize", + sxe2_drv_inject_mq_rsc_wqe_qpc_pageSize, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_pageSize inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_qpc_swState_jump", + sxe2_drv_inject_mq_rsc_wqe_qpc_swState_jump, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_swState_jump inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_cqn", sxe2_drv_inject_mq_rsc_wqe_cqn, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_cqn inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_cqcn", + sxe2_drv_inject_mq_rsc_wqe_cqcn, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_cqn inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_cq_swState", + sxe2_drv_inject_mq_rsc_wqe_cq_swState, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_cq_swState inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_cq_size", + sxe2_drv_inject_mq_rsc_wqe_cq_size, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_cq_size inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_srqn", + sxe2_drv_inject_mq_rsc_wqe_srqn, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_srqn inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_srq_swState", + sxe2_drv_inject_mq_rsc_wqe_srq_swState, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_srq_swState inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_srq_size", + sxe2_drv_inject_mq_rsc_wqe_srq_size, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_srq_size inject reg fail, ret %d\n", ret); + } + + 
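/*
+	 * Every block in this function repeats the same
+	 * INJECT_REG(dev, name, handler, module_id) shape. A table-driven
+	 * loop would keep the fault list in one place; a minimal sketch,
+	 * assuming only the INJECT_REG() macro used throughout this file
+	 * (the table type and row below are illustrative, not part of the
+	 * driver):
+	 *
+	 *	static const struct {
+	 *		const char *name;
+	 *		void *handler;
+	 *		u32 mid;
+	 *	} inject_tbl[] = {
+	 *		{ "mq_rcs_wqe_ceqn",
+	 *		  sxe2_drv_inject_mq_rsc_wqe_ceqn, SXE2_INJECT_MID_MQ },
+	 *		[... one row per injection point ...]
+	 *	};
+	 *	u32 i;
+	 *
+	 *	for (i = 0; i < ARRAY_SIZE(inject_tbl); i++) {
+	 *		ret = INJECT_REG(dev, inject_tbl[i].name,
+	 *				 inject_tbl[i].handler,
+	 *				 inject_tbl[i].mid);
+	 *		if (ret)
+	 *			DRV_RDMA_LOG_ERROR("%s inject reg fail, ret %d\n",
+	 *					   inject_tbl[i].name, ret);
+	 *	}
+	 */
+	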
ret = INJECT_REG(dev, "mq_rcs_wqe_ceqn", + sxe2_drv_inject_mq_rsc_wqe_ceqn, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_ceqn inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_ceq_swState", + sxe2_drv_inject_mq_rsc_wqe_ceq_swState, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ceq_swState inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_ceq_size", + sxe2_drv_inject_mq_rsc_wqe_ceq_size, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ceq_size inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_ah", sxe2_drv_inject_mq_rsc_wqe_ah, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_ah inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_ah_query", + sxe2_drv_inject_mq_rsc_wqe_ah_query, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ah_query inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_fpte", + sxe2_drv_inject_mq_rsc_wqe_fpte, SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_fpte inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_id_reg", + sxe2_drv_inject_mq_rsc_wqe_mr_id_reg, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_reg inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_id_dereg", + sxe2_drv_inject_mq_rsc_wqe_mr_id_dereg, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_dereg inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_id_query", + sxe2_drv_inject_mq_rsc_wqe_mr_id_query, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_query inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_accessRight", + sxe2_drv_inject_mq_rsc_wqe_mr_accessRight, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_accessRight inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_type", + sxe2_drv_inject_mq_rsc_wqe_mr_type, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_type inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "mq_rcs_wqe_mr_pageSize", + sxe2_drv_inject_mq_rsc_wqe_mr_pageSize, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_pageSize inject reg fail, ret %d\n", + ret); + } + + ret = INJECT_REG(dev, "mq_pbl_err", sxe2_drv_inject_mq_pbl_err, + SXE2_INJECT_MID_MQ); + if (ret) + DRV_RDMA_LOG_ERROR("mq_pbl_err inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "rcms_commit_err_qp", + sxe2_drv_inject_rcms_commit_err_qp, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_qp inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "rcms_commit_err_cq", + sxe2_drv_inject_rcms_commit_err_cq, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_cq inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "rcms_commit_err_srq", + sxe2_drv_inject_rcms_commit_err_srq, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_srq inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "rcms_commit_err_mr", + sxe2_drv_inject_rcms_commit_err_mr, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_mr inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "rcms_commit_err_ah", + 
sxe2_drv_inject_rcms_commit_err_ah, + SXE2_INJECT_MID_MQ); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_ah inject reg fail, ret %d\n", ret); + } + + ret = INJECT_REG(dev, "cq_sw_status_err", sxe2_drv_inject_cq_swstatus_err, + SXE2_INJECT_MID_CQ); + if (ret) + DRV_RDMA_LOG_ERROR("cq_swstatus_err inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "ceq_sw_status_err", sxe2_drv_inject_ceq_sw_status_err, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("ceq_sw_status_err inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "db_ceqn_err", sxe2_drv_inject_db_ceqn_err, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("db_ceqn_err inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "srq_limit_flag", sxe2_drv_inject_srq_limit_flag, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("srq_limit_flag inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "ceq_ci_noupdate", sxe2_drv_inject_ceq_ci_noupdate, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("ceq_ci_noupdate inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "llwqe_flag", sxe2_drv_inject_llwqe_flag, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("llwqe_flag inject reg fail, ret %d\n", ret); + + ret = INJECT_REG(dev, "aeq_ci_noupdate", sxe2_drv_inject_aeq_ci_noupdate, + SXE2_INJECT_MID_CEQ); + if (ret) + DRV_RDMA_LOG_ERROR("aeq_ci_noupdate inject reg fail, ret %d\n", ret); + + return ret; +} + +void sxe2_drv_inject_unreg(struct sxe2_rdma_pci_f *dev) +{ + s32 ret; + + ret = INJECT_UNREG(dev, "ah_err_idx"); + if (ret) { + DRV_RDMA_LOG_ERROR("ah_err_idx inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_creat"); + if (ret) + DRV_RDMA_LOG_ERROR("mq_creat inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "mq_dma_err"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_dma_err inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_duplicate_creat"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_duplicate_creat inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_size"); + if (ret) + DRV_RDMA_LOG_ERROR("mq_size inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "mq_base"); + if (ret) + DRV_RDMA_LOG_ERROR("mq_base inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "rcms_ctx_err_qp"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_ctx_err_qp inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "rcms_ctx_err_ssnt"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_ctx_err_ssnt inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpn"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_qpn inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpctx"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpctx inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_createOp"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_createOp inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_swState"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_swState inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_srqn"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_srqn inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_scqn"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_scqn inject unreg fail, ret %d\n", ret); + } + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_rcqn"); + if (ret) { + DRV_RDMA_LOG_ERROR( + 
"mq_rcs_wqe_qpc_rcqn inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_type"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_type inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_pmtu"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_pmtu inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_sqSize"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_sqSize inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_rqSize"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_rqSize inject reg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_rqType"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_rqType inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_pageSize"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_pageSize inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_qpc_swState_jump"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_qpc_swState_jump inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_cqn"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_cqn inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_cqcn"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_cqn inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_cq_swState"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_cq_swState inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_cq_size"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_cq_size inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_srqn"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_srqn inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_srq_swState"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_srq_swState inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_srq_size"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_srq_size inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_ceqn"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ceqn inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_ceq_swState"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ceq_swState inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_ceq_size"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ceq_size inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_ah"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_rcs_wqe_ah inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_ah_query"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_ah_query inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_fpte"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_fpte inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_mr_id_reg"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_reg inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_mr_id_dereg"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_dereg inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_mr_id_query"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_id_query inject unreg fail, ret %d\n", + ret); + } + + ret = 
INJECT_UNREG(dev, "mq_rcs_wqe_mr_accessRight"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_accessRight inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_mr_type"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_type inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "mq_rcs_wqe_mr_pageSize"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "mq_rcs_wqe_mr_pageSize inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "mq_pbl_err"); + if (ret) { + DRV_RDMA_LOG_ERROR("mq_pbl_err inject unreg fail, ret %d\n", + ret); + } + + ret = INJECT_UNREG(dev, "rcms_commit_err_qp"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_qp inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "rcms_commit_err_cq"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_cq inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "rcms_commit_err_srq"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_srq inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "rcms_commit_err_mr"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_mr inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "rcms_commit_err_ah"); + if (ret) { + DRV_RDMA_LOG_ERROR( + "rcms_commit_err_ah inject unreg fail, ret %d\n", ret); + } + + ret = INJECT_UNREG(dev, "cq_sw_status_err"); + if (ret) + DRV_RDMA_LOG_ERROR("cq_swstatus_err inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "ceq_sw_status_err"); + if (ret) + DRV_RDMA_LOG_ERROR("ceq_sw_status_err inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "db_ceqn_err"); + if (ret) + DRV_RDMA_LOG_ERROR("db_ceqn_err inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "srq_limit_flag"); + if (ret) + DRV_RDMA_LOG_ERROR("srq_limit_flag inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "ceq_ci_noupdate"); + if (ret) + DRV_RDMA_LOG_ERROR("ceq_ci_noupdate inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "llwqe_flag"); + if (ret) + DRV_RDMA_LOG_ERROR("llwqe_flag inject unreg fail, ret %d\n", ret); + + ret = INJECT_UNREG(dev, "aeq_ci_noupdate"); + if (ret) + DRV_RDMA_LOG_ERROR("aeq_ci_noupdate inject unreg fail, ret %d\n", ret); +} + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..9c91871ff11755f898914aa06f694d16f7c6950a --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_inject_reg.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_inject_reg.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DRV_RDMA_INJECT_REG_H_ +#define _SXE2_DRV_RDMA_INJECT_REG_H_ + +#if defined(SXE2_SUPPORT_INJECT) && defined(SXE2_CFG_DEBUG) +s32 sxe2_drv_inject_reg(struct sxe2_rdma_pci_f *dev); + +void sxe2_drv_inject_unreg(struct sxe2_rdma_pci_f *dev); +#endif +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.c new file mode 100644 index 0000000000000000000000000000000000000000..9a5f17a96d0348cb48be2b5e3f78d278a037de6e --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.c @@ -0,0 +1,1223 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_log.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_drv_rdma_log.h" + +#if (defined SXE2_CFG_DEBUG && defined __KERNEL__) + +int time_for_file_name(char *buff, int buf_len) +{ + struct timespec64 tv; + struct tm td; + + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td); + + return snprintf(buff, buf_len, "%04ld-%02d-%02d_%02d:%02d:%02d", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec); +} + +int sxe2_file_write(struct file *file, char *buf, int len) +{ + int ret = 0; + + void *journal; + + journal = current->journal_info; + current->journal_info = NULL; + + if (!file) + return 0; + + do { + ret = kernel_write(file, buf, len, &file->f_pos); + } while (ret == -EINTR); + + if (ret >= 0) + fsnotify_modify(file); + + current->journal_info = journal; + + return ret; +} +#endif + +u32 g_sxe2_rdma_dmesg_level = LOGLEVEL_WARNING; + +#if defined SXE2_CFG_DEBUG && defined __KERNEL__ + +#define FILE_NAME_SIZE 128 +#define SXE2_KLOG_OUT_WAIT (5 * HZ) +#define SWITCH_FILE +#define LOG_PATH_LEN 100 +#define DRV_LOG_FILE_SIZE_MIN_MB 10 +#define DRV_LOG_FILE_SIZE_MAX_MB 200 + +struct sxe2_debug g_sxe2_debug_rdma; +char g_log_path_str_rdma[LOG_PATH_LEN] = { 0 }; +char g_log_path_bin_rdma[LOG_PATH_LEN] = { 0 }; + +static char g_log_path_rdma[80] = { 0 }; +static u32 g_log_space_size_rdma; +static u32 g_log_file_size_rdma; + +#ifdef SXE2_CFG_DEBUG +module_param_string(g_log_path_rdma, g_log_path_rdma, 80, 0644); +MODULE_PARM_DESC( + g_log_path_rdma, + "the path host driver will be saved(<80 chars) Default: /var/log"); + +module_param(g_log_file_size_rdma, uint, 0644); +MODULE_PARM_DESC( + g_log_file_size_rdma, + "single driver log file size(10MB ~ 200MB), Default: 200, Unit: MB"); + +module_param(g_log_space_size_rdma, uint, 0644); +MODULE_PARM_DESC(g_log_space_size_rdma, + "the space allowed host driver log to be store, Default: 0(unlimited), Unit: MB"); +#endif + +static inline int time_for_log(char *buff, int buf_len) +{ + struct timespec64 tv; + struct tm td; + + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td); + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec, tv.tv_nsec * 1000); +} + +static inline char *sxe2_stack_top(void) +{ + unsigned long *ptr = (unsigned long *)(task_thread_info(current) + 1); + + return (char *)(ptr + 1); +} + +static inline struct sxe2_thread_local *sxe2_thread_local_get(struct sxe2_thread_key *key) +{ + return (struct sxe2_thread_local *)(sxe2_stack_top() + key->offset); +} + +static __maybe_unused void sxe2_thread_key_create(int size, + struct sxe2_thread_key *key) +{ + key->offset = g_sxe2_debug_rdma.key_offset; + g_sxe2_debug_rdma.key_offset += sizeof(struct sxe2_thread_local) + size; +} + +static __maybe_unused void *sxe2_thread_get_specific(struct sxe2_thread_key *key) +{ + struct sxe2_thread_local *local = sxe2_thread_local_get(key); + + if (local->magic != DEBUG_TRACE_MAGIC) + return NULL; + + return (void *)local->data; +} + +static __maybe_unused void sxe2_thread_clear_specific(struct sxe2_thread_key *key) +{ + struct sxe2_thread_local *local = sxe2_thread_local_get(key); + + local->magic = 0; +} + +static __maybe_unused int sxe2_filter_file_add(char *name) +{ + struct debug_file *file = NULL; + + file = 
kmalloc(sizeof(struct debug_file), GFP_ATOMIC); + if (!file) { + sxe2_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", + PAGE_SIZE); + return -ENOMEM; + } + strscpy(file->name, name, sizeof(file->name)); + INIT_LIST_HEAD(&file->list); + + list_add_rcu(&file->list, &g_sxe2_debug_rdma.filter_file); + return 0; +} + +static __maybe_unused void sxe2_filter_file_del(char *filename) +{ + struct debug_file *file = NULL; + + list_for_each_entry_rcu(file, &g_sxe2_debug_rdma.filter_file, list) { + if (!strcmp(file->name, filename)) { + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + return; + } + } +} + +static __maybe_unused void sxe2_log_level_modify(u32 level) +{ + sxe2_level_set(level); +} + +static char *sxe2_log_path_query(void) +{ +#ifndef __cplusplus + return g_log_path_rdma; +#else + return NULL; +#endif +} + +static u32 sxe2_log_space_size_query(void) +{ + return g_log_space_size_rdma; +} + +static u32 sxe2_log_file_size_query(void) +{ + return g_log_file_size_rdma; +} + +static void sxe2_log_file_size_modify(u32 size) +{ + g_log_file_size_rdma = size; +} + +#ifndef SXE2_CFG_RELEASE +static inline int sxe2_filter_file_print(const char *filename) +{ + struct debug_file *file = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(file, &g_sxe2_debug_rdma.filter_file, list) { + if (!strcmp(file->name, filename)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +static inline int sxe2_filter_func_print(const char *name) +{ + struct debug_func *func = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(func, &g_sxe2_debug_rdma.filter_func, list) { + if (!strcmp(func->name, name)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +#endif +static __maybe_unused void sxe2_filter_file_clear(void) +{ + struct debug_file *file = NULL; + + do { + file = list_first_or_null_rcu(&g_sxe2_debug_rdma.filter_file, + struct debug_file, list); + if (file) { + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + } + } while (file); + +} + +static __maybe_unused int sxe2_filter_func_add(char *name) +{ + struct debug_func *func = NULL; + + func = kmalloc(sizeof(struct debug_func), GFP_ATOMIC); + if (!func) { + sxe2_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", + PAGE_SIZE); + return -ENOMEM; + } + strscpy(func->name, name, sizeof(func->name)); + INIT_LIST_HEAD(&func->list); + + list_add_rcu(&func->list, &g_sxe2_debug_rdma.filter_func); + return 0; +} + +static __maybe_unused void sxe2_filter_func_del(char *name) +{ + struct debug_func *func = NULL; + + list_for_each_entry_rcu(func, &g_sxe2_debug_rdma.filter_func, list) { + if (!strcmp(func->name, name)) { + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + return; + } + } +} + +void sxe2_filter_func_clear(void) +{ + struct debug_func *func = NULL; + + do { + func = list_first_or_null_rcu(&g_sxe2_debug_rdma.filter_func, + struct debug_func, list); + if (func) { + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + } + } while (func); + +} + +static void sxe2_file_close(struct file **file) +{ + filp_close(*file, NULL); + *file = NULL; +} + +static int sxe2_file_open(struct sxe2_log *log, struct file **pp_file) +{ + struct file *file; + int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE; + int flags_rewrite = O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC; + int err = 0; + int len = 0; + char filename[FILE_NAME_SIZE]; + +#ifdef SWITCH_FILE + memset(filename, 0, FILE_NAME_SIZE); + len += snprintf(filename, PAGE_SIZE, "%s", log->file_path); 
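+	/*
+	 * Two rotation modes: file_num == 0 appends to a fresh timestamped
+	 * file with no space cap, while file_num > 0 cycles through a fixed
+	 * pool of numbered files, truncating each one as it is reused.
+	 */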
+ if (log->file_num == 0) { + time_for_file_name(filename + len, FILE_NAME_SIZE - len); + } else { + snprintf(filename + len, FILE_NAME_SIZE - len, "%04d", + log->index++); + log->index = log->index % log->file_num; + } + + if (log->file_num == 1 && log->file != NULL) { + sxe2_file_close(&log->file); + log->file_pos = 0; + } +#else + memset(filename, 0, FILE_NAME_SIZE); + strscpy(filename, path, FILE_NAME_SIZE); +#endif + if (log->file_num == 0) { + file = filp_open(filename, flags_new, 0666); + } else { + file = filp_open(filename, flags_rewrite, 0666); + if (IS_ERR(file)) { + err = (int)PTR_ERR(file); + if (err == -ENOENT) + file = filp_open(filename, flags_new, 0666); + } + } + if (IS_ERR(file)) { + err = (int)PTR_ERR(file); + sxe2_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", + filename, err); + goto l_out; + } + mapping_set_gfp_mask(file->f_path.dentry->d_inode->i_mapping, GFP_NOFS); + + sxe2_print(KERN_DEBUG, NULL, "redirect file %s\n", filename); + + *pp_file = file; + +l_out: + return err; +} + +static void sxe2_file_sync(struct file *file) +{ + struct address_space *mapping; + void *journal; + int ret = 0; + int err; + + (void)ret; + (void)err; + + if (!file || !file->f_op || !file->f_op->fsync) + goto l_end; + + journal = current->journal_info; + current->journal_info = NULL; + + mapping = file->f_mapping; + + ret = filemap_fdatawrite(mapping); + + err = file->f_op->fsync(file, 0, file->f_mapping->host->i_size, 1); + + current->journal_info = journal; + +l_end: + return; +} + +static void sxe2_klog_in(struct sxe2_log *log, char *buf, const int len) +{ + int begin = 0; + int end = 0; + int free_size; + unsigned long flags; + + spin_lock_irqsave(&log->lock, flags); + + if (log->head > log->tail) { + sxe2_print(KERN_WARNING, NULL, + "FAILURE: log head exceeds log tail\n"); + SXE2_BUG_NO_SYNC(); + } + + free_size = log->buf_size - (log->tail - log->head); + + if (free_size <= len) { + log->is_drop = 1; + spin_unlock_irqrestore(&log->lock, flags); + return; + } + + begin = log->tail % log->buf_size; + end = (log->tail + len) % log->buf_size; + + if (begin < end) { + memcpy(log->buf + begin, buf, len); + } else { + memcpy(log->buf + begin, buf, log->buf_size - begin); + memcpy(log->buf, buf + log->buf_size - begin, end); + } + + log->tail = log->tail + len; + + spin_unlock_irqrestore(&log->lock, flags); + +} + +static void sxe2_klog_out(struct sxe2_log *log) +{ + int len = 0; + int rc = 0; + long long tail; + int begin; + int end; + int schedule_count_th = 0; + const int max_loop = 4096; + +#ifdef SWITCH_FILE + struct file *file = NULL; +#endif + + if (log->file == NULL) { + rc = sxe2_file_open(log, &log->file); + if (log->file != NULL) + log->file_pos = 0; + else + return; + } + + do { + tail = log->tail; + begin = log->head % log->buf_size; + end = tail % log->buf_size; + len = 0; + rc = 0; + + schedule_count_th++; + if (schedule_count_th >= max_loop) { + schedule_count_th = 0; + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + } + + if (log->is_drop) { + rc = sxe2_file_write(log->file, DEBUG_DROP_LOG_STRING, + strlen(DEBUG_DROP_LOG_STRING)); + if (rc < 0) + break; + log->is_drop = 0; + } + + if (begin < end) { + rc = sxe2_file_write(log->file, log->buf + begin, + end - begin); + if (rc > 0) + len += rc; + } else if (begin > end) { + rc = sxe2_file_write(log->file, log->buf + begin, + log->buf_size - begin); + if (rc > 0) { + len += rc; + rc = sxe2_file_write(log->file, log->buf, end); + if (rc > 0) + len += rc; + } + } + log->head += len; + log->file_pos += len; + + 
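/*
+		 * head and tail are monotonic byte counters; only the modulo
+		 * mapping above folds them into the ring, so head overtaking
+		 * tail can only mean the buffer accounting was corrupted.
+		 */
+		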
LOG_BUG_ON(log->head > log->tail, + "FAILURE: log head exceeds log tail\n"); + } while (log->head != log->tail && rc > 0); + + if (rc < 0) { + sxe2_print(KERN_ERR, NULL, "write file %s error %d\n", + log->file_path, rc); + return; + } + +#ifdef SWITCH_FILE + if (log->file_pos >= log->file_size) { + rc = sxe2_file_open(log, &file); + if (rc >= 0 && log->file != NULL && log->file_num != 1) { + sxe2_file_close(&log->file); + log->file = file; + log->file_pos = 0; + } + } +#endif +} + +static int sxe2_klog_flush(void *arg) +{ + int i; + + while (!kthread_should_stop()) { + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + + for (i = 0; i < ARRAY_SIZE(g_sxe2_debug_rdma.log); i++) + sxe2_klog_out(&g_sxe2_debug_rdma.log[i]); + } + return 0; +} + +static int sxe2_klog_init(struct sxe2_log *log, long long buf_size, char *file_path, + long long file_size, u32 file_num) +{ + int rc = 0; + + memset(log, 0, sizeof(*log)); + spin_lock_init(&log->lock); + + log->buf = vmalloc(buf_size + PER_CPU_PAGE_SIZE); + if (!log->buf) { + rc = -ENOMEM; + goto l_end; + } + + log->file = NULL; + log->head = 0; + log->tail = 0; + log->buf_size = buf_size; + + log->file_path = file_path; + log->file_pos = 0; + log->file_size = file_size; + log->file_num = file_num; + log->index = 0; +l_end: + return rc; +} + +static void sxe2_klog_exit(struct sxe2_log *log) +{ + if (log->buf) { + vfree(log->buf); + log->buf = NULL; + } + if (log->file) + sxe2_file_close(&log->file); +} + +static inline char *sxe2_file_name_locale(char *file) +{ + char *p_slash = strrchr(file, '/'); + + return (p_slash == NULL) ? file : (p_slash + 1); +} + +void sxe2_level_set(int level) +{ + g_sxe2_debug_rdma.level = level; +} +EXPORT_SYMBOL(sxe2_level_set); + +s32 sxe2_level_get(void) +{ + return (s32)g_sxe2_debug_rdma.level; +} +EXPORT_SYMBOL(sxe2_level_get); + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_debug_rdma.status = status; +} + +s32 sxe2_bin_status_get(void) +{ + return (s32)g_sxe2_debug_rdma.status; +} + +void sxe2_log_string(enum debug_level level, const char *dev_name, const char *bdf, + const char *file, const char *func, int line, + const char *fmt, ...) +{ + struct sxe2_ctxt *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + const char *name = dev_name ? 
dev_name : ""; + + va_list args; + + if (level > g_sxe2_debug_rdma.level) { +#ifndef SXE2_CFG_RELEASE + if (!sxe2_filter_file_print(file) && + !sxe2_filter_func_print(func)) { + return; + } +#else + return; +#endif + } + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, get_cpu()); + put_cpu(); + + buf = (char *)ctxt->buff; + len = snprintf(buf, PAGE_SIZE, "%s", sxe2_debug_level_name(level)); + len += time_for_log(buf + len, PAGE_SIZE - len); + + if (bdf != NULL) { + len += snprintf( + buf + len, PAGE_SIZE - len, + "[%d][%d][%s][%s]%s:%4d:%s:", raw_smp_processor_id(), + current->pid, name, bdf, + sxe2_file_name_locale((char *)file), line, func); + } else { + len += snprintf( + buf + len, PAGE_SIZE - len, + "[%d][%d][%s]%s:%4d:%s:", raw_smp_processor_id(), + current->pid, name, sxe2_file_name_locale((char *)file), + line, func); + } + + va_start(args, fmt); + len += vsnprintf(buf + len, PAGE_SIZE - len, fmt, args); + va_end(args); + + if (!in_interrupt()) + local_irq_restore(flags); + + sxe2_klog_in(&g_sxe2_debug_rdma.log[DEBUG_TYPE_STRING], buf, len); + + wake_up_process(g_sxe2_debug_rdma.task); + +} +EXPORT_SYMBOL(sxe2_log_string); + +void sxe2_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str) +{ +#define LINE_TOTAL 16 + struct sxe2_ctxt *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + u32 i = 0; + u32 j = 0; + u32 max; + u32 mod; + + if (sxe2_bin_status_get() != true) + return; + + max = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + + len += time_for_log(buf + len, PER_CPU_PAGE_SIZE - len); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "[%d] %s %s():%d %s size:%d\n", current->pid, + sxe2_file_name_locale((char *)file), func, line, str, + size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < max; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + if (!in_interrupt()) + local_irq_restore(flags); + + sxe2_klog_in(&g_sxe2_debug_rdma.log[DEBUG_TYPE_BINARY], buf, len); + + wake_up_process(g_sxe2_debug_rdma.task); + +} +EXPORT_SYMBOL(sxe2_log_binary); + +void sxe2_log_sync(void) +{ + sxe2_file_sync(g_sxe2_debug_rdma.log[DEBUG_TYPE_STRING].file); + sxe2_file_sync(g_sxe2_debug_rdma.log[DEBUG_TYPE_BINARY].file); +} +EXPORT_SYMBOL(sxe2_log_sync); + +static void sxe2_log_file_prefix_add(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s%s.", log_path_p, + 
VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s%s.", log_path_p, + VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s%s.", log_path_p, + LOG_FILE_PREFIX); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s%s.", log_path_p, + BINARY_FILE_PREFIX); + } + +} + +static void sxe2_log_file_prefix_add_default(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s/%s.", + log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s/%s.", + log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s/%s.", + log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s/%s.", + log_path_p, BINARY_FILE_PREFIX); + } + +} + +static void sxe2_log_file_path_set(bool is_vf) +{ + if (is_vf) { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s.", + VF_LOG_FILE_PATH); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s.", + VF_BINARY_FILE_PATH); + } else { + snprintf(g_log_path_str_rdma, LOG_PATH_LEN, "%s.", + LOG_FILE_PATH); + snprintf(g_log_path_bin_rdma, LOG_PATH_LEN, "%s.", + BINARY_FILE_PATH); + } + +} + +static void sxe2_log_info_set(void) +{ +#ifdef SXE2_CFG_RELEASE + g_sxe2_debug_rdma.level = LEVEL_INFO; + g_sxe2_debug_rdma.status = false; +#else + g_sxe2_debug_rdma.level = LEVEL_DEBUG; + g_sxe2_debug_rdma.status = true; +#endif +} + +static int sxe2_log_alloc_res_per_cpu(void) +{ + int rc = 0; + struct sxe2_ctxt *ctxt = NULL; + int i; + int nid; + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, i); + memset(ctxt, 0, sizeof(*ctxt)); + } + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, i); + nid = cpu_to_node(i); + ctxt->page = alloc_pages_node(nid, GFP_ATOMIC, PAGE_ORDER); + if (!ctxt->page) { + rc = -ENOMEM; + sxe2_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", + PER_CPU_PAGE_SIZE); + goto free_cpu_buff; + } + ctxt->buff = page_address(ctxt->page); + } + +end: + return rc; +free_cpu_buff: + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, i); + if (ctxt && ctxt->page) + __free_pages(ctxt->page, PAGE_ORDER); + } + goto end; +} + +static void sxe2_log_free_res_per_cpu(void) +{ + struct sxe2_ctxt *ctxt = NULL; + int i; + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, i); + if (ctxt && ctxt->page) + __free_pages(ctxt->page, PAGE_ORDER); + } +} + +int sxe2_log_init(bool is_vf) +{ + struct task_struct *task = NULL; + int rc = 0; + u32 file_num = 0; + u32 log_path_len = 0; + u32 input_log_space = sxe2_log_space_size_query(); + u32 input_log_file_size = sxe2_log_file_size_query(); + unsigned int log_file_size = 0; + char *log_path_p = NULL; + struct sxe2_log *log_bin = &g_sxe2_debug_rdma.log[DEBUG_TYPE_BINARY]; + struct sxe2_log *log_str = &g_sxe2_debug_rdma.log[DEBUG_TYPE_STRING]; + + INIT_LIST_HEAD(&g_sxe2_debug_rdma.filter_file); + INIT_LIST_HEAD(&g_sxe2_debug_rdma.filter_func); + sxe2_log_info_set(); + + g_sxe2_debug_rdma.ctxt = alloc_percpu(struct sxe2_ctxt); + if (!g_sxe2_debug_rdma.ctxt) { + rc = -ENOMEM; + sxe2_print(KERN_ERR, NULL, "alloc percpu failed\n"); + goto l_end; + } + + rc = sxe2_log_alloc_res_per_cpu(); + if (rc) + goto l_free_cpu_buff; + + log_path_p = sxe2_log_path_query(); + log_path_len = strlen(log_path_p); + if (log_path_p != NULL && log_path_p[0] == '/') { + if (log_path_p[log_path_len] == '/') + sxe2_log_file_prefix_add(is_vf, log_path_p); + else + sxe2_log_file_prefix_add_default(is_vf, log_path_p); + } 
else { + sxe2_log_file_path_set(is_vf); + } + if (input_log_file_size < DRV_LOG_FILE_SIZE_MIN_MB || + input_log_file_size > DRV_LOG_FILE_SIZE_MAX_MB) { + sxe2_log_file_size_modify(LOG_FILE_SIZE >> MEGABYTE); + input_log_file_size = LOG_FILE_SIZE >> MEGABYTE; + } + if (input_log_space && input_log_space < input_log_file_size) { + sxe2_log_file_size_modify(input_log_space); + input_log_file_size = input_log_space; + } + log_file_size = input_log_file_size << MEGABYTE; + if (input_log_space) { + file_num = input_log_space / input_log_file_size; + if (file_num == 0) { + sxe2_print(KERN_ERR, NULL, "filenum shouldnot be 0\n"); + SXE2_BUG(); + } + } else { + file_num = 0; + } + + rc = sxe2_klog_init(log_str, BUF_SIZE, g_log_path_str_rdma, + log_file_size, file_num); + if (rc < 0) + goto l_free_cpu_ctxt_pages; + + rc = sxe2_klog_init(log_bin, BUF_SIZE, g_log_path_bin_rdma, + BINARY_FILE_SIZE, 0); + if (rc < 0) + goto l_free_string; + + task = kthread_create(sxe2_klog_flush, NULL, "sxe2_klog_flush"); + if (IS_ERR(task)) { + rc = (int)PTR_ERR(task); + sxe2_print(KERN_ERR, NULL, "Create kernel thread, err: %d\n", + rc); + goto l_free_binary; + } + wake_up_process(task); + g_sxe2_debug_rdma.task = task; + rc = 0; + sxe2_print(KERN_INFO, NULL, + "sxe2 debug init logpath[%s] strlogsize[%dM] filenum[%d]\n", + g_log_path_str_rdma, (log_file_size >> MEGABYTE), + log_str->file_num); +l_end: + return rc; +l_free_binary: + sxe2_klog_exit(&g_sxe2_debug_rdma.log[DEBUG_TYPE_BINARY]); +l_free_string: + sxe2_klog_exit(&g_sxe2_debug_rdma.log[DEBUG_TYPE_STRING]); +l_free_cpu_ctxt_pages: + sxe2_log_free_res_per_cpu(); +l_free_cpu_buff: + free_percpu(g_sxe2_debug_rdma.ctxt); + goto l_end; +} +EXPORT_SYMBOL(sxe2_log_init); + +void sxe2_log_exit(void) +{ + int i = 0; + struct sxe2_ctxt *ctxt; + + if (g_sxe2_debug_rdma.task == NULL) + return; + + kthread_stop(g_sxe2_debug_rdma.task); + + for (i = 0; i < ARRAY_SIZE(g_sxe2_debug_rdma.log); i++) + sxe2_klog_exit(&g_sxe2_debug_rdma.log[i]); + + if (g_sxe2_debug_rdma.ctxt) { + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe2_debug_rdma.ctxt, i); + if (ctxt && ctxt->page) + __free_pages(ctxt->page, PAGE_ORDER); + } + + free_percpu(g_sxe2_debug_rdma.ctxt); + g_sxe2_debug_rdma.ctxt = NULL; + } + + sxe2_print(KERN_INFO, NULL, "sxe2 debug exit\n"); +} +EXPORT_SYMBOL(sxe2_log_exit); + +#elif !defined SXE2_CFG_RELEASE + +s32 g_sxe2_bin_status_rdma; +char *test_bin_buf_rdma; +#define DRV_TEST_LOG_PATH_LEN (100) +enum debug_level g_sxe2_log_level_rdma = LEVEL_DEBUG; +char g_test_log_path_str_rdma[DRV_TEST_LOG_PATH_LEN] = { 0 }; +FILE *g_file_test_rdma; +static int g_loop_index_rdma; +static bool g_loop_flag_rdma; + +s32 sxe2_log_init(bool is_vf) +{ + return 0; +} + +void sxe2_level_set(s32 level) +{ + g_sxe2_log_level_rdma = level; +} + +s32 sxe2_level_get(void) +{ + return (s32)g_sxe2_log_level_rdma; +} + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_bin_status_rdma = status; +} + +s32 sxe2_bin_status_get(void) +{ + return g_sxe2_bin_status_rdma; +} + +void sxe2_log_sync(void) +{ +} + +void sxe2_log_exit(void) +{ + if (test_bin_buf_rdma != NULL) { + free(test_bin_buf_rdma); + test_bin_buf_rdma = NULL; + } +} + +void sxe2_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str) +{ +#define LINE_TOTAL 16 + u32 i = 0; + u32 j = 0; + u32 iMax; + u32 mod; + char *buf = NULL; + int len = 0; + + if (sxe2_bin_status_get() != true) + return; + + buf = zalloc(PER_CPU_PAGE_SIZE); + test_bin_buf_rdma = buf; + + iMax = size / LINE_TOTAL; + mod 
= size % LINE_TOTAL; + + len += snprintf(buf + len, (size_t)(PER_CPU_PAGE_SIZE - len), + "%s size:%d\n", str, size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < iMax; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, (size_t)(PER_CPU_PAGE_SIZE - len), + "0x%llx 0x%llx: ", addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, + (size_t)(PER_CPU_PAGE_SIZE - len), + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += snprintf(buf + len, (size_t)(PER_CPU_PAGE_SIZE - len), + "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += snprintf(buf + len, (size_t)(PER_CPU_PAGE_SIZE - len), + "0x%llx 0x%llx: ", addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += snprintf(buf + len, + (size_t)(PER_CPU_PAGE_SIZE - len), + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += snprintf(buf + len, (size_t)(PER_CPU_PAGE_SIZE - len), + "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + printf("buf:%s", buf); +} + +S8 *sxe2_log_path_query(void) +{ + return NULL; +} + +STATIC S32 get_now_ms_int(S8 *buff, size_t buf_len) +{ + struct timeval tv; + struct tm td; + + gettimeofday(&tv, NULL); + (void)localtime_r(&tv.tv_sec, &td); + + return snprintf(buff, buf_len, "[%04d-%02d-%02d;%02d:%02d:%02d.%06ld]", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec, tv.tv_usec); +} + +STATIC S32 get_now_sec(S8 *buff, S32 buf_len) +{ + struct timeval tv; + struct tm td; + + gettimeofday(&tv, NULL); + (void)localtime_r(&tv.tv_sec, &td); + + return snprintf(buff, (size_t)buf_len, "%04d-%02d-%02d_%02d:%02d:%02d", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec); +} + +void sxe2_log_string(enum debug_level level, const char *dev_name, const char *bdf, + const char *file, const char *func, int line, + const char *fmt, ...) +{ + size_t ret; + S32 len = 0; + S8 *log_buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + + if (log_buf == NULL) + return; + va_list args; + const char *name = dev_name ? 
dev_name : "";
+
+	if (level > g_sxe2_log_level_rdma)
+		goto out; /* was a bare return, which leaked log_buf */
+
+	len = snprintf(log_buf, PAGE_SIZE, "%s", sxe2_debug_level_name(level));
+	len += get_now_ms_int(log_buf + len, (size_t)(PAGE_SIZE - len));
+
+	if (bdf != NULL) {
+		len += snprintf(log_buf + len, (size_t)(PAGE_SIZE - len),
+				"[%d][%s][%s]%s:%4d:%s:", (int)pthread_self(),
+				name, bdf, filename_printf((S8 *)file), line,
+				func);
+	} else {
+		len += snprintf(log_buf + len, (size_t)(PAGE_SIZE - len),
+				"[%d][%s]%s:%4d:%s:", (int)pthread_self(), name,
+				filename_printf((S8 *)file), line, func);
+	}
+
+	va_start(args, fmt);
+	len += vsnprintf(log_buf + len, (size_t)(PAGE_SIZE - len), fmt, args);
+	va_end(args);
+
+	if (g_file_test_rdma == NULL) {
+		g_file_test_rdma = fopen(g_test_log_path_str_rdma, "ab");
+		if (g_file_test_rdma == NULL) {
+			perror("fopen");
+			goto out;
+		}
+	}
+
+	ret = fwrite(log_buf, (size_t)len, 1, g_file_test_rdma);
+	if (ret != 1)
+		perror("fwrite");
+
+	fflush(g_file_test_rdma);
+
+out:
+	kfree(log_buf);
+}
+
+void sxe2_set_log_loop_index(S32 index)
+{
+	g_loop_index_rdma = index;
+}
+
+void sxe2_set_log_loop_flag(bool flag)
+{
+	g_loop_flag_rdma = flag;
+}
+
+int sxe2_debug_init(void)
+{
+	S32 file_name_len = 0;
+
+	g_sxe2_log_level_rdma = LEVEL_DEBUG;
+
+	if (g_file_test_rdma == NULL) {
+		file_name_len += snprintf(g_test_log_path_str_rdma,
+					  DRV_TEST_LOG_PATH_LEN, "%s.",
+					  SXE2_DRV_TEST_LOG_PATH);
+		file_name_len +=
+			get_now_sec(g_test_log_path_str_rdma + file_name_len,
+				    DRV_TEST_LOG_PATH_LEN - file_name_len);
+		if (g_loop_flag_rdma) {
+			snprintf(
+				g_test_log_path_str_rdma + file_name_len,
+				(size_t)(DRV_TEST_LOG_PATH_LEN - file_name_len),
+				"-loop%d", g_loop_index_rdma);
+		}
+
+		g_file_test_rdma = fopen(g_test_log_path_str_rdma, "ab");
+		if (g_file_test_rdma == NULL) {
+			perror("fopen");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+void sxe2_debug_exit(void)
+{
+	if (g_file_test_rdma != NULL) {
+		fclose(g_file_test_rdma);
+		g_file_test_rdma = NULL;
+	}
+}
+
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.h
new file mode 100644
index 0000000000000000000000000000000000000000..f56f9178d164590be42ca4e6425ceea4f5c87bd5
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_log.h
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_rdma_log.h
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _SXE2_DRV_RDMA_LOG_H_
+#define _SXE2_DRV_RDMA_LOG_H_
+
+#include <linux/types.h>    /* assumed: original include target lost in extraction */
+#include <linux/spinlock.h> /* assumed: spinlock_t is embedded in struct sxe2_log */
+
+#ifdef SXE2_UT
+#define STATIC
+#else
+#ifndef STATIC
+#define STATIC static
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SXE2_HOST(ins) ((ins)->host->host_no)
+
+#define LOG_INFO_PREFIX_LEN 32
+#define LOG_ERROR_PREFIX_LEN 33
+#define MEGABYTE 20 /* shift count: sizes in MiB are shifted by 20 bits */
+
+enum debug_level {
+	LEVEL_ERROR,
+	LEVEL_WARN,
+	LEVEL_INFO,
+	LEVEL_DEBUG,
+};
+
+static const char * const sxe2_rdma_level[] = {
+	[LEVEL_ERROR] = "ERROR",
+	[LEVEL_WARN] = "WARN",
+	[LEVEL_INFO] = "INFO",
+	[LEVEL_DEBUG] = "DEBUG",
+};
+
+static inline const char *sxe2_debug_level_name(enum debug_level lv)
+{
+	return sxe2_rdma_level[lv];
+}
+
+#ifdef __KERNEL__
+
+#define PRINT_DEBUG KERN_DEBUG
+#define PRINT_INFO KERN_INFO
+#define PRINT_WARN KERN_WARNING
+#define PRINT_ERR KERN_ERR
+
+#define sxe2_print(level, bdf, fmt, ...) \
+	do { \
+		if (level) { /* was "if (!level)", which disabled every print */ \
+			if (!strcmp(level, KERN_DEBUG)) { \
+				pr_debug("[SXE2][%s]%s():%d:" fmt, bdf ? bdf : "unknown", \
+					 __func__, __LINE__, ##__VA_ARGS__); \
+			} else if (!strcmp(level, KERN_INFO)) { \
+				pr_info("[SXE2][%s]%s():%d:" fmt, bdf ? bdf : "unknown", \
+					__func__, __LINE__, ##__VA_ARGS__); \
+			} else if (!strcmp(level, KERN_WARNING)) { \
+				pr_warn("[SXE2][%s]%s():%d:" fmt, bdf ? bdf : "unknown", \
+					__func__, __LINE__, ##__VA_ARGS__); \
+			} else if (!strcmp(level, KERN_ERR)) { \
+				pr_err("[SXE2][%s]%s():%d:" fmt, bdf ? bdf : "unknown", \
+				       __func__, __LINE__, ##__VA_ARGS__); \
+			} \
+		} \
+	} while (0)
+#else
+
+#define PRINT_DEBUG LEVEL_DEBUG
+#define PRINT_INFO LEVEL_INFO
+#define PRINT_WARN LEVEL_WARN
+#define PRINT_ERR LEVEL_ERROR
+
+#include <stdio.h>    /* assumed: original include targets lost in extraction */
+#include <string.h>   /* assumed: strrchr() */
+#include <pthread.h>  /* assumed: pthread_self() */
+#include <sys/time.h> /* assumed: gettimeofday() */
+
+#define __percpu
+
+static inline U64 get_now_ms(void)
+{
+	struct timeval tv;
+	U64 timestamp = 0;
+
+	gettimeofday(&tv, NULL);
+	timestamp = tv.tv_sec * 1000 + tv.tv_usec / 1000;
+	return timestamp;
+}
+
+#define filename_printf(x) (strrchr((x), '/') ? strrchr((x), '/') + 1 : (x))
+
+#define sxe2_print(level, bdf, fmt, ...) \
+	do { \
+		if (level <= 4) { \
+			if (level == LEVEL_DEBUG) { \
+				printf("DEBUG:%llu:%s:%s():%d:[%lu][%s];" fmt, \
+				       get_now_ms(), \
+				       filename_printf(__FILE__), \
+				       __func__, __LINE__, pthread_self(), \
+				       bdf ? bdf : "unknown", ##__VA_ARGS__); \
+			} else if (level == LEVEL_INFO) { \
+				printf("INFO:%llu:%s:%s():%d:[%lu][%s];" fmt, \
+				       get_now_ms(), \
+				       filename_printf(__FILE__), \
+				       __func__, __LINE__, pthread_self(), \
+				       bdf ? bdf : "unknown", ##__VA_ARGS__); \
+			} else if (level == LEVEL_WARN) { \
+				printf("WARN:%llu:%s:%s():%d:[%lu][%s];" fmt, \
+				       get_now_ms(), \
+				       filename_printf(__FILE__), \
+				       __func__, __LINE__, pthread_self(), \
+				       bdf ? bdf : "unknown", ##__VA_ARGS__); \
+			} else if (level == LEVEL_ERROR) { \
+				printf("ERROR:%llu:%s:%s():%d:[%lu][%s];" fmt, \
+				       get_now_ms(), \
+				       filename_printf(__FILE__), \
+				       __func__, __LINE__, pthread_self(), \
+				       bdf ? bdf : "unknown", ##__VA_ARGS__); \
+			} \
+		} \
+	} while (0)
+
+#endif
+
+#define LOG_BUG_ON(cond, fmt, ...) 
\
+	do { \
+		if ((cond)) { \
+			DRV_RDMA_LOG_ERROR(fmt, ##__VA_ARGS__); \
+			LOG_SYNC(); \
+			BUG(); \
+		} \
+	} while (0)
+
+#define DEBUG_TRACE_MAGIC 0x456789
+#define BUF_SIZE (1024LL << 10)
+
+#define PAGE_ORDER 2
+#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << PAGE_ORDER))
+
+#define LOG_FILE_SIZE (200LL << 20)
+#define BINARY_FILE_SIZE (200LL << 20)
+
+#define VF_LOG_FILE_PATH "/var/log/sxe2_drv_rdma_vf.log"
+#define VF_LOG_FILE_PREFIX "sxe2_drv_rdma_vf.log"
+#define VF_BINARY_FILE_PATH "/var/log/sxe2_drv_rdma_vf.bin"
+#define VF_BINARY_FILE_PREFIX "sxe2_drv_rdma_vf.bin"
+
+#define LOG_FILE_PATH "/var/log/sxe2_drv_rdma.log"
+#define LOG_FILE_PREFIX "sxe2_drv_rdma.log"
+#define BINARY_FILE_PATH "/var/log/sxe2_drv_rdma.bin"
+#define BINARY_FILE_PREFIX "sxe2_drv_rdma.bin"
+
+#define DEBUG_DROP_LOG_STRING "\nwarning: dropped some logs\n\n"
+
+extern u32 g_sxe2_rdma_dmesg_level;
+
+enum {
+	DEBUG_TYPE_STRING,
+	DEBUG_TYPE_BINARY,
+	DEBUG_TYPE_NR,
+};
+
+struct debug_func {
+	struct list_head list;
+	char name[64];
+};
+
+struct debug_file {
+	struct list_head list;
+	char name[64];
+};
+
+struct sxe2_log {
+	struct {
+		char *buf;
+		int buf_size;
+		long long head;
+		long long tail;
+		spinlock_t lock;
+		unsigned char is_drop;
+	};
+	struct {
+		char *file_path;
+		struct file *file;
+		long long file_pos;
+		long long file_size;
+		unsigned int file_num;
+		unsigned int index;
+	};
+};
+
+struct sxe2_thread_local {
+	int magic;
+	char data[];
+};
+
+struct sxe2_ctxt {
+	struct page *page;
+	void *buff;
+};
+
+struct sxe2_thread_key {
+	int offset;
+};
+
+struct sxe2_debug {
+	enum debug_level level;
+	bool status;
+	u16 key_offset;
+	struct sxe2_ctxt __percpu *ctxt;
+	struct list_head filter_func;
+	struct list_head filter_file;
+	struct task_struct *task;
+	struct sxe2_log log[DEBUG_TYPE_NR];
+};
+
+void sxe2_level_set(int level);
+int sxe2_level_get(void);
+
+void sxe2_bin_status_set(bool status);
+int sxe2_bin_status_get(void);
+
+int sxe2_log_init(bool is_vf);
+void sxe2_log_exit(void);
+
+void sxe2_log_string(enum debug_level level, const char *dev_name, const char *bdf,
+		     const char *file, const char *func, int line,
+		     const char *fmt, ...);
+#ifdef SXE2_CFG_DEBUG
+
+void sxe2_log_binary(const char *file, const char *func, int line, u8 *ptr,
+		     u64 addr, u32 size, char *str);
+
+#define DATA_DUMP(ptr, size, str) \
+	sxe2_log_binary(__FILE__, __func__, __LINE__, (u8 *)ptr, 0, size, \
+			str)
+#endif
+
+void sxe2_log_sync(void);
+
+#if (defined SXE2_CFG_DEBUG && defined __KERNEL__)
+int time_for_file_name(char *buff, int buf_len);
+int sxe2_file_write(struct file *file, char *buf, int len);
+void sxe2_filter_func_clear(void);
+
+#endif
+
+#if defined SXE2_CFG_DEBUG && defined __KERNEL__
+
+#define WRITE_LOG(level, devname, bdf, fmt, ...) \
+	sxe2_log_string(level, devname, bdf, __FILE__, __func__, __LINE__, \
+			fmt, ##__VA_ARGS__)
+
+#define LOG_SYNC() sxe2_log_sync()
+
+#define DRV_RDMA_LOG_DEBUG(fmt, ...) \
+	WRITE_LOG(LEVEL_DEBUG, NULL, NULL, fmt, ##__VA_ARGS__)
+#define DRV_RDMA_LOG_INFO(fmt, ...) \
+	WRITE_LOG(LEVEL_INFO, NULL, NULL, fmt, ##__VA_ARGS__)
+#define DRV_RDMA_LOG_WARN(fmt, ...) \
+	WRITE_LOG(LEVEL_WARN, NULL, NULL, fmt, ##__VA_ARGS__)
+#define DRV_RDMA_LOG_ERROR(fmt, ...) \
+	WRITE_LOG(LEVEL_ERROR, NULL, NULL, fmt, ##__VA_ARGS__)
+
+#define DRV_RDMA_LOG_DEBUG_BDF(fmt, ...) \
+	WRITE_LOG(LEVEL_DEBUG, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \
+		  ##__VA_ARGS__)
+#define DRV_RDMA_LOG_INFO_BDF(fmt, ...) 
\ + WRITE_LOG(LEVEL_INFO, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) +#define DRV_RDMA_LOG_WARN_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) +#define DRV_RDMA_LOG_ERROR_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) + +#define DRV_RDMA_LOG_DEV_DEBUG(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_DEBUG) \ + dev_dbg(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + DRV_RDMA_LOG_DEBUG_BDF(format, ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_INFO(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_INFO) \ + dev_info(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + DRV_RDMA_LOG_INFO_BDF(format, ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_WARN(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_WARNING) \ + dev_warn(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + DRV_RDMA_LOG_WARN_BDF(format, ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_ERR(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_ERR) \ + dev_err(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + DRV_RDMA_LOG_ERROR_BDF(format, ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_PR_DEBUG(format, arg...) \ + pr_debug("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_INFO(format, arg...) \ + pr_info("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_WARN(format, arg...) \ + pr_warn("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_ERR(format, arg...) pr_err("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_DEBUG(format, arg...) \ + pr_debug("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_INFO(format, arg...) \ + pr_info("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_WARN(format, arg...) \ + pr_warn("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_ERR(format, arg...) \ + pr_err("sxe2_rdmavf: " format, ##arg) + +#else + +#if defined SXE2_CFG_RELEASE + +#define UNUSED1(x) ((void)(x)) + +#define DRV_RDMA_LOG_DEBUG(fmt, ...) UNUSED1(fmt) +#define DRV_RDMA_LOG_INFO(fmt, ...) UNUSED1(fmt) +#define DRV_RDMA_LOG_WARN(fmt, ...) UNUSED1(fmt) +#define DRV_RDMA_LOG_ERROR(fmt, ...) UNUSED1(fmt) + +#define UNUSED(x, y) \ + do { \ + (void)(x); \ + (void)(y); \ + } while (0) + +#define DRV_RDMA_LOG_DEBUG_BDF(fmt, ...) UNUSED(fmt, rdma_dev) +#define DRV_RDMA_LOG_INFO_BDF(fmt, ...) UNUSED(fmt, rdma_dev) +#define DRV_RDMA_LOG_WARN_BDF(fmt, ...) UNUSED(fmt, rdma_dev) +#define DRV_RDMA_LOG_ERROR_BDF(fmt, ...) UNUSED(fmt, rdma_dev) + +#define DRV_RDMA_LOG_DEV_DEBUG(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_DEBUG) \ + dev_dbg(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_INFO(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_INFO) \ + dev_info(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_WARN(format, arg...) \ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_WARNING) \ + dev_warn(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_DEV_ERR(format, arg...) 
\ + do { \ + if (!rdma_dev) \ + break; \ + if (g_sxe2_rdma_dmesg_level >= LOGLEVEL_ERR) \ + dev_err(&rdma_dev->rdma_func->pcidev->dev, format, \ + ##arg); \ + } while (0) + +#define DRV_RDMA_LOG_PR_DEBUG(format, arg...) \ + pr_debug("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_INFO(format, arg...) \ + pr_info("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_WARN(format, arg...) \ + pr_warn("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_ERR(format, arg...) pr_err("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_DEBUG(format, arg...) \ + pr_debug("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_INFO(format, arg...) \ + pr_info("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_WARN(format, arg...) \ + pr_warn("sxe2_rdmavf: " format, ##arg) +#define DRV_RDMA_LOG_PRVF_ERR(format, arg...) \ + pr_err("sxe2_rdmavf: " format, ##arg) + +#else + +#define WRITE_LOG(level, devname, bdf, fmt, ...) \ + sxe2_log_string(level, devname, bdf ? bdf : "unknown", __FILE__, \ + __func__, __LINE__, fmt, ##__VA_ARGS__) + +#define SXE2_DRV_TEST_LOG_PATH "./sxe2_drv_rdma_test.log" +#define LOG_NAME_LEN 64 +#define LOG_LEVEL(log_lvl, fmt, ...) sxe2_print(log_lvl, fmt, ##__VA_ARGS__) +#define LOG_SYNC() +#define LOG_DEBUG_DATA_DUMP(ptr, size, str) + +static inline BOOL sxe2_fs_requires_dev(struct file *fp) +{ + (void)fp; + return SHCA_TRUE; +} + +void sxe2_set_log_loop_flag(bool flag); +void sxe2_set_log_loop_index(int index); +int sxe2_debug_init(void); +void sxe2_debug_exit(void); + +#define DRV_RDMA_LOG_DEBUG(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, NULL, NULL, fmt, ##__VA_ARGS__) +#define DRV_RDMA_LOG_INFO(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, NULL, NULL, fmt, ##__VA_ARGS__) +#define DRV_RDMA_LOG_WARN(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, NULL, NULL, fmt, ##__VA_ARGS__) +#define DRV_RDMA_LOG_ERROR(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, NULL, NULL, fmt, ##__VA_ARGS__) + +#define DRV_RDMA_LOG_DEBUG_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) +#define DRV_RDMA_LOG_INFO_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) +#define DRV_RDMA_LOG_WARN_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) +#define DRV_RDMA_LOG_ERROR_BDF(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, rdma_dev->ibdev.name, rdma_dev->bdf, fmt, \ + ##__VA_ARGS__) + +#define DRV_RDMA_LOG_DEV_DEBUG(format, arg...) \ + DRV_RDMA_LOG_DEBUG_BDF(format, ##arg) +#define DRV_RDMA_LOG_DEV_INFO(format, arg...) \ + DRV_RDMA_LOG_INFO_BDF(format, ##arg) +#define DRV_RDMA_LOG_DEV_WARN(format, arg...) \ + DRV_RDMA_LOG_WARN_BDF(format, ##arg) +#define DRV_RDMA_LOG_DEV_ERR(format, arg...) \ + DRV_RDMA_LOG_ERROR_BDF(format, ##arg) + +#define DRV_RDMA_LOG_PR_DEBUG(format, arg...) \ + pr_debug("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_INFO(format, arg...) \ + pr_info("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_WARN(format, arg...) \ + pr_warn("sxe2_rdma: " format, ##arg) +#define DRV_RDMA_LOG_PR_ERR(format, arg...) 
pr_err("sxe2_rdma: " format, ##arg)
+#endif
+
+#define LOG_SYNC()
+
+#endif
+
+#if defined SXE2_CFG_RELEASE
+#define SXE2_BUG_ON(cond) \
+	do { \
+		if ((cond)) { \
+			pr_err( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+			LOG_ERROR( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+		} \
+	} while (0)
+
+#define SXE2_BUG()
+#define SXE2_BUG_ON_NO_SYNC(cond) \
+	do { \
+		if ((cond)) { \
+			pr_err( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+			LOG_ERROR( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+		} \
+	} while (0)
+
+#define SXE2_BUG_NO_SYNC()
+#else
+#define SXE2_BUG_ON(cond) \
+	do { \
+		if ((cond)) { \
+			pr_err( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+			LOG_ERROR( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+			LOG_SYNC(); \
+		} \
+		BUG_ON(cond); \
+	} while (0)
+
+/* SXE2_BUG()/SXE2_BUG_NO_SYNC() take no arguments; "void" is not a valid
+ * macro parameter name and BUG() must be invoked without one. */
+#define SXE2_BUG() \
+	do { \
+		LOG_SYNC(); \
+		BUG(); \
+	} while (0)
+
+#define SXE2_BUG_ON_NO_SYNC(cond) \
+	do { \
+		if ((cond)) { \
+			pr_err( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+			LOG_ERROR( \
+				"BUG_ON's condition(%s) has been triggered\n", \
+				#cond); \
+		} \
+		BUG_ON(cond); \
+	} while (0)
+
+#define SXE2_BUG_NO_SYNC() BUG()
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.c
new file mode 100644
index 0000000000000000000000000000000000000000..1c05b2a51ef5c59d690ba23f8aada26f7ed77b1c
--- /dev/null
+++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.c
@@ -0,0 +1,1378 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd.
+ *
+ * @file: sxe2_drv_rdma_pble.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_drv_rdma_common.h"
+#include "sxe2_drv_rdma_pble.h"
+#include "sxe2_drv_rdma_rcms.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_mq.h"
+#include "sxe2_drv_rdma_virtchnl.h"
+#include <linux/slab.h>     /* assumed: original include targets lost in extraction */
+#include <linux/bitmap.h>   /* assumed: bitmap_find_next_zero_area() etc. */
+#include <linux/bitfield.h> /* assumed: FIELD_PREP() */
+
+static int sxe2_pbl_buddy_init(struct sxe2_pbl_buddy *buddy, u32 max_order)
+{
+	int ret = SXE2_OK;
+	u32 i;
+	u32 num = 0;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(buddy->dev);
+
+	buddy->max_order = max_order;
+	spin_lock_init(&buddy->buddy_lock);
+	buddy->bits = kzalloc(
+		(buddy->max_order + 1) * sizeof(unsigned long *), GFP_KERNEL);
+	if (!buddy->bits) {
+		DRV_RDMA_LOG_DEV_ERR("pbl:buddy init bits alloc mem err\n");
+		ret = -ENOMEM;
+		goto end;
+	}
+	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof(u32),
+				  GFP_KERNEL);
+	if (!buddy->num_free) {
+		DRV_RDMA_LOG_DEV_ERR("pbl:buddy init num free alloc mem err\n");
+		ret = -ENOMEM;
+		goto num_free_err_out;
+	}
+
+	for (i = 0; i <= buddy->max_order; ++i) {
+		num = (u32)BITS_TO_LONGS(1ul << (buddy->max_order - i));
+		/* __GFP_ZERO already zeroes the whole array, so no memset
+		 * is needed here. */
+		buddy->bits[i] = kvmalloc_array(
+			num, sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
+		if (!buddy->bits[i]) {
+			DRV_RDMA_LOG_DEV_ERR(
+				"pbl:buddy init bits %u alloc mem err\n", i);
+			ret = -ENOMEM;
+			goto err_out_free;
+		}
+	}
+
+	set_bit(0, buddy->bits[buddy->max_order]);
+	buddy->num_free[buddy->max_order] = 1;
+	goto end;
+
+err_out_free:
+	for (i = 0; i <= buddy->max_order; ++i) {
+		if (buddy->bits[i]) {
+			kvfree(buddy->bits[i]);
+			buddy->bits[i] = NULL;
+		}
+	}
+	kfree(buddy->num_free);
+	buddy->num_free = NULL;
+num_free_err_out:
+	kfree(buddy->bits);
+	buddy->bits = NULL;
+end:
return ret; +} + +int sxe2_pbl_buddy_alloc(struct sxe2_pbl_buddy *buddy, u32 order, + u64 *pbl_seg_index, u32 *total_pble_cnt) +{ + int ret = SXE2_OK; + u32 cur_order = 0; + u32 max_bits = 0; + int seg = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(buddy->dev); + + spin_lock(&buddy->buddy_lock); + for (cur_order = order; cur_order <= buddy->max_order; ++cur_order) { + if (buddy->num_free[cur_order]) { + max_bits = (u32)(1UL << (buddy->max_order - cur_order)); + seg = (int)find_first_bit(buddy->bits[cur_order], + max_bits); + if (seg < (int)max_bits) + goto found; + } + } + + spin_unlock(&buddy->buddy_lock); + *pbl_seg_index = SXE2_PBL_IDX_VALID_VAL; + DRV_RDMA_LOG_DEV_ERR( + "pbl:buddy alloc not find enough pble err order=%u\n", order); + ret = -ENOMEM; + goto end; + +found: + clear_bit(seg, buddy->bits[cur_order]); + --buddy->num_free[cur_order]; + while (cur_order > order) { + --cur_order; + seg <<= 1; + set_bit(seg ^ 1, buddy->bits[cur_order]); + ++buddy->num_free[cur_order]; + } + + spin_unlock(&buddy->buddy_lock); + *total_pble_cnt = 1 << order; + *pbl_seg_index = (u64)seg << order; + +end: + return ret; +} + +static void sxe2_pbl_buddy_free(struct sxe2_pbl_buddy *buddy, u64 pbl_seg_index, + u32 order) +{ + int seg; + + seg = (int)(pbl_seg_index >> order); + spin_lock(&buddy->buddy_lock); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, buddy->bits[order]); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + set_bit(seg, buddy->bits[order]); + ++buddy->num_free[order]; + spin_unlock(&buddy->buddy_lock); + +} + +static void sxe2_pbl_buddy_cleanup(struct sxe2_pbl_buddy *buddy) +{ + u32 i; + + for (i = 0; i <= buddy->max_order; ++i) { + if (buddy->bits && buddy->bits[i]) { + kvfree(buddy->bits[i]); + buddy->bits[i] = NULL; + } + } + + kfree(buddy->bits); + buddy->bits = NULL; + + kfree(buddy->num_free); + buddy->num_free = NULL; +} + +static int sxe2_pbl_first_page_bitmap_init( + struct sxe2_pbl_first_page_bitmap *first_page_bitmap) +{ + int ret = SXE2_OK; + struct sxe2_rcms_info *rcms_info = first_page_bitmap->dev->rcms_info; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(first_page_bitmap->dev); + + first_page_bitmap->max_fpte_cnt = rcms_info->first_page_fpte; + first_page_bitmap->first_fpte_idx = rcms_info->max_fpte_index + 1; + spin_lock_init(&first_page_bitmap->bitmap_lock); + first_page_bitmap->fpte_bits = kzalloc( + (first_page_bitmap->max_fpte_cnt) * sizeof(unsigned long *), + GFP_KERNEL); + if (!first_page_bitmap->fpte_bits) { + DRV_RDMA_LOG_DEV_ERR( + "pbl:first page bitmap init alloc mem err\n"); + ret = -ENOMEM; + } + + return ret; +} + +static int sxe2_pbl_first_page_bitmap_alloc( + struct sxe2_pbl_first_page_bitmap *first_page_bitmap, u32 bit_cnt, + u32 *bit_idx) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(first_page_bitmap->dev); + + *bit_idx = SXE2_FIRST_PAGE_INVALID_IDX; + spin_lock(&first_page_bitmap->bitmap_lock); + + *bit_idx = + (u32)bitmap_find_next_zero_area(first_page_bitmap->fpte_bits, + first_page_bitmap->max_fpte_cnt, + 0, bit_cnt, 0); + if (*bit_idx >= first_page_bitmap->max_fpte_cnt) { + DRV_RDMA_LOG_DEV_ERR("pbl:first page fpte not enough\n"); + ret = -ENOMEM; + goto end; + } + bitmap_set(first_page_bitmap->fpte_bits, *bit_idx, bit_cnt); + +end: + spin_unlock(&first_page_bitmap->bitmap_lock); + return ret; +} + +static void sxe2_pbl_first_page_bitmap_free( + struct sxe2_pbl_first_page_bitmap *first_page_bitmap, u32 bit_idx, + u32 bit_cnt) +{ + spin_lock(&first_page_bitmap->bitmap_lock); + 
bitmap_clear(first_page_bitmap->fpte_bits, bit_idx, bit_cnt); + spin_unlock(&first_page_bitmap->bitmap_lock); +} + +int sxe2_pbl_liner_addr_to_pble_pa(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 liner_addr, u64 *pa) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rcms_info *rcms_info = dev->rcms_info; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_spt_entry *spte; + u32 fpte_idx; + u32 rel_spte_idx; + u64 page_offset; + + fpte_idx = FPT_INDEX_GET(liner_addr); + fpte = &rcms_info->fpt.fpte[fpte_idx]; + if (!fpte->valid) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa fpte is invalid\n"); + ret = -EINVAL; + goto end; + } + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + if (!fpte->u.cp.use_cnt || !fpte->u.cp.page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa cp is invalid\n"); + ret = -EINVAL; + goto end; + } + page_offset = liner_addr & SXE2_PBL_2MB_PAGE_OFFSET; + *pa = fpte->u.cp.page_addr.pa + page_offset; + } else if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_SECOND) { + if (!fpte->u.spt.use_cnt || !fpte->u.spt.spt_page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa spt is invalid\n"); + ret = -EINVAL; + goto end; + } + rel_spte_idx = LINER_ADDR_TO_REL_SPTE_IDX(liner_addr); + spte = &fpte->u.spt.spte[rel_spte_idx]; + if (!spte->valid || !spte->cp.page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa spte is invalid\n"); + ret = -EINVAL; + goto end; + } + page_offset = liner_addr & SXE2_PBL_4KB_PAGE_OFFSET; + *pa = spte->cp.page_addr.pa + page_offset; + } + +end: + return ret; +} + +static int sxe2_pbl_liner_addr_to_pble_va(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 liner_addr, u64 **va) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_info *rcms_info = dev->rcms_info; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_spt_entry *spte; + u32 fpte_idx; + u32 rel_spte_idx; + u64 page_offset; + char *pble_va = NULL; + + fpte_idx = FPT_INDEX_GET(liner_addr); + fpte = &rcms_info->fpt.fpte[fpte_idx]; + if (!fpte->valid) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa fpte is invalid\n"); + ret = -EINVAL; + goto end; + } + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + if (!fpte->u.cp.use_cnt || !fpte->u.cp.page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa cp is invalid\n"); + ret = -EINVAL; + goto end; + } + + page_offset = liner_addr & SXE2_PBL_2MB_PAGE_OFFSET; + pble_va = (char *)fpte->u.cp.page_addr.va; + pble_va += page_offset; + } else if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_SECOND) { + if (!fpte->u.spt.use_cnt || !fpte->u.spt.spt_page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa spt is invalid\n"); + ret = -EINVAL; + goto end; + } + rel_spte_idx = LINER_ADDR_TO_REL_SPTE_IDX(liner_addr); + spte = &fpte->u.spt.spte[rel_spte_idx]; + if (!spte->valid || !spte->cp.page_addr.va) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa spte is invalid\n"); + ret = -EINVAL; + goto end; + } + page_offset = liner_addr & SXE2_PBL_4KB_PAGE_OFFSET; + pble_va = (char *)spte->cp.page_addr.va; + pble_va += page_offset; + } + *va = (u64 *)pble_va; +end: + return ret; +} + +static u32 sxe2_pbl_liner_addr_to_idx(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 liner_addr) +{ + u64 idx; + + idx = (liner_addr - pble_rsrc->pble_base_addr) >> + SXE2_PBL_PBLE_SIZE_SHIFT; + return (u32)idx; +} + 
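
[Editor's note] The two helpers above close out the address arithmetic used throughout this file: an allocation request is rounded up to a power-of-two buddy order (in units of SXE2_PBL_MIN_ALLOC_PBLE), a segment index is converted into a linear address by multiplying by the per-entry stride, and sxe2_pbl_liner_addr_to_idx() inverts that by subtracting the pool base and shifting by SXE2_PBL_PBLE_SIZE_SHIFT. A minimal stand-alone sketch of that arithmetic follows; the constants mirror sxe2_drv_rdma_pble.h, while every demo_* name is invented for illustration and does not exist in the driver.

/* Stand-alone illustration of the order/index math used by
 * sxe2_pbl_alloc_pble_idx() and sxe2_pbl_liner_addr_to_idx().
 * demo_* names are hypothetical; constants copied from the header. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MIN_ALLOC_PBLE  1ULL /* SXE2_PBL_MIN_ALLOC_PBLE */
#define DEMO_PBLE_SIZE       8ULL /* SXE2_PBL_PBLE_SIZE, bytes per entry */
#define DEMO_PBLE_SIZE_SHIFT 3    /* SXE2_PBL_PBLE_SIZE_SHIFT */

/* Round pble_cnt up to a buddy order, exactly like the
 * "for (order = 0, i = MIN; i < pble_cnt; i <<= 1) order++;" loops above. */
static uint32_t demo_order_for(uint32_t pble_cnt)
{
	uint32_t order = 0;
	uint64_t i;

	for (i = DEMO_MIN_ALLOC_PBLE; i < pble_cnt; i <<= 1)
		order++;
	return order;
}

/* Linear address of buddy segment seg, as in sxe2_pbl_alloc_pble_idx(). */
static uint64_t demo_seg_to_addr(uint64_t base, uint64_t seg)
{
	return base + seg * DEMO_MIN_ALLOC_PBLE * DEMO_PBLE_SIZE;
}

/* Inverse mapping, as in sxe2_pbl_liner_addr_to_idx(). */
static uint32_t demo_addr_to_idx(uint64_t base, uint64_t addr)
{
	return (uint32_t)((addr - base) >> DEMO_PBLE_SIZE_SHIFT);
}

int main(void)
{
	uint64_t base = 0x100000;                  /* hypothetical pool base */
	uint32_t order = demo_order_for(600);      /* rounds up to 1024 */
	uint64_t addr = demo_seg_to_addr(base, 5); /* hypothetical segment */

	printf("order=%u rounded cnt=%u idx=%u\n",
	       order, 1u << order, demo_addr_to_idx(base, addr));
	return 0;
}

Running this prints order=10 (600 entries round up to a 1024-entry segment) and recovers index 5 from the computed address, which is the round-trip invariant the allocator relies on.
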
+static void sxe2_pbl_get_index_info(u64 liner_addr, + struct sxe2_pbl_table_idx_info *idx_info) +{ + idx_info->fpte_idx = (u32)FPT_INDEX_GET(liner_addr); + idx_info->spte_idx = (u32)SPT_INDEX_GET(liner_addr); + idx_info->rel_spte_idx = REL_SPTE_INDEX_GET(idx_info->spte_idx); + +} + +static enum sxe2_rcms_fpt_entry_type +sxe2_pbl_get_type(struct sxe2_pbl_pble_rsrc *pble_rsrc) +{ + enum sxe2_rcms_fpt_entry_type entry_type = SXE2_RCMS_FPT_TYPE_FIRST; + + if (!pble_rsrc->dev->privileged) { + entry_type = SXE2_RCMS_FPT_TYPE_SECOND; + goto end; + } + + if (pble_rsrc->init_mode == PBL_SECOND_PAGE_TABLE) + entry_type = SXE2_RCMS_FPT_TYPE_FIRST; + else if (pble_rsrc->init_mode == PBL_THIRD_PAGE_TABLE) + entry_type = SXE2_RCMS_FPT_TYPE_SECOND; + +end: + return entry_type; +} + +int sxe2_pbl_manage_pble_cp_cmd(struct sxe2_mq_ctx *mq, + struct sxe2_pbl_manage_pble_info *info, + u64 scratch, bool post_sq) +{ + int ret = SXE2_OK; + __le64 *wqe; + struct sxe2_rcms_manage_vf_pble_cp_wqe *manage_cp_wqe; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR("pble: query fpm kget next mq wqe err\n"); + ret = -ENOMEM; + goto end; + } + manage_cp_wqe = (struct sxe2_rcms_manage_vf_pble_cp_wqe *)wqe; + manage_cp_wqe->spte_cnt = info->spte_cnt; + manage_cp_wqe->first_spte_idx = info->first_spte_idx; + manage_cp_wqe->fpte_idx = info->fpte_idx; + manage_cp_wqe->op = SXE2_MQ_OP_MANAGE_PBLE_BP; + manage_cp_wqe->invalidate_spte = info->invalidate_spte_cnt; + manage_cp_wqe->spt_pagelist_buf_pa = + info->first_spte_pa >> SXE2_PBL_MANAGE_CP_WQE_PA_SHIFT; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_pbl_err", manage_cp_wqe, + &rdma_dev->rdma_func->mq.err_cqe_val); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_pbl_err"); +#endif + + dma_wmb(); + manage_cp_wqe->wqe_valid = mq->polarity; + + if (post_sq) + sxe2_kpost_mq(mq); + +end: + return ret; +} + +int sxe2_pbl_build_second_type_table(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_add_page_info *info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_pbl_table_idx_info *idx = &info->idx_info; + struct sxe2_rcms_info *rcms_info = info->rcms_info; + struct sxe2_rcms_fpt_entry *fpte = info->fpte; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!fpte->valid) { + ret = sxe2_rcms_add_fpt_entry(dev, rcms_info, idx->fpte_idx, + SXE2_RCMS_FPT_TYPE_FIRST); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pbl:build second type table add fpte %u err ret=%d\n", + idx->fpte_idx, ret); + goto end; + } + pble_rsrc->fpte_indexes[pble_rsrc->add_fpte_cnt] = + (u16)idx->fpte_idx; + pble_rsrc->add_fpte_cnt++; + fpte->valid = true; + pble_rsrc->second_type_fpte_cnt++; + } + +end: + return ret; +} + +int sxe2_pbl_build_third_type_table(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_add_page_info *info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rcms_fpt_entry *fpte = info->fpte; + struct sxe2_rcms_spt_entry *spte; + struct sxe2_rcms_info *rcms_info = info->rcms_info; + u32 fpte_idx = info->idx_info.fpte_idx; + u32 rel_spte_idx = info->idx_info.rel_spte_idx; + u32 spte_idx = info->idx_info.spte_idx; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!fpte->valid) { + ret = sxe2_rcms_add_fpt_entry(dev, rcms_info, fpte_idx, + SXE2_RCMS_FPT_TYPE_SECOND); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pbl:build third 
type table add fpte err ret=%d\n", + ret); + goto end; + } + } + + info->start_liner_addr += (info->pages << SXE2_PBL_CP_PBLE_CNT_SHIFT) + << SXE2_PBL_PBLE_SIZE_SHIFT; + + for (i = 0; i < info->pages; i++) { + spte = &fpte->u.spt.spte[rel_spte_idx]; + if (!spte->valid) { + ret = sxe2_rcms_add_spt_entry(dev, rcms_info, spte_idx); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pbl:build third type table add fpte err ret=%d\n", + ret); + goto add_spte_err; + } + pble_rsrc->spte_indexes[pble_rsrc->add_spte_cnt] = + spte_idx; + pble_rsrc->add_spte_cnt++; + } + rel_spte_idx++; + spte_idx++; + } + + if (!fpte->valid) { + pble_rsrc->fpte_indexes[pble_rsrc->add_fpte_cnt] = + (u16)fpte_idx; + pble_rsrc->add_fpte_cnt++; + fpte->valid = true; + pble_rsrc->third_type_fpte_cnt++; + } + goto end; +add_spte_err: + for (i = 0; i < pble_rsrc->add_spte_cnt; i++) { + spte_idx = pble_rsrc->spte_indexes[i]; + sxe2_rcms_remove_spt_entry(dev, rcms_info, spte_idx); + } + pble_rsrc->add_spte_cnt = 0; + sxe2_rcms_remove_fpt_entry(dev, rcms_info, fpte_idx, + SXE2_RCMS_FPT_TYPE_SECOND); +end: + return ret; +} + +int sxe2_pbl_add_pble_prm(struct sxe2_pbl_pble_rsrc *pble_rsrc) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_info *rcms_info; + struct sxe2_pbl_add_page_info info; + enum sxe2_rcms_fpt_entry_type fpte_type = SXE2_RCMS_FPT_TYPE_FIRST; + struct sxe2_pbl_table_idx_info *idx_info = &info.idx_info; + u32 pages; + u32 spte_idx; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (pble_rsrc->unallocated_pble < SXE2_PBL_PBLE_CNT_IN_4K) { + DRV_RDMA_LOG_DEV_ERR("pble:unallocated pble is not enough\n"); + ret = -ENOMEM; + goto end; + } + rcms_info = dev->rcms_info; + sxe2_pbl_get_index_info(pble_rsrc->alloc_pble_base_addr, idx_info); + fpte = &rcms_info->fpt.fpte[idx_info->fpte_idx]; + pages = (idx_info->rel_spte_idx) ? 
+ (SXE2_RCMS_SPT_ENTRY_CNT - idx_info->rel_spte_idx) : + SXE2_RCMS_SPT_ENTRY_CNT; + pages = min(pages, + pble_rsrc->unallocated_pble / SXE2_PBL_PBLE_CNT_IN_4K); + info.fpte = fpte; + info.rcms_info = rcms_info; + info.pages = pages; + info.start_liner_addr = pble_rsrc->alloc_pble_base_addr; + if (!fpte->valid) + fpte_type = sxe2_pbl_get_type(pble_rsrc); + else + fpte_type = fpte->entry_type; + + DRV_RDMA_LOG_DEV_DEBUG("pbl:fpte index %u entry type=%u\n", + idx_info->fpte_idx, fpte_type); + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) { + ret = sxe2_pbl_build_second_type_table(pble_rsrc, &info); + if (ret != SXE2_OK) + goto end; + else + pble_rsrc->second_type_fpte_cnt++; + } else { + ret = sxe2_pbl_build_third_type_table(pble_rsrc, &info); + if (ret != SXE2_OK) + goto end; + else + pble_rsrc->third_type_fpte_cnt++; + } + + if (dev->privileged && pble_rsrc->add_fpte_cnt) { + ret = sxe2_rcms_update_fptes(dev, info.rcms_info, + &pble_rsrc->fpte_indexes[0], + pble_rsrc->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble:update fpte %u err ret=%d\n", + idx_info->fpte_idx, ret); + goto update_fpte_err; + } + } else if (!dev->privileged && pble_rsrc->add_fpte_cnt) { + ret = sxe2_rcms_vf_update_fptes(dev, info.rcms_info, + &pble_rsrc->fpte_indexes[0], + pble_rsrc->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "pble:vf update fpte %u err ret=%d\n", + idx_info->fpte_idx, ret); + goto update_fpte_err; + } + } + + pble_rsrc->add_fpte_cnt = 0; + pble_rsrc->add_spte_cnt = 0; + + goto end; +update_fpte_err: + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) { + fpte = &rcms_info->fpt.fpte[idx_info->fpte_idx]; + fpte->u.cp.use_cnt--; + sxe2_rcms_remove_fpt_entry(dev, rcms_info, idx_info->fpte_idx, + SXE2_RCMS_FPT_TYPE_FIRST); + pble_rsrc->add_fpte_cnt = 0; + } else if (fpte_type == SXE2_RCMS_FPT_TYPE_SECOND) { + for (i = 0; i < pble_rsrc->add_spte_cnt; i++) { + spte_idx = pble_rsrc->spte_indexes[i]; + sxe2_rcms_remove_spt_entry(dev, rcms_info, spte_idx); + } + pble_rsrc->add_spte_cnt = 0; + sxe2_rcms_remove_fpt_entry(dev, rcms_info, idx_info->fpte_idx, + SXE2_RCMS_FPT_TYPE_SECOND); + pble_rsrc->add_fpte_cnt = 0; + } +end: + return ret; +} + +static void sxe2_pbl_get_pble_fpte_range(u64 pble_liner_addr, u32 pble_cnt, + u32 *fpte_idx, u32 *fpte_lmt) +{ + u64 liner_addr_lmt; + + *fpte_idx = (u32)FPT_INDEX_GET(pble_liner_addr); + liner_addr_lmt = pble_liner_addr + pble_cnt * SXE2_PBL_PBLE_SIZE; + *fpte_lmt = (u32)FPT_INDEX_GET((liner_addr_lmt - 1)); + *fpte_lmt += 1; + +} + +static void sxe2_pbl_get_pble_spte_range(u64 pble_liner_addr, u32 pble_cnt, + u32 *spte_idx, u32 *spte_lmt) +{ + u64 liner_addr_lmt; + + *spte_idx = (u32)SPT_INDEX_GET(pble_liner_addr); + liner_addr_lmt = pble_liner_addr + pble_cnt * SXE2_PBL_PBLE_SIZE; + *spte_lmt = (u32)SPT_INDEX_GET((liner_addr_lmt - 1)); + *spte_lmt += 1; + +} + +int sxe2_pbl_add_second_type_table( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rcms_info *rcms_info = dev->rcms_info; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_pbl_add_page_info info = {}; + u32 fpte_idx; + u32 fpte_lmt; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + sxe2_pbl_get_pble_fpte_range(pble_alloc_info->pble_info.liner_addr, + pble_alloc_info->total_pble_cnt, &fpte_idx, + &fpte_lmt); + DRV_RDMA_LOG_DEV_DEBUG( + "pble:add second table fpte idx=%u fpte lmt=%u\n", fpte_idx, + fpte_lmt); + for (i = 
fpte_idx; i < fpte_lmt; i++) { + fpte = &rcms_info->fpt.fpte[i]; + info.fpte = fpte; + info.rcms_info = rcms_info; + info.idx_info.fpte_idx = i; + ret = sxe2_pbl_build_second_type_table(pble_rsrc, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:fpte %u build second type table err ret=%d\n", + i, ret); + goto end; + } + } + if (pble_rsrc->add_fpte_cnt > 0) { + ret = sxe2_rcms_update_fptes(dev, info.rcms_info, + &pble_rsrc->fpte_indexes[0], + pble_rsrc->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble:update fpte err ret=%d\n", + ret); + goto update_fpte_err; + } + pble_rsrc->add_fpte_cnt = 0; + } + goto end; + +update_fpte_err: + while (pble_rsrc->add_fpte_cnt) { + pble_rsrc->add_fpte_cnt--; + fpte = &rcms_info->fpt.fpte[pble_rsrc->fpte_indexes + [pble_rsrc->add_fpte_cnt]]; + fpte->u.cp.use_cnt--; + sxe2_rcms_remove_fpt_entry( + dev, rcms_info, + pble_rsrc->fpte_indexes[pble_rsrc->add_fpte_cnt], + SXE2_RCMS_FPT_TYPE_FIRST); + } + +end: + return ret; +} + +int sxe2_pbl_add_third_type_table( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rcms_info *rcms_info = dev->rcms_info; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_pbl_add_page_info info = {}; + u32 fpte_idx, fpte_lmt; + u32 spte_idx, spte_lmt; + u32 spte_idx_range = 0; + u32 spte_lmt_range = 0; + u32 i, j; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + sxe2_pbl_get_pble_fpte_range(pble_alloc_info->pble_info.liner_addr, + pble_alloc_info->total_pble_cnt, &fpte_idx, + &fpte_lmt); + sxe2_pbl_get_pble_spte_range(pble_alloc_info->pble_info.liner_addr, + pble_alloc_info->total_pble_cnt, &spte_idx, + &spte_lmt); + DRV_RDMA_LOG_DEV_DEBUG( + "pble:add third type table fpte idx=%u fpte lmt=%u spte idx=%u spte lmt=%u\n", + fpte_idx, fpte_lmt, spte_idx, spte_lmt); + + info.start_liner_addr = pble_alloc_info->pble_info.liner_addr; + for (i = fpte_idx; i < fpte_lmt; i++) { + fpte = &rcms_info->fpt.fpte[i]; + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) + continue; + info.fpte = fpte; + info.rcms_info = rcms_info; + info.idx_info.fpte_idx = i; + spte_idx_range = max(spte_idx, i * SXE2_RCMS_SPT_ENTRY_CNT); + spte_lmt_range = + min(spte_lmt, (i + 1) * SXE2_RCMS_SPT_ENTRY_CNT); + info.idx_info.spte_idx = spte_idx_range; + info.idx_info.rel_spte_idx = + REL_SPTE_INDEX_GET(info.idx_info.spte_idx); + info.pages = spte_lmt_range - spte_idx_range; + ret = sxe2_pbl_build_third_type_table(pble_rsrc, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:fpte %u build third type table err ret=%d\n", + i, ret); + goto end; + } + pble_rsrc->add_spte_cnt = 0; + } + + if (dev->privileged && pble_rsrc->add_fpte_cnt) { + ret = sxe2_rcms_update_fptes(dev, info.rcms_info, + &pble_rsrc->fpte_indexes[0], + pble_rsrc->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble:update fpte err ret=%d\n", + ret); + goto update_fpte_err; + } + } else if (!dev->privileged && pble_rsrc->add_fpte_cnt) { + ret = sxe2_rcms_vf_update_fptes(dev, info.rcms_info, + &pble_rsrc->fpte_indexes[0], + pble_rsrc->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "pble:vf update fpte err ret=%d\n", ret); + goto update_fpte_err; + } + } + + pble_rsrc->add_fpte_cnt = 0; + + goto end; + +update_fpte_err: + for (j = 0; j < pble_rsrc->add_fpte_cnt; j++) { + i = (u32)pble_rsrc->fpte_indexes[j]; + fpte = &rcms_info->fpt.fpte[i]; + fpte->valid = false; + 
spte_idx_range = max(spte_idx, i * SXE2_RCMS_SPT_ENTRY_CNT);
+		spte_lmt_range =
+			min(spte_lmt, (i + 1) * SXE2_RCMS_SPT_ENTRY_CNT);
+		/* Walk the range downwards by consuming spte_lmt_range
+		 * (recomputed each iteration) instead of reusing j, which
+		 * previously clobbered the outer loop counter. */
+		while (spte_lmt_range > spte_idx_range) {
+			spte_lmt_range--;
+			sxe2_rcms_remove_spt_entry(dev, rcms_info,
+						   spte_lmt_range);
+		}
+		sxe2_rcms_remove_fpt_entry(dev, rcms_info, i,
+					   SXE2_RCMS_FPT_TYPE_SECOND);
+	}
+	pble_rsrc->add_fpte_cnt = 0;
+end:
+	return ret;
+}
+
+int sxe2_pbl_alloc_pble_idx(struct sxe2_pbl_pble_rsrc *pble_rsrc,
+			    struct sxe2_pbl_pble_alloc_info *pble_alloc_info,
+			    u32 pble_cnt)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+	u32 order = 0;
+	u32 real_pble_cnt;
+	u64 pbl_seg_idx;
+	u64 pbl_liner_addr;
+	u32 i;
+
+	pbl_liner_addr = pble_rsrc->alloc_pble_base_addr;
+	for (order = 0, i = SXE2_PBL_MIN_ALLOC_PBLE; i < pble_cnt; i <<= 1)
+		order++;
+
+	ret = sxe2_pbl_buddy_alloc(&pble_rsrc->buddy, order, &pbl_seg_idx,
+				   &real_pble_cnt);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("pble:buddy alloc pble %u err ret=%d\n",
+				     pble_cnt, ret);
+		goto end;
+	}
+	pbl_liner_addr +=
+		pbl_seg_idx * SXE2_PBL_MIN_ALLOC_PBLE * SXE2_PBL_PBLE_SIZE;
+	pble_alloc_info->pble_info.liner_addr = pbl_liner_addr;
+	pble_alloc_info->pble_info.pble_idx =
+		sxe2_pbl_liner_addr_to_idx(pble_rsrc, pbl_liner_addr);
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"pble:buddy alloc pble needed cnt=%u\n"
+		"\trel cnt=%u pble liner addr=0x%llx pble idx=%u\n",
+		pble_cnt, real_pble_cnt, pbl_liner_addr,
+		pble_alloc_info->pble_info.pble_idx);
+	pble_alloc_info->total_pble_cnt = real_pble_cnt;
+
+end:
+	return ret;
+}
+
+static int
+sxe2_pbl_alloc_first_type_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc,
+			       struct sxe2_pbl_pble_alloc_info *pble_alloc_info,
+			       u32 pble_cnt)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+	u32 bit_idx;
+	u64 start_fpte_idx;
+
+	mutex_lock(&pble_rsrc->pble_mutex_lock);
+	if (pble_cnt > pble_rsrc->unallocated_first_type_fpte_cnt) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"pble:need first type pble not enough pble cnt=%u unallocated cnt=%u\n",
+			pble_cnt, pble_rsrc->unallocated_first_type_fpte_cnt);
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	ret = sxe2_pbl_first_page_bitmap_alloc(&pble_rsrc->first_page_bitmap,
+					       pble_cnt, &bit_idx);
+	if (ret != SXE2_OK)
+		goto end;
+
+	start_fpte_idx = bit_idx + pble_rsrc->first_page_bitmap.first_fpte_idx;
+	pble_alloc_info->total_pble_cnt = pble_cnt;
+	pble_alloc_info->pbl_mode.mr_mode = MR_TABLE_FIRST_MODE;
+	pble_alloc_info->pbl_index =
+		SXE2_PBL_FPTE_IDX_TO_PBL_IDX(start_fpte_idx);
+	pble_alloc_info->pble_info.liner_addr = pble_alloc_info->pbl_index;
+	pble_alloc_info->pble_info.pble_idx = (u32)start_fpte_idx;
+	pble_rsrc->unallocated_first_type_fpte_cnt -= pble_cnt;
+	pble_rsrc->allocated_first_type_fpte_cnt += pble_cnt;
+end:
+	mutex_unlock(&pble_rsrc->pble_mutex_lock);
+	return ret;
+}
+
+int sxe2_pbl_alloc_second_type_pble(
+	struct sxe2_pbl_pble_rsrc *pble_rsrc,
+	struct sxe2_pbl_pble_alloc_info *pble_alloc_info, u32 pble_cnt,
+	enum sxe2_pbl_obj_type obj_type)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+
+	if ((obj_type == PBL_OBJ_QP || obj_type == PBL_OBJ_SRQ) &&
+	    (pble_cnt > SXE2_PBL_2MB_PAGE_PBLE_CNT)) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"pble:obj %u alloc pble cross page pble cnt=%u\n",
+			obj_type, pble_cnt);
+		ret = -EINVAL;
+		goto end;
+	}
+	mutex_lock(&pble_rsrc->pble_mutex_lock);
+	ret = sxe2_pbl_alloc_pble_idx(pble_rsrc, pble_alloc_info, pble_cnt);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"pble:obj %u pble %u alloc pble idx err ret=%d\n",
+			obj_type, pble_cnt, ret);
+		goto end;
+	}
+	ret = sxe2_pbl_add_second_type_table(pble_rsrc, pble_alloc_info);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("pble:obj %u add second type err ret=%d\n",
+				     obj_type, ret);
+		goto end;
+	}
+	pble_rsrc->unallocated_pble -= pble_alloc_info->total_pble_cnt;
+	pble_rsrc->allocated_pbles += pble_alloc_info->total_pble_cnt;
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"pble:obj %u needed pble cnt=%u rel alloc pble cnt=%u unallocated pble cnt=%u\n",
+		obj_type, pble_cnt, pble_alloc_info->total_pble_cnt,
+		pble_rsrc->unallocated_pble);
+	if ((obj_type == PBL_OBJ_QP || obj_type == PBL_OBJ_SRQ ||
+	     obj_type == PBL_OBJ_CQ || obj_type == PBL_OBJ_EQ) &&
+	    pble_cnt <= SXE2_PBL_2MB_PAGE_PBLE_CNT) {
+		pble_alloc_info->pbl_mode.mode = QP_SRQ_PA_SECOND_MODE;
+		ret = sxe2_pbl_liner_addr_to_pble_pa(
+			pble_rsrc, pble_alloc_info->pble_info.liner_addr,
+			&pble_alloc_info->pbl_index);
+		if (ret != SXE2_OK) {
+			DRV_RDMA_LOG_DEV_ERR(
+				"pble:liner addr to pble pa err ret=%d\n", ret);
+			goto liner_to_pa_err;
+		}
+	} else if (obj_type == PBL_OBJ_CQ || obj_type == PBL_OBJ_EQ) {
+		pble_alloc_info->pbl_mode.cq_eq_mode = CQ_EQ_TABLE_SECOND_MODE;
+		pble_alloc_info->pbl_index =
+			pble_alloc_info->pble_info.liner_addr;
+	} else if (obj_type == PBL_OBJ_MR) {
+		pble_alloc_info->pbl_mode.mr_mode = MR_TABLE_SECOND_MODE;
+		pble_alloc_info->pbl_index =
+			pble_alloc_info->pble_info.liner_addr;
+	}
+	DRV_RDMA_LOG_DEV_DEBUG("pble:obj %u pbl mode %u pbl index 0x%llx\n",
+			       obj_type, pble_alloc_info->pbl_mode.mode,
+			       pble_alloc_info->pbl_index);
+	goto end;
+liner_to_pa_err:
+	mutex_unlock(&pble_rsrc->pble_mutex_lock);
+	sxe2_pbl_free_pble(pble_rsrc, pble_alloc_info->pble_info.liner_addr,
+			   pble_cnt, false);
+	return ret;
+end:
+	mutex_unlock(&pble_rsrc->pble_mutex_lock);
+	return ret;
+}
+
+int sxe2_pbl_alloc_third_type_pble(
+	struct sxe2_pbl_pble_rsrc *pble_rsrc,
+	struct sxe2_pbl_pble_alloc_info *pble_alloc_info, u32 pble_cnt,
+	enum sxe2_pbl_obj_type obj_type)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+
+	if ((obj_type == PBL_OBJ_QP || obj_type == PBL_OBJ_SRQ) &&
+	    (pble_cnt > SXE2_PBL_4KB_PAGE_PBLE_CNT)) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"pble:obj %u alloc pble cross page pble cnt=%u\n",
+			obj_type, pble_cnt);
+		ret = -EINVAL;
+		goto end;
+	}
+	mutex_lock(&pble_rsrc->pble_mutex_lock);
+	ret = sxe2_pbl_alloc_pble_idx(pble_rsrc, pble_alloc_info, pble_cnt);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"pble:obj %u pble %u alloc pble idx err ret=%d\n",
+			obj_type, pble_cnt, ret);
+		goto end;
+	}
+	ret = sxe2_pbl_add_third_type_table(pble_rsrc, pble_alloc_info);
+	if (ret != SXE2_OK) {
+		DRV_RDMA_LOG_DEV_ERR("pble:obj %u add third type err ret=%d\n",
+				     obj_type, ret);
+		goto end;
+	}
+
+	pble_rsrc->unallocated_pble -= pble_alloc_info->total_pble_cnt;
+	pble_rsrc->allocated_pbles += pble_alloc_info->total_pble_cnt;
+
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"pble:obj %u needed pble=%u rel alloc pble=%u\n"
+		"\talloced pble=%u unallocated pble=%u\n",
+		obj_type, pble_cnt, pble_alloc_info->total_pble_cnt,
+		pble_rsrc->allocated_pbles, pble_rsrc->unallocated_pble);
+	if ((obj_type == PBL_OBJ_QP || obj_type == PBL_OBJ_SRQ ||
+	     obj_type == PBL_OBJ_CQ || obj_type == PBL_OBJ_EQ) &&
+	    pble_cnt <= SXE2_PBL_4KB_PAGE_PBLE_CNT) {
+		pble_alloc_info->pbl_mode.mode = 
QP_SRQ_PA_SECOND_MODE; + ret = sxe2_pbl_liner_addr_to_pble_pa( + pble_rsrc, pble_alloc_info->pble_info.liner_addr, + &pble_alloc_info->pbl_index); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:liner addr to pble pa err ret=%d\n", ret); + goto liner_to_pa_err; + } + } else if (obj_type == PBL_OBJ_CQ || obj_type == PBL_OBJ_EQ) { + pble_alloc_info->pbl_mode.cq_eq_mode = CQ_EQ_TABLE_THIRD_MODE; + pble_alloc_info->pbl_index = + pble_alloc_info->pble_info.liner_addr; + } else if (obj_type == PBL_OBJ_MR) { + pble_alloc_info->pbl_mode.mr_mode = MR_TABLE_THIRD_MODE; + pble_alloc_info->pbl_index = + pble_alloc_info->pble_info.liner_addr; + } + DRV_RDMA_LOG_DEV_DEBUG("pble:obj %u pbl mode %u pbl index 0x%llx\n", + obj_type, pble_alloc_info->pbl_mode.mode, + pble_alloc_info->pbl_index); + goto end; +liner_to_pa_err: + mutex_unlock(&pble_rsrc->pble_mutex_lock); + sxe2_pbl_free_pble(pble_rsrc, pble_alloc_info->pble_info.liner_addr, + pble_cnt, false); + return ret; +end: + mutex_unlock(&pble_rsrc->pble_mutex_lock); + return ret; +} + +int sxe2_pbl_set_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, u64 page_pa, + u16 fn_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_update_fptes_info *update_fpte_info = NULL; + + update_fpte_info = kzalloc( + sizeof(*update_fpte_info), GFP_KERNEL); + if (!update_fpte_info) { + DRV_RDMA_LOG_DEV_ERR("pble:update fpte info alloc err\n"); + ret = -ENOMEM; + goto end; + } + + DRV_RDMA_LOG_DEV_DEBUG( + "pble:func id %u set pble fpte idx=%u page pa=%llx\n", fn_id, + fpte_idx, page_pa); + update_fpte_info->cnt = 1; + update_fpte_info->rcms_fn_id = fn_id; + update_fpte_info->entry[0].data = + page_pa | FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_TYPE, (s64)1) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_VALID, (s64)1); + update_fpte_info->entry[0].cmd = fpte_idx; + + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:func id %u mq update fpte err ret=%d\n", fn_id, + ret); + } + + kfree(update_fpte_info); +end: + return ret; +} + +int sxe2_pbl_clear_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 pble_cnt, u16 fn_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_update_fptes_info *update_fpte_info = NULL; + u32 i; + + update_fpte_info = kzalloc( + sizeof(*update_fpte_info), GFP_KERNEL); + if (!update_fpte_info) { + DRV_RDMA_LOG_DEV_ERR("pble:update fpte info alloc err\n"); + ret = -ENOMEM; + goto end; + } + + update_fpte_info->cnt = 0; + update_fpte_info->rcms_fn_id = fn_id; + + for (i = 0; i < pble_cnt; i++) { + update_fpte_info->entry[update_fpte_info->cnt].data = + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_TYPE, (s64)1) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_VALID, (s64)0); + update_fpte_info->entry[update_fpte_info->cnt].cmd = + fpte_idx + i; + DRV_RDMA_LOG_DEV_DEBUG("pble:func id %u clear fpte idx=%u\n", + fn_id, fpte_idx + i); + update_fpte_info->cnt++; + + if (update_fpte_info->cnt == + SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES) { + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:func id %u mq update fpte err ret=%d\n", + fn_id, ret); + goto end; + } + update_fpte_info->cnt = 0; + } + } + + if (update_fpte_info->cnt) { + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:func id %u mq update fpte err ret=%d\n", + fn_id, ret); + } + } + + kfree(update_fpte_info); +end: + return ret; +} + 
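
[Editor's note] sxe2_pbl_clear_fpte() above shows the accumulate-and-flush pattern used for mailbox FPT updates: entries are staged in update_fpte_info, flushed through dev->mq->process_mq_fpt() whenever the batch reaches SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES, and a final flush drains any remainder. A minimal sketch of that loop follows, with a stand-in flush callback; all demo_* names are invented for illustration.

/* Minimal accumulate-and-flush sketch of the batching used by
 * sxe2_pbl_clear_fpte(). demo_* names are hypothetical; the real
 * driver flushes each batch via dev->mq->process_mq_fpt(). */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MAX_BATCH 4 /* stands in for SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES */

struct demo_batch {
	uint32_t cmd[DEMO_MAX_BATCH];
	unsigned int cnt;
};

/* Stand-in for the mailbox call; just reports what would be sent. */
static int demo_flush(struct demo_batch *b)
{
	printf("flush %u entries (first cmd=%u)\n", b->cnt, b->cmd[0]);
	b->cnt = 0;
	return 0;
}

static int demo_clear_range(uint32_t first_idx, uint32_t count)
{
	struct demo_batch b = { .cnt = 0 };
	uint32_t i;
	int ret;

	for (i = 0; i < count; i++) {
		b.cmd[b.cnt++] = first_idx + i;
		if (b.cnt == DEMO_MAX_BATCH) {
			ret = demo_flush(&b); /* full batch: flush mid-loop */
			if (ret)
				return ret;
		}
	}
	return b.cnt ? demo_flush(&b) : 0; /* drain the trailing partial batch */
}

int main(void)
{
	return demo_clear_range(100, 10); /* flushes batches of 4, 4, then 2 */
}

The design keeps each mailbox WQE bounded while still amortizing the doorbell cost over many page-table entries, which is why the same shape appears in both the set and clear paths.
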
+static int sxe2_pbl_first_type_set_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 pble_liner_addr, u64 page_pa) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 fpte_idx; + + fpte_idx = SXE2_PBL_PBL_IDX_TO_FPTE_IDX(pble_liner_addr); + if (fpte_idx < pble_rsrc->first_page_bitmap.first_fpte_idx || + fpte_idx > (pble_rsrc->first_page_bitmap.first_fpte_idx + + pble_rsrc->first_page_bitmap.max_fpte_cnt)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "pble:first type set pble fpte idx err fpte idx=%u\n", + fpte_idx); + goto end; + } + + if (!dev->privileged) { + ret = sxe2_vchnl_req_set_pbl_fpte(dev, fpte_idx, page_pa); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble: vf set fpte err ret=%d\n", + ret); + } + goto end; + } + + ret = sxe2_pbl_set_fpte(dev, fpte_idx, page_pa, + dev->rcms_info->rcms_fn_id); + if (ret != SXE2_OK) + DRV_RDMA_LOG_DEV_ERR("pble: set fpte err ret=%d\n", ret); + +end: + return ret; +} + +static void sxe2_pbl_first_type_free_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 pble_liner_addr, u32 pble_cnt) +{ + int ret = SXE2_OK; + u32 fpte_idx; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + fpte_idx = SXE2_PBL_PBL_IDX_TO_FPTE_IDX(pble_liner_addr); + + if (!dev->privileged) { + ret = sxe2_vchnl_req_clear_pbl_fpte(dev, fpte_idx, pble_cnt); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble: vf clear fpte err ret=%d\n", + ret); + } + } else { + ret = sxe2_pbl_clear_fpte(dev, fpte_idx, pble_cnt, + dev->rcms_info->rcms_fn_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble: clear fpte err ret=%d\n", + ret); + } + } + sxe2_pbl_first_page_bitmap_free( + &pble_rsrc->first_page_bitmap, + (fpte_idx - pble_rsrc->first_page_bitmap.first_fpte_idx), + pble_cnt); + + pble_rsrc->allocated_first_type_fpte_cnt -= pble_cnt; + pble_rsrc->unallocated_first_type_fpte_cnt += pble_cnt; + +} + +int sxe2_pbl_set_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, u64 pble_liner_addr, + u64 page_pa, bool firt_type_flag) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u64 *pble_va; + + if (firt_type_flag) { + ret = sxe2_pbl_first_type_set_pble(pble_rsrc, pble_liner_addr, + page_pa); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "pble:set first type pble err ret=%d\n", + ret); + } + goto end; + } + + ret = sxe2_pbl_liner_addr_to_pble_va(pble_rsrc, pble_liner_addr, + &pble_va); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("pble:liner addr to pble pa err ret=%d\n", + ret); + goto end; + } + memcpy(pble_va, &page_pa, sizeof(*pble_va)); + DRV_RDMA_LOG_DEV_DEBUG( + "pble:set pble liner addr=0x%llx pble va=%p page pa=0x%llx\n", + pble_liner_addr, pble_va, *pble_va); +end: + return ret; +} + +int sxe2_pbl_get_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info, + u32 pble_cnt, enum sxe2_pbl_obj_type obj_type) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + if (pble_cnt == 0) { + DRV_RDMA_LOG_DEV_ERR("pble:obj %u alloc pble cnt is 0\n", + obj_type); + goto end; + } + pble_alloc_info->needed_pble_cnt = pble_cnt; + + if (obj_type == PBL_OBJ_MR && pble_alloc_info->mr_first_page_flags) { + ret = sxe2_pbl_alloc_first_type_pble(pble_rsrc, pble_alloc_info, + pble_cnt); + if (ret != SXE2_OK) { + 
DRV_RDMA_LOG_DEV_ERR( + "pble:obj %u alloc first type pble err ret=%d\n", + obj_type, ret); + } + goto end; + } + + if (pble_rsrc->init_mode == PBL_SECOND_PAGE_TABLE) { + ret = sxe2_pbl_alloc_second_type_pble( + pble_rsrc, pble_alloc_info, pble_cnt, obj_type); + if (ret != SXE2_OK && rdma_func->rcms_mode.pbl_mode == + SXE2_PBL_SECOND_INIT_MODE) { + goto end; + } + } else { + ret = sxe2_pbl_alloc_third_type_pble(pble_rsrc, pble_alloc_info, + pble_cnt, obj_type); + } +end: + return ret; +} + +void sxe2_pbl_free_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 pble_liner_addr, u32 pble_cnt, bool firt_type_flag) +{ + struct sxe2_rdma_ctx_dev *dev = pble_rsrc->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 order = 0; + u64 i; + u64 pble_seg_idx; + u32 total_free_pble_cnt; + + if (pble_cnt == 0) { + DRV_RDMA_LOG_DEV_ERR("pble:free pble cnt is 0\n"); + return; + } + mutex_lock(&pble_rsrc->pble_mutex_lock); + if (firt_type_flag) { + sxe2_pbl_first_type_free_pble(pble_rsrc, pble_liner_addr, + pble_cnt); + goto end; + } + + pble_seg_idx = ((pble_liner_addr - pble_rsrc->alloc_pble_base_addr) / + SXE2_PBL_MIN_ALLOC_PBLE) / + SXE2_PBL_PBLE_SIZE; + for (order = 0, i = SXE2_PBL_MIN_ALLOC_PBLE; i < pble_cnt; i <<= 1) + ++order; + + sxe2_pbl_buddy_free(&pble_rsrc->buddy, pble_seg_idx, order); + + total_free_pble_cnt = 1 << order; + pble_rsrc->unallocated_pble += total_free_pble_cnt; + pble_rsrc->allocated_pbles -= total_free_pble_cnt; + DRV_RDMA_LOG_DEV_DEBUG( + "pble:free pble liner addr=0x%llx pble cnt=%u total free cnt = 0x%x\n", + pble_liner_addr, pble_cnt, total_free_pble_cnt); + +end: + mutex_unlock(&pble_rsrc->pble_mutex_lock); +} + +int sxe2_pbl_init(struct sxe2_rdma_device *rdma_dev) +{ + s32 ret = SXE2_OK; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_pbl_pble_rsrc *pble_rsrc = rdma_func->pble_rsrc; + struct sxe2_rcms_info *rcms_info; + u32 fpm_idx = 0; + u32 max_order = 0; + u32 powerof2 = 1; + + DRV_RDMA_LOG_DEV_DEBUG("pbl:pbl init start\n"); + rcms_info = dev->rcms_info; + if (rdma_func->rcms_mode.pbl_mode >= SXE2_PBL_INIT_MODE_MAX) { + DRV_RDMA_LOG_DEV_ERR("pbl: init mode err init mode=%u\n", + rdma_func->rcms_mode.pbl_mode); + ret = -EINVAL; + goto end; + } + + if (dev->privileged && + rdma_func->rcms_mode.pbl_mode == SXE2_PBL_SECOND_INIT_MODE) { + pble_rsrc->init_mode = PBL_SECOND_PAGE_TABLE; + } else { + pble_rsrc->init_mode = PBL_THIRD_PAGE_TABLE; + } + pble_rsrc->dev = dev; + pble_rsrc->pble_base_addr = + rcms_info->rcms_obj[SXE2_RCMS_OBJ_PBLE].base; + if (pble_rsrc->pble_base_addr & SXE2_PBL_4KB_PAGE_OFFSET) { + fpm_idx = + (SXE2_PBL_SPTE_CP_SIZE - ((pble_rsrc->pble_base_addr & + SXE2_PBL_4KB_PAGE_OFFSET) >> + SXE2_PBL_PBLE_SIZE_SHIFT)); + } + + pble_rsrc->unallocated_pble = + rcms_info->rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt - fpm_idx; + pble_rsrc->allocated_pbles = 0; + pble_rsrc->alloc_pble_base_addr = pble_rsrc->pble_base_addr + + (fpm_idx << SXE2_PBL_PBLE_SIZE_SHIFT); + while (powerof2 <= pble_rsrc->unallocated_pble) { + powerof2 *= 2; + max_order++; + } + pble_rsrc->unallocated_pble = powerof2 / 2; + max_order--; + + pble_rsrc->buddy.dev = dev; + ret = sxe2_pbl_buddy_init(&pble_rsrc->buddy, max_order); + if (ret != SXE2_OK) + goto end; + + if (rcms_info->first_page_fpte) { + pble_rsrc->first_page_en = true; + pble_rsrc->unallocated_first_type_fpte_cnt = + rcms_info->first_page_fpte; + pble_rsrc->first_page_bitmap.dev = dev; + ret = sxe2_pbl_first_page_bitmap_init( + 
&pble_rsrc->first_page_bitmap); + if (ret != SXE2_OK) + goto end; + } else { + pble_rsrc->first_page_en = false; + } + + mutex_init(&pble_rsrc->pble_mutex_lock); + ret = sxe2_pbl_add_pble_prm(pble_rsrc); + if (ret != SXE2_OK) + goto add_pble_prm_err; + + DRV_RDMA_LOG_DEV_DEBUG("pbl:pbl init finish\n"); + goto end; + +add_pble_prm_err: + sxe2_pbl_buddy_cleanup(&pble_rsrc->buddy); + kvfree(pble_rsrc->first_page_bitmap.fpte_bits); + pble_rsrc->first_page_bitmap.fpte_bits = NULL; +end: + return ret; +} + +void sxe2_pbl_exit(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_pbl_pble_rsrc *pble_rsrc = rdma_dev->rdma_func->pble_rsrc; + + sxe2_pbl_buddy_cleanup(&pble_rsrc->buddy); + kvfree(pble_rsrc->first_page_bitmap.fpte_bits); + pble_rsrc->first_page_bitmap.fpte_bits = NULL; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.h new file mode 100644 index 0000000000000000000000000000000000000000..a0e9380d8722a5929810833e1b2abad706149eca --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_pble.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_pble.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_RDMA_PBLE_H +#define SXE2_DRV_RDMA_PBLE_H + +#define SXE2_OK 0 +#define SXE2_PBL_MIN_ALLOC_PBLE (1) +#define SXE2_PBL_IDX_VALID_VAL (0xFFFFFFFFFFFFFFFF) +#define SXE2_PBL_PBLE_CNT_IN_4K (512) +#define SXE2_PBL_2MB_PAGE_PBLE_CNT (0x40000) +#define SXE2_PBL_4KB_PAGE_PBLE_CNT (0x200) +#define SXE2_PBL_PBLE_SIZE (8) +#define SXE2_PBL_2MB_PAGE_OFFSET (0x1FFFFF) +#define SXE2_PBL_4KB_PAGE_OFFSET (0xFFF) +#define SXE2_PBL_CP_PBLE_CNT_SHIFT (9) +#define SXE2_PBL_PBLE_CNT_PER_4KB_CP (512) +#define SXE2_PBL_MANAGE_CP_WQE_PA_SHIFT (3) +#define SXE2_PBL_SPTE_SIZE (8) +#define SXE2_PBL_SPTE_CP_SIZE (4096) +#define SXE2_PBL_PBLE_SIZE_SHIFT (3) +#define LINER_ADDR_TO_REL_SPTE_IDX(a) (((a) >> 12) & 0x1FF) +#define SXE2_FIRST_PAGE_INVALID_IDX (0xFFFFFFFF) +#define SXE2_PBL_FPTE_IDX_TO_PBL_IDX(a) ((a) << 21) +#define SXE2_PBL_PBL_IDX_TO_FPTE_IDX(a) ((u32)((a) >> 21)) + +struct sxe2_pbl_table_idx_info { + u32 fpte_idx; + u32 spte_idx; + u32 rel_spte_idx; +}; +struct sxe2_pbl_add_page_info { + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_info *rcms_info; + struct sxe2_pbl_table_idx_info idx_info; + u64 start_liner_addr; + u32 pages; +}; + +int sxe2_pbl_set_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, u64 page_pa, + u16 fn_id); + +int sxe2_pbl_clear_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 pble_cnt, u16 fn_id); + +int sxe2_pbl_init(struct sxe2_rdma_device *rdma_dev); + +void sxe2_pbl_exit(struct sxe2_rdma_device *rdma_dev); + +int sxe2_pbl_get_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info, + u32 pble_cnt, enum sxe2_pbl_obj_type obj_type); + +void sxe2_pbl_free_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 pble_liner_addr, u32 pble_cnt, bool firt_type_flag); + +int sxe2_pbl_set_pble(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 pble_liner_index, u64 page_pa, bool firt_type_flag); + +int sxe2_pbl_manage_pble_cp_cmd(struct sxe2_mq_ctx *mq, + struct sxe2_pbl_manage_pble_info *info, + u64 scratch, bool post_sq); +int sxe2_pbl_buddy_alloc(struct sxe2_pbl_buddy *buddy, u32 order, + u64 *pbl_seg_index, u32 *total_pble_cnt); +int sxe2_pbl_liner_addr_to_pble_pa(struct sxe2_pbl_pble_rsrc *pble_rsrc, + u64 liner_addr, u64 
*pa); + +int sxe2_pbl_build_second_type_table(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_add_page_info *info); +int sxe2_pbl_build_third_type_table(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_add_page_info *info); + +int sxe2_pbl_add_pble_prm(struct sxe2_pbl_pble_rsrc *pble_rsrc); + +int sxe2_pbl_add_second_type_table( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info); + +int sxe2_pbl_add_third_type_table( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info); +int sxe2_pbl_alloc_pble_idx(struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info, + u32 pble_cnt); + +int sxe2_pbl_alloc_second_type_pble( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info, u32 pble_cnt, + enum sxe2_pbl_obj_type obj_type); + +int sxe2_pbl_alloc_third_type_pble( + struct sxe2_pbl_pble_rsrc *pble_rsrc, + struct sxe2_pbl_pble_alloc_info *pble_alloc_info, u32 pble_cnt, + enum sxe2_pbl_obj_type obj_type); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.c new file mode 100644 index 0000000000000000000000000000000000000000..c3fe4b06b047c20d6c227aae751377541af3408a --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.c @@ -0,0 +1,1531 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_qos.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_qos.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_hw.h" +#include "sxe2_drv_aux.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_virtchnl.h" + +struct sxe2_err_code sxe2_qos_apply_qset_err_code[] = { + { "NO ERR", QSET_INVALID_CODE }, + { "APPLY:NO FREE QSET", NO_FREE_QSET }, + { "APPLY:FUNCTION EXCEED MAX LIMIT QSET", + FUNCTION_EXCEED_MAX_LIMIT_QSET }, +}; + +struct sxe2_err_code sxe2_qos_release_qset_err_code[] = { + { "NO ERR", QSET_INVALID_CODE }, + { "RELEASE:QSET NOT ALLOCED", QSET_NOT_ALLOCED }, + { "RELEASE:QSET FUNC NOT MATCH", QSET_FUNC_NOT_MATCH }, +}; + +struct sxe2_err_code sxe2_qos_qp_bind_qset_err_code[] = { + { "NO ERR", QSET_INVALID_CODE }, + { "BIND:QP NOT BIND THIS FUNCTION", BIND_QP_NOT_BIND_THIS_FUNCTION }, + { "BIND:QSET NOT BIND THIS FUNCTION", + BIND_QSET_NOT_BIND_THIS_FUNCTION }, + { "BIND:QP QSET NOT BIND THIS FUNCTION", + BIND_QP_QSET_NOT_BIND_THIS_FUNCTION }, + + { "UNBIND:QP NOT BIND THIS FUNCTION", + UNBIND_QP_NOT_BIND_THIS_FUNCTION }, + { "UNBIND:QSET NOT BIND THIS FUNCTION", + UNBIND_QSET_NOT_BIND_THIS_FUNCTION }, + { "UNBIND:QP QSET NOT BIND THIS FUNCTION", + UNBIND_QP_QSET_NOT_BIND_THIS_FUNCTION }, + { "UNBIND:QP UNBIND", UNBIND_QP_UNBIND }, + { "UNBIND:QP NOT BIND THIS QSET", UNBIND_QP_NOT_BIND_THIS_QSET }, + +}; + +static int sxe2_qos_bar_query_qset(struct sxe2_rdma_ctx_dev *dev, u32 qset_id, + u32 *func_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 pull = QOS_QUERY_QSET_REQ_PULL; + u32 req_reg_val; + u32 resp_reg_val; + u32 req_reg_idx; + u32 resp_reg_idx; + + req_reg_idx = QSET_QUERY_REQ; + resp_reg_idx = QSET_QUERY_RESP; + + if (qset_id >= SXE2_FUNC_MAX_QSET_ID) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:query qset input qset id err qset id=%d\n", + qset_id); + goto end; + } + + if (rdma_dev->rdma_func->reset) { + ret = -EBUSY; + DRV_RDMA_LOG_DEV_INFO( + "reset is 
set, bar will not be performed\n"); + goto end; + } + + req_reg_val = WRITE_BIT(qset_id, QUERY_QSET_REQ_QSET_ID_M, + QUERY_QSET_REQ_QSET_ID_S) | + WRITE_BIT(1, QUERY_QSET_REQ_VLD_M, QUERY_QSET_REQ_VLD_S); + SXE2_BAR_WRITE_32(req_reg_val, dev->hw_regs[req_reg_idx]); + do { + resp_reg_val = SXE2_BAR_READ_32(dev->hw_regs[resp_reg_idx]); + if (READ_BIT(resp_reg_val, QUERY_QSET_RESP_DONE_M, + QUERY_QSET_RESP_DONE_S) && + !READ_BIT(resp_reg_val, QUERY_QSET_RESP_ERR_M, + QUERY_QSET_RESP_ERR_S)) { + *func_id = READ_BIT(resp_reg_val, + QUERY_QSET_RESP_FUNC_ID_M, + QUERY_QSET_RESP_FUNC_ID_S); + resp_reg_val = WRITE_BIT(0, QUERY_QSET_RESP_DONE_M, + QUERY_QSET_RESP_DONE_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } else if (READ_BIT(resp_reg_val, QUERY_QSET_RESP_DONE_M, + QUERY_QSET_RESP_DONE_S) && + READ_BIT(resp_reg_val, QUERY_QSET_RESP_ERR_M, + QUERY_QSET_RESP_ERR_S)) { + DRV_RDMA_LOG_DEV_ERR( + "qos:bar reg query qset not alloced\n"); + ret = -EINVAL; + resp_reg_val = WRITE_BIT(0, QUERY_QSET_RESP_DONE_M, + QUERY_QSET_RESP_DONE_S) | + WRITE_BIT(0, QUERY_QSET_RESP_ERR_M, + QUERY_QSET_RESP_ERR_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } + cond_resched(); + } while ((--pull) != 0); + + if (pull == 0) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:query qset resp reg not done, reg val=0x%x\n", + resp_reg_val); + } +end: + return ret; +} + +static int sxe2_qos_bar_apply_qset(struct sxe2_rdma_ctx_dev *dev, u16 *qset_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 pull = QOS_APPLY_QSET_REQ_PULL; + u32 req_reg_val; + u32 resp_reg_val; + u32 req_reg_idx; + u32 resp_reg_idx; + u32 err_code; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + u8 inject_err_code = + rdma_dev->rdma_func->inject_qos.apply_qset_err_code; +#endif + + req_reg_idx = QSET_APPLY_REQ; + resp_reg_idx = QSET_APPLY_RESP; + + if (rdma_dev->rdma_func->reset) { + ret = -EBUSY; + DRV_RDMA_LOG_DEV_INFO( + "reset is set, bar will not be performed\n"); + goto end; + } + + req_reg_val = WRITE_BIT(1, APPLY_QSET_REQ_REG_VLD_M, + APPLY_QSET_REQ_REG_VLD_S); + SXE2_BAR_WRITE_32(req_reg_val, dev->hw_regs[req_reg_idx]); + do { + resp_reg_val = SXE2_BAR_READ_32(dev->hw_regs[resp_reg_idx]); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject apply qset resp reg val=0x%x\n", + resp_reg_val); + resp_reg_val = resp_reg_val | QOS_BAR_APPLY_INJECT_VAL; + } +#endif + if (READ_BIT(resp_reg_val, APPLY_QSET_RESP_DONE_M, + APPLY_QSET_RESP_DONE_S) && + !READ_BIT(resp_reg_val, APPLY_QSET_RESP_ERR_M, + APPLY_QSET_RESP_ERR_S)) { + *qset_id = (u16)READ_BIT(resp_reg_val, + APPLY_QSET_RESP_QSET_ID_M, + APPLY_QSET_RESP_QSET_ID_S); + resp_reg_val = WRITE_BIT(0, APPLY_QSET_RESP_DONE_M, + APPLY_QSET_RESP_DONE_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } else if (READ_BIT(resp_reg_val, APPLY_QSET_RESP_DONE_M, + APPLY_QSET_RESP_DONE_S) && + READ_BIT(resp_reg_val, APPLY_QSET_RESP_ERR_M, + APPLY_QSET_RESP_ERR_S)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:no available qset resource reg\n"); + err_code = READ_BIT(resp_reg_val, + APPLY_QSET_RESP_ERR_CODE_M, + APPLY_QSET_RESP_ERR_CODE_S); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + err_code = inject_err_code; + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject apply qset errcode=%d\n", + err_code); + } +#endif + DRV_RDMA_LOG_DEV_ERR( + "qos:err code:%u err 
type is %s\n", + sxe2_qos_apply_qset_err_code[err_code].err_code, + sxe2_qos_apply_qset_err_code[err_code].err_mean); + resp_reg_val = WRITE_BIT(0, APPLY_QSET_RESP_DONE_M, + APPLY_QSET_RESP_DONE_S) | + WRITE_BIT(0, APPLY_QSET_RESP_ERR_M, + APPLY_QSET_RESP_ERR_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } + cond_resched(); + } while ((--pull) != 0); + + if (pull == 0) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:apply qset resp reg not done, reg val=0x%x\n", + resp_reg_val); + } + +end: + return ret; +} + +static void sxe2_qos_bar_release_qset(struct sxe2_rdma_ctx_dev *dev, + u32 qset_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 pull = QOS_RELEASE_QSET_REQ_PULL; + u32 req_reg_val; + u32 resp_reg_val; + u32 req_reg_idx; + u32 resp_reg_idx; + u32 err_code; + u32 func_id; + bool hw_rsrc_clean = false; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + u8 inject_err_code = + rdma_dev->rdma_func->inject_qos.release_qset_err_code; +#endif + + req_reg_idx = QSET_RELEASE_REQ; + resp_reg_idx = QSET_RELEASE_RESP; + + if (qset_id >= SXE2_FUNC_MAX_QSET_ID) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:release qset input qset id err qset id=%d\n", + qset_id); + goto end; + } + + hw_rsrc_clean = + sxe2_get_hw_rsrc_clean_flag(&rdma_dev->rdma_func->ctx_dev); + if (rdma_dev->rdma_func->reset && hw_rsrc_clean) { + ret = -EBUSY; + DRV_RDMA_LOG_DEV_INFO( + "reset is set, bar will not be performed\n"); + goto end; + } + + req_reg_val = + WRITE_BIT(qset_id, RELEASE_QSET_REQ_QSET_ID_M, + RELEASE_QSET_REQ_QSET_ID_S) | + WRITE_BIT(1, RELEASE_QSET_REQ_VLD_M, RELEASE_QSET_REQ_VLD_S); + SXE2_BAR_WRITE_32(req_reg_val, dev->hw_regs[req_reg_idx]); + do { + resp_reg_val = SXE2_BAR_READ_32(dev->hw_regs[resp_reg_idx]); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject release qset resp reg val=0x%x\n", + resp_reg_val); + resp_reg_val = + resp_reg_val | QOS_BAR_RELEASE_INJECT_VAL; + } +#endif + if (READ_BIT(resp_reg_val, RELEASE_QSET_RESP_DONE_M, + RELEASE_QSET_RESP_DONE_S) && + !READ_BIT(resp_reg_val, RELEASE_QSET_RESP_ERR_M, + RELEASE_QSET_RESP_ERR_S)) { + req_reg_val = WRITE_BIT(0, RELEASE_QSET_RESP_DONE_M, + RELEASE_QSET_RESP_DONE_S); + SXE2_BAR_WRITE_32(req_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } else if (READ_BIT(resp_reg_val, RELEASE_QSET_RESP_DONE_M, + RELEASE_QSET_RESP_DONE_S) && + READ_BIT(resp_reg_val, RELEASE_QSET_RESP_ERR_M, + RELEASE_QSET_RESP_ERR_S)) { + DRV_RDMA_LOG_DEV_ERR("qos:bar reg release qset err\n"); + err_code = READ_BIT(resp_reg_val, + RELEASE_QSET_RESP_ERR_CODE_M, + RELEASE_QSET_RESP_ERR_CODE_S); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + err_code = inject_err_code; + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject release qset errcode=%d\n", + err_code); + } +#endif + DRV_RDMA_LOG_DEV_ERR( + "qos:err code:%u err type is %s\n", + sxe2_qos_release_qset_err_code[err_code] + .err_code, + sxe2_qos_release_qset_err_code[err_code] + .err_mean); + if (err_code == QSET_FUNC_NOT_MATCH) { + ret = sxe2_qos_bar_query_qset(dev, qset_id, + &func_id); + if (ret == SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos: qset id %u bind func id %u\n", + qset_id, func_id); + } + } + + resp_reg_val = WRITE_BIT(0, RELEASE_QSET_RESP_DONE_M, + RELEASE_QSET_RESP_DONE_S) | + WRITE_BIT(0, RELEASE_QSET_RESP_ERR_M, + RELEASE_QSET_RESP_ERR_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; 
+ } + cond_resched(); + } while ((--pull) != 0); + + if (pull == 0) { + DRV_RDMA_LOG_DEV_ERR( + "qos:release qset resp reg not done, reg val=0x%x\n", + resp_reg_val); + } +end: + return; +} + +static int sxe2_qos_bar_qp_bind_qset(struct sxe2_rdma_ctx_dev *dev, u32 qpn, + u32 qset_id, bool bind) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 pull = QOS_RELEASE_QSET_REQ_PULL; + u32 req_reg_val; + u32 resp_reg_val; + u32 req_reg_idx; + u32 resp_reg_idx; + u32 err_code; + u32 cmd; + u32 func_id; +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + u8 inject_err_code = + rdma_dev->rdma_func->inject_qos.qp_bind_qset_err_code; +#endif + + req_reg_idx = QSET_QP_BIND_REQ; + resp_reg_idx = QSET_QP_BIND_RESP; + + if (qset_id >= SXE2_FUNC_MAX_QSET_ID || qpn >= SXE2_FUNC_MAX_QPN) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:qp qset input qset id err qset id=%d, qpn=%d\n", + qset_id, qpn); + goto end; + } + + if (rdma_dev->rdma_func->reset) { + ret = -EBUSY; + DRV_RDMA_LOG_DEV_INFO( + "reset is set, bar will not be performed\n"); + goto end; + } + + if (bind) { + cmd = QOS_QP_BIND_QSET_CMD; + DRV_RDMA_LOG_DEV_DEBUG("qos:bar reg qp %u bind qset %u\n", qpn, + qset_id); + } else { + cmd = QOS_QP_UNBIND_QSET_CMD; + DRV_RDMA_LOG_DEV_DEBUG("qos:bar reg qp %u unbind qset %u\n", + qpn, qset_id); + } + + req_reg_val = + WRITE_BIT(qpn, QP_BIND_QSET_REQ_QPN_M, QP_BIND_QSET_REQ_QPN_S) | + WRITE_BIT(qset_id, QP_BIND_QSET_REQ_QSET_ID_M, + QP_BIND_QSET_REQ_QSET_ID_S) | + WRITE_BIT(cmd, QP_BIND_QSET_REQ_CMD_M, QP_BIND_QSET_REQ_CMD_S) | + WRITE_BIT(1, QP_BIND_QSET_REQ_VLD_M, QP_BIND_QSET_REQ_VLD_S); + SXE2_BAR_WRITE_32(req_reg_val, dev->hw_regs[req_reg_idx]); + do { + resp_reg_val = SXE2_BAR_READ_32(dev->hw_regs[resp_reg_idx]); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject qp bind qset resp reg val=0x%x\n", + resp_reg_val); + resp_reg_val = + resp_reg_val | QOS_BAR_QP_BIND_INJECT_VAL; + } +#endif + if (READ_BIT(resp_reg_val, QP_BIND_QSET_RESP_DONE_M, + QP_BIND_QSET_RESP_DONE_S) && + !READ_BIT(resp_reg_val, QP_BIND_QSET_RESP_ERR_M, + QP_BIND_QSET_RESP_ERR_S)) { + req_reg_val = WRITE_BIT(0, QP_BIND_QSET_RESP_DONE_M, + QP_BIND_QSET_RESP_DONE_S); + SXE2_BAR_WRITE_32(req_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } else if (READ_BIT(resp_reg_val, QP_BIND_QSET_RESP_DONE_M, + QP_BIND_QSET_RESP_DONE_S) && + READ_BIT(resp_reg_val, QP_BIND_QSET_RESP_ERR_M, + QP_BIND_QSET_RESP_ERR_S)) { + err_code = READ_BIT(resp_reg_val, + QP_BIND_QSET_RESP_ERR_CODE_M, + QP_BIND_QSET_RESP_ERR_CODE_S); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (inject_err_code) { + err_code = inject_err_code; + DRV_RDMA_LOG_DEV_DEBUG( + "qos:inject qp bind qset errcode=%d\n", + err_code); + } +#endif + DRV_RDMA_LOG_DEV_ERR( + "qos:err code:%u err type is %s\n", + sxe2_qos_qp_bind_qset_err_code[err_code] + .err_code, + sxe2_qos_qp_bind_qset_err_code[err_code] + .err_mean); + if (err_code == BIND_QSET_NOT_BIND_THIS_FUNCTION || + err_code == BIND_QP_QSET_NOT_BIND_THIS_FUNCTION || + err_code == UNBIND_QSET_NOT_BIND_THIS_FUNCTION || + err_code == UNBIND_QP_QSET_NOT_BIND_THIS_FUNCTION) { + ret = sxe2_qos_bar_query_qset(dev, qset_id, + &func_id); + if (ret == SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos: qset id %u bind func id %u\n", + qset_id, func_id); + } + } + ret = -EINVAL; + resp_reg_val = WRITE_BIT(0, RELEASE_QSET_RESP_DONE_M, + RELEASE_QSET_RESP_DONE_S) | + WRITE_BIT(0, RELEASE_QSET_RESP_ERR_M, + 
RELEASE_QSET_RESP_ERR_S); + SXE2_BAR_WRITE_32(resp_reg_val, + dev->hw_regs[resp_reg_idx]); + break; + } + cond_resched(); + } while ((--pull) != 0); + + if (pull == 0) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:qp bind qset resp reg not done, reg val=0x%x\n", + resp_reg_val); + } + +end: + return ret; +} + +static bool sxe2_qos_qset_in_use(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri, + u8 qset_idx) +{ + if (!list_empty(&vsi->qos[user_pri].qset[qset_idx].qp_list) && + vsi->qos[user_pri].qset[qset_idx].qset_qp_cnt != 0) { + return true; + } + + return false; +} + +static int sxe2_qos_qset_add_qp(struct sxe2_rdma_ctx_dev *dev, u32 qpn, + u32 qset_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + ret = sxe2_qos_bar_qp_bind_qset(dev, qpn, qset_id, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:bar qp bind qset ret=%d\n", ret); + goto end; + } +end: + return ret; +} + +static int sxe2_qos_qset_rem_qp(struct sxe2_rdma_ctx_dev *dev, u32 qpn, + u32 qset_id) +{ + int ret = SXE2_OK; + + ret = sxe2_qos_bar_qp_bind_qset(dev, qpn, qset_id, false); + if (ret != SXE2_OK) + goto end; + +end: + return ret; +} + +u8 get_pf_num_by_bitmap(u8 qset_pf) +{ + u8 pf_num = 0; + + if (qset_pf == SXE2_RDMA_PF0) + pf_num = QOS_MAX_PORT_NUM_LAG_0; + else if (qset_pf == SXE2_RDMA_PF1) + pf_num = QOS_MAX_PORT_NUM_LAG_1; + + return pf_num; +} + +int sxe2_qos_lan_register_qsets(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = vsi->back_vsi; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct sxe2_rdma_qset *qset; + struct aux_rdma_qset_params qset_params = {}; + struct aux_rdma_multi_qset_params multi_qset_params = {}; + + if (vsi->lag_aa) { + if (!qset1 || !qset2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register multi qset is NULL, ret=%d\n", + ret); + goto end; + } + multi_qset_params.qset_id[QOS_QSET_IDX_0] = + qset1->qset_id; + multi_qset_params.qset_id[QOS_QSET_IDX_1] = + qset2->qset_id; + multi_qset_params.tc[QOS_QSET_IDX_0] = qset1->traffic_class; + multi_qset_params.tc[QOS_QSET_IDX_1] = qset2->traffic_class; + multi_qset_params.vport_id = qset2->vsi_index; + multi_qset_params.num = QOS_MAX_QSET_NUM_PER_USER_PRI; + multi_qset_params.user_pri = qset2->user_pri; + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->alloc_multi_res( + cdev_info, &multi_qset_params); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register multi qset nodes err ret=%d\n", + ret); + goto end; + } + qset1->teid = multi_qset_params + .teid[QOS_QSET_IDX_0]; + qset2->teid = multi_qset_params + .teid[QOS_QSET_IDX_1]; + vsi->qos[qset1->user_pri] + .teid[QOS_QSET_IDX_0] = qset1->teid; + vsi->qos[qset1->user_pri] + .teid[QOS_QSET_IDX_1] = qset2->teid; + qset1->pf_id = get_pf_num_by_bitmap( + multi_qset_params + .qset_port[QOS_QSET_IDX_0]); + qset2->pf_id = get_pf_num_by_bitmap( + multi_qset_params + .qset_port[QOS_QSET_IDX_1]); + } + goto end; + } + + qset = qset1; + qset_params.qset_id = qset->qset_id; + qset_params.tc[QOS_QSET_IDX_0] = qset->traffic_class; + if (vsi->lag_backup) { + qset_params.tc[QOS_QSET_IDX_1] = + cdev_info->qos_info[QOS_QSET_IDX_1].up2tc[qset->user_pri]; + } + qset_params.vport_id = qset->vsi_index; + qset_params.user_pri = qset->user_pri; + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->alloc_res(cdev_info, &qset_params); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan alloc qset node 
err ret=%d\n", ret); + goto end; + } + qset->teid = qset_params.teid; + qset->pf_id = get_pf_num_by_bitmap(qset_params.qset_port); + vsi->qos[qset->user_pri].teid[0] = qset->teid; + } + +end: + return ret; +} + +void sxe2_qos_lan_unregister_qsets(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = vsi->back_vsi; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct aux_rdma_qset_params qset_params = {}; + struct sxe2_rdma_qset *qset; + struct aux_rdma_multi_qset_params multi_qset_params = {}; + + if (qset1 && qset2) { + if (!vsi->lag_aa) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register multi qset is NULL, ret=%d\n", + ret); + goto end; + } + multi_qset_params.qset_id[QOS_QSET_IDX_0] = + qset1->qset_id; + multi_qset_params.qset_id[QOS_QSET_IDX_1] = + qset2->qset_id; + multi_qset_params.tc[QOS_QSET_IDX_0] = qset1->traffic_class; + multi_qset_params.tc[QOS_QSET_IDX_1] = qset2->traffic_class; + multi_qset_params.vport_id = qset2->vsi_index; + multi_qset_params.num = QOS_MAX_QSET_NUM_PER_USER_PRI; + multi_qset_params.user_pri = qset2->user_pri; + multi_qset_params.teid[QOS_QSET_IDX_0] = qset1->teid; + multi_qset_params.teid[QOS_QSET_IDX_1] = qset2->teid; + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->free_multi_res( + cdev_info, &multi_qset_params); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan free multi qset nodes err ret=%d\n", + ret); + } + } + goto end; + } + + qset = qset1; + qset_params.qset_id = qset->qset_id; + qset_params.tc[QOS_QSET_IDX_0] = qset->traffic_class; + if (vsi->lag_backup) { + qset_params.tc[QOS_QSET_IDX_1] = + cdev_info->qos_info[QOS_QSET_IDX_1].up2tc[qset->user_pri]; + } + qset_params.vport_id = vsi->vsi_idx; + qset_params.teid = qset->teid; + qset_params.user_pri = qset->user_pri; + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->free_res(cdev_info, &qset_params); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan unregister qset node err ret=%d\n", + ret); + } + } + +end: + return; +} + +int sxe2_qos_qset_bind_pf_tc(struct sxe2_rdma_ctx_vsi *vsi, u32 qset_id, u8 tc, + bool is_pf, u32 func_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct qos_qset_bind_tc_info info = {}; + + if (qset_id >= SXE2_FUNC_MAX_QSET_ID || tc >= QOS_MAX_TC) { + DRV_RDMA_LOG_DEV_ERR( + "qos:qet bind pf tc input parm err qset id=%u tc=%u", + qset_id, tc); + ret = -EINVAL; + goto end; + } + + if (rdma_dev->rdma_func->reset) { + ret = -EBUSY; + DRV_RDMA_LOG_DEV_INFO( + "reset is set, cdev ops will not be performed\n"); + goto end; + } + + memset(&info, 0, sizeof(info)); + info.func_id = cpu_to_le32(func_id); + info.is_pf = cpu_to_le32((u32)is_pf); + info.qset_id = cpu_to_le32(qset_id); + info.tc = cpu_to_le32((u32)tc); + + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_QET_BIND_TC, + (u8 *)&info, (u16)sizeof(info), + NULL, 0); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("qos:aq send qset bind tc err ret=%d\n", + ret); + goto end; + } + +end: + return ret; +} + +int sxe2_qos_qset_bind_pf_tc_bond(struct sxe2_rdma_ctx_vsi *vsi, u32 qset1_id, + u32 qset2_id, u8 tc_idx0, u8 tc_idx1, bool is_pf, + u32 qset1_pf_id, u32 qset2_pf_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct 
aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + struct qos_qset_bind_tc_info info = {}; + + if ((qset1_id >= SXE2_FUNC_MAX_QSET_ID) || (tc_idx0 >= QOS_MAX_TC) || + (qset2_id >= SXE2_FUNC_MAX_QSET_ID) || (tc_idx1 >= QOS_MAX_TC)) { + DRV_RDMA_LOG_DEV_ERR( + "qos:qet bind pf tc input parm err qset1 id=%u, qset2 id=%u, tc1=%u, tc2=%u", + qset1_id, qset2_id, tc_idx0, tc_idx1); + ret = -EINVAL; + goto end; + } + + if (rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEV_INFO( + "reset is set, cdev ops will not be performed\n"); + goto end; + } + + memset(&info, 0, sizeof(info)); + info.func_id = cpu_to_le32(qset1_pf_id); + info.is_pf = cpu_to_le32((u32)is_pf); + info.qset_id = cpu_to_le32(qset1_id); + info.tc = cpu_to_le32((u32)tc_idx0); + info.tc_id2 = cpu_to_le32((u32)tc_idx1); + info.func_id2 = cpu_to_le32(qset2_pf_id); + info.qset_id2 = cpu_to_le32(qset2_id); + info.is_bond_aa = cpu_to_le32((u32)QOS_IS_BOND_AA); + + ret = sxe2_rdma_adminq_send(cdev_info, SXE2_CMD_RDMA_QET_BIND_TC, + (u8 *)&info, (u16)sizeof(info), + NULL, 0); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("qos:aq send qset bind tc err ret=%d\n", + ret); + goto end; + } + +end: + return ret; +} + +int sxe2_qos_register_qset(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_vchnl_manage_qet_node_info qset_info; + u8 qset_idx = 0; + u8 active_pf = 0; + u8 active_pf_tc = 0; + + mutex_lock(&vsi->qos[user_pri].qos_mutex); + if (vsi->qos[user_pri].valid) { + DRV_RDMA_LOG_DEV_DEBUG("qos:user pri is valid\n"); + goto end; + } + + ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_qset, + rdma_func->max_qsets, + &vsi->qos[user_pri].qset[qset_idx].qset_num, + &rdma_func->next_qset); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:alloc qset num err\n"); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("qos:user pri %u tclass=%u alloc qset num=%u\n", + user_pri, vsi->qos[user_pri].qset[0].traffic_class, + vsi->qos[user_pri].qset[qset_idx].qset_num); + ret = sxe2_qos_bar_apply_qset( + dev, &vsi->qos[user_pri].qset[qset_idx].qset_id); + if (ret != SXE2_OK) + goto qset_apply_err; + vsi->qos[user_pri].qset[qset_idx].vsi_index = vsi->vsi_idx; + vsi->qos[user_pri].qset[qset_idx].user_pri = user_pri; + + if (dev->privileged) { + ret = vsi->register_qsets( + vsi, &vsi->qos[user_pri].qset[qset_idx], NULL); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register qset node err ret=%d\n", ret); + goto lan_register_err; + } + if (vsi->lag_backup) { + active_pf = vsi->qos[user_pri].qset[qset_idx].pf_id; + active_pf_tc = vsi->qos[user_pri].qset[active_pf].traffic_class; + ret = sxe2_qos_qset_bind_pf_tc( + vsi, vsi->qos[user_pri].qset[qset_idx].qset_id, + active_pf_tc, true, + vsi->qos[user_pri].qset[qset_idx].pf_id); + } else { + ret = sxe2_qos_qset_bind_pf_tc( + vsi, vsi->qos[user_pri].qset[qset_idx].qset_id, + vsi->qos[user_pri].qset[qset_idx].traffic_class, true, + rdma_dev->rdma_func->pf_id); + } + + if (ret != SXE2_OK) + goto qet_bind_tc_err; + } else { + qset_info.qset_id = vsi->qos[user_pri].qset[qset_idx].qset_id; + qset_info.user_pri = user_pri; + qset_info.add = true; + + ret = sxe2_vchnl_req_manage_qet_node(dev, &qset_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos: vf lan register qset node err ret=%d\n", + ret); + goto lan_register_err; + } + } + + vsi->qos[user_pri].qset[qset_idx].qset_qp_cnt = 0; + 
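+	/* apply, register and bind all succeeded: publish this user priority */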
vsi->qos[user_pri].qset[qset_idx].register_flag = true; + vsi->qos[user_pri].qp_cnt = 0; + vsi->qos[user_pri].valid = true; + goto end; + +qet_bind_tc_err: + vsi->unregister_qsets(vsi, &vsi->qos[user_pri].qset[qset_idx], NULL); +lan_register_err: + sxe2_qos_bar_release_qset(dev, + vsi->qos[user_pri].qset[qset_idx].qset_id); +qset_apply_err: + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_qset, + vsi->qos[user_pri].qset[qset_idx].qset_num); +end: + mutex_unlock(&vsi->qos[user_pri].qos_mutex); + return ret; +} + +static void sxe2_qos_lag_setup_qset_node(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset_node, + bool first_node) +{ + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (first_node) { + if ((vsi->lag_port_bitmap & SXE2_RDMA_BOTH_PF) == + SXE2_RDMA_PF1) { + vsi->primary_port_migrated = true; + } else { + vsi->primary_port_migrated = false; + } + } else { + if ((vsi->lag_port_bitmap & SXE2_RDMA_BOTH_PF) == + SXE2_RDMA_PF0) { + vsi->secondary_port_migrated = true; + } else { + vsi->secondary_port_migrated = false; + } + } + DRV_RDMA_LOG_DEV_DEBUG( + "first_node %d, vsi->lag_port_bitmap %u, qset_node->active_port %u,\n" + "\tprimary_port_migrated%d, secondary_port_migrated %d\n", + first_node, vsi->lag_port_bitmap, qset_node->active_port, + vsi->primary_port_migrated, vsi->secondary_port_migrated); +} + +int sxe2_qos_register_qset_bond(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + u8 qset_idx = QOS_QSET_IDX_0; + + if (!dev->privileged) { + DRV_RDMA_LOG_DEV_ERR("qos:vf not support bond\n"); + ret = -EPERM; + goto end; + } + + mutex_lock(&vsi->qos[user_pri].qos_mutex); + if (vsi->qos[user_pri].valid) { + DRV_RDMA_LOG_DEV_DEBUG("qos:user pri is valid\n"); + goto end; + } + + for (qset_idx = QOS_QSET_IDX_0; + qset_idx < QOS_MAX_QSET_NUM_PER_USER_PRI; qset_idx++) { + ret = sxe2_kalloc_rsrc( + rdma_func, rdma_func->allocated_qset, + rdma_func->max_qsets, + &vsi->qos[user_pri].qset[qset_idx].qset_num, + &rdma_func->next_qset); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:alloc qset num err, qset_idx %d\n", + qset_idx); + qset_idx--; + goto lan_register_err; + } + DRV_RDMA_LOG_DEV_DEBUG( + "qos:qset_idx %d user pri %u tclass=%u alloc qset num=%u\n", + qset_idx, user_pri, vsi->qos[user_pri].qset[qset_idx].traffic_class, + vsi->qos[user_pri].qset[qset_idx].qset_num); + ret = sxe2_qos_bar_apply_qset( + dev, &vsi->qos[user_pri].qset[qset_idx].qset_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:bar reg qset apply err ret=%d, qset_idx %d\n", + ret, qset_idx); + sxe2_kfree_rsrc( + rdma_func, rdma_func->allocated_qset, + vsi->qos[user_pri].qset[qset_idx].qset_num); + qset_idx--; + goto lan_register_err; + } + vsi->qos[user_pri].qset[qset_idx].vsi_index = vsi->vsi_idx; + vsi->qos[user_pri].qset[qset_idx].user_pri = user_pri; + + mutex_lock(&vsi->dev->lag_mutex); + if (qset_idx == QOS_QSET_IDX_0) { + sxe2_qos_lag_setup_qset_node( + vsi, &vsi->qos[user_pri].qset[qset_idx], true); + } else { + sxe2_qos_lag_setup_qset_node( + vsi, &vsi->qos[user_pri].qset[qset_idx], false); + } + mutex_unlock(&vsi->dev->lag_mutex); + } + + ret = vsi->register_qsets( + vsi, &vsi->qos[user_pri].qset[QOS_QSET_IDX_0], + &vsi->qos[user_pri].qset[QOS_QSET_IDX_1]); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:lan register qset node err ret=%d\n", + ret); 
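+			/* unwind: release the bar-applied qsets and free their qset numbers */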
+			goto lan_register_err;
+		}
+
+	ret = sxe2_qos_qset_bind_pf_tc_bond(
+		vsi, vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_id,
+		vsi->qos[user_pri].qset[QOS_QSET_IDX_1].qset_id,
+		vsi->qos[user_pri].qset[QOS_QSET_IDX_0].traffic_class,
+		vsi->qos[user_pri].qset[QOS_QSET_IDX_1].traffic_class,
+		true, vsi->qos[user_pri].qset[QOS_QSET_IDX_0].pf_id,
+		vsi->qos[user_pri].qset[QOS_QSET_IDX_1].pf_id);
+	if (ret != SXE2_OK)
+		goto qet_bind_tc_err;
+
+	vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_qp_cnt = 0;
+	vsi->qos[user_pri].qset[QOS_QSET_IDX_1].qset_qp_cnt = 0;
+	vsi->qos[user_pri].qset[QOS_QSET_IDX_0].register_flag = true;
+	vsi->qos[user_pri].qset[QOS_QSET_IDX_1].register_flag = true;
+	vsi->qos[user_pri].qp_cnt = 0;
+	vsi->qos[user_pri].valid = true;
+
+	goto end;
+
+qet_bind_tc_err:
+	vsi->unregister_qsets(
+		vsi, &vsi->qos[user_pri].qset[QOS_QSET_IDX_0],
+		&vsi->qos[user_pri].qset[QOS_QSET_IDX_1]);
+lan_register_err:
+	/* qset_idx == QOS_MAX_QSET_NUM_PER_USER_PRI means both qsets were
+	 * applied; otherwise it already points at the last applied index.
+	 * The u8 index wraps past zero, which terminates the loop.
+	 */
+	if (qset_idx == QOS_MAX_QSET_NUM_PER_USER_PRI)
+		qset_idx--;
+	while (qset_idx < QOS_MAX_QSET_NUM_PER_USER_PRI) {
+		sxe2_qos_bar_release_qset(
+			dev, vsi->qos[user_pri].qset[qset_idx].qset_id);
+		sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_qset,
+				vsi->qos[user_pri].qset[qset_idx].qset_num);
+		qset_idx--;
+	}
+end:
+	mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+	return ret;
+}
+
+void sxe2_qos_unregister_qset(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rdma_ctx_dev *dev = vsi->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+	struct sxe2_vchnl_manage_qet_node_info qset_info;
+
+	mutex_lock(&vsi->qos[user_pri].qos_mutex);
+	if (!vsi->qos[user_pri].valid)
+		goto end;
+
+	if (sxe2_qos_qset_in_use(vsi, user_pri, QOS_QSET_IDX_0))
+		goto end;
+
+	if (!vsi->qos[user_pri].qset[QOS_QSET_IDX_0].register_flag) {
+		DRV_RDMA_LOG_DEV_INFO(
+			"qos: qset not registered to tx scheduler, skip unregister\n");
+		goto release_qset_bar;
+	}
+	if (dev->privileged) {
+		vsi->unregister_qsets(
+			vsi, &vsi->qos[user_pri].qset[QOS_QSET_IDX_0], NULL);
+	} else {
+		qset_info.qset_id =
+			vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_id;
+		qset_info.user_pri = user_pri;
+		qset_info.add = false;
+
+		ret = sxe2_vchnl_req_manage_qet_node(dev, &qset_info);
+		if (ret != SXE2_OK)
+			DRV_RDMA_LOG_DEV_ERR(
+				"qos:vf unregister qset node err ret=%d\n",
+				ret);
+	}
+release_qset_bar:
+	sxe2_qos_bar_release_qset(
+		dev, vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_id);
+	sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_qset,
+			vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_num);
+	vsi->qos[user_pri].qset[QOS_QSET_IDX_0].register_flag = false;
+	vsi->qos[user_pri].valid = false;
+
+end:
+	mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+}
+
+void sxe2_qos_unregister_qset_bond(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri)
+{
+	struct sxe2_rdma_ctx_dev *dev = vsi->dev;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+	struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+
+	mutex_lock(&vsi->qos[user_pri].qos_mutex);
+
+	if (!vsi->qos[user_pri].valid)
+		goto end;
+
+	if (sxe2_qos_qset_in_use(vsi, user_pri, QOS_QSET_IDX_0) ||
+	    sxe2_qos_qset_in_use(vsi, user_pri, QOS_QSET_IDX_1)) {
+		goto end;
+	}
+
+	if (vsi->qos[user_pri].qp_cnt != 0) {
+		DRV_RDMA_LOG_DEV_ERR("qos: qp cnt %u\n",
+				     vsi->qos[user_pri].qp_cnt);
+		goto end;
+	}
+	vsi->unregister_qsets(
+		vsi, &vsi->qos[user_pri].qset[QOS_QSET_IDX_0],
+		&vsi->qos[user_pri].qset[QOS_QSET_IDX_1]);
+	sxe2_qos_bar_release_qset(
+		dev, vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_id);
+	sxe2_qos_bar_release_qset(
+		dev, vsi->qos[user_pri].qset[QOS_QSET_IDX_1].qset_id);
+	sxe2_kfree_rsrc(
+		rdma_func, 
rdma_func->allocated_qset, + vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_num); + sxe2_kfree_rsrc( + rdma_func, rdma_func->allocated_qset, + vsi->qos[user_pri].qset[QOS_QSET_IDX_1].qset_num); + vsi->qos[user_pri].valid = false; + +end: + mutex_unlock(&vsi->qos[user_pri].qos_mutex); +} + +int sxe2_qos_query_qset_bind_func(struct sxe2_rdma_ctx_vsi *vsi, u32 qset_id, + u32 *func_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + ret = sxe2_qos_bar_query_qset(dev, qset_id, func_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:bar query qset ret=%d\n", ret); + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG("qos:qset %u bind function %u ret=%d\n", qset_id, + *func_id, ret); + +end: + return ret; +} + +int sxe2_qos_qp_add_qos(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u8 qset_idx = 0; + + if (qp->on_qoslist) { + DRV_RDMA_LOG_DEV_WARN("qos:qp already in qp list\n"); + goto end; + } + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + if (!vsi->qos[qp->user_pri].valid) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("qos:qp add qos qset[%u] is invalid\n", + qset_idx); + goto unlock; + } + + if (!vsi->qos[qp->user_pri].qset[qset_idx].register_flag) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "qos:qp add qos qset[%u] register_flag is invalid\n", + qset_idx); + goto unlock; + } + + DRV_RDMA_LOG_DEV_DEBUG("qos:qp %u add user pri %u qset id=%u\n", + qp->qp_common.qpn, qp->user_pri, + vsi->qos[qp->user_pri].qset[qset_idx].qset_id); + ret = sxe2_qos_qset_add_qp( + dev, qp->qp_common.qpn, + vsi->qos[qp->user_pri].qset[qset_idx].qset_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:qset add qp err ret=%d\n", ret); + goto unlock; + } + vsi->qos[qp->user_pri].qset[qset_idx].qset_qp_cnt++; + list_add(&qp->list, &vsi->qos[qp->user_pri].qset[qset_idx].qp_list); + qp->on_qoslist = true; + vsi->qos[qp->user_pri].qp_cnt++; + qp->qset_idx = QOS_QSET_IDX_0; + +unlock: + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +end: + return ret; +} + +int sxe2_qos_qp_add_qos_bond(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (qp->on_qoslist) { + DRV_RDMA_LOG_DEV_WARN("qos:qp already in qp list\n"); + goto end; + } + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + qp->qset_idx ? atomic_inc(&vsi->port2_qp_cnt) : + atomic_inc(&vsi->port1_qp_cnt); + + DRV_RDMA_LOG_DEV_DEBUG( + "qos:qp %u, add user pri %u, qset_idx %u, qset id=%u\n", + qp->qp_common.qpn, qp->user_pri, qp->qset_idx, + vsi->qos[qp->user_pri].qset[qp->qset_idx].qset_id); + ret = sxe2_qos_qset_add_qp( + dev, qp->qp_common.qpn, + vsi->qos[qp->user_pri].qset[qp->qset_idx].qset_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:qset add qp err ret=%d\n", ret); + goto unlock; + } + vsi->qos[qp->user_pri].qset[qp->qset_idx].qset_qp_cnt++; + list_add(&qp->list, &vsi->qos[qp->user_pri].qset[qp->qset_idx].qp_list); + qp->on_qoslist = true; + vsi->qos[qp->user_pri].qp_cnt++; + +unlock: + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +end: + return ret; +} + +int sxe2_qos_qp_rem_qos(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u8 qset_idx = (qp->qset_idx) ? 
QOS_QSET_IDX_1 : QOS_QSET_IDX_0;
+
+	DRV_RDMA_LOG_DEV_DEBUG("qos:qp remove user pri=%u qpn=%u qset idx=%u\n",
+			       qp->user_pri, qp->qp_common.qpn, qset_idx);
+	if (!qp->on_qoslist) {
+		DRV_RDMA_LOG_DEV_WARN("qos:qp not in qp list\n");
+		goto end;
+	}
+	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+	if (!vsi->qos[qp->user_pri].valid) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("qos:qp rem qos qset[%u] is invalid\n",
+				     qset_idx);
+		goto unlock;
+	}
+
+	ret = sxe2_qos_qset_rem_qp(
+		dev, qp->qp_common.qpn,
+		vsi->qos[qp->user_pri].qset[qset_idx].qset_id);
+
+	vsi->qos[qp->user_pri].qset[qset_idx].qset_qp_cnt--;
+	list_del(&qp->list);
+	qp->on_qoslist = false;
+	vsi->qos[qp->user_pri].qp_cnt--;
+
+	if (vsi->lag_aa) {
+		qp->qset_idx ? atomic_dec(&vsi->port2_qp_cnt) :
+			       atomic_dec(&vsi->port1_qp_cnt);
+	}
+
+unlock:
+	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+end:
+	return ret;
+}
+
+void sxe2_qos_remove_all_qset(struct sxe2_rdma_ctx_vsi *vsi)
+{
+	u8 i;
+
+	for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) {
+		if (vsi->lag_aa)
+			sxe2_qos_unregister_qset_bond(vsi, i);
+		else
+			sxe2_qos_unregister_qset(vsi, i);
+	}
+}
+
+static int sxe2_qos_qset_rebind_pf_tc(struct sxe2_rdma_device *rdma_dev,
+				      struct sxe2_rdma_ctx_vsi *vsi,
+				      u8 qset_idx, u8 pf_idx)
+{
+	int ret = 0;
+	u8 user_pri = 0;
+
+	for (user_pri = 0; user_pri < SXE2_MAX_USER_PRIORITY; user_pri++) {
+		if (!vsi->qos[user_pri].valid)
+			continue;
+
+		ret = sxe2_qos_qset_bind_pf_tc(
+			vsi, vsi->qos[user_pri].qset[qset_idx].qset_id,
+			vsi->qos[user_pri].qset[qset_idx].traffic_class, true,
+			pf_idx);
+		if (ret) {
+			DRV_RDMA_LOG_DEV_ERR("qset bind tc err, ret %d\n", ret);
+			goto end;
+		}
+	}
+
+end:
+	return ret;
+}
+
+static int sxe2_qos_qset_node_move(struct sxe2_rdma_device *rdma_dev,
+				   struct sxe2_rdma_ctx_vsi *vsi)
+{
+	int ret = 0;
+	u8 qset_idx = 0;
+	u8 pf_idx = 0;
+
+	mutex_lock(&vsi->dev->lag_mutex);
+
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"qset node move, lag_port_bitmap %d, primary_port_migrated %d,\n"
+		"\tsecondary_port_migrated %d.\n",
+		vsi->lag_port_bitmap, vsi->primary_port_migrated,
+		vsi->secondary_port_migrated);
+
+	if ((vsi->lag_port_bitmap & SXE2_RDMA_BOTH_PF) == SXE2_RDMA_BOTH_PF) {
+		if (vsi->primary_port_migrated) {
+			vsi->primary_port_migrated = false;
+			qset_idx = QOS_QSET_IDX_0;
+			pf_idx = QOS_MAX_PORT_NUM_LAG_0;
+			ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi,
+							 qset_idx, pf_idx);
+		} else if (vsi->secondary_port_migrated) {
+			vsi->secondary_port_migrated = false;
+			qset_idx = QOS_QSET_IDX_1;
+			pf_idx = QOS_MAX_PORT_NUM_LAG_1;
+			ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi,
+							 qset_idx, pf_idx);
+		}
+	} else if (vsi->lag_port_bitmap == SXE2_RDMA_PF1) {
+		if (!vsi->primary_port_migrated) {
+			vsi->primary_port_migrated = true;
+			qset_idx = QOS_QSET_IDX_0;
+			pf_idx = QOS_MAX_PORT_NUM_LAG_1;
+			ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi,
+							 qset_idx, pf_idx);
+		}
+
+		if (vsi->secondary_port_migrated) {
+			vsi->secondary_port_migrated = false;
+			qset_idx = QOS_QSET_IDX_1;
+			pf_idx = QOS_MAX_PORT_NUM_LAG_1;
+			ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi,
+							 qset_idx, pf_idx);
+		}
+	} else if (vsi->lag_port_bitmap == SXE2_RDMA_PF0) {
+		if (vsi->primary_port_migrated) {
+			vsi->primary_port_migrated = false;
+			qset_idx = QOS_QSET_IDX_0;
+			pf_idx = QOS_MAX_PORT_NUM_LAG_0;
+			ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi,
+							 qset_idx, pf_idx);
+		}
+
+		if (!vsi->secondary_port_migrated) {
+			vsi->secondary_port_migrated = true;
+			qset_idx = QOS_QSET_IDX_1;
+			pf_idx = 
QOS_MAX_PORT_NUM_LAG_0; + ret = sxe2_qos_qset_rebind_pf_tc(rdma_dev, vsi, + qset_idx, pf_idx); + } + } + + mutex_unlock(&vsi->dev->lag_mutex); + return ret; +} + +static int sxe2_qos_qset_node_failover(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_ctx_vsi *vsi) +{ + int ret = 0; + u8 user_pri = 0; + int active_port = 0; + u8 dest_tc = 0; + struct aux_core_dev_info *cdev_info = rdma_dev->rdma_func->cdev; + + if (vsi->lag_port_bitmap == SXE2_RDMA_PF0) { + active_port = QOS_MAX_PORT_NUM_LAG_0; + } else if (vsi->lag_port_bitmap == SXE2_RDMA_PF1) { + active_port = QOS_MAX_PORT_NUM_LAG_1; + } else { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("qos: lag_port_bitmap %d, ret=%d\n", + vsi->lag_port_bitmap, ret); + } + + for (user_pri = 0; user_pri < SXE2_MAX_USER_PRIORITY; user_pri++) { + if (!vsi->qos[user_pri].valid) + continue; + dest_tc = cdev_info->qos_info[active_port].up2tc[user_pri]; + ret = sxe2_qos_qset_bind_pf_tc( + vsi, vsi->qos[user_pri].qset[QOS_QSET_IDX_0].qset_id, + dest_tc, true, active_port); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("qos:qset bind pf tc err ret=%d\n", + ret); + goto end; + } + } + +end: + return ret; +} + +void sxe2_rdma_qos_failover_complete(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + + if (rdma_dev->vsi.lag_aa) { + ret = sxe2_qos_qset_node_move(rdma_dev, &rdma_dev->vsi); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lag_aa move qset err ret=%d\n", ret); + } + } else { + ret = sxe2_qos_qset_node_failover(rdma_dev, &rdma_dev->vsi); + if (ret) + DRV_RDMA_LOG_DEV_ERR("qos:move qset err ret=%d\n", ret); + } +} + +void sxe2_rdma_update_qos_info(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2p) +{ + u8 i; + u8 index; + + for (index = 0; index < QOS_MAX_QSET_NUM_PER_USER_PRI; index++) { + vsi->qos_rel_bw[index] = l2p[index].vsi_rel_bw; + vsi->qos_prio_type[index] = l2p[index].vsi_prio_type; + vsi->dscp_mode[index] = l2p[index].dscp_mode; + if (l2p[index].dscp_mode) + memcpy(vsi->dscp_map[index], + l2p[index].dscp_map, sizeof(vsi->dscp_map[index])); + + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + vsi->qos[i].qset[index].traffic_class = l2p[index].up2tc[i]; + vsi->qos[i].rel_bw[index] = + l2p[index].tc_info[vsi->qos[i].qset[index].traffic_class].rel_bw; + vsi->qos[i].prio_type[index] = + l2p[index].tc_info[vsi->qos[i].qset[index].traffic_class].prio_type; + } + } + +} + +void sxe2_rdma_qos_remove_qsets(struct sxe2_rdma_ctx_vsi *vsi, bool *change_list, + struct sxe2_rdma_l2params *l2params) +{ + u8 i; + bool up2tc_changed; + + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + if (vsi->lag_aa) + up2tc_changed = + (vsi->qos[i].qset[0].traffic_class != l2params[0].up2tc[i]) || + (vsi->qos[i].qset[1].traffic_class != l2params[1].up2tc[i]); + else + up2tc_changed = + vsi->qos[i].qset[0].traffic_class != l2params[0].up2tc[i]; + + mutex_lock(&vsi->qos[i].qos_mutex); + if (!vsi->qos[i].valid || !up2tc_changed) { + mutex_unlock(&vsi->qos[i].qos_mutex); + change_list[i] = false; + continue; + } + change_list[i] = true; + if (vsi->qos[i].valid) { + if (vsi->lag_aa) + vsi->unregister_qsets( + vsi, &vsi->qos[i].qset[QOS_QSET_IDX_0], + &vsi->qos[i].qset[QOS_QSET_IDX_1]); + else + vsi->unregister_qsets(vsi, &vsi->qos[i].qset[0], NULL); + } + mutex_unlock(&vsi->qos[i].qos_mutex); + } +} + +void sxe2_rdma_qos_add_qsets(struct sxe2_rdma_ctx_vsi *vsi, bool *change_list, + struct sxe2_rdma_l2params *l2params) +{ + u8 i; + int ret; + struct sxe2_rdma_ctx_dev *dev = vsi->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u8 active_pf = 0; + u8 
active_pf_tc = 0; + + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + if (!change_list[i]) + continue; + + mutex_lock(&vsi->qos[i].qos_mutex); + if (vsi->lag_aa) { + ret = vsi->register_qsets(vsi, &vsi->qos[i].qset[QOS_QSET_IDX_0], + &vsi->qos[i].qset[QOS_QSET_IDX_1]); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register qset node err ret=%d\n", ret); + vsi->qos[i].qset[QOS_QSET_IDX_0].register_flag = false; + vsi->qos[i].qset[QOS_QSET_IDX_1].register_flag = false; + mutex_unlock(&vsi->qos[i].qos_mutex); + continue; + } + ret = sxe2_qos_qset_bind_pf_tc_bond( + vsi, vsi->qos[i].qset[QOS_QSET_IDX_0].qset_id, + vsi->qos[i].qset[QOS_QSET_IDX_1].qset_id, + vsi->qos[i].qset[QOS_QSET_IDX_0].traffic_class, + vsi->qos[i].qset[QOS_QSET_IDX_1].traffic_class, + true, vsi->qos[i].qset[QOS_QSET_IDX_0].pf_id, + vsi->qos[i].qset[QOS_QSET_IDX_1].pf_id); + if (ret != SXE2_OK) { + vsi->qos[i].qset[QOS_QSET_IDX_0].register_flag = false; + vsi->qos[i].qset[QOS_QSET_IDX_1].register_flag = false; + } + } else { + ret = vsi->register_qsets(vsi, &vsi->qos[i].qset[0], + NULL); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:lan register qset node err ret=%d\n", + ret); + vsi->qos[i].qset[0].register_flag = false; + mutex_unlock(&vsi->qos[i].qos_mutex); + continue; + } + if (vsi->lag_backup) { + active_pf = vsi->qos[i].qset[0].pf_id; + active_pf_tc = vsi->qos[i].qset[active_pf].traffic_class; + ret = sxe2_qos_qset_bind_pf_tc( + vsi, vsi->qos[i].qset[0].qset_id, + active_pf_tc, + vsi->dev->privileged, + vsi->qos[i].qset[0].pf_id); + } else { + ret = sxe2_qos_qset_bind_pf_tc( + vsi, vsi->qos[i].qset[0].qset_id, + vsi->qos[i].qset[0].traffic_class, + vsi->dev->privileged, + rdma_dev->rdma_func->pf_id); + } + + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "qos:qset bind pf(%u) tc(%u) err ret=%d\n", + rdma_dev->rdma_func->pf_id, + vsi->qos[i].qset[0].traffic_class, ret); + vsi->qos[i].qset[0].register_flag = false; + } + } + mutex_unlock(&vsi->qos[i].qos_mutex); + } +} + +void sxe2_rdma_qos_move_qset(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2params) +{ + bool change_list[SXE2_MAX_USER_PRIORITY] = {}; + + sxe2_rdma_qos_remove_qsets(vsi, change_list, l2params); + + sxe2_rdma_update_qos_info(vsi, l2params); + + sxe2_rdma_qos_add_qsets(vsi, change_list, l2params); +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.h new file mode 100644 index 0000000000000000000000000000000000000000..8214afdd0977bc4a507eff95ee11c7f93a52e0d7 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_qos.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
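+ * QoS queue-set (qset) management for the sxe2 RDMA driver: BAR-register
+ * handshakes, QP-to-qset binding and LAG/bond qset handling.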
+ * + * @file: sxe2_drv_rdma_qos.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_RDMA_QOS_H +#define SXE2_DRV_RDMA_QOS_H + +#include "sxe2_drv_rdma_common.h" + +#define READ_BIT(val, mask, shift) ((u32)(((val) & (mask)) >> (shift))) +#define WRITE_BIT(val, mask, shift) (((val) << (shift)) & (mask)) + +#define SXE2_OK 0 +#define SXE2_FUNC_MAX_QSET_ID 512 +#define SXE2_FUNC_MAX_QPN 262144 +#define QOS_APPLY_QSET_REQ_PULL 10 +#define QOS_QUERY_QSET_REQ_PULL 10 +#define QOS_RELEASE_QSET_REQ_PULL 10 +#define QOS_QP_BIND_QSET_REQ_PULL 10 +#define QOS_QP_BIND_QSET_CMD 1 +#define QOS_QP_UNBIND_QSET_CMD 0 +#define QOS_QUERY_QSET_INVALID_FUNC_ID 0xFFFFFFFF +#define QOS_MAX_TC 8 +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +#define QOS_BAR_APPLY_INJECT_VAL 0x600 +#define QOS_BAR_RELEASE_INJECT_VAL 0x3 +#define QOS_BAR_QP_BIND_INJECT_VAL 0x3 +#endif + +#define QOS_QSET_IDX_0 0 +#define QOS_QSET_IDX_1 1 +#define QOS_MAX_QSET_NUM_PER_USER_PRI 2 +#define QOS_IS_BOND_AA 1 +#define QOS_MAX_PORT_NUM_LAG_0 0 +#define QOS_MAX_PORT_NUM_LAG_1 1 + +#define APPLY_QSET_REQ_REG_VLD_S 0 +#define APPLY_QSET_REQ_REG_VLD_M BIT(0) +#define APPLY_QSET_RESP_DONE_S 9 +#define APPLY_QSET_RESP_DONE_M BIT(9) + +#define APPLY_QSET_RESP_ERR_S 10 +#define APPLY_QSET_RESP_ERR_M BIT(10) + +#define APPLY_QSET_RESP_QSET_ID_S 0 +#define APPLY_QSET_RESP_QSET_ID_M GENMASK(8, 0) + +#define APPLY_QSET_RESP_ERR_CODE_S 2 +#define APPLY_QSET_RESP_ERR_CODE_M GENMASK(12, 11) +#define QUERY_QSET_REQ_QSET_ID_S 0 +#define QUERY_QSET_REQ_QSET_ID_M GENMASK(8, 0) + +#define QUERY_QSET_REQ_VLD_S 9 +#define QUERY_QSET_REQ_VLD_M BIT(9) +#define QUERY_QSET_RESP_FUNC_ID_S 0 +#define QUERY_QSET_RESP_FUNC_ID_M GENMASK(5, 0) + +#define QUERY_QSET_RESP_DONE_S 6 +#define QUERY_QSET_RESP_DONE_M BIT(6) + +#define QUERY_QSET_RESP_ERR_S 7 +#define QUERY_QSET_RESP_ERR_M BIT(7) +#define RELEASE_QSET_REQ_QSET_ID_S 0 +#define RELEASE_QSET_REQ_QSET_ID_M GENMASK(8, 0) + +#define RELEASE_QSET_REQ_VLD_S 9 +#define RELEASE_QSET_REQ_VLD_M BIT(9) +#define RELEASE_QSET_RESP_DONE_S 0 +#define RELEASE_QSET_RESP_DONE_M BIT(0) + +#define RELEASE_QSET_RESP_ERR_S 1 +#define RELEASE_QSET_RESP_ERR_M BIT(1) + +#define RELEASE_QSET_RESP_ERR_CODE_S 2 +#define RELEASE_QSET_RESP_ERR_CODE_M GENMASK(3, 2) +#define QP_BIND_QSET_REQ_QPN_S 0 +#define QP_BIND_QSET_REQ_QPN_M GENMASK(17, 0) + +#define QP_BIND_QSET_REQ_QSET_ID_S 18 +#define QP_BIND_QSET_REQ_QSET_ID_M GENMASK(26, 18) + +#define QP_BIND_QSET_REQ_CMD_S 27 +#define QP_BIND_QSET_REQ_CMD_M BIT(27) + +#define QP_BIND_QSET_REQ_VLD_S 28 +#define QP_BIND_QSET_REQ_VLD_M BIT(28) +#define QP_BIND_QSET_RESP_DONE_S 0 +#define QP_BIND_QSET_RESP_DONE_M BIT(0) + +#define QP_BIND_QSET_RESP_ERR_S 1 +#define QP_BIND_QSET_RESP_ERR_M BIT(1) + +#define QP_BIND_QSET_RESP_ERR_CODE_S 2 +#define QP_BIND_QSET_RESP_ERR_CODE_M GENMASK(5, 2) + +#define QSET_INVALID_CODE 0xFF +#define QSET_MAX_IDX 1 + +enum qos_apply_qset_err_code { + NO_FREE_QSET = 1, + FUNCTION_EXCEED_MAX_LIMIT_QSET = 2, +}; + +enum qos_release_qset_err_code { + QSET_NOT_ALLOCED = 1ul, + QSET_FUNC_NOT_MATCH = 1ul << 1, +}; + +struct qos_qset_bind_tc_info { + u32 func_id; + u32 is_pf; + u32 qset_id; + u32 tc; + u32 func_id2; + u32 qset_id2; + u32 tc_id2; + u32 is_bond_aa; +}; + +enum qos_qp_bind_qset_err_code { + BIND_QP_NOT_BIND_THIS_FUNCTION = 1, + BIND_QSET_NOT_BIND_THIS_FUNCTION = 2, + BIND_QP_QSET_NOT_BIND_THIS_FUNCTION = 3, + + UNBIND_QP_NOT_BIND_THIS_FUNCTION = 4, + UNBIND_QSET_NOT_BIND_THIS_FUNCTION = 5, + 
UNBIND_QP_QSET_NOT_BIND_THIS_FUNCTION = 6, + UNBIND_QP_UNBIND = 7, + UNBIND_QP_NOT_BIND_THIS_QSET = 8, +}; + +struct sxe2_err_code { + char *err_mean; + u8 err_code; +}; + +int sxe2_qos_lan_register_qsets(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + +void sxe2_qos_lan_unregister_qsets(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + +int sxe2_qos_register_qset(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri); + +int sxe2_qos_register_qset_bond(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri); + +void sxe2_qos_unregister_qset(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri); + +void sxe2_qos_unregister_qset_bond(struct sxe2_rdma_ctx_vsi *vsi, u8 user_pri); + +int sxe2_qos_query_qset_bind_func(struct sxe2_rdma_ctx_vsi *vsi, u32 qset_id, + u32 *func_id); + +int sxe2_qos_qp_add_qos(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp); + +int sxe2_qos_qp_add_qos_bond(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp); + +int sxe2_qos_qp_rem_qos(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_ctx_qp *qp); + +void sxe2_qos_remove_all_qset(struct sxe2_rdma_ctx_vsi *vsi); + +int sxe2_qos_qset_bind_pf_tc(struct sxe2_rdma_ctx_vsi *vsi, u32 qset_id, u8 tc, + bool is_pf, u32 func_id); + +int sxe2_qos_qset_bind_pf_tc_bond(struct sxe2_rdma_ctx_vsi *vsi, u32 qset1_id, + u32 qset2_id, u8 tc_idx0, u8 tc_idx1, bool is_pf, + u32 qset1_pf_id, u32 qset2_pf_id); + +void sxe2_rdma_qos_failover_complete(struct sxe2_rdma_device *rdma_dev); + +void sxe2_rdma_update_qos_info(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2p); +void sxe2_rdma_qos_move_qset(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_rdma_l2params *l2params); +u8 get_pf_num_by_bitmap(u8 qset_pf); + +void sxe2_rdma_qos_remove_qsets(struct sxe2_rdma_ctx_vsi *vsi, bool *change_list, + struct sxe2_rdma_l2params *l2params); + +void sxe2_rdma_qos_add_qsets(struct sxe2_rdma_ctx_vsi *vsi, bool *change_list, + struct sxe2_rdma_l2params *l2params); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.c new file mode 100644 index 0000000000000000000000000000000000000000..c746f3ca24af1ed8c45f7905cd09ee52c81bbb9b --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.c @@ -0,0 +1,2358 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
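+ * RCMS resource management: FPM query/modify commands and on-chip object
+ * linear-address layout.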
+ * + * @file: sxe2_drv_rdma_rcms.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_rdma_qos.h" +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_rcms_debugfs.h" +#include +#include +#include + +extern struct mutex func_lock; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +u32 g_inject_fpte_err; +#endif + +struct sxe2_rcms_rsrc_limits g_rsrc_limits_table[] = { + [0] = { + .qp_limit = 0x80, + }, + [1] = { + .qp_limit = 0x400, + }, + [2] = { + .qp_limit = 0x800, + }, + [3] = { + .qp_limit = 0x1000, + }, + [4] = { + .qp_limit = 0x4000, + }, + [5] = { + .qp_limit = 0x10000, + }, + [6] = { + .qp_limit = 0x20000, + }, + [7] = { + .qp_limit = 0x40000, + }, +}; + +int sxe2_rcms_query_fpm_val_cmd(struct sxe2_mq_ctx *mq, u64 scratch, + u16 rcms_fn_id, + struct sxe2_rdma_dma_mem *query_fpm_mem, + bool post_sq, u8 wait_type) +{ + int ret = SXE2_OK; + __le64 *wqe; + + struct sxe2_rcms_query_fpm_value_wqe *query_fpm_wqe; + struct sxe2_rdma_ctx_dev *dev = mq->dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 tail, val, error; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_QUERY_FPM_VAL]) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + MQ_OP_QUERY_FPM_VAL); + ret = -EBUSY; + goto end; + } +#endif + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR("RCMS: query fpm kget next mq wqe err\n"); + ret = -ENOMEM; + goto end; + } + + query_fpm_wqe = (struct sxe2_rcms_query_fpm_value_wqe *)wqe; + query_fpm_wqe->rcms_fuction_id = rcms_fn_id; + query_fpm_wqe->query_buf_pa = + query_fpm_mem->pa >> + SXE2_RCMS_QUERY_BUF_PA_SHIFT; + query_fpm_wqe->op = SXE2_MQ_OP_QUERY_FPM_VAL; + dma_wmb(); + query_fpm_wqe->wqe_valid = mq->polarity; + DRV_RDMA_LOG_DEV_DEBUG("RCMS: query buf pa=%#llx\n", query_fpm_mem->pa); + sxe2_kget_mq_reg_info(mq, &val, &tail, &error); + + if (post_sq) { + sxe2_kpost_mq(mq); + if (wait_type == SXE2_MQ_WAIT_POLL_REGS) { + ret = sxe2_kpoll_mq_registers( + mq, tail, mq->dev->hw_attrs.max_done_count); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: query fpm kpoll mq reg err ret=%d\n", + ret); + } + } else if (wait_type == SXE2_MQ_WAIT_POLL_CQ) { + ret = sxe2_kpoll_mcq(mq, SXE2_MQ_OP_QUERY_FPM_VAL, + NULL); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: query fpm kpoll mcq reg err ret=%d\n", + ret); + } + } + } + +end: + return ret; +} + +static int +sxe2_rcms_query_fpm_val_cmd_complete(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_dma_mem *query_fpm_mem, + u16 rcms_fn_id) +{ + int ret = SXE2_OK; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!query_fpm_mem) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:query fpm complete buffer pointer is null\n"); + ret = -EINVAL; + goto end; + } + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + goto end; + } + + mq_info = &mq_request->info; + mq_request->param = NULL; + mq_info->in.u.query_fpm_val.mq = dev->mq; + mq_info->in.u.query_fpm_val.fpm_val_pa = query_fpm_mem->pa; + mq_info->in.u.query_fpm_val.fpm_val_va = query_fpm_mem->va; + mq_info->in.u.query_fpm_val.rcms_fn_id = rcms_fn_id; + mq_info->mq_cmd = MQ_OP_QUERY_FPM_VAL; + 
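+	/* post immediately; scratch carries the request pointer so the completion can be matched back */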
mq_info->post_mq = 1;
+	mq_info->in.u.query_fpm_val.scratch = (uintptr_t)mq_request;
+
+	ret = sxe2_khandle_mq_cmd(rdma_func, mq_request);
+	sxe2_kput_mq_request(&rdma_func->mq, mq_request);
+
+end:
+	return ret;
+}
+
+static void sxe2_rcms_parse_fpm_query_buf(struct sxe2_rdma_ctx_dev *dev,
+					  __le32 *query_buf,
+					  struct sxe2_rcms_info *rcms_info)
+{
+	struct sxe2_rcms_obj_info *obj_info;
+	u32 i;
+	u32 temp;
+	u32 start_byte = 0;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+
+	if (!query_buf || !rcms_info) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"RCMS:parse fpm query pointer is null buf addr=%p rcms_info=%p\n",
+			query_buf, rcms_info);
+		goto end;
+	}
+
+	obj_info = rcms_info->rcms_obj;
+	for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) {
+		get_32bit_val(query_buf, start_byte, &temp);
+		if (i == SXE2_RCMS_OBJ_ACK_TIMEOUT) {
+			DRV_RDMA_LOG_DEV_DEBUG(
+				"RCMS:func id %u AMO entry size=%u\n",
+				rcms_info->rcms_fn_id, temp);
+			temp = SXE2_RCMS_AMO_PAGE_SIZE;
+		}
+		obj_info[i].size = 1 << temp;
+		start_byte += SXE2_RCMS_PARSE_QUERY_BUF_SHIFT;
+		get_32bit_val(query_buf, start_byte, &obj_info[i].max_cnt);
+		start_byte += SXE2_RCMS_PARSE_QUERY_BUF_SHIFT;
+		DRV_RDMA_LOG_DEV_DEBUG(
+			"RCMS:func id %u obj %d size=%u max_cnt=%u\n",
+			rcms_info->rcms_fn_id, i, obj_info[i].size,
+			obj_info[i].max_cnt);
+	}
+	get_32bit_val(query_buf, SXE2_RCMS_MAX_FPTE_CNT_BUF_OFFSET, &temp);
+	rcms_info->max_fpte_cnt = temp & SXE2_RCMS_MAX_FPTE_CNT_MASK;
+	rcms_info->max_cc_qp_cnt =
+		FIELD_GET(SXE2_RCMS_MAX_CC_QP_CNT_MASK, temp);
+	get_32bit_val(query_buf, SXE2_RCMS_FIRST_FPTE_IDX_BUF_OFFSET, &temp);
+	rcms_info->first_fpte_index = temp & SXE2_RCMS_FIRST_FPTE_IDX_MASK;
+	rcms_info->irrl_ost_num = FIELD_GET(SXE2_RCMS_IRRL_OST_NUM_MASK, temp);
+	rcms_info->ssnt_ost_num = FIELD_GET(SXE2_RCMS_SSNT_OST_NUM_MASK, temp);
+	rcms_info->resp_ost_num = FIELD_GET(SXE2_RCMS_RESP_OST_NUM_MASK, temp);
+	get_32bit_val(query_buf, SXE2_RCMS_CEQS_DB_BUF_OFFSET, &temp);
+	rcms_info->max_ceqs = temp & SXE2_RCMS_MAX_CEQS_MASK;
+	rcms_info->max_db_page_num =
+		FIELD_GET(SXE2_RCMS_MAX_DB_PAGE_NUM_MASK, temp);
+	get_32bit_val(query_buf, SXE2_RCMS_DB_BAR_ADDR_BUF_OFFSET,
+		      &rcms_info->db_bar_addr);
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"RCMS:func id %u max fpte cnt=%u,\n"
+		"\tfirst fpte idx=%u ceqs = %u db page=%u db addr=0x%x\n",
+		rcms_info->rcms_fn_id, rcms_info->max_fpte_cnt,
+		rcms_info->first_fpte_index, rcms_info->max_ceqs,
+		rcms_info->max_db_page_num, rcms_info->db_bar_addr);
+	DRV_RDMA_LOG_DEV_DEBUG(
+		"RCMS:func id %u max_cc_qp_cnt=%u, irrl_ost_num=%u\n"
+		"\tssnt_ost_num = %u resp_ost_num=%u\n",
+		rcms_info->rcms_fn_id, rcms_info->max_cc_qp_cnt,
+		rcms_info->irrl_ost_num, rcms_info->ssnt_ost_num,
+		rcms_info->resp_ost_num);
+end:
+	return;
+}
+
+static int sxe2_rcms_pf_query_fpm_val(struct sxe2_rdma_ctx_dev *dev)
+{
+	int ret = SXE2_OK;
+	struct sxe2_rcms_info *rcms_info;
+	struct sxe2_rdma_dma_mem query_fpm_mem;
+	u8 wait_type;
+	struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev);
+
+	if (!dev->rcms_info) {
+		DRV_RDMA_LOG_DEV_ERR(
+			"RCMS: pf query fpm val rcms info pointer is null\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	rcms_info = dev->rcms_info;
+
+	if (rcms_info->rcms_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id) {
+		DRV_RDMA_LOG_DEV_ERR("RCMS: input function id err func id=%u\n",
+				     rcms_info->rcms_fn_id);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	query_fpm_mem.pa = dev->fpm_query_buf_pa;
+	query_fpm_mem.va = dev->fpm_query_buf;
+	wait_type = SXE2_MQ_WAIT_POLL_REGS;
+
+	ret = 
sxe2_rcms_query_fpm_val_cmd(dev->mq, SXE2_RCMS_MQ_CMD_NO_SCRATCH, + rcms_info->rcms_fn_id, &query_fpm_mem, + true, wait_type); + if (ret != SXE2_OK) + goto end; + + sxe2_rcms_parse_fpm_query_buf(dev, (__le32 *)query_fpm_mem.va, + rcms_info); +end: + return ret; +} + +static u32 sxe2_rcms_est_fpt_entry_cnt(struct sxe2_rcms_info *rcms_info) +{ + u64 size = 0; + u32 fpte_cnt; + u32 i; + + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + if (i != SXE2_RCMS_OBJ_PBLE) { + size += round_up(rcms_info->rcms_obj[i].cnt * + rcms_info->rcms_obj[i].size, + rcms_info->rcms_obj[i + 1].size); + } else { + size += rcms_info->rcms_obj[i].cnt * + rcms_info->rcms_obj[i].size; + } + } + + if (size & SXE2_FPTE_RCMS_2MB_MASK) + fpte_cnt = (u32)(FPT_INDEX_GET(size) + 1); + else + fpte_cnt = (u32)FPT_INDEX_GET(size); + + if (fpte_cnt > SXE2_MAX_FPTE_CNT) + fpte_cnt = SXE2_MAX_FPTE_CNT; + + return fpte_cnt; +} + +static int sxe2_rcms_loop_modify_fpm_val(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rcms_info *rcms_info, + u32 qp_limit_count, u8 rcms_profile) +{ + u32 max_fpte; + u32 qp_wanted = 0; + u32 mr_wanted, pble_wanted; + u32 powerof2; + u32 fpte_needed; + u32 fpte_diff; + u32 loop_count = 0; + struct sxe2_rcms_obj_info *rcms_obj = rcms_info->rcms_obj; + int ret = SXE2_OK; + + max_fpte = rcms_info->max_fpte_cnt; + fpte_needed = sxe2_rcms_est_fpt_entry_cnt(rcms_info); + qp_wanted = min(qp_limit_count, rcms_obj[SXE2_RCMS_OBJ_QP].max_cnt); + mr_wanted = rcms_obj[SXE2_RCMS_OBJ_MR].max_cnt; + pble_wanted = rcms_obj[SXE2_RCMS_OBJ_PBLE].max_cnt; + + do { + ++loop_count; + rcms_obj[SXE2_RCMS_OBJ_QP].cnt = qp_wanted; + rcms_obj[SXE2_RCMS_OBJ_ACK_TIMEOUT].cnt = + min(qp_wanted, rcms_obj[SXE2_RCMS_OBJ_ACK_TIMEOUT].cnt); + rcms_obj[SXE2_RCMS_OBJ_IRRL].cnt = + min(qp_wanted, rcms_obj[SXE2_RCMS_OBJ_IRRL].cnt); + rcms_obj[SXE2_RCMS_OBJ_SSNT].cnt = + min(qp_wanted, rcms_obj[SXE2_RCMS_OBJ_SSNT].cnt); + rcms_obj[SXE2_RCMS_OBJ_CQ].cnt = + min(2 * qp_wanted, rcms_obj[SXE2_RCMS_OBJ_CQ].cnt); + rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt = + min(qp_wanted / 2, rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt); + rcms_obj[SXE2_RCMS_OBJ_MR].cnt = mr_wanted; + rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt = pble_wanted; + fpte_needed = sxe2_rcms_est_fpt_entry_cnt(rcms_info); + if (fpte_needed <= max_fpte) + break; + + fpte_diff = fpte_needed - max_fpte; + if (fpte_diff > 128) { + if (!(loop_count % 2) && qp_wanted > 128) { + qp_wanted /= 2; + } else { + pble_wanted /= 2; + mr_wanted /= 2; + } + continue; + } + if (rcms_profile != SXE2_RCMS_PROFILE_FAVOR_VF && + pble_wanted > + (512 * SXE2_RCMS_FPM_MULTIPLIER * fpte_diff)) { + pble_wanted -= + 256 * SXE2_RCMS_FPM_MULTIPLIER * fpte_diff; + continue; + } else if (pble_wanted > 100 * SXE2_RCMS_FPM_MULTIPLIER) { + pble_wanted -= 10 * SXE2_RCMS_FPM_MULTIPLIER; + } else if (pble_wanted > 16 * SXE2_RCMS_FPM_MULTIPLIER) { + pble_wanted -= SXE2_RCMS_FPM_MULTIPLIER; + } + + if (mr_wanted > SXE2_RCMS_FPM_MULTIPLIER) + mr_wanted -= SXE2_RCMS_FPM_MULTIPLIER; + if (!(loop_count % 10) && qp_wanted > 128) + qp_wanted /= 2; + + } while (loop_count < 40); + if (fpte_needed > max_fpte) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:func id %u modify fpm val err fpte needed=%u max fpte cnt=%u\n", + rcms_info->rcms_fn_id, fpte_needed, max_fpte); + ret = -EINVAL; + goto end; + } + + if (loop_count > 2 && fpte_needed < max_fpte) { + pble_wanted += (max_fpte - fpte_needed) * 256 * + SXE2_RCMS_FPM_MULTIPLIER; + rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt = + min(pble_wanted, rcms_obj[SXE2_RCMS_OBJ_PBLE].max_cnt); + powerof2 = 1; + while (powerof2 <= 
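+			/* clamp the surplus-grown PBLE count down to the
+			 * largest power of two it contains */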
rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt) + powerof2 *= 2; + + powerof2 /= 2; + rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt = powerof2; + } + +end: + return ret; +} + +int sxe2_rcms_modify_fpm_val(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u32 qp_limit_count) +{ + int ret = SXE2_OK; + u32 i; + u32 fpte_needed; +#ifdef SXE2_CFG_DEBUG + u32 loop_count = 0; +#endif + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_obj_info *rcms_obj = rcms_info->rcms_obj; + + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) + rcms_obj[i].cnt = rcms_obj[i].max_cnt; + + ret = sxe2_rcms_loop_modify_fpm_val(rdma_dev, rcms_info, qp_limit_count, + dev->mq->rcms_profile); + if (ret != SXE2_OK) + goto end; + + fpte_needed = sxe2_rcms_est_fpt_entry_cnt(rcms_info); + DRV_RDMA_LOG_DEBUG_BDF("RCMS:func id %u loop cnt=%u fpte needed=%u\n", + rcms_info->rcms_fn_id, loop_count, fpte_needed); + DRV_RDMA_LOG_DEBUG_BDF( + "RCMS:func id %u qp cnt=%u cq cnt=%u srq cnt=%u mr cnt=%u ah cnt=%u pble cnt=%u\n", + rcms_info->rcms_fn_id, rcms_obj[SXE2_RCMS_OBJ_QP].cnt, + rcms_obj[SXE2_RCMS_OBJ_CQ].cnt, rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt, + rcms_obj[SXE2_RCMS_OBJ_MR].cnt, rcms_obj[SXE2_RCMS_OBJ_AH].cnt, + rcms_obj[SXE2_RCMS_OBJ_PBLE].cnt); + DRV_RDMA_LOG_DEBUG_BDF( + "RCMS:func id %u RESP cnt=%u IRRL cnt=%u SSNT cnt=%u ATMO cnt=%u\n", + rcms_info->rcms_fn_id, rcms_obj[SXE2_RCMS_OBJ_RESP].cnt, + rcms_obj[SXE2_RCMS_OBJ_IRRL].cnt, + rcms_obj[SXE2_RCMS_OBJ_SSNT].cnt, + rcms_obj[SXE2_RCMS_OBJ_ACK_TIMEOUT].cnt); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "rcms_commit_err_qp", + &rdma_dev->rdma_func->mq.err_cqe_val, rcms_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_commit_err_qp"); + + INJECT_START(rdma_dev->rdma_func, "rcms_commit_err_cq", + &rdma_dev->rdma_func->mq.err_cqe_val, rcms_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_commit_err_cq"); + + INJECT_START(rdma_dev->rdma_func, "rcms_commit_err_srq", + &rdma_dev->rdma_func->mq.err_cqe_val, rcms_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_commit_err_srq"); + + INJECT_START(rdma_dev->rdma_func, "rcms_commit_err_mr", + &rdma_dev->rdma_func->mq.err_cqe_val, rcms_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_commit_err_mr"); + + INJECT_START(rdma_dev->rdma_func, "rcms_commit_err_ah", + &rdma_dev->rdma_func->mq.err_cqe_val, rcms_info); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_commit_err_ah"); +#endif + +end: + return ret; +} + +static void sxe2_rcms_get_obj_liner_base(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info) +{ + u64 liner_base; + u64 first_fpte_index; + u32 fpte_needed; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!rcms_info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:func id %u get obj liner base rcms info pointer is null\n", + rcms_info->rcms_fn_id); + goto end; + } + + first_fpte_index = (u64)rcms_info->first_fpte_index; + liner_base = first_fpte_index * SXE2_RCMS_FPTE_SIZE + + SXE2_RCMS_LINER_ADDR_BASE; + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + if ((i == SXE2_RCMS_OBJ_PBLE) && + (liner_base & SXE2_FPTE_RCMS_2MB_MASK)) { + liner_base = (FPT_INDEX_GET(liner_base) + 1) * + SXE2_RCMS_FPTE_SIZE + + SXE2_RCMS_LINER_ADDR_BASE; + } + + rcms_info->rcms_obj[i].base = + round_up(liner_base, rcms_info->rcms_obj[i].size); + liner_base = rcms_info->rcms_obj[i].base + + rcms_info->rcms_obj[i].cnt * + rcms_info->rcms_obj[i].size; + } + + rcms_info->max_fpte_index = (u32)FPT_INDEX_GET(liner_base); + fpte_needed = + 
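+		/* inclusive span: (max - first + 1) FPT entries, each a
+		 * 2 MiB granule per SXE2_RCMS_FPTE_SIZE */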
rcms_info->max_fpte_index - rcms_info->first_fpte_index + 1; + rcms_info->fpte_needed = fpte_needed; + rcms_info->first_page_fpte = + rcms_info->max_fpte_cnt - rcms_info->fpte_needed; + rcms_info->fpt.fpte_cnt = fpte_needed; +end: + return; +} + +int sxe2_rcms_commit_fpm_val_cmd(struct sxe2_mq_ctx *mq, u64 scratch, + u16 rcms_fn_id, + struct sxe2_rdma_dma_mem *commit_fpm_mem, + bool post_sq, u8 wait_type) +{ + int ret = SXE2_OK; + __le64 *wqe; + struct sxe2_rcms_commit_fpm_value_wqe *commit_fpm_wqe; + u32 tail, val, error; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + u64 *wqe_start; + int i; + + if (!mq || !commit_fpm_mem) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:commit fpm cmd mq or commit buffer pointer is null\n"); + ret = -EINVAL; + goto end; + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_COMMIT_FPM_VAL]) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + MQ_OP_COMMIT_FPM_VAL); + ret = -EBUSY; + goto end; + } +#endif + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR("RCMS:get wq err ret=%d\n", ret); + ret = -ENOMEM; + goto end; + } + commit_fpm_wqe = (struct sxe2_rcms_commit_fpm_value_wqe *)wqe; + commit_fpm_wqe->rcms_fuction_id = rcms_fn_id; + commit_fpm_wqe->commit_buf_pa = commit_fpm_mem->pa >> 2; + commit_fpm_wqe->op = SXE2_MQ_OP_COMMIT_FPM_VAL; + dma_wmb(); + commit_fpm_wqe->wqe_valid = mq->polarity; + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS: commit fpm wqe func id=%u buf pa=%#llx op=%u wqe valid=%u\n", + commit_fpm_wqe->rcms_fuction_id, + (u64)commit_fpm_wqe->commit_buf_pa, commit_fpm_wqe->op, + commit_fpm_wqe->wqe_valid); + wqe_start = (u64 *)wqe; + for (i = 0; i < SXE2_RCMS_MQ_WQE_LEN; i++) { + DRV_RDMA_LOG_DEV_DEBUG("RCMS:commit fpm wqe 64bit[%u]=%#llx\n", + i, wqe_start[i]); + } + + sxe2_kget_mq_reg_info(mq, &val, &tail, &error); + + if (post_sq) { + sxe2_kpost_mq(mq); + if (wait_type == SXE2_MQ_WAIT_POLL_REGS) + ret = sxe2_kpoll_mq_registers( + mq, tail, mq->dev->hw_attrs.max_done_count); + else if (wait_type == SXE2_MQ_WAIT_POLL_CQ) + ret = sxe2_kpoll_mcq(mq, SXE2_MQ_OP_COMMIT_FPM_VAL, + NULL); + } +end: + return ret; +} + +static int +sxe2_rcms_commit_fpm_val_cmd_complete(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_dma_mem *commit_fpm_mem, + u16 rcms_fn_id) +{ + int ret = SXE2_OK; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + DRV_RDMA_LOG_DEV_ERR("RCMS:get mq request err\n"); + ret = -ENOMEM; + goto end; + } + + mq_info = &mq_request->info; + mq_request->param = NULL; + mq_info->in.u.commit_fpm_val.mq = dev->mq; + mq_info->in.u.commit_fpm_val.fpm_val_pa = commit_fpm_mem->pa; + mq_info->in.u.commit_fpm_val.fpm_val_va = commit_fpm_mem->va; + mq_info->in.u.commit_fpm_val.rcms_fn_id = rcms_fn_id; + mq_info->mq_cmd = MQ_OP_COMMIT_FPM_VAL; + mq_info->post_mq = 1; + mq_info->in.u.commit_fpm_val.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return ret; +} + +static int sxe2_rcms_pf_commit_fpm_val(struct sxe2_rdma_ctx_dev *dev) +{ + int ret = SXE2_OK; + __le32 *buf; + struct sxe2_rcms_obj_info *obj_info; + struct sxe2_rcms_info *rcms_info; + struct sxe2_rdma_dma_mem commit_fpm_mem; + u8 wait_type; + u32 i; + u32 start_byte = 0; + u64 
base_val; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->rcms_info) { + DRV_RDMA_LOG_DEV_ERR("RCMS:rcms info is null err\n"); + ret = -EFAULT; + goto end; + } + rcms_info = dev->rcms_info; + + if (rcms_info->rcms_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id) { + DRV_RDMA_LOG_DEV_ERR("RCMS:pf func id err func id=%d\n", + rcms_info->rcms_fn_id); + ret = -EINVAL; + goto end; + } + + if (!rcms_info->rcms_obj) { + DRV_RDMA_LOG_DEV_ERR("RCMS:rcms obj is null err\n"); + ret = -EFAULT; + goto end; + } + obj_info = rcms_info->rcms_obj; + + buf = dev->fpm_commit_buf; + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + base_val = obj_info[i].base >> SXE2_RCMS_OBJ_LINER_BASE_SHIFT; + set_32bit_val(buf, start_byte, (u32)base_val); + start_byte += SXE2_RCMS_COMMIT_BUF_SHIFT; + set_32bit_val(buf, start_byte, (u32)obj_info[i].cnt); + start_byte += SXE2_RCMS_COMMIT_BUF_SHIFT; + } + + set_32bit_val(buf, SXE2_RCMS_MAX_FPTE_CNT_BUF_OFFSET, + (u32)rcms_info->max_fpte_cnt); + set_32bit_val(buf, SXE2_RCMS_CEQS_DB_BUF_OFFSET, + (u32)rcms_info->max_ceqs); + commit_fpm_mem.pa = dev->fpm_commit_buf_pa; + commit_fpm_mem.va = (void *)buf; + wait_type = SXE2_MQ_WAIT_POLL_REGS; + ret = sxe2_rcms_commit_fpm_val_cmd(dev->mq, SXE2_RCMS_MQ_CMD_NO_SCRATCH, + rcms_info->rcms_fn_id, + &commit_fpm_mem, true, wait_type); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS:commit fpt val cmd err ret=%d\n", + ret); + } + +end: + return ret; +} + +static int sxe2_rcms_alloc_fpt_entry_mem(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_virt_mem virt_mem; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 mem_size; + + if (!rcms_info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:alloc fpte rcms info pointer is null\n"); + ret = -EINVAL; + goto end; + } + mem_size = + (u32)(sizeof(struct sxe2_rcms_fpt_entry) * + (rcms_info->fpt.fpte_cnt + rcms_info->first_fpte_index)); + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + DRV_RDMA_LOG_DEV_ERR("RCMS:alloc fpte struct mem err\n"); + ret = -ENOMEM; + goto end; + } + rcms_info->fpt.fpte = virt_mem.va; +end: + return ret; +} + +int sxe2_rcms_pf_config_fpm_val(struct sxe2_rdma_ctx_dev *dev, + u32 qp_limit_count) +{ + int ret = SXE2_OK; + struct sxe2_rcms_info *rcms_info = dev->rcms_info; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + ret = sxe2_rcms_pf_query_fpm_val(dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS: pf query fpm val err ret=%d\n", + ret); + goto end; + } + ret = sxe2_rcms_modify_fpm_val(dev, rcms_info, qp_limit_count); + if (ret != SXE2_OK) + goto end; + + sxe2_rcms_get_obj_liner_base(dev, rcms_info); + ret = sxe2_rcms_pf_commit_fpm_val(dev); + if (ret != SXE2_OK) + goto end; + + ret = sxe2_rcms_alloc_fpt_entry_mem(dev, rcms_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS: alloc fpt entry mem err ret=%d\n", + ret); + } + +end: + return ret; +} + +void sxe2_rcms_get_obj_fpte_range(struct sxe2_rcms_info *rcms_info, + u32 obj_type, u32 start_idx, u32 obj_cnt, + u32 *fpte_idx, u32 *fpte_limit) +{ + u64 liner_addr; + u64 liner_addr_limit; + + liner_addr = rcms_info->rcms_obj[obj_type].base + + rcms_info->rcms_obj[obj_type].size * start_idx; + liner_addr_limit = + liner_addr + rcms_info->rcms_obj[obj_type].size * obj_cnt; + *fpte_idx = (u32)FPT_INDEX_GET(liner_addr); + *fpte_limit = (u32)FPT_INDEX_GET((liner_addr_limit - 1)); + *fpte_limit += 1; +} + +void sxe2_rcms_get_obj_spte_range(struct sxe2_rcms_info 
*rcms_info, + u32 obj_type, u32 start_idx, u32 obj_cnt, + u32 *spte_idx, u32 *spte_limit) +{ + u64 liner_addr; + u64 liner_addr_limit; + + liner_addr = rcms_info->rcms_obj[obj_type].base + + rcms_info->rcms_obj[obj_type].size * start_idx; + liner_addr_limit = + liner_addr + rcms_info->rcms_obj[obj_type].size * obj_cnt; + *spte_idx = (u32)SPT_INDEX_GET(liner_addr); + *spte_limit = (u32)SPT_INDEX_GET((liner_addr_limit - 1)); + *spte_limit += 1; +} + +int sxe2_rcms_add_fpt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, u32 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rdma_dma_mem page_mem = { 0 }; + struct sxe2_rdma_virt_mem *vmem; + u64 page_len = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!rcms_info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:add fpte rcms info pointer is null\n"); + ret = -EINVAL; + goto end; + } + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (!fpte->valid) { + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) + page_len = SXE2_RCMS_FIRST_CP_SIZE; + else if (fpte_type == SXE2_RCMS_FPT_TYPE_SECOND) + page_len = SXE2_RCMS_SPT_PAGE_SIZE; + + page_mem.size = + (u32)ALIGN(page_len, SXE2_RCMS_FPT_SPT_BUF_ALIGNMENT); + page_mem.va = dma_alloc_coherent(dev->hw->device, page_mem.size, + &page_mem.pa, GFP_KERNEL); + if (!page_mem.va) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:The current environment cannot allocate contiguous memory.\n" + "\tplease use rcms_mode=2 or release the used memory\n" + "\tin the environment\n"); + DRV_RDMA_LOG_DEV_ERR("RCMS:alloc fpte %u mem err\n", + fpte_index); + ret = -ENOMEM; + return ret; + } + memset(page_mem.va, 0, page_mem.size); + + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) { + memcpy(&fpte->u.cp.page_addr, &page_mem, + sizeof(fpte->u.cp.page_addr)); + fpte->u.cp.fpt_spt_index = fpte_index; + } else if (fpte_type == SXE2_RCMS_FPT_TYPE_SECOND) { + vmem = &fpte->u.spt.spte_virt_mem; + vmem->size = sizeof(struct sxe2_rcms_spt_entry) * + SXE2_RCMS_SPT_ENTRY_CNT; + vmem->va = kzalloc(vmem->size, GFP_KERNEL); + if (!vmem->va) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:alloc fpte %u spte struct mem err\n", + fpte_index); + ret = -ENOMEM; + goto spte_mem_err; + } + fpte->u.spt.spte = vmem->va; + memset(page_mem.va, SXE2_RCMS_SPT_INVALID_VAL, + page_len); + memcpy(&fpte->u.spt.spt_page_addr, &page_mem, + sizeof(fpte->u.spt.spt_page_addr)); + fpte->u.spt.use_cnt = 0; + } + rcms_info->fpt.fpte[fpte_index].entry_type = fpte_type; + rcms_info->fpt.use_cnt++; + } + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) + fpte->u.cp.use_cnt++; + + goto end; +spte_mem_err: + dma_free_coherent(dev->hw->device, page_mem.size, page_mem.va, + page_mem.pa); + page_mem.va = NULL; +end: + return ret; +} + +int sxe2_rcms_invalidate_spt_cache(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 spte_idx) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u32 pull = SXE2_RCMS_SPT_INVALID_REG_PULL; + u32 bar_reg_val; + u32 idx_val; + + mutex_lock(&func_lock); + bar_reg_val = + WRITE_BIT(SXE2_RCMS_SPT_INVALID_MASK_REG_VAL, + SPT_CACHE_INVALID_MASK_M, SPT_CACHE_INVALID_MASK_S); + SXE2_BAR_WRITE_32(bar_reg_val, + dev->hw_regs[RCMS_SPT_CACHE_FAST_INVALID_MASK]); + idx_val = (fpte_idx << 9) | spte_idx; + bar_reg_val = WRITE_BIT(idx_val, SPT_CACHE_INVALID_IDX_SPT_IDX_M, + SPT_CACHE_INVALID_IDX_SPT_IDX_S) | + WRITE_BIT(1, SPT_CACHE_INVALID_IDX_EN_M, + SPT_CACHE_INVALID_IDX_EN_S); + SXE2_BAR_WRITE_32(bar_reg_val, + dev->hw_regs[RCMS_SPT_CACHE_FAST_INVALID_IDX]); + 
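+	/*
+	 * Annotation: bounded poll for the invalidate to take effect. The
+	 * DONE bit is read back up to SXE2_RCMS_SPT_INVALID_REG_PULL times,
+	 * yielding with cond_resched() between reads; on success the DONE
+	 * bit is written back to zero. The whole sequence runs under
+	 * func_lock since the invalidate registers are shared.
+	 */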
do { + bar_reg_val = SXE2_BAR_READ_32( + dev->hw_regs[RCMS_SPT_CACHE_FAST_INVALID_IDX]); + if (READ_BIT(bar_reg_val, SPT_CACHE_INVALID_IDX_DONE_M, + SPT_CACHE_INVALID_IDX_DONE_S)) { + break; + } + cond_resched(); + } while (--pull); + + if (!pull) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:invalid spt cache err fpte idx=%u spte idx=%u\n", + fpte_idx, spte_idx); + ret = -EINVAL; + } else { + bar_reg_val = WRITE_BIT(0, SPT_CACHE_INVALID_IDX_DONE_M, + SPT_CACHE_INVALID_IDX_DONE_S); + SXE2_BAR_WRITE_32( + bar_reg_val, + dev->hw_regs[RCMS_SPT_CACHE_FAST_INVALID_IDX]); + } + mutex_unlock(&func_lock); + return ret; +} + +int sxe2_rcms_add_spt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, u32 spte_index) +{ + int ret = SXE2_OK; + struct sxe2_rcms_spt *spt; + struct sxe2_rcms_spt_entry *spte; + struct sxe2_rdma_dma_mem page_mem = { 0 }; + u64 *spt_page_addr; + u64 page_pa = 0; + u32 rel_spt_index; + u32 fpte_index; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!rcms_info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:add spte rcms info pointer is null\n"); + ret = -EINVAL; + goto end; + } + fpte_index = spte_index / SXE2_RCMS_SPT_ENTRY_CNT; + if (fpte_index > rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:fpte index %u out of bounds max fpte index=%u\n", + fpte_index, rcms_info->fpt.fpte_cnt); + ret = -EINVAL; + goto end; + } + if (rcms_info->fpt.fpte[fpte_index].entry_type != + SXE2_RCMS_FPT_TYPE_SECOND) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:fpte %u type is not second type = %u\n", + fpte_index, rcms_info->fpt.fpte[fpte_index].entry_type); + ret = -EINVAL; + goto end; + } + rel_spt_index = REL_SPTE_INDEX_GET(spte_index); + spt = &rcms_info->fpt.fpte[fpte_index].u.spt; + spte = &rcms_info->fpt.fpte[fpte_index].u.spt.spte[rel_spt_index]; + if (!spte->valid) { + page_mem.size = ALIGN(SXE2_RCMS_SECOND_CP_SIEZ, + SXE2_RCMS_FPT_SPT_BUF_ALIGNMENT); + page_mem.va = dma_alloc_coherent(dev->hw->device, page_mem.size, + &page_mem.pa, GFP_KERNEL); + if (!page_mem.va) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:fpte index %u rel spte index %u alloc cp err\n", + fpte_index, rel_spt_index); + ret = -ENOMEM; + return ret; + } + memset(page_mem.va, 0, page_mem.size); + memcpy(&spte->cp.page_addr, &page_mem, + sizeof(spte->cp.page_addr)); + spte->cp.fpt_spt_index = spte_index; + spte->cp.entry_type = SXE2_RCMS_FPT_TYPE_SECOND; + page_pa = page_mem.pa; + spt_page_addr = spt->spt_page_addr.va; + spt_page_addr += rel_spt_index; + memcpy(spt_page_addr, &page_pa, sizeof(*spt_page_addr)); + spte->fpt_index = fpte_index; + spte->valid = true; + spt->use_cnt++; + ; + ret = sxe2_rcms_invalidate_spt_cache(dev, fpte_index, + rel_spt_index); + if (ret != SXE2_OK) + goto invalid_spt_err; + } + + spte->cp.use_cnt++; + goto end; +invalid_spt_err: + spt->use_cnt--; + spte->valid = false; + dma_free_coherent(dev->hw->device, page_mem.size, page_mem.va, + page_mem.pa); + page_mem.va = NULL; + +end: + return ret; +} + +static void sxe2_rcms_set_fpte(u64 pa, u16 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type, + struct sxe2_rcms_update_fpt_entry *entry) +{ +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + u32 valid = 1; + + DRV_RDMA_LOG_DEBUG("inject mq debugfs rcms_ctx_err\n"); + if (g_inject_fpte_err == 1) { + valid = 0; + } else if (g_inject_fpte_err == 2) { + entry->data = 0; + entry->cmd = fpte_index; + return; + } else if (g_inject_fpte_err == 3) { + pa = 0xFFFFFFFFFFFFF000; + } else if (g_inject_fpte_err == 4) { + fpte_index = 0xffff; + } + entry->data = + pa | + 
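+		/* the low PA bits carry the FPTE fields: CP count in
+		 * GENMASK(11, 2), type in BIT(1) (0 = second-level table),
+		 * valid in BIT(0) -- see the SXE2_RCMS_FPT_DATALOW_*
+		 * definitions later in this patch */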
FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_CP_COUNT, + (s64)SXE2_RCMS_SPT_ENTRY_CNT) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_TYPE, + (fpte_type == SXE2_RCMS_FPT_TYPE_SECOND ? 0 : 1)) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_VALID, valid); + entry->cmd = fpte_index; + return; +#else + entry->data = + pa | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_CP_COUNT, + (s64)SXE2_RCMS_SPT_ENTRY_CNT) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_TYPE, + (s64)(fpte_type == SXE2_RCMS_FPT_TYPE_SECOND ? 0 : + 1)) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_VALID, (s64)1); + + entry->cmd = fpte_index; +#endif +} + +static void sxe2_rcms_clr_fpte(u16 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type, + struct sxe2_rcms_update_fpt_entry *entry) +{ + entry->data = (u64)( + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_CP_COUNT, + (s64)SXE2_RCMS_SPT_ENTRY_CNT) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_TYPE, + (s64)(fpte_type == SXE2_RCMS_FPT_TYPE_SECOND ? 0 : + 1)) | + FIELD_PREP((s64)SXE2_RCMS_FPT_DATALOW_VALID, (s64)0)); + + entry->cmd = fpte_index; +} + +int sxe2_rcms_update_fpte_wqe_fill(struct sxe2_mq_ctx *mq, + struct sxe2_rcms_update_fptes_info *info, + u64 scratch) +{ + int ret = SXE2_OK; + u64 data; + __le64 *wqe; + u32 mem_entries, wqe_entries; + struct sxe2_rdma_dma_mem *fpte_buf; + struct sxe2_rcms_update_fpte_wqe *update_fpte_wqe; + u64 offset = 0; + u32 wqe_idx; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + u64 *wqe_start; + int i; + + if (!mq || !info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update fpte wqe fill mq or info pointer is null\n"); + ret = -EINVAL; + goto end; + } + fpte_buf = &mq->fptebuf; + wqe = sxe2_kget_next_mq_wqe_idx(mq, scratch, &wqe_idx); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR("RCMS:mq get wqe err ret=%d\n", ret); + ret = -ENOMEM; + goto end; + } + update_fpte_wqe = (struct sxe2_rcms_update_fpte_wqe *)wqe; + wqe_entries = (info->cnt > SXE2_UPDATE_FPTE_WQE_INCLUDE_ENTRY_CNT) ? 
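+		      /* at most three entries ride inline in the WQE; any
+		       * overflow is staged in this WQE slot's region of the
+		       * mailbox FPTE buffer and referenced by PA below */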
+ SXE2_UPDATE_FPTE_WQE_INCLUDE_ENTRY_CNT : + info->cnt; + mem_entries = info->cnt - wqe_entries; + + if (mem_entries) { + offset = wqe_idx * SXE2_RCMS_UPDATE_FPTE_BUF_SIZE; + memcpy(((char *)fpte_buf->va + offset), &info->entry[3], + (size_t)(mem_entries + << SXE2_RCMS_FPTE_BUF_ENTRIES_SIZE_SHIFT)); + data = (u64)fpte_buf->pa + offset; + } else { + data = 0; + } + + update_fpte_wqe->rcms_fcn_id = info->rcms_fn_id; + update_fpte_wqe->fpt_buffer_address = data >> SXE2_RCMS_FPTE_BUF_SHIFT; + if (wqe_entries >= 1 && wqe_entries <= 3) { + update_fpte_wqe->fpte_cmd_0 = info->entry[0].cmd; + update_fpte_wqe->fpte_data_0 = info->entry[0].data; + } + if (wqe_entries == 2 || wqe_entries == 3) { + update_fpte_wqe->fpte_cmd_1 = info->entry[1].cmd; + update_fpte_wqe->fpt_entry_valid_1 = 1; + update_fpte_wqe->fpte_data_1 = info->entry[1].data; + } + if (wqe_entries == 3) { + update_fpte_wqe->fpte_cmd_2 = info->entry[2].cmd; + update_fpte_wqe->fpt_entry_valid_2 = 1; + update_fpte_wqe->fpte_data_2 = info->entry[2].data; + } + update_fpte_wqe->fpt_entry_cnt = (u64)mem_entries; + update_fpte_wqe->op = SXE2_MQ_OP_UPDATE_FPT; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_fpte", + &rdma_dev->rdma_func->mq.err_cqe_val, update_fpte_wqe); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_fpte"); +#endif + + dma_wmb(); + update_fpte_wqe->wqe_valid = mq->polarity; + wqe_start = (u64 *)wqe; + for (i = 0; i < SXE2_RCMS_MQ_WQE_LEN; i++) { + DRV_RDMA_LOG_DEV_DEBUG("RCMS:update fpte wqe 64bit[%u]=%#llx\n", + i, wqe_start[i]); + } + +end: + return ret; +} + +int sxe2_rcms_update_fptes_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_update_fptes_info *info) +{ + int ret = SXE2_OK; + u32 error; + u32 val = 0; + u32 tail = 0; + struct sxe2_mq_ctx *mq = dev->mq; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update fpte cmd info pointer is null\n"); + ret = -EINVAL; + goto end; + } + + ret = sxe2_rcms_update_fpte_wqe_fill(mq, info, + SXE2_RCMS_MQ_CMD_NO_SCRATCH); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS:update fpte wqe fill err ret=%d\n", + ret); + goto end; + } + + sxe2_kget_mq_reg_info(mq, &val, &tail, &error); + sxe2_kpost_mq(mq); + ret = sxe2_kpoll_mq_registers(mq, tail, + mq->dev->hw_attrs.max_done_count); +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + if (rdma_dev->rdma_func->mq.ops_failed[MQ_OP_UPDATE_FPT] && !ret) { + DRV_RDMA_LOG_DEV_DEBUG("MQ:opcode %u inject op failed tri\n", + MQ_OP_UPDATE_FPT); + ret = -EBUSY; + } +#endif + + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update fpte kpoll mq registers err ret=%d\n", + ret); + } + +end: + return ret; +} + +int sxe2_rcms_update_pe_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_update_fptes_info *info, + u64 scratch) +{ + int ret = SXE2_OK; + struct sxe2_mq_ctx *mq = dev->mq; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!info) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update pe fptes info pointer is null\n"); + ret = -EINVAL; + goto end; + } + + ret = sxe2_rcms_update_fpte_wqe_fill(mq, info, scratch); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update pe fpte wqe fill err ret=%d\n", ret); + goto end; + } + sxe2_kpost_mq(mq); +end: + return ret; +} + +int sxe2_rcms_update_fptes_cmd_complete( + struct sxe2_rdma_ctx_dev *dev, struct sxe2_rcms_update_fptes_info *info) +{ + int ret = SXE2_OK; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct 
sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + DRV_RDMA_LOG_DEV_ERR("RCMS:get mq request err\n"); + ret = -ENOMEM; + goto end; + } + + mq_info = &mq_request->info; + memcpy(&mq_info->in.u.update_pe_fptes.info, info, + sizeof(mq_info->in.u.update_pe_fptes.info)); + mq_info->mq_cmd = MQ_OP_UPDATE_FPT; + mq_info->post_mq = 1; + mq_info->in.u.update_pe_fptes.dev = dev; + mq_info->in.u.update_pe_fptes.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:update fptes khandle mq cmd err ret=%d\n", ret); + } + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return ret; +} + +int sxe2_rcms_update_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u16 *fpte_index_addr, u32 fpte_cnt, bool seted) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_update_fptes_info *update_fpte_info; + u16 fpte_index; + u64 fpte_pa; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + update_fpte_info = kzalloc( + sizeof(*update_fpte_info), GFP_KERNEL); + if (!update_fpte_info) { + DRV_RDMA_LOG_DEV_ERR("RCMS:alloc update fpte info err\n"); + ret = -ENOMEM; + goto end; + } + update_fpte_info->cnt = 0; + update_fpte_info->rcms_fn_id = rcms_info->rcms_fn_id; + for (i = 0; i < fpte_cnt; i++) { + fpte_index = fpte_index_addr[i]; + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (!fpte || (!fpte->valid && seted) || + (fpte->valid && !seted)) { + continue; + } + if (seted) { + fpte_pa = (fpte->entry_type == + SXE2_RCMS_FPT_TYPE_SECOND) ? + fpte->u.spt.spt_page_addr.pa : + fpte->u.cp.page_addr.pa; + sxe2_rcms_set_fpte( + fpte_pa, fpte_index, fpte->entry_type, + &update_fpte_info->entry[update_fpte_info->cnt]); + } else { + sxe2_rcms_clr_fpte( + fpte_index, fpte->entry_type, + &update_fpte_info->entry[update_fpte_info->cnt]); + } + update_fpte_info->cnt++; + if (update_fpte_info->cnt == + SXE2_RCMS_MAX_UPDATE_FPTE_ENTRIES) { + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:mq update fpte err ret=%d\n", + ret); + goto free_update_info; + } + update_fpte_info->cnt = 0; + } + } + if (update_fpte_info->cnt) { + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS:mq update fpte err ret=%d\n", + ret); + } + } +free_update_info: + kfree(update_fpte_info); +end: + return ret; +} + +int sxe2_rcms_vf_update_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u16 *fpte_index_addr, u32 fpte_cnt, bool seted) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rcms_vf_update_fptes_info *update_vf_fpte_info; + u16 fpte_index; + u64 fpte_pa; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + update_vf_fpte_info = kzalloc(sizeof(*update_vf_fpte_info), GFP_KERNEL); + if (!update_vf_fpte_info) { + DRV_RDMA_LOG_ERROR_BDF( + "RCMS:alloc update vf fpte info err\n"); + ret = -ENOMEM; + goto end; + } + + update_vf_fpte_info->set = seted; + + for (i = 0; i < fpte_cnt; i++) { + fpte_index = fpte_index_addr[i]; + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (!fpte || (!fpte->valid && seted) || + (fpte->valid && !seted)) { + continue; + } + if (seted) { + fpte_pa = (fpte->entry_type == + SXE2_RCMS_FPT_TYPE_SECOND) ? 
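+					  /* SECOND-type entries program the
+					   * SPT page array PA; FIRST-type
+					   * entries the CP page itself */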
+ fpte->u.spt.spt_page_addr.pa : + fpte->u.cp.page_addr.pa; + sxe2_rcms_set_fpte( + fpte_pa, fpte_index, fpte->entry_type, + &(update_vf_fpte_info + ->entry[update_vf_fpte_info->cnt])); + } else { + sxe2_rcms_clr_fpte( + fpte_index, fpte->entry_type, + &(update_vf_fpte_info + ->entry[update_vf_fpte_info->cnt])); + }; + update_vf_fpte_info->cnt++; + if (update_vf_fpte_info->cnt == + SXE2_RCMS_VF_MAX_UPDATE_FPTE_ENTRIES) { + ret = sxe2_vchnl_req_update_fpte(dev, + update_vf_fpte_info); + if (ret != SXE2_OK) + goto free_info; + memset(update_vf_fpte_info, 0, + sizeof(*update_vf_fpte_info)); + update_vf_fpte_info->set = seted; + } + } + if (update_vf_fpte_info->cnt) + ret = sxe2_vchnl_req_update_fpte(dev, update_vf_fpte_info); + +free_info: + kfree(update_vf_fpte_info); +end: + return ret; +} + +static int sxe2_rcms_finish_add_fpte_reg(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!info->add_fpte_cnt) + goto end; + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "rcms_ctx_err_qp", info, + &rdma_dev->rdma_func->mq.err_cqe_val); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_ctx_err_qp"); + + INJECT_START(rdma_dev->rdma_func, "rcms_ctx_err_ssnt", info, + &rdma_dev->rdma_func->mq.err_cqe_val); + INJECT_DEACTIVE(rdma_dev->rdma_func, "rcms_ctx_err_ssnt"); +#endif + + if (dev->privileged) { + ret = sxe2_rcms_update_fptes(dev, info->rcms_info, + &info->rcms_info->fpte_indexes[0], + info->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEBUG_BDF("update pf fpte err ret=%d\n", + ret); + } + } else { + ret = sxe2_rcms_vf_update_fptes( + dev, info->rcms_info, &info->rcms_info->fpte_indexes[0], + info->add_fpte_cnt, true); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEBUG_BDF("update vf fpte err ret=%d\n", + ret); + } + } + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + g_inject_fpte_err = 0; +#endif + +end: + return ret; +} + +int sxe2_rcms_remove_spt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, u32 spte_index) +{ + int ret = SXE2_OK; + struct sxe2_rcms_spt_entry *spte; + struct sxe2_rcms_spt *spt; + struct sxe2_rcms_fpt_entry *fpte; + u32 fpte_index; + u32 rel_spt_idx; + u64 *spt_page_addr; + struct sxe2_rdma_dma_mem *page_mem; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + fpte_index = spte_index / SXE2_RCMS_SPT_ENTRY_CNT; + rel_spt_idx = REL_SPTE_INDEX_GET(spte_index); + if (fpte_index > rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:remove spte input fpte index out of bounds fpte index=%u max fpte index=%u\n", + fpte_index, rcms_info->max_fpte_index); + ret = -EINVAL; + goto end; + } + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (fpte->entry_type != SXE2_RCMS_FPT_TYPE_SECOND) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:remove spte fpte %u type is not second type = %u\n", + fpte_index, rcms_info->fpt.fpte[fpte_index].entry_type); + ret = -EINVAL; + goto end; + } + + spt = &fpte->u.spt; + spte = &spt->spte[rel_spt_idx]; + if (--spte->cp.use_cnt) + goto end; + + spte->valid = false; + spt->use_cnt--; + spt_page_addr = spt->spt_page_addr.va; + spt_page_addr += rel_spt_idx; + memset(spt_page_addr, SXE2_RCMS_SPTE_INVALID_VAL, sizeof(u64)); + sxe2_rcms_invalidate_spt_cache(dev, fpte_index, rel_spt_idx); + page_mem = &spte->cp.page_addr; + if (!page_mem || !page_mem->va) { + DRV_RDMA_LOG_DEV_ERR("RCMS:cp page mem err\n"); + ret = -EINVAL; + goto end; + } + 
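+	/*
+	 * Annotation: teardown order matters here -- the SPTE slot was
+	 * poisoned (SXE2_RCMS_SPTE_INVALID_VAL) and the SPT cache
+	 * invalidated above before the CP page is returned, and the SPTE
+	 * bookkeeping array itself is freed only once spt->use_cnt drops
+	 * to zero.
+	 */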
dma_free_coherent(dev->hw->device, page_mem->size, page_mem->va, + page_mem->pa); + page_mem->va = NULL; + if (!spt->use_cnt) { + if (spt->spte_virt_mem.va != NULL) { + kfree(spt->spte_virt_mem.va); + spt->spte_virt_mem.va = NULL; + } + } + +end: + return ret; +} + +void sxe2_rcms_remove_fpt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u32 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type) +{ + struct sxe2_rcms_fpt_entry *fpte = &rcms_info->fpt.fpte[fpte_index]; + + if (fpte->valid) { + fpte->valid = false; + rcms_info->fpt.use_cnt--; + } + + if (fpte_type == SXE2_RCMS_FPT_TYPE_FIRST) { + if (!fpte->u.cp.use_cnt && fpte->u.cp.page_addr.va) { + dma_free_coherent(dev->hw->device, + fpte->u.cp.page_addr.size, + fpte->u.cp.page_addr.va, + fpte->u.cp.page_addr.pa); + fpte->u.cp.page_addr.va = NULL; + } + } else if (fpte_type == SXE2_RCMS_FPT_TYPE_SECOND) { + if (!fpte->u.spt.use_cnt && fpte->u.spt.spt_page_addr.va) { + dma_free_coherent(dev->hw->device, + fpte->u.spt.spt_page_addr.size, + fpte->u.spt.spt_page_addr.va, + fpte->u.spt.spt_page_addr.pa); + fpte->u.spt.spt_page_addr.va = NULL; + } + if (!fpte->u.spt.use_cnt && fpte->u.spt.spte_virt_mem.va) { + kfree(fpte->u.spt.spte_virt_mem.va); + fpte->u.spt.spte_virt_mem.va = NULL; + } + } +} + +static int sxe2_rcms_prep_remove_fpte_cp(struct sxe2_rcms_info *rcms_info, + u32 fpte_index) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (--fpte->u.cp.use_cnt) { + ret = -EBUSY; + goto end; + } + fpte->valid = false; + rcms_info->fpt.use_cnt--; +end: + return ret; +} + +static int sxe2_rcms_prep_remove_fpte_spt(struct sxe2_rcms_info *rcms_info, + u32 fpte_index) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + + fpte = &rcms_info->fpt.fpte[fpte_index]; + if (fpte->u.spt.use_cnt) { + ret = -EBUSY; + goto end; + } + fpte->valid = false; + rcms_info->fpt.use_cnt--; +end: + return ret; +} + +int sxe2_rcms_build_first_type_table(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + u32 fpte_idx; + u32 fpte_lmt; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + sxe2_rcms_get_obj_fpte_range(obj_info->rcms_info, obj_info->rsrc_type, + obj_info->start_idx, obj_info->obj_cnt, + &fpte_idx, &fpte_lmt); + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS:obj %u min fpte index=%u lmt fpte index=%u\n", + obj_info->rsrc_type, fpte_idx, fpte_lmt); + if (fpte_idx > obj_info->rcms_info->max_fpte_index || + (fpte_lmt - 1) > obj_info->rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u fpte idx range err fpte idx=%u fpte lmt=%u fpte cnt=%u\n", + obj_info->rsrc_type, fpte_idx, fpte_lmt, + obj_info->rcms_info->fpt.fpte_cnt); + ret = -EINVAL; + goto end; + } + + for (i = fpte_idx; i < fpte_lmt; i++) { + ret = sxe2_rcms_add_fpt_entry(dev, obj_info->rcms_info, i, + obj_info->entry_type); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u add fpte %u err ret=%d\n", + obj_info->rsrc_type, i, ret); + goto fpte_err; + } + fpte = &obj_info->rcms_info->fpt.fpte[i]; + if (fpte->valid) + continue; + + fpte->valid = true; + + obj_info->rcms_info->fpte_indexes[obj_info->add_fpte_cnt] = + (u16)i; + obj_info->add_fpte_cnt++; + } + goto end; + +fpte_err: + while (i && (i > fpte_idx)) { + i--; + fpte = &obj_info->rcms_info->fpt.fpte[i]; + fpte->u.cp.use_cnt--; + } + while (obj_info->add_fpte_cnt) { + obj_info->add_fpte_cnt--; + sxe2_rcms_remove_fpt_entry( + dev, 
obj_info->rcms_info, + obj_info->rcms_info + ->fpte_indexes[obj_info->add_fpte_cnt], + obj_info->entry_type); + } + +end: + return ret; +} + +static int sxe2_rcms_finish_clr_fpte_reg(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_del_obj_info *info, + bool reset, bool mq_update) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + struct sxe2_rdma_dma_mem *mem; + u16 fpte_idx; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!info->del_fpte_cnt) + goto end; + + if (dev->privileged && !reset && mq_update) { + ret = sxe2_rcms_update_fptes(dev, info->rcms_info, + &info->rcms_info->fpte_indexes[0], + info->del_fpte_cnt, false); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:pf finish clr fpte error ret=%d\n", ret); + } + } else if (!dev->privileged && !reset && mq_update) { + ret = sxe2_rcms_vf_update_fptes( + dev, info->rcms_info, &info->rcms_info->fpte_indexes[0], + info->del_fpte_cnt, false); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "RCMS:vf finish clr fpte error ret=%d\n", ret); + } + } + + for (i = 0; i < info->del_fpte_cnt; i++) { + fpte_idx = info->rcms_info->fpte_indexes[i]; + fpte = &info->rcms_info->fpt.fpte[fpte_idx]; + mem = (fpte->entry_type == SXE2_RCMS_FPT_TYPE_SECOND) ? + &fpte->u.spt.spt_page_addr : + &fpte->u.cp.page_addr; + if (!mem || !mem->va) { + DRV_RDMA_LOG_DEV_ERR("RCMS:fpte %u mem err\n", + fpte_idx); + } else { + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS:free fpte %u mem pa=0x%llx\n", fpte_idx, + mem->pa); + dma_free_coherent(dev->hw->device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + } + } +end: + return ret; +} + +int sxe2_rcms_build_second_type_table(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fpt_entry *fpte; + u32 fpte_idx, fpte_lmt; + u32 spte_idx, spte_lmt; + u32 spte_idx_range = 0; + u32 spte_lmt_range = 0; + u32 i = 0; + u32 j = 0; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + sxe2_rcms_get_obj_fpte_range(obj_info->rcms_info, obj_info->rsrc_type, + obj_info->start_idx, obj_info->obj_cnt, + &fpte_idx, &fpte_lmt); + if (fpte_idx > obj_info->rcms_info->max_fpte_index || + (fpte_lmt - 1) > obj_info->rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u fpte idx range err fpte idx=%u fpte lmt=%u fpte cnt=%u\n", + obj_info->rsrc_type, fpte_idx, fpte_lmt, + obj_info->rcms_info->fpt.fpte_cnt); + ret = -EINVAL; + goto end; + } + sxe2_rcms_get_obj_spte_range(obj_info->rcms_info, obj_info->rsrc_type, + obj_info->start_idx, obj_info->obj_cnt, + &spte_idx, &spte_lmt); + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS:obj %u min spte index=%u lmt spte index=%u\n", + obj_info->rsrc_type, spte_idx, spte_lmt); + + for (i = fpte_idx; i < fpte_lmt; i++) { + ret = sxe2_rcms_add_fpt_entry(dev, obj_info->rcms_info, i, + obj_info->entry_type); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u second mode add fpte %u err ret=%d\n", + obj_info->rsrc_type, i, ret); + goto fpte_err; + } + fpte = &obj_info->rcms_info->fpt.fpte[i]; + spte_idx_range = max(spte_idx, i * SXE2_RCMS_SPT_ENTRY_CNT); + spte_lmt_range = + min(spte_lmt, (i + 1) * SXE2_RCMS_SPT_ENTRY_CNT); + for (j = spte_idx_range; j < spte_lmt_range; j++) { + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_SECOND && + (dev->rcms_info == obj_info->rcms_info && + obj_info->rsrc_type != SXE2_RCMS_OBJ_PBLE)) { + ret = sxe2_rcms_add_spt_entry( + dev, obj_info->rcms_info, j); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u second mode add spte %u err ret=%d\n", + 
obj_info->rsrc_type, j, ret); + goto spte_err; + } + } + } + if (fpte->valid) + continue; + + obj_info->rcms_info->fpte_indexes[obj_info->add_fpte_cnt] = + (u16)i; + obj_info->add_fpte_cnt++; + fpte->valid = true; + } + goto end; +spte_err: + while (j && (j > spte_idx_range)) { + j--; + sxe2_rcms_remove_spt_entry(dev, obj_info->rcms_info, j); + } + + if (fpte->valid == false) { + sxe2_rcms_remove_fpt_entry(dev, obj_info->rcms_info, i, + obj_info->entry_type); + } else { + fpte->u.cp.use_cnt--; + } +fpte_err: + while (i && (i > fpte_idx)) { + i--; + spte_idx_range = max(spte_idx, i * SXE2_RCMS_SPT_ENTRY_CNT); + spte_lmt_range = + min(spte_lmt, (i + 1) * SXE2_RCMS_SPT_ENTRY_CNT); + j = spte_lmt_range; + while (j > spte_idx_range) { + j--; + sxe2_rcms_remove_spt_entry(dev, obj_info->rcms_info, j); + } + } + while (obj_info->add_fpte_cnt) { + obj_info->add_fpte_cnt--; + sxe2_rcms_remove_fpt_entry( + dev, obj_info->rcms_info, + obj_info->rcms_info + ->fpte_indexes[obj_info->add_fpte_cnt], + obj_info->entry_type); + } +end: + return ret; +} + +int sxe2_rcms_del_obj(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_del_obj_info *info, bool reset, + bool mq_update) +{ + int ret = SXE2_OK; + struct sxe2_rcms_spt *spt; + struct sxe2_rcms_fpt_entry *fpte; + u32 fpte_idx, fpte_lmt; + u32 spte_idx, spte_lmt, rel_spte_idx; + u32 i, j; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (info->start_idx > info->rcms_info->rcms_obj[info->rsrc_type].cnt || + (info->start_idx + info->obj_cnt) > + info->rcms_info->rcms_obj[info->rsrc_type].cnt) { + DRV_RDMA_LOG_ERROR_BDF( + "RCMS:idx err start idx=%u obj type=%u max cnt=%u\n", + info->start_idx, info->rsrc_type, + info->rcms_info->rcms_obj[info->rsrc_type].cnt); + ret = -EINVAL; + goto end; + } + sxe2_rcms_get_obj_spte_range(info->rcms_info, info->rsrc_type, + info->start_idx, info->obj_cnt, &spte_idx, + &spte_lmt); + DRV_RDMA_LOG_DEV_DEBUG("RCMS:del obj %u spte idx=%u spte lmt =%u\n", + info->rsrc_type, spte_idx, spte_lmt); + for (i = spte_idx; i < spte_lmt; i++) { + fpte_idx = SPT_IDX_TO_FPT_IDX(i); + if (fpte_idx > info->rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:del obj %u invalid fpte idx fpte idx=%u max_idx = %u\n", + info->rsrc_type, fpte_idx, + info->rcms_info->max_fpte_index); + ret = -EINVAL; + goto end; + } + if (!info->rcms_info->fpt.fpte[fpte_idx].valid) + continue; + + if (info->rcms_info->fpt.fpte[fpte_idx].entry_type != + SXE2_RCMS_FPT_TYPE_SECOND) { + continue; + } + rel_spte_idx = REL_SPTE_INDEX_GET(i); + spt = &info->rcms_info->fpt.fpte[fpte_idx].u.spt; + if (spt->spte && spt->spte[rel_spte_idx].valid) { + ret = sxe2_rcms_remove_spt_entry(dev, info->rcms_info, + i); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:del obj remove spte %u err ret=%d\n", + i, ret); + goto end; + } + } + } + sxe2_rcms_get_obj_fpte_range(info->rcms_info, info->rsrc_type, + info->start_idx, info->obj_cnt, &fpte_idx, + &fpte_lmt); + if (fpte_idx > info->rcms_info->max_fpte_index || + (fpte_lmt - 1) > info->rcms_info->max_fpte_index) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: del obj %u invalid fpte idx fpte idx=%u fpte_lmt=%u max_idx = %u\n", + info->rsrc_type, fpte_idx, fpte_lmt, + info->rcms_info->max_fpte_index); + ret = -EINVAL; + goto end; + } + for (j = fpte_idx; j < fpte_lmt; j++) { + fpte = &info->rcms_info->fpt.fpte[j]; + if (!info->rcms_info->fpt.fpte[j].valid) + continue; + + if (fpte->entry_type == SXE2_RCMS_FPT_TYPE_FIRST) { + ret = sxe2_rcms_prep_remove_fpte_cp(info->rcms_info, j); + } else if (fpte->entry_type == 
SXE2_RCMS_FPT_TYPE_SECOND) { + ret = sxe2_rcms_prep_remove_fpte_spt(info->rcms_info, + j); + if (dev->rcms_info != info->rcms_info && + info->rsrc_type == SXE2_RCMS_OBJ_PBLE && + fpte->u.spt.spte) { + kfree(fpte->u.spt.spte_virt_mem.va); + fpte->u.spt.spte_virt_mem.va = NULL; + fpte->u.spt.spte = NULL; + } + } + if (ret == SXE2_OK) { + info->rcms_info->fpte_indexes[info->del_fpte_cnt] = + (u16)j; + info->del_fpte_cnt++; + } else { + ret = SXE2_OK; + } + } + ret = sxe2_rcms_finish_clr_fpte_reg(dev, info, reset, mq_update); +end: + return ret; +} + +static int sxe2_rcms_del_obj_type(struct sxe2_rdma_ctx_dev *dev, + enum sxe2_rcms_rsrc_type obj_type, + struct sxe2_rcms_info *rcms_info, + bool privileged, bool reset, bool mq_update) +{ + int ret = SXE2_OK; + struct sxe2_rcms_del_obj_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + info.rcms_info = rcms_info; + info.rsrc_type = obj_type; + info.privileged = privileged; + info.del_fpte_cnt = 0; + info.start_idx = 0; + info.obj_cnt = rcms_info->rcms_obj[obj_type].cnt; + ret = sxe2_rcms_del_obj(dev, &info, reset, mq_update); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF("RCMS:del obj %u err ret=%d\n", obj_type, + ret); + } + + return ret; +} + +void sxe2_rcms_del_objs(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, bool privileged, + bool reset, bool mq_update) +{ + u32 i; + + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + if (dev->rcms_info->rcms_obj[i].cnt) { + sxe2_rcms_del_obj_type(dev, i, rcms_info, privileged, + reset, mq_update); + } + } +} + +int sxe2_rcms_create_obj(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + if (obj_info->start_idx >= + obj_info->rcms_info->rcms_obj[obj_info->rsrc_type].cnt || + (obj_info->start_idx + obj_info->obj_cnt) > + obj_info->rcms_info->rcms_obj[obj_info->rsrc_type].cnt) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: invalid hmc obj type %u, start = %u, req cnt %u, cnt = %u\n", + obj_info->rsrc_type, obj_info->start_idx, + obj_info->obj_cnt, + obj_info->rcms_info->rcms_obj[obj_info->rsrc_type].cnt); + ret = -EINVAL; + goto end; + } + + if (obj_info->init_mode == FIRST_PAGE_TABLE) { + ret = sxe2_rcms_build_first_type_table(dev, obj_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u build first type table err ret=%d\n", + obj_info->rsrc_type, ret); + goto end; + } + } else { + ret = sxe2_rcms_build_second_type_table(dev, obj_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:obj %u build second type table err ret=%d\n", + obj_info->rsrc_type, ret); + goto end; + } + } + + ret = sxe2_rcms_finish_add_fpte_reg(dev, obj_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("RCMS:obj %u update fpte err ret=%d\n", + obj_info->rsrc_type, ret); + goto update_fpte_err; + } + goto end; +update_fpte_err: + sxe2_rcms_del_obj_type(dev, obj_info->rsrc_type, dev->rcms_info, + obj_info->privileged, rdma_func->reset, false); +end: + return ret; +} + +static int sxe2_rcms_pf_create_objs(struct sxe2_rdma_pci_f *rdma_func, + bool privileged) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_rcms_create_obj_info info = {}; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + info.rcms_info = dev->rcms_info; + info.privileged = privileged; + info.init_mode = dev->rcms_info->create_mode; + info.entry_type = info.init_mode == 
FIRST_PAGE_TABLE ? + SXE2_RCMS_FPT_TYPE_FIRST : + SXE2_RCMS_FPT_TYPE_SECOND; + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + if (i == SXE2_RCMS_OBJ_PBLE) + continue; + if (dev->rcms_info->rcms_obj[i].cnt) { + info.rsrc_type = i; + info.add_fpte_cnt = 0; + info.start_idx = 0; + info.obj_cnt = dev->rcms_info->rcms_obj[i].cnt; + ret = sxe2_rcms_create_obj(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:create rcms obj %u err ret=%d\n", + i, ret); + goto close_obj; + } + } + } + goto end; + +close_obj: + while (i) { + i--; + if (dev->rcms_info->rcms_obj[i].cnt) { + sxe2_rcms_del_obj_type(dev, i, dev->rcms_info, + privileged, false, true); + } + } +end: + return ret; +} + +static int sxe2_rcms_alloc_vf_query_fpm_buf(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_dma_mem *mem) +{ + int ret = SXE2_OK; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + ret = sxe2_kget_aligned_mem(rdma_func, mem, SXE2_QUERY_FPM_BUF_SIZE, + SXE2_FPM_QUERY_BUF_ALIGNMENT_M); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("rcms:alloc vf query fpm buf err ret=%d\n", + ret); + } + return ret; +} + +static int +sxe2_rcms_pf_query_vf_fpm_val(struct sxe2_rdma_vchnl_dev *vc_dev, u8 vf_pmf_idx, + struct sxe2_rdma_dma_mem *query_fpm_mem) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + ret = sxe2_rcms_query_fpm_val_cmd_complete(vc_dev->pf_dev, + query_fpm_mem, vf_pmf_idx); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vf %u pmf id %u mq query vf fpm val err ret=%d\n", + vc_dev->vf_id, vf_pmf_idx, ret); + goto end; + } + sxe2_rcms_parse_fpm_query_buf(vc_dev->pf_dev, query_fpm_mem->va, + &vc_dev->rcms_info); +end: + return ret; +} + +static int +sxe2_rcms_pf_commit_vf_fpm_val(struct sxe2_rdma_ctx_dev *dev, u8 vf_pmf_idx, + struct sxe2_rdma_dma_mem *commit_fpm_mem, + struct sxe2_rcms_info *rcms_info) +{ + int ret = SXE2_OK; + __le32 *buf; + struct sxe2_rcms_obj_info *obj_info; + u32 i; + u32 start_byte = 0; + u32 base_val; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + buf = (__le32 *)(commit_fpm_mem->va); + obj_info = rcms_info->rcms_obj; + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + base_val = (u32)( + (obj_info[i].base >> SXE2_RCMS_OBJ_LINER_BASE_SHIFT) & + SXE2_RCMS_OBJ_BASE_MASK); + set_32bit_val(buf, start_byte, base_val); + start_byte += SXE2_RCMS_COMMIT_BUF_SHIFT; + set_32bit_val(buf, start_byte, obj_info[i].cnt); + start_byte += SXE2_RCMS_COMMIT_BUF_SHIFT; + } + set_32bit_val(buf, SXE2_RCMS_MAX_FPTE_CNT_BUF_OFFSET, + rcms_info->max_fpte_cnt); + set_32bit_val(buf, SXE2_RCMS_CEQS_DB_BUF_OFFSET, rcms_info->max_ceqs); + ret = sxe2_rcms_commit_fpm_val_cmd_complete(dev, commit_fpm_mem, + vf_pmf_idx); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:func id %u commit vf fpm val cmd err ret=%d\n", + vf_pmf_idx, ret); + } + return ret; +} + +static int sxe2_rcms_pf_query_commit_vf_fpm_val( + struct sxe2_rdma_vchnl_dev *vc_dev, u8 vf_pmf_idx, + struct sxe2_rdma_dma_mem *fpm_mem, struct sxe2_rcms_info *rcms_info) +{ + int ret = SXE2_OK; + u32 qp_limit_cnt; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(vc_dev->pf_dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + qp_limit_cnt = g_rsrc_limits_table[rdma_func->limits_sel].qp_limit; + ret = sxe2_rcms_pf_query_vf_fpm_val(vc_dev, vf_pmf_idx, fpm_mem); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vmf id %u mq query vf fpm val err ret=%d\n", + vf_pmf_idx, ret); 
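+		/*
+		 * Annotation: the PF-on-behalf-of-VF sequence in this
+		 * function is query -> modify -> compute linear bases ->
+		 * commit; fpt.fpte is deliberately left NULL at the end,
+		 * since the PF only sizes the VF's FPM budget here and the
+		 * VF appears to build its own page tables later (see
+		 * sxe2_rcms_setup_vf).
+		 */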
+ goto end; + } + ret = sxe2_rcms_modify_fpm_val(vc_dev->pf_dev, rcms_info, qp_limit_cnt); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vmf id %u modify vf fpm val err ret=%d\n", + vf_pmf_idx, ret); + goto end; + } + sxe2_rcms_get_obj_liner_base(vc_dev->pf_dev, rcms_info); + ret = sxe2_rcms_pf_commit_vf_fpm_val(vc_dev->pf_dev, vf_pmf_idx, + fpm_mem, rcms_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vmf id %u mq commit vf fpm val err ret=%d\n", + vf_pmf_idx, ret); + goto end; + } + rcms_info->fpt.fpte = NULL; + +end: + return ret; +} + +int sxe2_rcms_pf_config_vf_fpm_val(struct sxe2_rdma_vchnl_dev *vc_dev) +{ + int ret = SXE2_OK; + struct sxe2_rcms_info *rcms_info; + struct sxe2_rdma_dma_mem query_fpm_mem; + u16 vf_idx; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + u8 vf_pmf_idx = (u8)(vc_dev->pmf_index); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if ((!dev->privileged) || + (vf_pmf_idx > dev->hw_attrs.max_hw_vf_fpm_id || + vf_pmf_idx < dev->hw_attrs.first_hw_vf_fpm_id)) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:dev if pf err or vf pmf id err dev privileged=%u id=%u\n", + dev->privileged, vf_pmf_idx); + ret = -EINVAL; + goto end; + } + + rcms_info = &vc_dev->rcms_info; + vf_idx = vc_dev->vf_idx; + if (!vc_dev->fpm_query_buf) { + if (!dev->vf_fpm_query_buf[vf_idx].va) { + ret = sxe2_rcms_alloc_vf_query_fpm_buf( + dev, &dev->vf_fpm_query_buf[vf_idx]); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: alloc vf query fpm buf err ret=%d\n", + ret); + goto end; + } + } + vc_dev->fpm_query_buf = dev->vf_fpm_query_buf[vf_idx].va; + vc_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[vf_idx].pa; + } + query_fpm_mem.pa = vc_dev->fpm_query_buf_pa; + query_fpm_mem.va = vc_dev->fpm_query_buf; + rcms_info->rcms_fn_id = vf_pmf_idx; + + ret = sxe2_rcms_pf_query_commit_vf_fpm_val(vc_dev, vf_pmf_idx, + &query_fpm_mem, rcms_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:vf %u pmf id %u query commit vf fpm val err ret=%d\n", + vc_dev->vf_id, vf_pmf_idx, ret); + ret = -EINVAL; + goto end; + } +end: + return ret; +} + +static int sxe2_rcms_vf_create_objs(struct sxe2_rdma_pci_f *rdma_func) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + u32 i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_create_obj_info info = {}; + + info.rcms_info = dev->rcms_info; + info.privileged = false; + info.init_mode = FIRST_PAGE_TABLE; + info.entry_type = SXE2_RCMS_FPT_TYPE_FIRST; + + for (i = SXE2_RCMS_OBJ_QP; i < SXE2_RCMS_OBJ_MAX; i++) { + if (i == SXE2_RCMS_OBJ_PBLE) + continue; + if (dev->rcms_info->rcms_obj[i].cnt) { + info.rsrc_type = i; + info.add_fpte_cnt = 0; + info.start_idx = 0; + info.obj_cnt = dev->rcms_info->rcms_obj[i].cnt; + ret = sxe2_rcms_create_obj(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS:vf create rcms obj %u err ret=%d\n", + i, ret); + goto close_obj; + } + } + } + goto end; + +close_obj: + while (i) { + i--; + if (dev->rcms_info->rcms_obj[i].cnt) { + sxe2_rcms_del_obj_type(dev, i, dev->rcms_info, + dev->privileged, false, true); + } + } +end: + return ret; +} + +static int sxe2_rcms_setup_vf(struct sxe2_rdma_pci_f *rdma_func) +{ + int ret = SXE2_OK; + int i; + struct sxe2_vchnl_init_vf_rcms_resp init_rcms_resp; + struct sxe2_vchnl_vf_obj_resp vf_obj_resp; + struct sxe2_rcms_info *rcms_info; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (dev->privileged) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: 
setup vf privileged err privileged=%u\n", + dev->privileged); + ret = -EINVAL; + goto end; + } + + if (!dev->rcms_info || !dev->rcms_info->rcms_obj) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vf rcms info or obj pointer is null\n"); + ret = -ENOMEM; + goto end; + } + + ret = sxe2_vchnl_req_init_vf_rcms(dev, &init_rcms_resp); + if (ret != SXE2_OK) + goto end; + + rcms_info = dev->rcms_info; + rcms_info->first_fpte_index = init_rcms_resp.first_fpte_index; + rcms_info->max_fpte_index = init_rcms_resp.max_fpte_index; + rcms_info->max_fpte_cnt = init_rcms_resp.max_fpte_cnt; + rcms_info->fpte_needed = init_rcms_resp.fpte_needed; + rcms_info->max_ceqs = init_rcms_resp.max_ceqs; + rcms_info->max_db_page_num = init_rcms_resp.max_db_page_num; + rcms_info->db_bar_addr = init_rcms_resp.db_bar_addr; + rcms_info->first_page_fpte = + rcms_info->max_fpte_cnt - rcms_info->fpte_needed; + rcms_info->max_cc_qp_cnt = init_rcms_resp.max_cc_qp_cnt; + + for (i = 0; i < SXE2_RCMS_OBJ_MAX; i++) + rcms_info->rcms_obj[i].cnt = init_rcms_resp.obj_max_cnt[i]; + + rcms_info->fpt.fpte_cnt = rcms_info->fpte_needed; + + ret = sxe2_vchnl_req_get_vf_obj_info(dev, &vf_obj_resp); + if (ret != SXE2_OK) + goto end; + + for (i = 0; i < SXE2_RCMS_OBJ_MAX; i++) { + rcms_info->rcms_obj[i].size = vf_obj_resp.obj_info[i].size; + rcms_info->rcms_obj[i].base = vf_obj_resp.obj_info[i].base; + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS:vf obj %u max cnt=%u size=%u base=%#llx\n", i, + rcms_info->rcms_obj[i].cnt, rcms_info->rcms_obj[i].size, + rcms_info->rcms_obj[i].base); + } + + rcms_info->pmf_index = init_rcms_resp.pmf_index; + rcms_info->pf_max_ceqs = init_rcms_resp.pf_max_ceqs; + DRV_RDMA_LOG_DEV_DEBUG( + "RCMS VF Capability Val:\n" + "\tvf pmf_index %#x pf_max_ceqs=%#x\n" + "\tvf fpte needed=%u fpte first idx=%u max fpte idx=%u\n" + "\tmax fpte cnt=%u first pgae fpte=%u\n" + "\tvf max ceqs=%u max db page=%u db addr=%#x\n", + rcms_info->pmf_index, rcms_info->pf_max_ceqs, + rcms_info->fpte_needed, rcms_info->first_fpte_index, + rcms_info->max_fpte_index, rcms_info->max_fpte_cnt, + rcms_info->first_page_fpte, rcms_info->max_ceqs, + rcms_info->max_db_page_num, rcms_info->db_bar_addr); + + ret = sxe2_rcms_alloc_fpt_entry_mem(dev, rcms_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: vf alloc fpt entry mem err ret=%d\n", ret); + goto end; + } + ret = sxe2_rcms_vf_create_objs(rdma_func); + if (ret != SXE2_OK) + goto free_fpte_mem; + + goto end; + +free_fpte_mem: + kfree(rcms_info->fpt.fpte); + rcms_info->fpt.fpte = NULL; +end: + return ret; +} + +static int sxe2_rcms_setup_pf(struct sxe2_rdma_pci_f *rdma_func) +{ + int ret = SXE2_OK; + u32 qp_limit_cnt; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->rcms_info || !dev->rcms_info->rcms_obj) { + DRV_RDMA_LOG_DEV_ERR( + "RCMS: rcms info or obj pointer is null\n"); + ret = -ENOMEM; + goto end; + } + if (rdma_func->rcms_mode.ctx_mode >= SXE2_RCMS_INIT_MODE_MAX) { + DRV_RDMA_LOG_DEV_ERR("RCMS:rcms init mode err init mode=%u\n", + rdma_func->rcms_mode.ctx_mode); + ret = -EINVAL; + goto end; + } + if (rdma_func->rcms_mode.ctx_mode == SXE2_RCMS_FIRST_INIT_MODE) + dev->rcms_info->create_mode = FIRST_PAGE_TABLE; + else + dev->rcms_info->create_mode = SECOND_PAGE_TABLE; + + qp_limit_cnt = g_rsrc_limits_table[rdma_func->limits_sel].qp_limit; + dev->rcms_info->rcms_fn_id = dev->rcms_fn_id; + ret = sxe2_rcms_pf_config_fpm_val(dev, qp_limit_cnt); + if (ret != SXE2_OK) + goto end; + + ret = sxe2_rcms_pf_create_objs(rdma_func, 
dev->privileged); + if (ret != SXE2_OK) + goto create_objs_err; + + goto end; +create_objs_err: + if (dev->rcms_info->fpt.fpte != NULL) { + kfree(dev->rcms_info->fpt.fpte); + dev->rcms_info->fpt.fpte = NULL; + } +end: + return ret; +} + +int sxe2_rcms_setup(struct sxe2_rdma_device *rdma_dev) +{ + int ret = SXE2_OK; + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + + DRV_RDMA_LOG_DEV_DEBUG("RCMS: setup rcms start\n"); + ret = drv_rdma_debug_rcms_add(rdma_dev, rdma_func->ctx_dev.rcms_info); + if (ret != SXE2_OK) + goto end; + + if (rdma_func->ftype == SXE2_RDMA_PF) + ret = sxe2_rcms_setup_pf(rdma_func); + else + ret = sxe2_rcms_setup_vf(rdma_func); + + if (ret != SXE2_OK) + goto end; + + DRV_RDMA_LOG_DEV_DEBUG("RCMS: setup rcms finish\n"); +end: + return ret; +} + +void sxe2_rcms_exit(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct sxe2_rdma_ctx_dev *dev = &rdma_func->ctx_dev; + + DRV_RDMA_LOG_DEV_DEBUG("RCMS:exit rcms start privileged=%u\n", + dev->privileged); + + sxe2_rcms_del_objs(dev, dev->rcms_info, dev->privileged, + rdma_func->reset, true); + if (dev->rcms_info->fpt.fpte != NULL) { + kfree(dev->rcms_info->fpt.fpte); + dev->rcms_info->fpt.fpte = NULL; + } + DRV_RDMA_LOG_DEV_DEBUG("RCMS:exit rcms finish privileged=%u\n", + dev->privileged); +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.h new file mode 100644 index 0000000000000000000000000000000000000000..b9dae733a85f369b3ed635dc75cc7a9623834548 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_rcms.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_rdma_rcms.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_common.h" +#ifndef SXE2_RCMS_H +#define SXE2_RCMS_H + +#define SXE2_OK 0 +#define SXE2_FPTE_RCMS_2MB_MASK 0x1FFFFF +#define SXE2_MAX_FPTE_CNT 0x1000 +#define SXE2_RCMS_MAX_CP_COUNT 512 +#define SXE2_MAX_FPT_ENTRIES 35 +#define SXE2_FIRST_VF_FPM_ID 8 +#define SXE2_MIN_PBLE_PAGES 3 +#define SXE2_RCMS_FPM_MULTIPLIER 1024 +#define SXE2_RCMS_FPTE_SIZE 0x200000 +#define SXE2_RCMS_FIRST_CP_SIZE 0x200000 +#define SXE2_RCMS_SECOND_CP_SIEZ 0X1000 +#define SXE2_RCMS_SPT_PAGE_SIZE 0x1000 +#define SXE2_RCMS_FPT_SPT_BUF_ALIGNMENT 4096 +#define SXE2_RCMS_SPTE_INVALID_VAL 0xFF +#define SXE2_RCMS_UPDATE_FPTE_BUF_SIZE 512 +#define SXE2_RDMA_PF 0 +#define SXE2_RDMA_VF 1 +#define SXE2_RCMS_SPT_INVALID_VAL 0xFF +#define SXE2_RCMS_LINER_ADDR_BASE 0x0 +#define SXE2_UPDATE_FPTE_WQE_INCLUDE_ENTRY_CNT 3 +#define SXE2_RCMS_SPT_INVALID_MASK_REG_VAL 0x3FFFFF +#define SXE2_RCMS_SPT_INVALID_REG_PULL 200 +#define SXE2_RCMS_DEL_OBJ_START_IDX 0 +#define SXE2_RCMS_QUERY_BUF_PA_SHIFT 2 +#define SXE2_RCMS_PARSE_QUERY_BUF_SHIFT 4 +#define SXE2_RCMS_MAX_FPTE_CNT_BUF_OFFSET 0x50 +#define SXE2_RCMS_FIRST_FPTE_IDX_BUF_OFFSET 0x54 +#define SXE2_RCMS_CEQS_DB_BUF_OFFSET 0x58 +#define SXE2_RCMS_DB_BAR_ADDR_BUF_OFFSET 0x5C +#define SXE2_RCMS_MAX_FPTE_CNT_MASK GENMASK(13, 0) +#define SXE2_RCMS_MAX_CC_QP_CNT_MASK GENMASK(26, 14) +#define SXE2_RCMS_FIRST_FPTE_IDX_MASK GENMASK(12, 0) +#define SXE2_RCMS_IRRL_OST_NUM_MASK GENMASK(16, 13) +#define SXE2_RCMS_SSNT_OST_NUM_MASK GENMASK(20, 17) +#define SXE2_RCMS_RESP_OST_NUM_MASK GENMASK(24, 21) +#define SXE2_RCMS_MAX_CEQS_MASK GENMASK(9, 0) +#define SXE2_RCMS_MAX_DB_PAGE_NUM_MASK GENMASK(31, 16) +#define 
SXE2_RCMS_OBJ_BASE_MASK GENMASK(30, 0) +#define SXE2_RCMS_PRINT_HEX_ROW_SIZE 16 +#define SXE2_RCMS_PRINT_HEX_GROUP_SIZE 8 +#define SXE2_RCMS_PRINT_HEX_WQE_TO_BIT 8 +#define SXE2_RCMS_FPTE_BUF_ENTRIES_SIZE_SHIFT 4 +#define SXE2_RCMS_OBJ_LINER_BASE_SHIFT 3 +#define SXE2_RCMS_COMMIT_BUF_SHIFT 4 +#define SXE2_RCMS_MQ_CMD_NO_SCRATCH 0 +#define SXE2_RCMS_FPTE_BUF_SHIFT 7 +#define SXE2_RCMS_MQ_WQE_LEN 8 +#define SXE2_RCMS_QUERY_BUF_LEN 24 +#define SXE2_RCMS_AMO_PAGE_SIZE 12 + +#define FPT_INDEX_GET(a) ((a >> 21)) +#define SPT_INDEX_GET(a) ((a >> 12)) +#define REL_SPTE_INDEX_GET(a) ((a & 0x1FF)) +#define SPT_IDX_TO_FPT_IDX(a) ((a >> 9)) +#define FIST_PAGE_TABLE_CP_OFFSET_GET(a) (a & 0x1FFFFF) +#define SECOND_PAGE_TABLE_CP_OFFSET_GET(a) (a & 0xFFF) +#define LINER_ADDR_TO_REL_SPT_IDX(a) ((a >> 12) & 0x1FF) + +#define SXE2_RCMS_FPT_CMD_WR BIT(31) +#define SXE2_RCMS_FPT_DATALOW_VALID BIT(0) +#define SXE2_RCMS_FPT_DATALOW_TYPE BIT(1) +#define SXE2_RCMS_FPT_DATALOW_CP_COUNT GENMASK(11, 2) +#define SXE2_RCMS_FPT_CMD_PARTSEL BIT(15) + +#define SPT_CACHE_INVALID_MASK_S 0 +#define SPT_CACHE_INVALID_MASK_M GENMASK(21, 0) + +#define SPT_CACHE_INVALID_IDX_SPT_IDX_S 0 +#define SPT_CACHE_INVALID_IDX_SPT_IDX_M GENMASK(21, 0) + +#define SPT_CACHE_INVALID_IDX_EN_S 24 +#define SPT_CACHE_INVALID_IDX_EN_M BIT(24) + +#define SPT_CACHE_INVALID_IDX_DONE_S 25 +#define SPT_CACHE_INVALID_IDX_DONE_M BIT(25) + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) +extern u32 g_inject_fpte_err; +#endif + +struct sxe2_rcms_rsrc_limits { + u32 qp_limit; + u32 mr_limit; + u32 cq_limit; +}; + +struct sxe2_rcms_create_obj_info { + struct sxe2_rcms_info *rcms_info; + u32 rsrc_type; + u32 add_fpte_cnt; + u32 start_idx; + u32 obj_cnt; + enum sxe2_rcms_fpt_entry_type entry_type; + enum sxe2_rcms_creat_table_mode init_mode; + bool privileged; +}; +struct sxe2_rcms_del_obj_info { + struct sxe2_rcms_info *rcms_info; + u32 rsrc_type; + u32 del_fpte_cnt; + u32 start_idx; + u32 obj_cnt; + bool privileged; +}; + +int sxe2_rcms_create_obj(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info); + +int sxe2_rcms_del_obj(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_del_obj_info *info, bool reset, + bool mq_update); + +int sxe2_rcms_pf_config_vf_fpm_val(struct sxe2_rdma_vchnl_dev *vc_dev); + +int sxe2_rcms_commit_fpm_val_cmd(struct sxe2_mq_ctx *mq, u64 scratch, + u16 rcms_fn_id, + struct sxe2_rdma_dma_mem *commit_fpm_mem, + bool post_sq, u8 wait_type); + +int sxe2_rcms_query_fpm_val_cmd(struct sxe2_mq_ctx *mq, u64 scratch, + u16 rcms_fn_id, + struct sxe2_rdma_dma_mem *query_fpm_mem, + bool post_sq, u8 wait_type); + +int sxe2_rcms_add_fpt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, u32 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type); + +int sxe2_rcms_add_spt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, u32 spte_index); + +int sxe2_rcms_remove_spt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u32 spte_index); +void sxe2_rcms_remove_fpt_entry(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u32 fpte_index, + enum sxe2_rcms_fpt_entry_type fpte_type); + +int sxe2_rcms_update_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u16 *fpte_index_addr, u32 fpte_cnt, bool seted); + +int sxe2_rcms_update_pe_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_update_fptes_info *info, + u64 scratch); + +int sxe2_rcms_update_fptes_cmd(struct sxe2_rdma_ctx_dev *dev, + struct 
sxe2_rcms_update_fptes_info *info); + +int sxe2_rcms_update_fptes_cmd_complete( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_update_fptes_info *info); +int sxe2_rcms_vf_update_fptes(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u16 *fpte_index_addr, u32 fpte_cnt, bool seted); + +int sxe2_rcms_setup(struct sxe2_rdma_device *rdma_dev); + +void sxe2_rcms_del_objs(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, bool privileged, + bool reset, bool mq_update); + +void sxe2_rcms_exit(struct sxe2_rdma_device *rdma_dev); + +int sxe2_rcms_pf_config_fpm_val(struct sxe2_rdma_ctx_dev *dev, + u32 qp_limit_count); + +void sxe2_rcms_get_obj_spte_range(struct sxe2_rcms_info *rcms_info, + u32 obj_type, u32 start_idx, u32 obj_cnt, + u32 *spte_idx, u32 *spte_limit); +int sxe2_rcms_invalidate_spt_cache(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 spte_idx); +int sxe2_rcms_update_fpte_wqe_fill(struct sxe2_mq_ctx *mq, + struct sxe2_rcms_update_fptes_info *info, + u64 scratch); + +int sxe2_rcms_build_first_type_table(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info); + +int sxe2_rcms_build_second_type_table( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_create_obj_info *obj_info); + +void sxe2_rcms_get_obj_fpte_range(struct sxe2_rcms_info *rcms_info, + u32 obj_type, u32 start_idx, u32 obj_cnt, + u32 *fpte_idx, u32 *fpte_limit); +int sxe2_rcms_modify_fpm_val(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_info *rcms_info, + u32 qp_limit_count); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.c new file mode 100644 index 0000000000000000000000000000000000000000..cef14366db140fb6ed498ad89b80ab08d241992e --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.c @@ -0,0 +1,1773 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_rdma_virtchnl.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_main.h" +#include "sxe2_drv_rdma_qos.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_mbx_public.h" +#include + +struct sxe2_rdma_vchnl_dev * +sxe2_vchnl_find_vc_dev(struct sxe2_rdma_ctx_dev *dev, u16 vf_id) +{ + struct sxe2_rdma_vchnl_dev *vc_dev = NULL; + unsigned long flags = 0; + u16 vf_idx; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + spin_lock_irqsave(&dev->vc_dev_lock, flags); + for (vf_idx = 0; vf_idx < dev->num_vfs; vf_idx++) { + if (dev->vc_dev[vf_idx] && + dev->vc_dev[vf_idx]->vf_id == vf_id) { + vc_dev = dev->vc_dev[vf_idx]; + refcount_inc(&vc_dev->refcnt); + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl:vf id %u find vc dev vf idx %u\n", vf_id, + vf_idx); + break; + } + } + spin_unlock_irqrestore(&dev->vc_dev_lock, flags); + + return vc_dev; +} + +static void sxe2_vchnl_remove_vc_dev(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev *vc_dev) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&dev->vc_dev_lock, flags); + dev->vc_dev[vc_dev->vf_idx] = NULL; + spin_unlock_irqrestore(&dev->vc_dev_lock, flags); +} + +int sxe2_vchnl_send_pf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct aux_core_dev_info *cdev_info = + (struct aux_core_dev_info *)(to_rdmafunc(dev)->cdev); + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->vc_send(cdev_info, vf_id, msg, len, + session_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:vf id %u vc send err ret=%d\n", vf_id, + ret); + } + } + return ret; +} + +static void sxe2_vchnl_pf_send_resp(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, + struct sxe2_vchnl_op_buf *vchnl_msg, + void *param, u16 param_len, int resp_code, + u64 session_id) +{ + int ret = SXE2_OK; + u8 *resp_buf = NULL; + struct sxe2_vchnl_resp_buf *vchnl_msg_resp; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + resp_buf = kzalloc(SXE2_VCHNL_MAX_MSG_SIZE, GFP_KERNEL); + if (!resp_buf) { + DRV_RDMA_LOG_DEV_ERR("vchnl:resp buf alloc err\n"); + goto end; + } + vchnl_msg_resp = (struct sxe2_vchnl_resp_buf *)resp_buf; + vchnl_msg_resp->op_ctx = vchnl_msg->op_ctx; + vchnl_msg_resp->buf_len = + sizeof(struct sxe2_vchnl_resp_buf) + param_len; + vchnl_msg_resp->op_ret = (s16)resp_code; + + if (param_len) + memcpy(vchnl_msg_resp->buf, param, param_len); + + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl:pf send resp vf id=%u resp code=%u session id=%llu\n" + "resp msg op ctx=%#llx op ret=%u buf len=%u\n", + vf_id, resp_code, session_id, vchnl_msg_resp->op_ctx, + vchnl_msg_resp->op_ret, vchnl_msg_resp->buf_len); + ret = sxe2_vchnl_send_pf(dev, vf_id, resp_buf, vchnl_msg_resp->buf_len, + session_id); + + kfree(resp_buf); +end: + return; +} + +static int sxe2_vchnl_alloc_vchnl_req_msg(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_req *vchnl_req, + struct sxe2_vchnl_req_init_info *info) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_op_buf *vchnl_msg; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + vchnl_msg = kzalloc(SXE2_VCHNL_MAX_MSG_SIZE, GFP_KERNEL); + if (!vchnl_msg) { + DRV_RDMA_LOG_DEV_ERR("vchnl:alloc vchnl msg mem err\n"); + ret = -ENOMEM; + goto end; + } + vchnl_msg->op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->buf_len = 
sizeof(*vchnl_msg) + info->req_parm_len; + if (info->req_parm_len) + memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len); + + vchnl_msg->op_code = info->op_code; + vchnl_msg->op_ver = info->op_ver; + + vchnl_req->vchnl_msg = vchnl_msg; + vchnl_req->parm = info->resp_parm; + vchnl_req->parm_len = info->resp_parm_len; +end: + return ret; +} + +static void sxe2_vchnl_free_vchnl_req_msg(struct sxe2_vchnl_req *vchnl_req) +{ + kfree(vchnl_req->vchnl_msg); +} + +int sxe2_vchnl_send_sync(struct sxe2_rdma_ctx_dev *dev, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len) +{ + int ret = SXE2_OK; + struct aux_core_dev_info *cdev_info = + (struct aux_core_dev_info *)(to_rdmafunc(dev)->cdev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->vc_send_sync(cdev_info, msg, len, + recv_msg, recv_len); + if (ret == -ETIMEDOUT) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:virtual channel completion timeout ret=%d\n", + ret); + dev->vchnl_up = false; + } + } + return ret; +} + +static int sxe2_vchnl_req_verify_resp(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_req *vchnl_req, + u16 resp_len) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + switch (vchnl_req->vchnl_msg->op_code) { + case SXE2_VCHNL_OP_GET_VER: + case SXE2_VCHNL_OP_GET_RCMS_FCN: + case SXE2_VCHNL_OP_PUT_RCMS_FCN: + case SXE2_VCHNL_OP_INIT_VF_RCMS: + case SXE2_VCHNL_OP_VLAN_PARSING: + case SXE2_VCHNL_OP_GATHER_STATS: + case SXE2_VCHNL_OP_MANAGE_QSET_NODE: + case SXE2_VCHNL_OP_PBL_SET_FPTE: + case SXE2_VCHNL_OP_PBL_CLEAR_FPTE: + case SXE2_VCHNL_OP_GET_VF_OBJ_INFO: + case SXE2_VCHNL_OP_UPDATE_FPTE: + case SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED: + if (resp_len != vchnl_req->parm_len) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:op code %u resp len err resp len=%u parm len=%u\n", + vchnl_req->vchnl_msg->op_code, resp_len, + vchnl_req->parm_len); + ret = -EBADMSG; + } + break; + case SXE2_VCHNL_OP_GET_RDMA_CAPS: + if (resp_len < SXE2_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:op code %u resp len err resp len=%u min caps size=%u\n", + vchnl_req->vchnl_msg->op_code, resp_len, + SXE2_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE); + ret = -EBADMSG; + } + break; + default: + ret = -EBADMSG; + } + + return ret; +} + +static int sxe2_vchnl_req_get_resp(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_req *vchnl_req) +{ + int ret = SXE2_OK; + u16 resp_len; + struct sxe2_vchnl_resp_buf *vchnl_msg_resp = + (struct sxe2_vchnl_resp_buf *)dev->vc_recv_buf; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: vchnl context value does not match req addr=%lx resp ctx=%lx\n", + (uintptr_t)vchnl_req, + (uintptr_t)vchnl_msg_resp->op_ctx); + ret = -EBADMSG; + goto end; + } + resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp); + resp_len = min(resp_len, vchnl_req->parm_len); + + ret = sxe2_vchnl_req_verify_resp(dev, vchnl_req, resp_len); + if (ret != SXE2_OK) { + ret = -EBADMSG; + goto end; + } + ret = (int)vchnl_msg_resp->op_ret; + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("vchnl: resp msg op ret err ret=%d\n", + ret); + goto end; + } + vchnl_req->resp_len = 0; + if (vchnl_req->parm_len && vchnl_req->parm && resp_len) { + memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len); + vchnl_req->resp_len = resp_len; + DRV_RDMA_LOG_DEBUG_BDF("vchnl: resp data size=%u\n", resp_len); + } + +end: + return ret; +} + +int sxe2_vchnl_req_send_sync(struct sxe2_rdma_ctx_dev 
*dev, + struct sxe2_vchnl_req_init_info *info) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req vchnl_req = {}; + u16 resp_len = sizeof(dev->vc_recv_buf); + u16 msg_len; + u8 *msg; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + ret = sxe2_vchnl_alloc_vchnl_req_msg(dev, &vchnl_req, info); + if (ret != SXE2_OK) + goto end; + + msg_len = vchnl_req.vchnl_msg->buf_len; + msg = (u8 *)vchnl_req.vchnl_msg; + mutex_lock(&dev->vchnl_mutex); + ret = sxe2_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf, + resp_len); + if (ret != SXE2_OK) + goto free_req_msg; + + ret = sxe2_vchnl_req_get_resp(dev, &vchnl_req); + +free_req_msg: + mutex_unlock(&dev->vchnl_mutex); + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl: virtual channel send sync ret=%d\n" + "\top=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n", + ret, vchnl_req.vchnl_msg->op_code, vchnl_req.vchnl_msg->op_ver, + vchnl_req.vchnl_msg->buf_len, vchnl_req.parm_len, + vchnl_req.resp_len); + sxe2_vchnl_free_vchnl_req_msg(&vchnl_req); +end: + return ret; +} + +static bool sxe2_vchnl_pf_verify_msg(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_op_buf *vchnl_msg, + u16 len) +{ + bool ret = true; + u16 op_code = vchnl_msg->op_code; + u16 op_size; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl:msg op code=%u op ver=%u buf len=%u op ctx=%#llx\n", + vchnl_msg->op_code, vchnl_msg->op_ver, vchnl_msg->buf_len, + vchnl_msg->op_ctx); + + if (len > SXE2_VCHNL_MAX_MSG_SIZE) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len is out of max msg size msg len=%u max len=%u\n", + op_code, len, SXE2_VCHNL_MAX_MSG_SIZE); + ret = false; + goto end; + } + if (len < sizeof(*vchnl_msg)) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len is less than min size len = %u min len = %zu\n", + op_code, len, sizeof(*vchnl_msg)); + ret = false; + goto end; + } + switch (op_code) { + case SXE2_VCHNL_OP_PBL_SET_FPTE: + op_size = sizeof(struct sxe2_vchnl_pbl_set_fpte_info); + if (len < sizeof(*vchnl_msg) + op_size) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len err len=%u op size=%u\n", + op_code, len, op_size); + ret = false; + goto end; + } + break; + case SXE2_VCHNL_OP_PBL_CLEAR_FPTE: + op_size = sizeof(struct sxe2_vchnl_pbl_clear_fpte_info); + if (len < sizeof(*vchnl_msg) + op_size) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len err len=%u op size=%u\n", + op_code, len, op_size); + ret = false; + goto end; + } + break; + case SXE2_VCHNL_OP_UPDATE_FPTE: + op_size = sizeof(struct sxe2_rcms_vf_update_fptes_info); + if (len < sizeof(*vchnl_msg) + op_size) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len err len=%u op size=%u\n", + op_code, len, op_size); + ret = false; + goto end; + } + break; + case SXE2_VCHNL_OP_GET_RCMS_FCN: + case SXE2_VCHNL_OP_GET_VER: + case SXE2_VCHNL_OP_PUT_RCMS_FCN: + case SXE2_VCHNL_OP_VLAN_PARSING: + case SXE2_VCHNL_OP_GET_RDMA_CAPS: + case SXE2_VCHNL_OP_INIT_VF_RCMS: + case SXE2_VCHNL_OP_GATHER_STATS: + case SXE2_VCHNL_OP_MANAGE_QSET_NODE: + case SXE2_VCHNL_OP_GET_VF_OBJ_INFO: + case SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED: + if (len < sizeof(*vchnl_msg)) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: op %u msg len err len=%u op size=%zu\n", + op_code, len, sizeof(*vchnl_msg)); + ret = false; + goto end; + } + break; + default: + ret = false; + } + +end: + return ret; +} + +void sxe2_vchnl_put_vf_dev(struct sxe2_rdma_vchnl_dev **vc_dev) +{ + struct sxe2_rdma_device *rdma_dev; + struct sxe2_rdma_ctx_dev *dev; + + if (*vc_dev == NULL) { + DRV_RDMA_LOG_DEBUG("vc_dev is NULL\n"); + return; + } + + dev = (*vc_dev)->pf_dev; 
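+ /* Pairs with the refcount_inc() taken in sxe2_vchnl_find_vc_dev(): the + * final put frees the vchnl dev and NULLs the caller's pointer. An + * illustrative lookup/put pairing (mirroring sxe2_vchnl_recv_pf_worker(), + * not a verbatim caller): + * vc_dev = sxe2_vchnl_find_vc_dev(dev, vf_id); (takes a reference) + * ... handle the VF request ... + * sxe2_vchnl_put_vf_dev(&vc_dev); (may free; sets vc_dev to NULL) + */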
+ rdma_dev = to_rdmadev(dev); + + if (refcount_read(&(*vc_dev)->refcnt) == 0) + return; + + if (refcount_dec_and_test(&(*vc_dev)->refcnt)) { + kfree(*vc_dev); + *vc_dev = NULL; + DRV_RDMA_LOG_DEV_DEBUG("vchnl: put vf dev success\n"); + } +} + +static void sxe2_vchnl_negotiate_vchnl_rev(struct aux_ver_info *vchnl_ver, + struct sxe2_rdma_ctx_dev *dev) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + vchnl_ver->major = rdma_dev->fw_ver.major; + vchnl_ver->minor = rdma_dev->fw_ver.minor; +} + +static u16 sxe2_vchnl_get_next_vf_idx(struct sxe2_rdma_ctx_dev *dev) +{ + u16 vf_idx; + + for (vf_idx = 0; vf_idx < dev->num_vfs; vf_idx++) { + if (!dev->vc_dev[vf_idx]) + break; + } + + return vf_idx < dev->num_vfs ? vf_idx : SXE2_VCHNL_INVALID_VF_IDX; +} + +static void sxe2_vchnl_init_vsi_ctx(struct sxe2_rdma_ctx_vsi *vsi, + struct sxe2_vsi_init_info *info) +{ + vsi->dev = info->dev; + vsi->back_vsi = info->back_vsi; + vsi->register_qsets = info->register_qset; + vsi->unregister_qsets = info->unregister_qset; + vsi->mtu = info->params->mtu; + vsi->exception_lan_q = info->exception_lan_q; + vsi->vsi_idx = info->pf_data_vsi_num; + vsi->vm_vf_type = info->vm_vf_type; + sxe2_rdma_set_qos_info(vsi, info->params); +} + +static struct sxe2_rdma_ctx_vsi * +sxe2_vchnl_update_vsi_ctx(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev *vc_dev, bool enable) +{ + struct sxe2_vsi_init_info vsi_info = {}; + struct sxe2_rdma_l2params l2params[QOS_MAX_QSET_NUM_PER_USER_PRI] = {0}; + struct aux_vf_port_info port_info = {}; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_ctx_vsi *vf_vsi; + struct aux_core_dev_info *cdev_info = rdma_func->cdev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + vf_vsi = vc_dev->vf_vsi; + if (!vf_vsi && enable) { + if (!rdma_func->reset && + cdev_info->ops->get_vf_info(cdev_info, vc_dev->vf_id, + &port_info)) { + DRV_RDMA_LOG_DEV_ERR("vchnl: get vf info err\n"); + return NULL; + } + vf_vsi = kzalloc(sizeof(*vf_vsi), GFP_KERNEL); + if (!vf_vsi) { + DRV_RDMA_LOG_DEV_ERR("vchnl: alloc vf vsi mem err\n"); + return NULL; + } + vc_dev->port_vlan_en = port_info.port_vlan_id ? 
true : false; + l2params[0].up2tc[0] = 0; + l2params[0].mtu = rdma_dev->vsi.mtu; + l2params[0].num_tc = 1; + + vsi_info.vm_vf_type = SXE2_PF_TYPE; + vsi_info.dev = dev; + vsi_info.back_vsi = rdma_dev; + vsi_info.params = l2params; + vsi_info.pf_data_vsi_num = port_info.vport_id; + vsi_info.register_qset = rdma_func->gen_ops.register_qsets; + vsi_info.unregister_qset = rdma_func->gen_ops.unregister_qsets; + sxe2_vchnl_init_vsi_ctx(vf_vsi, &vsi_info); + } + if (!vf_vsi) + return NULL; + + if (!enable) { + kfree(vf_vsi); + vf_vsi = NULL; + vc_dev->vf_vsi = NULL; + } + + return vf_vsi; +} + +static void +sxe2_vchnl_set_rcms_fcn_info(struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_rcms_fcn_info *rcms_fcn_info, + bool free_fcn) +{ + u16 abs_vf_id; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + memset(rcms_fcn_info, 0, sizeof(*rcms_fcn_info)); + abs_vf_id = vc_dev->vf_id + rdma_dev->rdma_func->vfid_base; + rcms_fcn_info->vf_id = abs_vf_id; + rcms_fcn_info->protocol_used = vc_dev->protocol_used; + DRV_RDMA_LOG_DEBUG_BDF("vchnl: rel vf id %u to abs vf id %u\n", + vc_dev->vf_id, rcms_fcn_info->vf_id); +} + +int sxe2_vchnl_manage_rcms_pm_func_table(struct sxe2_mq_ctx *mq, + struct sxe2_rcms_fcn_info *info, + u64 scratch, bool post_sq) +{ + int ret = SXE2_OK; + __le64 *wqe; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + struct sxe2_vchnl_manage_rcms_func_table_wqe *manage_func_table_wqe; + + wqe = sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: mq manage rcms pm func table get wqe err\n"); + ret = -ENOMEM; + goto end; + } + manage_func_table_wqe = + (struct sxe2_vchnl_manage_rcms_func_table_wqe *)wqe; + manage_func_table_wqe->vf_id = info->vf_id; + manage_func_table_wqe->op = SXE2_MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE; + manage_func_table_wqe->free_func_table = info->free_fcn; + dma_wmb(); + manage_func_table_wqe->wqe_valid = mq->polarity; + + print_hex_dump_debug("wqe: manage rcms pm func table wqe", + DUMP_PREFIX_OFFSET, 16, 8, wqe, + SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8, + false); + if (post_sq) + sxe2_kpost_mq(mq); +end: + return ret; +} + +static int +sxe2_vchnl_mq_manage_rcms_fcn_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_fcn_info *rcms_fcn_info, + u16 *pmf_idx) +{ + int ret = SXE2_OK; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_vchnl_mq_compl_func_tab_ret ret_val; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + goto end; + } + mq_info = &mq_request->info; + memcpy(&mq_info->in.u.manage_rcms_pm.info, rcms_fcn_info, + sizeof(mq_info->in.u.manage_rcms_pm.info)); + mq_info->in.u.manage_rcms_pm.dev = dev; + mq_info->mq_cmd = MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE; + mq_info->post_mq = 1; + mq_info->in.u.manage_rcms_pm.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + ret_val.val = mq_request->cmpl_info.op_ret_val; + if (!ret_val.valid) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:mange rcms fcn get invalid relative func id\n"); + ret = -EINVAL; + } else { + *pmf_idx = ret_val.rel_fid; + } + + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return ret; +} + +static struct sxe2_rdma_vchnl_dev * +sxe2_vchnl_pf_get_vf_rcms_fcn(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, + enum sxe2_protocol_used protocol_used) +{ + int ret = SXE2_OK; + struct sxe2_rcms_fcn_info rcms_fcn_info; + 
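/* vc_dev and its trailing rcms_obj[] table come from a single allocation + * tracked by virt_mem below, sized as sizeof(*vc_dev) plus + * SXE2_RCMS_OBJ_MAX struct sxe2_rcms_obj_info entries. + */ +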
struct sxe2_rdma_virt_mem virt_mem; + struct sxe2_rdma_vchnl_dev *vc_dev; + struct sxe2_rdma_ctx_vsi *vsi; + u16 vf_idx = 0; + u32 size; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + vf_idx = sxe2_vchnl_get_next_vf_idx(dev); + if (vf_idx == SXE2_VCHNL_INVALID_VF_IDX) { + DRV_RDMA_LOG_ERROR_BDF("vchnl: get vf idx err\n"); + return NULL; + } + size = sizeof(*vc_dev) + + sizeof(struct sxe2_rcms_obj_info) * SXE2_RCMS_OBJ_MAX; + virt_mem.size = size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: vf %u alloc vf dev and rcms obj info err\n", + vf_id); + return NULL; + } + vc_dev = virt_mem.va; + vc_dev->pf_dev = dev; + vc_dev->vf_id = vf_id; + vc_dev->vf_idx = vf_idx; + vc_dev->protocol_used = protocol_used; + vc_dev->pf_rcms_initialized = false; + vc_dev->rcms_info.rcms_obj = (struct sxe2_rcms_obj_info *)(&vc_dev[1]); + + vsi = sxe2_vchnl_update_vsi_ctx(dev, vc_dev, true); + if (!vsi) + goto free_vc_dev; + + refcount_set(&vc_dev->refcnt, 1); + dev->vc_dev[vf_idx] = vc_dev; + vc_dev->vf_vsi = vsi; + vsi->vf_id = (u16)vc_dev->vf_id; + vsi->vc_dev = vc_dev; + + sxe2_vchnl_set_rcms_fcn_info(vc_dev, &rcms_fcn_info, false); + ret = sxe2_vchnl_mq_manage_rcms_fcn_cmd(dev, &rcms_fcn_info, + &vc_dev->pmf_index); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl: vf %u error mq get rcms function operation\n", + vf_id); + goto disable_vport; + } + + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl: vf %u mq manage rcms fcn Function pmf idx = %u\n", + vf_id, vc_dev->pmf_index); + + refcount_inc(&vc_dev->refcnt); + return vc_dev; + +disable_vport: + sxe2_vchnl_update_vsi_ctx(dev, vc_dev, false); +free_vc_dev: + dev->vc_dev[vc_dev->vf_idx] = NULL; + kfree(virt_mem.va); + return NULL; +} + +static enum sxe2_protocol_used +sxe2_vchnl_get_protocol_used(struct sxe2_vchnl_op_buf *vchnl_msg) +{ + return SXE2_ROCE_PROTOCOL_ONLY; +} + +void sxe2_vchnl_pf_put_vf_rcms_fcn(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev **vc_dev) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + DRV_RDMA_LOG_DEBUG_BDF("vchnl: vf %u put rcms fcn start\n", + (*vc_dev)->vf_id); + sxe2_vchnl_remove_vc_dev(dev, *vc_dev); + sxe2_vchnl_update_vsi_ctx(dev, *vc_dev, false); + sxe2_vchnl_put_vf_dev(vc_dev); +} + +static int sxe2_vchnl_pf_config_vf_rcms(struct sxe2_rdma_vchnl_dev *vc_dev) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + u8 vf_pmf_idx = (u8)(vc_dev->pmf_index); + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (vf_pmf_idx < dev->hw_attrs.first_hw_vf_fpm_id || + (vf_pmf_idx >= dev->hw_attrs.first_hw_vf_fpm_id + dev->num_vfs)) { + DRV_RDMA_LOG_ERROR_BDF("vchnl: invalid vf pmf idx=%u\n", + vf_pmf_idx); + ret = -EINVAL; + goto end; + } + ret = sxe2_rcms_pf_config_vf_fpm_val(vc_dev); + +end: + return ret; +} + +static void sxe2_vchnl_update_vf_vlan_cfg(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev *vc_dev) +{ + int ret = SXE2_OK; + struct aux_core_dev_info *cdev_info = to_rdmafunc(dev)->cdev; + struct aux_vf_port_info port_info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!rdma_dev->rdma_func->reset) { + ret = cdev_info->ops->get_vf_info(cdev_info, vc_dev->vf_id, + &port_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_DEV_ERR("vchnl:cdev get vf info ret=%d\n", + ret); + return; + } + } + vc_dev->port_vlan_en = port_info.port_vlan_id ? 
true : false; +} + +static int +sxe2_vchnl_pf_init_vf_rcms(struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_vchnl_init_vf_rcms_resp *init_rcms_resp) +{ + int ret = SXE2_OK; + int i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + if (!vc_dev->pf_rcms_initialized) { + ret = sxe2_vchnl_pf_config_vf_rcms(vc_dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf init vf %u rcms ret=%d\n", + vc_dev->vf_id, ret); + goto end; + } + vc_dev->pf_rcms_initialized = true; + } + init_rcms_resp->first_fpte_index = vc_dev->rcms_info.first_fpte_index; + init_rcms_resp->max_fpte_index = vc_dev->rcms_info.max_fpte_index; + init_rcms_resp->max_fpte_cnt = vc_dev->rcms_info.max_fpte_cnt; + init_rcms_resp->fpte_needed = vc_dev->rcms_info.fpte_needed; + init_rcms_resp->max_ceqs = vc_dev->rcms_info.max_ceqs; + init_rcms_resp->max_db_page_num = vc_dev->rcms_info.max_db_page_num; + init_rcms_resp->db_bar_addr = vc_dev->rcms_info.db_bar_addr; + init_rcms_resp->max_cc_qp_cnt = vc_dev->rcms_info.max_cc_qp_cnt; + + for (i = 0; i < SXE2_RCMS_OBJ_MAX; i++) { + init_rcms_resp->obj_max_cnt[i] = + vc_dev->rcms_info.rcms_obj[i].cnt; + } + + init_rcms_resp->pmf_index = vc_dev->pmf_index; + init_rcms_resp->pf_max_ceqs = + rdma_dev->rdma_func->ctx_dev.rcms_info->max_ceqs; + +end: + return ret; +} + +static int +sxe2_vchnl_pf_get_vf_obj(struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_vchnl_vf_obj_resp *init_obj_resp) +{ + int ret = SXE2_OK; + int i; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + if (!vc_dev->pf_rcms_initialized) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf del vf %u obj err pf not initialized\n", + vc_dev->vf_id); + ret = -EINVAL; + goto end; + } + + for (i = 0; i < SXE2_RCMS_OBJ_MAX; i++) { + init_obj_resp->obj_info[i].size = + vc_dev->rcms_info.rcms_obj[i].size; + init_obj_resp->obj_info[i].base = + vc_dev->rcms_info.rcms_obj[i].base; + } + +end: + return ret; +} + +static int sxe2_vchnl_pf_gather_vf_stats( + struct sxe2_rdma_vchnl_dev *vc_dev, u32 stats_req_type, + struct sxe2_rdma_gather_stats_vf *gather_stats_resp) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u8 vf_pmf_idx = (u8)vc_dev->pmf_index; + + if (vf_pmf_idx < dev->hw_attrs.first_hw_vf_fpm_id || + (vf_pmf_idx >= dev->hw_attrs.first_hw_vf_fpm_id + dev->num_vfs)) { + DRV_RDMA_LOG_ERROR_BDF("vchnl: invalid vf pmf idx=%u\n", + vf_pmf_idx); + ret = -EINVAL; + goto end; + } + + ret = sxe2_kgather_pf_for_vf_stats_val(vc_dev, stats_req_type, + gather_stats_resp); + +end: + return ret; +} + +static int +sxe2_vchnl_pf_gather_stats(struct sxe2_rdma_vchnl_dev *vc_dev, + u32 stats_req_type, + struct sxe2_rdma_gather_stats_vf *gather_stats_resp) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + + ret = sxe2_vchnl_pf_gather_vf_stats(vc_dev, stats_req_type, + gather_stats_resp); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF("vchnl: pf gather stats ret=%d\n", ret); + goto end; + } + +end: + return ret; +} + +static int sxe2_vchnl_pf_manage_vf_qet_node( + struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_vchnl_manage_qet_node_info *qset_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vc_dev->pf_dev); + struct sxe2_rdma_ctx_vsi *vf_vsi = vc_dev->vf_vsi; + u32 user_pri = qset_info->user_pri; + + DRV_RDMA_LOG_DEBUG_BDF("vchnl: pf %s vf %u qset id=%u user pri=%u\n", + qset_info->add ? 
"add" : "del", vc_dev->vf_id, + qset_info->qset_id, user_pri); + if (qset_info->add) { + vf_vsi->qos[user_pri].qset[0].vsi_index = vf_vsi->vsi_idx; + vf_vsi->qos[user_pri].qset[0].qset_id = qset_info->qset_id; + + ret = vf_vsi->register_qsets( + vf_vsi, &vf_vsi->qos[user_pri].qset[0], NULL); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf manage vf %u qset err ret=%d\n", + vc_dev->vf_id, ret); + goto end; + } + + ret = sxe2_qos_qset_bind_pf_tc( + vf_vsi, vf_vsi->qos[user_pri].qset[0].qset_id, + vf_vsi->qos[user_pri].qset[0].traffic_class, false, + rdma_dev->rdma_func->pf_id); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:pf manage vf %u qset bind pf tc err ret=%d\n", + vc_dev->vf_id, ret); + goto qet_bind_tc_err; + } + vf_vsi->qos[user_pri].valid = true; + } else { + if (!vf_vsi->qos[user_pri].valid || + vf_vsi->qos[user_pri].qset[0].qset_id != + qset_info->qset_id) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:pf unregister qet user pri\n" + "\tqset not valid or qset id err=%d\n", + ret); + ret = -EINVAL; + goto end; + } + vf_vsi->unregister_qsets(vf_vsi, &vf_vsi->qos[user_pri].qset[0], + NULL); + vf_vsi->qos[user_pri].valid = false; + } + + goto end; + +qet_bind_tc_err: + vf_vsi->unregister_qsets(vf_vsi, &vf_vsi->qos[user_pri].qset[0], NULL); + +end: + return ret; +} + +static bool sxe2_vchnl_pf_verify_update_fpte_msg( + struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_rcms_vf_update_fptes_info *update_vf_fpte_info) +{ + bool verify_ret = true; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + struct sxe2_rcms_info *rcms_info = &vc_dev->rcms_info; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u64 fpte_pa; + u64 fpte_idx; + u32 i; + u8 valid; + + if (update_vf_fpte_info->cnt > SXE2_RCMS_VF_MAX_UPDATE_FPTE_ENTRIES) { + verify_ret = false; + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf update vf %u fpte cnt err cnt=%u max cnt=%u\n", + vc_dev->vf_id, update_vf_fpte_info->cnt, + SXE2_RCMS_VF_MAX_UPDATE_FPTE_ENTRIES); + goto end; + } + + for (i = 0; i < update_vf_fpte_info->cnt; i++) { + fpte_idx = update_vf_fpte_info->entry[i].cmd; + fpte_pa = + update_vf_fpte_info->entry[i].data & SXE2_FPTE_PA_MASK; + valid = (u8)(update_vf_fpte_info->entry[i].data & + SXE2_FPTE_PA_VALID_MASK); + if (fpte_idx < rcms_info->first_fpte_index || + fpte_idx > rcms_info->max_fpte_index) { + verify_ret = false; + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf update vf %u fpte idx err fpte idx=%u\n", + vc_dev->vf_id, fpte_idx); + goto end; + } + if ((update_vf_fpte_info->set && (!fpte_pa || !valid)) || + (!update_vf_fpte_info->set && valid)) { + verify_ret = false; + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf update vf %u fpte pa is NULL fpte idx=%u\n", + vc_dev->vf_id, fpte_idx); + goto end; + } + } + +end: + return verify_ret; +} + +static int sxe2_vchnl_pf_update_fpte( + struct sxe2_rdma_vchnl_dev *vc_dev, + struct sxe2_rcms_vf_update_fptes_info *update_vf_fpte_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + struct sxe2_rcms_info *rcms_info = &vc_dev->rcms_info; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rcms_update_fptes_info *update_fpte_info = NULL; + bool verify_msg; + u32 i; + + verify_msg = sxe2_vchnl_pf_verify_update_fpte_msg(vc_dev, + update_vf_fpte_info); + if (!verify_msg) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "vchnl: pf update vf %u fpte verify err\n", + vc_dev->vf_id); + goto end; + } + + DRV_RDMA_LOG_DEBUG_BDF("vchnl: pf update vf %u fpte cnt=%u set=%u\n", + vc_dev->vf_id, update_vf_fpte_info->cnt, + 
update_vf_fpte_info->set); + update_fpte_info = kzalloc( + sizeof(*update_fpte_info), GFP_KERNEL); + if (!update_fpte_info) { + DRV_RDMA_LOG_DEV_ERR("vchnl:update fpte info alloc err\n"); + ret = -ENOMEM; + goto end; + } + update_fpte_info->cnt = update_vf_fpte_info->cnt; + update_fpte_info->rcms_fn_id = rcms_info->rcms_fn_id; + for (i = 0; i < update_fpte_info->cnt; i++) { + update_fpte_info->entry[i].cmd = + update_vf_fpte_info->entry[i].cmd; + update_fpte_info->entry[i].data = + update_vf_fpte_info->entry[i].data; + DRV_RDMA_LOG_DEBUG_BDF("vchnl: vf fpte cmd=%llx data=%llx\n", + update_fpte_info->entry[i].cmd, + update_fpte_info->entry[i].data); + } + ret = dev->mq->process_mq_fpt(dev, update_fpte_info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: pf update vf %u fpte err ret=%d\n", + vc_dev->vf_id, ret); + } + kfree(update_fpte_info); +end: + return ret; +} + +static void sxe2_vchnl_opcode_process(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev *vc_dev, + u16 vf_id, + struct sxe2_vchnl_work *vchnl_work, + struct sxe2_vchnl_op_buf *vchnl_msg) +{ + void *param = vchnl_msg->buf; + void *resp_param = NULL; + u16 resp_len = 0; + int resp_code = 0; + u8 vlan_parse_en; + struct aux_ver_info vchnl_ver; + struct sxe2_vchnl_init_vf_rcms_resp init_rcms_resp; + struct sxe2_vchnl_vf_obj_resp vf_obj_resp; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_rdma_gather_stats_vf gather_stats_resp = {}; + u32 stats_req_type = 0; + struct sxe2_vchnl_pbl_set_fpte_info *set_fpte_info = NULL; + struct sxe2_vchnl_pbl_clear_fpte_info *clear_fpte_info = NULL; + struct sxe2_vchnl_rdma_caps caps = {}; + u32 port_active_speed = 0; + struct aux_core_dev_info *cdev_info = + (struct aux_core_dev_info *)(rdma_dev->rdma_func->cdev); + + switch (vchnl_msg->op_code) { + case SXE2_VCHNL_OP_GET_VER: + sxe2_vchnl_negotiate_vchnl_rev(&vchnl_ver, dev); + resp_param = &vchnl_ver; + resp_len = sizeof(vchnl_ver); + break; + case SXE2_VCHNL_OP_PUT_RCMS_FCN: + sxe2_vchnl_pf_put_vf_rcms_fcn(dev, &vc_dev); + break; + case SXE2_VCHNL_OP_INIT_VF_RCMS: + resp_code = sxe2_vchnl_pf_init_vf_rcms(vc_dev, &init_rcms_resp); + resp_param = &init_rcms_resp; + resp_len = sizeof(init_rcms_resp); + break; + case SXE2_VCHNL_OP_GET_VF_OBJ_INFO: + resp_code = sxe2_vchnl_pf_get_vf_obj(vc_dev, &vf_obj_resp); + resp_param = &vf_obj_resp; + resp_len = sizeof(vf_obj_resp); + break; + case SXE2_VCHNL_OP_VLAN_PARSING: + sxe2_vchnl_update_vf_vlan_cfg(dev, vc_dev); + vlan_parse_en = !vc_dev->port_vlan_en; + DRV_RDMA_LOG_DEBUG_BDF("vchnl:vf %u vlan_parse_en = 0x%x\n", + vf_id, vlan_parse_en); + resp_param = &vlan_parse_en; + resp_len = sizeof(vlan_parse_en); + break; + case SXE2_VCHNL_OP_GET_RDMA_CAPS: + caps.hw_rev = dev->hw_attrs.uk_attrs.hw_rev; + resp_len = sizeof(caps); + resp_param = &caps; + break; + case SXE2_VCHNL_OP_GATHER_STATS: + stats_req_type = *((u32 *)param); + resp_code = sxe2_vchnl_pf_gather_stats(vc_dev, stats_req_type, + &gather_stats_resp); + resp_param = &gather_stats_resp; + resp_len = sizeof(gather_stats_resp); + break; + case SXE2_VCHNL_OP_MANAGE_QSET_NODE: + resp_code = sxe2_vchnl_pf_manage_vf_qet_node(vc_dev, param); + break; + case SXE2_VCHNL_OP_PBL_SET_FPTE: + set_fpte_info = (struct sxe2_vchnl_pbl_set_fpte_info *)param; + resp_code = sxe2_pbl_set_fpte(vc_dev->pf_dev, + set_fpte_info->fpte_idx, + set_fpte_info->page_pa, + vc_dev->pmf_index); + break; + case SXE2_VCHNL_OP_PBL_CLEAR_FPTE: + clear_fpte_info = + (struct sxe2_vchnl_pbl_clear_fpte_info *)param; + resp_code = 
sxe2_pbl_clear_fpte(vc_dev->pf_dev, + clear_fpte_info->fpte_idx, + clear_fpte_info->pble_cnt, + vc_dev->pmf_index); + break; + case SXE2_VCHNL_OP_UPDATE_FPTE: + resp_code = sxe2_vchnl_pf_update_fpte( + vc_dev, (struct sxe2_rcms_vf_update_fptes_info *)param); + break; + case SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED: + port_active_speed = cdev_info->ops->rdma_get_link_speed(cdev_info); + resp_param = &port_active_speed; + resp_len = sizeof(port_active_speed); + break; + default: + DRV_RDMA_LOG_DEV_ERR("vchnl:vf %u invalid op code 0x%x\n", + vf_id, vchnl_msg->op_code); + resp_code = -EOPNOTSUPP; + } + + if ((vc_dev && vc_dev->reset_en) || rdma_dev->rdma_func->reset) + goto end; + + sxe2_vchnl_pf_send_resp(dev, vf_id, vchnl_msg, resp_param, resp_len, + resp_code, vchnl_work->session_id); +end: + return; +} + +static void sxe2_vchnl_recv_pf_worker(struct work_struct *work) +{ + struct sxe2_vchnl_work *vchnl_work = + container_of(work, struct sxe2_vchnl_work, work); + struct sxe2_vchnl_op_buf *vchnl_msg = + (struct sxe2_vchnl_op_buf *)&vchnl_work->vf_msg_buf; + u16 vf_id = vchnl_work->vf_id; + int resp_code = 0; + struct sxe2_rdma_ctx_dev *dev = vchnl_work->dev; + struct sxe2_rdma_vchnl_dev *vc_dev = NULL; + struct sxe2_rdma_virt_mem virt_mem; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + DRV_RDMA_LOG_DEBUG_BDF("vchnl:vf %u req opcode %u\n", vf_id, + vchnl_msg->op_code); + if (rdma_dev->rdma_func->reset) { + DRV_RDMA_LOG_DEV_DEBUG("vchnl:pf ready reset\n"); + goto free_work; + } + vc_dev = sxe2_vchnl_find_vc_dev(dev, vf_id); + if (vc_dev && vchnl_msg->op_code == SXE2_VCHNL_OP_GET_RCMS_FCN) { + sxe2_rdma_free_one_vf(vc_dev); + vc_dev = NULL; + DRV_RDMA_LOG_DEBUG_BDF("vchnl:vf %u get fcn but find vc dev\n", + vf_id); + } + if (vc_dev && vc_dev->reset_en) { + DRV_RDMA_LOG_DEBUG_BDF("vchnl:vf %u req opcode %u reset en\n", + vf_id, vchnl_msg->op_code); + goto free_work; + } + + if (vchnl_msg->op_code != SXE2_VCHNL_OP_GET_VER && + vchnl_msg->op_code != SXE2_VCHNL_OP_GET_RDMA_CAPS && + vchnl_msg->op_code != SXE2_VCHNL_OP_GET_RCMS_FCN) { + if (!vc_dev) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:vf %u req opcode %u vc dev is null\n", + vf_id, vchnl_msg->op_code); + goto free_work; + } + } + + if (vchnl_msg->op_code == SXE2_VCHNL_OP_GET_RCMS_FCN) { + if (!vc_dev) { + vc_dev = sxe2_vchnl_pf_get_vf_rcms_fcn( + dev, vf_id, + sxe2_vchnl_get_protocol_used(vchnl_msg)); + if (!vc_dev) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:vf %u req opcode %u pf get vf rcms fcn err\n", + vf_id, vchnl_msg->op_code); + resp_code = -ENODEV; + } + } + + if ((!vc_dev || !vc_dev->reset_en) && !rdma_dev->rdma_func->reset) { + sxe2_vchnl_pf_send_resp(dev, vf_id, vchnl_msg, NULL, 0, + resp_code, + vchnl_work->session_id); + } + goto free_work; + } + + sxe2_vchnl_opcode_process(dev, vc_dev, vf_id, vchnl_work, vchnl_msg); + +free_work: + if (vc_dev) + sxe2_vchnl_put_vf_dev(&vc_dev); + virt_mem.va = work; + kfree(virt_mem.va); +} + +int sxe2_vchnl_recv_pf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_work *work; + struct sxe2_rdma_virt_mem workmem; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_vchnl_op_buf *vchnl_msg; + u16 msg_len; + bool verify_msg; + + if (!msg) { + DRV_RDMA_LOG_DEV_ERR("vchnl:msg pointer is NULL err\n"); + ret = -EINVAL; + goto end; + } + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl:recv vf id=%u msg len=%u session id=%#llx\n", vf_id, + len, session_id); + if (len < SXE2VF_MBX_FULL_HDR_SIZE) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:vf %u 
mbx msg too short len=%u\n", vf_id, + len); + ret = -EINVAL; + goto end; + } + + vchnl_msg = + (struct sxe2_vchnl_op_buf *)(msg + SXE2VF_MBX_FULL_HDR_SIZE); + msg_len = len - SXE2VF_MBX_FULL_HDR_SIZE; + + verify_msg = sxe2_vchnl_pf_verify_msg(dev, vchnl_msg, msg_len); + if (!verify_msg) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:pf verify msg err\n"); + ret = -EINVAL; + goto end; + } + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:vchnl is not up\n"); + ret = -EBUSY; + goto end; + } + + workmem.size = sizeof(*work); + workmem.va = kzalloc(workmem.size, GFP_KERNEL); + if (!workmem.va) { + DRV_RDMA_LOG_DEV_ERR("vchnl:alloc work mem err\n"); + ret = -ENOMEM; + goto end; + } + work = workmem.va; + memcpy(&work->vf_msg_buf, vchnl_msg, msg_len); + work->dev = dev; + work->vf_id = vf_id; + work->len = msg_len; + work->session_id = session_id; + INIT_WORK(&work->work, sxe2_vchnl_recv_pf_worker); + queue_work(dev->vchnl_wq, &work->work); + +end: + return ret; +} + +int sxe2_vchnl_recv_vf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (len < sizeof(struct sxe2_vchnl_resp_buf)) { + DRV_RDMA_LOG_DEV_ERR("vchnl:recv resp msg too short len=%u\n", len); + ret = -EINVAL; + goto end; + } + if (len > SXE2_VCHNL_MAX_MSG_SIZE) + len = SXE2_VCHNL_MAX_MSG_SIZE; + + memcpy(dev->vc_recv_buf, msg, len); + dev->vc_recv_len = len; +end: + return ret; +} + +int sxe2_vchnl_receive(struct aux_core_dev_info *cdev_info, u32 vf_id, u8 *msg, + u16 len, u64 session_id) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = + dev_get_drvdata(&cdev_info->adev->dev); + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + + if (!len || !msg) { + DRV_RDMA_LOG_DEV_ERR( + "vchnl:vchnl receive len or msg pointer err len=%u msg=%p\n", + len, msg); + ret = -EINVAL; + goto end; + } + DRV_RDMA_LOG_DEV_DEBUG( + "vchnl:vchnl receive vf id=%u msg len=%u session id=%#llx\n", + vf_id, len, session_id); + ret = dev->vchnl_if->vchnl_recv(dev, (u16)vf_id, msg, len, session_id); + if (ret != SXE2_OK) + DRV_RDMA_LOG_ERROR_BDF("vchnl:vchnl receive err ret=%d\n", ret); + +end: + return ret; +} + +int sxe2_vchnl_req_get_ver(struct sxe2_rdma_ctx_dev *dev, + struct aux_ver_info *ver_res) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_GET_VER; + info.op_ver = SXE2_VCHNL_CHNL_VER_V1; + info.resp_parm = (void *)ver_res; + info.resp_parm_len = sizeof(*ver_res); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:get ver req send sync err ret=%d\n", + ret); + goto end; + } + + if (ver_res->major != SXE2_FW_COMP_MAJOR_VER) { + DRV_RDMA_LOG_ERROR("vf: major version compare mismatch, fw version:%d rdma %d\n", + ver_res->major, SXE2_FW_COMP_MAJOR_VER); + ret = -EOPNOTSUPP; + goto end; + } + + if (ver_res->minor != SXE2_FW_COMP_MINOR_VER) { + DRV_RDMA_LOG_WARN("vf: minor version compare mismatch, fw version:%d rdma %d.\n", + ver_res->minor, SXE2_FW_COMP_MINOR_VER); + } + +end: + return ret; +} + +int sxe2_vchnl_req_get_rcms_fcn(struct sxe2_rdma_ctx_dev *dev) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support 
vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_GET_RCMS_FCN; + info.op_ver = SXE2_VCHNL_OP_GET_RCMS_FCN_V1; + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:get rcms fcn req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_put_rcms_fcn(struct sxe2_rdma_ctx_dev *dev) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + info.op_code = SXE2_VCHNL_OP_PUT_RCMS_FCN; + info.op_ver = SXE2_VCHNL_OP_PUT_RCMS_FCN_V1; + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:put rcms fcn req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_get_vlan_parsing_cfg(struct sxe2_rdma_ctx_dev *dev, + u8 *vlan_parse_en) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + info.op_code = SXE2_VCHNL_OP_VLAN_PARSING; + info.op_ver = SXE2_VCHNL_OP_VLAN_PARSING_V1; + info.resp_parm = (void *)vlan_parse_en; + info.resp_parm_len = sizeof(*vlan_parse_en); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:get vlan parsing cfg req send sync err ret=%d\n", + ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_get_caps(struct sxe2_rdma_ctx_dev *dev) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + info.op_code = SXE2_VCHNL_OP_GET_RDMA_CAPS; + info.op_ver = SXE2_VCHNL_OP_GET_RDMA_CAPS_V1; + info.resp_parm = (void *)(&dev->vc_caps); + info.resp_parm_len = sizeof(dev->vc_caps); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:get caps req send sync err ret=%d\n", ret); + goto end; + } + + if (!dev->vc_caps.max_hw_push_len) + dev->vc_caps.max_hw_push_len = SXE2_RDMA_DEFAULT_MAX_PUSH_LEN; + + if (dev->vc_caps.hw_rev > SXE2_RDMA_GEN_MAX || + dev->vc_caps.hw_rev < SXE2_RDMA_GEN_1) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: get caps req unsupported hw_rev version %u\n", + dev->vc_caps.hw_rev); + ret = -EOPNOTSUPP; + } + +end: + return ret; +} + +int sxe2_vchnl_req_init_vf_rcms( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_init_vf_rcms_resp *init_vf_rcms_resp) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + info.op_code = SXE2_VCHNL_OP_INIT_VF_RCMS; + info.op_ver = SXE2_VCHNL_OP_INIT_VF_RCMS_V1; + info.resp_parm = (void *)init_vf_rcms_resp; + info.resp_parm_len = sizeof(*init_vf_rcms_resp); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:init vf rcms req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_get_vf_obj_info(struct sxe2_rdma_ctx_dev *dev, + struct 
sxe2_vchnl_vf_obj_resp *vf_obj_resp) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + info.op_code = SXE2_VCHNL_OP_GET_VF_OBJ_INFO; + info.op_ver = SXE2_VCHNL_OP_GET_VF_OBJ_INFO_V1; + info.resp_parm = (void *)vf_obj_resp; + info.resp_parm_len = sizeof(*vf_obj_resp); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:get vf obj req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_gather_stats(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_gather_stats *gather_stats_resp) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + struct sxe2_rdma_gather_stats_vf gather_stats_resp_tx = {}; + struct sxe2_rdma_gather_stats_vf gather_stats_resp_rx = {}; + u32 req_type = 0; +#ifdef SXE2_CFG_DEBUG + u32 i = 0; + const struct sxe2_rdma_hw_stat_map *map = dev->hw_stats_map; +#endif + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_GATHER_STATS; + info.op_ver = SXE2_VCHNL_OP_GATHER_STATS_V1; + req_type = SXE2_RDMA_STATS_VF_TX; + info.req_parm = (void *)(&req_type); + info.req_parm_len = sizeof(req_type); + info.resp_parm = (void *)(&gather_stats_resp_tx); + info.resp_parm_len = sizeof(gather_stats_resp_tx); + if (!rdma_dev->rdma_func->reset) { + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:gather stats req send sync err ret=%d\n", + ret); + goto end; + } + memcpy(gather_stats_resp, &gather_stats_resp_tx, + STATS_VF_TX_BUF_ALL_BYTE); +#ifdef SXE2_CFG_DEBUG + for (i = 0; i < SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS; i++) { + DRV_RDMA_LOG_DEBUG_BDF( + "vf i %u, bitoff %u, val %#llx\n", i, + map[i].byteoff, gather_stats_resp->val[i]); + } +#endif + } else { + ; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_GATHER_STATS; + info.op_ver = SXE2_VCHNL_OP_GATHER_STATS_V1; + req_type = SXE2_RDMA_STATS_VF_RX; + info.req_parm = (void *)(&req_type); + info.req_parm_len = sizeof(req_type); + info.resp_parm = (void *)(&gather_stats_resp_rx); + info.resp_parm_len = sizeof(gather_stats_resp_rx); + if (!rdma_dev->rdma_func->reset) { + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:gather stats req send sync err ret=%d\n", + ret); + goto end; + } + memcpy(&gather_stats_resp->val[STATS_VF_RX_BUF_START_8BYTE], + &gather_stats_resp_rx, STATS_VF_RX_BUF_ALL_BYTE); +#ifdef SXE2_CFG_DEBUG + for (i = SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS; + i < SXE2_RDMA_HW_STAT_INDEX_MAX; i++) { + DRV_RDMA_LOG_DEBUG_BDF( + "vf i %u, bitoff %u, val %#llx\n", i, + map[i].byteoff, gather_stats_resp->val[i]); + } +#endif + } + +end: + return ret; +} + +int sxe2_vchnl_req_manage_qet_node( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_manage_qet_node_info *qset_info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_vchnl_req_init_info info = {}; + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_MANAGE_QSET_NODE; + info.op_ver = SXE2_VCHNL_OP_MANAGE_QSET_NODE_V1; + info.req_parm = (void 
*)(qset_info); + info.req_parm_len = sizeof(struct sxe2_vchnl_manage_qet_node_info); + + DRV_RDMA_LOG_DEV_DEBUG("vchnl:req %s qset %u node\n", + qset_info->add ? "register" : "unregister", + qset_info->qset_id); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:manage qset node req send sync err ret=%d\n", + ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_set_pbl_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u64 page_pa) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_pbl_set_fpte_info set_fpte_info = {}; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + set_fpte_info.fpte_idx = fpte_idx; + set_fpte_info.page_pa = page_pa; + + info.op_code = SXE2_VCHNL_OP_PBL_SET_FPTE; + info.op_ver = SXE2_VCHNL_OP_PBL_SET_FPTE_V1; + info.req_parm = (void *)(&set_fpte_info); + info.req_parm_len = sizeof(set_fpte_info); + + DRV_RDMA_LOG_DEBUG_BDF( + "vchnl:set pbl fpte idx=%u page pa=0x%llx\n", fpte_idx, + page_pa); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:set pbl fpte req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_clear_pbl_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 pble_cnt) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_pbl_clear_fpte_info clear_fpte_info = {}; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + clear_fpte_info.fpte_idx = fpte_idx; + clear_fpte_info.pble_cnt = pble_cnt; + + info.op_code = SXE2_VCHNL_OP_PBL_CLEAR_FPTE; + info.op_ver = SXE2_VCHNL_OP_PBL_CLEAR_FPTE_V1; + info.req_parm = (void *)(&clear_fpte_info); + info.req_parm_len = sizeof(clear_fpte_info); + + DRV_RDMA_LOG_DEBUG_BDF("vchnl:clear pbl fpte idx=%u pble cnt=%u\n", + fpte_idx, pble_cnt); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:clear pbl fpte req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_update_fpte( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_vf_update_fptes_info *update_vf_fpte_info) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_UPDATE_FPTE; + info.op_ver = SXE2_VCHNL_OP_UPDATE_FPTE_V1; + info.req_parm = (void *)(update_vf_fpte_info); + info.req_parm_len = sizeof(*update_vf_fpte_info); + + DRV_RDMA_LOG_DEBUG_BDF("vchnl:vf req update fpte set=%u cnt=%u\n", + update_vf_fpte_info->set, + update_vf_fpte_info->cnt); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl:update fpte req send sync err ret=%d\n", ret); + } + +end: + return ret; +} + +int sxe2_vchnl_req_get_port_acitve_speed( + struct sxe2_rdma_ctx_dev *dev, + u32 *port_active_speed) +{ + int ret = SXE2_OK; + struct sxe2_vchnl_req_init_info info = {}; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + if (!dev->vchnl_up) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:not support vchnl up=%u\n", + dev->vchnl_up); + 
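/* vchnl_up is cleared by sxe2_vchnl_send_sync() on a completion timeout, + * so a downed channel fails fast here instead of re-sending. + */ +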
ret = -EBUSY; + goto end; + } + + info.op_code = SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED; + info.op_ver = SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED_V1; + info.resp_parm = (void *)port_active_speed; + info.resp_parm_len = sizeof(*port_active_speed); + + ret = sxe2_vchnl_req_send_sync(dev, &info); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF("vchnl:get port active speed req send sync err ret=%d\n", + ret); + } +end: + return ret; +} + +int sxe2_vchnl_ctx_init(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_init_info *info) +{ + int ret = SXE2_OK; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + + dev->vchnl_if = info->vchnl_if; + dev->vchnl_up = dev->vchnl_if ? true : false; + dev->privileged = info->privileged; + dev->vchnl_wq = info->vchnl_wq; + dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev; + dev->hw_attrs.uk_attrs.max_hw_push_len = SXE2_RDMA_DEFAULT_MAX_PUSH_LEN; + if (!dev->privileged) { + ret = sxe2_vchnl_req_get_ver(dev, &dev->fw_version); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: vchnl req get ver err ret=%d\n", ret); + goto end; + } + + ret = sxe2_vchnl_req_get_caps(dev); + if (ret != SXE2_OK) { + DRV_RDMA_LOG_ERROR_BDF( + "vchnl: vchnl req get caps err ret=%d\n", ret); + goto end; + } + + dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev; + dev->hw_attrs.uk_attrs.max_hw_push_len = + dev->vc_caps.max_hw_push_len; + } + +end: + return ret; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.h new file mode 100644 index 0000000000000000000000000000000000000000..26b5fd9b63dc031ecbe38e3cbe1a61c93e6cdc92 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_rdma_virtchnl.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
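+ * + * PF<->VF virtual-channel opcodes and packed request/response layouts; the VF-side helpers here proxy privileged RDMA setup requests to the PF.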
+ * + * @file: sxe2_drv_rdma_virtchnl.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_RDMA_VIRTCHNL_H +#define SXE2_DRV_RDMA_VIRTCHNL_H + +#include "sxe2_drv_rdma_common.h" +#include "sxe2_drv_rdma_rcms.h" +#include "sxe2_drv_stats.h" +#include "sxe2_drv_aux.h" + +#define SXE2_VCHNL_CHNL_VER_V1 1 +#define SXE2_VCHNL_CHNL_VER_MIN SXE2_VCHNL_CHNL_VER_V1 +#define SXE2_VCHNL_CHNL_VER_MAX SXE2_VCHNL_CHNL_VER_V1 + +#define SXE2_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1 +#define SXE2_VCHNL_OP_GET_RCMS_FCN_V1 1 +#define SXE2_VCHNL_OP_PUT_RCMS_FCN_V1 1 +#define SXE2_VCHNL_OP_VLAN_PARSING_V1 1 +#define SXE2_VCHNL_OP_GET_RDMA_CAPS_V1 1 +#define SXE2_VCHNL_OP_INIT_VF_RCMS_V1 1 +#define SXE2_VCHNL_OP_GATHER_STATS_V1 1 +#define SXE2_VCHNL_OP_MANAGE_QSET_NODE_V1 1 +#define SXE2_VCHNL_OP_PBL_SET_FPTE_V1 1 +#define SXE2_VCHNL_OP_PBL_CLEAR_FPTE_V1 1 +#define SXE2_VCHNL_OP_GET_VF_OBJ_INFO_V1 1 +#define SXE2_VCHNL_OP_UPDATE_FPTE_V1 1 +#define SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED_V1 1 +#define SXE2_VCHNL_INVALID_VF_IDX 0xFFFF + +#define SXE2_FPTE_PA_MASK 0xFFFFFFFFFFFFF000 +#define SXE2_FPTE_PA_VALID_MASK 0x1 + +enum sxe2_vchnl_opcode { + SXE2_VCHNL_OP_GET_VER = + 0, + SXE2_VCHNL_OP_GET_RCMS_FCN = + 1, + SXE2_VCHNL_OP_PUT_RCMS_FCN = + 2, + SXE2_VCHNL_OP_INIT_VF_RCMS = + 3, + SXE2_VCHNL_OP_VLAN_PARSING = + 4, + SXE2_VCHNL_OP_GET_RDMA_CAPS = + 5, + SXE2_VCHNL_OP_GATHER_STATS = + 6, + SXE2_VCHNL_OP_MANAGE_QSET_NODE = + 7, + SXE2_VCHNL_OP_PBL_SET_FPTE = + 8, + SXE2_VCHNL_OP_PBL_CLEAR_FPTE = + 9, + SXE2_VCHNL_OP_GET_VF_OBJ_INFO = + 10, + SXE2_VCHNL_OP_UPDATE_FPTE = + 11, + SXE2_VCHNL_OP_GET_PORT_ACTIVE_SPEED = + 12, +}; + +enum sxe2_rdma_stats_vf_txrx { + SXE2_RDMA_STATS_VF_NONE = 0, + SXE2_RDMA_STATS_VF_TX = 1, + SXE2_RDMA_STATS_VF_RX = 2, +}; + +struct sxe2_vchnl_mq_compl_func_tab_ret { + union { + u32 val; + struct { + u32 rel_fid : 6; + u32 valid : 1; + u32 rsvd : 25; + }; + }; +}; + +struct sxe2_vchnl_work { + struct work_struct work; + u8 vf_msg_buf[SXE2_VCHNL_MAX_MSG_SIZE]; + struct sxe2_rdma_ctx_dev *dev; + u16 vf_id; + u16 len; + u64 session_id; +}; + +struct sxe2_vsi_init_info { + struct sxe2_rdma_ctx_dev *dev; + void *back_vsi; + struct sxe2_rdma_l2params *params; + u16 exception_lan_q; + u16 pf_data_vsi_num; + enum sxe2_rdma_vm_vf_type vm_vf_type; + int (*register_qset)( + struct sxe2_rdma_ctx_vsi *vsi, struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); + void (*unregister_qset)( + struct sxe2_rdma_ctx_vsi *vsi, struct sxe2_rdma_qset *qset1, + struct sxe2_rdma_qset *qset2); +}; + +#pragma pack(push, 1) + +struct sxe2_vchnl_manage_rcms_func_table_wqe { + u64 rsv0; + u64 rsv1; + u64 rsv2; + u64 vf_id : 8; + u64 rsv3 : 24; + u64 op : 6; + u64 rsv4 : 24; + u64 free_func_table : 1; + u64 wqe_valid : 1; + u64 rsv5; + u64 rsv6; + u64 rsv7; + u64 rsv8; +}; + +struct sxe2_vchnl_op_buf { + u16 op_code; + u16 op_ver; + u16 buf_len; + u16 rsvd; + u64 op_ctx; + u8 buf[]; +}; + +struct sxe2_vchnl_resp_buf { + u64 op_ctx; + u16 buf_len; + s16 op_ret; + u16 rsvd[2]; + u8 buf[]; +}; + +struct sxe2_vchnl_pbl_set_fpte_info { + u32 fpte_idx; + u64 page_pa; +}; + +struct sxe2_vchnl_pbl_clear_fpte_info { + u32 fpte_idx; + u32 pble_cnt; +}; + +struct sxe2_vchnl_rdma_caps { + u8 hw_rev; + u16 mq_timeout_s; + u16 mq_def_timeout_s; + u16 mq_hw_push_len; +}; + +struct sxe2_vchnl_manage_qet_node_info { + u16 qset_id; + u8 user_pri; + bool add; +}; + +struct sxe2_vchnl_init_info { + struct workqueue_struct *vchnl_wq; + struct sxe2_rdma_vchnl_if *vchnl_if; + enum sxe2_rdma_vers 
hw_rev; + bool privileged; +}; + +struct sxe2_vchnl_init_vf_rcms_resp { + u32 first_fpte_index; + u32 max_fpte_index; + u32 max_fpte_cnt; + u32 fpte_needed; + u32 max_cc_qp_cnt; + u32 max_ceqs; + u32 max_db_page_num; + u32 db_bar_addr; + u32 obj_max_cnt[SXE2_RCMS_OBJ_MAX]; + u16 pmf_index; + u32 pf_max_ceqs; +}; + +struct sxe2_vchnl_vf_obj_info { + u32 size; + u64 base; +}; + +struct sxe2_vchnl_vf_obj_resp { + struct sxe2_vchnl_vf_obj_info + obj_info[SXE2_RCMS_OBJ_MAX]; +}; + +struct sxe2_vchnl_req { + struct sxe2_vchnl_op_buf *vchnl_msg; + void *parm; + u32 vf_id; + u16 parm_len; + u16 resp_len; +}; + +struct sxe2_vchnl_req_init_info { + void *req_parm; + void *resp_parm; + u16 req_parm_len; + u16 resp_parm_len; + u16 op_code; + u16 op_ver; +}; + +#pragma pack(pop) + +int sxe2_vchnl_ctx_init(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_init_info *info); + +int sxe2_vchnl_req_get_ver(struct sxe2_rdma_ctx_dev *dev, + struct aux_ver_info *ver_res); + +int sxe2_vchnl_req_get_rcms_fcn(struct sxe2_rdma_ctx_dev *dev); + +int sxe2_vchnl_req_put_rcms_fcn(struct sxe2_rdma_ctx_dev *dev); + +int sxe2_vchnl_req_get_vlan_parsing_cfg(struct sxe2_rdma_ctx_dev *dev, + u8 *vlan_parse_en); + +int sxe2_vchnl_req_get_caps(struct sxe2_rdma_ctx_dev *dev); + +int sxe2_vchnl_req_init_vf_rcms( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_init_vf_rcms_resp *init_vf_rcms_resp); + +int sxe2_vchnl_send_pf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id); + +int sxe2_vchnl_recv_pf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id); + +int sxe2_vchnl_recv_vf(struct sxe2_rdma_ctx_dev *dev, u16 vf_id, u8 *msg, + u16 len, u64 session_id); + +int sxe2_vchnl_receive(struct aux_core_dev_info *cdev_info, u32 vf_id, u8 *msg, + u16 len, u64 session_id); + +int sxe2_vchnl_send_sync(struct sxe2_rdma_ctx_dev *dev, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len); + +int sxe2_vchnl_manage_rcms_pm_func_table(struct sxe2_mq_ctx *mq, + struct sxe2_rcms_fcn_info *info, + u64 scratch, bool post_sq); + +int sxe2_vchnl_req_gather_stats(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_gather_stats *gather_stats_req); + +int sxe2_vchnl_req_manage_qet_node( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_manage_qet_node_info *qset_info); +int sxe2_vchnl_req_set_pbl_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u64 page_pa); + +int sxe2_vchnl_req_clear_pbl_fpte(struct sxe2_rdma_ctx_dev *dev, u32 fpte_idx, + u32 pble_cnt); + +void sxe2_vchnl_pf_put_vf_rcms_fcn(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vchnl_dev **vc_dev); + +void sxe2_vchnl_put_vf_dev(struct sxe2_rdma_vchnl_dev **vc_dev); + +struct sxe2_rdma_vchnl_dev * +sxe2_vchnl_find_vc_dev(struct sxe2_rdma_ctx_dev *dev, u16 vf_id); + +int sxe2_vchnl_req_get_vf_obj_info(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_vf_obj_resp *vf_obj_resp); +int sxe2_vchnl_req_update_fpte( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rcms_vf_update_fptes_info *update_vf_fpte_info); + +int sxe2_vchnl_req_get_port_acitve_speed( + struct sxe2_rdma_ctx_dev *dev, + u32 *port_active_speed); + +int sxe2_vchnl_req_send_sync(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_vchnl_req_init_info *info); + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.c new file mode 100644 index 0000000000000000000000000000000000000000..0343af1161b5433fc9f78392bd1f90adfe091210 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.c @@ -0,0 +1,1265 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_srq.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include "sxe2_compat.h" +#include "sxe2-abi.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_srq.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_qp.h" +#include "sxe2_drv_rdma_pble.h" +#include "sxe2_drv_rdma_rcms.h" + +#define SXE2_DRV_SRQC_PAGESZ_WIDTH_MAX (21) +#define SXE2_DRV_SRQ_PAGE_NUM_MIN (1) +#define SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA0 (0x0) +#define SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA1 (0x1) +#define SXE2_DRV_SRQE_MIN_SIZE (32) +#define SXE2_DRV_SRQ_DB_NOTE_SIZE (16) +#define SXE2_DRV_SRQ_POLARITY_INIT \ + (0) +#define SXE2_DRV_SRQE_MAX_FRAG_CNT_SHIFT \ + (2) +#define SXE2_DRV_SRQE_MAX_FRAG_CNT_SUB \ + (1) +#define SXE2_DRV_SRQ_SIZE_MUL_SHIFT \ + (1) +#define SXE2_DRV_SRQ_MIN_QUANTA (8) +#define SXE2_DRV_SRQ_MQ_OP_POST (1) +#define SXE2_DRV_SRQC_PA_OFFSET (8) +#define SXE2_DRV_SRQ_PAGE_OFFSET (256) + +static int srq_kset_pble(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *ksrq, int page_num, + int page_size) +{ + struct sxe2_umode_srq *usrq_rsc = &ksrq->srq_ctx.usrq_rsc; + struct sxe2_pbl_pble_alloc_info *alloc_info = NULL; + struct sxe2_pbl_pble_rsrc *pble_rsrc = rdma_dev->rdma_func->pble_rsrc; + struct ib_block_iter biter; + u64 pble_liner_index; + u64 pgaddr; + int ret = 0; + + if (page_num == SXE2_DRV_SRQ_PAGE_NUM_MIN) { + rdma_umem_for_each_dma_block(usrq_rsc->srq_umem, &biter, + (ulong)page_size) { + usrq_rsc->pbl_pointer = + rdma_block_iter_dma_address(&biter); + } + usrq_rsc->wqe_access_mod = SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA0; + } else { + alloc_info = kzalloc(sizeof(*alloc_info), GFP_KERNEL); + if (!alloc_info) { + ret = -ENOMEM; + goto end; + } + + ret = sxe2_pbl_get_pble(pble_rsrc, alloc_info, (u32)page_num, + PBL_OBJ_SRQ); + if (ret) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:sxe2_pbl_get_pble failed, ret(%d)\n", ret); + goto free_alloc_info; + } + + usrq_rsc->pble_alloc_info = alloc_info; + usrq_rsc->pbl_pointer = alloc_info->pbl_index; + pble_liner_index = alloc_info->pble_info.liner_addr; + usrq_rsc->wqe_access_mod = SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA1; + + rdma_umem_for_each_dma_block(usrq_rsc->srq_umem, &biter, + (unsigned long)page_size) { + pgaddr = rdma_block_iter_dma_address(&biter); + ret = sxe2_pbl_set_pble(pble_rsrc, pble_liner_index, + pgaddr, false); + if (ret) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:sxe2_pbl_set_pble failed, ret(%d)\n", + ret); + goto free_pble; + } + pble_liner_index += sizeof(u64); + } + } + goto end; + +free_pble: + sxe2_pbl_free_pble(pble_rsrc, alloc_info->pble_info.liner_addr, + (u32)page_num, false); +free_alloc_info: + kfree(alloc_info); + alloc_info = NULL; +end: + return ret; +} + +static int srq_ksetup_umode(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *ksrq, + struct sxe2_srq_init_info *info, + struct ib_udata *udata) +{ + struct sxe2_create_srq_req req = {}; + struct sxe2_srq_drv_init_info *drv_info; + struct sxe2_umode_srq *usrq_rsc; + int page_size; + int page_num; + int ret = 0; + long ret_tmp = 0; + + if (udata->inlen < + offsetofend(struct sxe2_create_srq_req, max_wr_cal)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("SRQ:invalid udata inlen %zu, ret %d", + udata->inlen, ret); + goto end; + } + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { + ret = -EFAULT; + DRV_RDMA_LOG_DEV_ERR("SRQ:copy udata failed, ret %d", ret); + goto end; + } + 
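+    /* User-mode SRQ: the ring and doorbell note live in user memory; they are pinned below with ib_umem_get() (the IB_UMEM_GET_V* branches cover the differing kernel signatures). */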
+ ksrq->srq_ctx.user_mode = true; + ksrq->max_wr = req.max_wr_cal; + drv_info = &info->srq_drv_init_info; + drv_info->srq = (struct sxe2_qp_quanta *)(&req.user_srq_buf); + drv_info->db_note = (__le64 *)(&req.user_srq_db_note); + drv_info->srq_buf_size = req.srq_buf_size; + drv_info->srq_size = req.srq_size; + usrq_rsc = &ksrq->srq_ctx.usrq_rsc; + + info->srq_cmpl_ctx = (u64)req.srq_cmpl_ctx; + +#ifdef IB_UMEM_GET_V2 + usrq_rsc->srq_umem = + ib_umem_get(udata, req.user_srq_buf, drv_info->srq_buf_size, + IB_ACCESS_LOCAL_WRITE); +#elif defined(IB_UMEM_GET_V1) + usrq_rsc->srq_umem = + ib_umem_get(udata, req.user_srq_buf, drv_info->srq_buf_size, + IB_ACCESS_LOCAL_WRITE, 0); +#elif defined IB_UMEM_GET_V3 + usrq_rsc->srq_umem = ib_umem_get(ksrq->pd->ibpd.uobject->context, req.user_srq_buf, + drv_info->srq_buf_size, + IB_ACCESS_LOCAL_WRITE, 0); +#else + usrq_rsc->srq_umem = + ib_umem_get(&rdma_dev->ibdev, req.user_srq_buf, + drv_info->srq_buf_size, IB_ACCESS_LOCAL_WRITE); +#endif + if (IS_ERR(usrq_rsc->srq_umem)) { + ret_tmp = PTR_ERR(usrq_rsc->srq_umem); + DRV_RDMA_LOG_DEV_ERR("SRQ:umem ib_umem_get failed, ret %ld\n", + ret_tmp); + ret = (int)ret_tmp; + goto end; + } + +#ifdef HAVE_NO_IB_UMEM_FIND_BEST_PGSZ + page_size = sxe2_set_best_pagesz(req.user_srq_buf, usrq_rsc->srq_umem, + GENMASK(SXE2_DRV_SRQC_PAGESZ_WIDTH_MAX, + PAGE_SHIFT)); +#else + page_size = (int)ib_umem_find_best_pgsz( + usrq_rsc->srq_umem, + GENMASK(SXE2_DRV_SRQC_PAGESZ_WIDTH_MAX, + PAGE_SHIFT), + 0); +#endif + if (!page_size) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:pin buf failed, bufaddr %#llx, ret %d\n", + req.user_srq_buf, ret); + goto free_srq_umem; + } + ksrq->srq_ctx.log_page_size = order_base_2(page_size); + +#ifdef HAVE_IB_UMEM_NUM_DMA_BLOCKS_NOT_SUPPORT + page_num = + sxe2_ib_umem_num_dma_blocks(usrq_rsc->srq_umem, page_size, 0); +#else + page_num = (int)ib_umem_num_dma_blocks(usrq_rsc->srq_umem, + (unsigned long)page_size); +#endif + + ret = srq_kset_pble(rdma_dev, ksrq, page_num, page_size); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("SRQ:sxe2_set_qp_pble failed, ret %d\n", + ret); + goto free_srq_umem; + } + +#ifdef IB_UMEM_GET_V2 + usrq_rsc->db_note_umem = + ib_umem_get(udata, req.user_srq_db_note, + SXE2_DRV_SRQ_DB_NOTE_SIZE, IB_ACCESS_LOCAL_WRITE); +#elif defined(IB_UMEM_GET_V1) + usrq_rsc->db_note_umem = ib_umem_get(udata, req.user_srq_db_note, + SXE2_DRV_SRQ_DB_NOTE_SIZE, + IB_ACCESS_LOCAL_WRITE, 0); +#elif defined IB_UMEM_GET_V3 + usrq_rsc->db_note_umem = ib_umem_get(ksrq->pd->ibpd.uobject->context, req.user_srq_db_note, + SXE2_DRV_SRQ_DB_NOTE_SIZE, + IB_ACCESS_LOCAL_WRITE, 0); +#else + usrq_rsc->db_note_umem = + ib_umem_get(&rdma_dev->ibdev, req.user_srq_db_note, + SXE2_DRV_SRQ_DB_NOTE_SIZE, IB_ACCESS_LOCAL_WRITE); +#endif + if (IS_ERR(usrq_rsc->db_note_umem)) { + ret_tmp = PTR_ERR(usrq_rsc->db_note_umem); + DRV_RDMA_LOG_DEV_ERR( + "SRQ:dbnote umem ib_umem_get failed, ret %ld\n", + ret_tmp); + ret = (int)ret_tmp; + goto free_pble; + } +#ifdef HAVE_IB_UMEM_SG_HEAD + info->db_note_pa = sg_dma_address(usrq_rsc->db_note_umem->sg_head.sgl) + + (req.user_srq_db_note & ~PAGE_MASK); +#else + info->db_note_pa = + sg_dma_address(usrq_rsc->db_note_umem->sgt_append.sgt.sgl) + + (req.user_srq_db_note & ~PAGE_MASK); +#endif + + goto end; + +free_pble: + if (usrq_rsc->wqe_access_mod == SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA1) { + sxe2_pbl_free_pble( + rdma_dev->rdma_func->pble_rsrc, + usrq_rsc->pble_alloc_info->pble_info.liner_addr, + usrq_rsc->pble_alloc_info->needed_pble_cnt, false); + kfree(usrq_rsc->pble_alloc_info); + 
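        /* Defensive: clear the freed pointer so no later teardown path can double-free it. */ +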
usrq_rsc->pble_alloc_info = NULL; + } +free_srq_umem: + ib_umem_release(usrq_rsc->srq_umem); + usrq_rsc->srq_umem = NULL; +end: + return ret; +} + +static void srq_kcal_wqe_shift(u32 max_srq_sge, u8 *srqe_shift) +{ + *srqe_shift = WQE_SIZE_32BYTE; + + if (max_srq_sge > MAX_SGE_SIZE_1) { + if (max_srq_sge < MAX_SGE_SIZE_4) + *srqe_shift = WQE_SIZE_64BYTE; + else if (max_srq_sge < MAX_SGE_SIZE_8) + *srqe_shift = WQE_SIZE_128BYTE; + else + *srqe_shift = WQE_SIZE_256BYTE; + } +} + +static int srq_kget_depth(struct sxe2_common_attrs *common_attrs, u32 srq_size, + u8 shift, u32 *srq_depth) +{ + int ret = 0; + + *srq_depth = sxe2_round_up_pow_2((srq_size << shift) + SXE2_RQ_RSVD); + + if (*srq_depth > common_attrs->max_hw_srq_quanta) + ret = -EINVAL; + + return ret; +} + +static int srq_ksetup_kmode(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *ksrq, + struct sxe2_srq_init_info *info, + struct ib_srq_attr *attr) +{ + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_srq_drv_init_info *drv_info; + struct sxe2_rdma_dma_mem *srq_buf_mem = + &ksrq->srq_ctx.ksrq_rsc.srq_buf_mem; + struct sxe2_rdma_dma_mem *srq_db_note_mem = + &ksrq->srq_ctx.ksrq_rsc.srq_db_note_mem; + u32 depth; + u8 shift; + int ret = 0; + + drv_info = &info->srq_drv_init_info; + + srq_kcal_wqe_shift(drv_info->max_srq_frag_cnt, &shift); + ret = srq_kget_depth(drv_info->common_attrs, attr->max_wr, shift, + &depth); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("SRQ:calculate srq depth failed, ret:%d\n", + ret); + goto end; + } + + drv_info->srq_buf_size = depth * SXE2_DRV_SRQE_MIN_SIZE; + + drv_info->srq_size = depth >> shift; + + ksrq->srq_ctx.ksrq_rsc.srq_wrid_array = + kcalloc(drv_info->srq_size, + sizeof(*ksrq->srq_ctx.ksrq_rsc.srq_wrid_array), + GFP_KERNEL); + if (!ksrq->srq_ctx.ksrq_rsc.srq_wrid_array) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:kcalloc srq_wrid_array failed, ret: %d\n", ret); + goto end; + } + + ksrq->srq_ctx.ksrq_rsc.srqe_array = + kcalloc(drv_info->srq_size, + sizeof(*ksrq->srq_ctx.ksrq_rsc.srqe_array), GFP_KERNEL); + if (!ksrq->srq_ctx.ksrq_rsc.srqe_array) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("SRQ:kcalloc srqe_array failed, ret: %d\n", + ret); + goto free_srq_wrid; + } + + srq_buf_mem->size = drv_info->srq_buf_size; + srq_buf_mem->va = dma_alloc_coherent(dev->hw->device, srq_buf_mem->size, + &srq_buf_mem->pa, GFP_KERNEL); + if (!srq_buf_mem->va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:Kernel Buf dma_alloc_coherent failed, ret %d\n", + ret); + goto free_srqe_array; + } + memset(srq_buf_mem->va, 0, srq_buf_mem->size); + + srq_db_note_mem->size = SXE2_DRV_SRQ_DB_NOTE_SIZE; + srq_db_note_mem->va = + dma_alloc_coherent(dev->hw->device, srq_db_note_mem->size, + &srq_db_note_mem->pa, GFP_KERNEL); + if (!srq_db_note_mem->va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:Kernel DB Note Buf dma_alloc_coherent failed, ret %d\n", + ret); + goto free_srq_buf; + } + memset(srq_db_note_mem->va, 0, srq_db_note_mem->size); + + drv_info->srq = (struct sxe2_qp_quanta *)srq_buf_mem->va; + info->srq_pa = (u64)srq_buf_mem->pa; + drv_info->db_note = (__le64 *)srq_db_note_mem->va; + info->db_note_pa = (u64)srq_db_note_mem->pa; + + ksrq->srq_ctx.log_page_size = order_base_2(PAGE_SIZE); + + info->srq_cmpl_ctx = (u64)(&ksrq->srq_ctx); + + ksrq->max_wr = (depth - SXE2_RQ_RSVD) >> shift; + + goto end; + +free_srq_buf: + dma_free_coherent(dev->hw->device, srq_buf_mem->size, srq_buf_mem->va, + srq_buf_mem->pa); + srq_buf_mem->va = NULL; +free_srqe_array: + 
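    /* Fall-through unwind: release resources in the reverse order of allocation. */ +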
kfree(ksrq->srq_ctx.ksrq_rsc.srqe_array); + ksrq->srq_ctx.ksrq_rsc.srqe_array = NULL; +free_srq_wrid: + kfree(ksrq->srq_ctx.ksrq_rsc.srq_wrid_array); + ksrq->srq_ctx.ksrq_rsc.srq_wrid_array = NULL; +end: + return ret; +} + +static int srq_kinit_drv_common(struct sxe2_srq_drv *srq, + struct sxe2_srq_drv_init_info *info) +{ + u8 srqshift; + int ret = 0; + + srq->common_attrs = info->common_attrs; + + if (info->max_srq_frag_cnt > srq->common_attrs->max_hw_wq_frags) { + ret = -EINVAL; + goto end; + } + + srq_kcal_wqe_shift(info->max_srq_frag_cnt, &srqshift); + + srq->srq_base = info->srq; + srq->db_note = info->db_note; + + srq->srq_id = info->srq_id; + + srq->srq_polarity = SXE2_DRV_SRQ_POLARITY_INIT; + + srq->srq_size = info->srq_size; + + srq->wqe_size = srqshift; + + srq->max_srq_frag_cnt = info->max_srq_frag_cnt; + + SXE2_RING_INIT(srq->srq_ring, srq->srq_size); + + srq->wqe_size_multiplier = SXE2_DRV_SRQ_SIZE_MUL_SHIFT << srqshift; + + srq->srq_buf_size = info->srq_buf_size; + +end: + return ret; +} + +static int srq_kinit_ctx(struct sxe2_rdma_srq_ctx *srq, + struct sxe2_srq_init_info *info) +{ + u32 srq_size_quanta = 0; + int ret = 0; + + ret = srq_kinit_drv_common(&srq->srq_drv, &info->srq_drv_init_info); + if (ret) + goto end; + + srq->dev = info->pd->dev; + srq->pd = info->pd; + srq->vsi = info->vsi; + + srq->srq_pa = info->srq_pa; + + srq->db_note_pa = info->db_note_pa; + + srq->srq_cmpl_ctx = info->srq_cmpl_ctx; + + srq->srq_limit = info->srq_limit; + + srq_size_quanta = + max((u32)SXE2_DRV_SRQ_MIN_QUANTA, + srq->srq_drv.srq_size * srq->srq_drv.wqe_size_multiplier); + srq->hw_srq_size = sxe2_kget_encoded_wqe_size(srq_size_quanta, + SXE2_QUEUE_TYPE_SRQ); + +end: + return ret; +} + +void sxe2_kadd_srq_ref(struct ib_srq *ib_srq) +{ + struct sxe2_rdma_srq *ksrq = to_srq(ib_srq); + + refcount_inc(&ksrq->refcnt); +} + +void sxe2_krem_srq_ref(struct ib_srq *ib_srq) +{ + struct sxe2_rdma_srq *ksrq = to_srq(ib_srq); + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(ksrq->srq_ctx.dev); + unsigned long flags = 0; + + spin_lock_irqsave(&rdma_func->srqtable_lock, flags); + if (!refcount_dec_and_test(&ksrq->refcnt)) { + spin_unlock_irqrestore(&rdma_func->srqtable_lock, flags); + goto end; + } + + rdma_func->srq_table[ksrq->srq_id] = NULL; + spin_unlock_irqrestore(&rdma_func->srqtable_lock, flags); + + complete(&ksrq->free_srq); + +end: + return; +} + +static void srq_kfree_mode(struct sxe2_rdma_srq *ksrq, + struct sxe2_rdma_ctx_dev *dev, + struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + struct sxe2_umode_srq *usrq_rsc = &ksrq->srq_ctx.usrq_rsc; + struct sxe2_kmode_srq *ksrq_rsc = &ksrq->srq_ctx.ksrq_rsc; + + if (udata) { + ib_umem_release(usrq_rsc->db_note_umem); + usrq_rsc->db_note_umem = NULL; + if (usrq_rsc->wqe_access_mod == + SXE2_DRV_SRQ_WQE_ACCESS_MOD_PA1) { + sxe2_pbl_free_pble( + rdma_dev->rdma_func->pble_rsrc, + usrq_rsc->pble_alloc_info->pble_info.liner_addr, + usrq_rsc->pble_alloc_info->needed_pble_cnt, + false); + kfree(usrq_rsc->pble_alloc_info); + usrq_rsc->pble_alloc_info = NULL; + } + ib_umem_release(usrq_rsc->srq_umem); + usrq_rsc->srq_umem = NULL; + + } else { + dma_free_coherent(dev->hw->device, + ksrq_rsc->srq_db_note_mem.size, + ksrq_rsc->srq_db_note_mem.va, + ksrq_rsc->srq_db_note_mem.pa); + ksrq_rsc->srq_db_note_mem.va = NULL; + + dma_free_coherent(dev->hw->device, ksrq_rsc->srq_buf_mem.size, + ksrq_rsc->srq_buf_mem.va, + ksrq_rsc->srq_buf_mem.pa); + ksrq_rsc->srq_buf_mem.va = NULL; + + kfree(ksrq_rsc->srqe_array); + 
ksrq_rsc->srqe_array = NULL;
+
+        kfree(ksrq_rsc->srq_wrid_array);
+        ksrq_rsc->srq_wrid_array = NULL;
+    }
+}
+
+int sxe2_kdestroy_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 scratch,
+                          bool post_mq)
+{
+    struct sxe2_rdma_device *rdma_dev = to_rdmadev(srq->dev);
+    struct sxe2_mq_ctx *mq;
+    struct mq_wqe_of_srq *wqe = NULL;
+    int ret = 0;
+
+    mq = srq->dev->mq;
+
+    wqe = (struct mq_wqe_of_srq *)sxe2_kget_next_mq_wqe(mq, scratch);
+    if (!wqe) {
+        ret = -ENOMEM;
+        DRV_RDMA_LOG_DEV_ERR("SRQ: get destroy mq wqe failed, ret %d\n",
+                             ret);
+        goto end;
+    }
+
+    wqe->SRQ_Completion_Context = srq->srq_cmpl_ctx;
+    wqe->srqn = srq->srq_drv.srq_id;
+    wqe->op = SXE2_MQ_OP_DESTROY_SRQ;
+    wqe->wqe_valid = mq->polarity;
+
+    print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET,
+                         SXE2_PRINT_HEX_BYTE_PER_ROW,
+                         SXE2_PRINT_HEX_BREAK_PER_BYTE, (__le64 *)wqe,
+                         SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8,
+                         false);
+
+    if (post_mq)
+        sxe2_kpost_mq(mq);
+
+end:
+    return ret;
+}
+
+void sxe2_kdestroy_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func,
+                              struct sxe2_rdma_srq_ctx *srq)
+{
+    struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev;
+    struct sxe2_mq_request *mq_request;
+    struct mq_cmds_info *mq_info;
+    int ret = 0;
+
+    mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true);
+    if (!mq_request) {
+        ret = -ENOMEM;
+        goto end;
+    }
+
+    mq_info = &mq_request->info;
+    mq_info->mq_cmd = MQ_OP_DESTROY_SRQ;
+    mq_info->post_mq = SXE2_DRV_SRQ_MQ_OP_POST;
+    mq_info->in.u.srq_destroy.srq = srq;
+    mq_info->in.u.srq_destroy.scratch = (uintptr_t)mq_request;
+    mq_info->destroy = true;
+
+    ret = sxe2_khandle_mq_cmd(rdma_func, mq_request);
+    sxe2_kput_mq_request(&rdma_func->mq, mq_request);
+    if (ret) {
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ:mq handle destroy srq cmd failed, ret %d\n", ret);
+    }
+
+end:
+    return;
+}
+
+#ifdef RDMA_DESTROY_SRQ_VER_2
+void sxe2_kdestroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
+#elif defined RDMA_DESTROY_SRQ_VER_3
+int sxe2_kdestroy_srq(struct ib_srq *ib_srq)
+#else
+int sxe2_kdestroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
+#endif
+{
+    struct sxe2_rdma_device *rdma_dev = to_dev(ib_srq->device);
+    struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+    struct sxe2_rdma_srq *ksrq = to_srq(ib_srq);
+
+    sxe2_krem_srq_ref(ib_srq);
+
+    wait_for_completion(&ksrq->free_srq);
+
+    drv_rdma_debug_srq_remove(rdma_dev, ksrq);
+
+    sxe2_kdestroy_srq_mq_cmd(rdma_func, &ksrq->srq_ctx);
+
+#ifndef RDMA_DESTROY_SRQ_VER_3
+    srq_kfree_mode(ksrq, &rdma_func->ctx_dev, udata);
+#endif
+
+    sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_srqs, ksrq->srq_id);
+    DRV_RDMA_LOG_DEV_INFO("SRQ:Destroy SRQ(%#x) over\n", ksrq->srq_id);
+#ifdef RDMA_DESTROY_SRQ_VER_2
+    return;
+#else
+    return 0;
+#endif
+}
+
+static void sxe2_kfill_srq_wqe(struct mq_wqe_of_srq *wqe,
+                               struct sxe2_rdma_srq_ctx *srq)
+{
+    wqe->pd = srq->pd->pd_id;
+    wqe->log_srq_size = srq->hw_srq_size;
+    wqe->srq_access_mode = srq->user_mode ? srq->usrq_rsc.wqe_access_mod :
+                                            QP_SRQ_PA_FIRST_MODE;
+    wqe->log_page_size = srq->log_page_size;
+    wqe->dbr_addr = srq->db_note_pa;
+    wqe->SRQ_Completion_Context = srq->srq_cmpl_ctx;
+    wqe->srqn = srq->srq_drv.srq_id;
+    wqe->op = SXE2_MQ_OP_CREATE_SRQ;
+    wqe->Physical_Buffer_Address = 0;
+    wqe->srq_pbl_pointer = srq->user_mode ?
+ srq->usrq_rsc.pbl_pointer : + (u64)srq->ksrq_rsc.srq_buf_mem.pa; + wqe->lwm = srq->srq_limit; + +} + +int sxe2_kcreate_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 scratch, + bool post_mq) +{ + struct sxe2_rdma_device *rdma_dev = to_rdmadev(srq->dev); + struct sxe2_mq_ctx *mq; + struct mq_wqe_of_srq *wqe = NULL; + int ret = 0; + + mq = srq->dev->mq; + + if (srq->srq_drv.srq_id < srq->dev->hw_attrs.min_hw_srq_id || + srq->srq_drv.srq_id > + (srq->dev->rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt - + 1)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ: srqn %#x invalid, srqn should in [%#x, %#x]\n", + srq->srq_drv.srq_id, srq->dev->hw_attrs.min_hw_srq_id, + (srq->dev->rcms_info->rcms_obj[SXE2_RCMS_OBJ_SRQ].cnt - + 1)); + goto end; + } + + wqe = (struct mq_wqe_of_srq *)sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("SRQ: get create mq wqe failed, ret %d\n", + ret); + goto end; + } + + sxe2_kfill_srq_wqe(wqe, srq); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "srq_limit_flag", + rdma_dev, wqe); +#endif + wqe->log_rq_stride = srq->srq_drv.wqe_size; + wqe->page_offset = + srq->user_mode ? 0 : + (offset_in_page(srq->ksrq_rsc.srq_buf_mem.pa) / + SXE2_DRV_SRQ_PAGE_OFFSET); + wqe->wqe_valid = mq->polarity; + + print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, + SXE2_PRINT_HEX_BYTE_PER_ROW, + SXE2_PRINT_HEX_BREAK_PER_BYTE, (__le64 *)wqe, + SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8, + false); + +#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT) + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srqn", + &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_size"); +#endif + + if (post_mq) + sxe2_kpost_mq(mq); + +end: + return ret; +} + +int sxe2_kcreate_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq) +{ + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_CREATE_SRQ; + mq_info->post_mq = SXE2_DRV_SRQ_MQ_OP_POST; + mq_info->in.u.srq_create.srq = srq; + mq_info->in.u.srq_create.scratch = (uintptr_t)mq_request; + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ:mq handle create srq cmd failed, ret %d\n", ret); + } + +end: + return ret; +} + +static int sxe2_creat_info_check(struct ib_srq_init_attr *init_attr, + struct ib_srq_attr *attr, + struct sxe2_common_attrs *common_attrs) +{ + int ret = 0; + + if (init_attr->srq_type != IB_SRQT_BASIC) { + ret = -EOPNOTSUPP; + DRV_RDMA_LOG_ERROR("SRQ:Type inv %d, ret %d\n", + init_attr->srq_type, ret); + goto end; + } + + if (attr->max_sge > common_attrs->max_hw_wq_frags || + attr->max_wr > common_attrs->max_hw_srq_wr) { + ret = -EINVAL; + DRV_RDMA_LOG_ERROR( + "SRQ:attr inv, max_sge %#x, max_wr %#x, ret %d\n", + attr->max_sge, attr->max_wr, ret); + goto 
end; + } + +end: + return ret; +} + +static void sxe2_creat_complete_info(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq *ksrq, + struct ib_srq_attr *attr) +{ + rdma_func->srq_table[ksrq->srq_id] = ksrq; + + init_completion(&ksrq->free_srq); + + ksrq->max_sge = ksrq->srq_ctx.srq_drv.max_srq_frag_cnt; + attr->max_wr = ksrq->max_wr; + attr->max_sge = ksrq->max_sge; + + DRV_RDMA_LOG_INFO( + "SRQ:Create SRQ(%#x) over, user_mode %d, srq_size %#x, srqe_size %#x\n", + ksrq->srq_id, ksrq->srq_ctx.user_mode, + ksrq->srq_ctx.srq_drv.srq_size, ksrq->srq_ctx.srq_drv.wqe_size); + +} + + #ifdef CREATE_SRQ_V1 +struct ib_srq *sxe2_kcreate_srq(struct ib_pd *ib_pd, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = to_dev(ib_pd->device); + struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func; + struct ib_srq_attr *attr = &init_attr->attr; + struct sxe2_rdma_pd *pd = to_kpd(ib_pd); + struct sxe2_rdma_srq *ksrq = NULL; + struct sxe2_common_attrs *common_attrs; + struct sxe2_srq_init_info info = {}; + struct sxe2_srq_drv_init_info *drv_info; + struct sxe2_create_srq_resp resp = {}; + int ret = 0; + + common_attrs = &rdma_func->ctx_dev.hw_attrs.uk_attrs; + drv_info = &info.srq_drv_init_info; + + ret = sxe2_creat_info_check(init_attr, attr, common_attrs); + if (ret) + goto end; + + ksrq = kzalloc(sizeof(*ksrq), GFP_KERNEL); + if (!ksrq) { + DRV_RDMA_LOG_DEV_ERR("ksrq kzalloc failed\n"); + ret = -ENOMEM; + goto end; + } + + refcount_set(&ksrq->refcnt, 1); + spin_lock_init(&ksrq->lock); + ksrq->pd = pd; + ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_srqs, + rdma_func->max_srq, &ksrq->srq_id, + &rdma_func->next_srq); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("SRQ:alloc rsrc failed, ret %d\n", ret); + goto free_ksrq; + } + + drv_info->max_srq_frag_cnt = attr->max_sge; + drv_info->common_attrs = common_attrs; + drv_info->srq_id = ksrq->srq_id; + + if (udata) + ret = srq_ksetup_umode(rdma_dev, ksrq, &info, udata); + else + ret = srq_ksetup_kmode(rdma_dev, ksrq, &info, attr); + + if (ret) + goto free_rsrc; + + info.vsi = &rdma_dev->vsi; + info.pd = &pd->pd_ctx; + + ret = srq_kinit_ctx(&ksrq->srq_ctx, &info); + if (ret) { + DRV_RDMA_LOG_DEV_ERR("SRQ:init ctx failed, ret %d\n", ret); + goto free_mem; + } + + ret = sxe2_kcreate_srq_mq_cmd(rdma_func, &ksrq->srq_ctx); + if (ret) + goto free_mem; + + ret = drv_rdma_debug_srq_add(rdma_dev, ksrq); + if (unlikely(ret)) { + DRV_RDMA_LOG_DEV_ERR( + "ERR: failed adding SRQ to debug file system, ret %d\n", + ret); + goto free_hw_srq; + } + + if (udata) { + resp.srq_id = ksrq->srq_id; + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + ret = -EPROTO; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:copy to udata failed, ret %d\n", ret); + drv_rdma_debug_srq_remove(rdma_dev, ksrq); + goto free_hw_srq; + } + } + + sxe2_creat_complete_info(rdma_func, ksrq, attr); + goto end; + +free_hw_srq: + sxe2_kdestroy_srq_mq_cmd(rdma_func, &ksrq->srq_ctx); +free_mem: + srq_kfree_mode(ksrq, &rdma_func->ctx_dev, udata); +free_rsrc: + sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_srqs, ksrq->srq_id); +free_ksrq: + kfree(ksrq); +end: + return ret ? 
ERR_PTR(ret) : &ksrq->ibsrq;
+}
+#else
+int sxe2_kcreate_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *init_attr,
+                     struct ib_udata *udata)
+{
+    struct sxe2_rdma_device *rdma_dev = to_dev(ib_srq->device);
+    struct sxe2_rdma_pci_f *rdma_func = rdma_dev->rdma_func;
+    struct ib_srq_attr *attr = &init_attr->attr;
+    struct sxe2_rdma_pd *pd = to_kpd(ib_srq->pd);
+    struct sxe2_rdma_srq *ksrq = to_srq(ib_srq);
+    struct sxe2_common_attrs *common_attrs;
+    struct sxe2_srq_init_info info = {};
+    struct sxe2_srq_drv_init_info *drv_info;
+    struct sxe2_create_srq_resp resp = {};
+    int ret = 0;
+
+    common_attrs = &rdma_func->ctx_dev.hw_attrs.uk_attrs;
+    drv_info = &info.srq_drv_init_info;
+
+    ret = sxe2_creat_info_check(init_attr, attr, common_attrs);
+    if (ret)
+        goto end;
+
+    refcount_set(&ksrq->refcnt, 1);
+    spin_lock_init(&ksrq->lock);
+
+    ret = sxe2_kalloc_rsrc(rdma_func, rdma_func->allocated_srqs,
+                           rdma_func->max_srq, &ksrq->srq_id,
+                           &rdma_func->next_srq);
+    if (ret) {
+        DRV_RDMA_LOG_DEV_ERR("SRQ:alloc rsrc failed, ret %d\n", ret);
+        goto end;
+    }
+
+    drv_info->max_srq_frag_cnt = attr->max_sge;
+    drv_info->common_attrs = common_attrs;
+    drv_info->srq_id = ksrq->srq_id;
+
+    if (udata)
+        ret = srq_ksetup_umode(rdma_dev, ksrq, &info, udata);
+    else
+        ret = srq_ksetup_kmode(rdma_dev, ksrq, &info, attr);
+
+    if (ret)
+        goto free_rsrc;
+
+    info.vsi = &rdma_dev->vsi;
+    info.pd = &pd->pd_ctx;
+
+    ret = srq_kinit_ctx(&ksrq->srq_ctx, &info);
+    if (ret) {
+        DRV_RDMA_LOG_DEV_ERR("SRQ:init ctx failed, ret %d\n", ret);
+        goto free_mem;
+    }
+
+    ret = sxe2_kcreate_srq_mq_cmd(rdma_func, &ksrq->srq_ctx);
+    if (ret)
+        goto free_mem;
+
+    ret = drv_rdma_debug_srq_add(rdma_dev, ksrq);
+    if (unlikely(ret)) {
+        DRV_RDMA_LOG_DEV_ERR(
+            "ERR: failed adding SRQ to debug file system, ret %d\n",
+            ret);
+        goto free_hw_srq;
+    }
+
+    if (udata) {
+        resp.srq_id = ksrq->srq_id;
+        if (ib_copy_to_udata(udata, &resp,
+                             min(sizeof(resp), udata->outlen))) {
+            ret = -EPROTO;
+            DRV_RDMA_LOG_DEV_ERR(
+                "SRQ:copy to udata failed, ret %d\n", ret);
+            drv_rdma_debug_srq_remove(rdma_dev, ksrq);
+            goto free_hw_srq;
+        }
+    }
+
+    sxe2_creat_complete_info(rdma_func, ksrq, attr);
+    goto end;
+
+free_hw_srq:
+    sxe2_kdestroy_srq_mq_cmd(rdma_func, &ksrq->srq_ctx);
+free_mem:
+    srq_kfree_mode(ksrq, &rdma_func->ctx_dev, udata);
+free_rsrc:
+    sxe2_kfree_rsrc(rdma_func, rdma_func->allocated_srqs, ksrq->srq_id);
+end:
+    return ret;
+}
+#endif
+
+int sxe2_kquery_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 query_pa,
+                        u64 scratch, bool post_mq)
+{
+    struct sxe2_rdma_device *rdma_dev = to_rdmadev(srq->dev);
+    struct sxe2_mq_ctx *mq;
+    struct mq_wqe_of_srq *wqe = NULL;
+    int ret = 0;
+
+    mq = srq->dev->mq;
+
+    wqe = (struct mq_wqe_of_srq *)sxe2_kget_next_mq_wqe(mq, scratch);
+    if (!wqe) {
+        ret = -ENOMEM;
+        DRV_RDMA_LOG_DEV_ERR("SRQ: get query mq wqe failed, ret %d\n",
+                             ret);
+        goto end;
+    }
+
+    wqe->srqn = srq->srq_drv.srq_id;
+    wqe->op = SXE2_MQ_OP_QUERY_SRQ;
+    wqe->Physical_Buffer_Address = query_pa >> SXE2_DRV_SRQC_PA_OFFSET;
+    wqe->wqe_valid = mq->polarity;
+
+    print_hex_dump_debug("WQE: SRQ_QUERY WQE", DUMP_PREFIX_OFFSET,
+                         SXE2_PRINT_HEX_BYTE_PER_ROW,
+                         SXE2_PRINT_HEX_BREAK_PER_BYTE, (__le64 *)wqe,
+                         SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8,
+                         false);
+#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT)
+    INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srqn",
+                 &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq);
+    INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srqn");
+#endif
+
+    if (post_mq)
+        sxe2_kpost_mq(mq);
+
+end:
+    return ret;
+}
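+
+/*
+ * Note: the SRQ create/destroy/query/modify entry points in this file all
+ * follow one maintenance-queue (MQ) pattern: allocate an mq_request, fill
+ * its mq_cmds_info with the opcode and per-op parameters, submit through
+ * sxe2_khandle_mq_cmd(), then release it with sxe2_kput_mq_request().
+ * A rough sketch, using the helpers as they are used in this file:
+ *
+ *    mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true);
+ *    mq_request->info.mq_cmd = MQ_OP_QUERY_SRQ;
+ *    mq_request->info.post_mq = SXE2_DRV_SRQ_MQ_OP_POST;
+ *    mq_request->info.in.u.srq_query.srq = srq;
+ *    mq_request->info.in.u.srq_query.scratch = (uintptr_t)mq_request;
+ *    ret = sxe2_khandle_mq_cmd(rdma_func, mq_request);
+ *    sxe2_kput_mq_request(&rdma_func->mq, mq_request);
+ */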
+
+int sxe2_kquery_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func,
+                           struct sxe2_rdma_srq_ctx *srq, u64 query_pa)
+{
+    struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev;
+    struct sxe2_mq_request *mq_request;
+    struct mq_cmds_info *mq_info;
+    int ret = 0;
+
+    mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true);
+    if (!mq_request) {
+        ret = -ENOMEM;
+        goto end;
+    }
+
+    mq_info = &mq_request->info;
+    mq_info->mq_cmd = MQ_OP_QUERY_SRQ;
+    mq_info->post_mq = SXE2_DRV_SRQ_MQ_OP_POST;
+    mq_info->in.u.srq_query.srq = srq;
+    mq_info->in.u.srq_query.scratch = (uintptr_t)mq_request;
+    mq_info->in.u.srq_query.query_pa = query_pa;
+
+    ret = sxe2_khandle_mq_cmd(rdma_func, mq_request);
+    sxe2_kput_mq_request(&rdma_func->mq, mq_request);
+    if (ret) {
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ:mq handle query srq cmd failed, ret %d\n", ret);
+    }
+
+end:
+    return ret;
+}
+
+int sxe2_kquery_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr)
+{
+    struct sxe2_rdma_device *rdma_dev = to_dev(ib_srq->device);
+    struct sxe2_rdma_ctx_dev *dev_ctx;
+    struct sxe2_rdma_srq *ksrq = to_srq(ib_srq);
+    struct sxe2_rdma_dma_mem query_srq = {};
+    struct sxe2_rdma_srqc *srqc;
+    int ret = 0;
+
+    dev_ctx = &rdma_dev->rdma_func->ctx_dev;
+    query_srq.size = sizeof(struct sxe2_rdma_srqc);
+    query_srq.va = dma_alloc_coherent(dev_ctx->hw->device, query_srq.size,
+                                      &query_srq.pa, GFP_KERNEL);
+    if (!query_srq.va) {
+        ret = -ENOMEM;
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ:Query SRQC Buf Alloc failed, ret:%d\n", ret);
+        goto end;
+    }
+    memset(query_srq.va, 0, query_srq.size);
+
+    ret = sxe2_kquery_srq_mq_cmd(rdma_dev->rdma_func, &ksrq->srq_ctx,
+                                 (u64)query_srq.pa);
+    if (ret)
+        goto free_srqc;
+
+    srqc = (struct sxe2_rdma_srqc *)query_srq.va;
+    attr->srq_limit = srqc->ssrqc.lwm;
+    attr->max_wr = ksrq->max_wr;
+    attr->max_sge = ksrq->max_sge;
+    DRV_RDMA_LOG_DEV_DEBUG("SRQ:Query SRQC log_srq_size = [%d], log_rq_stride= [%d]\n",
+                           srqc->ssrqc.log_srq_size, srqc->ssrqc.log_rq_stride);
+free_srqc:
+    dma_free_coherent(dev_ctx->hw->device, query_srq.size, query_srq.va,
+                      query_srq.pa);
+    query_srq.va = NULL;
+end:
+    return ret;
+}
+
+int sxe2_kmodify_srq_ctx(struct sxe2_rdma_srq_ctx *srq,
+                         struct sxe2_rdma_srqc *srqc, u64 scratch, bool post_mq)
+{
+    struct sxe2_rdma_device *rdma_dev = to_rdmadev(srq->dev);
+    struct sxe2_mq_ctx *mq;
+    struct mq_wqe_of_srq *wqe = NULL;
+    int ret = 0;
+
+    mq = srq->dev->mq;
+
+    wqe = (struct mq_wqe_of_srq *)sxe2_kget_next_mq_wqe(mq, scratch);
+    if (!wqe) {
+        ret = -ENOMEM;
+        DRV_RDMA_LOG_DEV_ERR("SRQ: get modify mq wqe failed, ret %d\n",
+                             ret);
+        goto end;
+    }
+
+    wqe->pd = srqc->ssrqc.pd;
+    wqe->log_srq_size = srqc->ssrqc.log_srq_size;
+    wqe->state = srqc->ssrqc.state;
+    wqe->srq_access_mode = srqc->ssrqc.srq_access_mode;
+    wqe->log_page_size = srqc->ssrqc.log_page_size;
+    wqe->dbr_addr = srqc->ssrqc.dbr_addr;
+    wqe->SRQ_Completion_Context = srqc->ssrqc.SRQ_Completion_Context;
+    wqe->srq_pbl_pointer = srqc->ssrqc.srq_pbl_pointer;
+    wqe->lwm = srqc->ssrqc.lwm;
+    wqe->log_rq_stride = srqc->ssrqc.log_rq_stride;
+    wqe->page_offset = srqc->ssrqc.page_offset;
+
+    wqe->srqn = srq->srq_drv.srq_id;
+    wqe->Physical_Buffer_Address = 0;
+    wqe->op = SXE2_MQ_OP_MODIFY_SRQ;
+    wqe->wqe_valid = mq->polarity;
+
+    print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET,
+                         SXE2_PRINT_HEX_BYTE_PER_ROW,
+                         SXE2_PRINT_HEX_BREAK_PER_BYTE, (__le64 *)wqe,
+                         SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8,
+                         false);
+
+#if defined(SXE2_CFG_DEBUG) && defined(SXE2_SUPPORT_INJECT)
+    INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srqn",
+ &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srqn"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srq_swState", + &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_swState"); + + INJECT_START(rdma_dev->rdma_func, "mq_rcs_wqe_srq_size", + &rdma_dev->rdma_func->mq.err_cqe_val, wqe, srq); + INJECT_DEACTIVE(rdma_dev->rdma_func, "mq_rcs_wqe_srq_size"); +#endif + + if (post_mq) + sxe2_kpost_mq(mq); + +end: + return ret; +} + +int sxe2_kmodify_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq, + struct sxe2_rdma_srqc *srqc) +{ + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + int ret = 0; + + mq_request = sxe2_kalloc_and_get_mq_request(&rdma_func->mq, true); + if (!mq_request) { + ret = -ENOMEM; + goto end; + } + + mq_info = &mq_request->info; + mq_info->mq_cmd = MQ_OP_MODIFY_SRQ; + mq_info->post_mq = SXE2_DRV_SRQ_MQ_OP_POST; + mq_info->in.u.srq_modify.srq = srq; + mq_info->in.u.srq_modify.scratch = (uintptr_t)mq_request; + mq_info->in.u.srq_modify.srqc = srqc; + + ret = sxe2_khandle_mq_cmd(rdma_func, mq_request); + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ:mq handle modify srq cmd failed, ret %d\n", ret); + } + +end: + return ret; +} + +int sxe2_kmodify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct sxe2_rdma_device *rdma_dev = to_dev(ib_srq->device); + struct sxe2_rdma_ctx_dev *dev_ctx; + struct sxe2_rdma_srq *ksrq = to_srq(ib_srq); + struct sxe2_rdma_dma_mem query_srq = {}; + struct sxe2_rdma_srqc *srqc; + int ret = 0; + + if (attr_mask & IB_SRQ_MAX_WR) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:Modify SRQ Mask %#x is illegal(has max_wr), ret %d\n", + attr_mask, ret); + goto end; + } + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= ksrq->srq_ctx.srq_drv.srq_size) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:Modify SRQ limit %#x >= max_srqe %#x, ret %d\n", + attr->srq_limit, ksrq->srq_ctx.srq_drv.srq_size, + ret); + goto end; + } + } else { + DRV_RDMA_LOG_DEV_WARN( + "SRQ:Modify SRQ Mask %#x is illegal(no limit)\n", + attr_mask); + goto end; + } + + dev_ctx = &rdma_dev->rdma_func->ctx_dev; + query_srq.size = sizeof(struct sxe2_rdma_srqc); + query_srq.va = dma_alloc_coherent(dev_ctx->hw->device, query_srq.size, + &query_srq.pa, GFP_KERNEL); + if (!query_srq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "SRQ:Query SRQC Buf Alloc failed, ret:%d\n", ret); + goto end; + } + memset(query_srq.va, 0, query_srq.size); + + ret = sxe2_kquery_srq_mq_cmd(rdma_dev->rdma_func, &ksrq->srq_ctx, + query_srq.pa); + if (ret) + goto free_srqc; + + srqc = (struct sxe2_rdma_srqc *)query_srq.va; + srqc->ssrqc.lwm = attr->srq_limit; + + ret = sxe2_kmodify_srq_mq_cmd(rdma_dev->rdma_func, &ksrq->srq_ctx, + srqc); + if (ret) + goto free_srqc; + + ksrq->srq_ctx.srq_limit = srqc->ssrqc.lwm; + +free_srqc: + dma_free_coherent(dev_ctx->hw->device, query_srq.size, query_srq.va, + query_srq.pa); + query_srq.va = NULL; +end: + return ret; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.h new file mode 100644 index 0000000000000000000000000000000000000000..919635b3a9648d335d8cf4db7883f2aa8920143d --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq.h @@ -0,0 +1,91 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_drv_srq.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_SRQ_H +#define SXE2_DRV_SRQ_H + +#include "sxe2_drv_rdma_common.h" + +struct sxe2_srq_drv_init_info { + struct sxe2_qp_quanta *srq; + struct sxe2_common_attrs *common_attrs; + __le64 *db_note; + u32 srq_id; + u32 srq_size; + u32 max_srq_frag_cnt; + u32 srq_buf_size; +}; + +struct sxe2_srq_init_info { + struct sxe2_rdma_ctx_pd *pd; + struct sxe2_rdma_ctx_vsi *vsi; + u64 srq_pa; + u64 db_note_pa; + u16 srq_limit; + u64 srq_cmpl_ctx; + struct sxe2_srq_drv_init_info srq_drv_init_info; +}; + +void sxe2_kadd_srq_ref(struct ib_srq *ibsrq); + +void sxe2_krem_srq_ref(struct ib_srq *ib_srq); + +int sxe2_kdestroy_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 scratch, + bool post_mq); + +void sxe2_kdestroy_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq); + +#ifdef RDMA_DESTROY_SRQ_VER_2 +void sxe2_kdestroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata); +#elif defined RDMA_DESTROY_SRQ_VER_3 +int sxe2_kdestroy_srq(struct ib_srq *ib_srq); +#else +int sxe2_kdestroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata); +#endif +int sxe2_kcreate_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 scratch, + bool post_mq); + +int sxe2_kcreate_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq); + +#ifdef CREATE_SRQ_V1 +struct ib_srq *sxe2_kcreate_srq(struct ib_pd *ib_pd, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#else +int sxe2_kcreate_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#endif +int sxe2_kquery_srq_ctx(struct sxe2_rdma_srq_ctx *srq, u64 query_pa, + u64 scratch, bool post_mq); + +int sxe2_kquery_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq, u64 query_pa); + +int sxe2_kquery_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr); + +int sxe2_kmodify_srq_ctx(struct sxe2_rdma_srq_ctx *srq, + struct sxe2_rdma_srqc *srqc, u64 scratch, + bool post_mq); + +int sxe2_kmodify_srq_mq_cmd(struct sxe2_rdma_pci_f *rdma_func, + struct sxe2_rdma_srq_ctx *srq, + struct sxe2_rdma_srqc *srqc); + +int sxe2_kmodify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); + +int drv_rdma_debug_srq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *srq); + +void drv_rdma_debug_srq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *srq); +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..1746f05c9ed1849555fc13520222d368eacd7ec8 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_srq_debugfs.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
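+ * + * debugfs hooks that dump the on-chip SRQ context (SRQC) and, when built with SXE2_CFG_DEBUG, let individual SRQC fields be patched for test.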
+ * + * @file: sxe2_drv_srq_debugfs.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_drv_srq.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_rdma_debugfs.h" + +#define SXE2_DRV_SRQ_DEBUGFS_HEX (16) + +enum { + SRQ_PD, + SRQ_LOG_SRQ_SIZE, + SRQ_STATE, + SRQ_ACCESS_MODE, + SRQ_LOG_PAGE_SIZE, + SRQ_DBR_ADDR, + SRQ_CPML_CTX, + SRQ_PBL_POINTER, + SRQ_LWM, + SRQ_LOG_RQ_STRIDE, + SRQ_PAGE_OFFSET, +}; + +#ifdef SXE2_CFG_DEBUG +static char *srq_fields[] = { + [SRQ_PD] = "pd", + [SRQ_LOG_SRQ_SIZE] = "log_srq_size", + [SRQ_STATE] = "state", + [SRQ_ACCESS_MODE] = "srq_access_mode", + [SRQ_LOG_PAGE_SIZE] = "log_page_size", + [SRQ_DBR_ADDR] = "dbr_addr", + [SRQ_CPML_CTX] = "srq_completion_context", + [SRQ_PBL_POINTER] = "srq_pbl_pointer", + [SRQ_LWM] = "srq_lwm", + [SRQ_LOG_RQ_STRIDE] = "log_rq_stride", + [SRQ_PAGE_OFFSET] = "page_offset", +}; + +static int drv_rdma_srq_opcode_switch(size_t i, u64 temp_value, + struct sxe2_rdma_srqc *srqc) +{ + int ret = 0; + + switch (i) { + case SRQ_PD: + srqc->ssrqc.pd = (u32)temp_value; + break; + case SRQ_LOG_SRQ_SIZE: + srqc->ssrqc.log_srq_size = (u32)temp_value; + break; + case SRQ_STATE: + srqc->ssrqc.state = (u32)temp_value; + break; + case SRQ_ACCESS_MODE: + srqc->ssrqc.srq_access_mode = (u32)temp_value; + break; + case SRQ_LOG_PAGE_SIZE: + srqc->ssrqc.log_page_size = (u32)temp_value; + break; + case SRQ_DBR_ADDR: + srqc->ssrqc.dbr_addr = temp_value; + break; + case SRQ_CPML_CTX: + srqc->ssrqc.SRQ_Completion_Context = temp_value; + break; + case SRQ_PBL_POINTER: + srqc->ssrqc.srq_pbl_pointer = temp_value; + break; + case SRQ_LWM: + srqc->ssrqc.lwm = (u32)temp_value; + break; + case SRQ_LOG_RQ_STRIDE: + srqc->ssrqc.log_rq_stride = (u32)temp_value; + break; + case SRQ_PAGE_OFFSET: + srqc->ssrqc.page_offset = (u32)temp_value; + break; + default: + ret = -EINVAL; + DRV_RDMA_LOG_WARN("invalid index %zu, ret %d\n", i, ret); + } + + return ret; +} +#endif + +static u64 drv_rdma_srq_read_field(struct sxe2_rdma_device *rdma_dev, + void *data, enum drv_rdma_dbg_rsc_type type, + char *buf) +{ + int ret = 0; + struct sxe2_rdma_srqc *srqc; + struct sxe2_rdma_srq *srq; + struct sxe2_rdma_dma_mem query_srq = {}; + struct sxe2_rdma_ctx_dev *dev_ctx; + size_t len = 0; + + if (!rdma_dev || !data) { + ret = -ENXIO; + DRV_RDMA_LOG_ERROR( + "SRQ DEBUGFS:RDMA Dev/Data is NULL, ret %d\n", ret); + goto end; + } + + if (type == SXE2_DBG_RSC_SRQ) { + srq = (struct sxe2_rdma_srq *)data; + } else { + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:RSC %d err, expected %d, ret %d\n", type, + SXE2_DBG_RSC_SRQ, ret); + goto end; + } + + dev_ctx = &(rdma_dev->rdma_func->ctx_dev); + + query_srq.size = sizeof(struct sxe2_rdma_srqc); + query_srq.va = dma_alloc_coherent(dev_ctx->hw->device, query_srq.size, + &query_srq.pa, GFP_KERNEL); + if (!query_srq.va) { + ret = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:query srq ctx alloc failed. 
ret %d\n",
+            ret);
+        goto end;
+    }
+    memset(query_srq.va, 0, query_srq.size);
+
+    ret = sxe2_kquery_srq_mq_cmd(rdma_dev->rdma_func, &srq->srq_ctx,
+                                 (u64)query_srq.pa);
+    if (ret) {
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ DEBUGFS:Query SRQ --mq cmd failed, ret %d\n", ret);
+        goto free_srqc;
+    }
+
+    srqc = (struct sxe2_rdma_srqc *)query_srq.va;
+
+    len += dbg_vsnprintf(buf, len, "SRQ:%d Context:\n\n", srq->srq_id);
+    len += dbg_vsnprintf(buf, len, "Soft Context\n");
+    len += dbg_vsnprintf(buf, len, "pd: %#x\n",
+                         srqc->ssrqc.pd);
+    len += dbg_vsnprintf(buf, len, "log_srq_size: %#x\n",
+                         srqc->ssrqc.log_srq_size);
+    len += dbg_vsnprintf(buf, len, "state: %#x\n",
+                         srqc->ssrqc.state);
+    len += dbg_vsnprintf(buf, len, "srq_access_mode: %#x\n",
+                         srqc->ssrqc.srq_access_mode);
+    len += dbg_vsnprintf(buf, len, "log_page_size: %#x\n",
+                         srqc->ssrqc.log_page_size);
+    len += dbg_vsnprintf(buf, len, "dbr_addr: %#llx\n",
+                         srqc->ssrqc.dbr_addr);
+    len += dbg_vsnprintf(buf, len, "SRQ_Completion_Context: %#llx\n",
+                         srqc->ssrqc.SRQ_Completion_Context);
+    len += dbg_vsnprintf(buf, len, "srq_pbl_pointer: %#llx\n",
+                         srqc->ssrqc.srq_pbl_pointer);
+    len += dbg_vsnprintf(buf, len, "lwm: %#x\n",
+                         srqc->ssrqc.lwm);
+    len += dbg_vsnprintf(buf, len, "log_rq_stride: %#x\n",
+                         srqc->ssrqc.log_rq_stride);
+    len += dbg_vsnprintf(buf, len, "page_offset: %#x\n",
+                         srqc->ssrqc.page_offset);
+
+    len += dbg_vsnprintf(buf, len, "Hardware Context\n");
+    len += dbg_vsnprintf(buf, len, "state_err_aeq_flag: %#x\n",
+                         srqc->hsrqc.state_err_aeq_flag);
+    len += dbg_vsnprintf(buf, len, "sw_srq_counter: %#x\n",
+                         srqc->hsrqc.sw_srq_counter);
+    len += dbg_vsnprintf(buf, len, "hw_srq_counter: %#x\n",
+                         srqc->hsrqc.hw_srq_counter);
+    len += dbg_vsnprintf(buf, len, "srq_page_pa_sel: %#x\n",
+                         srqc->hsrqc.srq_page_pa_sel);
+    len += dbg_vsnprintf(buf, len, "srq_page_pa_vld: %#x\n",
+                         srqc->hsrqc.srq_page_pa_vld);
+    len += dbg_vsnprintf(buf, len, "srq_wqe_vld: %#x\n",
+                         srqc->hsrqc.srq_wqe_vld);
+    len += dbg_vsnprintf(buf, len, "srq_page_pa0: %#x\n",
+                         srqc->hsrqc.srq_page_pa0);
+    len += dbg_vsnprintf(buf, len, "srq_page_pa1: %#x\n",
+                         srqc->hsrqc.srq_page_pa1);
+
+free_srqc:
+    dma_free_coherent(dev_ctx->hw->device, query_srq.size, query_srq.va,
+                      query_srq.pa);
+    query_srq.va = NULL;
+end:
+    return len;
+}
+
+static int drv_rdma_srq_write_field(struct sxe2_rdma_device *rdma_dev,
+                                    void *data, enum drv_rdma_dbg_rsc_type type,
+                                    char *buf)
+{
+#ifdef SXE2_CFG_DEBUG
+    size_t i;
+    int ret = 0;
+    u64 temp_value;
+    struct sxe2_rdma_srqc *srqc;
+    int argc;
+    char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 };
+    struct sxe2_rdma_srq *srq;
+    struct sxe2_rdma_dma_mem query_srq = {};
+    struct sxe2_rdma_ctx_dev *dev_ctx;
+
+    if (!rdma_dev || !data) {
+        ret = -ENXIO;
+        DRV_RDMA_LOG_ERROR(
+            "SRQ DEBUGFS:RDMA Dev/Data is NULL, ret %d\n", ret);
+        goto end;
+    }
+
+    if (type == SXE2_DBG_RSC_SRQ) {
+        srq = (struct sxe2_rdma_srq *)data;
+    } else {
+        ret = -EINVAL;
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ DEBUGFS:RSC %d err, expected %d, ret %d\n", type,
+            SXE2_DBG_RSC_SRQ, ret);
+        goto end;
+    }
+
+    dev_ctx = &(rdma_dev->rdma_func->ctx_dev);
+
+    query_srq.size = sizeof(struct sxe2_rdma_srqc);
+    query_srq.va = dma_alloc_coherent(dev_ctx->hw->device, query_srq.size,
+                                      &query_srq.pa, GFP_KERNEL);
+    if (!query_srq.va) {
+        ret = -ENOMEM;
+        DRV_RDMA_LOG_DEV_ERR(
+            "SRQ DEBUGFS:query srq ctx alloc failed. 
ret:%d\n", + ret); + goto end; + } + memset(query_srq.va, 0, query_srq.size); + + ret = sxe2_kquery_srq_mq_cmd(rdma_dev->rdma_func, &srq->srq_ctx, + (u64)query_srq.pa); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:Query SRQ --mq cmd failed, ret %d\n", ret); + goto free_srqc; + } + + srqc = (struct sxe2_rdma_srqc *)query_srq.va; + + argc = 0; + memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX); + ret = split_command(buf, &argc, argv); + if (ret) + goto free_srqc; + + if (argc != DEBUG_PARA_CONT2) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("SRQ DEBUGFS:invalid param nums\n"); + goto free_srqc; + } + + for (i = 0; i < ARRAY_SIZE(srq_fields); i++) { + if (!strncmp(argv[0], srq_fields[i], strlen(srq_fields[i])) && + (strlen(srq_fields[i]) == strlen(argv[0]))) { + break; + } + } + + ret = kstrtoull(argv[1], SXE2_DRV_SRQ_DEBUGFS_HEX, &temp_value); + if (ret != 0) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:get param value failed, ret (%d)\n", ret); + goto free_srqc; + } + + DRV_RDMA_LOG_DEV_INFO("SRQ DEBUGFS:query srq i:%zu, temp_value:%llx\n", + i, temp_value); + + ret = drv_rdma_srq_opcode_switch(i, temp_value, srqc); + if (ret) + goto free_srqc; + + ret = sxe2_kmodify_srq_mq_cmd(rdma_dev->rdma_func, &srq->srq_ctx, srqc); + if (ret) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:Modify SRQ --mq cmd failed, ret %d\n", + ret); + } + +free_srqc: + dma_free_coherent(dev_ctx->hw->device, query_srq.size, query_srq.va, + query_srq.pa); + query_srq.va = NULL; +end: + return ret; +#else + return 0; +#endif +} + +int drv_rdma_debug_srq_add(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *srq) +{ + int ret = 0; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:debugfs root dir not exist, ret (%d)\n", + ret); + goto end; + } + + if (!rdma_dev->hdl->srq_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:srq debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + srq->dbg_node = drv_rdma_add_res_tree(rdma_dev, SXE2_DBG_RSC_SRQ, + rdma_dev->hdl->srq_debugfs, + drv_rdma_srq_read_field, + drv_rdma_srq_write_field, + (int)srq->srq_id, srq); + if (!srq->dbg_node) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:srq debug res tree add failed ret (%d)\n", + ret); + } + +end: + return ret; +} + +void drv_rdma_debug_srq_remove(struct sxe2_rdma_device *rdma_dev, + struct sxe2_rdma_srq *srq) +{ + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + DRV_RDMA_LOG_DEV_ERR( + "SRQ DEBUGFS:debugfs root dir not exist\n"); + goto end; + } + + if (!rdma_dev->hdl->srq_debugfs) { + DRV_RDMA_LOG_DEV_ERR("SRQ DEBUGFS:srq debugfs dir not exist\n"); + goto end; + } + + if (srq->dbg_node) { + drv_rdma_rm_res_tree(srq->dbg_node); + srq->dbg_node = NULL; + } + +end: + return; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..fc96db23946a79139c22d3f74a5305f4ef6b5bdf --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.c @@ -0,0 +1,717 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
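+ * + * Hardware counter gathering through the maintenance queue, plus the counter name tables exported via the ib_core hw_stats interface.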
+ * + * @file: sxe2_drv_stats.c + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include "sxe2_compat.h" +#include "sxe2_drv_rdma_log.h" +#include "sxe2_drv_mq.h" +#include "sxe2_drv_stats.h" +#include "sxe2_drv_rdma_debugfs.h" +#include "sxe2_drv_rdma_virtchnl.h" +#include "sxe2_drv_rdma_rcms.h" + +#define STATS_TIMER_DELAY 3600000 +#define MQ_GATHER_STATS_POST 1 +#define STATS_DEFAULT_LIFESPAN 3600000 + +#ifdef ALLOC_HW_STATS_STRUCT_V1 +const char *const sxe2_rdma_hw_stat_names[] = { + [SXE2_RDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXBND] = "OutRdmaBinds", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXINV] = "OutRdmaLocalInvs", + [SXE2_RDMA_HW_STAT_INDEX_TXCNPSENT] = "OutCnpSent", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXINV] = "InRdmaSendInvs", + [SXE2_RDMA_HW_STAT_INDEX_RXECNMARKEDPKTS] = "InEcnMarkedPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXCNPHANDLED] = "InCnpHandledPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXCNPIGNORED] = "InCnpIgnoredPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXNAKPKTS] = "InNakPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXSEQERR] = "InSequenceErrs", + [SXE2_RDMA_HW_STAT_INDEX_RXRNRNAKPKTS] = "InRnrNakPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXRETRANS] = "TimeoutRetrans", +}; +#else +const struct rdma_stat_desc sxe2_rdma_hw_stat_descs[] = { + [SXE2_RDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXBND].name = "OutRdmaBinds", + [SXE2_RDMA_HW_STAT_INDEX_RDMATXINV].name = "OutRdmaLocalInvs", + [SXE2_RDMA_HW_STAT_INDEX_TXCNPSENT].name = "OutCnpSent", + 
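    /* Receive-direction counters. */ +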
[SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets", + [SXE2_RDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends", + [SXE2_RDMA_HW_STAT_INDEX_RDMARXINV].name = "InRdmaSendInvs", + [SXE2_RDMA_HW_STAT_INDEX_RXECNMARKEDPKTS].name = "InEcnMarkedPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXCNPHANDLED].name = "InCnpHandledPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXCNPIGNORED].name = "InCnpIgnoredPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXNAKPKTS].name = "InNakPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXSEQERR].name = "InSequenceErrs", + [SXE2_RDMA_HW_STAT_INDEX_RXRNRNAKPKTS].name = "InRnrNakPkts", + [SXE2_RDMA_HW_STAT_INDEX_RXRETRANS].name = "TimeoutRetrans", +}; +#endif + +void sxe2_kupdate_vsi_stats(struct sxe2_rdma_ctx_vsi *vsi) +{ + struct sxe2_rdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats; + struct sxe2_rdma_gather_stats *gather_stats = + vsi->pestat->gather_info.gather_stats_va; + const struct sxe2_rdma_hw_stat_map *map = vsi->dev->hw_stats_map; + u16 max_stats_idx = vsi->dev->hw_attrs.max_stat_idx; + u16 i; + u16 idx; + + mutex_lock(&vsi->pestat->stats_lock); + for (i = 0; i < max_stats_idx; i++) { + idx = map[i].byteoff / sizeof(u64); + hw_stats->stats_val[i] += + ((gather_stats->val[idx] >> map[i].bitoff) & + map[i].bitmask); + } + mutex_unlock(&vsi->pestat->stats_lock); + +} + +void sxe2_kprocess_mq_stats(struct sxe2_mq_request *mq_request) +{ + struct sxe2_rdma_vsi_pestat *pestat = mq_request->param; + + sxe2_kupdate_vsi_stats(pestat->vsi); +} + +int sxe2_kgather_stats(struct sxe2_mq_ctx *mq, + struct sxe2_rdma_stats_gather_info *info, u64 scratch) +{ + struct mq_wqe_gather_stats *wqe = NULL; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(mq->dev); + int ret_code = 0; + + if (info->stats_buff_mem.size < SXE2_GATHER_STATS_BUF_SIZE) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("STATS Buf size < 1K, ret_code %d\n", + ret_code); + goto end; + } + + wqe = (struct mq_wqe_gather_stats *)sxe2_kget_next_mq_wqe(mq, scratch); + if (!wqe) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR("Get MQ WQE fail, ret_code %d\n", + ret_code); + goto end; + } + + wqe->statistics_instance_index = info->stats_inst_index; + wqe->op = SXE2_MQ_OP_GATHER_STATS; + wqe->use_rcms_func_index = info->use_rdma_fcn_index; + wqe->use_statistics_instance = info->use_stats_inst; + wqe->physical_buffer_address = info->stats_buff_mem.pa; + wqe->rcms_fcn_index = info->rcms_fcn_index; + wqe->wqe_valid = mq->polarity; + + DRV_RDMA_LOG_DEV_DEBUG( + "GATHER_STATS WQE: statistics_instance_index %#x, op %#x\n" + "use_rcms_func_index %#x, use_statistics_instance %#x\n" + "physical_buffer_address %#llx, rcms_fcn_index %#x, wqe_valid %#x\n", + wqe->statistics_instance_index, wqe->op, + wqe->use_rcms_func_index, wqe->use_statistics_instance, + wqe->physical_buffer_address, wqe->rcms_fcn_index, + wqe->wqe_valid); + + print_hex_dump_debug("STATS: GATHER_STATS WQE", 
DUMP_PREFIX_OFFSET, + SXE2_PRINT_HEX_BYTE_PER_ROW, + SXE2_PRINT_HEX_BREAK_PER_BYTE, (__le64 *)wqe, + SXE2_MQ_WQE_SIZE * SXE2_PRINT_HEX_MUL_BYTE_8, + false); + + sxe2_kpost_mq(mq); + +end: + return ret_code; +} + +int sxe2_kgather_stats_mq_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vsi_pestat *pestat, bool wait) +{ + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_mq *mq = &rdma_func->mq; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + int ret_code = 0; + + if (!dev->privileged) { + ret_code = -EIO; + DRV_RDMA_LOG_DEV_ERR("STATS: Current func is %d, ret_code %d\n", + dev->privileged, ret_code); + goto end; + } + + mq_request = sxe2_kalloc_and_get_mq_request(mq, wait); + if (!mq_request) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "STATS:Alloc MQ Request fail, ret_code %d\n", + ret_code); + goto end; + } + + mq_info = &mq_request->info; + memset(mq_info, 0, sizeof(*mq_info)); + mq_info->mq_cmd = MQ_OP_GATHER_STATS; + mq_info->post_mq = MQ_GATHER_STATS_POST; + mq_info->in.u.stats_gather.info = pestat->gather_info; + mq_info->in.u.stats_gather.scratch = (uintptr_t)mq_request; + mq_info->in.u.stats_gather.mq = &rdma_func->mq.mq; + mq_request->param = pestat; + if (!wait) + mq_request->callback_fcn = sxe2_kprocess_mq_stats; + + ret_code = sxe2_khandle_mq_cmd(rdma_func, mq_request); + if (wait) + sxe2_kupdate_vsi_stats(pestat->vsi); + + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return ret_code; +} + +static int sxe2_kgather_pf_for_vf_stats_val_mq_cmd( + struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_stats_gather_info *gather_stats_info, + u32 stats_req_type, struct sxe2_rdma_gather_stats_vf *gather_stats_resp) +{ + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_mq *mq = &rdma_func->mq; + struct sxe2_mq_request *mq_request; + struct mq_cmds_info *mq_info; + struct sxe2_rdma_gather_stats *gather_stats = + (struct sxe2_rdma_gather_stats *)(gather_stats_info + ->gather_stats_va); + const struct sxe2_rdma_hw_stat_map *map = dev->hw_stats_map; + u16 i; + u16 rx_idx = 0; + int ret_code = 0; + + if (stats_req_type == SXE2_RDMA_STATS_VF_TX) { + DRV_RDMA_LOG_DEV_DEBUG("VF stats_req_type TX %u\n", + stats_req_type); + } else if (stats_req_type == SXE2_RDMA_STATS_VF_RX) { + memcpy(gather_stats_resp->val, + &gather_stats->val[STATS_VF_RX_BUF_START_8BYTE], + STATS_VF_RX_BUF_ALL_BYTE); + for (i = SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS; + i < SXE2_RDMA_HW_STAT_INDEX_MAX; i++) { + rx_idx = (map[i].byteoff / sizeof(u64) - + STATS_VF_RX_BUF_START_8BYTE); + DRV_RDMA_LOG_DEV_DEBUG( + "vf i %u, rx_idx %u, bitoff %u, val %#llx\n", i, + rx_idx, map[i].byteoff, + gather_stats_resp->val[rx_idx]); + } + goto end; + } else { + ret_code = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("VF stats_req_type %u, ret %d\n", + stats_req_type, ret_code); + goto end; + } + + mq_request = sxe2_kalloc_and_get_mq_request(mq, true); + if (!mq_request) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "VF STATS:Alloc MQ Request fail, ret_code %d\n", + ret_code); + goto end; + } + + mq_info = &mq_request->info; + memset(mq_info, 0, sizeof(*mq_info)); + mq_info->mq_cmd = MQ_OP_GATHER_STATS; + mq_info->post_mq = MQ_GATHER_STATS_POST; + memcpy(&mq_info->in.u.stats_gather.info, gather_stats_info, + sizeof(*gather_stats_info)); + mq_info->in.u.stats_gather.scratch = (uintptr_t)mq_request; + mq_info->in.u.stats_gather.mq = &rdma_func->mq.mq; + 
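+	/*
+	 * Synchronous gather on behalf of the VF: the request was taken
+	 * with wait == true and no completion callback is installed, so
+	 * the TX counters are copied straight out of the PF-owned DMA
+	 * buffer once sxe2_khandle_mq_cmd() has returned.
+	 */
+	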
mq_request->param = NULL; + + ret_code = sxe2_khandle_mq_cmd(rdma_func, mq_request); + + memcpy(gather_stats_resp->val, gather_stats->val, + STATS_VF_TX_BUF_ALL_BYTE); + for (i = 0; i < SXE2_RDMA_HW_STAT_INDEX_IP4RXOCTS; i++) { + DRV_RDMA_LOG_DEV_DEBUG("vf i %u, bitoff %u, val %#llx\n", i, + map[i].byteoff, + gather_stats_resp->val[i]); + } + + sxe2_kput_mq_request(&rdma_func->mq, mq_request); + +end: + return ret_code; +} + +int sxe2_kgather_pf_for_vf_stats_val( + struct sxe2_rdma_vchnl_dev *vc_dev, u32 stats_req_type, + struct sxe2_rdma_gather_stats_vf *gather_stats_resp) +{ + int ret_code = 0; + struct sxe2_rdma_stats_gather_info *gather_stats_info; + u16 vf_idx; + struct sxe2_rdma_ctx_dev *dev = vc_dev->pf_dev; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(dev); + u8 vf_pmf_idx = (u8)vc_dev->pmf_index; + + if ((!dev->privileged) || + (vf_pmf_idx > dev->hw_attrs.max_hw_vf_fpm_id || + vf_pmf_idx < dev->hw_attrs.first_hw_vf_fpm_id)) { + ret_code = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "STATS:dev if pf err or vf pmf id err, dev\n" + "\tprivileged=%u, id=%u, ret_code %d\n", + dev->privileged, vf_pmf_idx, ret_code); + goto end; + } + + gather_stats_info = &vc_dev->gather_stats_info; + vf_idx = vc_dev->vf_idx; + + if (!vc_dev->gather_stats_buf.va) { + if (!dev->vf_gather_stats_buf[vf_idx].va) { + dev->vf_gather_stats_buf[vf_idx].size = + ALIGN(SXE2_GATHER_STATS_BUF_SIZE, + SXE2_STATS_BUF_ALIGN); + dev->vf_gather_stats_buf[vf_idx].va = + dma_alloc_coherent( + rdma_dev->rdma_func->hw.device, + dev->vf_gather_stats_buf[vf_idx].size, + &dev->vf_gather_stats_buf[vf_idx].pa, + GFP_KERNEL); + if (!dev->vf_gather_stats_buf[vf_idx].va) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "STATS:alloc vf stats_buf_mem va fail, ret_code %d\n", + ret_code); + goto end; + } + memset(dev->vf_gather_stats_buf[vf_idx].va, 0, + dev->vf_gather_stats_buf[vf_idx].size); + } + memcpy(&vc_dev->gather_stats_buf, + &dev->vf_gather_stats_buf[vf_idx], + sizeof(vc_dev->gather_stats_buf)); + } + + gather_stats_info->use_rdma_fcn_index = true; + gather_stats_info->use_stats_inst = false; + gather_stats_info->rcms_fcn_index = vf_pmf_idx; + gather_stats_info->stats_inst_index = 0; + memcpy(&gather_stats_info->stats_buff_mem, &vc_dev->gather_stats_buf, + sizeof(gather_stats_info->stats_buff_mem)); + gather_stats_info->gather_stats_va = + gather_stats_info->stats_buff_mem.va; + + ret_code = sxe2_kgather_pf_for_vf_stats_val_mq_cmd( + dev, gather_stats_info, stats_req_type, gather_stats_resp); + if (ret_code) { + ret_code = -EINVAL; + DRV_RDMA_LOG_DEV_ERR( + "STATS:get pmf id %u gather stats val err ret=%d\n", + vf_pmf_idx, ret_code); + } + +end: + return ret_code; +} + +int sxe2_kgather_vf_stats_mq_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vsi_pestat *pestat, bool wait) +{ + struct sxe2_rdma_pci_f *rdma_func = to_rdmafunc(dev); + struct sxe2_rdma_device *rdma_dev = rdma_func->rdma_dev; + struct sxe2_rdma_dev_hw_stats *hw_stats = + &pestat->vsi->pestat->hw_stats; + const struct sxe2_rdma_hw_stat_map *map = + pestat->vsi->dev->hw_stats_map; + struct sxe2_rdma_gather_stats *pgather_stats_resp = + kzalloc(sizeof(struct sxe2_rdma_gather_stats), GFP_KERNEL); + int ret_code = 0; + u16 i; + u16 idx; + + mutex_lock(&pestat->stats_lock); + + if (dev->privileged) { + ret_code = -EIO; + DRV_RDMA_LOG_DEV_ERR("STATS: Current func is %d, ret_code %d\n", + dev->privileged, ret_code); + goto end; + } + + ret_code = sxe2_vchnl_req_gather_stats(dev, pgather_stats_resp); + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("gather stats vf fail, 
ret_code %d\n", + ret_code); + goto end; + } + + for (i = 0; i < SXE2_RDMA_HW_STAT_INDEX_MAX; i++) { + idx = map[i].byteoff / sizeof(u64); + hw_stats->stats_val[i] += + ((pgather_stats_resp->val[idx] >> map[i].bitoff) & + map[i].bitmask); + } + +end: + kfree(pgather_stats_resp); + mutex_unlock(&pestat->stats_lock); + return ret_code; +} + +static void sxe2_kwork_stats(struct work_struct *work) +{ + struct sxe2_rdma_vsi_pestat *devstat = + container_of(work, struct sxe2_rdma_vsi_pestat, work); + struct sxe2_rdma_ctx_vsi *ctx_vsi = devstat->vsi; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(ctx_vsi->dev); + int ret_code = 0; + + if (ctx_vsi->dev->privileged) { + ret_code = sxe2_kgather_stats_mq_cmd(ctx_vsi->dev, + ctx_vsi->pestat, true); + } else { + ret_code = sxe2_kgather_vf_stats_mq_cmd(ctx_vsi->dev, ctx_vsi->pestat, + true); + } + if (ret_code) { + DRV_RDMA_LOG_DEV_ERR("STATS WORK: work failed,ret %d\n", + ret_code); + } + +} + +static void sxe2_ktimeout_hw_stats(struct timer_list *t) +{ + struct sxe2_rdma_vsi_pestat *pf_devstat = + from_timer(pf_devstat, t, stats_timer); + + queue_work(pf_devstat->stats_wq, &pf_devstat->work); + mod_timer(&pf_devstat->stats_timer, + jiffies + msecs_to_jiffies(pf_devstat->timer_delay)); +} + +static int sxe2_kstart_hw_stats_timer(struct sxe2_rdma_ctx_vsi *vsi) +{ + struct sxe2_rdma_vsi_pestat *devstat = vsi->pestat; + struct sxe2_rdma_device *rdma_dev = to_rdmadev(vsi->dev); + int ret_code = 0; + + mutex_init(&devstat->stats_lock); + devstat->stats_wq = alloc_ordered_workqueue( + "stats_wq", WQ_HIGHPRI | WQ_UNBOUND); + if (!devstat->stats_wq) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "failed to create workqueue, ret %d\n", + ret_code); + goto end; + } + INIT_WORK(&devstat->work, sxe2_kwork_stats); + + timer_setup(&devstat->stats_timer, sxe2_ktimeout_hw_stats, 0); + mod_timer(&devstat->stats_timer, + jiffies + msecs_to_jiffies(devstat->timer_delay)); + +end: + return ret_code; +} + +static void sxe2_kstop_hw_stats_timer(struct sxe2_rdma_ctx_vsi *vsi) +{ + struct sxe2_rdma_vsi_pestat *devstat = vsi->pestat; + + del_timer_sync(&devstat->stats_timer); + + if (devstat->stats_wq) + destroy_workqueue(devstat->stats_wq); + mutex_destroy(&devstat->stats_lock); +} + +int sxe2_kinit_vsi_stats(struct sxe2_rdma_device *rdma_dev) +{ + struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev; + struct sxe2_rdma_ctx_vsi *vsi = &rdma_dev->vsi; + struct sxe2_rdma_vsi_stats_info info = {}; + struct sxe2_rdma_dma_mem *stats_buff_mem; + int ret_code = 0; + + info.pestat = kzalloc(sizeof(*info.pestat), GFP_KERNEL); + if (!info.pestat) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "kzalloc stats_info.pestat fail, ret_code %d\n", + ret_code); + goto end; + } + info.fcn_id = dev->rcms_fn_id; + + vsi->pestat = info.pestat; + vsi->pestat->hw = vsi->dev->hw; + vsi->pestat->vsi = vsi; + + stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem; + stats_buff_mem->size = + ALIGN(SXE2_GATHER_STATS_BUF_SIZE, SXE2_STATS_BUF_ALIGN); + stats_buff_mem->va = + dma_alloc_coherent(vsi->pestat->hw->device, + stats_buff_mem->size, &stats_buff_mem->pa, + GFP_KERNEL); + if (!stats_buff_mem->va) { + ret_code = -ENOMEM; + DRV_RDMA_LOG_DEV_ERR( + "kzalloc stats_buf_mem va fail, ret_code %d\n", + ret_code); + goto free_pestat; + } + memset(stats_buff_mem->va, 0, stats_buff_mem->size); + + vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va; + vsi->pestat->gather_info.rcms_fcn_index = vsi->dev->hw->rcms.rcms_fn_id; + + vsi->pestat->timer_delay = STATS_TIMER_DELAY; + 
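+	/*
+	 * timer_delay is in milliseconds (it feeds msecs_to_jiffies() in
+	 * sxe2_ktimeout_hw_stats()), so STATS_TIMER_DELAY (3600000)
+	 * schedules one hardware gather per hour.
+	 */
+	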
ret_code = sxe2_kstart_hw_stats_timer(vsi);
+	if (ret_code) {
+		DRV_RDMA_LOG_DEV_ERR("kstart stats timer fail, ret_code %d\n",
+				     ret_code);
+		goto free_stats_buf;
+	}
+
+	vsi->stats_idx = info.fcn_id;
+
+#ifdef SXE2_CFG_DEBUG
+	ret_code = drv_rdma_debug_stats_add(rdma_dev);
+	if (ret_code) {
+		DRV_RDMA_LOG_DEV_ERR("init stats debugfs fail, ret:%d\n",
+				     ret_code);
+		sxe2_kfree_vsi_stats(rdma_dev);
+		goto end;
+	}
+#ifdef SXE2_SUPPORT_INJECT
+	ret_code = drv_rdma_stats_overflow_inject_debugfs_add(rdma_dev);
+	if (ret_code) {
+		DRV_RDMA_LOG_DEV_ERR("init stats inject debugfs fail, ret:%d\n",
+				     ret_code);
+		sxe2_kfree_vsi_stats(rdma_dev);
+		goto end;
+	}
+#endif
+#endif
+
+	goto end;
+
+free_stats_buf:
+	dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
+			  stats_buff_mem->va, stats_buff_mem->pa);
+	stats_buff_mem->va = NULL;
+free_pestat:
+	kfree(info.pestat);
+	info.pestat = NULL;
+	vsi->pestat = NULL;
+end:
+	return ret_code;
+}
+
+void sxe2_kfree_vsi_stats(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_ctx_vsi *vsi = &rdma_dev->vsi;
+	struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev;
+	u16 idx;
+
+	if (!vsi->pestat) {
+		DRV_RDMA_LOG_DEV_ERR("vsi->pestat is NULL\n");
+		goto end;
+	}
+
+	sxe2_kstop_hw_stats_timer(vsi);
+
+	for (idx = 0; idx < dev->num_vfs; idx++) {
+		if (dev->vf_gather_stats_buf[idx].va) {
+			dma_free_coherent(rdma_dev->rdma_func->hw.device,
+					  dev->vf_gather_stats_buf[idx].size,
+					  dev->vf_gather_stats_buf[idx].va,
+					  dev->vf_gather_stats_buf[idx].pa);
+			dev->vf_gather_stats_buf[idx].va = NULL;
+		}
+	}
+
+	dma_free_coherent(vsi->pestat->hw->device,
+			  vsi->pestat->gather_info.stats_buff_mem.size,
+			  vsi->pestat->gather_info.stats_buff_mem.va,
+			  vsi->pestat->gather_info.stats_buff_mem.pa);
+	vsi->pestat->gather_info.stats_buff_mem.va = NULL;
+
+	kfree(vsi->pestat);
+	vsi->pestat = NULL;
+
+end:
+	return;
+}
+
+#ifdef ALLOC_HW_STATS_V1
+struct rdma_hw_stats *sxe2_kalloc_hw_port_stats(struct ib_device *ibdev,
+						u8 port_num)
+#else
+struct rdma_hw_stats *sxe2_kalloc_hw_port_stats(struct ib_device *ibdev,
+						u32 port_num)
+#endif
+{
+	struct sxe2_rdma_device *rdma_dev = to_dev(ibdev);
+	struct sxe2_rdma_ctx_dev *dev = &rdma_dev->rdma_func->ctx_dev;
+	int num_counters = dev->hw_attrs.max_stat_idx;
+	unsigned long lifespan = STATS_DEFAULT_LIFESPAN;
+
+#ifdef ALLOC_HW_STATS_STRUCT_V1
+	return rdma_alloc_hw_stats_struct(sxe2_rdma_hw_stat_names, num_counters,
+					  lifespan);
+#else
+	return rdma_alloc_hw_stats_struct(sxe2_rdma_hw_stat_descs, num_counters,
+					  lifespan);
+#endif
+}
+
+#ifdef GET_HW_STATS_V1
+int sxe2_kget_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+		       u8 port_num, int index)
+#else
+int sxe2_kget_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+		       u32 port_num, int index)
+#endif
+{
+	struct sxe2_rdma_device *rdma_dev = to_dev(ibdev);
+	struct sxe2_rdma_dev_hw_stats *hw_stats =
+		&rdma_dev->vsi.pestat->hw_stats;
+
+	if (rdma_dev->rdma_func->ctx_dev.privileged) {
+		sxe2_kgather_stats_mq_cmd(&rdma_dev->rdma_func->ctx_dev,
+					  rdma_dev->vsi.pestat, true);
+	} else {
+		sxe2_kgather_vf_stats_mq_cmd(&rdma_dev->rdma_func->ctx_dev,
+					     rdma_dev->vsi.pestat, true);
+	}
+
+	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
+	return stats->num_counters;
+}
+
+int sxe2_kget_rdma_features(struct sxe2_rdma_device *rdma_dev)
+{
+	struct sxe2_rdma_ctx_dev *dev = NULL;
+	int ret_code = 0;
+	u32 hw_version = 0;
+	u32 fw_version = 0;
+
+	dev = &rdma_dev->rdma_func->ctx_dev;
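+	/*
+	 * The version words are decoded with the GENMASKs from
+	 * sxe2_drv_stats.h: hw_version carries major[31:16]/minor[15:0],
+	 * fw_version carries main[31:24]/sub[23:16]/fix[15:8]/build[7:0].
+	 * An hw_version of 0x00020001, for example, decodes as major 2,
+	 * minor 1.
+	 */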
+ + hw_version = + SXE2_BAR_READ_32(dev->hw_regs[RDMA_FEATURE_HW_VERSION_LOW]); + dev->feature_info[SXE2_RDMA_HW_MQ_MAJOR_VERSION] = + FIELD_GET(SXE2_RDMA_HW_MAJOR_VERSION_BITS, (s64)hw_version); + dev->feature_info[SXE2_RDMA_HW_MQ_MINOR_VERSION] = + FIELD_GET(SXE2_RDMA_HW_MINOR_VERSION_BITS, (s64)hw_version); + DRV_RDMA_LOG_DEV_INFO( + "Get Features:hw_major_version %#x, hw_minor_version %#x\n", + dev->feature_info[SXE2_RDMA_HW_MQ_MAJOR_VERSION], + dev->feature_info[SXE2_RDMA_HW_MQ_MINOR_VERSION]); + + dev->feature_info[SXE2_RDMA_HW_MODEL_VERSION_USED] = + SXE2_BAR_READ_32(dev->hw_regs[RDMA_FEATURE_HW_VERSION_HIGH]); + DRV_RDMA_LOG_DEV_INFO( + "Get Features:hw_model_used %#x\n", + dev->feature_info[SXE2_RDMA_HW_MODEL_VERSION_USED]); + + dev->feature_info[SXE2_RDMA_ENDPT_TRK_EN] = + SXE2_BAR_READ_32(dev->hw_regs[RDMA_FEATURE_ENDPT_TRK]); + DRV_RDMA_LOG_DEV_INFO("Get Features:endpt_trk_en %#x\n", + dev->feature_info[SXE2_RDMA_ENDPT_TRK_EN]); + + dev->feature_info[SXE2_RDMA_QSETS_MAX_NUMBER] = + SXE2_BAR_READ_32(dev->hw_regs[RDMA_FEATURE_QSETS_MAX]); + DRV_RDMA_LOG_DEV_INFO("Get Features:qsets_max_num %#x\n", + dev->feature_info[SXE2_RDMA_QSETS_MAX_NUMBER]); + + fw_version = SXE2_BAR_READ_32(dev->hw_regs[RDMA_FEATURE_FW_VERSION]); + dev->feature_info[SXE2_RDMA_FW_MAIN_VERSION] = + FIELD_GET(SXE2_RDMA_FW_MAIN_VERSION_BITS, (s64)fw_version); + dev->feature_info[SXE2_RDMA_FW_SUB_VERSION] = + FIELD_GET(SXE2_RDMA_FW_SUB_VERSION_BITS, (s64)fw_version); + dev->feature_info[SXE2_RDMA_FW_FIX_VERSION] = + FIELD_GET(SXE2_RDMA_FW_FIX_VERSION_BITS, (s64)fw_version); + dev->feature_info[SXE2_RDMA_FW_BUILD_NUMBER] = + FIELD_GET(SXE2_RDMA_FW_BUILD_NUMBER_BITS, (s64)fw_version); + DRV_RDMA_LOG_DEV_INFO( + "Get Features:fw_main_version %#x, fw_sub_version\n" + "\t%#x, fw_fix_version %#x, fw_build_number %#x\n", + dev->feature_info[SXE2_RDMA_FW_MAIN_VERSION], + dev->feature_info[SXE2_RDMA_FW_SUB_VERSION], + dev->feature_info[SXE2_RDMA_FW_FIX_VERSION], + dev->feature_info[SXE2_RDMA_FW_BUILD_NUMBER]); + dev->fw_ver = fw_version; + + return ret_code; +} diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..07a9b8ed9480f1ce3e1e40e4b8a328eb439268bf --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_stats.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DRV_STATS_H +#define SXE2_DRV_STATS_H + +#include +#include "sxe2_drv_rdma_common.h" + +#define SXE2_STATS_BUF_ALIGN 1 +#define STATS_VF_RX_BUF_START_8BYTE 16 +#define STATS_VF_TX_BUF_ALL_BYTE 128 +#define STATS_VF_RX_BUF_ALL_BYTE 136 + +#define SXE2_RDMA_HW_MAJOR_VERSION_BITS \ + GENMASK(31, 16) +#define SXE2_RDMA_HW_MINOR_VERSION_BITS \ + GENMASK(15, 0) +#define SXE2_RDMA_FW_BUILD_NUMBER_BITS GENMASK(7, 0) +#define SXE2_RDMA_FW_FIX_VERSION_BITS GENMASK(15, 8) +#define SXE2_RDMA_FW_SUB_VERSION_BITS GENMASK(23, 16) +#define SXE2_RDMA_FW_MAIN_VERSION_BITS GENMASK(31, 24) + +#pragma pack(1) +struct mq_wqe_gather_stats { + u64 rsv0; + u64 rsv1; + u64 rsv2; + u32 statistics_instance_index : 1; + u32 rsv3 : 31; + u32 op : 6; + u32 rsv4 : 22; + u32 use_rcms_func_index : 1; + u32 use_statistics_instance : 1; + u32 rsv5 : 1; + u32 wqe_valid : 1; + u64 physical_buffer_address; + u32 rcms_fcn_index : 6; + u64 rsv6 : 58; + u64 rsv7; + u64 rsv8; +}; +#pragma pack(0) + +struct sxe2_rdma_gather_stats { + u64 val[SXE2_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct sxe2_rdma_gather_stats_vf { + u64 val[SXE2_GATHER_STATS_BUF_VF_SIZE / sizeof(u64)]; +}; + +struct sxe2_rdma_vsi_stats_info { + struct sxe2_rdma_vsi_pestat *pestat; + u16 fcn_id; +}; + +void sxe2_kupdate_vsi_stats(struct sxe2_rdma_ctx_vsi *vsi); + +void sxe2_kprocess_mq_stats(struct sxe2_mq_request *mq_request); + +int sxe2_kgather_stats(struct sxe2_mq_ctx *mq, + struct sxe2_rdma_stats_gather_info *info, u64 scratch); + +int sxe2_kgather_stats_mq_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vsi_pestat *pestat, bool wait); + +int sxe2_kgather_pf_for_vf_stats_val( + struct sxe2_rdma_vchnl_dev *vc_dev, u32 stats_req_type, + struct sxe2_rdma_gather_stats_vf *gather_stats_resp); + +int sxe2_kgather_vf_stats_mq_cmd(struct sxe2_rdma_ctx_dev *dev, + struct sxe2_rdma_vsi_pestat *pestat, + bool wait); + +int sxe2_kinit_vsi_stats(struct sxe2_rdma_device *rdma_dev); + +void sxe2_kfree_vsi_stats(struct sxe2_rdma_device *rdma_dev); + +#ifdef ALLOC_HW_STATS_V1 +struct rdma_hw_stats *sxe2_kalloc_hw_port_stats(struct ib_device *ibdev, + u8 port_num); +#else +struct rdma_hw_stats *sxe2_kalloc_hw_port_stats(struct ib_device *ibdev, + u32 port_num); +#endif + +#ifdef GET_HW_STATS_V1 +int sxe2_kget_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u8 port_num, int index); +#else +int sxe2_kget_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index); +#endif + +int sxe2_kget_rdma_features(struct sxe2_rdma_device *rdma_dev); + +#ifdef SXE2_CFG_DEBUG +int drv_rdma_debug_stats_add(struct sxe2_rdma_device *rdma_dev); + +int drv_rdma_stats_overflow_inject_debugfs_add( + struct sxe2_rdma_device *rdma_dev); +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats_debugfs.c b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..b473cab6136d35c2e38ef630f1833311ebdd8732 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_drv_stats_debugfs.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. 
+ *
+ * @file: sxe2_drv_stats_debugfs.c
+ * @author: sxe2rdma
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/debugfs.h>
+#include "sxe2_compat.h"
+#ifdef NOT_SUPPORT_AUXILIARY_BUS
+#include "auxiliary_bus.h"
+#else
+#include <linux/auxiliary_bus.h>
+#endif
+#include "sxe2_drv_stats.h"
+#include "sxe2_drv_mq.h"
+#include "sxe2_drv_hw.h"
+#include "sxe2_drv_rdma_log.h"
+#include "sxe2_drv_rdma_debugfs.h"
+
+#ifdef SXE2_CFG_DEBUG
+
+#ifdef ALLOC_HW_STATS_STRUCT_V1
+extern const char *const sxe2_rdma_hw_stat_names[];
+#else
+extern const struct rdma_stat_desc sxe2_rdma_hw_stat_descs[];
+#endif
+
+#define SECOND_TO_MS(val) (1000 * (val))
+#define MS_TO_SECOND(val) ((val) / 1000)
+
+static ssize_t drv_rdma_stats_read(struct file *filp, char __user *buf,
+				   size_t count, loff_t *off)
+{
+	ssize_t ret = 0;
+	size_t len_total = 0;
+	char *rsp = NULL;
+	char *rsp_end;
+	int i = 0;
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rdma_ctx_dev *dev;
+	struct sxe2_rdma_vsi_pestat *devstat;
+	struct sxe2_rdma_dev_hw_stats *hw_stats;
+
+	if (*off != 0)
+		goto end;
+
+	rdma_dev = (struct sxe2_rdma_device *)filp->private_data;
+	if (!rdma_dev) {
+		ret = -ENXIO;
+		DRV_RDMA_LOG_ERROR(
+			"STATS DEBUGFS:find dev struct from private_data failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+
+	dev = &rdma_dev->rdma_func->ctx_dev;
+	devstat = rdma_dev->vsi.pestat;
+	hw_stats = &devstat->hw_stats;
+
+	rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE,
+		      GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"STATS DEBUGFS:stats rsp kmalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	if (dev->privileged)
+		sxe2_kgather_stats_mq_cmd(dev, devstat, true);
+	else
+		sxe2_kgather_vf_stats_mq_cmd(dev, devstat, true);
+
+	len_total += dbg_vsnprintf(rsp_end, len_total, "STATS :\n");
+	for (i = 0; i < SXE2_RDMA_HW_STAT_INDEX_MAX; i++) {
+#ifdef ALLOC_HW_STATS_STRUCT_V1
+		len_total +=
+			dbg_vsnprintf(rsp_end, len_total, "%d. %s: %#llx\n", i,
+				      sxe2_rdma_hw_stat_names[i],
+				      hw_stats->stats_val[i]);
+#else
+		len_total +=
+			dbg_vsnprintf(rsp_end, len_total, "%d. 
%s: %#llx\n", i,
+				      sxe2_rdma_hw_stat_descs[i].name,
+				      hw_stats->stats_val[i]);
+#endif
+	}
+
+	ret = simple_read_from_buffer(buf, count, off, rsp, len_total);
+	if (ret < 0)
+		DRV_RDMA_LOG_DEV_ERR("STATS DEBUGFS:simple read error %zd\n",
+				     ret);
+
+	kfree(rsp);
+
+end:
+	return ret;
+}
+
+static const struct file_operations sxe2_rdma_gather_stats_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = drv_rdma_stats_read,
+};
+
+int drv_rdma_debug_stats_add(struct sxe2_rdma_device *rdma_dev)
+{
+	int ret = 0;
+	struct sxe2_rdma_handler *hdl = rdma_dev->hdl;
+
+	if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR(
+			"STATS DEBUGFS:debugfs root dir not exist, ret (%d)\n",
+			ret);
+		goto end;
+	}
+
+	if (!hdl->stats_debugfs) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("stats debugfs dir not exist, ret (%d)\n",
+				     ret);
+		goto end;
+	}
+
+	debugfs_create_file("hw_stats", SXE2_DEBUG_FILE_ONLY_READ,
+			    hdl->stats_debugfs, rdma_dev,
+			    &sxe2_rdma_gather_stats_fops);
+
+end:
+	return ret;
+}
+
+#ifdef SXE2_SUPPORT_INJECT
+enum {
+	STATS_INJECT_OVERFLOW_ENABLE,
+	STATS_INJECT_GATHER_INTERVAL,
+};
+
+static char *stats_inject_fields[] = {
+	[STATS_INJECT_OVERFLOW_ENABLE] = "overflow_enable",
+	[STATS_INJECT_GATHER_INTERVAL] = "gather_interval",
+};
+
+STATIC ssize_t drv_stats_overflow_inject_read(struct file *filp,
+					      char __user *buf, size_t count,
+					      loff_t *pos)
+{
+	ssize_t ret;
+	char *rsp = NULL;
+	char *rsp_end;
+	size_t len = 0;
+	struct sxe2_rdma_device *rdma_dev;
+
+	rdma_dev = filp->private_data;
+
+	rsp = kzalloc(sizeof(char) * SXE2_DEBUG_DUMP_BUF_SIZE,
+		      GFP_KERNEL);
+	if (!rsp) {
+		ret = -ENOMEM;
+		DRV_RDMA_LOG_DEV_ERR(
+			"STATS DEBUGFS:stats status rsp kmalloc failed, ret %zd\n",
+			ret);
+		goto end;
+	}
+	rsp_end = rsp;
+
+	len += dbg_vsnprintf(rsp_end, len, "stats inject info\n");
+	len += dbg_vsnprintf(rsp_end, len, "gather_interval: %u\n",
+			     MS_TO_SECOND(rdma_dev->vsi.pestat->timer_delay));
+
+	ret = simple_read_from_buffer(buf, count, pos, rsp, len);
+	if (ret < 0) {
+		DRV_RDMA_LOG_DEV_ERR("STATS DEBUGFS:simple read error %zd\n",
+				     ret);
+	}
+
+	kfree(rsp);
+
+end:
+	return ret;
+}
+
+STATIC ssize_t drv_stats_overflow_inject_write(struct file *filp,
+					       const char __user *buf,
+					       size_t count, loff_t *pos)
+{
+	ssize_t ret;
+	char in_buf[64] = { 0 };
+	u32 i;
+	u64 temp_value;
+	int argc;
+	char *argv[DEBUG_ARGV_COUNT_MAX] = { 0 };
+	struct sxe2_rdma_device *rdma_dev;
+	struct sxe2_rdma_vsi_pestat *devstat;
+	struct sxe2_rdma_dev_hw_stats *hw_stats;
+
+	rdma_dev = filp->private_data;
+	devstat = rdma_dev->vsi.pestat;
+	hw_stats = &devstat->hw_stats;
+
+	if (count >= sizeof(in_buf)) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("input longer than %zu bytes\n",
+				     sizeof(in_buf) - 1);
+		goto end;
+	}
+
+	if (copy_from_user(in_buf, buf, count)) {
+		ret = -EFAULT;
+		goto end;
+	}
+
+	argc = 0;
+	memset(argv, 0, sizeof(*argv) * DEBUG_ARGV_COUNT_MAX);
+	ret = split_command(in_buf, &argc, argv);
+	if (ret)
+		goto end;
+
+	if (argc != DEBUG_PARA_CONT2) {
+		ret = -EINVAL;
+		DRV_RDMA_LOG_DEV_ERR("invalid param nums\n");
+		goto end;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(stats_inject_fields); i++) {
+		if (!strncmp(argv[0], stats_inject_fields[i],
+			     strlen(stats_inject_fields[i]))) {
+			break;
+		}
+	}
+
+	ret = kstrtoull(argv[1], 10, &temp_value);
+	if (ret != 0) {
+		DRV_RDMA_LOG_DEV_ERR("get param value failed, ret (%zd)\n",
+				     ret);
+		goto end;
+	}
+
+	DRV_RDMA_LOG_DEV_INFO("inject field i:%u, to temp_value:%llx\n", i,
+			      temp_value);
+
+	switch (i) {
+	case STATS_INJECT_OVERFLOW_ENABLE:
+		if (temp_value) {
+			for (i = 0; i < SXE2_RDMA_HW_STAT_INDEX_MAX; i++) {
+				memset(&(hw_stats->stats_val[i]), 0xFF,
+				       sizeof(u64));
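+				/* The 0xFF fill above saturates each 64-bit
+				 * counter to U64_MAX, so the next gathered
+				 * delta wraps and simulates counter overflow.
+				 */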
+ }; + } + break; + case STATS_INJECT_GATHER_INTERVAL: + if (temp_value) + devstat->timer_delay = (u32)SECOND_TO_MS(temp_value); + break; + default: + DRV_RDMA_LOG_DEV_WARN("invalid index %d\n", i); + ret = -EINVAL; + goto end; + } + + *pos = (loff_t)count; + ret = (ssize_t)count; + +end: + return ret; +} + +static const struct file_operations stats_overflow_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = drv_stats_overflow_inject_read, + .write = drv_stats_overflow_inject_write, +}; + +int drv_rdma_stats_overflow_inject_debugfs_add(struct sxe2_rdma_device *rdma_dev) +{ + int ret = 0; + struct sxe2_rdma_handler *hdl = rdma_dev->hdl; + + if (!sxe2_rdma_debugfs_get_dev_root(rdma_dev)) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("root dir not exist, ret (%d)\n", ret); + goto end; + } + + if (!hdl->stats_debugfs) { + ret = -EINVAL; + DRV_RDMA_LOG_DEV_ERR("stats debugfs dir not exist, ret (%d)\n", + ret); + goto end; + } + + debugfs_create_file("stats_inject", SXE2_DEBUG_FILE_READ_WRITE, + hdl->stats_debugfs, rdma_dev, + &stats_overflow_inject_fops); + +end: + return ret; +} +#endif + +#endif diff --git a/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_rdma_ifc.h b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_rdma_ifc.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8cf985f84845f1f2d6678f041aa8ef00d80ac4 --- /dev/null +++ b/drivers/infiniband/hw/sxe2rdma/rdma/sxe2_rdma_ifc.h @@ -0,0 +1,1486 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, sxe2rdma Technologies Co., Ltd. + * + * @file: sxe2_rdma_ifc.h + * @author: sxe2rdma + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_RDMA_IFC_H__ +#define __SXE2_RDMA_IFC_H__ + +#include "sxe2_mbx_public.h" + +typedef unsigned long long u64; +typedef unsigned int u32; +typedef unsigned short u16; +typedef unsigned char u8; + +#define SXE2_PF_DB_PROC_BAR_OFFSET 0x240000 +#define SXE2_VF_DB_PROC_BAR_OFFSET 0xBE00 + +#define SXE2_GLINT_CTRL_BAR_OFFSET 0x260000 + +#define PF_SXE2_RCMS_MAN_BAR_OFFSET 0x250000 +#define VF_SXE2_RCMS_MAN_BAR_OFFSET 0x22000 + +#define PF_QSET_APPLY_REQ_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6000) +#define PF_QSET_APPLY_RESP_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6004) +#define PF_QSET_QUERY_REQ_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6008) +#define PF_QSET_QUERY_RESP_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x600c) +#define PF_QSET_RELEASE_REQ_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6010) +#define PF_QSET_RELEASE_RESP_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6014) +#define PF_QSET_QP_BIND_REQ_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x6018) +#define PF_QSET_QP_BIND_RESP_OFFSET (SXE2_PF_DB_PROC_BAR_OFFSET + 0x601c) + +#define GLINT_CTRL_PF_INT_AEQCTL_OFFSET (SXE2_GLINT_CTRL_BAR_OFFSET + 0x80) +#define GLINT_CTRL_PF_INT_CEQCTL_OFFSET(_I) \ + (0x26492c + ((_I)*4)) +#define PF_GLINT_CTRL_DYN_CTL_OFFSET(_I) (0x269530 + ((_I)*4)) +#define VF_GLINT_CTRL_DYN_CTL_OFFSET(_I) (0x2800 + ((_I)*4)) + +#define VF_QSET_APPLY_REQ_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x0) +#define VF_QSET_APPLY_RESP_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x4) +#define VF_QSET_QUERY_REQ_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x8) +#define VF_QSET_QUERY_RESP_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0xC) +#define VF_QSET_RELEASE_REQ_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x10) +#define VF_QSET_RELEASE_RESP_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x14) +#define VF_QSET_QP_BIND_REQ_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x18) +#define VF_QSET_QP_BIND_RESP_OFFSET (SXE2_VF_DB_PROC_BAR_OFFSET + 0x1c) + +#define 
SXE2_PF_MQ_BAR_OFFSET (0x250000 + 0xE000) +#define SXE2_VF_MQ_BAR_OFFSET (0x22000 + 0x300) + +#define PF_MQC_ADDR_HIGH_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x0) +#define PF_MQC_ADDR_LOW_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x4) +#define PF_MQC_ADDR_VLD_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x8) +#define PF_MQ_STATUS_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0xC) +#define PF_MQ_DB_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x10) +#define PF_MQ_WQE_DONE_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x18) +#define PF_MQ_ERRCODES_OFFEST (SXE2_PF_MQ_BAR_OFFSET + 0x1C) + +#define VF_MQC_ADDR_HIGH_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x0) +#define VF_MQC_ADDR_LOW_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x4) +#define VF_MQC_ADDR_VLD_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x8) +#define VF_MQ_STATUS_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0xC) +#define VF_MQ_DB_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x10) +#define VF_MQ_WQE_DONE_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x18) +#define VF_MQ_ERRCODES_OFFEST (SXE2_VF_MQ_BAR_OFFSET + 0x1C) +#define PF_RCMS_SPT_CACHE_FAST_INVALID_MASK_OFFSET \ + (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0x90) +#define VF_RCMS_SPT_CACHE_FAST_INVALID_MASK_OFFSET \ + (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0x90) +#define PF_RCMS_SPT_CACHE_FAST_INVALID_IDX_OFFSET \ + (PF_SXE2_RCMS_MAN_BAR_OFFSET + 0x94) +#define VF_RCMS_SPT_CACHE_FAST_INVALID_IDX_OFFSET \ + (VF_SXE2_RCMS_MAN_BAR_OFFSET + 0x94) + +#define SXE2_PF_TXRX_COM_BAR_OFFSET 0x1F0000 +#define SXE2_VF_TXRX_COM_BAR_OFFSET 0x8C00 + +#define PF_RDMA_CONFIG_PKEY_OFFSET \ + (SXE2_PF_TXRX_COM_BAR_OFFSET + 0x4D800 + 0x1000) +#define VF_RDMA_CONFIG_PKEY_OFFSET (SXE2_VF_TXRX_COM_BAR_OFFSET + 0x0) + +struct sxe2_mqc { + u64 enable_fine_grained_timers : 1; + u64 disable_FPDU_packing : 1; + u64 RoCEv2_RTO_policy : 1; + u64 protocol_used : 2; + u64 rsv0 : 3; + u64 SQ_size : 4; + u64 rsv1 : 12; + u64 struct_version : 8; + u64 num_CEQs_per_VF : 8; + u64 rsv2 : 24; + u64 rsv3 : 9; + u64 SQ_base : 55; + u64 rcms_profile_type : 3; + u64 remote_endpoint_trk_en : 1; + u64 rdpa_assist : 4; + u64 cache_line_64byte_en : 1; + u64 rsv4 : 23; + u64 PE_enabled_vf_count : 6; + u64 rsv5 : 26; + u64 QP_completion_context; + u64 hw_minor_version : 16; + u64 hw_major_version : 16; + u64 rsv6 : 16; + u64 dcqcn_min_rate : 8; + u64 dcqcn_min_dec_factor : 8; + u64 rsv7; + u64 dcqcn_T : 16; + u64 rsv8 : 16; + u64 cc_hai_factor : 16; + u64 cc_rai_factor : 16; + u64 dcqcn_B : 25; + u64 dcqcn_F : 3; + u64 rsv9 : 3; + u64 cc_cfg_valid : 1; + u64 dcqcn_rreduce_mperiod : 32; +}; + +enum sxe2_hw_mq_op { + SXE2_MQ_OP_CREATE_QP = 0x00, + SXE2_MQ_OP_MODIFY_QP = 0x01, + SXE2_MQ_OP_DESTROY_QP = 0x02, + SXE2_MQ_OP_CREATE_CQ = 0x03, + SXE2_MQ_OP_MODIFY_CQ = 0x04, + SXE2_MQ_OP_DESTROY_CQ = 0x05, + SXE2_MQ_OP_ALLOC_MR_key = 0x09, + SXE2_MQ_OP_REG_MR = 0x0a, + SXE2_MQ_OP_QUERY_MR_key = 0x0b, + SXE2_MQ_OP_DEALLOC_MR_key = 0x0d, + SXE2_MQ_OP_MANAGE_PBLE_BP = 0x10, + SXE2_MQ_OP_QUERY_QP = 0x13, + SXE2_MQ_OP_MANAGE_RCMS_PM_FUNC_TABLE = 0x15, + SXE2_MQ_OP_CREATE_CEQ = 0x16, + SXE2_MQ_OP_DESTROY_CEQ = 0x18, + SXE2_MQ_OP_CREATE_AEQ = 0x19, + SXE2_MQ_OP_DESTROY_AEQ = 0x1b, + SXE2_MQ_OP_CREATE_ADDR_HANDLE = 0x1c, + SXE2_MQ_OP_MODIFY_ADDR_HANDLE = 0x1d, + SXE2_MQ_OP_DESTROY_ADDR_HANDLE = 0x1e, + SXE2_MQ_OP_UPDATE_FPT = 0x1f, + SXE2_MQ_OP_QUERY_FPM_VAL = 0x20, + SXE2_MQ_OP_COMMIT_FPM_VAL = 0x21, + SXE2_MQ_OP_NOP = 0x24, + SXE2_MQ_OP_GATHER_STATS = 0x2e, + SXE2_MQ_OP_CREATE_SRQ = 0x30, + SXE2_MQ_OP_MODIFY_SRQ = 0x31, + SXE2_MQ_OP_DESTROY_SRQ = 0x32, + SXE2_MQ_OP_DEREGISTER_MR = 0x33, + SXE2_MQ_OP_MODIFY_CEQ = 0x34, + SXE2_MQ_OP_QUERY_CEQ = 0x35, + SXE2_MQ_OP_MODIFY_AEQ = 0x36, + 
SXE2_MQ_OP_QUERY_AEQ = 0x37, + SXE2_MQ_OP_QUERY_CQ = 0x38, + SXE2_MQ_OP_QUERY_SRQ = 0x39, + SXE2_MQ_OP_QUERY_MR = 0x3b, + SXE2_MQ_OP_MAX = 0x50, +}; + +struct mcq_cqe { + u64 rsv0; + u64 MQ_completion_context; + u64 operation_return_value : 32; + u64 rsv1 : 32; + u64 minor_error_code : 16; + u64 major_error_code : 16; + u64 MQ_desc_index : 15; + u64 rsv2 : 8; + u64 error : 1; + u64 op : 6; + u64 SQ : 1; + u64 CQE_valid : 1; + u64 rsv3; + u64 CQE_timestamp; + u64 rsv4; + u64 rsv5; +}; + +struct sxe2_cq_wqe { + u64 pbl_index; + u64 dbr_addr; + + u64 eqn : 10; + u64 log_dbr_size : 5; + u64 log_cq_size : 5; + u64 rsv1 : 1; + u64 scqe_break_moderation_en : 1; + u64 oi : 1; + u64 sw_owner_bit : 1; + u64 sw_status : 4; + u64 rsv2 : 4; + u64 log_page_size : 5; + u64 page_offset : 15; + u64 pbl_mode : 2; + u64 TPH_value : 8; + u64 rsv3 : 2; + + u64 rsv4 : 32; + u64 op : 6; + u64 cqn : 19; + u64 rsv5 : 6; + u64 WQE_Valid : 1; + + u64 rsv6 : 8; + u64 physical_buffer_address : 56; + + u64 cq_max_count : 16; + u64 cq_period : 12; + u64 TPH_en : 1; + u64 rsv7 : 3; + u64 vsi_index : 10; + u64 rsv8 : 1; + u64 rsv9 : 2; + u64 rsv10 : 19; + + u64 rsv11; + u64 rsv12; +}; + +struct drv_rdma_soft_cqc { + u64 pbl_index; + u64 dbr_addr; + + u64 eqn : 10; + u64 log_dbr_size : 5; + u64 log_cq_size : 5; + u64 rsv1 : 1; + u64 scqe_break_moderation_en : 1; + u64 oi : 1; + u64 sw_owner_bit : 1; + u64 sw_status : 4; + u64 rsv2 : 4; + u64 log_page_size : 5; + u64 page_offset : 15; + u64 pbl_mode : 2; + u64 TPH_value : 8; + u64 rsv3 : 2; + + u64 cq_max_count : 16; + u64 cq_period : 12; + u64 TPH_en : 1; + u64 rsv4 : 3; + u64 vsi_index : 10; + u64 rsv6 : 1; + u64 rsv7 : 21; +}; + +struct drv_rdma_hw_cqc { + u32 hw_owner_bit : 1; + u32 st : 4; + u32 cmd : 1; + u32 cmd_sn : 2; + u32 hw_status : 1; + u32 last_sol_index_en : 1; + u32 fid : 12; + u32 hw_eqn : 10; + u32 last_notified_index : 24; + u32 last_solicited_index_l : 8; + u32 last_solicited_index_h : 16; + u32 consumer_counter_l : 16; + u32 consumer_counter_h : 8; + u32 producer_counter : 24; + + u32 page_addr_odd_l; + u32 page_addr_odd_h; + + u32 page_addr_even_l; + u32 page_addr_even_h; +}; + +struct sxe2_rdma_cqc { + struct drv_rdma_soft_cqc scqc; + struct drv_rdma_hw_cqc hcqc; +}; + +#define SXE2_OP_TYPE_RDMA_WRITE 0x00 +#define SXE2_OP_TYPE_RDMA_READ 0x01 +#define SXE2_OP_TYPE_RSVD1 0x02 +#define SXE2_OP_TYPE_SEND 0x03 +#define SXE2_OP_TYPE_SEND_INV 0x04 +#define SXE2_OP_TYPE_SEND_SOL 0x05 +#define SXE2_OP_TYPE_SEND_SOL_INV 0x06 +#define SXE2_OP_TYPE_RSVD2 0x07 +#define SXE2_OP_TYPE_BIND_MW 0x08 +#define SXE2_OP_TYPE_FAST_REG_MR 0x09 +#define SXE2_OP_TYPE_LOCAL_INV 0x0a +#define SXE2_OP_TYPE_RDMA_READ_INV 0x0b +#define SXE2_OP_TYPE_NOP 0x0c +#define SXE2_OP_TYPE_RDMA_WRITE_SOL 0x0d +#define SXE2_OP_TYPE_RSVD3 0x0e + +struct sxe2_qpc { + __u64 ack_timeout : 5; + __u64 retry_mode : 1; + __u64 credit_en : 1; + __u64 comm_est : 1; + __u64 rqe_lwm : 16; + __u64 rsv0 : 4; + __u64 qp_state : 4; + __u64 dqpn : 24; + __u64 qp_type : 8; + + __u64 llwqe_page_index : 12; + __u64 llwqe_mode_enable : 1; + __u64 rsv1 : 8; + __u64 retry_resp_op_sel : 1; + __u64 log_rtm : 2; + __u64 log_msg_max : 5; + __u64 pmtu : 3; + __u64 page_offset : 13; + __u64 rsv2 : 11; + __u64 log_rq_size : 4; + __u64 log_sq_size : 4; + + __u64 cqn_snd : 19; + __u64 rsv3 : 5; + __u64 log_rq_stride : 2; + __u64 rsv4 : 1; + __u64 log_page_size : 5; + __u64 cqn_rcv : 19; + __u64 rsv5 : 5; + __u64 min_rnr_nak : 5; + __u64 rsv6 : 2; + __u64 fl : 1; + + __u64 srqn : 18; + __u64 rsv7 : 3; + __u64 bucket_type : 
3; + __u64 stat_rate : 4; + __u64 rq_type : 3; + __u64 ipv4 : 1; + __u64 pd : 18; + __u64 rsv8 : 6; + __u64 log_rra_max : 4; + __u64 log_sra_max : 4; + + __u64 send_start_psn : 24; + __u64 tmo_retry : 3; + __u64 udpriv_cqenable : 1; + __u64 rnr_retry : 3; + __u64 rsv9 : 1; + __u64 rcv_start_psn : 24; + __u64 ack_mode : 1; + __u64 rsv10 : 7; + + __u64 pkey_index : 5; + __u64 rq_flush_flag : 1; + __u64 sq_flush_flag : 1; + __u64 rsv46 : 9; + __u64 log_ack_req_freq : 4; + __u64 rsv11 : 4; + __u64 hop_limit : 8; + __u64 q_key : 32; + + __u64 smac_low : 32; + __u64 smac_high : 16; + __u64 src_port_num : 16; + + __u64 rmac_low : 32; + __u64 rmac_high : 16; + __u64 dest_port_num : 16; + + __u64 qp_completion_contex; + + __u64 sq_tph_value : 8; + __u64 rq_tph_value : 8; + __u64 sq_tph_en : 1; + __u64 rq_tph_en : 1; + __u64 xmit_tph_en : 1; + __u64 rcv_tph_en : 1; + __u64 rsv12 : 4; + __u64 cnp_dscp : 6; + __u64 cnp_ecn : 2; + __u64 flow_label : 20; + __u64 dscp : 6; + __u64 ecn : 2; + __u64 rsv13 : 4; + + __u64 dest_ipaddr3 : 32; + __u64 dest_ipaddr2 : 32; + + __u64 dest_ipaddr1 : 32; + __u64 dest_ipaddr0 : 32; + + __u64 local_ipaddr_3 : 32; + __u64 local_ipaddr_2 : 32; + + __u64 local_ipaddr_1 : 32; + __u64 local_ipaddr_0 : 32; + + __u64 dbr_addr; + + __u64 pbl_pointer; + + __u64 tpid_index : 2; + __u64 rsv14 : 4; + __u64 wq_acces_mode : 1; + __u64 use_statistics_intance : 1; + __u64 statistics_instance_index : 1; + __u64 rsv15 : 6; + __u64 insert_vlan_tag : 1; + __u64 vlan_tag : 16; + __u64 src_vsi : 10; + __u64 rsv16 : 6; + __u64 dispatch_min_unit : 14; + __u64 dispatch_mode : 1; + __u64 rsv17 : 1; + __u32 hw_sq_wqebb_counter : 16; + __u32 txw_sq_retry_end_ptr : 16; + + __u32 wqe_page_pa1_low; + + __u32 wqe_page_pa1_high; + + __u32 wqe_page_pa2_low; + + __u32 wqe_page_pa2_high; + + __u32 wqe_page_pa_vld : 2; + __u32 wqe_page_pa_sel : 1; + __u32 txw_resp_retry_flag : 1; + __u32 wqe_send_bk_vld : 1; + __u32 wqe_send_sge_addr : 4; + __u32 txw_resp_occupty : 1; + __u32 wqe_polarity : 1; + __u32 wqe_rx_sqretry : 1; + __u32 txw_sq_occupty : 1; + __u32 txw_resp_break_all : 1; + __u32 txw_ll_type : 1; + __u32 txw_resp_go_back_n : 1; + __u32 wqe_rx_ack_retry : 1; + __u32 wqe_tx_eng_retry : 1; + __u32 wqe_resp_bk_valid : 1; + __u32 txw_sq_retry_flag : 1; + __u32 txw_nxt_need_irrl_num : 4; + __u32 txw_ll_wqe_drop : 1; + __u32 txw_sq_retry_type : 1; + __u32 txw_sq_retry_start_psn_low : 6; + + __u32 txw_sq_retry_start_psn : 18; + __u32 txw_resp_retry_finish_flag : 1; + __u32 rsv18 : 3; + __u32 txw_send_try_flag : 1; + __u32 wqe_proc_qp_state : 1; + __u32 stat_rate_already_init : 1; + __u32 last_wqe_already_sent : 1; + __u32 wqe_nak_retry_cnt : 3; + __u32 wqe_rnr_retry_cnt : 3; + + __u32 wqe_send_offest; + + __u32 txw_resp_retry_cnt : 8; + __u32 txw_resp_retry_end_ptr : 8; + __u32 txw_resp_retry_finish_back_ptr : 8; + __u32 txw_resp_retry_exit_ptr : 8; + + __u32 wqe_ssn_send : 24; + __u32 wqe_ssn_nocredit_cnt_low : 8; + + __u32 wqe_ssn_nocredit_cnt_high : 16; + __u32 wqe_next_send_psn_low : 16; + + __u32 wqe_next_send_psn_high : 8; + __u32 respcache_msn : 24; + + __u32 respcache_credit : 5; + __u32 wqe_credit_check_en : 1; + __u32 wqe_outsdanding_check_en : 1; + __u32 sq_stop : 1; + __u32 respcache_psn : 24; + + __u32 respcache_r_va_low; + + __u32 respcache_r_va_high; + + __u32 respcache_r_key; + + __u32 respcache_dma_len; + + __u32 mcrt_busy : 1; + __u32 txw_rtr2rts_flag : 1; + __u32 wqe_resp_first_rd_flag : 1; + __u32 wqe_resp_rd_ptr_out : 8; + __u32 wqe_resp_rd_ptr_sel : 1; + __u32 wqe_resp_rd_ptr_in : 9; 
+ __u32 txw_wqe_irrl_wptr : 8; + __u32 wqe_psn_check_en : 1; + __u32 wqe_nop_check_en : 1; + __u32 wqe_read_fence_check_en : 1; + + __u32 resp_newest_psn : 24; + __u32 wqe_local_fence_check_en : 1; + __u32 txw_chx_st : 5; + __u32 txw_resp_err : 1; + __u32 wqe_proc_cnp_flag : 1; + + __u32 mcrt_wqe_idx : 16; + __u32 txw_rate_bucket_low : 16; + + __u32 txw_nxt_wqe_psn : 24; + __u32 txw_rate_bucket_high : 2; + __u32 txw_nak_flag : 1; + __u32 rsv20 : 5; + + __u32 txw_token_num : 24; + __u32 txw_sq_retry_end_psn : 8; + + __u32 txw_rate_timestamp; + + __u32 txw_wqe_start_psn : 24; + __u32 rsv22 : 8; + __u32 txeng_qpc_rtr2rts_flag : 1; + __u32 txeng_req_err_flag : 1; + __u32 txeng_resp_err_flag : 1; + __u32 rsv23 : 1; + __u32 rsv24 : 1; + __u32 txeng_flush_err_retry_flag : 1; + __u32 txeng_flush_err_retry_wqe_idx : 16; + __u32 txeng_wqe_start_flag : 1; + __u32 rsv25 : 5; + __u32 txeng_qp_state : 4; + + __u32 txeng_ok_req_ssn : 24; + __u32 txeng_sq_err_syndrom : 8; + + __u32 txeng_tx2rx_retry_end_psn : 24; + __u32 txeng_resp_err_psn_l : 4; + __u32 txeng_resp_wait_nak_flag : 1; + __u32 rsv26 : 1; + __u32 txeng_syn_sch2rxi_retry_phase_tag : 1; + __u32 txeng_sq_flush_flag : 1; + + __u32 txeng_send_req_next_ssn : 24; + __u32 txeng_token_num_l : 8; + + __u32 txeng_npsn : 24; + __u32 txeng_ssnt_tx_wr_totptr : 8; + + __u32 rsv27 : 1; + __u32 txeng_irrl_tx_first_wr_flag : 1; + __u32 txeng_irrl_tx_first_wr_sel : 1; + __u32 txeng_irrl_tx_first_wr_inptr : 10; + __u32 txeng_irrl_tx_wr_sel : 1; + __u32 txeng_irrl_tx_wr_inptr : 10; + __u32 txeng_irrl_tx_wr_outptr : 8; + + __u32 rsv28 : 2; + __u32 txeng_ssnt_tx_first_wr_in_flag : 1; + __u32 txeng_ssnt_tx_first_wr_inptr : 10; + __u32 txeng_ssnt_tx_rd2wr_flag : 1; + __u32 txeng_ssnt_tx_wr_inptr : 10; + __u32 txeng_ssnt_tx_wr_outptr : 8; + + __u32 txeng_irrl_pa_0_31; + + __u32 txeng_irrl_pa_32_63; + + __u32 txeng_irrl_pa_64_95; + + __u32 txeng_irrl_pa_96_127; + + __u32 txeng_ssnt_pa_0_31; + + __u32 txeng_ssnt_pa_32_63; + + __u32 txeng_ssnt_pa_64_95; + + __u32 txeng_ssnt_pa_96_127; + + __u32 rsv29 : 12; + __u32 txeng_resp_err_psn_h : 20; + + __u32 txeng_newest_ack_req_psn : 24; + __u32 txeng_token_num_m : 8; + + __u32 txeng_newest_unack_psn : 24; + __u32 txeng_token_num_h : 8; + + __u32 txeng_oldest_unack_psn : 24; + __u32 rsv30 : 4; + __u32 txeng_tmo_log_rtm : 4; + + __u32 txeng_tmo_stamp_h; + __u32 rxi_newest_unack_psn : 24; + __u32 rxi_qp_error_cqe_syndrome : 8; + + __u32 rxi_qp_state : 4; + __u32 rxi_req_rnr_retry_flag : 1; + __u32 rxi_resp_rnr_nak_tmr : 5; + __u32 rxi_retring_flag : 1; + __u32 rxi_retry_phase_tag : 1; + __u32 rxi_ssnt_wqe_sge_ce : 1; + __u32 rxi_ssnt_in_wr_ptr_from_rx : 10; + __u32 rxi_ssnt_out_rd_ptr : 8; + __u32 rxi_ssnt_ll_wqe_drop : 1; + + __u32 rxi_ssnt_wqe_data_length; + + __u32 rxi_ssnt_wqe_start_psn : 24; + __u32 rxi_ssnt_rx_rd2wr_flag : 1; + __u32 rxi_ssnt_rx_first_rd_flag : 1; + __u32 rxi_ssnt_info_flag : 1; + __u32 rxi_credit : 5; + + __u32 rxi_ssnt_unack_wqe_ssn : 24; + __u32 rxi_ssnt_total_rd_ptr : 8; + + __u32 rxi_ssnt_wqe_wrid : 16; + __u32 rxi_ssnt_in_rd_ptr : 10; + __u32 rxi_ssnt_opcode : 6; + + __u32 rxi_resp_msn : 24; + __u32 rxi_irrl_wr_outptr : 8; + + __u32 rxi_ack_time_stamp; + + __u32 rxi_ae_code : 12; + __u32 rsv31 : 7; + __u32 rxeng_dif_breack_point : 12; + __u32 rx_dif_flag : 1; + + __u32 rxeng_dif_ref_tag; + + __u32 rxeng_dif_crc_seed : 16; + __u32 rxeng_dif_check_speed : 16; + __u32 rxt_qp_state : 4; + __u32 rsv32 : 9; + __u32 rxt_resp_pa_type : 1; + __u32 rxt_newest_credit : 5; + __u32 rx_cnp_flag : 1; + __u32 
rxt_resp_retry_wr_cnt : 8; + __u32 rxt_ack_type : 2; + __u32 rxt_nak_flag : 1; + __u32 rxt_ack_retry_flag : 1; + + __u32 rxt_ack_syndrome : 8; + __u32 rxt_ack_msn : 24; + + __u32 rxt_resp_paddr_96_127; + + __u32 rxt_resp_paddr_64_95; + + __u32 rxt_resp_paddr_32_63; + + __u32 rxt_resp_paddr_0_31; + + __u32 rxt_resp_rd_flag : 1; + __u32 rxt_resp_boundary_done : 1; + __u32 rxt_resp_first_wr_ptr_in : 9; + __u32 rxt_resp_first_wr_ptr_sel : 1; + __u32 rxt_resp_first_wr_flag : 1; + __u32 rxt_resp_wr_ptr_in : 9; + __u32 rxt_resp_wr_ptr_out : 8; + __u32 rxt_resp_wr_ptr_sel : 1; + __u32 rxt_resp_pa_vld : 1; + + __u32 rxt_resp_newest_psn : 24; + __u32 rxt_resp_retry_start_ptr : 8; + + __u32 rxt_rmsm : 24; + __u32 rxt_resp_retry_end_ptr : 8; + + __u32 rx_cnp_disp_timing; + __u32 rpp_ini_irrl_rd_ptr_in : 10; + __u32 rsv33 : 3; + __u32 rpp_ini_irrl_rd_ptr_out : 8; + __u32 rpp_ini_irrl_rd_ptr_sel : 1; + __u32 rpp_ini_irrl_first_rd_flag : 1; + __u32 rpp_ini_irrl_delete_en : 1; + __u32 rpp_ini_irrl_delete_done : 1; + __u32 rsv34 : 7; + + __u32 rpp_ini_irrl_sge_va_low; + + __u32 rpp_ini_irrl_sge_va_high; + + __u32 rpp_ini_irrl_sge_lkey; + + __u32 rpp_ini_irrl_sge_len; + + __u32 rpp_ini_irrl_sge_totallen; + + __u32 rsv35 : 8; + __u32 rpp_ini_irrl_sge_spsn : 24; + + __u32 rpp_ini_irrl_sge_ssn : 24; + __u32 rsv36 : 8; + + __u32 rpp_ini_rcv_messlen; + + __u32 rsv37 : 8; + __u32 rpp_ini_unack_psn : 24; + + __u32 rsv38 : 4; + __u32 rpp_ini_irrl_info_flag : 1; + __u32 rpp_ini_qpsts_err_flag : 1; + __u32 rpp_ini_read_retry_flag : 1; + __u32 rpp_ini_write_retry_flag : 1; + __u32 rpp_ini_cur_ssn : 24; + + __u32 rpp_tgt_sge_va_low; + + __u32 rpp_tgt_sge_va_high; + + __u32 rpp_tgt_sge_key; + + __u32 rpp_tgt_sge_len : 31; + __u32 rpp_wqe_err : 1; + + __u32 rpp_tgt_read_ots_ptr : 8; + __u32 rpp_tgt_rnr_flag : 1; + __u32 rpp_tgt_nak_flag : 1; + __u32 rpp_tgt_rtr_aeq_flag : 1; + __u32 rsv39 : 13; + __u32 rpp_tgt_sge_num : 5; + __u32 rpp_tgt_page_pa_sel : 1; + __u32 rpp_tgt_page_pa_vld : 2; + + __u32 rpp_tgt_page_pa_1_low; + + __u32 rpp_tgt_page_pa_1_high; + + __u32 rpp_tgt_rq_ud_len_err : 1; + __u32 rpp_tgt_page_pa_l : 31; + + __u32 rpp_tgt_page_pa_h; + + __u32 rpp_tgt_qp_error : 1; + __u32 rpp_wqe_vld : 1; + __u32 rsv40 : 1; + __u32 rpp_tgt_bth_opcode : 5; + __u32 rpp_tgt_epsn : 24; + + __u32 rpp_tgt_rcv_messlen; + + __u32 rpp_tgt_sw_counter : 16; + __u32 rpp_tgt_hw_counter : 16; + + __u32 rpp_ini_pkt_psn : 24; + __u32 rsv41 : 8; + + __u32 rsv42; + __u32 pf_id : 3; + __u32 vf_relative_id : 5; + __u32 vf_id : 8; + __u32 vf_valid : 1; + __u32 rsv43 : 5; + __u32 sqpn_8_17 : 10; + + __u32 hw_cqn_snd : 19; + __u32 rsv44 : 5; + __u32 sqpn_0_7 : 8; + + __u32 hw_cqn_rcv : 19; + __u32 rsv45 : 13; + + __u32 hw_srqn : 18; + __u32 sw_cc_enable : 2; + __u32 sw_cc_index : 12; +}; + +#define SXE2_RCMS_MAX_FPT_COUNT 4096 +#define SXE2_RCMS_SPT_ENTRY_CNT 512 + +enum sxe2_rcms_fpt_entry_type { + SXE2_RCMS_FPT_TYPE_FIRST = 1, + SXE2_RCMS_FPT_TYPE_SECOND = 2, +}; + +enum sxe2_rcms_rsrc_type { + SXE2_RCMS_OBJ_QP = 0, + SXE2_RCMS_OBJ_CQ = 1, + SXE2_RCMS_OBJ_SRQ = 2, + SXE2_RCMS_OBJ_MR = 3, + SXE2_RCMS_OBJ_RESP = 4, + SXE2_RCMS_OBJ_IRRL = 5, + SXE2_RCMS_OBJ_SSNT = 6, + SXE2_RCMS_OBJ_AH = 7, + SXE2_RCMS_OBJ_ACK_TIMEOUT = 8, + SXE2_RCMS_OBJ_PBLE = 9, + SXE2_RCMS_OBJ_MAX, +}; + +struct sxe2_rcms_obj_info { + u64 base; + u32 max_cnt; + u32 cnt; + u32 size; +}; + +#pragma pack(1) +struct sxe2_rcms_query_fpm_value_wqe { + u64 rsv0; + u64 rsv1; + u64 rcms_fuction_id : 6; + u64 rsv2 : 58; + u64 rsv4 : 32; + u64 op : 6; + u64 rsv5 : 4; + u64 rsv6 : 21; 
+ u64 wqe_valid : 1; + u64 rsv7 : 2; + u64 query_buf_pa : 62; + u64 rsv8; + u64 rsv9; + u64 rsv10; +}; +#pragma pack(0) + +#pragma pack(1) +struct sxe2_rcms_commit_fpm_value_wqe { + u64 rsv0; + u64 rsv1; + u64 rcms_fuction_id : 6; + u64 rsv2 : 58; + u64 rsv3 : 32; + u64 op : 6; + u64 rsv4 : 4; + u64 rsv5 : 21; + u64 wqe_valid : 1; + u64 rsv6 : 2; + u64 commit_buf_pa : 62; + u32 rsv7; + u32 rsv8; + u32 rsv9; +}; +#pragma pack(0) + +#pragma pack(1) +struct sxe2_rcms_manage_vf_pble_cp_wqe { + u64 rsv0; + u64 rsv1; + u64 spte_cnt : 10; + u64 rsv2 : 6; + u64 first_spte_idx : 9; + u64 rsv3 : 7; + u64 fpte_idx : 13; + u64 rsv4 : 19; + u64 rsv5 : 32; + u64 op : 6; + u64 rsv6 : 4; + u64 rsv7 : 20; + u64 invalidate_spte : 1; + u64 wqe_valid : 1; + u64 rsv8 : 3; + u64 spt_pagelist_buf_pa : 61; + u64 rsv9; + u64 rsv10; + u64 rsv11; +}; +#pragma pack(0) + +#pragma pack(1) +struct qp_mq_wqe { + __u64 modify_qp_type : 1; + __u64 modify_qp_state : 1; + __u64 rsvd0 : 29; + __u64 modify_qp_mode : 1; + __u64 rsvd1 : 32; + + __u64 rsvd2 : 32; + __u64 rsvd3 : 32; + + __u64 qp_context_address; + + __u64 qpn : 18; + __u64 rsvd4 : 14; + __u64 op : 6; + __u64 rsvd5 : 10; + __u64 qp_type : 3; + __u64 rsvd6 : 2; + __u64 sw_cc_enable : 2; + __u64 rsvd7 : 5; + __u64 next_rdma_state : 3; + __u64 wqe_valid : 1; + + __u64 rsvd8 : 32; + __u64 rsvd9 : 32; + + __u64 rsvd10 : 32; + __u64 rsvd11 : 32; + + __u64 rsvd12 : 32; + __u64 rsvd13 : 32; + + __u64 rsvd14 : 32; + __u64 rsvd15 : 32; +}; +#pragma pack(0) + +#pragma pack(1) +struct qp_mq_create_cc_qp_wqe { + u64 timely_min_rtt_h : 12; + u64 timely_alpha : 20; + u64 timely_thigh_h : 12; + u64 timely_tlow : 16; + u64 timely_min_rtt_l : 4; + u64 timely_pre_rtt_h : 8; + u64 timely_beta : 20; + u64 timely_thigh_l : 4; + u64 rsv0 : 8; + u64 timely_rtt_diff : 16; + u64 timely_pre_rtt_l : 8; + u64 qp_context_address; + u64 qp_id : 18; + u64 dcqcn_b_l : 14; + u64 op : 6; + u64 rsv1 : 1; + u64 dcqcn_b_h : 11; + u64 dcqcn_bc : 3; + u64 sw_cc_enable : 2; + u64 rsv2 : 8; + u64 wqe_valid : 1; + u64 dcqcn_t_interval_h : 12; + u64 dcqcn_g : 20; + u64 dcqcn_rhai_h : 8; + u64 dcqcn_rai : 16; + u64 dcqcn_f : 4; + u64 dcqcn_t_interval_l : 4; + u64 dcqcn_rreduce_mperiod_h : 8; + u64 dcqcn_k : 16; + u64 dcqcn_rhai_l : 8; + u64 dcqcn_min_dec_factor : 8; + u64 dcqcn_rreduce_next_node_info : 16; + u64 dcqcn_rreduce_mperiod_l : 8; + u64 dcqcn_rc_h : 4; + u64 dcqcn_alpha : 20; + u64 dcqcn_min_rate : 8; + u64 dcqcn_rt_h : 16; + u64 dcqcn_rc_l : 16; + u64 dcqcn_func_id : 12; + u64 dcqcn_t_next_node_info : 16; + u64 dcqcn_rt_l : 4; + u64 dcqcn_t_counter : 3; + u64 dcqcn_byte_counter : 25; + u64 dcqcn_decrease_rate_valid : 4; +}; +#pragma pack(0) + +#pragma pack(1) +struct sxe2_rcms_update_fpte_wqe { + u64 fpte_cmd_0 : 32; + u64 rsv0 : 32; + u64 fpte_data_0; + u64 rcms_fcn_id : 6; + u64 rsv1 : 1; + u64 fpt_buffer_address : 57; + u64 fpt_entry_cnt : 6; + u64 rsv2 : 1; + u64 skip_fpt_entry_0 : 1; + u64 rsv3 : 24; + u64 op : 6; + u64 rsv4 : 4; + u64 rsv5 : 21; + u64 wqe_valid : 1; + u64 fpte_cmd_1 : 32; + u64 rsv6 : 31; + u64 fpt_entry_valid_1 : 1; + u64 fpte_data_1; + u64 fpte_cmd_2 : 32; + u64 rsv7 : 31; + u64 fpt_entry_valid_2 : 1; + u64 fpte_data_2; +}; +#pragma pack(0) + +#define SXE2_SQ_RSVD 8 +#define SXE2_RQ_RSVD 8 + +enum sxe2_mr_caps_const { + SXE2_MAX_MR_SIZE = 0xffffffffffffffff, +}; + +enum sxe2_device_caps_const { + SXE2_WQE_SIZE = 4, + SXE2_MQ_WQE_SIZE = 8, + SXE2_CQE_SIZE = 8, + SXE2_EXTENDED_CQE_SIZE = 8, + SXE2_AEQE_SIZE = 2, + SXE2_CEQE_SIZE = 1, + SXE2_MQ_CTX_SIZE = 8, + 
SXE2_SHADOW_AREA_SIZE = 8, + SXE2_GATHER_STATS_BUF_SIZE = 900, + SXE2_GATHER_STATS_BUF_VF_SIZE = 144, + SXE2_MIN_IW_QP_ID = 0, + SXE2_MIN_IW_SRQ_ID = 0, + SXE2_QUERY_FPM_BUF_SIZE = 96, + SXE2_COMMIT_FPM_BUF_SIZE = 96, + SXE2_MAX_IW_QP_ID = 262143, + SXE2_MIN_CEQID = 0, + SXE2_MAX_CEQID = 1023, + SXE2_RDMA_CEQ_MAX_COUNT = SXE2_MAX_CEQID + 1, + SXE2_MIN_CQID = 0, + SXE2_MAX_CQID = 524287, + SXE2_MIN_AEQ_ENTRIES = 4, + SXE2_MAX_AEQ_ENTRIES = 262144, + SXE2_MIN_CEQ_ENTRIES = 4, + SXE2_MAX_CEQ_ENTRIES = 262144, + SXE2_MIN_CQ_SIZE = 4, + SXE2_MAX_CQ_SIZE = 2097152, + SXE2_DB_ID_ZERO = 0, + SXE2_MAX_PE_ENA_VF_COUNT = 32, + SXE2_MAX_VF_FPM_ID = 39, + SXE2_MAX_SQ_PAYLOAD_SIZE = 2145386496, + SXE2_MAX_INLINE_DATA_SIZE = 225, + SXE2_MAX_WQ_ENTRIES = 32768, + SXE2_MAX_OUTBOUND_MSG_SIZE = 2147483647, + SXE2_MAX_INBOUND_MSG_SIZE = 2147483647, + SXE2_Q2_BUF_SIZE = 256, + SXE2_MAX_PDS = 262144, + SXE2_QP_SW_MAX_WQ_QUANTA = 32768, + SXE2_QP_SW_MAX_SQ_QUANTA = 32768, + SXE2_QP_SW_MAX_RQ_QUANTA = 32768, + SXE2_SRQ_SW_MAX_SRQ_QUANTA = 65536, + SXE2_MAX_SRQ_WRS = (SXE2_QP_SW_MAX_RQ_QUANTA - SXE2_RQ_RSVD), + SXE2_DONE_COUNT = 100000, + SXE2_SLEEP_COUNT = 10, + SXE2_MQ_COMPL_WAIT_TIME_MS = 10, + SXE2_MAX_PUSH_PAGE_COUNT = 256, + SXE2_MAX_WQ_FRAGMENT_COUNT = 13, + SXE2_MAX_SGE_RD = 13, + SXE2_MAX_RRA_SIZE = 64, + SXE2_MAX_SRA_SIZE = 64, + SXE2_MIN_WQ_SIZE = 8, + SXE2_MAX_QUANTA_PER_WR = 8, + SXE2_MAX_BAR_REGS = 30, + SXE2_VF_MAX_QSET_CNT = 8, + SXE2_PF_MAX_QSET_CNT = 16, + SXE2_PF_MAX_QSET_CNT_NO_LAG_AA = 8, + SXE2_RDMA_DEFAULT_MAX_PUSH_LEN = 8192, + SXE2_VCHNL_MAX_MSG_SIZE = 1024, + SXE2_HCA_CORE_CLOCK_KHZ = 1000000UL, +}; + +#define SXE2_ACCESS_FLAGS_LOCALREAD 0x01 +#define SXE2_ACCESS_FLAGS_LOCALWRITE 0x02 +#define SXE2_ACCESS_FLAGS_REMOTEREAD 0x04 +#define SXE2_ACCESS_FLAGS_REMOTEWRITE 0x08 +#define SXE2_ACCESS_FLAGS_BIND_WINDOW 0x10 +#define SXE2_ACCESS_FLAGS_ZERO_BASED 0x20 + +#define SXE2_MR_ACCESS_MODE_PA 0 +#define SXE2_MR_ACCESS_MODE_VA 1 + +enum sxe2_pbl_mr_mode { + MR_TABLE_FIRST_MODE = 1, + MR_TABLE_SECOND_MODE = 2, + MR_TABLE_THIRD_MODE = 3, +}; + +enum sxe2_pbl_cq_eq_mode { + CQ_EQ_PA_FIRST_MODE = 0, + CQ_EQ_PA_SECOND_MODE = 1, + CQ_EQ_TABLE_SECOND_MODE = 2, + CQ_EQ_TABLE_THIRD_MODE = 3, +}; + +union sxe2_reg_mr_info { + struct { + u64 rsv1 : 7; + u64 rsv2 : 1; + u64 pbl_mode : 2; + u64 access_right : 5; + u64 va_based_flag : 1; + u64 rsv3 : 1; + u64 rsv4 : 7; + u64 access_mode : 5; + u64 mr_type : 1; + u64 rsv5 : 1; + u64 rsv6 : 1; + u64 mr_key : 8; + u64 rsv7 : 18; + u64 rsv8 : 6; + u64 ref_tag : 32; + u64 pd : 18; + u64 is_len64 : 1; + u64 rsv12 : 13; + u64 start_addr; + u64 mr_idx : 22; + u64 rsv14 : 10; + u64 op : 6; + u64 rsv15 : 25; + u64 wqe_valid : 1; + u64 rsv16; + u64 len; + u64 pbl_index; + u64 log_entity_size : 5; + u64 rsv17 : 27; + u64 rsv18 : 32; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +union sxe2_dereg_mr_info { + struct { + u64 rsv1; + u64 rsv2 : 24; + u64 rsv3 : 8; + u64 pd : 18; + u64 rsv4 : 1; + u64 rsv5 : 1; + u64 rsv6 : 12; + u64 rsv7; + u64 mr_idx : 22; + u64 rsv8 : 10; + u64 op : 6; + u64 rsv9 : 25; + u64 wqe_valid : 1; + u64 rsv10; + u64 rsv11; + u64 rsv12; + u64 rsv13; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +union sxe2_query_mr_info { + struct { + u64 rsv1; + u64 rsv2; + u64 buf_addr; + u64 mr_index : 22; + u64 rsv3 : 10; + u64 op : 6; + u64 rsv4 : 25; + u64 wqe_valid : 1; + u64 rsv5; + u64 rsv6; + u64 rsv7; + u64 rsv8; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +union sxe2_hw_mrc { + struct { + u64 rsv1 : 8; + u64 pbl_mode : 2; + u64 access_rights : 
5; + u64 va_based_flag : 1; + u64 mw_type : 1; + u64 rsv2 : 7; + u64 access_mode : 5; + u64 mrct_type : 1; + u64 free : 1; + u64 rsv3 : 1; + u64 mr_key : 8; + u64 qpn : 18; + u64 rsv4 : 6; + u64 mw_bind_num : 24; + u64 rsv5 : 8; + u64 pd : 18; + u64 is_len64 : 1; + u64 rsv7 : 13; + u64 start_addr; + u64 len; + u64 parent_mr_stag : 32; + u64 ref_tag : 32; + u64 dif_pbl_index : 33; + u64 rsv9 : 1; + u64 dif_offset : 30; + u64 pbl_index : 34; + u64 data_offset : 30; + u64 log_entity_size : 5; + u64 pfvf_id : 12; + u64 rsv11 : 15; + u64 app_tag : 16; + u64 dif_mode : 4; + u64 block_size : 1; + u64 rsv6 : 1; + u64 sge_type : 1; + u64 rsv12 : 9; + } field; + __u64 buf[SXE2_MQ_WQE_SIZE]; +}; + +#define SXE2_RDMA_DB_EQ_INFO_OFFSET (4) +#define SXE2_RDMA_DB_CQ_ARM_OFFSET (8) +#define SXE2_RDMA_DB_CQ_INFO_OFFSET (12) + +#define SXE2_QP_TYPE_ROCE_RC 3 +#define SXE2_QP_TYPE_ROCE_UD 4 + +enum sxe2_major_opcode { + SXE2_SUCCESS = 0, + SXE2_LOCAL_LEN_ERR = 0x1, + SXE2_LOCAL_QP_OP_ERR = 0x2, + SXE2_RSV1_ERR, + SXE2_LOCAL_PROTECTION_ERR = 0x4, + SXE2_WR_FLUSH_ERR = 0x5, + SXE2_MW_BIND_ERR = 0x6, + SXE2_FAST_REGISTER_ERR = 0x7, + SXE2_INVALID_KEY_ERR = 0x8, + SXE2_BAD_RESPONSE_ERR = 0x10, + SXE2_LOCAL_ACCESS_ERR = 0x11, + SXE2_REMOTE_INVALID_REQUEST_ERR = 0x12, + SXE2_REMOTE_ACCESS_ERR = 0x13, + SXE2_REMOTE_OPERATION_ERR = 0x14, + SXE2_TRANS_RETRY_CNT_EXCEED_ERR = 0x15, + SXE2_RNR_RETRY_CNT_EXCEED_ERR = 0x16, + SXE2_ABORT_ERR = 0x22, + SXE2_DIF_CHECK_ERR = 0x23, + SXE2_SQ_FAKE_ERR = 0x24, +}; + +#pragma pack(1) +struct drv_rdma_soft_eqc { + u64 oi : 1; + u64 vsi_index : 10; + u64 rsv1 : 16; + u64 sw_owner_bit : 1; + u64 sw_status : 4; + u64 uar_page : 24; + u64 log_eq_size : 5; + u64 pbl_mode : 2; + u64 rsv2 : 1; + + u64 rsv3 : 12; + u64 TPH_en : 1; + u64 TPH_value : 8; + u64 rsv4 : 11; + u64 rsv5 : 11; + u64 page_offset : 13; + u64 log_page_size : 5; + u64 rsv6 : 3; + + u64 pbl_index; + + u32 rsv7; +}; + +struct drv_rdma_hw_eqc { + u32 hw_owner_bit : 1; + u32 over_flag : 1; + u32 rsv1 : 6; + u32 hw_status : 1; + u32 pfvf_id : 12; + u32 rsv2 : 11; + + u32 rsv3; + u32 rsv4; + + u32 consumer_counter : 24; + u32 rsv5 : 8; + + u32 producer_counter : 24; + u32 rsv6 : 8; + + u32 page_addr_odd_l; + u32 page_addr_odd_h; + + u32 page_addr_even_l; + u32 page_addr_even_h; +}; + +struct sxe2_rdma_eqc { + struct drv_rdma_soft_eqc seqc; + struct drv_rdma_hw_eqc heqc; +}; +#pragma pack(0) + +#define SXE2_AE_AMP_UNALLOCATED_STAG 0x0102 +#define SXE2_AE_AMP_INVALID_STAG 0x0103 +#define SXE2_AE_AMP_BAD_QP 0x0104 +#define SXE2_AE_AMP_BAD_PD 0x0105 +#define SXE2_AE_AMP_BAD_STAG_KEY 0x0106 +#define SXE2_AE_AMP_BAD_STAG_INDEX 0x0107 +#define SXE2_AE_AMP_BOUNDS_VIOLATION 0x0108 +#define SXE2_AE_AMP_RIGHTS_VIOLATION 0x0109 +#define SXE2_AE_AMP_FASTREG_VALID_STAG 0x010c +#define SXE2_AE_AMP_FASTREG_MW_STAG 0x010d +#define SXE2_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e +#define SXE2_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 +#define SXE2_AE_AMP_REMOTE_INVALIDATE_MR_RD_ERR 0x0111 +#define SXE2_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 +#define SXE2_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 +#define SXE2_AE_AMP_MWBIND_VALID_STAG 0x0114 +#define SXE2_AE_AMP_MWBIND_OF_MR_STAG 0x0115 +#define SXE2_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 +#define SXE2_AE_AMP_MWBIND_TO_MW_STAG 0x0117 +#define SXE2_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 +#define SXE2_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 +#define SXE2_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a +#define SXE2_AE_AMP_MWBIND_BIND_DISABLED 0x011b +#define SXE2_AE_AMP_INVALIDATE_TYPE1_MW 0x011d +#define 
SXE2_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e +#define SXE2_AE_AMP_MWBIND_WRONG_TYPE 0x0120 +#define SXE2_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121 +#define SXE2_AE_CQ_OPERATION_ERROR 0x0203 +#define SXE2_AE_LLP_TOO_MANY_RETRIES 0x050a +#define SXE2_AE_QP_SUSPEND_COMPLETE 0x0900 + +#define SXE2_AE_LLWQE_ERR 0x0a00 +#define SXE2_AE_DOORBELL_ERR 0x0a01 +#define SXE2_AE_LLWQE_8B_INVALID 0x0a02 +#define SXE2_AE_DOORBELL_QSET_ERR 0x0a06 +#define SXE2_AE_DOORBELL_QPN_ERR 0x0a08 +#define SXE2_AE_DOORBELL_EQN_ERR 0x0a09 +#define SXE2_AE_DOORBELL_CQN_ERR 0x0a0a +#define SXE2_AE_DOORBELL_QPN_NUM_ERR 0x0a0b + +#define SXE2_AE_WQE_LOAD_QPC_PBL_ERR 0x0b00 +#define SXE2_AE_WQE_LOAD_QPC_DMA_ERR 0x0b01 +#define SXE2_AE_WQE_QPC_CFG_ERR 0x0b03 +#define SXE2_AE_WQE_RD_DMA_ERR 0x0b04 +#define SXE2_AE_WQE_CFG_ERR 0x0b05 +#define SXE2_AE_WQE_OPCODE_ERR 0x0b06 +#define SXE2_AE_WQE_LENGTH_ERR 0x0b07 +#define SXE2_AE_WQE_LLWQE_ERR 0x0b08 +#define SXE2_AE_WQE_RD_RESP_DMA_ERR 0x0b09 +#define SXE2_AE_WQE_RD_AH_ERR 0x0b0a +#define SXE2_AE_WQE_AH_CFG_ERR 0x0b0b + +#define SXE2_AE_WQE_PBL_DMA_ERR 0x0b0c + +#define SXE2_AE_RXENG_RXT_OPCODE_ERR 0x0c01 +#define SXE2_AE_RXENG_RXT_LENGTH_ERR 0x0c02 +#define SXE2_AE_RXENG_RXT_RDMA_READ_OUTSTANDING_ERR 0x0c03 +#define SXE2_AE_RXENG_RXI_BAD_RESPONSE 0x0c04 +#define SXE2_AE_RXENG_RXI_LENGTH_ERR 0x0c05 +#define SXE2_AE_RXENG_RXI_IRRL_DMA_ERR 0x0c06 +#define SXE2_AE_RXENG_RXT_RESP_WR_PADDR_ERR 0x0c07 +#define SXE2_AE_RXENG_RXT_RESP_RD_DMA_ERR 0x0c08 +#define SXE2_AE_RXENG_RXI_SSNT_OVERSIZE_ERR 0x0c09 +#define SXE2_AE_RXENG_RXI_SSNT_DMA_ERR 0x0c0a +#define SXE2_AE_RXENG_RXT_RQE_LIMIT 0x0c0b +#define SXE2_AE_RXENG_RX_LOAD_QPC_DMA_ERR 0x0c0c +#define SXE2_AE_RXENG_RX_LOAD_QPC_PA_ERR 0x0c0d +#define SXE2_AE_RXENG_RX_LOAD_SRQC_DMA_ERR 0x0c0e +#define SXE2_AE_RXENG_RX_LOAD_SRQC_PA_ERR 0x0c0f +#define SXE2_AE_RXENG_RQ_PI_DMA_ERR 0x0c10 +#define SXE2_AE_RXENG_RQ_ADDR_ERR 0x0c11 +#define SXE2_AE_RXENG_RQ_DMA_ERR 0x0c12 +#define SXE2_AE_RXENG_RQ_WQE_VLD_ERR 0x0c13 +#define SXE2_AE_RXENG_SRQC_STS_ERR 0x0c14 +#define SXE2_AE_RXENG_SRQC_STS_RSV 0x0c15 +#define SXE2_AE_RXENG_SRQ_LAST_WQE 0x0c16 +#define SXE2_AE_RXENG_DBR_VALUE_ERR 0x0c17 +#define SXE2_AE_RXENG_RTR_FIRST_PKT 0x0c18 + +#define SXE2_AE_SRQC_LOAD_SRQC_DMA_ERR 0x0c19 +#define SXE2_AE_SRQC_LOAD_SRQC_PA_ERR 0x0c1a +#define SXE2_AE_SRQC_STS_ERR 0x0c1b +#define SXE2_AE_RXENG_REMOTE_INVALID_ERROR 0x0c1c +#define SXE2_AE_RXENG_REMOTE_ACCESS_ERROR 0x0c1d +#define SXE2_AE_RXENG_REMOTE_QP_ERROR 0x0c1e +#define SXE2_AE_RXENG_RXI_WQE_MISMATCH 0x0c1f + +#define SXE2_AE_TXENG_PAYLOAD_RD_DMA_ERR 0x0d00 +#define SXE2_AE_TXENG_RD_PKEY_ERR 0x0d01 +#define SXE2_AE_TXENG_RD_SSNT_PBL_ERR 0x0d02 +#define SXE2_AE_TXENG_RD_IRRL_PBL_ERR 0x0d03 +#define SXE2_AE_TXENG_RESP_PAYLOAD_RD_DMA_ERR 0x0d10 + +#define SXE2_AE_TMO_WR_PA_ERR 0x0f00 +#define SXE2_AE_TMO_RD_PA_ERR 0x0f01 +#define SXE2_AE_TMO_RD_DMA_ERR 0x0f02 +#define SXE2_AE_TMO_RD_QPC_ERR 0x0f03 +#define SXE2_AE_CC_QP_RATE 0x0f10 +#define SXE2_AE_CEQ_OVERFLOW 0x0f11 +#define SXE2_AE_EQ_STATUS_ERR 0x0f12 +#define SXE2_AE_CQ_STATUS_ERR 0x0f13 +#define SXE2_AE_QP_CQ_OVERFLOW 0x0f14 + +#define SXE2_AE_SOURCE_RSVD 0x0 +#define SXE2_AE_SOURCE_RQ 0x1 +#define SXE2_AE_SOURCE_CQ 0x2 +#define SXE2_AE_SOURCE_SQ 0x5 +#define SXE2_AE_SOURCE_SRQ 0x7 +#define SXE2_AE_SOURCE_DB_PROC 0x9 +#define SXE2_AE_SOURCE_TMO 0xa +#define SXE2_AE_SOURCE_CC 0xb + +#pragma pack(1) +struct sxe2_eq_wqe { + u64 oi : 1; + u64 vsi_index : 10; + u64 rsv1 : 16; + u64 sw_owner_bit : 1; + u64 sw_status : 4; + u64 uar_page : 24; + u64 log_eq_size 
: 5;
+	u64 pbl_mode : 2;
+	u64 rsv2 : 1;
+
+	u64 rsv3 : 12;
+	u64 TPH_en : 1;
+	u64 TPH_value : 8;
+	u64 rsv4 : 11;
+	u64 rsv5 : 11;
+	u64 page_offset : 13;
+	u64 log_page_size : 5;
+	u64 rsv6 : 3;
+
+	u64 pbl_index;
+
+	u64 rsv7 : 32;
+	u64 op : 6;
+	u64 eqn : 10;
+	u64 pfvf_id : 12;
+	u64 rsv8 : 3;
+	u64 WQE_Valid : 1;
+
+	u64 rsv9 : 8;
+	u64 physical_buffer_address : 56;
+
+	u64 rsv10;
+	u64 rsv11;
+
+	u64 rsv12;
+};
+#pragma pack(0)
+
+struct drv_rdma_soft_srqc {
+	u64 pd : 18;
+	u64 rsv0 : 2;
+	u64 log_srq_size : 4;
+	u64 state : 8;
+	u64 rsv1 : 25;
+	u64 srq_access_mode : 1;
+	u64 log_page_size : 5;
+	u64 rsv2 : 1;
+	u64 dbr_addr;
+	u64 SRQ_Completion_Context;
+	u64 srq_pbl_pointer;
+	u64 lwm : 16;
+	u64 rsv3 : 1;
+	u64 log_rq_stride : 2;
+	u64 page_offset : 13;
+};
+
+struct drv_rdma_hw_srqc {
+	u64 state_err_aeq_flag : 1;
+	u64 rsv4 : 31;
+	u64 sw_srq_counter : 16;
+	u64 hw_srq_counter : 16;
+	u64 srq_page_pa_sel : 1;
+	u64 srq_page_pa_vld : 2;
+	u64 srq_wqe_vld : 1;
+	u64 rsv5 : 28;
+	u64 srq_page_pa0;
+	u64 srq_page_pa1;
+};
+
+#pragma pack(1)
+struct sxe2_rdma_srqc {
+	struct drv_rdma_soft_srqc ssrqc;
+	struct drv_rdma_hw_srqc hsrqc;
+};
+#pragma pack(0)
+
+#pragma pack(1)
+struct mq_wqe_of_srq {
+	u64 pd : 18;
+	u64 rsv0 : 2;
+	u64 log_srq_size : 4;
+	u64 state : 8;
+	u64 rsv1 : 25;
+	u64 srq_access_mode : 1;
+	u64 log_page_size : 6;
+	u64 dbr_addr;
+	u64 SRQ_Completion_Context;
+	u64 srqn : 18;
+	u64 rsv2 : 14;
+	u64 op : 6;
+	u64 rsv3 : 25;
+	u64 wqe_valid : 1;
+	u64 rsv4 : 8;
+	u64 Physical_Buffer_Address : 56;
+	u64 srq_pbl_pointer;
+	u64 lwm : 16;
+	u64 rsv5 : 1;
+	u64 log_rq_stride : 2;
+	u64 page_offset : 13;
+	u64 rsv6 : 32;
+	u64 rsv7;
+};
+#pragma pack(0)
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/Kconfig b/drivers/net/ethernet/linkdata/Kconfig
index 74f7bce3cbd2068d2ddb4b8b1854bba1c843c2d3..fa42dbcce80a567909f6ca715ef996a29b0dcacc 100644
--- a/drivers/net/ethernet/linkdata/Kconfig
+++ b/drivers/net/ethernet/linkdata/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: GPL-2.0
 #
 # Linkdata network device configuration
 #
@@ -17,4 +17,6 @@ config NET_VENDOR_LINKDATA
 if NET_VENDOR_LINKDATA
 source "drivers/net/ethernet/linkdata/sxe/Kconfig"
 source "drivers/net/ethernet/linkdata/sxevf/Kconfig"
+source "drivers/net/ethernet/linkdata/sxe2/Kconfig"
+source "drivers/net/ethernet/linkdata/sxe2vf/Kconfig"
 endif # NET_VENDOR_LINKDATA
diff --git a/drivers/net/ethernet/linkdata/Makefile b/drivers/net/ethernet/linkdata/Makefile
index 3a33b8f6a4c3f056b6ff0f7528f53666c7048928..cb873343678b8ce921ead4d928a74d725d17209c 100644
--- a/drivers/net/ethernet/linkdata/Makefile
+++ b/drivers/net/ethernet/linkdata/Makefile
@@ -4,3 +4,5 @@
 #
 obj-$(CONFIG_SXE) += sxe/
 obj-$(CONFIG_SXE_VF) += sxevf/
+obj-$(CONFIG_SXE2) += sxe2/
+obj-$(CONFIG_SXE2_VF) += sxe2vf/
diff --git a/drivers/net/ethernet/linkdata/sxe2/Kconfig b/drivers/net/ethernet/linkdata/sxe2/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..ffc517965c83677f77543d53af4fe3d638cedea3
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# sxe2 network device configuration
+#
+
+config SXE2
+	tristate "sxe2 PCI Express adapters support"
+	depends on (X86 || ARM64) && PCI
+	select MDIO
+	select PHYLIB
+	select PTP_1588_CLOCK
+	default m
+	help
+	  This driver supports sxe2 PCI Express family of adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called sxe2.
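+
+# Example (illustrative, not part of the driver sources): with CONFIG_SXE2=m
+# the module is produced by the normal kbuild flow and can be built and
+# loaded on its own from the kernel source tree:
+#
+#   make M=drivers/net/ethernet/linkdata/sxe2 modules
+#   modprobe sxe2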
diff --git a/drivers/net/ethernet/linkdata/sxe2/Makefile b/drivers/net/ethernet/linkdata/sxe2/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..498ab5d6fd6d8d2fff8519c85458316edcaa0f5f
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/Makefile
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0
+CONFIG_MODULE_SIG=n
+
+# Path of this Makefile in the current directory
+MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+# Current directory
+CURDIR :=$(shell dirname $(MAKEPATH))
+KDIR := /lib/modules/$(shell uname -r)/build
+MT_BUILDMODE?=asic
+
+ifneq ($(KERNELRELEASE),)
+# Build
+CONFIG_SXE2 ?= m
+obj-$(CONFIG_SXE2) += sxe2.o
+sxe2-objs += $(patsubst %.c, sxe2pf/%.o, $(notdir $(wildcard $(CURDIR)/sxe2pf/*.c)))
+sxe2-objs += $(patsubst %.c, sxe2pf/com_parse/%.o, $(notdir $(wildcard $(CURDIR)/sxe2pf/com_parse/*.c)))
+sxe2-objs += $(patsubst %.c, common/mbx/%.o, $(notdir $(wildcard $(CURDIR)/common/mbx/*.c)))
+sxe2-objs += $(patsubst %.c, common/sxe2pf/%.o, $(notdir $(wildcard $(CURDIR)/common/sxe2pf/*.c)))
+sxe2-objs += $(patsubst %.c, base/log/%.o, $(notdir $(wildcard $(CURDIR)/base/log/*.c)))
+sxe2-objs += $(patsubst %.c, base/compat/%.o, $(notdir $(wildcard $(CURDIR)/base/compat/*.c)))
+sxe2-objs += $(patsubst %.c, base/ioctl/%.o, $(notdir $(wildcard $(CURDIR)/base/ioctl/*.c)))
+
+# Generate the linux kernel version code
+ifneq ($(wildcard $(CURDIR)/vercode_build.sh),)
+	KER_DIR=$(srctree)
+	SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR))
+	ifneq ($(SPECIFIC_LINUX),)
+	ccflags-y += -DSPECIFIC_LINUX
+	ccflags-y += -D$(SPECIFIC_LINUX)
+	endif
+endif
+
+# Whether to build the auxiliary device driver
+ifeq ($(SPECIFIC_LINUX),BCLINUX_21_10U4)
+obj-$(CONFIG_SXE2) += sxe2_auxiliary.o
+sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c)))
+else
+CHECK_AUX_BUS ?= $(CURDIR)/check_aux_support
+NEED_AUX_BUS := $(shell bash ${CHECK_AUX_BUS} --ksrc="$(KDIR)" >/dev/null 2>&1; echo $$?)
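+# Note: the exit status of check_aux_support selects the build path; an exit
+# code of 2 is taken below to mean the target kernel does not provide the
+# auxiliary bus, so the bundled sxe2_auxiliary module is built as a substitute.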
+$(info "NEED_AUX_BUS = $(NEED_AUX_BUS)") +ifeq ($(NEED_AUX_BUS), 2) +obj-$(CONFIG_SXE2) += sxe2_auxiliary.o +sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c))) +endif +endif + +#添加编译选项和编译宏 +ccflags-y += -Werror -Wmaybe-uninitialized -frecord-gcc-switches +ccflags-y += -I$(CURDIR)/sxe2pf +ccflags-y += -I$(CURDIR)/sxe2pf/com_parse +ccflags-y += -I$(CURDIR)/common/mbx +ccflags-y += -I$(CURDIR)/common/sxe2pf +ccflags-y += -I$(CURDIR)/common/ioctl +ccflags-y += -I$(CURDIR)/common/sxe2flow +ccflags-y += -I$(CURDIR)/include +ccflags-y += -I$(CURDIR)/base/compat +ccflags-y += -I$(CURDIR)/base/auxiliary +ccflags-y += -I$(CURDIR)/base/log +ccflags-y += -I$(CURDIR)/base/trace +ccflags-y += -I$(CURDIR)/base/cdev +ccflags-y += -I$(CURDIR)/base/ioctl +ccflags-y += -DSXE2_HOST_DRIVER +ccflags-y += -DSXE2_MBX_SUPPORT +ccflags-y += -DCUR_DIR=$(CURDIR) +# ccflags-y += -DSXE2_DRIVER_TRACE +ccflags-y += -DSXE2_CFG_DEBUG + +#默认支持fpga平台,支持不同的硬件平台 +ifeq ($(MT_BUILDMODE),asic) + ccflags-y += -DSXE2_HARDWARE_ASIC +else #ifeq ($(MT_BUILDMODE),emu) + ccflags-y += -DSXE2_HARDWARE_EMU +endif + +else # KERNELRELEASE +#内核树路径 +KDIR := /lib/modules/$(shell uname -r)/build + +all: + $(info "HWVERSION = $(VER)" ) + @$(MAKE) -C $(KDIR) M=$(CURDIR) modules + +clean: + @rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt + @rm -rf ./sxe2pf/*.o ./sxe2pf/.*.cmd + @rm -rf ./base/log/*.o ./common/mbx/*.o ./common/sxe2pf/*.o ./base/auxiliary/*.o ./base/compat/*.o + +endif # KERNELRELEASE diff --git a/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/Makefile b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4a05f84853fc90187b0412209b2407ccdd1cf570 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/Makefile @@ -0,0 +1,13 @@ +################################################################################ +# 专用于辅助设备驱动的编译 +################################################################################ +obj-m := sxe2_auxiliary.o + +KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +all: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules + +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary.c b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary.c new file mode 100644 index 0000000000000000000000000000000000000000..34570f1768ba6bbe30fc0bf8a8e5d222a9c06ab2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: auxiliary.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "auxiliary_bus.h"
+
+static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+							    const struct auxiliary_device *auxdev)
+{
+	for (; id->name[0]; id++) {
+		const char *p = strrchr(dev_name(&auxdev->dev), '.');
+		size_t match_size;
+
+		if (!p)
+			continue;
+		match_size = p - dev_name(&auxdev->dev);
+
+		if (strlen(id->name) == match_size &&
+		    !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+			return id;
+	}
+	return NULL;
+}
+
+static int auxiliary_match(struct device *dev, struct device_driver *drv)
+{
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+
+	return !!auxiliary_match_id(auxdrv->id_table, auxdev);
+}
+
+static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	const char *name, *p;
+
+	name = dev_name(dev);
+	p = strrchr(name, '.');
+
+	return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX,
+			      (int)(p - name), name);
+}
+
+static const struct dev_pm_ops auxiliary_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+};
+
+static int auxiliary_bus_probe(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+	int ret;
+
+	ret = dev_pm_domain_attach(dev, true);
+	if (ret != -EPROBE_DEFER) {
+		if (auxdrv->probe) {
+			ret = auxdrv->probe(auxdev,
+					    auxiliary_match_id(auxdrv->id_table,
+							       auxdev));
+			if (ret)
+				dev_pm_domain_detach(dev, true);
+		} else {
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int auxiliary_bus_remove(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+	struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+	if (auxdrv->remove)
+		auxdrv->remove(auxdev);
+	dev_pm_domain_detach(dev, true);
+
+	return 0;
+}
+
+static void auxiliary_bus_shutdown(struct device *dev)
+{
+	struct auxiliary_driver *auxdrv = NULL;
+	struct auxiliary_device *auxdev;
+
+	if (dev->driver) {
+		auxdrv = to_auxiliary_drv(dev->driver);
+		auxdev = to_auxiliary_dev(dev);
+	}
+
+	if (auxdrv && auxdrv->shutdown)
+		auxdrv->shutdown(auxdev);
+}
+
+static struct bus_type auxiliary_bus_type = {
+	.name = "sxe2_auxiliary",
+	.probe = auxiliary_bus_probe,
+	.remove = auxiliary_bus_remove,
+	.shutdown = auxiliary_bus_shutdown,
+	.match = auxiliary_match,
+	.uevent = auxiliary_uevent,
+	.pm = &auxiliary_dev_pm_ops,
+};
+
+int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev)
+{
+	struct device *dev = &auxdev->dev;
+
+	if (!dev->parent) {
+		pr_err("auxiliary_device has a NULL dev->parent\n");
+		return -EINVAL;
+	}
+
+	if (!auxdev->name) {
+		pr_err("auxiliary_device has a NULL name\n");
+		return -EINVAL;
+	}
+
+	dev->bus = &auxiliary_bus_type;
+	device_initialize(&auxdev->dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sxe2_auxiliary_device_init);
+
+int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
+{
+	struct device *dev = &auxdev->dev;
+	int ret;
+
+	if (!modname) {
+		dev_err(dev, "auxiliary device modname is NULL\n");
+		return -EINVAL;
+	}
+
+	ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
+	if (ret) {
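+		/*
+		 * dev_set_name() can fail (typically -ENOMEM), leaving the
+		 * device unnamed, so bail out before device_add(). The name
+		 * built here, "<modname>.<name>.<id>", is what
+		 * auxiliary_match_id() compares driver id tables against
+		 * after stripping the trailing ".<id>".
+		 */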
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret) + dev_err(dev, "adding auxiliary device failed!: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_device_add); + +#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, + const void *data)) +#else +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + void *data, + int (*match)(struct device *dev, + void *data)) +#endif +{ + struct device *dev; + + dev = bus_find_device(&auxiliary_bus_type, start, data, match); + if (!dev) + return NULL; + + return to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_find_device); + +int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + int ret; + + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + if (!auxdrv->driver.name) + return -ENOMEM; + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + ret = driver_register(&auxdrv->driver); + if (ret) + kfree(auxdrv->driver.name); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_driver_register); + +void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_driver_unregister); + +static int __init auxiliary_bus_init(void) +{ + return bus_register(&auxiliary_bus_type); +} + +static void __exit auxiliary_bus_exit(void) +{ + bus_unregister(&auxiliary_bus_type); +} + +module_init(auxiliary_bus_init); +module_exit(auxiliary_bus_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Auxiliary Bus Standalone"); +MODULE_AUTHOR("linux.tucana@Stars Micro System.com"); diff --git a/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_bus.h b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_bus.h new file mode 100644 index 0000000000000000000000000000000000000000..b33cdae94dc7bfb0aaa294714546a6c0a6b518b7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_bus.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: auxiliary_bus.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#include +#include +#include +#include "auxiliary_compat.h" + +#ifndef HAVE_AUXILIARY_DEVICE_ID +#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_MODULE_PREFIX "sxe2_auxiliary:" +struct auxiliary_device_id { + char name[AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; +#endif + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *auxdev); + void (*shutdown)(struct auxiliary_device *auxdev); + int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); + int (*resume)(struct auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; +}; + +static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev) +{ + return container_of(dev, struct auxiliary_device, dev); +} + +static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct auxiliary_driver, driver); +} + +int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev); +#define auxiliary_device_init(auxdev) sxe2_auxiliary_device_init(auxdev) + +int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); + +#define auxiliary_device_add(auxdev) __sxe2_auxiliary_device_add(auxdev, KBUILD_MODNAME) + +static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) +{ + put_device(&auxdev->dev); +} + +static inline void auxiliary_device_delete(struct auxiliary_device *auxdev) +{ + device_del(&auxdev->dev); +} + +int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, + const char *modname); +#define auxiliary_driver_register(auxdrv) \ + __sxe2_auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); + +#define auxiliary_driver_unregister(auxdrv) sxe2_auxiliary_driver_unregister(auxdrv) + +#define module_auxiliary_driver(__auxiliary_driver) \ + module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) + +#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, + const void *data)); +#else +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + void *data, + int (*match)(struct device *dev, + void *data)); +#endif + +#define auxiliary_find_device sxe2_auxiliary_find_device + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_compat.h b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..fb588676c19b21e3d4acbe4f750cbf5435906520 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/auxiliary/auxiliary_compat.h @@ -0,0 +1,44 @@ +#ifndef __AUXILIARY_COMPAT_H__ +#define __AUXILIARY_COMPAT_H__ + +#include +#include + +#include "sxe2_compat_gcc.h" + +#include "sxe2_compat_inc.h" + +#include "sxe2_compat.h" + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int 
_kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/base/cdev/sxe2_cdev.h b/drivers/net/ethernet/linkdata/sxe2/base/cdev/sxe2_cdev.h new file mode 100644 index 0000000000000000000000000000000000000000..31ed4bb402520e38ae026774651f32f4a15c9113 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/cdev/sxe2_cdev.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cdev.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CDEV_H__ +#define __SXE2_CDEV_H__ + +#include + +struct sxe2_cdev_info { + struct cdev cdev; + dev_t dev_no; + struct device *device; + struct semaphore cdev_sem; +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.c b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..6d4b404ea0b6af8d5113bd7ded65c848562e0b9b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_log.h" +#include "sxe2_compat.h" + +#ifdef NEED_DEFINE_ETHTOOL_SPRINTF +void ethtool_sprintf_compat(u8 **data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + *data += ETH_GSTRING_LEN; +} +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.h b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..d4ecdd7868fb210aa7ecc9efa6e5bc42eb2a4803 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat.h @@ -0,0 +1,739 @@ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_compat.h + * @author: Linkdata + * @date: 2026.04.18 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_H__ +#define __SXE2_COMPAT_H__ + +#include "sxe2_compat_gcc.h" +#include +#include +#include "sxe2_compat_flow_offload.h" + +#include +#include +#include +#include + +#define HAVE_MACSEC_SUPPORT +#define SUPPORT_ETHTOOL_GET_RMON_STATS +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NETDEV_BPF_XSK_POOL +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_RXQ +#define HAVE_AF_XDP_ZC_SUPPORT +#define XSK_UMEM_RETURNS_XDP_DESC +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_AF_XDP_NETDEV_UMEM +#define SUPPORTED_COALESCE_PARAMS +#define IPSEC_HAVE_REAL_DEV +#define SUPPORTED_FLASH_UPDATE_PARAMS +#define HAVE_NDO_ETH_IOCTL +#define HAVE_FLOW_BLOCK_API +#define HAVE_FLOW_INDR_BLOCK_API +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_FLOW_OFFLOAD_H +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_INDIR_BLOCK +#define HAVE_TC_FLOWER_ENC +#define HAVE_VXLAN_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_GRETAP_TYPE +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#define HAVE_XDP_BUFF_IN_XDP_H +#define HAVE_SCTP +#define HAVE_METADATA_PORT_INFO +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_NDO_OFFLOAD_STATS +#define HAVE_XDP_SUPPORT +#define HAVE_NETDEV_MIN_MAX_MTU +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_NETDEV_SB_DEV +#define HAVE_ETHTOOL_FLOW_RSS + +#define NEED_INCLUDE_NETDEV_RX_QUEUE_H +#define NEED_NAPI_ALLOC_SKB +#define RANDOM_ETHER_ADDR_RENAME +#undef HAVE_ETHTOOL_RXFH_PARAM +#define SUPPORT_MACSEC_INFO_STRUCT +#define SUPPORTED_ETHTOOL_EEPROM_BY_PAGE +#define SXE2_KERNEL_MATCHED +#define VM_FLAGS_ONLY_READ +#define IOMMU_MAP_6_PARAMS +#define PIN_USER_PAGE_6_PARAMS +#define NOT_SUPP_IOMMU_CAPABLE + +#ifdef RANDOM_ETHER_ADDR_RENAME +#define random_ether_addr eth_random_addr +#endif + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#endif + +#ifdef XDP_BUFF_TO_FRAME_RENAME +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif + +#ifdef NEED_XSK_UMEM_RELEASE_RENAME +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif + +#ifdef NEED_DEFINE_PCI_AER_CLEAR_NONFATAL_STATUS +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifdef NEED_FLOW_CLS_OFFLOAD +struct tc_cls_flower_offload; +#define flow_cls_offload tc_cls_flower_offload +struct tc_block_offload; +#define flow_block_offload tc_block_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#endif + +#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#endif 
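+
+/*
+ * Example (illustrative; the consumer below is hypothetical, not taken from
+ * the driver sources): the HAVE_xxx/NEED_xxx switches above let flower
+ * offload code be written against the current API names only:
+ *
+ *	static int sxe2_setup_cls_flower(struct flow_cls_offload *f)
+ *	{
+ *		switch (f->command) {
+ *		case FLOW_CLS_REPLACE:
+ *			return 0;	// install the filter
+ *		case FLOW_CLS_DESTROY:
+ *			return 0;	// remove the filter
+ *		default:
+ *			return -EOPNOTSUPP;
+ *		}
+ *	}
+ *
+ * On kernels that predate the flow_cls_* names, the defines above map these
+ * back to tc_cls_flower_offload and the TC_CLSFLOWER_* commands.
+ */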
+
+#ifdef NEED_PCI_AER_CLEAR_NONFATAL_STATUS
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#endif
+
+#ifdef NEED_FLOW_BLOCK_BINDER_TYPE
+#define FLOW_BLOCK_BINDER_TYPE_UNSPEC TCF_BLOCK_BINDER_TYPE_UNSPEC
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS
+#endif
+
+#ifdef NEED_FLOW_BLOCK_BIND
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+#endif
+
+#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif
+
+#ifdef NEED_XDP_CONVERT_BUFF_TO_FRAME
+#define xdp_convert_buff_to_frame convert_to_xdp_frame
+#endif
+
+#ifndef NETIF_NAPI_ADD_API_NEED_4_PARAMS
+static inline void
+netif_napi_add_compat(struct net_device *dev, struct napi_struct *napi,
+		      int (*poll)(struct napi_struct *, int), int weight)
+{
+	netif_napi_add(dev, napi, poll);
+}
+
+#ifdef netif_napi_add
+#undef netif_napi_add
+#endif
+#define netif_napi_add(dev, napi, poll, weight) netif_napi_add_compat(dev, napi, poll, weight)
+#endif
+
+#ifdef BPF_WARN_INVALID_XDP_ACTION_NEED_1_PARAM
+static inline void
+bpf_warn_invalid_xdp_action_compat(struct net_device *dev, struct bpf_prog *prog, u32 act)
+{
+	bpf_warn_invalid_xdp_action(act);
+}
+
+#ifdef bpf_warn_invalid_xdp_action
+#undef bpf_warn_invalid_xdp_action
+#endif
+#define bpf_warn_invalid_xdp_action(dev, prog, act) \
+	bpf_warn_invalid_xdp_action_compat(dev, prog, act)
+#endif
+
+#ifndef CLASS_CREATE_API_NEED_2_PARAMS
+static inline struct class *class_create_compat(struct module *owner, const char *devname)
+{
+	return class_create(devname);
+}
+
+#ifdef class_create
+#undef class_create
+#endif
+#define class_create(owner, devname) class_create_compat(owner, devname)
+#endif
+
+#ifdef XDP_RXQ_INFO_REG_NEED_3_PARAMS
+static inline int
+xdp_rxq_info_reg_compat(struct xdp_rxq_info *xdp_rxq, struct net_device *dev,
+			u32 queue_index, unsigned int __always_unused napi_id)
+{
+	return xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
+}
+
+#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \
+	xdp_rxq_info_reg_compat(xdp_rxq, dev, queue_index, napi_id)
+#endif
+
+#ifdef DEVLINK_ALLOC_NEED_2_PARAMS
+static inline struct devlink *
+devlink_alloc_compat(const struct devlink_ops *ops, size_t priv_size,
+		     struct device * __always_unused dev)
+{
+	return devlink_alloc(ops, priv_size);
+}
+
+#define devlink_alloc(ops, priv_size, dev) devlink_alloc_compat(ops, priv_size, dev)
+#endif
+
+#ifdef XDP_DO_FLUSH_MAP_DROP
+#define xdp_do_flush_map xdp_do_flush
+#endif
+
+#ifdef HAVE_STRSCPY
+#define SXE2_STRCPY strscpy
+#else
+#define SXE2_STRCPY strlcpy
+#endif
+
+#ifdef NEED_NAPI_ALLOC_SKB
+#define __napi_alloc_skb(napi, len, gfp_mask) napi_alloc_skb(napi, len)
+#endif
+
+#ifdef NEED_NAPI_BUILD_SKB
+#define napi_build_skb(data, frag_size) build_skb(data, frag_size)
+#endif
+
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER)))
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifdef NEED_DEFINE_BITS_TO_U32
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#endif
+
+#ifdef NEED_XSK_UMEM_GET_RX_FRAME_SIZE
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+#ifndef xsk_umem_get_rx_frame_size
+static inline u32
+xsk_umem_get_rx_frame_size_compat(struct xdp_umem *umem)
+{
+	return umem->chunk_size_nohr - XDP_PACKET_HEADROOM;
+}
+
+#define xsk_umem_get_rx_frame_size xsk_umem_get_rx_frame_size_compat
+#endif
+#endif
+#endif
+
+#ifdef NEED_DEFINE_ETHTOOL_SPRINTF
+#ifndef ethtool_sprintf
+void ethtool_sprintf_compat(u8 **data, const char *fmt, ...);
+#define ethtool_sprintf ethtool_sprintf_compat
+#endif
+#endif
+
+#ifdef NEED_DEFINE_KREALLOC_ARRAY
+static inline void *__must_check krealloc_array(void *p,
+						size_t new_n,
+						size_t new_size,
+						gfp_t flags)
+{
+	size_t bytes;
+
+	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+		return NULL;
+
+	return krealloc(p, bytes, flags);
+}
+#endif
+
+#ifdef NEED_DEFINE_DEV_PAGE_IS_REUSABLE
+static inline bool dev_page_is_reusable(struct page *page)
+{
+	return likely(page_to_nid(page) == numa_mem_id() &&
+		      !page_is_pfmemalloc(page));
+}
+#endif
+
+#ifdef NEED_DEFINE_NET_PREFETCH
+static inline void net_prefetch(void *p)
+{
+	prefetch(p);
+#if L1_CACHE_BYTES < 128
+	prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+#endif
+
+#ifdef NEED_DEFINE_XDP_PREPARE_BUFF
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+		 int headroom, int data_len, const bool meta_valid)
+{
+	unsigned char *data = hard_start + headroom;
+
+	xdp->data_hard_start = hard_start;
+	xdp->data = data;
+	xdp->data_end = data + data_len;
+	xdp->data_meta = meta_valid ? data : data + 1;
+}
+#endif
+
+#ifdef NEED_DEFINE_ETH_HW_ADDR_SET
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+	ether_addr_copy(dev->dev_addr, addr);
+}
+#endif
+
+#ifdef NEED_DEFINE_MUL_U64_U64_DIV_U64
+static inline u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+	u64 res = 0, div, rem;
+	int shift;
+
+	if (ilog2(a) + ilog2(b) > 62) {
+		div = div64_u64_rem(b, c, &rem);
+		res = div * a;
+		b = rem;
+
+		shift = ilog2(a) + ilog2(b) - 62;
+		if (shift > 0) {
+			b >>= shift;
+			c >>= shift;
+			if (!c)
+				return res;
+		}
+	}
+
+	return res + div64_u64(a * b, c);
+}
+#endif
+
+#ifdef NEED_DEFINE_FLEX_ARRAY_SIZE
+#ifndef array_size
+static inline size_t size_mul(size_t factor1, size_t factor2)
+{
+	size_t bytes;
+
+	if (check_mul_overflow(factor1, factor2, &bytes))
+		return SIZE_MAX;
+
+	return bytes;
+}
+
+#define array_size(a, b) size_mul(a, b)
+#endif
+
+#undef flex_array_size
+#define flex_array_size(p, member, count) \
+	array_size((count), sizeof(*(p)->member) + __must_be_array((p)->member))
+
+#endif
+
+#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE
+#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO
+#include <net/pkt_cls.h>
+static inline int flow_block_cb_setup_simple(struct flow_block_offload *f,
+					     struct list_head __always_unused *driver_list,
+					     tc_setup_cb_t *cb,
+					     void *cb_ident, void *cb_priv,
+					     bool ingress_only)
+{
+	if (ingress_only &&
+	    f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv,
+					     f->extack);
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, cb, cb_ident);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+#endif
+#endif
+
+#ifdef NEED_DEFINE_KFREE_SENSITIVE
+static inline void kfree_sensitive(const void *p)
+{
+	size_t ks;
+	void *mem = (void *)p;
+
+	ks = ksize(mem);
+	if (ks)
+		memzero_explicit(mem, ks);
+	kfree(mem);
+}
+#endif
+
+#ifdef NEED_DEFINE_NETIF_IS_GENEVE
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
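+	/*
+	 * A geneve netdev is identified purely by the kind string of its
+	 * rtnl_link_ops; testing the pointer first also covers devices that
+	 * were not created through the rtnl_link API (rtnl_link_ops == NULL).
+	 */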
+	return dev->rtnl_link_ops &&
+	       !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+#endif
+
+#ifdef NEED_DEFINE_SKB_FRAG_OFF
+#define skb_frag_off(frag) ((frag)->page_offset)
+#endif
+
+#ifndef DIV_ROUND_DOWN_ULL
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+#endif
+
+#ifndef ETH_P_LLDP
+#define ETH_P_LLDP 0x88CC
+#endif
+
+#ifdef NEED_PTP_SYSTEM_TIMESTAMP
+struct ptp_system_timestamp {
+	struct timespec64 pre_ts;
+	struct timespec64 post_ts;
+};
+
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp *sts) { }
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp *sts) { }
+#endif
+
+#ifdef NEED_PTP_SYSTEM_TIMESTAMP_INTERFACE
+#include <linux/ptp_clock_kernel.h>
+static inline void
+ptp_read_system_prets(struct ptp_system_timestamp *sts) { }
+
+static inline void
+ptp_read_system_postts(struct ptp_system_timestamp *sts) { }
+#endif
+
+#ifdef HAVE_SKB_XMIT_MORE
+#define netdev_xmit_more() (first_buf->skb->xmit_more)
+#endif
+
+#ifdef NEED_NETDEV_TX_SENT_QUEUE
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+					  unsigned int bytes,
+					  bool xmit_more)
+{
+	if (xmit_more) {
+#ifdef CONFIG_BQL
+		dql_queued(&dev_queue->dql, bytes);
+#endif
+		return netif_tx_queue_stopped(dev_queue);
+	}
+	netdev_tx_sent_queue(dev_queue, bytes);
+	return true;
+}
+#endif
+
+#ifdef ETH_GET_HEADLEN_NEED_2_PRAMS
+static inline u32 eth_get_headlen_compat(const struct net_device __always_unused *dev,
+					 void *data, unsigned int len)
+{
+	return eth_get_headlen(data, len);
+}
+
+#define eth_get_headlen(dev, data, len) eth_get_headlen_compat(dev, data, len)
+#endif
+
+#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, 0, gfp);
+}
+
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, 0, gfp);
+}
+
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, 0, max, gfp);
+}
+
+static inline int
+ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp)
+{
+	return ida_simple_get(ida, min, max, gfp);
+}
+
+static inline void ida_free(struct ida *ida, unsigned int id)
+{
+	ida_simple_remove(ida, id);
+}
+#endif
+
+#ifndef struct_size
+#define flex_array_size(p, member, count) \
+	array_size((count), sizeof(*(p)->member) + __must_be_array((p)->member))
+
+#endif
+
+#ifdef NEED_BITMAP_FROM_ARR32
+#if BITS_PER_LONG == 64
+static inline void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+				     unsigned int nbits)
+{
+	unsigned int i, halfwords;
+
+	halfwords = DIV_ROUND_UP(nbits, 32);
+	for (i = 0; i < halfwords; i++) {
+		bitmap[i / 2] = (unsigned long)buf[i];
+		if (++i < halfwords)
+			bitmap[i / 2] |= ((unsigned long)buf[i]) << 32;
+	}
+
+	if (nbits % BITS_PER_LONG)
+		bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+#else
+#define bitmap_from_arr32(bitmap, buf, nbits) \
+	bitmap_copy_clear_tail((unsigned long *)(bitmap), \
+			       (const unsigned long *)(buf), (nbits))
+#endif
+#endif
+
+#ifdef NEED_BITMAP_TO_ARR32
+#if BITS_PER_LONG == 64
+static inline void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+				   unsigned int nbits)
+{
+	unsigned int i, halfwords;
+
+	halfwords = DIV_ROUND_UP(nbits, 32);
+	for (i = 0; i < halfwords; i++) {
+		buf[i] = (u32)(bitmap[i / 2] &
UINT_MAX); + if (++i < halfwords) + buf[i] = (u32)(bitmap[i / 2] >> 32); + } + + if (nbits % BITS_PER_LONG) + buf[halfwords - 1] &= (u32)(UINT_MAX >> ((-nbits) & 31)); +} +#else +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *)(buf), \ + (const unsigned long *)(bitmap), (nbits)) +#endif +#endif + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif + +#ifdef NEED_DEFINE_PAGE_FRAG_FREE +#define page_frag_free __free_page_frag +#endif + +#ifdef NEED_ASSIGN_BIT +static inline void assign_bit(long nr, unsigned long *addr, bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} +#endif + +#ifdef NEED_DMA_ATTRS +#include +static inline +dma_addr_t __kc_dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} + +#define dma_map_page_attrs __kc_dma_map_page_attrs + +static inline +void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} + +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + WARN_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif + +#ifdef FIXUP_USER_FAULT_5_PARAMS +static inline int +fixup_user_fault_compat(struct mm_struct *mm, + unsigned long address, unsigned int fault_flags, + bool *unlocked) +{ + return fixup_user_fault(NULL, mm, address, fault_flags, unlocked); +} + +#ifdef fixup_user_fault +#undef fixup_user_fault +#endif +#define fixup_user_fault(mm, address, fault_flags, unlocked) \ + fixup_user_fault_compat((mm), (address), (fault_flags), (unlocked)) +#endif + +#ifdef NOT_SUPP_MMAP_READ_LOCK +static inline void mmap_read_lock(struct mm_struct *mm) +{ + down_read(&mm->mmap_sem); +} + +static inline void mmap_read_unlock(struct mm_struct *mm) +{ + up_read(&mm->mmap_sem); +} +#endif + +#ifdef NOT_SUPP_UNTAGGED_ADDR +#include +#ifndef untagged_addr +#define untagged_addr(addr) (addr) +#endif +#endif + +#ifdef NOT_SUPP_VMA_LOOKUP +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + return find_vma_intersection(mm, addr, addr + 1); +} +#endif + +#ifdef PIN_USER_PAGE_8_PARAMS +static inline long pin_user_pages_remote_compat(struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked) +{ + return pin_user_pages_remote(NULL, mm, start, nr_pages, gup_flags, pages, vmas, locked); +} + +#ifdef pin_user_pages_remote +#undef pin_user_pages_remote +#endif +#define pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked) \ + pin_user_pages_remote_compat(mm, start, nr_pages, gup_flags, pages, vmas, locked) +#elif defined(PIN_USER_PAGE_6_PARAMS) +static inline long pin_user_pages_remote_compat(struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked) +{ + (void)vmas; + return pin_user_pages_remote(mm, start, nr_pages, 
gup_flags, pages, locked); +} + +#ifdef pin_user_pages_remote +#undef pin_user_pages_remote +#endif +#define pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked) \ + pin_user_pages_remote_compat(mm, start, nr_pages, gup_flags, pages, vmas, locked) +#endif + +#ifdef EVENTFD_SIGNAL_1PARAM +static inline int eventfd_signal_compat(struct eventfd_ctx *ctx, int n) +{ + (void)eventfd_signal(ctx); + + return n; +} + +#ifdef eventfd_signal +#undef eventfd_signal +#endif +#define eventfd_signal(ctx, n) \ + eventfd_signal_compat(ctx, n) +#endif + +#ifndef VM_FLAGS_ONLY_READ +static inline void vm_flags_set(struct vm_area_struct *vma, + vm_flags_t flags) +{ + vma->vm_flags |= flags; +} +#endif + +#ifdef IOMMU_MAP_6_PARAMS +static inline int iommu_map_compat(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL_ACCOUNT); +} + +#ifdef iommu_map +#undef iommu_map +#endif +#define iommu_map(domain, iova, paddr, size, prot) \ + iommu_map_compat(domain, iova, paddr, size, prot) +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.c b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.c new file mode 100644 index 0000000000000000000000000000000000000000..1a3875434eafdf882b595cde168f43bd21705b6a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat_dim.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2_compat_dim.h" +#ifdef NEED_COMPAT_DIM +#include +#include + +bool dim_on_top(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + return true; + case DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +void dim_turn(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + dim->tune_state = DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case DIM_GOING_LEFT: + dim->tune_state = DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +void dim_park_on_top(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = DIM_PARKING_ON_TOP; +} + +void dim_park_tired(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = DIM_PARKING_TIRED; +} + +void dim_calc_stats(struct dim_sample *start, const struct dim_sample *end, + struct dim_stats *curr_stats) +{ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr, + start->comp_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, + delta_us); + curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us); + if (curr_stats->epms != 0) + curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(curr_stats->cpms * 100, + curr_stats->epms); + else + curr_stats->cpe_ratio = 0; 
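+	/*
+	 * The rates computed above are per-millisecond figures (packets,
+	 * bytes, events, completions); cpe_ratio is completions per event
+	 * scaled by 100. All of them feed net_dim_stats_compare() below.
+	 */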
+} + +static int net_dim_step(struct dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return DIM_TOO_TIRED; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return DIM_STEPPED; +} + +static void net_dim_exit_parking(struct dim *dim) +{ + dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT; + net_dim_step(dim); +} + +static int net_dim_stats_compare(struct dim_stats *curr, + struct dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->epms) + return DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + return DIM_STATS_SAME; +} + +static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case DIM_GOING_RIGHT: + case DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_BETTER) + dim_turn(dim); + + if (dim_on_top(dim)) { + dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case DIM_ON_EDGE: + dim_park_on_top(dim); + break; + case DIM_TOO_TIRED: + dim_park_tired(dim); + break; + } + + break; + } + + if (prev_state != DIM_PARKING_ON_TOP || + dim->tune_state != DIM_PARKING_ON_TOP) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +void net_dim(struct dim *dim, const struct dim_sample end_sample) +{ + struct dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < DIM_NEVENTS) + break; + dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + fallthrough; + case DIM_START_MEASURE: + dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr, + end_sample.byte_ctr, &dim->start_sample); + dim->state = DIM_MEASURE_IN_PROGRESS; + break; + case DIM_APPLY_NEW_PROFILE: + break; + } +} +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.h b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..8b08f951087b4e350aec5c200049e7f815f0a7b6 --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_dim.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_compat_dim.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _SXE2_COMPAT_DIM_H_
+#define _SXE2_COMPAT_DIM_H_
+
+#ifdef NEED_COMPAT_DIM
+#include
+#include
+#include
+#include
+#include
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+
+#define IS_SIGNIFICANT_DIFF(val, ref) ({ \
+	typeof(val) _val = (val); \
+	typeof(ref) _ref = (ref); \
+	(_ref != 0) ? ((((100UL * abs(_val - _ref))) / _ref) > 10) : 0; \
+})
+
+#define DIM_NEVENTS 64
+
+#define BIT_GAP(bits, end, start) ({ \
+	typeof(bits) _bits = (bits); \
+	typeof(end) _end = (end); \
+	typeof(start) _start = (start); \
+	(((_end) - (_start)) + BIT_ULL(_bits)) & (BIT_ULL(_bits) - 1); \
+})
+
+struct dim_sample {
+	ktime_t time;
+	u32 pkt_ctr;
+	u32 byte_ctr;
+	u16 event_ctr;
+	u32 comp_ctr;
+};
+
+struct dim_stats {
+	int ppms;
+	int bpms;
+	int epms;
+	int cpms;
+	int cpe_ratio;
+};
+
+struct dim {
+	u8 state;
+	struct dim_stats prev_stats;
+	struct dim_sample start_sample;
+	struct dim_sample measuring_sample;
+	struct work_struct work;
+	void *priv;
+	u8 profile_ix;
+	u8 mode;
+	u8 tune_state;
+	u8 steps_right;
+	u8 steps_left;
+	u8 tired;
+};
+
+enum dim_tune_state {
+	DIM_PARKING_ON_TOP,
+	DIM_PARKING_TIRED,
+	DIM_GOING_RIGHT,
+	DIM_GOING_LEFT,
+};
+
+enum dim_stats_state {
+	DIM_STATS_WORSE,
+	DIM_STATS_SAME,
+	DIM_STATS_BETTER,
+};
+
+enum dim_step_result {
+	DIM_STEPPED,
+	DIM_TOO_TIRED,
+	DIM_ON_EDGE,
+};
+
+enum dim_cq_period_mode {
+	DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+	DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+	DIM_CQ_PERIOD_NUM_MODES
+};
+
+enum dim_state {
+	DIM_START_MEASURE,
+	DIM_MEASURE_IN_PROGRESS,
+	DIM_APPLY_NEW_PROFILE,
+};
+
+bool dim_on_top(struct dim *dim);
+
+void dim_turn(struct dim *dim);
+
+void dim_park_on_top(struct dim *dim);
+
+void dim_park_tired(struct dim *dim);
+
+void dim_calc_stats(struct dim_sample *start, const struct dim_sample *end,
+		    struct dim_stats *curr_stats);
+
+static inline void
+dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
+{
+	s->time = ktime_get();
+	s->pkt_ctr = packets;
+	s->byte_ctr = bytes;
+	s->event_ctr = event_ctr;
+}
+
+void net_dim(struct dim *dim, const struct dim_sample end_sample);
+#endif
+#endif
+
diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_flow_offload.h b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_flow_offload.h
new file mode 100644
index 0000000000000000000000000000000000000000..19d4bfd89013689a20303041baa8718089f80e44
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_flow_offload.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_compat_flow_offload.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2_COMPAT_FLOW_OFFLOAD_H__
+#define __SXE2_COMPAT_FLOW_OFFLOAD_H__
+
+#ifdef NEED_FLOW_MATCH
+#include <net/pkt_cls.h>
+#include <net/flow_dissector.h>
+
+struct flow_match {
+	struct flow_dissector *dissector;
+	void *mask;
+	void *key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+#endif
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+#ifdef HAVE_FLOW_DISSECTOR_KEY_IP
+struct flow_match_ip {
+	struct flow_dissector_key_ip *key, *mask;
+};
+#endif
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+#ifdef HAVE_TC_FLOWER_ENC
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+#endif
+
+struct flow_rule {
+	struct flow_match match;
+};
+
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+	return (struct flow_rule *)&tc_flow_cmd->dissector;
+}
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+				       enum flow_dissector_key_id key)
+{
+	return dissector_uses_key(rule->match.dissector, key);
+}
+
+#define FLOW_DISSECTOR_MATCH(_rule, _type, _out) do { \
+	const struct flow_match *_m = &(_rule)->match; \
+	struct flow_dissector *_d = (_m)->dissector; \
+	(_out)->key = skb_flow_dissector_target(_d, (_type), (_m)->key); \
+	(_out)->mask = skb_flow_dissector_target(_d, (_type), (_m)->mask); \
+} while (0)
+
+static inline void
+flow_rule_match_basic(const struct flow_rule *rule,
+		      struct flow_match_basic *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+
+static inline void
+flow_rule_match_control(const struct flow_rule *rule,
+			struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+
+static inline void
+flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			  struct flow_match_eth_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+static inline void
+flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+#endif
+
+static inline void
+flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+			   struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+
+static inline void
+flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+			   struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+
+#ifdef HAVE_FLOW_DISSECTOR_KEY_IP
+static inline void
+flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
+}
+#endif
+
+static inline void
+flow_rule_match_ports(const struct flow_rule *rule,
+		      struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule,
FLOW_DISSECTOR_KEY_PORTS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +static inline void +flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +static inline void +flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif +#endif + +static inline void +flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void +flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} +#endif + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +static inline bool __must_check __must_check_overflow(bool overflow) +{ + return unlikely(overflow); +} + +#define check_add_overflow(a, b, d) \ + __must_check_overflow(__builtin_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __must_check_overflow(__builtin_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __must_check_overflow(__builtin_mul_overflow(a, b, d)) +#endif + +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_gcc.h b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_gcc.h new file mode 100644 index 0000000000000000000000000000000000000000..992156294dccb2bfa209e61679c38bdeceb3b480 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_gcc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat_gcc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_GCC_H__ +#define __SXE2_COMPAT_GCC_H__ + +#ifndef SXE2_TEST +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) +#endif +#else +# define fallthrough do {} while (0) +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_inc.h b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_inc.h new file mode 100644 index 0000000000000000000000000000000000000000..2b2e034f0919a6b869702e154be197e31870e581 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/compat/sxe2_compat_inc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_compat_inc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_INC_H__ +#define __SXE2_COMPAT_INC_H__ + +#include + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.c b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.c new file mode 100644 index 0000000000000000000000000000000000000000..7329824e40d0b01cb6e4210f3e8cc80acf581129 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.c @@ -0,0 +1,828 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_cdev.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_log.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_ver_compat.h" + +#define SXE2_MAX_COM_CMDS (1) + +STATIC dev_t sxe2_com_cdev_major; +STATIC struct class *sxe2_com_cdev_class; +STATIC struct sxe2_com_dev_mgr sxe2_com_mgr; +STATIC struct mutex sxe2_com_minor_lock; +STATIC DEFINE_IDR(sxe2_com_minor_idr); + +static struct sxe2_com_dev_mgr *sxe2_com_dev_get(void) +{ + return &sxe2_com_mgr; +} + +STATIC s32 sxe2_com_handshake(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_cmd_common_hdr hdr = {}; + u32 arg_sz; + u32 dpdk_ver; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_HANDSHAKE); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_HANDSHAKE); + goto l_unlock; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&hdr, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + LOG_ERROR_BDF_COM("copy user arg failed ver: 0x%x arg_sz:%u.\n", com_ctxt->dpdk_ver, arg_sz); + goto l_unlock; + } + + if (SXE2_MK_VER_MAJOR(hdr.dpdk_ver) != SXE2_MK_VER_MAJOR(SXE2_COM_VER)) { + LOG_ERROR_BDF_COM("incompatible dpdk ver: 0x%x.\n", hdr.dpdk_ver); + ret = -EINVAL; + goto l_unlock; + } + + dpdk_ver = hdr.dpdk_ver; + + memset(&hdr, 0, sizeof(hdr)); + hdr.drv_ver = SXE2_COM_VER; + hdr.msg_len = arg_sz; + + if (SXE2_COM_IOMMU_SUPP(com_ctxt)) + hdr.cap |= (1 << SXE2_COM_CAP_IOMMU_MAP); + + if (copy_to_user((void __user *)arg, &hdr, arg_sz)) { + ret = -EFAULT; + LOG_ERROR_BDF_COM("copy user hdr failed ver: 0x%x.\n", com_ctxt->dpdk_ver); + goto l_unlock; + } + + com_ctxt->dpdk_ver = dpdk_ver; + com_ctxt->is_handshake = true; + +l_unlock: + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +static s32 sxe2_com_cmd_exec(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret; + struct sxe2_drv_cmd_params param = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_PASSTHROUGH); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_PASSTHROUGH); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) + return -EFAULT; + + if (!com_ctxt->ops || !com_ctxt->ops->cmd_exec) + return -EFAULT; + + ret = com_ctxt->ops->cmd_exec(com_ctxt->adapter, &com_ctxt->obj, ¶m); + + LOG_DEBUG_BDF_COM("result:%d\n", ret); + + return ret; +} + +STATIC long sxe2_com_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + long ret = 0; + struct sxe2_com_context *com_ctxt = (struct sxe2_com_context 
*)filep->private_data;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	if (filep == NULL || cmd == 0 || arg == 0 || com_ctxt == NULL) {
+		LOG_ERROR_BDF_COM("filep=%pK cmd=%u arg=%lu, com_ctxt=%pK\n", filep, cmd, arg, com_ctxt);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF_COM("driver dpdk cmd=%x, arg=0x%lx\n", cmd, arg);
+
+	mutex_lock(&dev_mgr->lock);
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS) {
+		mutex_unlock(&dev_mgr->lock);
+		ret = -EACCES;
+		goto l_end;
+	}
+	mutex_unlock(&dev_mgr->lock);
+
+	if (!com_ctxt->is_handshake && cmd != SXE2_COM_CMD_HANDSHAKE) {
+		LOG_WARN_BDF_COM("please handshake first\n");
+		ret = -ERESTARTSYS;
+		goto l_end;
+	}
+
+	if (down_interruptible(&com_ctxt->cdev_info.cdev_sem)) {
+		LOG_WARN_BDF_COM("ioctl concurrency full\n");
+		ret = -ERESTARTSYS;
+		goto l_end;
+	}
+
+	switch (cmd) {
+	case SXE2_COM_CMD_HANDSHAKE:
+		ret = sxe2_com_handshake(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_IO_IRQS_REQ:
+		ret = sxe2_com_io_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_EVT_IRQ_REQ:
+		ret = sxe2_com_event_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_EVT_CAUSE_GET:
+		ret = sxe2_com_event_cause_get(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_RST_IRQ_REQ:
+		ret = sxe2_com_reset_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_DMA_MAP:
+		ret = sxe2_com_dma_map(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_DMA_UNMAP:
+		ret = sxe2_com_dma_unmap(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_PASSTHROUGH:
+		ret = sxe2_com_cmd_exec(com_ctxt, arg);
+		break;
+	default:
+		ret = -EINVAL;
+		LOG_ERROR_BDF_COM("unknown ioctl cmd, filep=%p, cmd=%u, arg=0x%8.8lx\n",
+				  filep, cmd, arg);
+		break;
+	}
+
+	up(&com_ctxt->cdev_info.cdev_sem);
+
+l_end:
+	if (ret) {
+		LOG_ERROR_BDF_COM("driver ioctl filep=%p, cmd=%x, arg=0x%lx end, ret:%ld\n",
+				  filep, cmd, arg, ret);
+	} else {
+		LOG_DEBUG_BDF_COM("driver ioctl filep=%p, cmd=%x, arg=0x%lx end, ret:%ld\n",
+				  filep, cmd, arg, ret);
+	}
+
+	return ret;
+}
+
+static s32 sxe2_com_add_vma(struct sxe2_com_context *com_ctxt, struct vm_area_struct *vma)
+{
+	struct sxe2_com_vma_device *mmap_vma;
+
+	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+	if (!mmap_vma)
+		return -ENOMEM;
+
+	mmap_vma->vma = vma;
+	list_add(&mmap_vma->vma_next, &com_ctxt->vma.vma_list);
+
+	return 0;
+}
+
+STATIC vm_fault_t __sxe2_com_mmap(struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_vma_device *mmap_vma;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+
+	list_for_each_entry(mmap_vma, &com_ctxt->vma.vma_list, vma_next) {
+		if (mmap_vma->vma == vma)
+			goto l_out;
+	}
+
+	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		ret = VM_FAULT_SIGBUS;
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		goto l_out;
+	}
+
+	if (sxe2_com_add_vma(com_ctxt, vma)) {
+		ret = VM_FAULT_OOM;
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	}
+
+l_out:
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+	return ret;
+}
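+
+/*
+ * Revocation scheme (sketch of intent, from the code below): while the
+ * cdev is accessible, faults on a BAR mapping are resolved by remapping
+ * the real BAR via __sxe2_com_mmap(); once the device is revoked
+ * (SXE2_COM_CDEV_STATUS_UNACCESS), faults are served from driver-owned
+ * dummy pages instead, so a stale userspace mapping keeps working
+ * without touching device memory.
+ */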
+STATIC vm_fault_t __sxe2_com_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+	struct page *new_page = NULL;
+	unsigned long pfn;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+
+	if (vmf->flags & FAULT_FLAG_WRITE) {
+		new_page = com_ctxt->vma.write_page;
+		LOG_WARN_BDF_COM("write fault at VA 0x%lx\n", vmf->address);
+	} else {
+		new_page = com_ctxt->vma.read_page;
+		LOG_WARN_BDF_COM("read fault at VA 0x%lx\n", vmf->address);
+	}
+
+	pfn = page_to_pfn(new_page);
+
+	ret = vmf_insert_pfn(vma, vmf->address, pfn);
+	if (ret & VM_FAULT_ERROR) {
+		LOG_ERROR_BDF_COM("failed to insert pfn into page tables\n");
+		goto l_out;
+	}
+
+	LOG_INFO_BDF_COM("successfully handled page fault\n");
+
+l_out:
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+	return ret;
+}
+
+static vm_fault_t sxe2_com_mmap_fault(struct vm_fault *vmf)
+{
+	vm_fault_t ret = VM_FAULT_SIGBUS;
+	struct vm_area_struct *vma = vmf->vma;
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS)
+		ret = __sxe2_com_mmap_fault(vmf);
+	else
+		ret = __sxe2_com_mmap(vma);
+
+	mutex_unlock(&dev_mgr->lock);
+
+	return ret;
+}
+
+static void sxe2_com_mmap_open(struct vm_area_struct *vma)
+{
+	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void sxe2_com_mmap_clear(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_vma_device *mmap_vma;
+	struct sxe2_com_vma_device *tmp;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+	list_for_each_entry_safe(mmap_vma, tmp, &com_ctxt->vma.vma_list, vma_next) {
+		struct vm_area_struct *vma = mmap_vma->vma;
+
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+
+		list_del(&mmap_vma->vma_next);
+		kfree(mmap_vma);
+	}
+
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+}
+
+static void sxe2_com_mmap_close(struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_vma_device *mmap_vma;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+	list_for_each_entry(mmap_vma, &com_ctxt->vma.vma_list, vma_next) {
+		if (mmap_vma->vma == vma) {
+			list_del(&mmap_vma->vma_next);
+			kfree(mmap_vma);
+			break;
+		}
+	}
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+}
+
+static const struct vm_operations_struct sxe2_com_mmap_ops = {
+	.open = sxe2_com_mmap_open,
+	.close = sxe2_com_mmap_close,
+	.fault = sxe2_com_mmap_fault,
+};
+
+STATIC s32 sxe2_com_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = (struct sxe2_com_context *)filp->private_data;
+	struct pci_dev *pdev = com_ctxt->pdev;
+	u32 bar_idx;
+	u64 phys_len, req_len, pgoff, req_start;
+
+	/* VFIO-style offset encoding: the BAR index lives in the high bits
+	 * of the page offset, above SXE2_COM_PCI_OFFSET_SHIFT.
+	 */
+	bar_idx = vma->vm_pgoff >> (SXE2_COM_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+	if (bar_idx >= DEVICE_COUNT_RESOURCE)
+		return -EINVAL;
+
+	if (vma->vm_end <= vma->vm_start) {
+		LOG_WARN_BDF_COM("end:%lu not greater than start:%lu.\n", vma->vm_end, vma->vm_start);
+		return -EINVAL;
+	}
+
+	if ((vma->vm_flags & VM_SHARED) == 0)
+		return -EINVAL;
+
+	phys_len = PAGE_ALIGN(pci_resource_len(pdev, bar_idx));
+	req_len = vma->vm_end - vma->vm_start;
+	pgoff = vma->vm_pgoff & ((1U << (SXE2_COM_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (req_start + req_len > phys_len)
+		return -EINVAL;
+
+	vma->vm_private_data = com_ctxt;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_pgoff = (pci_resource_start(pdev, bar_idx) >> PAGE_SHIFT) + pgoff;
+
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+	vma->vm_ops = &sxe2_com_mmap_ops;
+
+	return 0;
+}
+
+STATIC void fill_page_with_ff(struct page *page)
+{
+	void *kaddr;
+
+	kaddr = kmap_local_page(page);
+	if (kaddr) {
+		/* Mimic the all-ones value PCI reads typically return for an
+		 * absent device, so pollers on a revoked mapping see a
+		 * familiar "device gone" pattern.
+		 */
+		memset(kaddr, 0xFF, PAGE_SIZE);
+		kunmap_local(kaddr);
+	}
+}
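+
+/*
+ * Hypothetical userspace counterpart (illustrative, not part of this
+ * patch): to map BAR <bar_idx> through sxe2_com_mmap() above, encode the
+ * index into the mmap offset:
+ *
+ *	off_t off = (off_t)bar_idx << SXE2_COM_PCI_OFFSET_SHIFT;
+ *	void *bar = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, off);
+ */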
+STATIC int sxe2_com_bar_init(struct sxe2_com_context *com_ctxt)
+{
+	int ret = 0;
+
+	com_ctxt->vma.read_page = dev_alloc_pages(0);
+	if (unlikely(!com_ctxt->vma.read_page)) {
+		LOG_ERROR_BDF_COM("failed to allocate dummy read page\n");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+	fill_page_with_ff(com_ctxt->vma.read_page);
+
+	com_ctxt->vma.write_page = dev_alloc_pages(0);
+	if (unlikely(!com_ctxt->vma.write_page)) {
+		LOG_ERROR_BDF_COM("failed to allocate dummy write page\n");
+		ret = -ENOMEM;
+		goto l_read_page_failed;
+	}
+
+	return ret;
+l_read_page_failed:
+	__free_pages(com_ctxt->vma.read_page, 0);
+l_out:
+	return ret;
+}
+
+STATIC void sxe2_com_bar_deinit(struct sxe2_com_context *com_ctxt)
+{
+	__free_pages(com_ctxt->vma.read_page, 0);
+	__free_pages(com_ctxt->vma.write_page, 0);
+}
+
+STATIC s32 sxe2_com_open(struct inode *node, struct file *filep)
+{
+	s32 ret = 0;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+	struct sxe2_com_context *com_ctxt;
+
+	com_ctxt = container_of(node->i_cdev, struct sxe2_com_context, cdev_info.cdev);
+
+	filep->private_data = com_ctxt;
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (atomic_read(&com_ctxt->ref_count) != 0) {
+		LOG_DEV_WARN_COM("only one open allowed\n");
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS) {
+		ret = -EACCES;
+		goto l_unlock;
+	}
+
+	atomic_inc(&com_ctxt->ref_count);
+
+	com_ctxt->obj.func_type = com_ctxt->func_type;
+	com_ctxt->obj.drv_type = SXE2_DPDK_DRV;
+	com_ctxt->obj.pf_id = com_ctxt->pf_id;
+	com_ctxt->obj.vf_id = com_ctxt->vf_id;
+	com_ctxt->obj.drv_id = 0;
+
+l_unlock:
+	mutex_unlock(&dev_mgr->lock);
+	return ret;
+}
+
+STATIC void sxe2_com_ctxt_clear(struct sxe2_com_context *com_ctxt)
+{
+	mutex_lock(&com_ctxt->com_lock);
+	com_ctxt->dpdk_ver = SXE2_COM_INVAL_U32;
+	com_ctxt->is_handshake = false;
+	mutex_unlock(&com_ctxt->com_lock);
+}
+
+STATIC void sxe2_com_resource_clear(struct sxe2_com_context *com_ctxt)
+{
+	sxe2_com_mmap_clear(com_ctxt);
+
+	sxe2_com_irqs_clear(com_ctxt);
+
+	sxe2_com_dma_clear(com_ctxt);
+}
+
+STATIC void sxe2_com_clear(struct sxe2_com_context *com_ctxt)
+{
+	sxe2_com_ctxt_clear(com_ctxt);
+
+	if (com_ctxt->ops && com_ctxt->ops->release)
+		com_ctxt->ops->release(com_ctxt->adapter, &com_ctxt->obj);
+
+	(void)sxe2_com_irq_notifier_call_chain(com_ctxt, SXE2_COM_EC_RESET);
+
+	sxe2_com_resource_clear(com_ctxt);
+}
+
+STATIC s32 sxe2_com_close(struct inode *node, struct file *filep)
+{
+	struct sxe2_com_context *com_ctxt = (struct sxe2_com_context *)filep->private_data;
+	s32 ref_count;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	sxe2_com_clear(com_ctxt);
+
+	mutex_lock(&dev_mgr->lock);
+	ref_count = atomic_dec_return(&com_ctxt->ref_count);
+	if (ref_count == 0)
+		wake_up(&com_ctxt->waitq);
+	mutex_unlock(&dev_mgr->lock);
+
+	return 0;
+}
+
+void sxe2_com_disable(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	if (!com_ctxt->ops || !com_ctxt->ops->com_mode_get ||
+	    (com_ctxt->ops->com_mode_get(com_ctxt->adapter) == SXE2_COM_MODULE_KERNEL))
+		return;
+
+	mutex_lock(&dev_mgr->lock);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS;
+	sxe2_com_clear(com_ctxt);
+	mutex_unlock(&dev_mgr->lock);
+}
+
+void sxe2_com_enable(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_NORMAL;
+	mutex_unlock(&dev_mgr->lock);
+}
+
+static void sxe2_com_cdev_wait_clear(struct sxe2_com_context *com_ctxt)
+{
+	s32 ret;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+	unsigned long cur_jiffies;
+
+	mutex_lock(&dev_mgr->lock);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS;
+	mutex_unlock(&dev_mgr->lock);
+
+	do {
+		cur_jiffies = msecs_to_jiffies(SXE2_COM_CMD_DFLT_TIMEOUT_MS);
+		ret = (s32)wait_event_timeout(com_ctxt->waitq, (atomic_read(&com_ctxt->ref_count) == 0),
+					      (long)cur_jiffies);
+		if (!ret) {
+			LOG_INFO_BDF_COM("cdev wait for ref count timed out.\n");
+		} else {
+			mutex_lock(&dev_mgr->lock);
+			if (atomic_read(&com_ctxt->ref_count) == 0) {
+				mutex_unlock(&dev_mgr->lock);
+				break;
+			}
+			mutex_unlock(&dev_mgr->lock);
+		}
+	} while (1);
+}
+
+static s32 sxe2_com_minor_get(s32 *dev_minor)
+{
+	s32 ret;
+
+	mutex_lock(&sxe2_com_minor_lock);
+	ret = idr_alloc(&sxe2_com_minor_idr, NULL, 0, (s32)SXE2_MAX_DEVICES_NUM, GFP_KERNEL);
+	if (ret >= 0) {
+		*dev_minor = ret;
+		ret = 0;
+	}
+	mutex_unlock(&sxe2_com_minor_lock);
+	return ret;
+}
+
+static void sxe2_com_minor_free(s32 dev_minor)
+{
+	mutex_lock(&sxe2_com_minor_lock);
+	idr_remove(&sxe2_com_minor_idr, dev_minor);
+	mutex_unlock(&sxe2_com_minor_lock);
+}
+
+const struct file_operations sxe2_com_cdev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = sxe2_com_ioctl,
+	.mmap = sxe2_com_mmap,
+	.open = sxe2_com_open,
+	.release = sxe2_com_close,
+};
+
+STATIC s32 sxe2_com_cdev_create(struct sxe2_com_context *com_ctxt)
+{
+	s32 ret;
+	s32 dev_major, dev_minor;
+	struct pci_dev *pdev = com_ctxt->pdev;
+	struct sxe2_cdev_info *cdev_info = NULL;
+
+	ret = sxe2_com_minor_get(&dev_minor);
+	if (ret) {
+		LOG_ERROR_BDF_COM("cdev minor get failed, ret=%d\n", ret);
+		goto l_get_minor_failed;
+	}
+
+	cdev_info = &com_ctxt->cdev_info;
+	dev_major = (s32)MAJOR(sxe2_com_cdev_major);
+	cdev_info->dev_no = (dev_t)MKDEV(dev_major, dev_minor);
+	cdev_init(&cdev_info->cdev, &sxe2_com_cdev_fops);
+	cdev_info->cdev.owner = THIS_MODULE;
+
+	LOG_INFO_BDF_COM("cdev_add: dev_major: %d, dev_minor: %d.\n", dev_major, dev_minor);
+
+	ret = cdev_add(&cdev_info->cdev, cdev_info->dev_no, 1);
+	if (ret) {
+		LOG_ERROR_BDF_COM("failed to add cdev dev_no=%ld, ret=%d\n", (unsigned long)cdev_info->dev_no, ret);
+		goto l_add_cdev_failed;
+	}
+
+	cdev_info->device = device_create(sxe2_com_cdev_class, NULL, cdev_info->dev_no, NULL,
+					  SXE2_COM_CHRDEV_NAME "-%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+					  pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+	if (IS_ERR(cdev_info->device)) {
+		ret = (s32)PTR_ERR(cdev_info->device);
+		LOG_ERROR_BDF_COM("failed to create device, dev_no=%ld\n", (unsigned long)cdev_info->dev_no);
+		goto l_create_dev_failed;
+	}
+
+	LOG_INFO_BDF_COM("create char dev[%p] dev_no[major:minor=%u:%u] on pci_dev[%p] belongs to class dev[%p] success\n",
+			 &cdev_info->cdev, dev_major, dev_minor, pdev, cdev_info->device);
+
+	return 0;
+
+l_create_dev_failed:
+	cdev_del(&cdev_info->cdev);
+l_add_cdev_failed:
+	sxe2_com_minor_free(dev_minor);
+l_get_minor_failed:
+	return ret;
+}
+
+STATIC void sxe2_com_cdev_delete(struct sxe2_com_context *com_ctxt)
+{
+	s32 dev_minor;
+	struct sxe2_cdev_info *cdev_info = &com_ctxt->cdev_info;
+
+	dev_minor = (s32)MINOR(cdev_info->dev_no);
+
+	sxe2_com_cdev_wait_clear(com_ctxt);
+
+	LOG_INFO_BDF_COM("delete char dev[%p], dev_no[major:minor=%u:%u]\n", &cdev_info->cdev, MAJOR(cdev_info->dev_no),
+			 dev_minor);
+
+	device_destroy(sxe2_com_cdev_class, cdev_info->dev_no);
+	cdev_del(&cdev_info->cdev);
+	sxe2_com_minor_free(dev_minor);
+}
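+
+/*
+ * With the device_create() format above, each function gets a node named
+ * after its PCI address, e.g. /dev/sxe2-dpdk-0000:3b:00.0 (the BDF shown
+ * is illustrative only).
+ */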
+STATIC s32 sxe2_com_ctxt_init_once(struct sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops)
+{
+	const char *device_name;
+	u32 device_len;
+	size_t copy_result;
+
+	com_ctxt->adapter = adapter;
+	com_ctxt->ops = ops;
+
+	com_ctxt->ops->com_ctxt_fill(adapter);
+
+	com_ctxt->com_log_param.pdev = com_ctxt->pdev;
+	com_ctxt->dma_dev.dev = &(com_ctxt->pdev->dev);
+	atomic_set(&com_ctxt->ref_count, 0);
+	com_ctxt->dpdk_mode = ops->com_mode_get(adapter);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS;
+
+	device_name = dev_name(&com_ctxt->pdev->dev);
+	device_len = (u32)(strlen(device_name) + 1);
+	copy_result =
+		SXE2_STRCPY(com_ctxt->com_log_param.dev_name, device_name, min_t(u32, device_len, SXE2_COM_DEV_NAME));
+
+	sema_init(&com_ctxt->cdev_info.cdev_sem, SXE2_MAX_COM_CMDS);
+	init_waitqueue_head(&com_ctxt->waitq);
+	mutex_init(&com_ctxt->vma.vma_lock);
+	INIT_LIST_HEAD(&com_ctxt->vma.vma_list);
+	mutex_init(&com_ctxt->dma_dev.lock);
+	INIT_LIST_HEAD(&com_ctxt->dma_dev.buffer_list);
+	INIT_LIST_HEAD(&com_ctxt->list);
+	mutex_init(&com_ctxt->irqs.lock);
+
+	mutex_init(&com_ctxt->com_lock);
+
+	sxe2_com_ctxt_clear(com_ctxt);
+
+	(void)copy_result;
+	return 0;
+}
+
+STATIC void sxe2_com_ctxt_deinit_once(struct sxe2_com_context *com_ctxt)
+{
+	com_ctxt->adapter = NULL;
+	com_ctxt->pdev = NULL;
+	com_ctxt->ops = NULL;
+	com_ctxt->dma_dev.dev = NULL;
+	com_ctxt->dpdk_ver = SXE2_COM_INVAL_U32;
+	memset(&com_ctxt->com_log_param, 0, sizeof(com_ctxt->com_log_param));
+
+	mutex_destroy(&com_ctxt->com_lock);
+	mutex_destroy(&com_ctxt->vma.vma_lock);
+	mutex_destroy(&com_ctxt->dma_dev.lock);
+	mutex_destroy(&com_ctxt->irqs.lock);
+}
+
+s32 sxe2_com_init(struct sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops)
+{
+	s32 ret;
+
+	if (!com_ctxt || !ops || !ops->com_ctxt_fill || !ops->com_mode_get || !adapter) {
+		LOG_ERROR("sxe2_com_init: invalid params, com_ctxt: %pK, adapter:%pK, ops: %pK\n",
+			  com_ctxt, adapter, ops);
+		return -EINVAL;
+	}
+
+	if (ops->com_mode_get(adapter) == SXE2_COM_MODULE_KERNEL)
+		return 0;
+
+	ret = sxe2_com_ctxt_init_once(com_ctxt, adapter, ops);
+	if (ret)
+		goto l_end;
+
+	ret = sxe2_com_irqs_init(com_ctxt);
+	if (ret) {
+		LOG_DEV_ERR_COM("dpdk irqs init failed, ret: %d\n", ret);
+		goto l_com_deinit;
+	}
+
+	ret = sxe2_com_bar_init(com_ctxt);
+	if (ret) {
+		LOG_DEV_ERR_COM("dpdk bar init failed, ret: %d\n", ret);
+		goto l_irqs_deinit;
+	}
+
+	ret = sxe2_com_cdev_create(com_ctxt);
+	if (ret != 0) {
+		LOG_DEV_ERR_COM("eth dpdk cdev create failed, ret=%d\n", ret);
+		goto l_bar_deinit;
+	}
+
+	sxe2_com_enable(com_ctxt);
+
+	return 0;
+
+l_bar_deinit:
+	sxe2_com_bar_deinit(com_ctxt);
+l_irqs_deinit:
+	sxe2_com_irqs_deinit(com_ctxt);
+l_com_deinit:
+	sxe2_com_ctxt_deinit_once(com_ctxt);
+l_end:
+	return ret;
+}
+
+void sxe2_com_deinit(struct sxe2_com_context *com_ctxt)
+{
+	if (!com_ctxt->adapter || !com_ctxt->ops || !com_ctxt->ops->com_mode_get ||
+	    (com_ctxt->ops->com_mode_get(com_ctxt->adapter) == SXE2_COM_MODULE_KERNEL))
+		return;
+
+	sxe2_com_disable(com_ctxt);
+	sxe2_com_cdev_delete(com_ctxt);
+	sxe2_com_bar_deinit(com_ctxt);
+	sxe2_com_irqs_deinit(com_ctxt);
+	sxe2_com_ctxt_deinit_once(com_ctxt);
+}
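+
+/*
+ * Wiring sketch (hypothetical PF driver, not part of this patch): the
+ * adapter embeds a struct sxe2_com_context and supplies the callbacks
+ * that sxe2_com_init() above checks as mandatory:
+ *
+ *	static struct sxe2_com_ops my_ops = {
+ *		.com_ctxt_fill = my_ctxt_fill,
+ *		.com_mode_get  = my_mode_get,
+ *	};
+ *
+ *	err = sxe2_com_init(&adapter->com_ctxt, adapter, &my_ops);
+ *
+ * If com_mode_get() reports SXE2_COM_MODULE_KERNEL, sxe2_com_init()
+ * returns 0 without creating the cdev.
+ */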
+
+s32 sxe2_com_adapter_register(enum sxe2_func_type func_type)
+{
+	s32 ret;
+
+	memset(&sxe2_com_mgr, 0, sizeof(sxe2_com_mgr));
+	mutex_init(&sxe2_com_mgr.lock);
+
+	ret = alloc_chrdev_region(&sxe2_com_cdev_major, 0, SXE2_MAX_DEVICES_NUM,
+				  func_type == SXE2_PF ? SXE2_COM_CHRDEV_NAME : SXE2VF_COM_CHRDEV_NAME);
+	if (ret) {
+		LOG_ERROR("alloc cdev number failed: %d\n", ret);
+		goto l_alloc_cdev_failed;
+	}
+
+	sxe2_com_cdev_class =
+		class_create(THIS_MODULE, func_type == SXE2_PF ? SXE2_COM_CHRDEV_NAME : SXE2VF_COM_CHRDEV_NAME);
+	if (IS_ERR(sxe2_com_cdev_class)) {
+		ret = (s32)PTR_ERR(sxe2_com_cdev_class);
+		LOG_ERROR("create cdev class failed: %d\n", ret);
+		goto l_create_class_failed;
+	}
+
+	mutex_init(&sxe2_com_minor_lock);
+
+	return 0;
+
+l_create_class_failed:
+	unregister_chrdev_region(sxe2_com_cdev_major, SXE2_MAX_DEVICES_NUM);
+l_alloc_cdev_failed:
+	return ret;
+}
+
+void sxe2_com_adapter_unregister(void)
+{
+	class_destroy(sxe2_com_cdev_class);
+	unregister_chrdev_region(sxe2_com_cdev_major, SXE2_MAX_DEVICES_NUM);
+	idr_destroy(&sxe2_com_minor_idr);
+
+	mutex_destroy(&sxe2_com_minor_lock);
+	mutex_destroy(&sxe2_com_mgr.lock);
+}
+
+void sxe2_com_info_print(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (!com_ctxt->adapter)
+		goto l_unlock;
+
+	LOG_DEV_INFO_COM("com cdev status: %s\n", com_ctxt->status == 0 ? "Unaccess" : "Normal");
+
+	sxe2_com_dma_print(com_ctxt);
+
+l_unlock:
+	mutex_unlock(&dev_mgr->lock);
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.h b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fa1f6d3e42d9dc50711222aa7def46f63666761
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_cdev.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_com_cdev.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_COM_CDEV_H__
+#define __SXE2_COM_CDEV_H__
+
+#include
+#include
+#include
+
+#include "sxe2_log.h"
+#include "sxe2_cdev.h"
+#include "sxe2_com_dma.h"
+#include "sxe2_com_irq.h"
+#include "sxe2_com_ver_compat.h"
+
+#define SXE2_COM_DEV_MGR_DATA_SIZE (128)
+#define SXE2_MAX_IOCTL_CMDS (1)
+#define SXE2_COM_CHRDEV_NAME "sxe2-dpdk"
+#define SXE2VF_COM_CHRDEV_NAME "sxe2vf-dpdk"
+#define SXE2_MAX_DEVICES_NUM BIT(MINORBITS)
+#define SXE2_COM_CMD_DFLT_TIMEOUT (30)
+#define SXE2_COM_CMD_DFLT_TIMEOUT_MS (30000)
+#define SXE2_COM_DEV_MGR_DATA_CNT (1)
+
+#define SXE2_COM_DEV_NAME 16
+
+struct com_log_param {
+	char dev_name[SXE2_COM_DEV_NAME];
+	struct pci_dev *pdev;
+};
+
+#define LOG_ERROR_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_WARN_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_INFO_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEBUG_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_ERR_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEV_ERR(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_WARN_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEV_WARN(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_INFO_COM(fmt, ...) 
\ + { \ + struct com_log_param *adapter = &com_ctxt->com_log_param; \ + LOG_DEV_INFO(fmt, ##__VA_ARGS__); \ + } + +#define LOG_DEV_DEBUG_COM(fmt, ...) \ + { \ + struct com_log_param *adapter = &com_ctxt->com_log_param; \ + LOG_DEV_DEBUG(fmt, ##__VA_ARGS__); \ + } + +enum sxe2_com_dev_status { + SXE2_COM_CDEV_STATUS_UNACCESS = 0, + SXE2_COM_CDEV_STATUS_NORMAL, +}; + +enum sxe2_com_module { + SXE2_COM_MODULE_KERNEL = 0, + SXE2_COM_MODULE_DPDK, + SXE2_COM_MODULE_MIXED, + SXE2_COM_MODULE_RDMA, + SXE2_COM_MODULE_UNDEFINED, + SXE2_COM_MODULE_INVAL, +}; + +enum sxe2_func_type { + SXE2_PF = 0, + SXE2_VF, +}; + +enum sxe2_drv_type { + SXE2_KERNEL_DRV = 0, + SXE2_DPDK_DRV, +}; + +struct sxe2_obj { + u32 func_type : 2; + u32 resv : 2; + u32 pf_id : 4; + u32 vf_id : 12; + u32 resv1 : 4; + u32 drv_type : 2; + u32 drv_id : 6; +}; + +struct sxe2_com_vma_mgr { + /* in order to protect the data */ + struct mutex vma_lock; + struct list_head vma_list; + struct page *read_page; + struct page *write_page; + u8 reserved[3]; +}; + +struct sxe2_com_ops { + void (*com_ctxt_fill)(void *adapter); + s32 (*cmd_exec)(void *adapter, struct sxe2_obj *obj, struct sxe2_drv_cmd_params *param); + s32 (*get_irq_num)(void *adapter); + s32 (*get_vector)(void *adapter, u16 irq_id_in_com); + s32 (*release)(void *adapter, struct sxe2_obj *obj); + s32 (*com_mode_get)(void *adapter); +}; + +struct sxe2_com_context { + void *adapter; + struct pci_dev *pdev; + struct sxe2_com_ops *ops; + struct com_log_param com_log_param; + u8 dpdk_mode; + atomic_t ref_count; + struct list_head list; + wait_queue_head_t waitq; + enum sxe2_com_dev_status status; + struct sxe2_cdev_info cdev_info; + enum sxe2_func_type func_type; + u16 pf_id; + u16 vf_id; + bool is_handshake; + u32 dpdk_ver; + struct sxe2_com_irqs_ctxt irqs; + struct sxe2_com_vma_mgr vma; + struct sxe2_com_dma_dev dma_dev; + struct sxe2_obj obj; + /* in order to protect the data */ + struct mutex com_lock; +}; + +struct sxe2_com_dev_mgr { + /* in order to protect the data */ + struct mutex lock; +}; + +struct sxe2_com_vma_device { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +#define SXE2_DRV_MSG_INFO_SIZE (sizeof(struct drv_msg_info)) +#define SXE2_COM_DRV_REQ_MSG_HDR(param) ((struct drv_msg_info *)(param)->req_buff) + +#define SXE2_DRV_MSG_MAX_SIZE (8192) +#define SXE2_DRV_MSG_MAGIC_CODE (0x56781234) +#define SXE2_MOD_DRV (1) +#define SXE2_SUB_MOD_DEV (1) + +#define MODULE_ID_SHIFT (24) +#define SUB_MODULE_ID_SHIFT (16) +#define ERROR_INDEX_MASK (0xFFFF0000) +#define SXE2_MAKE_ERR_CODE_INDEX(module, sub_module) \ + ((((u32)((module) << MODULE_ID_SHIFT)) | ((u32)((sub_module) << SUB_MODULE_ID_SHIFT))) & \ + ERROR_INDEX_MASK) + +#define SXE2_COM_MODE_NAME_SIZE 64 +#define SXE2_COM_KERNEL_MODE_NAME "kernel" +#define SXE2_COM_DPDK_MODE_NAME "dpdk" +#define SXE2_COM_RDMA_MODE_NAME "rdma" +#define SXE2_COM_MIXED_MODE_NAME "mixed" +#define SXE2_COM_UNDEFINED_MODE_NAME "undefined" + +void sxe2_com_deinit(struct sxe2_com_context *com_ctxt); + +s32 sxe2_com_init(struct sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops); + +s32 sxe2_com_adapter_register(enum sxe2_func_type func_type); + +void sxe2_com_adapter_unregister(void); + +s32 sxe2_com_mode_get(void *adapter); + +void sxe2_com_disable(struct sxe2_com_context *com_ctxt); + +void sxe2_com_enable(struct sxe2_com_context *com_ctxt); + +void sxe2_com_info_print(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.c 
b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.c new file mode 100644 index 0000000000000000000000000000000000000000..fff0e14f3cbd8ae2009aed1033c62d31eb6dd477 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_dma.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_dma.h" +#include "sxe2_compat.h" + +struct sxe2_com_batch { + struct page **pages; + struct page *fallback_page; + int capacity; + int size; + int offset; +}; + +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int put_pfn(unsigned long pfn, int prot) +{ + if (!is_invalid_reserved_pfn(pfn)) { + struct page *page = pfn_to_page(pfn); +#ifdef NOT_SUPP_UNPIN_USER_PAGE + if (prot & IOMMU_WRITE) + SetPageDirty(page); + put_page(page); +#else + unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE); +#endif + return 1; + } + return 0; +} + +#define SXE2_COM_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void sxe2_com_batch_init(struct sxe2_com_batch *batch) +{ + batch->size = 0; + batch->offset = 0; + + batch->pages = (struct page **)__get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = SXE2_COM_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void sxe2_com_batch_unpin(struct sxe2_com_batch *batch, struct sxe2_com_dma *dma) +{ + while (batch->size) { + unsigned long pfn = page_to_pfn(batch->pages[batch->offset]); + + put_pfn(pfn, dma->prot); + batch->offset++; + batch->size--; + } +} + +static void sxe2_com_batch_fini(struct sxe2_com_batch *batch) +{ + if (batch->capacity == SXE2_COM_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + +static int follow_fault_pfn(struct vm_area_struct *vma, + struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ +#ifdef NOT_SUPP_FOLLOW_PTE + (void)mm; + (void)write_fault; + *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + return 0; +#else + pte_t *ptep; + /* in order to protect the data */ + spinlock_t *ptl; + bool unlocked; + int ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) { + unlocked = false; + ret = fixup_user_fault(mm, vaddr, + FAULT_FLAG_REMOTE | (write_fault ? 
FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) + return ret; + } + + if (write_fault && !pte_write(*ptep)) + ret = -EFAULT; + else + *pfn = pte_pfn(*ptep); + + pte_unmap_unlock(ptep, ptl); + return ret; +#endif +} + +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) +{ + struct vm_area_struct *vma; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + mmap_read_lock(mm); + +#ifndef NOT_SUPP_PIN_USER_PAGE + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, pages, NULL, NULL); +#elif !defined(NOT_SUPP_FOLL_LONGTERM) + ret = get_user_pages(vaddr, npages, flags | FOLL_LONGTERM, pages, NULL); +#elif !defined(NOT_SUPP_GET_USER_PAGES_LONGTERM) + ret = get_user_pages_longterm(vaddr, npages, flags, pages, NULL); +#else + ret = get_user_pages_fast(vaddr, npages, !!(prot & IOMMU_WRITE), pages); +#endif + + if (ret > 0) { + *pfn = page_to_pfn(pages[0]); + goto done; + } + + vaddr = untagged_addr(vaddr); + +retry: + vma = vma_lookup(mm, vaddr); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } + } +done: + mmap_read_unlock(mm); + return ret; +} + +static long sxe2_com_pin_pages_remote(struct sxe2_com_dma *dma, unsigned long vaddr, long npage, + unsigned long *pfn_base, struct sxe2_com_batch *batch) +{ + unsigned long pfn = 0; + struct mm_struct *mm = current->mm; + long ret = 0; + long pinned = 0; + bool rsvd = 0; + dma_addr_t iova = vaddr - dma->vaddr + dma->iova; + + if (!mm) + return -ENODEV; + + if (batch->size) { + *pfn_base = page_to_pfn(batch->pages[batch->offset]); + pfn = *pfn_base; + rsvd = is_invalid_reserved_pfn(*pfn_base); + } else { + *pfn_base = 0; + } + + while (npage) { + if (!batch->size) { + long req_pages = min_t(long, npage, batch->capacity); + + ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot, &pfn, batch->pages); + if (ret < 0) + goto unpin_out; + + batch->size = ret; + batch->offset = 0; + + if (!*pfn_base) { + *pfn_base = pfn; + rsvd = is_invalid_reserved_pfn(*pfn_base); + } + } + + while (true) { + if (pfn != *pfn_base + pinned || rsvd != is_invalid_reserved_pfn(pfn)) + goto unpin_out; + + pinned++; + npage--; + vaddr += PAGE_SIZE; + iova += PAGE_SIZE; + batch->offset++; + batch->size--; + + if (!batch->size) + break; + + pfn = page_to_pfn(batch->pages[batch->offset]); + } + } + +unpin_out: + if (batch->size == 1 && !batch->offset) { + put_pfn(pfn, dma->prot); + batch->size = 0; + } + + if (ret < 0) { + if (pinned && !rsvd) { + for (pfn = *pfn_base; pinned; pfn++, pinned--) + put_pfn(pfn, dma->prot); + } + sxe2_com_batch_unpin(batch, dma); + + return ret; + } + + return pinned; +} + +static int sxe2_com_iommu_map(struct sxe2_com_context *com_ctxt, + dma_addr_t iova, unsigned long pfn, long npage, + int prot) +{ +#ifdef NOT_SUPP_IOMMU_CAPABLE + prot |= IOMMU_CACHE; +#else + if (iommu_capable(SXE2_COM_TO_BUS(com_ctxt), IOMMU_CAP_CACHE_COHERENCY)) + prot |= IOMMU_CACHE; +#endif + + return iommu_map(SXE2_COM_TO_DOMAIN(com_ctxt), + iova, (phys_addr_t)pfn << PAGE_SHIFT, npage << PAGE_SHIFT, prot); +} + +static long sxe2_com_unpin_pages_remote(struct sxe2_com_dma *dma, + dma_addr_t iova, unsigned long pfn, long npage) +{ + long unlocked 
= 0; + long i; + + for (i = 0; i < npage; i++, iova += PAGE_SIZE) { + if (put_pfn(pfn++, dma->prot)) + unlocked++; + } + + return unlocked; +} + +static size_t unmap_unpin(struct sxe2_com_context *com_ctxt, + struct sxe2_com_dma *dma, dma_addr_t *iova, size_t len, + phys_addr_t phys, long *unlocked) +{ + size_t unmapped = iommu_unmap(SXE2_COM_TO_DOMAIN(com_ctxt), *iova, len); + + if (unmapped) { + *unlocked += sxe2_com_unpin_pages_remote(dma, *iova, phys >> PAGE_SHIFT, + unmapped >> PAGE_SHIFT); + *iova += unmapped; + cond_resched(); + } + return unmapped; +} + +static long sxe2_com_unmap_unpin(struct sxe2_com_context *com_ctxt, struct sxe2_com_dma *dma) +{ + dma_addr_t iova = dma->iova, end = dma->iova + dma->size; + long unlocked = 0; + + if (!dma->size) + return 0; + + while (iova < end) { + size_t unmapped; + size_t len = PAGE_SIZE; + phys_addr_t phys; + + phys = iommu_iova_to_phys(SXE2_COM_TO_DOMAIN(com_ctxt), iova); + if (WARN_ON(!phys)) { + iova += PAGE_SIZE; + continue; + } + + unmapped = unmap_unpin(com_ctxt, dma, &iova, len, phys, &unlocked); + if (WARN_ON(!unmapped)) + break; + } + + return unlocked; +} + +static int sxe2_com_pin_map_dma(struct sxe2_com_context *com_ctxt, + struct sxe2_com_dma *dma, size_t map_size) +{ + dma_addr_t iova = dma->iova; + unsigned long vaddr = dma->vaddr; + struct sxe2_com_batch batch; + size_t size = map_size; + long npage; + unsigned long pfn; + int ret = 0; + + sxe2_com_batch_init(&batch); + + while (size) { + npage = sxe2_com_pin_pages_remote(dma, vaddr + dma->size, + size >> PAGE_SHIFT, &pfn, &batch); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + break; + } + + ret = sxe2_com_iommu_map(com_ctxt, iova + dma->size, pfn, npage, dma->prot); + if (ret) { + sxe2_com_unpin_pages_remote(dma, iova + dma->size, pfn, npage); + sxe2_com_batch_unpin(&batch, dma); + break; + } + + size -= npage << PAGE_SHIFT; + dma->size += npage << PAGE_SHIFT; + } + + sxe2_com_batch_fini(&batch); + + return ret; +} + +STATIC struct sxe2_com_dma *sxe2_com_dma_find_unlock(struct sxe2_com_context *com_ctxt, + dma_addr_t start, u32 size) +{ + struct sxe2_com_dma *dma; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + list_for_each_entry(dma, &dma_dev->buffer_list, list) { + if ((start + size > dma->iova) && (start < dma->iova + dma->size)) + return dma; + } + + return NULL; +} + +static s32 sxe2_com_dma_alloc(struct sxe2_com_context *com_ctxt, + struct sxe2_ioctl_iommu_dma_map *map) +{ + int ret = 0; + struct sxe2_com_dma *dma = NULL; + dma_addr_t iova = map->iova; + unsigned long vaddr = map->vaddr; + size_t size = map->size; + size_t pgsize; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + if (!SXE2_COM_IOMMU_SUPP(com_ctxt)) + return -EOPNOTSUPP; + + if (map->size != size || map->vaddr != vaddr || map->iova != iova) { + ret = -EINVAL; + goto l_out; + } + + pgsize = (size_t)1 << __ffs(PAGE_MASK); + if (!size || (size | iova | vaddr) & (pgsize - 1)) { + ret = -EINVAL; + goto l_out; + } + + if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { + ret = -EINVAL; + goto l_out; + } + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) { + ret = -ENOMEM; + goto l_out; + } + + dma->vaddr = map->vaddr; + dma->iova = iova; + dma->prot = IOMMU_WRITE | IOMMU_READ; + + mutex_lock(&dma_dev->lock); + if (sxe2_com_dma_find_unlock(com_ctxt, iova, size)) { + ret = -EEXIST; + goto l_free; + } + + ret = sxe2_com_pin_map_dma(com_ctxt, dma, size); + if (ret) + goto l_free; + + list_add(&dma->list, &dma_dev->buffer_list); + + ret = 0; + goto l_unlock; + 
+l_free: + kfree(dma); +l_unlock: + mutex_unlock(&dma_dev->lock); +l_out: + LOG_INFO_BDF_COM("vaddr: 0x%lx, iova: 0x%llx, size: %zu, ret:%d\n", vaddr, iova, size, + ret); + return ret; +} + +static int sxe2_com_dma_free(struct sxe2_com_context *com_ctxt, dma_addr_t iova, u8 free_all) +{ + int ret = free_all ? 0 : -ENOENT; + struct sxe2_com_dma *dma; + struct sxe2_com_dma *tmp; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + mutex_lock(&dma_dev->lock); + list_for_each_entry_safe(dma, tmp, &dma_dev->buffer_list, list) { + if (free_all) { + list_del(&dma->list); + sxe2_com_unmap_unpin(com_ctxt, dma); + kfree(dma); + continue; + } + + if (dma->iova == iova) { + list_del(&dma->list); + sxe2_com_unmap_unpin(com_ctxt, dma); + kfree(dma); + ret = 0; + break; + } + } + + mutex_unlock(&dma_dev->lock); + LOG_INFO_BDF_COM("iova: 0x%llx, free_all %d, ret:%d\n", iova, free_all, ret); + return ret; +} + +s32 sxe2_com_dma_map(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_iommu_dma_map map = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_MAP); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", + com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_MAP); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&map, (void __user *)arg, arg_sz)) + return -EFAULT; + + ret = sxe2_com_dma_alloc(com_ctxt, &map); + + return ret; +} + +s32 sxe2_com_dma_unmap(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_iommu_dma_unmap map = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_UNMAP); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", + com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_UNMAP); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&map, (void __user *)arg, arg_sz)) + return -EFAULT; + + return sxe2_com_dma_free(com_ctxt, map.iova, false); +} + +s32 sxe2_com_dma_clear(struct sxe2_com_context *com_ctxt) +{ + return sxe2_com_dma_free(com_ctxt, 0xFF, true); +} + +void sxe2_com_dma_print(struct sxe2_com_context *com_ctxt) +{ + struct sxe2_com_dma *dma; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + mutex_lock(&dma_dev->lock); + LOG_DEV_INFO_COM("dma map list:\n"); + list_for_each_entry(dma, &dma_dev->buffer_list, list) { + LOG_DEV_INFO_COM("\tdma vaddr: 0x%lx, iova:0x%llx, size:%zu\n", + dma->vaddr, dma->iova, dma->size); + } + mutex_unlock(&dma_dev->lock); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.h b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.h new file mode 100644 index 0000000000000000000000000000000000000000..d753cc74365304b5053334871975308b7c1840d1 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_dma.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_dma.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_COM_DMA_H__ +#define __SXE2_COM_DMA_H__ + +#include + +struct sxe2_com_context; + +#define SXE2_COM_TO_DOMAIN(com_ctxt) (iommu_get_domain_for_dev((com_ctxt)->dma_dev.dev)) +#define SXE2_COM_IOMMU_SUPP(com_ctxt) \ + (SXE2_COM_TO_DOMAIN(com_ctxt) && SXE2_COM_TO_DOMAIN(com_ctxt)->type & __IOMMU_DOMAIN_PAGING) +#define SXE2_COM_TO_BUS(com_ctxt) (((com_ctxt)->dma_dev.dev)->bus) + +struct sxe2_com_dma { + struct list_head list; + + unsigned long vaddr; + size_t size; + int prot; + dma_addr_t iova; +}; + +struct sxe2_com_dma_dev { + struct device *dev; + struct mutex lock; + struct list_head buffer_list; +}; + +s32 sxe2_com_dma_map(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_dma_unmap(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_dma_clear(struct sxe2_com_context *com_ctxt); + +void sxe2_com_dma_print(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.c b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..23d60eb6a691814d9504deeff3c93fac7ac606dc --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_irq.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_com_irq.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_compat.h" + +STATIC irqreturn_t sxe2_com_io_irq_handler(int irq, void *dev) +{ + struct sxe2_com_irq_entry *entry = dev; + + if (likely(entry->trigger)) + eventfd_signal(entry->trigger, 1); + + return IRQ_HANDLED; +} + +static s32 sxe2_com_io_set_trigger(struct sxe2_com_context *com_ctxt, int index, int fd, irq_handler_t handler) +{ + struct sxe2_com_irq_entry *entry = &com_ctxt->irqs.entry[index]; + struct eventfd_ctx *trigger; + int ret; + + if (entry->trigger) { + synchronize_irq(entry->vector); + free_irq(entry->vector, entry); + kfree(entry->name); + eventfd_ctx_put(entry->trigger); + entry->trigger = NULL; + } + + if (fd < 0) + return 0; + + entry->name = kasprintf(GFP_KERNEL, "sxe2-com-irq[%d](%s)", entry->vector, com_ctxt->com_log_param.dev_name); + if (!entry->name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(entry->name); + return PTR_ERR(trigger); + } + + entry->trigger = trigger; + + ret = request_irq(entry->vector, handler, 0, entry->name, entry); + if (ret) { + kfree(entry->name); + eventfd_ctx_put(trigger); + entry->trigger = NULL; + return ret; + } + + return 0; +} + +STATIC s32 sxe2_com_io_irq_init(struct sxe2_com_context *com_ctxt) +{ + s32 ret; + u32 num_irq; + u32 i; + + if (!com_ctxt->ops || !com_ctxt->ops->get_irq_num || !com_ctxt->ops->get_vector) + return -EFAULT; + + ret = com_ctxt->ops->get_irq_num(com_ctxt->adapter); + if (ret <= 0) { + LOG_ERROR_BDF_COM("get irq num failed: %d\n", ret); + return -ENOMEM; + } + num_irq = ret; + + com_ctxt->irqs.entry = kcalloc(num_irq, sizeof(*com_ctxt->irqs.entry), GFP_KERNEL); + if (!com_ctxt->irqs.entry) + return -ENOMEM; + + for (i = 0; i < num_irq; i++) { + com_ctxt->irqs.entry[i].vector = com_ctxt->ops->get_vector(com_ctxt->adapter, i); + if 
(com_ctxt->irqs.entry[i].vector < 0) + goto l_err; + } + com_ctxt->irqs.num_irqs = num_irq; + + return 0; +l_err: + kfree(com_ctxt->irqs.entry); + com_ctxt->irqs.entry = NULL; + return -EINVAL; +} + +STATIC void sxe2_com_io_irq_clear(struct sxe2_com_context *com_ctxt) +{ + int i; + + for (i = 0; i < com_ctxt->irqs.num_irqs; i++) + sxe2_com_io_set_trigger(com_ctxt, i, -1, NULL); +} + +STATIC void sxe2_com_io_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_io_irq_clear(com_ctxt); + + com_ctxt->irqs.num_irqs = 0; + kfree(com_ctxt->irqs.entry); +} + +s32 sxe2_com_io_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_irq_set param = {}; + u32 arg_sz; + u32 i; + s32 *fd; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_IO_IRQS_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("get arg sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_IO_IRQS_REQ); + ret = -EINVAL; + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if (param.cnt == 0 || param.cnt > com_ctxt->irqs.num_irqs || + param.base_irq_in_com + param.cnt - 1 > com_ctxt->irqs.num_irqs - 1 || param.event_fd == NULL) { + ret = -EINVAL; + goto l_end; + } + + fd = memdup_user((void __user *)(param.event_fd), param.cnt * sizeof(*param.event_fd)); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto l_end; + } + + for (i = 0; i < param.cnt; i++) { + ret = sxe2_com_io_set_trigger(com_ctxt, i + param.base_irq_in_com, fd[i], sxe2_com_io_irq_handler); + if (ret) + goto l_roll_back; + } + + goto l_free; + +l_roll_back: + for (i = 0; i < param.cnt; i++) + (void)sxe2_com_io_set_trigger(com_ctxt, i + param.base_irq_in_com, -1, NULL); +l_free: + kfree(fd); +l_end: + LOG_INFO_BDF_COM("cnt:%u:%u, base_irq_in_com: %u, ret: %d.\n", param.cnt, + com_ctxt->irqs.num_irqs, param.base_irq_in_com, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +STATIC s32 sxe2_com_irq_notifier_register(struct sxe2_com_context *com_ctxt, struct sxe2_nb *irq_nb) +{ + return atomic_notifier_chain_register(&com_ctxt->irqs.irq_nh, &irq_nb->nb); +} +STATIC s32 sxe2_com_irq_notifier_unregister(struct sxe2_com_context *com_ctxt, struct sxe2_nb *irq_nb) +{ + return atomic_notifier_chain_unregister(&com_ctxt->irqs.irq_nh, &irq_nb->nb); +} +s32 sxe2_com_irq_notifier_call_chain(struct sxe2_com_context *com_ctxt, enum sxe2_com_event_cause ec) +{ + SXE2_BUG_ON(ec >= SXE2_COM_EC_MAX); + if (ec >= SXE2_COM_EC_MAX) + return -EFAULT; + + return atomic_notifier_call_chain(&com_ctxt->irqs.irq_nh, ec, NULL); +} + +STATIC u64 sxe2_com_event_cause_rc(struct sxe2_com_context *com_ctxt) +{ + u64 ec; + unsigned long flags; + + spin_lock_irqsave(&com_ctxt->irqs.evt_lock, flags); + ec = com_ctxt->irqs.evt_cause; + com_ctxt->irqs.evt_cause = 0; + spin_unlock_irqrestore(&com_ctxt->irqs.evt_lock, flags); + + return ec; +} + +STATIC s32 sxe2_com_event_nb_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct sxe2_com_irqs_ctxt *irqs = sxe2_nb_cof(nb, struct sxe2_com_irqs_ctxt, evt_nb); + enum sxe2_com_event_cause ec = (enum sxe2_com_event_cause)event; + unsigned long flags; + + SXE2_BUG_ON(ec >= SXE2_COM_EC_MAX); + if (ec >= SXE2_COM_EC_MAX) + return NOTIFY_BAD; + + if (ec == SXE2_COM_EC_RESET) + return NOTIFY_DONE; + + spin_lock_irqsave(&irqs->evt_lock, flags); + if (!test_bit(ec, (unsigned long *)&irqs->evt_sub_map)) { + spin_unlock_irqrestore(&irqs->evt_lock, flags); + 
return NOTIFY_DONE; + } + + set_bit(ec, (unsigned long *)&irqs->evt_cause); + spin_unlock_irqrestore(&irqs->evt_lock, flags); + + eventfd_signal(irqs->evt_trigger, 1); + + return NOTIFY_OK; +} + +STATIC void sxe2_com_event_irq_init(struct sxe2_com_context *com_ctxt) +{ + spin_lock_init(&com_ctxt->irqs.evt_lock); + com_ctxt->irqs.evt_sub_map = 0; + com_ctxt->irqs.evt_trigger = NULL; + + SXE2_NB_INIT(&com_ctxt->irqs.evt_nb, sxe2_com_event_nb_call, 0); +} + +STATIC void sxe2_com_event_irq_clear(struct sxe2_com_context *com_ctxt) +{ + com_ctxt->irqs.evt_sub_map = 0; + + if (com_ctxt->irqs.evt_trigger) { + eventfd_ctx_put(com_ctxt->irqs.evt_trigger); + com_ctxt->irqs.evt_trigger = NULL; + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.evt_nb); + } +} + +STATIC void sxe2_com_event_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_event_irq_clear(com_ctxt); + SXE2_NB_INIT(&com_ctxt->irqs.evt_nb, NULL, 0); +} + +s32 sxe2_com_event_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_other_evt_set param = {}; + u32 arg_sz; + struct eventfd_ctx *trigger; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_EVT_IRQ_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_EVT_IRQ_REQ); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if ((param.eventfd >= 0 && com_ctxt->irqs.evt_trigger) || (param.eventfd < 0 && !com_ctxt->irqs.evt_trigger) || + (param.eventfd >= 0 && param.filter_table == 0)) { + ret = -EINVAL; + goto l_end; + } + + if (param.eventfd < 0) { + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.evt_nb); + eventfd_ctx_put(com_ctxt->irqs.evt_trigger); + com_ctxt->irqs.evt_trigger = NULL; + com_ctxt->irqs.evt_sub_map = 0; + goto l_end; + } + + trigger = eventfd_ctx_fdget(param.eventfd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto l_end; + } + com_ctxt->irqs.evt_trigger = trigger; + com_ctxt->irqs.evt_sub_map = param.filter_table; + sxe2_com_irq_notifier_register(com_ctxt, &com_ctxt->irqs.evt_nb); + +l_end: + LOG_INFO_BDF_COM("eventfd:%d, filter_table: %llu, ret: %d.\n", param.eventfd, + param.filter_table, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +s32 sxe2_com_event_cause_get(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_other_evt_get ec = {}; + u32 arg_sz; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_EVT_CAUSE_GET); + if (ret < 0) { + LOG_ERROR_BDF_COM("get arg sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_EVT_CAUSE_GET); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + ec.evt_cause = sxe2_com_event_cause_rc(com_ctxt); + + if (copy_to_user((void __user *)arg, &ec, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + LOG_INFO_BDF_COM("ec:%llu, ret: %d.\n", ec.evt_cause, ret); + +l_end: + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +s32 sxe2_com_reset_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_reset_sub_set param = {}; + u32 arg_sz; + struct eventfd_ctx *trigger; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_RST_IRQ_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + 
SXE2_DEVICE_RST_IRQ_REQ); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if ((param.eventfd >= 0 && com_ctxt->irqs.rst_trigger) || (param.eventfd < 0 && !com_ctxt->irqs.rst_trigger)) { + ret = -EINVAL; + goto l_end; + } + + if (param.eventfd < 0) { + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.rst_nb); + eventfd_ctx_put(com_ctxt->irqs.rst_trigger); + com_ctxt->irqs.rst_trigger = NULL; + goto l_end; + } + + trigger = eventfd_ctx_fdget(param.eventfd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto l_end; + } + com_ctxt->irqs.rst_trigger = trigger; + sxe2_com_irq_notifier_register(com_ctxt, &com_ctxt->irqs.rst_nb); + +l_end: + LOG_INFO_BDF_COM("eventfd:%d, ret: %d.\n", param.eventfd, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +static s32 sxe2_com_reset_nb_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct sxe2_com_irqs_ctxt *irqs = sxe2_nb_cof(nb, struct sxe2_com_irqs_ctxt, rst_nb); + enum sxe2_com_event_cause ec = (enum sxe2_com_event_cause)event; + + if (ec != SXE2_COM_EC_RESET) + return NOTIFY_DONE; + + eventfd_signal(irqs->rst_trigger, 1); + + return NOTIFY_OK; +} + +STATIC void sxe2_com_reset_irq_init(struct sxe2_com_context *com_ctxt) +{ + com_ctxt->irqs.rst_trigger = NULL; + SXE2_NB_INIT(&com_ctxt->irqs.rst_nb, sxe2_com_reset_nb_call, 0); +} +STATIC void sxe2_com_reset_irq_clear(struct sxe2_com_context *com_ctxt) +{ + if (com_ctxt->irqs.rst_trigger) { + eventfd_ctx_put(com_ctxt->irqs.rst_trigger); + com_ctxt->irqs.rst_trigger = NULL; + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.rst_nb); + } +} + +STATIC void sxe2_com_reset_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_reset_irq_clear(com_ctxt); + + SXE2_NB_INIT(&com_ctxt->irqs.rst_nb, NULL, 0); +} + +s32 sxe2_com_irqs_init(struct sxe2_com_context *com_ctxt) +{ + (void)sxe2_com_io_irq_init(com_ctxt); + + sxe2_com_event_irq_init(com_ctxt); + + sxe2_com_reset_irq_init(com_ctxt); + + return 0; +} + +void sxe2_com_irqs_clear(struct sxe2_com_context *com_ctxt) +{ + mutex_lock(&com_ctxt->com_lock); + + sxe2_com_reset_irq_clear(com_ctxt); + sxe2_com_event_irq_clear(com_ctxt); + sxe2_com_io_irq_clear(com_ctxt); + mutex_unlock(&com_ctxt->com_lock); +} + +void sxe2_com_irqs_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_reset_irq_deinit(com_ctxt); + sxe2_com_event_irq_deinit(com_ctxt); + sxe2_com_io_irq_deinit(com_ctxt); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.h b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..4d56b28e5c628121e6ce300fc7e31b1a12c7d148 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_irq.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_irq.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_COM_IRQ_H__ +#define __SXE2_COM_IRQ_H__ + +#include + +#include "sxe2_ioctl_chnl.h" + +struct sxe2_com_context; + +struct sxe2_nb { + struct notifier_block nb; + u32 event_type; + void *priv; +}; + +#define sxe2_nb_cof(ptr, type, member) \ + (container_of(container_of(ptr, struct sxe2_nb, nb), type, member)) + +#define sxe2_nb_priv(ptr) (((struct sxe2_nb *)(container_of(ptr, struct sxe2_nb, nb)))->priv) + +#define SXE2_NB_INIT(name, handler, event) \ +do { \ + typeof(name) _name = (name); \ + _name->nb.notifier_call = handler; \ + _name->event_type = event; \ +} while (0) + +struct sxe2_com_irq_entry { + s32 vector; + char *name; + struct eventfd_ctx *trigger; +}; + +struct sxe2_com_irqs_ctxt { + /* in order to protect the data */ + struct mutex lock; + struct atomic_notifier_head irq_nh; + u32 num_irqs; + struct sxe2_com_irq_entry *entry; + /* in order to protect the data */ + spinlock_t evt_lock; + struct eventfd_ctx *evt_trigger; + u64 evt_cause; + u64 evt_sub_map; + struct sxe2_nb evt_nb; + + struct eventfd_ctx *rst_trigger; + struct sxe2_nb rst_nb; +}; + +s32 sxe2_com_irq_notifier_call_chain(struct sxe2_com_context *com_ctxt, + enum sxe2_com_event_cause ec); + +s32 sxe2_com_io_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_event_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); +s32 sxe2_com_reset_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_event_cause_get(struct sxe2_com_context *com_ctxt, unsigned long arg); + +void sxe2_com_irqs_deinit(struct sxe2_com_context *com_ctxt); + +s32 sxe2_com_irqs_init(struct sxe2_com_context *com_ctxt); +void sxe2_com_irqs_clear(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.c b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..2f693541e1e77b183373a9b9590563ffbf60f1f9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
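+ *
+ * Argument-size compatibility scheme: every ioctl command has a
+ * table of { protocol version, argument size } pairs, where the
+ * size is offsetofend() of the last struct field that version
+ * defines, and sxe2_com_get_arg_sz() walks the table from newest
+ * to oldest until the caller's minor version is covered, so old
+ * and new front ends can share one device node. As a sketch, if a
+ * hypothetical later version SXE2_COM_VER_NEXT appended a "flags"
+ * field to struct sxe2_ioctl_irq_set, only the table would grow:
+ *
+ *	struct sxe2_com_ver_arg_sz io_irq_sz[] = {
+ *		{ SXE2_COM_VER_NEXT, offsetofend(struct sxe2_ioctl_irq_set, flags) },
+ *		{ SXE2_COM_VER, offsetofend(struct sxe2_ioctl_irq_set, event_fd) },
+ *		SXE2_COM_VER_ARG_SZ_END,
+ *	};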
+ * + * @file: sxe2_com_ver_compat.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_com_ver_compat.h" + +#define SXE2_COM_VER_ARG_SZ_END { SXE2_COM_INVAL_U32, SXE2_COM_INVAL_U32 } + +struct sxe2_com_ver_arg_sz io_irq_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_irq_set, event_fd) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz other_evt_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_other_evt_set, filter_table) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz reset_irqs_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_reset_sub_set, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz evt_cause_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_other_evt_get, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz dma_map_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_iommu_dma_map, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz dma_unmap_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_iommu_dma_unmap, iova) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz cmd_send_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_drv_cmd_params, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_cmd_arg_sz g_cmd_arg_sz[] = { + [SXE2_DEVICE_IO_IRQS_REQ] = { io_irq_sz }, + [SXE2_DEVICE_EVT_IRQ_REQ] = { other_evt_sz }, + [SXE2_DEVICE_RST_IRQ_REQ] = { reset_irqs_sz }, + [SXE2_DEVICE_DMA_MAP] = { dma_map_sz }, + [SXE2_DEVICE_DMA_UNMAP] = { dma_unmap_sz }, + [SXE2_DEVICE_PASSTHROUGH] = { cmd_send_sz }, + [SXE2_DEVICE_EVT_CAUSE_GET] = { evt_cause_sz }, + [SXE2_DEVICE_MAX] = {}, +}; + +s32 sxe2_com_get_arg_sz(u32 ver, u32 cmd) +{ + struct sxe2_com_ver_arg_sz *ver_arg_sz; + u32 minor_ver = SXE2_MK_VER_MINOR(ver); + + if (cmd == SXE2_DEVICE_HANDSHAKE) + return sizeof(struct sxe2_ioctl_cmd_common_hdr); + + if (cmd >= SXE2_DEVICE_MAX) + return -EINVAL; + + if (ver == SXE2_COM_INVAL_U32) + return -EINVAL; + + ver_arg_sz = g_cmd_arg_sz[cmd].ver_arg_sz; + + if (ver_arg_sz->arg_size == 0) + return ver_arg_sz->arg_size; + + while (ver_arg_sz->ver != SXE2_COM_INVAL_U32) { + if (SXE2_MK_VER_MINOR(ver_arg_sz->ver) <= minor_ver) + break; + + ver_arg_sz++; + } + + return ver_arg_sz->arg_size == SXE2_COM_INVAL_U32 ? -EINVAL : ver_arg_sz->arg_size; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.h b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5657807ff99613e66fdb6c0041f8f41f1db0ff4b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/ioctl/sxe2_com_ver_compat.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
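+ *
+ * Note: the version/size tables declared against these types are
+ * terminated by an entry with both fields set to SXE2_COM_INVAL_U32
+ * (SXE2_COM_VER_ARG_SZ_END in sxe2_com_ver_compat.c); a lookup that
+ * falls through to the terminator is reported to callers as -EINVAL.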
+ * + * @file: sxe2_com_ver_compat.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_COM_VER_COMPAT_H__ +#define __SXE2_COM_VER_COMPAT_H__ + +#include "sxe2_ioctl_chnl.h" + +struct sxe2_com_cmd_arg_sz { + struct sxe2_com_ver_arg_sz *ver_arg_sz; +}; + +struct sxe2_com_ver_arg_sz { + u32 ver; + u32 arg_size; +}; + +s32 sxe2_com_get_arg_sz(u32 ver, u32 cmd); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.c b/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.c new file mode 100644 index 0000000000000000000000000000000000000000..c7c959c4d691b02a48bf6c6013a1a478006412bc --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.c @@ -0,0 +1,1120 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_log.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_log.h" +#include "sxe2_compat.h" + +#if (defined SXE2_CFG_DEBUG && defined __KERNEL__) || (defined SXE2_DRIVER_TRACE) + +int time_for_file_name(char *buff, int buf_len) +{ + struct timespec64 tv; + struct tm td; + + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td); + + return snprintf(buff, buf_len, "%04ld-%02d-%02d_%02d:%02d:%02d", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec); +} + +int sxe2_file_write(struct file *file, char *buf, int len) +{ + int ret = 0; + + void *journal; + + journal = current->journal_info; + current->journal_info = NULL; + + if (!file) + return 0; + + do { +#ifdef KERNEL_WRITE_POS_LOFF + ret = kernel_write(file, buf, len, file->f_pos); +#else + ret = kernel_write(file, buf, len, &file->f_pos); +#endif + } while (ret == -EINTR); + + if (ret >= 0) + fsnotify_modify(file); + + current->journal_info = journal; + + return ret; +} +#endif + +#if !defined SXE2_CFG_RELEASE || !defined __KERNEL__ +static s32 sxe2_snprintf(char *buf, size_t size, const char *fmt, ...) 
+{
+	va_list args;
+	long size_check = (long)size;
+	s32 len = 0;
+
+	if (size_check <= 0)
+		return len;
+
+	va_start(args, fmt);
+	len = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	return len;
+}
+#endif
+
+#if defined SXE2_CFG_DEBUG && defined __KERNEL__
+#define FILE_NAME_SIZE 128
+#define SXE2_KLOG_OUT_WAIT (5 * HZ)
+#define SWITCH_FILE
+#define LOG_PATH_LEN 100
+#define DRV_LOG_FILE_SIZE_MIN_MB 10
+#define DRV_LOG_FILE_SIZE_MAX_MB 200
+
+struct sxe2_debug_t g_sxe2_debug;
+char g_log_path_str[LOG_PATH_LEN] = {0};
+char g_log_path_bin[LOG_PATH_LEN] = {0};
+
+static char g_log_path[80] = {0};
+module_param_string(g_log_path, g_log_path, 80, 0644);
+MODULE_PARM_DESC(g_log_path,
+		 "the path where host driver logs will be saved (<80 chars). Default: /var/log");
+
+static u32 g_log_file_size = 200;
+module_param(g_log_file_size, uint, 0644);
+MODULE_PARM_DESC(g_log_file_size,
+		 "single driver log file size (10MB ~ 200MB). Default: 200, Unit: MB");
+
+static u32 g_log_space_size;
+module_param(g_log_space_size, uint, 0644);
+MODULE_PARM_DESC(g_log_space_size,
+		 "the total space host driver logs are allowed to use. "
+		 "Default: 0 (unlimited), Unit: MB");
+
+static u32 g_log_tty;
+module_param(g_log_tty, uint, 0644);
+MODULE_PARM_DESC(g_log_tty,
+		 "allow driver logs (ERROR, WARN, INFO) to be output to the tty console. "
+		 "Default: 0 (not allowed)");
+
+u32 g_sxe2_dmesg_level = LOGLEVEL_INFO;
+module_param_named(dmesg_level, g_sxe2_dmesg_level, uint, 0644);
+MODULE_PARM_DESC(dmesg_level,
+		 "modify sxe2 dmesg log level. Default: INFO(release):INFO(debug)");
+
+static inline int time_for_log(char *buff, int buf_len)
+{
+	struct timespec64 tv;
+	struct tm td;
+
+	ktime_get_real_ts64(&tv);
+	time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td);
+	/* sub-second part is reported in microseconds (ns / 1000) */
+	return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]",
+			td.tm_year + 1900,
+			td.tm_mon + 1, td.tm_mday, td.tm_hour,
+			td.tm_min, td.tm_sec, tv.tv_nsec / 1000);
+}
+
+static inline char *sxe2_stack_top(void)
+{
+	unsigned long *ptr = (unsigned long *)(task_thread_info(current) + 1);
+
+	return (char *)(ptr + 1);
+}
+
+static inline struct sxe2_thread_local_t *sxe2_thread_local_get(struct sxe2_thread_key_t *key)
+{
+	return (struct sxe2_thread_local_t *)(sxe2_stack_top() + key->offset);
+}
+
+void sxe2_thread_key_create(int size, struct sxe2_thread_key_t *key)
+{
+	key->offset = g_sxe2_debug.key_offset;
+	g_sxe2_debug.key_offset += sizeof(struct sxe2_thread_local_t) + size;
+}
+
+void *sxe2_thread_get_specific(struct sxe2_thread_key_t *key)
+{
+	struct sxe2_thread_local_t *local = sxe2_thread_local_get(key);
+
+	if (local->magic != DEBUG_TRACE_MAGIC)
+		return NULL;
+
+	return (void *)local->data;
+}
+
+void sxe2_thread_clear_specific(struct sxe2_thread_key_t *key)
+{
+	struct sxe2_thread_local_t *local = sxe2_thread_local_get(key);
+
+	local->magic = 0;
+}
+
+int sxe2_filter_file_add(char *name)
+{
+	struct debug_file_t *file = NULL;
+
+	file = kmalloc(sizeof(*file), GFP_ATOMIC);
+	if (!file) {
+		sxe2_print(KERN_ERR, NULL, "kmalloc size %zu failed\n", sizeof(*file));
+		return -ENOMEM;
+	}
+	strscpy(file->name, name, sizeof(file->name));
+	INIT_LIST_HEAD(&file->list);
+
+	list_add_rcu(&file->list, &g_sxe2_debug.filter_file);
+	return 0;
+}
+
+void sxe2_filter_file_del(char *filename)
+{
+	struct debug_file_t *file = NULL;
+
+	list_for_each_entry_rcu(file, &g_sxe2_debug.filter_file, list) {
+		if (!strcmp(file->name, filename)) {
+			list_del_rcu(&file->list);
+			synchronize_rcu();
+			kfree(file);
+			return;
+		}
+	}
+}
+
+void sxe2_log_level_modify(u32 level)
+{
+	sxe2_level_set(level);
+}
+
+STATIC char *sxe2_log_path_query(void)
+{
+#ifndef __cplusplus
+	return g_log_path;
+#else
+	return NULL;
+#endif
+}
+
+STATIC u32 sxe2_log_space_size_query(void)
+{
+	return g_log_space_size;
+}
+
+STATIC u32 sxe2_log_file_size_query(void)
+{
+	return g_log_file_size;
+}
+
+STATIC void sxe2_log_file_size_modify(u32 size)
+{
+	g_log_file_size = size;
+}
+
+STATIC u32 sxe2_log_tty_query(void)
+{
+	return g_log_tty;
+}
+
+#ifndef SXE2_CFG_RELEASE
+static inline int sxe2_filter_file_print(const char *filename)
+{
+	struct debug_file_t *file;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(file, &g_sxe2_debug.filter_file, list) {
+		if (!strcmp(file->name, filename)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+static inline int sxe2_filter_func_print(const char *name)
+{
+	struct debug_func_t *func;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(func, &g_sxe2_debug.filter_func, list) {
+		if (!strcmp(func->name, name)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+#endif
+
+void sxe2_filter_file_clear(void)
+{
+	struct debug_file_t *file = NULL;
+
+	do {
+		file = list_first_or_null_rcu(&g_sxe2_debug.filter_file,
+					      struct debug_file_t,
+					      list);
+		if (file) {
+			list_del_rcu(&file->list);
+			synchronize_rcu();
+			kfree(file);
+		}
+	} while (file);
+}
+
+int sxe2_filter_func_add(char *name)
+{
+	struct debug_func_t *func = NULL;
+
+	func = kmalloc(sizeof(*func), GFP_ATOMIC);
+	if (!func) {
+		sxe2_print(KERN_ERR, NULL, "kmalloc size %zu failed\n", sizeof(*func));
+		return -ENOMEM;
+	}
+	strscpy(func->name, name, sizeof(func->name));
+	INIT_LIST_HEAD(&func->list);
+
+	list_add_rcu(&func->list, &g_sxe2_debug.filter_func);
+	return 0;
+}
+
+void sxe2_filter_func_del(char *name)
+{
+	struct debug_func_t *func = NULL;
+
+	list_for_each_entry_rcu(func, &g_sxe2_debug.filter_func, list) {
+		if (!strcmp(func->name, name)) {
+			list_del_rcu(&func->list);
+			synchronize_rcu();
+			kfree(func);
+			return;
+		}
+	}
+}
+
+void sxe2_filter_func_clear(void)
+{
+	struct debug_func_t *func = NULL;
+
+	do {
+		func = list_first_or_null_rcu(&g_sxe2_debug.filter_func,
+					      struct debug_func_t,
+					      list);
+		if (func) {
+			list_del_rcu(&func->list);
+			synchronize_rcu();
+			kfree(func);
+		}
+	} while (func);
+}
+
+static void sxe2_file_close(struct file **file)
+{
+	filp_close(*file, NULL);
+	*file = NULL;
+}
+
+static int sxe2_file_open(struct sxe2_log_t *log, struct file **pp_file)
+{
+	struct file *file;
+	int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE;
+	int flags_rewrite = O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC;
+	int err = 0;
+	int len = 0;
+	char filename[FILE_NAME_SIZE];
+
+#ifdef SWITCH_FILE
+	memset(filename, 0, FILE_NAME_SIZE);
+	len += snprintf(filename, FILE_NAME_SIZE, "%s", log->file_path);
+	if (log->file_num == 0) {
+		time_for_file_name(filename + len, FILE_NAME_SIZE - len);
+	} else {
+		snprintf(filename + len, FILE_NAME_SIZE - len, "%04d", log->index++);
+		log->index = log->index % log->file_num;
+	}
+
+	if (log->file_num == 1 && log->file) {
+		sxe2_file_close(&log->file);
+		log->file_pos = 0;
+	}
+#else
+	memset(filename, 0, FILE_NAME_SIZE);
+	strscpy(filename, log->file_path, FILE_NAME_SIZE);
+#endif
+	if (log->file_num == 0) {
+		file = filp_open(filename, flags_new, 0666);
+	} else {
+		file = filp_open(filename, flags_rewrite, 0666);
+		if (IS_ERR(file)) {
+			err = (int)PTR_ERR(file);
+			if (err == -ENOENT)
+				file = filp_open(filename, flags_new, 0666);
+		}
+	}
+	if (IS_ERR(file)) {
+		err = (int)PTR_ERR(file);
sxe2_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", filename, err); + goto l_out; + } + mapping_set_gfp_mask(file->f_path.dentry->d_inode->i_mapping, GFP_NOFS); + + sxe2_print(KERN_INFO, NULL, "redirect file %s\n", filename); + + *pp_file = file; + +l_out: + return err; +} + +static void sxe2_file_sync(struct file *file) +{ + struct address_space *mapping; + void *journal; + int ret = 0; + int err; + + (void)ret; + (void)err; + + if (!file || !file->f_op || !file->f_op->fsync) + goto l_end; + + journal = current->journal_info; + current->journal_info = NULL; + + mapping = file->f_mapping; + + ret = filemap_fdatawrite(mapping); + + err = file->f_op->fsync(file, 0, file->f_mapping->host->i_size, 1); + + current->journal_info = journal; + +l_end: + return; +} + +static void sxe2_klog_in(struct sxe2_log_t *log, char *buf, unsigned int *length) +{ + int begin = 0; + int end = 0; + int free_size; + unsigned long flags; + unsigned int len = 0; + + spin_lock_irqsave(&log->lock, flags); + len = *length; + if (len == 0) { + spin_unlock_irqrestore(&log->lock, flags); + goto l_out; + } + len = min((unsigned int)PER_CPU_PAGE_SIZE, len); + + if (log->head > log->tail) { + sxe2_print(KERN_WARNING, NULL, "FAILURE: log head exceeds log tail\n"); + SXE2_BUG_NO_SYNC(); + } + + free_size = log->buf_size - (log->tail - log->head); + + if (free_size <= len) { + log->is_drop = 1; + *length = 0; + spin_unlock_irqrestore(&log->lock, flags); + goto l_out; + } + + begin = log->tail % log->buf_size; + end = (log->tail + len) % log->buf_size; + + if (begin < end) { + memcpy(log->buf + begin, buf, len); + } else { + memcpy(log->buf + begin, buf, log->buf_size - begin); + memcpy(log->buf, buf + log->buf_size - begin, end); + } + + log->tail = log->tail + len; + *length = 0; + spin_unlock_irqrestore(&log->lock, flags); + +l_out: + return; +} + +static void sxe2_klog_out(struct sxe2_log_t *log) +{ + int len = 0; + int rc = 0; + long long tail; + int begin; + int end; + int schedule_count_th = 0; + const int max_loop = 4096; + +#ifdef SWITCH_FILE + struct file *file = NULL; +#endif + + if (!log->file) { + rc = sxe2_file_open(log, &log->file); + if (!log->file) + return; + + log->file_pos = 0; + } + + do { + tail = log->tail; + begin = log->head % log->buf_size; + end = tail % log->buf_size; + len = 0; + rc = 0; + + schedule_count_th++; + if (schedule_count_th >= max_loop) { + schedule_count_th = 0; + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + } + + if (log->is_drop) { + rc = sxe2_file_write(log->file, DEBUG_DROP_LOG_STRING, + strlen(DEBUG_DROP_LOG_STRING)); + if (rc < 0) + break; + + log->is_drop = 0; + } + + if (begin < end) { + rc = sxe2_file_write(log->file, log->buf + begin, end - begin); + if (rc > 0) + len += rc; + } else if (begin > end) { + rc = sxe2_file_write(log->file, log->buf + begin, log->buf_size - begin); + if (rc > 0) { + len += rc; + rc = sxe2_file_write(log->file, log->buf, end); + if (rc > 0) + len += rc; + } + } + log->head += len; + log->file_pos += len; + + LOG_BUG_ON(log->head > log->tail, "FAILURE: log head exceeds log tail\n"); + } while (log->head != log->tail && rc > 0); + + if (rc < 0) { + sxe2_print(KERN_ERR, NULL, "write file %s error %d\n", log->file_path, rc); + return; + } + +#ifdef SWITCH_FILE + if (log->file_pos >= log->file_size) { + rc = sxe2_file_open(log, &file); + if (rc >= 0 && log->file && log->file_num != 1) { + sxe2_file_close(&log->file); + log->file = file; + log->file_pos = 0; + } + } +#endif +} + +static int sxe2_klog_flush(void *arg) +{ + int i; + + 
while (!kthread_should_stop()) { + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + + for (i = 0; i < ARRAY_SIZE(g_sxe2_debug.log); i++) + sxe2_klog_out(&g_sxe2_debug.log[i]); + } + return 0; +} + +static int sxe2_klog_init(struct sxe2_log_t *log, + long long buf_size, + char *file_path, + long long file_size, + u32 file_num) +{ + int rc = 0; + + memset(log, 0, sizeof(*log)); + spin_lock_init(&log->lock); + + log->buf = vmalloc(buf_size + PER_CPU_PAGE_SIZE); + if (!log->buf) { + rc = -ENOMEM; + goto l_end; + } + + log->file = NULL; + log->head = 0; + log->tail = 0; + log->buf_size = buf_size; + + log->file_path = file_path; + log->file_pos = 0; + log->file_size = file_size; + log->file_num = file_num; + log->index = 0; +l_end: + return rc; +} + +static void sxe2_klog_exit(struct sxe2_log_t *log) +{ + if (log->buf) + vfree(log->buf); + + if (log->file) + sxe2_file_close(&log->file); +} + +static inline char *sxe2_file_name_locale(char *file) +{ + char *p_slash = strrchr(file, '/'); + + return (!p_slash) ? file : (p_slash + 1); +} + +void sxe2_level_set(int level) +{ + g_sxe2_debug.level = level; +} + +s32 sxe2_level_get(void) +{ + return (s32)g_sxe2_debug.level; +} + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_debug.status = status; +} + +s32 sxe2_bin_status_get(void) +{ + return (s32)g_sxe2_debug.status; +} + +void sxe2_log_string(enum debug_level_e level, + const char *dev_name, + const char *file, + const char *func, + int line, + const char *fmt, ...) +{ + struct sxe2_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + const char *name = dev_name ? dev_name : ""; + + va_list args; + + if (level > g_sxe2_debug.level) { +#ifndef SXE2_CFG_RELEASE + if (!sxe2_filter_file_print(file) && + !sxe2_filter_func_print(func)) { + return; + } +#else + return; +#endif + } + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + len = ctxt->len; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%s", + sxe2_debug_level_name(level)); + len += time_for_log(buf + len, PER_CPU_PAGE_SIZE - len); + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "[%d][%d][%s]%s:%4d:%s:", + raw_smp_processor_id(), current->pid, + name, + sxe2_file_name_locale((char *)file), line, func); + + if (len < PER_CPU_PAGE_SIZE) { + va_start(args, fmt); + len += vsnprintf(buf + len, + PER_CPU_PAGE_SIZE - len, + fmt, + args); + va_end(args); + } + + if (len >= PER_CPU_PAGE_SIZE) { + g_sxe2_debug.log[DEBUG_TYPE_STRING].is_drop = 1; + len = PER_CPU_PAGE_SIZE; + } + ctxt->len = len; + + if (!in_interrupt()) + local_irq_restore(flags); + + if (sxe2_log_tty_query()) { + if (buf[0] == 'I' || buf[0] == 'W') + pr_warn_ratelimited("%s", buf + LOG_INFO_PREFIX_LEN); + else if (buf[0] == 'E') + pr_warn_ratelimited("%s", buf + LOG_ERROR_PREFIX_LEN); + } + + sxe2_klog_in(&g_sxe2_debug.log[DEBUG_TYPE_STRING], ctxt->buff, &ctxt->len); + + wake_up_process(g_sxe2_debug.task); +} + +void sxe2_log_binary(const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + struct sxe2_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + u32 i = 0; + u32 j = 0; + u32 max; + u32 mod; + + if (sxe2_bin_status_get() != true) + return; + + max = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + 
len = ctxt->len; + + len += time_for_log(buf + len, PER_CPU_PAGE_SIZE - len); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "[%d] %s %s():%d %s size:%d\n", + current->pid, sxe2_file_name_locale((char *)file), func, + line, str, size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < max; i++) { + j = i * LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + if (len >= PER_CPU_PAGE_SIZE) { + g_sxe2_debug.log[DEBUG_TYPE_BINARY].is_drop = 1; + len = PER_CPU_PAGE_SIZE; + } + + ctxt->len = len; + if (!in_interrupt()) + local_irq_restore(flags); + + sxe2_klog_in(&g_sxe2_debug.log[DEBUG_TYPE_BINARY], ctxt->buff, &ctxt->len); + + wake_up_process(g_sxe2_debug.task); +} + +void sxe2_log_sync(void) +{ + sxe2_file_sync(g_sxe2_debug.log[DEBUG_TYPE_STRING].file); + sxe2_file_sync(g_sxe2_debug.log[DEBUG_TYPE_BINARY].file); +} + +static void sxe2_log_file_prefix_add(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, BINARY_FILE_PREFIX); + } +} + +static void sxe2_log_file_prefix_add_default(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, BINARY_FILE_PREFIX); + } +} + +static void sxe2_log_file_path_set(bool is_vf) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", VF_LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", VF_BINARY_FILE_PATH); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", BINARY_FILE_PATH); + } +} + +static int sxe2_log_path_init(bool is_vf) +{ + int rc = 0; + u32 file_num = 0; + char *log_path_p = NULL; + u32 log_path_len = 0; + u32 input_log_space = sxe2_log_space_size_query(); + u32 input_log_file_size = sxe2_log_file_size_query(); + unsigned int log_file_size = 0; + struct sxe2_log_t *log_bin = &g_sxe2_debug.log[DEBUG_TYPE_BINARY]; + struct sxe2_log_t *log_str = &g_sxe2_debug.log[DEBUG_TYPE_STRING]; + + log_path_p = sxe2_log_path_query(); + log_path_len = strlen(log_path_p); + if (log_path_p && log_path_p[0] == '/') 
{
+		if (log_path_len && log_path_p[log_path_len - 1] == '/')
+			sxe2_log_file_prefix_add(is_vf, log_path_p);
+		else
+			sxe2_log_file_prefix_add_default(is_vf, log_path_p);
+	} else {
+		sxe2_log_file_path_set(is_vf);
+	}
+	if (input_log_file_size < DRV_LOG_FILE_SIZE_MIN_MB ||
+	    input_log_file_size > DRV_LOG_FILE_SIZE_MAX_MB) {
+		sxe2_log_file_size_modify(LOG_FILE_SIZE >> MEGABYTE);
+		input_log_file_size = LOG_FILE_SIZE >> MEGABYTE;
+	}
+	if (input_log_space && input_log_space < input_log_file_size) {
+		sxe2_log_file_size_modify(input_log_space);
+		input_log_file_size = input_log_space;
+	}
+	log_file_size = input_log_file_size << MEGABYTE;
+
+	if (input_log_space) {
+		file_num = input_log_space / input_log_file_size;
+		if (file_num == 0) {
+			sxe2_print(KERN_ERR, NULL, "file_num should not be 0\n");
+			SXE2_BUG();
+		}
+	} else {
+		file_num = 0;
+	}
+
+	rc = sxe2_klog_init(log_str,
+			    BUF_SIZE,
+			    g_log_path_str,
+			    log_file_size,
+			    file_num);
+	if (rc < 0)
+		goto l_end;
+
+	rc = sxe2_klog_init(log_bin,
+			    BUF_SIZE,
+			    g_log_path_bin,
+			    BINARY_FILE_SIZE,
+			    0);
+	if (rc < 0)
+		goto l_free_string;
+
+	sxe2_print(KERN_INFO, NULL, "sxe2 debug init logpath[%s] strlogsize[%dM] filenum[%d]\n",
+		   g_log_path_str, (log_file_size >> MEGABYTE), log_str->file_num);
+	rc = 0;
+	return rc;
+l_free_string:
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_STRING]);
+l_end:
+	return rc;
+}
+
+int sxe2_log_init(bool is_vf)
+{
+	struct task_struct *task = NULL;
+	struct sxe2_ctxt_t *ctxt = NULL;
+	int rc = 0;
+	int i;
+	int nid;
+
+	INIT_LIST_HEAD(&g_sxe2_debug.filter_file);
+	INIT_LIST_HEAD(&g_sxe2_debug.filter_func);
+
+#ifdef SXE2_CFG_RELEASE
+	g_sxe2_debug.level = LEVEL_INFO;
+	g_sxe2_debug.status = false;
+#else
+	g_sxe2_debug.level = LEVEL_DEBUG;
+	g_sxe2_debug.status = true;
+#endif
+
+	g_sxe2_debug.ctxt = alloc_percpu(struct sxe2_ctxt_t);
+	if (!g_sxe2_debug.ctxt) {
+		rc = -ENOMEM;
+		sxe2_print(KERN_ERR, NULL, "alloc percpu failed\n");
+		goto l_end;
+	}
+
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		memset(ctxt, 0, sizeof(*ctxt));
+	}
+
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		nid = cpu_to_node(i);
+
+		ctxt->page = alloc_pages_node(nid, GFP_ATOMIC, PAGE_ORDER);
+		if (!ctxt->page) {
+			rc = -ENOMEM;
+			sxe2_print(KERN_ERR, NULL, "alloc_pages_node size %lu failed\n",
+				   PER_CPU_PAGE_SIZE);
+			goto l_free_cpu_buff;
+		}
+		ctxt->buff = page_address(ctxt->page);
+	}
+	rc = sxe2_log_path_init(is_vf);
+	if (rc < 0)
+		goto l_free_cpu_buff;
+
+	task = kthread_create(sxe2_klog_flush, NULL, "sxe2_klog_flush");
+	if (IS_ERR(task)) {
+		rc = (int)PTR_ERR(task);
+		sxe2_print(KERN_ERR, NULL, "Create kernel thread, err: %d\n", rc);
+		goto l_free_binary;
+	}
+	wake_up_process(task);
+	g_sxe2_debug.task = task;
+	rc = 0;
+l_end:
+	return rc;
+
+l_free_binary:
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_BINARY]);
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_STRING]);
+
+l_free_cpu_buff:
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		if (ctxt && ctxt->page)
+			__free_pages(ctxt->page, PAGE_ORDER);
+	}
+	free_percpu(g_sxe2_debug.ctxt);
+	goto l_end;
+}
+
+void sxe2_log_exit(void)
+{
+	int i = 0;
+	struct sxe2_ctxt_t *ctxt;
+
+	if (!g_sxe2_debug.task)
+		return;
+
+	kthread_stop(g_sxe2_debug.task);
+
+	for (i = 0; i < ARRAY_SIZE(g_sxe2_debug.log); i++)
+		sxe2_klog_exit(&g_sxe2_debug.log[i]);
+
+	if (g_sxe2_debug.ctxt) {
+		for_each_possible_cpu(i) {
+			ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+			if (ctxt && ctxt->page)
+				__free_pages(ctxt->page, PAGE_ORDER);
+		}
+
free_percpu(g_sxe2_debug.ctxt); + g_sxe2_debug.ctxt = NULL; + } +} + +#elif defined SXE2_CFG_RELEASE && defined __KERNEL__ +u32 g_sxe2_dmesg_level = LOGLEVEL_INFO; + +#elif !defined SXE2_CFG_RELEASE + +s32 g_sxe2_log_level = LEVEL_INFO; +s32 g_sxe2_bin_status; +char *test_bin_buf; + +s32 sxe2_log_init(bool is_vf) +{ + return 0; +} + +void sxe2_level_set(s32 level) +{ + g_sxe2_log_level = level; +} + +s32 sxe2_level_get(void) +{ + return g_sxe2_log_level; +} + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_bin_status = status; +} + +s32 sxe2_bin_status_get(void) +{ + return g_sxe2_bin_status; +} + +void sxe2_log_sync(void) +{ +} + +void sxe2_log_exit(void) +{ + if (test_bin_buf) + free(test_bin_buf); +} + +void sxe2_log_binary(const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + u32 i = 0; + u32 j = 0; + u32 iMax; + u32 mod; + char *buf = NULL; + int len = 0; + + if (sxe2_bin_status_get() != true) + return; + + buf = zalloc(PER_CPU_PAGE_SIZE); + test_bin_buf = buf; + + iMax = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "%s size:%d\n", str, size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < iMax; i++) { + j = i * LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + printf("buf:%s", buf); +} + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.h b/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.h new file mode 100644 index 0000000000000000000000000000000000000000..9a237c3bd74bc6873984a7bdc0514bcc8ca5f3b3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/log/sxe2_log.h @@ -0,0 +1,617 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
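+ *
+ * Debug-build logging pipeline: sxe2_log_string()/sxe2_log_binary()
+ * format a record into a per-CPU page, sxe2_klog_in() copies it into
+ * a shared ring buffer under log->lock, and the "sxe2_klog_flush"
+ * kthread drains the ring into rotating files under g_log_path
+ * (g_log_file_size MB per file, g_log_space_size MB in total).
+ * Typical call sites only use the wrappers declared below, e.g.:
+ *
+ *	LOG_INFO("link up, speed: %u\n", speed);
+ *	LOG_DEV_ERR("reset failed, ret: %d\n", ret);
+ *
+ * (illustrative calls; the *_BDF/_DEV/_MSG variants assume an
+ * "adapter" pointer is in scope at the call site).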
+ *
+ * @file: sxe2_log.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _SXE2_LOG_H_
+#define _SXE2_LOG_H_
+
+#ifdef SXE2_TEST
+#define STATIC
+#define pr_err printf
+#else
+#define STATIC static
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SXE2_HOST(ins) ((ins)->host->host_no)
+
+#define LOG_INFO_PREFIX_LEN 32
+#define LOG_ERROR_PREFIX_LEN 33
+/* log sizes are expressed as left shifts: (x << MEGABYTE) bytes == x MB */
+#define MEGABYTE 20
+
+enum debug_level_e {
+	LEVEL_ERROR,
+	LEVEL_WARN,
+	LEVEL_INFO,
+	LEVEL_DEBUG,
+};
+
+static inline const char *sxe2_debug_level_name(enum debug_level_e lv)
+{
+	static const char * const level[] = {
+		[LEVEL_ERROR] = "ERROR",
+		[LEVEL_WARN] = "WARN",
+		[LEVEL_INFO] = "INFO",
+		[LEVEL_DEBUG] = "DEBUG",
+	};
+
+	return level[lv];
+}
+
+#ifdef __KERNEL__
+
+#define PRINT_DEBUG KERN_DEBUG
+#define PRINT_INFO KERN_INFO
+#define PRINT_WARN KERN_WARNING
+#define PRINT_ERR KERN_ERR
+
+/* dispatch on the KERN_* prefix; note the !strcmp: strcmp() returns 0 on a match */
+#define sxe2_print(level, bdf, fmt, ...) do { \
+	if (!strcmp(level, KERN_DEBUG)) { \
+		pr_debug("[SXE2]%s():%d %s:" fmt, __func__, \
+			 __LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_INFO)) { \
+		pr_info("[SXE2]%s():%d %s:" fmt, __func__, \
+			__LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_WARNING)) { \
+		pr_warn("[SXE2]%s():%d %s:" fmt, __func__, \
+			__LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_ERR)) { \
+		pr_err("[SXE2]%s():%d %s:" fmt, __func__, \
+		       __LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} \
+} while (0)
+
+#else
+
+#define PRINT_DEBUG LEVEL_DEBUG
+#define PRINT_INFO LEVEL_INFO
+#define PRINT_WARN LEVEL_WARN
+#define PRINT_ERR LEVEL_ERROR
+
+/* user-space build; standard headers assumed for the printf/strrchr/
+ * pthread_self/gettimeofday users below
+ */
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/time.h>
+
+#define __percpu
+
+static inline U64 get_now_ms(void)
+{
+	struct timeval tv;
+	U64 timestamp = 0;
+
+	gettimeofday(&tv, NULL);
+	timestamp = tv.tv_sec * 1000 + tv.tv_usec / 1000;
+	return timestamp;
+}
+
+#define filename_printf(x) (strrchr((x), '/') ? strrchr((x), '/') + 1 : (x))
+
+#define sxe2_print(level, bdf, fmt, ...) do { \
+	if (level <= sxe2_level_get()) { \
+		if (level == LEVEL_DEBUG) { \
+			(void)printf("DEBUG:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_INFO) { \
+			(void)printf("INFO:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_WARN) { \
+			(void)printf("WARN:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_ERROR) { \
+			(void)printf("ERROR:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} \
+	} \
+} while (0)
+
+#endif
+
+#define LOG_BUG_ON(cond, fmt, ...) \
do { \ + if ((cond)) { \ + LOG_ERROR(fmt, ##__VA_ARGS__); \ + LOG_SYNC(); \ + BUG(); \ + } \ +} while (0) + +#define DEBUG_TRACE_MAGIC 0x456789 +#define BUF_SIZE (1024LL << 10) + +#define PAGE_ORDER 2 +#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << 2)) + +#define LOG_FILE_SIZE (200LL << 20) +#define BINARY_FILE_SIZE (200LL << 20) + +#define VF_LOG_FILE_PATH "/var/log/sxe2vf.log" +#define VF_LOG_FILE_PREFIX "sxe2vf.log" +#define VF_BINARY_FILE_PATH "/var/log/sxe2vf.bin" +#define VF_BINARY_FILE_PREFIX "sxe2vf.bin" + +#define LOG_FILE_PATH "/var/log/sxe2.log" +#define LOG_FILE_PREFIX "sxe2.log" +#define BINARY_FILE_PATH "/var/log/sxe2.bin" +#define BINARY_FILE_PREFIX "sxe2.bin" + +#define DEBUG_DROP_LOG_STRING "\nwarnning:drop some logs\n\n" + +enum { + DEBUG_TYPE_STRING, + DEBUG_TYPE_BINARY, + DEBUG_TYPE_NR, +}; + +struct debug_func_t { + struct list_head list; + char name[64]; +}; + +struct debug_file_t { + struct list_head list; + char name[64]; +}; + +struct sxe2_log_t { + struct { + char *buf; + int buf_size; + long long head; + long long tail; + /* in order to protect the data */ + spinlock_t lock; + unsigned char is_drop; + }; + struct { + char *file_path; + struct file *file; + long long file_pos; + long long file_size; + unsigned int file_num; + unsigned int index; + }; +}; + +struct sxe2_thread_local_t { + s32 magic; + char data[]; +}; + +struct sxe2_ctxt_t { + struct page *page; + void *buff; + unsigned int len; +}; + +struct sxe2_thread_key_t { + s32 offset; +}; + +struct sxe2_debug_t { + enum debug_level_e level; + bool status; + u16 key_offset; + struct sxe2_ctxt_t __percpu *ctxt; + struct list_head filter_func; + struct list_head filter_file; + struct task_struct *task; + struct sxe2_log_t log[DEBUG_TYPE_NR]; +}; + +void sxe2_level_set(int level); +s32 sxe2_level_get(void); + +void sxe2_bin_status_set(bool status); +s32 sxe2_bin_status_get(void); + +int sxe2_log_init(bool is_vf); +void sxe2_log_exit(void); + +void sxe2_log_string(enum debug_level_e level, + const char *dev_name, const char *file, const char *func, + int line, const char *fmt, ...); + +void sxe2_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str); + +#ifndef SXE2_CFG_RELEASE +#define DATA_DUMP(ptr, size, str) \ + sxe2_log_binary(__FILE__, __func__, __LINE__, (u8 *)ptr, 0, size, str) +#else +#define DATA_DUMP(ptr, size, str) +#endif +void sxe2_log_sync(void); + +void sxe2_thread_key_create(int size, struct sxe2_thread_key_t *key); + +void *sxe2_thread_get_specific(struct sxe2_thread_key_t *key); + +void sxe2_thread_clear_specific(struct sxe2_thread_key_t *key); + +int sxe2_filter_file_add(char *name); + +void sxe2_filter_file_del(char *filename); + +void sxe2_log_level_modify(u32 level); + +void sxe2_filter_file_clear(void); + +int sxe2_filter_func_add(char *name); + +void sxe2_filter_func_del(char *name); + +void sxe2_filter_func_clear(void); + +#ifdef SXE2_DRIVER_TRACE +int time_for_file_name(char *buff, int buf_len); +int sxe2_file_write(struct file *file, char *buf, int len); +#endif + +#if defined __KERNEL__ +extern u32 g_sxe2_dmesg_level; + +#if defined SXE2_CFG_DEBUG +int time_for_file_name(char *buff, int buf_len); +int sxe2_file_write(struct file *file, char *buf, int len); + +#define WRITE_LOG(level, bdf, fmt, ...) \ + sxe2_log_string(level, bdf, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__) +#define LOG_SYNC() sxe2_log_sync() +#define LOG_DEBUG_BDF(fmt, ...) WRITE_LOG(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) 
WRITE_LOG(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) WRITE_LOG(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) WRITE_LOG(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) +#else +#define UNUSED(x) ((void)(x)) +#define WRITE_LOG(level, bdf, fmt, ...) do {\ + UNUSED(level); \ + UNUSED(bdf); \ + UNUSED(fmt); \ +} while (0) +#define LOG_SYNC() +#define LOG_DEBUG_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) + +#define LOG_INFO_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#define LOG_WARN_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#define LOG_ERROR_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#endif + +#define FL_PR_DEBUG(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + pr_debug(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_INFO(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + pr_info(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_WARN(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + pr_warn(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_ERR(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + pr_err(fmt, ##__VA_ARGS__); } \ +} while (0) + +#define FL_DEV_DBG(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + dev_dbg(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_INFO(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + dev_info(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_WARN(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + dev_warn(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_ERR(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + dev_err(dev, fmt, ##__VA_ARGS__); } \ +} while (0) + +#define FL_NETDEV_DBG(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + netdev_dbg(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_INFO(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + netdev_info(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_WARN(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + netdev_warn(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_ERR(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + netdev_err(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) + +#define LOG_DEBUG(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) + +#define LOG_INFO(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__) + +#define LOG_WARN(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) + +#define LOG_ERROR(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) +#define LOG_INFO_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__) + +#define LOG_WARN_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) + +#define LOG_ERROR_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_DEBUG(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_INFO_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_INFO(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_WARN_D(fmt, ...) 
do {\ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_WARN(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_ERROR_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_ERR(fmt, ##__VA_ARGS__);\ +} while (0) + +#define LOG_DEV_DEBUG(format, arg...) do {\ + FL_DEV_DBG(&adapter->pdev->dev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_INFO(format, arg...) do {\ + FL_DEV_INFO(&adapter->pdev->dev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_WARN(format, arg...) do {\ + FL_DEV_WARN(&adapter->pdev->dev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_ERR(format, arg...) do {\ + FL_DEV_ERR(&adapter->pdev->dev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_DEBUG(msglvl, format, arg...) do {\ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_INFO(msglvl, format, arg...) do {\ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_WARN(msglvl, format, arg...) do {\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_ERR(msglvl, format, arg...) do {\ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ +} while (0) + +#define LOG_PR_DEBUG(format, arg...) FL_PR_DEBUG("sxe2: "format, ## arg) +#define LOG_PR_INFO(format, arg...) FL_PR_INFO("sxe2: "format, ## arg) +#define LOG_PR_WARN(format, arg...) FL_PR_WARN("sxe2: "format, ## arg) +#define LOG_PR_ERR(format, arg...) FL_PR_ERR("sxe2: "format, ## arg) +#define LOG_PRVF_DEBUG(format, arg...) FL_PR_DEBUG("sxe2vf: "format, ## arg) +#define LOG_PRVF_INFO(format, arg...) FL_PR_INFO("sxe2vf: "format, ## arg) +#define LOG_PRVF_WARN(format, arg...) FL_PR_WARN("sxe2vf: "format, ## arg) +#define LOG_PRVF_ERR(format, arg...) FL_PR_ERR("sxe2vf: "format, ## arg) + +#define LOG_NETDEV_DEBUG(format, arg...) do {\ + FL_NETDEV_DBG(netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_INFO(format, arg...) do {\ + FL_NETDEV_INFO(netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_WARN(format, arg...) do {\ + FL_NETDEV_WARN(netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_ERR(format, arg...) do {\ + FL_NETDEV_ERR(netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#else + +#define LOG_DEBUG(fmt, ...) sxe2_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) sxe2_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) sxe2_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) sxe2_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_IRQ(fmt, ...) sxe2_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO_IRQ(fmt, ...) sxe2_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN_IRQ(fmt, ...) sxe2_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR_IRQ(fmt, ...) sxe2_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) 
sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_DEV_DEBUG(fmt, ...) \ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_INFO(fmt, ...) \ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_WARN(fmt, ...) \ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_ERR(fmt, ...) \ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_MSG_DEBUG(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_INFO(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_WARN(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_ERR(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) + +#define LOG_NETDEV_DEBUG(fmt, ...) do {\ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_INFO(fmt, ...) do {\ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_WARN(fmt, ...) do {\ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_ERR(fmt, ...) do {\ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_DEBUG_D(fmt, ...) UNUSED(fmt) +#define LOG_INFO_D(fmt, ...) UNUSED(fmt) +#define LOG_WARN_D(fmt, ...) UNUSED(fmt) +#define LOG_ERROR_D(fmt, ...) UNUSED(fmt) + +#define LOG_PR_DEBUG(fmt, ...) \ + sxe2_print(PRINT_DEBUG, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_INFO(fmt, ...) \ + sxe2_print(PRINT_INFO, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_WARN(fmt, ...) \ + sxe2_print(PRINT_WARN, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_ERR(fmt, ...) \ + sxe2_print(PRINT_ERR, "sxe2", fmt, ##__VA_ARGS__) +#define LOG_PRVF_DEBUG(fmt, ...) \ + sxe2_print(PRINT_DEBUG, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_INFO(fmt, ...) \ + sxe2_print(PRINT_INFO, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_WARN(fmt, ...) \ + sxe2_print(PRINT_WARN, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_ERR(fmt, ...) 
\
+	sxe2_print(PRINT_ERR, "sxe2vf", fmt, ##__VA_ARGS__)
+
+#define LOG_SYNC()
+
+#endif
+
+#if defined SXE2_CFG_RELEASE
+#define SXE2_BUG_ON(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+} while (0)
+
+#define SXE2_BUG() do { pr_err("SXE2_BUG triggered\n"); } while (0)
+
+#define SXE2_BUG_ON_NO_SYNC(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+} while (0)
+
+#define SXE2_BUG_NO_SYNC()
+#else
+#define SXE2_BUG_ON(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_SYNC(); \
+	} \
+	BUG_ON(cond); \
+} while (0)
+
+#define SXE2_BUG() do { \
+	LOG_SYNC(); \
+	BUG(); \
+} while (0)
+
+#define SXE2_BUG_ON_NO_SYNC(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+	BUG_ON(cond); \
+} while (0)
+
+#define SXE2_BUG_NO_SYNC() \
+	BUG()
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2_trace.h b/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2_trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..f73ce8aa35c2defba716cb00f2747d1a14923f9e
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_trace.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#if !IS_ENABLED(CONFIG_TRACEPOINTS) || defined(__CHECKER__) || !defined(SXE2_DRIVER_TRACE)
+#if !defined(_SXE2_TRACE_H_)
+#define _SXE2_TRACE_H_
+
+#define sxe2_trace(trace_name, args...)
+#define sxe2_trace_enabled(trace_name) ((void)"" #trace_name, 0)
+#endif
+#else
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sxe2
+
+#if !defined(_SXE2_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SXE2_TRACE_H_
+
+#include "sxe2_irq.h"
+#include "sxe2_queue.h"
+
+#include <linux/tracepoint.h>
+
+#define _SXE2_TRACE_NAME(trace_name) (trace_##sxe2##_##trace_name)
+#define SXE2_TRACE_NAME(trace_name) _SXE2_TRACE_NAME(trace_name)
+
+#define sxe2_trace(trace_name, args...) \
SXE2_TRACE_NAME(trace_name)(args) + +DECLARE_EVENT_CLASS(sxe2_irq_rxclean, TP_PROTO(struct sxe2_irq_data *irq_data, int total_clean), + TP_ARGS(irq_data, total_clean), + TP_STRUCT__entry(__string(irqname, irq_data->name) __field(int, total_clean)), + TP_fast_assign(__assign_str(irqname, irq_data->name); + __entry->total_clean = total_clean;), + TP_printk("irqname: %s total_clean: %d", __get_str(irqname), + __entry->total_clean)); +#define DEFINE_IRQ_RXCLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2_irq_rxclean, name, \ + TP_PROTO(struct sxe2_irq_data *irq_data, int total_clean), \ + TP_ARGS(irq_data, total_clean)) +DEFINE_IRQ_RXCLEAN_EVENT(sxe2_irq_rxclean_begin); +DEFINE_IRQ_RXCLEAN_EVENT(sxe2_irq_rxclean_end); + +TRACE_EVENT(sxe2_rxq_clean_begin, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi;), + TP_printk("rxq idx in vsi: %u", __entry->idx_in_vsi)); +TRACE_EVENT(sxe2_rxq_clean_end, TP_PROTO(struct sxe2_queue *rxq, s32 clean), TP_ARGS(rxq, clean), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(s32, clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; __entry->clean = clean), + TP_printk("rxq idx in vsi: %u, clean: %d", __entry->idx_in_vsi, __entry->clean)); + +DECLARE_EVENT_CLASS(sxe2_pkt_clean, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(u16, next_to_clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; + __entry->next_to_clean = rxq->next_to_clean;), + TP_printk("idx_in_vsi: %u next_to_clean: %u", __entry->idx_in_vsi, + __entry->next_to_clean)); +#define DEFINE_PKT_CLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2_pkt_clean, name, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq)) +DEFINE_PKT_CLEAN_EVENT(sxe2_rx_pkt_clean_begin); +DEFINE_PKT_CLEAN_EVENT(sxe2_rx_pkt_clean_end); + +DECLARE_EVENT_CLASS(sxe2_xmit_template, TP_PROTO(struct sxe2_queue *queue, struct sk_buff *skb), + TP_ARGS(queue, skb), + TP_STRUCT__entry(__field(void *, queue) __field(void *, skb) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; __entry->skb = skb; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s skb: %pK queue: %pK", __get_str(devname), __entry->skb, + __entry->queue)); + +#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2_xmit_template, name, \ + TP_PROTO(struct sxe2_queue *queue, struct sk_buff *skb), TP_ARGS(queue, skb)) + +DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2_queue_xmit); +DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2_queue_xmit_drop); + +union sxe2_tx_data_desc; +struct sxe2_tx_buf; +DECLARE_EVENT_CLASS(sxe2_tx_clean_template, + TP_PROTO(struct sxe2_queue *txq, + union sxe2_tx_data_desc *tx_desc, + struct sxe2_tx_buf *tx_buf), + TP_ARGS(txq, tx_desc, tx_buf), + TP_STRUCT__entry(__field(void *, txq) + __field(void *, tx_desc) + __field(void *, tx_buf) + __string(devname, txq->netdev->name)), + TP_fast_assign(__entry->txq = txq; + __entry->tx_desc = tx_desc; + __entry->tx_buf = tx_buf; + __assign_str(devname, txq->netdev->name);), + TP_printk("netdev: %s txq_idx: %u txq: %p tx_desc: %p tx_buf %p", __get_str(devname), + ((struct sxe2_queue *)__entry->txq)->idx_in_vsi, __entry->txq, + __entry->tx_desc, __entry->tx_buf) +); + +#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ +DEFINE_EVENT(sxe2_tx_clean_template, name, \ + TP_PROTO(struct sxe2_queue *txq, \ + union sxe2_tx_data_desc *tx_desc, \ + struct sxe2_tx_buf *tx_buf), \ + TP_ARGS(txq, tx_desc, tx_buf)) + 
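+/*
+ * The probes below all share sxe2_tx_clean_template through
+ * DEFINE_EVENT, so they reuse one TP_STRUCT/TP_printk layout and a
+ * new probe point costs a single line, e.g. a hypothetical
+ *
+ *	DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq_done);
+ *
+ * placed next to the existing instantiations.
+ */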
+DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq_unmap); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq_unmap_eop); + +DECLARE_EVENT_CLASS(sxe2_txts_template, TP_PROTO(struct sk_buff *skb, int idx), TP_ARGS(skb, idx), + TP_STRUCT__entry(__field(void *, skb) __field(int, idx)), + TP_fast_assign(__entry->skb = skb; __entry->idx = idx;), + TP_printk("skb %pK idx %d", __entry->skb, __entry->idx)); + +#define DEFINE_TX_TSTAMP_OP_EVENT(name) \ + DEFINE_EVENT(sxe2_txts_template, name, TP_PROTO(struct sk_buff *skb, int idx), \ + TP_ARGS(skb, idx)) + +DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_request); +DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_hw_request); +DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_hw_done); +DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_complete); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH CUR_DIR "/base/trace" +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE sxe2_trace +#include +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2vf_trace.h b/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2vf_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..f1a589bc4d1f0c6550b5fdc79da4e7499ae6eef3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/base/trace/sxe2vf_trace.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_trace.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#if !IS_ENABLED(CONFIG_TRACEPOINTS) || defined(__CHECKER__) || !defined(SXE2_DRIVER_TRACE) +#if !defined(_SXE2VF_TRACE_H_) +#define _SXE2VF_TRACE_H_ + +#define sxe2vf_trace(trace_name, args...) +#define sxe2vf_trace_enabled(trace_name) ((void)"" #trace_name, 0) +#endif +#else + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sxe2vf + +#if !defined(_SXE2VF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _SXE2VF_TRACE_H_ + +#include "sxe2vf_irq.h" +#include "sxe2vf_queue.h" +#include + +#define _SXE2VF_TRACE_NAME(trace_name) (trace_##sxe2vf##_##trace_name) +#define SXE2VF_TRACE_NAME(trace_name) _SXE2VF_TRACE_NAME(trace_name) + +#define sxe2vf_trace(trace_name, args...) 
SXE2VF_TRACE_NAME(trace_name)(args) + +DECLARE_EVENT_CLASS(sxe2vf_irq_rxclean, + TP_PROTO(struct sxe2vf_irq_data *irq_data, int total_clean), + TP_ARGS(irq_data, total_clean), + TP_STRUCT__entry(__string(irqname, irq_data->name) __field(int, total_clean)), + TP_fast_assign(__assign_str(irqname, irq_data->name); + __entry->total_clean = total_clean;), + TP_printk("irqname: %s total_clean: %d", __get_str(irqname), + __entry->total_clean)); +#define DEFINE_IRQ_RXCLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2vf_irq_rxclean, name, \ + TP_PROTO(struct sxe2vf_irq_data *irq_data, int total_clean), \ + TP_ARGS(irq_data, total_clean)) +DEFINE_IRQ_RXCLEAN_EVENT(sxe2vf_irq_rxclean_begin); +DEFINE_IRQ_RXCLEAN_EVENT(sxe2vf_irq_rxclean_end); + +TRACE_EVENT(sxe2vf_rxq_clean_begin, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi;), + TP_printk("rxq idx in vsi: %u", __entry->idx_in_vsi)); +TRACE_EVENT(sxe2vf_rxq_clean_end, TP_PROTO(struct sxe2vf_queue *rxq, s32 clean), + TP_ARGS(rxq, clean), TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(s32, clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; __entry->clean = clean), + TP_printk("rxq idx in vsi: %u, clean: %d", __entry->idx_in_vsi, __entry->clean)); + +DECLARE_EVENT_CLASS(sxe2vf_pkt_clean, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(u16, next_to_clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; + __entry->next_to_clean = rxq->next_to_clean;), + TP_printk("idx_in_vsi: %u next_to_clean: %u", __entry->idx_in_vsi, + __entry->next_to_clean)); +#define DEFINE_PKT_CLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2vf_pkt_clean, name, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq)) +DEFINE_PKT_CLEAN_EVENT(sxe2vf_rx_pkt_clean_begin); +DEFINE_PKT_CLEAN_EVENT(sxe2vf_rx_pkt_clean_end); + +union sxe2vf_tx_data_desc; +struct sxe2vf_tx_buf; +DECLARE_EVENT_CLASS(sxe2vf_tx_template, + TP_PROTO(struct sxe2vf_queue *queue, union sxe2vf_tx_data_desc *desc, + struct sxe2vf_tx_buf *buf), + TP_ARGS(queue, desc, buf), + TP_STRUCT__entry(__field(void *, queue) __field(void *, desc) + __field(void *, buf) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; + __entry->desc = desc; __entry->buf = buf; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s queue: %pK desc: %pK buf %pK", __get_str(devname), + __entry->queue, __entry->desc, __entry->buf)); + +#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2vf_tx_template, name, \ + TP_PROTO(struct sxe2vf_queue *queue, union sxe2vf_tx_data_desc *desc, \ + struct sxe2vf_tx_buf *buf), \ + TP_ARGS(queue, desc, buf)) + +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean_unmap); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean_unmap_eop); + +DECLARE_EVENT_CLASS(sxe2vf_xmit_template, + TP_PROTO(struct sxe2vf_queue *queue, struct sk_buff *skb), + TP_ARGS(queue, skb), + TP_STRUCT__entry(__field(void *, queue) __field(void *, skb) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; __entry->skb = skb; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s skb: %pK queue: %pK", __get_str(devname), __entry->skb, + __entry->queue)); + +#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2vf_xmit_template, name, \ + TP_PROTO(struct sxe2vf_queue *queue, struct sk_buff *skb), TP_ARGS(queue, skb)) + 
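+/*
+ * These tracepoints compile in only when CONFIG_TRACEPOINTS and
+ * SXE2_DRIVER_TRACE are both enabled; at runtime they appear under
+ * the standard tracefs hierarchy, e.g. (assuming tracefs is mounted
+ * at its usual location):
+ *
+ *	echo 1 > /sys/kernel/tracing/events/sxe2vf/sxe2vf_queue_xmit/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */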
+DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2vf_queue_xmit);
+DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2vf_queue_xmit_drop);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH CUR_DIR "/base/trace"
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sxe2vf_trace
+#include <trace/define_trace.h>
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/check_aux_support b/drivers/net/ethernet/linkdata/sxe2/check_aux_support
new file mode 100755
index 0000000000000000000000000000000000000000..d08339f69c1026bc43dcecaa2641d1e9a932a927
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/check_aux_support
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+msg()
+{
+	echo -e "$1"
+}
+
+usage()
+{
+	msg "Usage: $0 -k|--ksrc <kernel build dir> [-v|--verbose] [-h|--help]"
+}
+
+exit_builtin_auxiliary_enabled() { exit 0; }
+exit_kconfig_invalid() { exit 1; }
+exit_need_oot_auxiliary() { exit 2; }
+exit_not_found_failure() { exit 3; }
+
+find_aux_bus_inc()
+{
+	aux_bus_inc=$(find -L ${ksrc} -name "auxiliary_bus.h")
+	msg "auxiliary_bus.h location: ${aux_bus_inc}"
+}
+
+LINUX_INCLUDE_DIR="include/linux"
+
+find_config_file()
+{
+	file_locations=(${ksrc}/include/generated/autoconf.h \
+			${ksrc}/include/linux/autoconf.h \
+			/boot/bmlinux.autoconf.h)
+
+	for file in "${file_locations[@]}"; do
+		if [ -f ${file} ]; then
+			kconfig=${file}
+			break
+		fi
+	done
+
+	if [ -z "${kconfig}" ]; then
+		msg "Kernel config file not found at any of the expected locations."
+	fi
+}
+
+get_config_auxiliary_bus()
+{
+	# CONFIG_AUXILIARY_BUS=0 corresponds to CONFIG_AUXILIARY_BUS=n
+	# CONFIG_AUXILIARY_BUS=1 corresponds to CONFIG_AUXILIARY_BUS=y
+	# CONFIG_AUXILIARY_BUS= corresponds to CONFIG_AUXILIARY_BUS not available in the kernel
+	CONFIG_AUXILIARY_BUS=$(grep CONFIG_AUXILIARY_BUS ${kconfig} | awk -F" " '{print $3}')
+	msg "CONFIG_AUXILIARY_BUS=${CONFIG_AUXILIARY_BUS}"
+}
+
+ksrc=""
+verbose=0
+
+options=$(getopt -o "k:vh" --long ksrc:,verbose,help -- "$@")
+eval set -- "$options"
+while :; do
+	case $1 in
+	-k|--ksrc) ksrc=$2; shift;;
+	-v|--verbose) verbose=1 ;;
+	-h|--help) usage && exit 0;;
+	--) shift; break;;
+	esac
+	shift
+done
+
+if [ $verbose == 1 ]; then
+	set -x
+fi
+
+find_config_file
+
+if [ ! -z "$kconfig" ]; then
+	# if we found the kernel .config file then exit the script based on various
+	# conditions that depend on the CONFIG_AUXILIARY_BUS string being found
+	get_config_auxiliary_bus
+
+	if [ -z "$CONFIG_AUXILIARY_BUS" ]; then
+		msg "CONFIG_AUXILIARY_BUS not found in ${kconfig}."
+		# CONFIG_AUXILIARY_BUS string was not found, so OOT auxiliary is needed
+		exit_need_oot_auxiliary
+	elif [ "$CONFIG_AUXILIARY_BUS" = "1" ]; then
+		msg "CONFIG_AUXILIARY_BUS=y in ${kconfig}."
+		# CONFIG_AUXILIARY_BUS=y, so OOT auxiliary is not needed
+		exit_builtin_auxiliary_enabled
+	else
+		msg ""
+		msg "this kernel supports auxiliary bus, but CONFIG_AUXILIARY_BUS"
+		msg "is not set in ${kconfig}. Rebuild your kernel with"
+		msg "CONFIG_AUXILIARY_BUS=y"
+		msg ""
+		# CONFIG_AUXILIARY_BUS is not "=y", but the string was found, so report
+		# the failure so it can be used to fail build/install
+		exit_kconfig_invalid
+	fi
+else
+	if [ ! -d ${ksrc}/${LINUX_INCLUDE_DIR} ] && [ ! \
-d ${ksrc}/source/${LINUX_INCLUDE_DIR} ]; then + echo "${ksrc}/${LINUX_INCLUDE_DIR} and ${ksrc}/source/${LINUX_INCLUDE_DIR} do not exist" + exit_not_found_failure + fi + + # We didn't find a kernel .config file, so check to see if auxiliary_bus.h + # is found in the kernel source include directory + find_aux_bus_inc + + if [ -f "$aux_bus_inc" ]; then + # AUXILIARY_MODULE_PREFIX is defined only in out-of-tree auxiliary bus + if [ $(grep -c AUXILIARY_MODULE_PREFIX $aux_bus_inc) -eq 0 ]; then + msg "in-tree auxiliary_bus.h found at ${ksrc}/${LINUX_INCLUDE_DIR}" + # If auxiliary_bus.h is included at ${ksrc} and it isn't our OOT version, then + # don't build OOT auxiliary as part of the driver makefile + exit_builtin_auxiliary_enabled + else + msg "OOT auxiliary_bus.h found at ${ksrc}/${LINUX_INCLUDE_DIR}" + # If auxiliary bus is included at ${ksrc} and it is our OOT version, then + # build OOT auxiliary as part of the driver makefile + exit_need_oot_auxiliary + fi + else + msg "auxiliary_bus.h not found at ${ksrc}/${LINUX_INCLUDE_DIR}" + exit_need_oot_auxiliary + fi +fi diff --git a/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_drv_cmd.h b/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_drv_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..cf5c2fa964bef4ff7456ddd4622613c676dadbb5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_drv_cmd.h @@ -0,0 +1,893 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_drv_cmd.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DRV_CMD_H__ +#define __SXE2_DRV_CMD_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_type.h" +#include "sxe2_cmd.h" +#include "sxe2_flow_public.h" + +#define SXE2_DPDK_RESOURCE_INSUFFICIENT +#endif + +#ifdef SXE2_LINUX_DRIVER +#ifdef __KERNEL__ +#include +#include +#endif +#endif + +#ifdef __KERNEL__ +#define SXE2_ATTRIBUTE __aligned(4) +#else +#define SXE2_ATTRIBUTE +#pragma pack(4) +#endif + +#define SXE2_DRV_CMD_MODULE_S (16) +#define SXE2_MK_DRV_CMD(module, cmd) ((module) << SXE2_DRV_CMD_MODULE_S | (cmd)) + +#define SXE2_DEV_CAPS_OFFLOAD_L2 BIT(0) +#define SXE2_DEV_CAPS_OFFLOAD_VLAN BIT(1) +#define SXE2_DEV_CAPS_OFFLOAD_RSS BIT(2) +#define SXE2_DEV_CAPS_OFFLOAD_IPSEC BIT(3) +#define SXE2_DEV_CAPS_OFFLOAD_FNAV BIT(4) +#define SXE2_DEV_CAPS_OFFLOAD_TM BIT(5) +#define SXE2_DEV_CAPS_OFFLOAD_PTP BIT(6) +#define SXE2_DEV_CAPS_OFFLOAD_Q_MAP BIT(7) +#define SXE2_DEV_CAPS_OFFLOAD_FC_STATE BIT(8) + +#define SXE2_TXQ_STATS_MAP_MAX_NUM 16 +#define SXE2_RXQ_STATS_MAP_MAX_NUM 4 +#define SXE2_RXQ_MAP_Q_MAX_NUM 256 + +#define SXE2_STAT_MAP_INVALID_QID 0xFFFF + +#define SXE2_SCHED_MODE_DEFAULT 0 +#define SXE2_SCHED_MODE_TM 1 +#define SXE2_SCHED_MODE_HIGH_PERFORMANCE 2 +#define SXE2_SCHED_MODE_INVALID 3 + +#define SXE2_SRCVSI_PRUNE_MAX_NUM 2 + +#define SXE2_PTYPE_UNKNOWN BIT(0) +#define SXE2_PTYPE_L2_ETHER BIT(1) +#define SXE2_PTYPE_L3_IPV4 BIT(2) +#define SXE2_PTYPE_L3_IPV6 BIT(4) +#define SXE2_PTYPE_L4_TCP BIT(6) +#define SXE2_PTYPE_L4_UDP BIT(7) +#define SXE2_PTYPE_L4_SCTP BIT(8) +#define SXE2_PTYPE_INNER_L2_ETHER BIT(9) +#define SXE2_PTYPE_INNER_L3_IPV4 BIT(10) +#define SXE2_PTYPE_INNER_L3_IPV6 BIT(12) +#define SXE2_PTYPE_INNER_L4_TCP BIT(14) +#define SXE2_PTYPE_INNER_L4_UDP BIT(15) +#define SXE2_PTYPE_INNER_L4_SCTP BIT(16) +#define SXE2_PTYPE_TUNNEL_GRENAT BIT(17) + +#define SXE2_PTYPE_L2_MASK (SXE2_PTYPE_L2_ETHER) +#define SXE2_PTYPE_L3_MASK (SXE2_PTYPE_L3_IPV4 | 
SXE2_PTYPE_L3_IPV6) +#define SXE2_PTYPE_L4_MASK (SXE2_PTYPE_L4_TCP | SXE2_PTYPE_L4_UDP | \ + SXE2_PTYPE_L4_SCTP) +#define SXE2_PTYPE_INNER_L2_MASK (SXE2_PTYPE_INNER_L2_ETHER) +#define SXE2_PTYPE_INNER_L3_MASK (SXE2_PTYPE_INNER_L3_IPV4 | \ + SXE2_PTYPE_INNER_L3_IPV6) +#define SXE2_PTYPE_INNER_L4_MASK (SXE2_PTYPE_INNER_L4_TCP | \ + SXE2_PTYPE_INNER_L4_UDP | \ + SXE2_PTYPE_INNER_L4_SCTP) +#define SXE2_PTYPE_TUNNEL_MASK (SXE2_PTYPE_TUNNEL_GRENAT) + +enum sxe2_dev_type { + SXE2_DEV_T_PF = 0, + SXE2_DEV_T_VF, + SXE2_DEV_T_PF_BOND, + SXE2_DEV_T_MAX, +}; + +struct sxe2_drv_queue_caps { + __le16 queues_cnt; + __le16 base_idx_in_pf; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_msix_caps { + __le16 msix_vectors_cnt; + __le16 base_idx_in_func; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rss_hash_caps { + __le16 hash_key_size; + __le16 lut_key_size; +} SXE2_ATTRIBUTE; + +enum sxe2_vf_vsi_valid { + SXE2_VF_VSI_BOTH = 0, + SXE2_VF_VSI_ONLY_DPDK, + SXE2_VF_VSI_ONLY_KERNEL, + SXE2_VF_VSI_MAX, +}; + +struct sxe2_drv_vsi_caps { + __le16 func_id; + __le16 dpdk_vsi_id; + __le16 kernel_vsi_id; + __le16 vsi_type; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_representor_caps { + __le16 cnt_repr_vf; + u8 rsv[2]; + struct sxe2_drv_vsi_caps repr_vf_id[256]; +} SXE2_ATTRIBUTE; + +enum sxe2_phys_port_name_type { + SXE2_PHYS_PORT_NAME_TYPE_NOTSET = 0, + SXE2_PHYS_PORT_NAME_TYPE_LEGACY, + SXE2_PHYS_PORT_NAME_TYPE_UPLINK, + SXE2_PHYS_PORT_NAME_TYPE_PFVF, + SXE2_PHYS_PORT_NAME_TYPE_UNKNOWN, +}; + +struct sxe2_switchdev_info { + u8 is_switchdev; + u8 master; + u8 representor; + u8 port_name_type; + __le32 ctrl_num; + __le32 pf_num; + __le32 vf_num; + __le32 mpesw_owner; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_uplink_info { + u8 pf_id; + u8 is_set; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_repr_info { + u8 pf_id; + u8 is_set; + u8 rsv[2]; + __le16 cp_vsi_id; + __le16 repr_pf_id; + __le16 repr_vf_id; + __le16 repr_q_id; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_mode_info { + u8 pf_id; + u8 is_switchdev; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_cpvsi_info { + __le16 cp_vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_txsch_caps { + u8 layer_cap; + u8 tm_mid_node_num; + u8 prio_num; + u8 rev; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_caps_resp { + struct sxe2_drv_queue_caps queue_caps; + struct sxe2_drv_msix_caps msix_caps; + struct sxe2_drv_rss_hash_caps rss_hash_caps; + struct sxe2_drv_vsi_caps vsi_caps; + struct sxe2_txsch_caps txsch_caps; + struct sxe2_drv_representor_caps repr_caps; + u8 port_idx; + u8 pf_idx; + u8 dev_type; + u8 rev; + __le32 cap_flags; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_info_resp { + __le64 dsn; + __le16 vsi_id; + u8 rsv[2]; + u8 mac_addr[ETH_ALEN]; + u8 rsv2[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_fw_info_resp { + u8 main_version_id; + u8 sub_version_id; + u8 fix_version_id; + u8 build_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rxq_ctxt { + __le64 dma_addr; + __le32 max_lro_size; + __le32 split_type_mask; + __le16 hdr_len; + __le16 buf_len; + __le16 depth; + __le16 queue_id; + u8 lro_en; + u8 keep_crc_en; + u8 split_en; + u8 desc_size; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rxq_cfg_req { + __le16 q_cnt; + __le16 vsi_id; + __le16 max_frame_size; + u8 rsv[2]; + struct sxe2_drv_rxq_ctxt cfg[]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_txq_ctxt { + __le64 dma_addr; + __le32 sched_mode; + __le16 queue_id; + __le16 depth; + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_txq_cfg_req { + __le16 q_cnt; + __le16 vsi_id; + struct sxe2_drv_txq_ctxt cfg[]; +} 
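/* SXE2_ATTRIBUTE keeps this shared ABI layout consistent across builds: it is __aligned(4) for kernel builds, while user-space builds compile the whole header under #pragma pack(4) instead (both defined near the top of this file). */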
SXE2_ATTRIBUTE; + +struct sxe2_drv_q_switch_req { + __le16 q_idx; + __le16 vsi_id; + u8 is_enable; + u8 sched_mode; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_queue_irq_bind_req { + __le16 q_idx; + __le16 msix_idx; + u8 itr_idx; + u8 bind; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_create_req_resp { + __le16 vsi_id; + __le16 vsi_type; + struct sxe2_drv_queue_caps used_queues; + struct sxe2_drv_msix_caps used_msix; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_free_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_info_get_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_info_get_resp { + __le16 vsi_id; + __le16 vsi_type; + struct sxe2_drv_queue_caps used_queues; + struct sxe2_drv_msix_caps used_msix; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_udp_tunnel_req { + u8 type; + u8 rsv; + __le16 port; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_udp_tunnel_resp { + u8 type; + u8 enable; + u8 dst; + u8 src; + u16 port; + u8 fw_used; + u8 rsv; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rx_map_req { + __le16 queue_id; + u8 pool_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_tx_map_req { + __le16 queue_id; + u8 pool_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vlan_cfg_query_resp { + __le16 vsi_id; + u8 port_vlan_exist; + u8 is_switchdev; + __le16 tpid; + __le16 vid; + u8 outer_insert; + u8 outer_strip; + u8 inner_insert; + u8 inner_strip; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vlan_offload_cfg_req { + __le16 vsi_id; + __le16 tpid; + u8 outer_insert; + u8 outer_strip; + u8 inner_insert; + u8 inner_strip; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_port_vlan_cfg_req { + __le16 vsi_id; + __le16 tpid; + __le16 vid; + u8 prio; + u8 rsv; +} SXE2_ATTRIBUTE; + +enum sxe2_mac_filter_type { + SXE2_MAC_FILTER_TYPE_UC = 0, + SXE2_MAC_FILTER_TYPE_MC, + SXE2_MAC_FILTER_TYPE_MAX, +}; + +struct sxe2_mac_filter_cfg_req { + __le16 vsi_id; + u8 addr[ETH_ALEN]; + u8 type; + u8 is_add; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +enum sxe2_promisc_filter_type { + SXE2_PROMISC_FILTER_TYPE_PROMISC = 0, + SXE2_PROMISC_FILTER_TYPE_ALLMULTI, + SXE2_PROMISC_FILTER_TYPE_MAX, +}; + +struct sxe2_promisc_filter_cfg_req { + __le16 vsi_id; + u8 type; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_srcvsi_ext_cfg_req { + __le16 vsi_id; + __le16 srcvsi_list[SXE2_SRCVSI_PRUNE_MAX_NUM]; + u8 srcvsi_cnt; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_vlan_filter_cfg_req { + __le16 vsi_id; + __le16 vlan_id; + __le16 tpid_id; + u8 prio; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_vlan_filter_switch_req { + __le16 vsi_id; + u8 is_oper_enable; + u8 rsv; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_key_req { + __le16 vsi_id; + __le16 key_size; + u8 key[]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_lut_req { + __le16 vsi_id; + __le16 lut_size; + u8 lut[]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_func_req { + __le16 vsi_id; + u8 func; + u8 rsv[1]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_hf_req { + __le16 vsi_id; + u8 rsv[2]; + __le32 headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + __le32 hash_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + __le32 hdr_type; + u8 symm; + u8 rsv1[3]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_capa_resq { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; + __le16 ip_id_cnt; + __le16 udp_group_cnt; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_add_req { + __le32 mode; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_add_resp { + __le16 index; +} SXE2_ATTRIBUTE; + +struct 
sxe2_drv_ipsec_rxsa_add_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 is_over_sdn; + u8 sdn_group_id; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_rxsa_add_resp { + u8 ip_id; + u8 udp_group_id; + __le16 sa_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_del_req { + __le16 sa_idx; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_rxsa_del_req { + u8 ip_id; + u8 group_id; + __le16 sa_idx; + __le32 spi; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_link_info_resp { + __le32 speed; + u8 status; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_filter_req { + __le32 flow_id; + struct sxe2_flow_meta meta; + enum sxe2_flow_engine_type engine_type; + struct sxe2_flow_pattern pattern_outer; + struct sxe2_flow_pattern pattern_inner; + struct sxe2_flow_action action; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_filter_resp { + enum sxe2_flow_engine_type engine_type; + __le32 flow_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_get_stat_id_req { + u8 need_update; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_get_stat_id_resp { + __le32 stat_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_free_stat_id_req { + __le32 stat_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_query_stat_req { + __le32 stat_id; + __le32 stat_ctrl; + __le32 is_clear; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_query_stat_resp { + __le32 stat_index; + __le64 stat_hits; + __le64 stat_bytes; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_sw_stats { + __le64 rx_packets; + __le64 rx_bytes; + __le64 tx_packets; + __le64 tx_bytes; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_stats_req { + __le16 vsi_id; + u8 rsv[2]; + struct sxe2_drv_vsi_sw_stats sw_stats; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_stats_resp { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_mac_stats_resp { + __le64 rx_out_of_buffer; + __le64 rx_qblock_drop; + __le64 tx_frame_good; + __le64 rx_frame_good; + __le64 rx_crc_errors; + __le64 tx_bytes_good; + __le64 rx_bytes_good; + __le64 tx_multicast_good; + __le64 tx_broadcast_good; + __le64 rx_multicast_good; + __le64 rx_broadcast_good; + __le64 rx_len_errors; + __le64 rx_out_of_range_errors; + __le64 rx_oversize_pkts_phy; + __le64 rx_symbol_err; + __le64 rx_pause_frame; + __le64 tx_pause_frame; + __le64 rx_discards_phy; + __le64 rx_discards_ips_phy; + __le64 tx_dropped_link_down; + __le64 rx_undersize_good; + __le64 rx_runt_error; + __le64 tx_bytes_good_bad; + __le64 tx_frame_good_bad; + __le64 rx_jabbers; + __le64 rx_size_64; + __le64 rx_size_65_127; + __le64 rx_size_128_255; + __le64 rx_size_256_511; + __le64 rx_size_512_1023; + __le64 rx_size_1024_1522; + __le64 rx_size_1523_max; + __le64 rx_pcs_symbol_err_phy; + __le64 rx_corrected_bits_phy; + __le64 rx_err_lane_0_phy; + __le64 rx_err_lane_1_phy; + __le64 rx_err_lane_2_phy; + __le64 rx_err_lane_3_phy; + __le64 rx_prio_buf_discard[SXE2_MAX_USER_PRIORITY]; + __le64 rx_illegal_bytes; + __le64 rx_oversize_good; + __le64 tx_unicast; + __le64 tx_broadcast; + __le64 tx_multicast; + __le64 tx_vlan_packet_good; + __le64 tx_size_64; + __le64 tx_size_65_127; + 
__le64 tx_size_128_255; + __le64 tx_size_256_511; + __le64 tx_size_512_1023; + __le64 tx_size_1024_1522; + __le64 tx_size_1523_max; + __le64 tx_underflow_error; + __le64 rx_byte_good_bad; + __le64 rx_frame_good_bad; + __le64 rx_unicast_good; + __le64 rx_vlan_packets; + __le64 prio_xoff_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xoff_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_2_xoff[SXE2_MAX_USER_PRIORITY]; +} SXE2_ATTRIBUTE; + +enum sxe2_fc_type { + SXE2_FC_T_DIS = 0, + SXE2_FC_T_LFC, + SXE2_FC_T_PFC, + + SXE2_FC_T_UNKNOW = 255, +}; + +struct sxe2_drv_vsi_fc_get_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_fc_get_resp { + u8 fc_enable; + u8 rsv[3]; +} SXE2_ATTRIBUTE; + +struct sxe2_tm_res { + __le16 teid; +}; + +struct sxe2_tm_info { + __le32 committed; + __le32 peak; + u8 priority; + u8 reserve; + __le16 weight; +}; + +struct sxe2_tm_add_mid_msg { + __le16 parent_teid; + u8 adj_lvl; + struct sxe2_tm_info info; +}; + +struct sxe2_tm_add_queue_msg { + __le16 parent_teid; + __le16 queue_id; + u8 adj_lvl; + struct sxe2_tm_info info; +}; + +struct sxe2_stats_txq_map_pool { + __le16 queue_id_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + u8 curr_map_idx; +}; + +struct sxe2_stats_hw_txq_map_pool { + __le16 txq_id; +}; + +struct sxe2_stats_hw_txq_map { + struct sxe2_stats_hw_txq_map_pool hw_txq_map_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + u8 curr_map_idx; +}; + +struct sxe2_stats_rxq_map_pool { + u8 pool_id; + __le16 queue_id_pool[SXE2_RXQ_MAP_Q_MAX_NUM]; + __le16 curr_map_idx; +}; + +struct sxe2_stats_txq_map { + struct sxe2_stats_txq_map_pool txq_map_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + struct sxe2_stats_hw_txq_map hw_txq_map; +}; + +struct sxe2_stats_rxq_map { + struct sxe2_stats_rxq_map_pool rxq_map_pool[SXE2_RXQ_STATS_MAP_MAX_NUM]; +}; + +struct sxe2_txq_map_info { + __le32 txq_lan_pkt_cnt; + __le32 txq_lan_byte_cnt; +}; + +struct sxe2_rxq_map_info { + __le64 rxq_lan_in_pkt_cnt; + __le64 rxq_lan_in_byte_cnt; + + __le64 rxq_fd_in_pkt_cnt; + + __le64 rxq_mng_in_pkt_cnt; + __le64 rxq_mng_in_byte_cnt; + __le64 rxq_mng_out_pkt_cnt; +}; + +struct sxe2_queue_map_info { + struct sxe2_rxq_map_info + rxq_stats_map_info[SXE2_RXQ_STATS_MAP_MAX_NUM]; + struct sxe2_txq_map_info + txq_stats_map_info[SXE2_TXQ_STATS_MAP_MAX_NUM]; +}; + +struct sxe2_stats_map { + struct sxe2_stats_txq_map txq_map; + struct sxe2_stats_rxq_map rxq_map; + + struct sxe2_queue_map_info q_info; +}; + +struct sxe2_drv_sfp_req { + u8 is_wr; + u8 is_qsfp; + __le16 bus_addr; + __le16 page_cnt; + __le16 offset; + __le16 data_len; + __le16 rvd; + u8 data[]; +}; + +struct sxe2_drv_sfp_resp { + u8 is_wr; + u8 is_qsfp; + __le16 data_len; + u8 data[]; +}; + +enum sxe2_drv_cmd_module { + SXE2_DRV_CMD_MODULE_HANDSHAKE = 0, + SXE2_DRV_CMD_MODULE_DEV = 1, + SXE2_DRV_CMD_MODULE_VSI = 2, + SXE2_DRV_CMD_MODULE_QUEUE = 3, + SXE2_DRV_CMD_MODULE_STATS = 4, + SXE2_DRV_CMD_MODULE_SUBSCRIBE = 5, + SXE2_DRV_CMD_MODULE_RSS = 6, + SXE2_DRV_CMD_MODULE_FLOW = 7, + SXE2_DRV_CMD_MODULE_TM = 8, + SXE2_DRV_CMD_MODULE_IPSEC = 9, + SXE2_DRV_CMD_MODULE_PTP = 10, + + SXE2_DRV_CMD_MODULE_VLAN = 11, + SXE2_DRV_CMD_MODULE_RDMA = 12, + SXE2_DRV_CMD_MODULE_LINK = 13, + SXE2_DRV_CMD_MODULE_MACADDR = 14, + SXE2_DRV_CMD_MODULE_PROMISC = 15, + + SXE2_DRV_CMD_MODULE_LED = 16, + SXE2_DEV_CMD_MODULE_OPT = 17, + SXE2_DEV_CMD_MODULE_SWITCH = 18, + SXE2_DRV_CMD_MODULE_ACL = 19, + SXE2_DRV_CMD_MODULE_UDPTUNEEL = 20, + SXE2_DRV_CMD_MODULE_QUEUE_MAP = 21, + + 
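/*
+	 * Worked example of the command-code encoding (illustrative
+	 * arithmetic, not an additional ABI value): with
+	 * SXE2_DRV_CMD_MODULE_S == 16 and SXE2_DRV_CMD_MODULE_DEV == 1,
+	 *
+	 *	SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_DEV, 1)
+	 *		== (1 << 16) | 1 == 0x00010001
+	 *
+	 * so SXE2_DRV_CMD_DEV_GET_CAPS below is 0x00010001: the module id
+	 * occupies the upper 16 bits of every code in enum sxe2_drv_cmd_code.
+	 */
+	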
SXE2_DRV_CMD_MODULE_SCHED = 22, + + SXE2_DRV_CMD_MODULE_IRQ = 23, + + SXE2_DRV_CMD_MODULE_OPT = 24, +}; + +enum sxe2_drv_cmd_code { + SXE2_DRV_CMD_HANDSHAKE_ENABLE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_HANDSHAKE, 1), + SXE2_DRV_CMD_HANDSHAKE_DISABLE, + + SXE2_DRV_CMD_DEV_GET_CAPS = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_DEV, 1), + SXE2_DRV_CMD_DEV_GET_INFO, + SXE2_DRV_CMD_DEV_GET_FW_INFO, + SXE2_DRV_CMD_DEV_RESET, + SXE2_DRV_CMD_DEV_GET_SWITCHDEV_INFO, + + SXE2_DRV_CMD_VSI_CREATE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_VSI, 1), + SXE2_DRV_CMD_VSI_FREE, + SXE2_DRV_CMD_VSI_INFO_GET, + SXE2_DRV_CMD_VSI_SRCVSI_PRUNE, + SXE2_DRV_CMD_VSI_FC_GET, + + SXE2_DRV_CMD_RX_MAP_SET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_QUEUE_MAP, 1), + SXE2_DRV_CMD_TX_MAP_SET, + SXE2_DRV_CMD_TX_RX_MAP_GET, + SXE2_DRV_CMD_TX_RX_MAP_RESET, + SXE2_DRV_CMD_TX_RX_MAP_INFO_CLEAR, + + SXE2_DRV_CMD_SCHED_ROOT_TREE_ALLOC = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_SCHED, 1), + SXE2_DRV_CMD_SCHED_ROOT_TREE_RELEASE, + SXE2_DRV_CMD_SCHED_ROOT_CHILDREN_DELETE, + SXE2_DRV_CMD_SCHED_TM_ADD_MID_NODE, + SXE2_DRV_CMD_SCHED_TM_ADD_QUEUE_NODE, + + SXE2_DRV_CMD_RXQ_CFG_ENABLE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_QUEUE, 1), + SXE2_DRV_CMD_TXQ_CFG_ENABLE, + SXE2_DRV_CMD_RXQ_DISABLE, + SXE2_DRV_CMD_TXQ_DISABLE, + + SXE2_DRV_CMD_VSI_STATS_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_STATS, 1), + SXE2_DRV_CMD_VSI_STATS_CLEAR, + SXE2_DRV_CMD_MAC_STATS_GET, + SXE2_DRV_CMD_MAC_STATS_CLEAR, + + SXE2_DRV_CMD_RSS_KEY_SET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_RSS, 1), + SXE2_DRV_CMD_RSS_LUT_SET, + SXE2_DRV_CMD_RSS_FUNC_SET, + SXE2_DRV_CMD_RSS_HF_ADD, + SXE2_DRV_CMD_RSS_HF_DEL, + SXE2_DRV_CMD_RSS_HF_CLEAR, + + SXE2_DRV_CMD_FLOW_FILTER_ADD = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_FLOW, 1), + SXE2_DRV_CMD_FLOW_FILTER_DEL, + SXE2_DRV_CMD_FLOW_FILTER_CLEAR, + SXE2_DRV_CMD_FLOW_FNAV_STAT_ALLOC, + SXE2_DRV_CMD_FLOW_FNAV_STAT_FREE, + SXE2_DRV_CMD_FLOW_FNAV_STAT_QUERY, + + SXE2_DRV_CMD_DEL_TM_ROOT = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_TM, 1), + SXE2_DRV_CMD_ADD_TM_ROOT, + SXE2_DRV_CMD_ADD_TM_NODE, + SXE2_DRV_CMD_ADD_TM_QUEUE, + + SXE2_DRV_CMD_GET_PTP_CLOCK = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_PTP, 1), + + SXE2_DRV_CMD_VLAN_FILTER_ADD_DEL = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_VLAN, 1), + SXE2_DRV_CMD_VLAN_FILTER_SWITCH, + SXE2_DRV_CMD_VLAN_OFFLOAD_CFG, + SXE2_DRV_CMD_VLAN_PORTVLAN_CFG, + SXE2_DRV_CMD_VLAN_CFG_QUERY, + + SXE2_DRV_CMD_RDMA_DUMP_PCAP = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_RDMA, 1), + + SXE2_DRV_CMD_LINK_STATUS_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_LINK, 1), + + SXE2_DRV_CMD_MAC_ADDR_UC = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_MACADDR, 1), + SXE2_DRV_CMD_MAC_ADDR_MC, + + SXE2_DRV_CMD_PROMISC_CFG = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_PROMISC, 1), + SXE2_DRV_CMD_ALLMULTI_CFG, + + SXE2_DRV_CMD_LED_CTRL = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_LED, 1), + + SXE2_DRV_CMD_OPT_EEP = + SXE2_MK_DRV_CMD(SXE2_DEV_CMD_MODULE_OPT, 1), + + SXE2_DRV_CMD_SWITCH = + SXE2_MK_DRV_CMD(SXE2_DEV_CMD_MODULE_SWITCH, 1), + SXE2_DRV_CMD_SWITCH_UPLINK, + SXE2_DRV_CMD_SWITCH_REPR, + SXE2_DRV_CMD_SWITCH_MODE, + SXE2_DRV_CMD_SWITCH_CPVSI, + + SXE2_DRV_CMD_UDPTUNNEL_ADD = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_UDPTUNEEL, 1), + SXE2_DRV_CMD_UDPTUNNEL_DEL, + SXE2_DRV_CMD_UDPTUNNEL_GET, + + SXE2_DRV_CMD_IPSEC_CAP_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_IPSEC, 1), + SXE2_DRV_CMD_IPSEC_TXSA_ADD, + SXE2_DRV_CMD_IPSEC_RXSA_ADD, + SXE2_DRV_CMD_IPSEC_TXSA_DEL, + SXE2_DRV_CMD_IPSEC_RXSA_DEL, + SXE2_DRV_CMD_IPSEC_RESOURCE_CLEAR, + + SXE2_DRV_CMD_EVT_IRQ_BAND_RXQ = + 
SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_IRQ, 1), + + SXE2_DRV_CMD_OPT_EEP_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_OPT, 1), + +}; + +#ifndef __KERNEL__ +#pragma pack() +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_ioctl_chnl.h b/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_ioctl_chnl.h new file mode 100644 index 0000000000000000000000000000000000000000..07118eecdbf2da3a777eec846e61b9a3ce7a9a85 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/ioctl/sxe2_ioctl_chnl.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ioctl_chnl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_IOCTL_CHNL_H__ +#define __SXE2_IOCTL_CHNL_H__ + +#ifdef SXE2_DPDK_DRIVER + +#include +#if (RTE_VERSION_NUM(22, 0, 0, 0) <= RTE_VERSION) +#include +#else +#include +#include +#endif + +#include "sxe2_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#ifdef __KERNEL__ +#include +#include +#endif +#endif + +#include "sxe2_internal_ver.h" + +#define SXE2_COM_INVAL_U32 0xFFFFFFFF + +#define SXE2_COM_PCI_OFFSET_SHIFT 40 + +#define SXE2_COM_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << SXE2_COM_PCI_OFFSET_SHIFT) +#define SXE2_COM_PCI_OFFSET_MASK (((u64)(1) << SXE2_COM_PCI_OFFSET_SHIFT) - 1) +#define SXE2_COM_PCI_OFFSET_GEN(index, off) ((((u64)(index)) << SXE2_COM_PCI_OFFSET_SHIFT) | \ + (((u64)(off)) & SXE2_COM_PCI_OFFSET_MASK)) + +#define SXE2_DRV_TRACE_ID_COUNT_MASK 0x003FFFFFFFFFFFFFLLU + +#define SXE2_DRV_CMD_DFLT_TIMEOUT (30) + +#define SXE2_COM_VER_MAJOR 1 +#define SXE2_COM_VER_MINOR 0 +#define SXE2_COM_VER SXE2_MK_VER(SXE2_COM_VER_MAJOR, SXE2_COM_VER_MINOR) + +enum SXE2_COM_CMD { + SXE2_DEVICE_HANDSHAKE = 1, + SXE2_DEVICE_IO_IRQS_REQ, + SXE2_DEVICE_EVT_IRQ_REQ, + SXE2_DEVICE_RST_IRQ_REQ, + SXE2_DEVICE_EVT_CAUSE_GET, + SXE2_DEVICE_DMA_MAP, + SXE2_DEVICE_DMA_UNMAP, + SXE2_DEVICE_PASSTHROUGH, + SXE2_DEVICE_MAX, +}; + +#define SXE2_CMD_TYPE 'S' + +#define SXE2_COM_CMD_HANDSHAKE _IO(SXE2_CMD_TYPE, SXE2_DEVICE_HANDSHAKE) +#define SXE2_COM_CMD_IO_IRQS_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_IO_IRQS_REQ) +#define SXE2_COM_CMD_EVT_IRQ_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_EVT_IRQ_REQ) +#define SXE2_COM_CMD_RST_IRQ_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_RST_IRQ_REQ) +#define SXE2_COM_CMD_EVT_CAUSE_GET _IO(SXE2_CMD_TYPE, SXE2_DEVICE_EVT_CAUSE_GET) +#define SXE2_COM_CMD_DMA_MAP _IO(SXE2_CMD_TYPE, SXE2_DEVICE_DMA_MAP) +#define SXE2_COM_CMD_DMA_UNMAP _IO(SXE2_CMD_TYPE, SXE2_DEVICE_DMA_UNMAP) +#define SXE2_COM_CMD_PASSTHROUGH _IO(SXE2_CMD_TYPE, SXE2_DEVICE_PASSTHROUGH) + +enum sxe2_com_cap { + SXE2_COM_CAP_IOMMU_MAP = 0, +}; + +struct sxe2_ioctl_cmd_common_hdr { + u32 dpdk_ver; + u32 drv_ver; + u32 msg_len; + u32 cap; + u8 reserved[32]; +}; + +struct sxe2_drv_cmd_params { + u64 trace_id; + u32 timeout; + u32 opcode; + u16 vsi_id; + u16 repr_id; + u32 req_len; + u32 resp_len; + void *req_data; + void *resp_data; + u8 resv[32]; +}; + +struct sxe2_ioctl_irq_set { + u32 cnt; + u8 resv[4]; + u32 base_irq_in_com; + s32 *event_fd; +}; + +enum sxe2_com_event_cause { + SXE2_COM_EC_LINK_CHG = 0, + SXE2_COM_SW_MODE_LEGACY, + SXE2_COM_SW_MODE_SWITCHDEV, + SXE2_COM_FC_ST_CHANGE, + + SXE2_COM_EC_RESET = 62, + SXE2_COM_EC_MAX = 63, +}; + +struct sxe2_ioctl_other_evt_set { + s32 eventfd; + u8 resv[4]; + u64 filter_table; +}; + +struct sxe2_ioctl_other_evt_get { + u64 evt_cause; + u8 resv[8]; +}; + +struct sxe2_ioctl_reset_sub_set { + s32 eventfd; + u8 resv[4]; +}; + +struct 
sxe2_ioctl_iommu_dma_map { + u64 vaddr; + u64 iova; + u64 size; + u8 resv[4]; +}; + +struct sxe2_ioctl_iommu_dma_unmap { + u64 iova; +}; + +union sxe2_drv_trace_info { + u64 id; + struct { + u64 count : 54; + u64 cpu_id : 10; + } sxe2_drv_trace_id_param; +}; +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/mbx/sxe2_mbx_public.h b/drivers/net/ethernet/linkdata/sxe2/common/mbx/sxe2_mbx_public.h new file mode 100644 index 0000000000000000000000000000000000000000..b3a59a9b723ecdd1a1ded92d484b58ce9202aa6c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/mbx/sxe2_mbx_public.h @@ -0,0 +1,837 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_mbx_public.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_VF_PUBLIC_H__ +#define __SXE2_VF_PUBLIC_H__ + +#include "sxe2_spec.h" +#ifdef __KERNEL__ +#include "sxe2_compat.h" +#endif + +#include "sxe2_host_regs.h" +#include "sxe2_flow_public.h" + +#pragma pack(1) + +#define SXE2_VF_VERSION_MAJOR 1 +#define SXE2_VF_VERSION_MINOR 1 + +#define SXE2_VF_MAX_VSI_CNT 4 + +#define SXE2_VF_VLAN_STATUS_INVALID (0xFF) + +#define SXE2_VF_ETH_Q_NUM 16 +#define SXE2_VF_DPDK_Q_NUM 16 + +#define SXE2_VF_DRV_TO_HW (0x3) +#define SXE2_VF_VF_TO_PF (0x0801) +#define SXE2_VF_PF_TO_VF (0x0802) + +#define SXE2_VF_MBX_MAGIC (0xFEFEEFEF) + +#define SXE2_VF_PROMISC BIT_ULL(0) +#define SXE2_VF_PROMISC_MULTICAST BIT_ULL(1) +#define SXE2_VF_VLAN_FILTER BIT_ULL(2) + +#define SXE2_VF_OFFLOAD_L2 BIT(0) +#define SXE2_VF_OFFLOAD_VLAN BIT(1) +#define SXE2_VF_OFFLOAD_RSS BIT(2) +#define SXE2_VF_OFFLOAD_IPSEC BIT(3) +#define SXE2_VF_OFFLOAD_FNAV BIT(4) +#define SXE2_VF_OFFLOAD_TM BIT(5) +#define SXE2_VF_OFFLOAD_PTP BIT(6) + +#define SXE2_IPSEC_DIR_TX (0) +#define SXE2_IPSEC_DIR_RX (1) +#define SXE2_IPSEC_INVAILID_SA_IDX (0xFFFF) + +enum sxe2vf_vsi_type { + SXE2VF_VSI_TYPE_ETH = 0, + SXE2VF_VSI_TYPE_DPDK, + SXE2VF_VSI_TYPE_NR, +}; + +#define SXE2_VF_VSI_CNT_USED SXE2VF_VSI_TYPE_NR + +enum sxe2_vf_opcode { + SXE2_VF_UNKOWN = 0, + SXE2_VF_RESET_REQUEST = 0x1, + SXE2_VF_VERSION_MATCH = 0x2, + SXE2_VF_HW_RES_GET = 0x3, + SXE2_VF_IRQ_MAP = 0x4, + SXE2_VF_QUEUES_DISABLE = 0x5, + SXE2_VF_RXQ_CFG_AND_ENABLE = 0x6, + SXE2_VF_TXQ_CFG_AND_ENABLE = 0x7, + SXE2_VF_MAC_ADDR_ADD = 0x8, + SXE2_VF_MAC_ADDR_DEL = 0x9, + SXE2_VF_VLAN_ADD = 0xa, + SXE2_VF_VLAN_DEL = 0xb, + SXE2_VF_STATS_GET = 0xc, + SXE2_VF_LINK_UPDATE_NOTIFY = 0xd, + SXE2_VF_PROMISC_CFG = 0xe, + SXE2_VF_VLAN_CAPS_GET = 0xf, + SXE2_VF_VLAN_OFFLOAD_CFG = 0x10, + SXE2_VF_VLAN_FILTER_CFG = 0x11, + SXE2_VF_LINK_STATUS_GET = 0x12, + SXE2_VF_RESET_NOTIFY = 0x13, + SXE2_VF_RDMA = 0x14, + SXE2_VF_QV_MAP = 0x15, + SXE2_VF_QV_UNMAP = 0x16, + SXE2_VF_RDMA_MGR_CMD = 0x17, + + SXE2_VF_GET_RSS_KEY = 0x18, + SXE2_VF_GET_RSS_LUT = 0x19, + SXE2_VF_SET_RSS_KEY = 0x1a, + SXE2_VF_SET_RSS_LUT = 0x1b, + SXE2_VF_ADD_RSS_CFG = 0x1c, + + SXE2_VF_DEL_RSS_CFG = 0x1d, + SXE2_VF_CLEAR_RSS_CFG = 0x1e, + SXE2_VF_SET_RSS_HASH_CTRL = 0X1f, + + SXE2_VF_FNAV_FILTER_ADD = 0x20, + SXE2_VF_FNAV_FILTER_DEL = 0x21, + SXE2_VF_FNAV_FILTER_CLEAR = 0X22, + SXE2_VF_FNAV_ALLOC_STAT = 0X23, + SXE2_VF_FNAV_FREE_STAT = 0X24, + SXE2_VF_FNAV_QUERY_STAT = 0x25, + + SXE2_VF_STATS_CLEAR = 0x26, + SXE2_VF_RXQ_DISABLE = 0x27, + SXE2_VF_TXQ_DISABLE = 0x28, + + SXE2_VF_GET_PTP_CLOCK = 0x29, + SXE2_VF_IPSEC_SA_ADD = 0x2a, + SXE2_VF_IPSEC_SA_CLEAR = 0x2b, + SXE2_VF_IPSEC_GET_CAPA = 0x2c, + + SXE2_VF_RDMA_DUMP_PCAP = 0x2d, + + SXE2_VF_IRQ_UNMAP = 0x2e, + + 
SXE2_VF_ADD_DEFAULT_RSS_CFG = 0x2f, + SXE2_VF_REPLAY_RSS_CFG = 0x30, + SXE2_VF_STATS_PUSH = 0x31, + SXE2_VF_GET_ETHTOOL_INFO = 0x32, + SXE2_VF_FNAV_MATCH_CLEAR = 0x33, + SXE2_VF_VSI_CFG = 0x34, + SXE2_VF_USER_DRIVER_RELEASE = 0x35, + + SXE2_VF_MAC_ADDR_UPDATE = 0x36, + SXE2_VF_PROMISC_UPDATE = 0x37, + SXE2_VF_USER_VLAN_PROCESS = 0x38, + + SXE2_VF_ACL_FILTER_ADD = 0x39, + SXE2_VF_ACL_FILTER_DEL = 0x3a, + SXE2_VF_ACL_FILTER_CLEAR = 0x3b, + + SXE2_VF_PASSTHROUGH_USER_VF_DATA = 0x3c, + + SXE2_VF_DRV_MODE_SET = 0x3d, + SXE2_VF_DRV_MODE_GET = 0x3e, + + SXE2_VF_OPCODE_NR, + + SXE2_VF_MBX_DISABLE = 0xFFFF, +}; + +enum sxe2_vf_err_code { + SXE2_VF_ERR_SUCCESS = 0, + SXE2_VF_ERR_PARAM = 1024, + SXE2_VF_ERR_NO_MEMORY, + SXE2_VF_ERR_HANDLE_ERROR, + SXE2_VF_ERR_CQP_COMPL_ERROR, + SXE2_VF_ERR_INVALID_VF_ID, + SXE2_VF_ERR_ADMIN_QUEUE_ERROR, + SXE2_VF_ERR_NOT_SUPPORTED, + SXE2_VF_ERR_PF_STATUS_ABNORMAL, + SXE2_VF_ERR_VF_STATUS_ABNORMAL, +}; + +enum sxe2_vf_msg_type { + SXE2VF_MSG_TYPE_PF_TO_VF = 3, + SXE2VF_MSG_TYPE_VF_TO_PF, + SXE2VF_MSG_TYPE_DRV_TO_HW, + SXE2VF_MSG_TYPE_PF_REPLY_VF, +}; + +#define SXE2VF_CMD_HDR_SIZE sizeof(struct sxe2vf_cmd_hdr) + +#define SXE2VF_MBX_MSG_HDR_SIZE \ + sizeof(struct sxe2vf_mbx_msg_hdr) + +#define SXE2VF_MBX_RAW_MSG_MAX_SPEC (4096) + +#define SXE2VF_MBX_RAW_MSG_OFFSET (SXE2VF_CMD_HDR_SIZE + SXE2VF_MBX_MSG_HDR_SIZE) + +#define SXE2VF_MBX_FULL_HDR_SIZE SXE2VF_MBX_RAW_MSG_OFFSET + +#define SXE2VF_MBX_RAW_MSG_MAX_SIZE \ + (SXE2VF_MBX_RAW_MSG_MAX_SPEC - SXE2VF_MBX_RAW_MSG_OFFSET) + +#define SXE2VF_MBX_DATA_OFFSET(buf) \ + ((((struct sxe2_cmd_hdr *)(buf))->hdr_len) + \ + (SXE2_MBX_MSG_HDR_PTR((struct sxe2_cmd_hdr *)buf)->data_offset)) + +#define SXE2_FNAV_MAX_NUM_PROTO_HDRS (9) +#define SXE2_FNAV_MAX_NUM_ACTIONS (3) +#define SXE2_FNAV_IPV6_ADDR_LEN_TO_U32 (4) +#define SXE2_FNAV_ETH_ADDR_LEN (6) +#define SXE2_VF_FNAV_INVALID_LOC (0xFFFF) +#define SXE2_VF_FNAV_INVALID_FLOW_ID (0xFFFF) +#define SXE2_VF_FNAV_INVALID_STAT_IDX (0xFFFF) + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F + +struct sxe2vf_cmd_hdr { + __le32 magic_code; + __le16 in_len; + __le16 out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 resv[28]; + u8 body[]; +}; + +struct sxe2vf_mbx_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 recv[14]; + u8 body[]; +}; + +enum sxe2_driver_type { + SXE2_DRIVER_TYPE_VF = 0, +}; + +struct sxe2_vf_vfres_msg_req { + u8 driver_type; + u8 support_sw_stats; + u8 reserve[2]; +}; + +struct sxe2_vf_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_ver_msg { + __le16 major; + __le16 minor; +}; + +struct sxe2_vf_rxq_ctxt { + u8 lro_status; + u8 keep_crc_en; + __le16 queue_id; + __le16 depth; + __le16 buf_len; + __le64 dma_addr; +}; + +struct sxe2_vf_rxq_msg { + __le16 q_cnt; + __le16 vsi_id; + __le16 max_frame_size; + struct sxe2_vf_rxq_ctxt ctxt[]; +}; + +struct sxe2_vf_vsi_sw_stats { + __le64 rx_packets; + __le64 rx_bytes; + __le64 tx_packets; + __le64 tx_bytes; +}; + +struct sxe2_vf_sw_stats { + __le16 vsi_id; + struct sxe2_vf_vsi_sw_stats sw_stats; + __le16 fnav_stats_idx; +}; + +struct sxe2_vf_vsi_res { + __le16 vsi_id; +}; + +struct sxe2_vf_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 
rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; +}; + +struct sxe2_vf_hw_stats_rsp { + struct sxe2_vf_vsi_hw_stats hw_stats; + __le64 fnav_match; +}; + +struct sxe2_fw_ver_msg { + u8 main_version_id; + u8 sub_version_id; + u8 fix_version_id; + u8 build_id; +}; + +struct sxe2_vf_txsch_caps { + u8 layer_cap; + u8 tm_mid_node_num; + u8 prio_num; +}; + +struct sxe2_vf_rxft_caps { + __le16 rss_lut_type; + __le16 rss_key_size; + __le16 rss_lut_size; + __le16 fnav_space_gsize; + __le16 fnav_space_bsize; +}; + +struct sxe2_vf_vfres_msg { + __le16 num_vsis; + __le16 max_vectors; + __le16 q_cnt; + __le16 itr_gran; + u8 addr[ETH_ALEN]; + __le16 max_vlan_cnt; + u8 port_vlan_exsit; + u8 is_switchdev; + u8 pf_cnt; + u8 parent_pfid; + __le16 vf_id_in_dev; + struct sxe2_vf_rxft_caps rxft_cap; + struct sxe2_vf_vsi_res vsi_res[SXE2_VF_MAX_VSI_CNT]; + struct sxe2_vf_txsch_caps vf_txsch_cap; + struct sxe2_fw_ver_msg fw_ver; + __le32 cap_flags; + u8 tm_layers; + u8 parent_portid; + u8 mode; +}; + +struct sxe2_vf_irq_map { + __le16 irq_id; + __le16 txq_map; + __le16 rxq_map; + __le16 rxitr_idx; + __le16 txitr_idx; +}; + +struct sxe2_vf_irq_map_msg { + __le16 num_irqs; + __le16 vsi_id; + struct sxe2_vf_irq_map irq_maps[]; +}; + +struct sxe2_vf_irq_unmap_msg { + __le16 vsi_id; +}; + +enum { + SXE2_VF_MAC_TYPE_P = 0, + SXE2_VF_MAC_TYPE_C, +}; + +struct sxe2_vf_addr { + u8 addr[ETH_ALEN]; + u8 type; +}; + +struct sxe2_vf_addr_msg { + bool is_user; + __le16 vsi_id; + __le16 addr_cnt; + struct sxe2_vf_addr elem[]; +}; + +struct sxe2_vf_addr_update_msg { + bool to_user; + __le16 vsi_id; + u8 addr[ETH_ALEN]; +}; + +struct sxe2_vf_promisc_update_msg { + bool to_user; + bool is_promisc; + __le16 vsi_id; +}; + +struct sxe2_vf_link_msg { + __le32 speed; + u8 status; +}; + +struct sxe2_vf_txq_stop_msg { + __le16 q_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_txq_ctxt { + __le16 vsi_id; + __le16 queue_id; + __le16 depth; + __le64 dma_addr; + __le32 sched_mode; +}; + +struct sxe2_vf_txq_ctxt_msg { + __le16 q_cnt; + __le16 vsi_id; + struct sxe2_vf_txq_ctxt ctxs[]; +}; + +struct sxe2_vf_qps_dis_msg { + __le16 qps_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_q_stop_msg { + __le16 vsi_id; + __le16 q_idx; +}; + +struct sxe2_vf_promisc_msg { + bool is_user; + __le16 vsi_id; + u8 resv[2]; + __le32 flags; +}; + +struct sxe2_vf_vlan_caps { + u8 port_vlan_exsit; + __le16 max_cnt; +}; + +struct sxe2_vf_vlan_offload_cfg { + u8 stag_strip_enable; + u8 ctag_strip_enable; + u8 stag_insert_enable; + u8 ctag_insert_enable; +}; + +struct sxe2_vf_vlan_filter_cfg { + bool is_user; + u8 ctag_filter_enable; + u8 stag_filter_enable; +}; + +struct sxe2_vf_vlan { + __le16 vid; + __le16 tpid; +}; + +struct sxe2_vf_vlan_filter_msg { + __le16 vsi_id; + __le16 vlan_cnt; + struct sxe2_vf_vlan elem[]; +}; + +struct sxe2_vf_user_vlan_msg { + bool is_add; + __le16 vsi_id; + struct sxe2_vf_vlan vlan; +}; + +struct sxe2_vf_user_vlan_fltr_msg { + bool is_en; + __le16 vsi_id; +}; + +struct sxe2_vf_rss_hash_ctrl { + u8 hash_func; +}; + +struct sxe2_vf_rss_hash_msg { + __le32 headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + __le32 hash_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + __le32 hdr_type; + u8 symm; +}; + +enum sxe2_fnav_flow_type { + SXE2_FNAV_FLOW_TYPE_NONE = SXE2_FLOW_TYPE_NONE, + SXE2_FNAV_FLOW_TYPE_FRAG_IPV4 = SXE2_FLOW_MAC_IPV4_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_ETH = SXE2_FLOW_MAC_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_OTHER = SXE2_FLOW_MAC_IPV4_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_UDP = 
SXE2_FLOW_MAC_IPV4_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_TCP = SXE2_FLOW_MAC_IPV4_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_SCTP = SXE2_FLOW_MAC_IPV4_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_FRAG_IPV6 = SXE2_FLOW_MAC_IPV6_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER = SXE2_FLOW_MAC_IPV6_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_UDP = SXE2_FLOW_MAC_IPV6_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_TCP = SXE2_FLOW_MAC_IPV6_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_SCTP = SXE2_FLOW_MAC_IPV6_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP = SXE2_FLOW_TYPE_MAX, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP, + SXE2_FNAV_FLOW_TYPE_MAX, +}; + +enum sxe2_fnav_act_type { + SXE2_FNAV_ACTION_DROP = 0, + SXE2_FNAV_ACTION_TC_REDIRECT, + SXE2_FNAV_ACTION_PASSTHRU, + SXE2_FNAV_ACTION_QUEUE, + SXE2_FNAV_ACTION_Q_REGION, + SXE2_FNAV_ACTION_MARK, + SXE2_FNAV_ACTION_COUNT, +}; + +enum sxe2_fnav_tunnel_level { + SXE2_FNAV_TUNNEL_OUTER, + SXE2_FNAV_TUNNEL_INNER, + SXE2_FNAV_TUNNEL_ANY, +}; + +enum sxe2_fnav_tunnel_flag_type { + SXE2_FNAV_TUN_FLAG_NO_TUNNEL, + SXE2_FNAV_TUN_FLAG_TUNNEL, + SXE2_FNAV_TUN_FLAG_ANY, +}; + +struct sxe2_fnav_comm_eth { + u8 dst[SXE2_FNAV_ETH_ADDR_LEN]; + u8 src[SXE2_FNAV_ETH_ADDR_LEN]; + __be16 etype; +}; + +struct sxe2_fnav_comm_vlan { + __be16 vlan_vid; + __be16 vlan_tci; + __be16 vlan_type; +}; + +struct sxe2_fnav_comm_ipv4 { + __be32 saddr; + __be32 daddr; + u8 tos; + u8 ttl; + u8 proto; +}; + +struct sxe2_fnav_comm_ipv6 { + __be32 dst_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + __be32 src_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + u8 tc; + u8 proto; + u8 hlim; +}; + +struct sxe2_fnav_comm_l4 { + __be16 dst_port; + __be16 src_port; +}; + +struct sxe2_fnav_comm_vxlan { + __be32 vni; +}; + +struct sxe2_fnav_comm_geneve { + __be32 vni; +}; + +struct sxe2_fnav_comm_gtpu { + __be32 teid; +}; + +struct sxe2_fnav_comm_gre { + __be32 tni; +}; + +struct sxe2_fnav_comm_proto_hdr { + u8 tunnel_level; + u8 type; + __le32 flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + union { + struct sxe2_fnav_comm_eth eth; + struct sxe2_fnav_comm_vlan vlan; + struct sxe2_fnav_comm_ipv4 ipv4; + struct sxe2_fnav_comm_ipv6 ipv6; + struct sxe2_fnav_comm_l4 l4; + struct sxe2_fnav_comm_vxlan vxlan; + struct sxe2_fnav_comm_geneve geneve; + struct sxe2_fnav_comm_gtpu gtpu; + struct sxe2_fnav_comm_gre gre; + }; +}; + +struct sxe2_fnav_comm_action_queue { + __le16 q_index; +}; + +struct sxe2_fnav_comm_action_queue_region { + __le16 q_index; + u8 region; +}; + +struct sxe2_fnav_comm_action_mark { + __le32 mark_id; +}; + +struct sxe2_fnav_comm_action_count { + __le32 stat_index; + __le32 stat_ctrl; +}; + +struct sxe2_fnav_comm_action { + __le32 type; + union { + struct sxe2_fnav_comm_action_queue act_queue; + struct sxe2_fnav_comm_action_queue_region act_q_region; + struct sxe2_fnav_comm_action_mark act_mark; + struct sxe2_fnav_comm_action_count act_count; + }; +}; + +struct sxe2_fnav_comm_user_data { + u8 has_flex_filed; + u8 resv[3]; + __le16 flex_offset; + __be16 flex_word; +}; + +struct sxe2_fnav_comm_full_msg { + __le32 filter_loc; + __le32 flow_type; + __le32 tunn_flag; + u8 action_cnt; + u8 proto_cnt; + u8 rsv[2]; + struct sxe2_fnav_comm_action action[SXE2_FNAV_MAX_NUM_ACTIONS]; + struct sxe2_fnav_comm_proto_hdr proto_hdr[SXE2_FNAV_MAX_NUM_PROTO_HDRS]; + struct sxe2_fnav_comm_user_data usr_data; +}; + +struct sxe2_vf_fnav_filter_del_msg { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_add_filter_resp { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_stat_alloc_req_msg { + u8 need_update; +}; + +struct 
sxe2_vf_fnav_stat_msg { + __le16 stat_index; +}; + +struct sxe2_vf_fnav_stat_query_req_msg { + __le16 stat_index; + __le32 stat_ctrl; + __le32 is_clear; +}; + +struct sxe2_vf_fnav_stat_query_resp_msg { + __le16 stat_index; + __le64 stat_hits; + __le64 stat_bytes; +}; + +struct sxe2_vf_qv_info { + __le32 v_idx; + __le16 ceq_idx; + __le16 aeq_idx; + u8 itr_idx; + u8 pad[3]; +}; + +struct sxe2_vf_qv_map_msg { + __le32 num_vectors; + struct sxe2_vf_qv_info qv_info[]; +}; + +struct sxe2_vf_rdma_mgr_cmd_msg { + __le32 opcode; + __le32 msg_len; + __le32 resv_len; + u8 msg[]; +}; + +struct sxe2_vf_tm_res { + __le16 teid; +}; + +struct sxe2_vf_tm_info { + __le32 committed; + __le32 peak; + u8 priority; + u8 reserve; + __le16 weight; +}; + +struct sxe2_vf_tm_add_root_msg { + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_node_msg { + __le16 parent_teid; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_queue_msg { + __le16 parent_teid; + __le16 queue_id; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_ptp_clock_res { + __le32 clock_ns; + __le64 clock_s; +}; + +struct sxe2_mbx_obj { + __le32 func_type : 2; + __le32 resv : 2; + __le32 pf_id : 4; + __le32 vf_id : 12; + __le32 resv1 : 4; + __le32 drv_type : 2; + __le32 drv_id : 6; +}; + +struct sxe2_com_user_data_passthrough_req { + struct sxe2_mbx_obj obj; + u32 opcode; + u16 func_id; + u16 vsi_id; + u32 req_len; + u32 resp_len; + u32 buff_len; + u8 cmd_buff[]; +}; + +struct sxe2_com_user_data_passthrough_resp { + u32 buff_len; + u8 cmd_buff[]; +}; + +#define SXE2_MBX_IPSEC_IPV6 BIT(0) +#define SXE2_MBX_IPSEC_SM4 BIT(1) +#define SXE2_MBX_IPSEC_AUTH BIT(2) +#define SXE2_MBX_IPSEC_KEY_LEN (32) +#define SCBGE_MBX_IPSEC_IPV4_LEN (4) +#define SCBGE_MBX_IPSEC_IPV6_LEN (16) + +struct sxe2_vf_ipsec_sa_add_msg { + __le32 spi; + u8 dir; + u8 mode; + u8 rsvd[2]; + __le32 addr[SCBGE_MBX_IPSEC_IPV6_LEN / 4]; + u8 enc_key[SXE2_MBX_IPSEC_KEY_LEN]; + u8 auth_key[SXE2_MBX_IPSEC_KEY_LEN]; + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_add_resp { + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_del_msg { + u8 dir; + u8 rsvd[3]; + __le32 sa_idx; +}; + +struct sxe2vf_get_capa_response { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; +}; + +struct sxe2vf_acl_filter_del_req { + __le32 filter_id; +}; + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD SXE2_VSI_L2TAGSTXVALID_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_VLAN SXE2_VSI_L2TAGSTXVALID_ID_VLAN + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q SXE2_VSI_TSR_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD SXE2_VSI_TSR_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1 SXE2_VSI_TSR_ID_OUT_VLAN2 + +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_VLAN +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID + +#define SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1 SXE2_VSI_TSR_ID_VLAN + +#define SXE2_DPDK_OFFLOAD_FIELD (0X0F) +#define SXE2_DPDK_OFFLOAD_TAGID_FIELD (0X07) + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK (SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1) +#define SXE2_DPDK_OFFLOAD_STRIP_OFFSET SXE2_VSI_TSR_SHOW_TAG_S + +#define SXE2_DPDK_OFFLOAD_INSERT_ENABLE (BIT(3)) + +struct sxe2_dpdk_portvlan_cfg { + u16 vf_idx; + u16 tpid; + 
u16 vid; + u8 prio; + u8 rsv; +}; + +struct sxe2vf_rdma_dump_pcap_msg { + u8 mac[ETH_ALEN]; + u8 rsvd[2]; + bool is_add; + u8 rsvd1[3]; +}; + +struct sxe2_vf_vsi_cfg { + bool is_clear; + __le16 txq_base_idx; + __le16 txq_cnt; + __le16 rxq_base_idx; + __le16 rxq_cnt; + __le16 irq_base_idx; + __le16 irq_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_user_driver_release { + u8 func_id; + u8 drv_id; +}; + +#pragma pack() +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2flow/sxe2_flow_public.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2flow/sxe2_flow_public.h new file mode 100644 index 0000000000000000000000000000000000000000..dc15aebd9163b7e42331d58f8f2a3b555746b7e2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2flow/sxe2_flow_public.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_flow_public.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_FLOW_PUBLIC_H__ +#define __SXE2_FLOW_PUBLIC_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_osal.h" +#endif +enum sxe2_flow_type { + SXE2_FLOW_TYPE_NONE = 0, + SXE2_FLOW_MAC_PAY = 1, + SXE2_FLOW_MAC_IPV4_FRAG_PAY = 22, + SXE2_FLOW_MAC_IPV4_PAY = 23, + SXE2_FLOW_MAC_IPV4_UDP_PAY = 24, + SXE2_FLOW_MAC_IPV4_TCP_PAY = 26, + SXE2_FLOW_MAC_IPV4_SCTP_PAY = 27, + SXE2_FLOW_MAC_IPV4_IPV4_FRAG_PAY = 29, + SXE2_FLOW_MAC_IPV4_IPV4_PAY = 30, + SXE2_FLOW_MAC_IPV4_IPV4_UDP_PAY = 31, + SXE2_FLOW_MAC_IPV4_IPV4_TCP_PAY = 33, + SXE2_FLOW_MAC_IPV4_IPV4_SCTP_PAY = 34, + SXE2_FLOW_MAC_IPV4_IPV6_FRAG_PAY = 36, + SXE2_FLOW_MAC_IPV4_IPV6_PAY = 37, + SXE2_FLOW_MAC_IPV4_IPV6_UDP_PAY = 38, + SXE2_FLOW_MAC_IPV4_IPV6_TCP_PAY = 40, + SXE2_FLOW_MAC_IPV4_IPV6_SCTP_PAY = 41, + SXE2_FLOW_MAC_IPV4_GRE_PAY = 43, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_FRAG_PAY = 44, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_PAY = 45, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_UDP_PAY = 46, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_TCP_PAY = 48, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_SCTP_PAY = 49, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_FRAG_PAY = 51, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_PAY = 52, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_UDP_PAY = 53, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_TCP_PAY = 55, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_SCTP_PAY = 56, + SXE2_FLOW_MAC_IPV4_GRE_MAC_PAY = 58, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_FRAG_PAY = 59, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_PAY = 60, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_UDP_PAY = 61, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_TCP_PAY = 63, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_SCTP_PAY = 64, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_FRAG_PAY = 66, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_PAY = 67, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_UDP_PAY = 68, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_TCP_PAY = 70, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_SCTP_PAY = 71, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_PAY = 73, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_FRAG_PAY = 74, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_PAY = 75, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_UDP_PAY = 76, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_TCP_PAY = 78, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_SCTP_PAY = 79, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_FRAG_PAY = 81, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_PAY = 82, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_UDP_PAY = 83, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_TCP_PAY = 85, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_SCTP_PAY = 86, + SXE2_FLOW_MAC_IPV6_FRAG_PAY = 88, + SXE2_FLOW_MAC_IPV6_PAY = 89, + SXE2_FLOW_MAC_IPV6_UDP_PAY = 90, + SXE2_FLOW_MAC_IPV6_TCP_PAY = 92, + SXE2_FLOW_MAC_IPV6_SCTP_PAY = 93, + SXE2_FLOW_MAC_IPV6_IPV4_FRAG_PAY = 95, + 
SXE2_FLOW_MAC_IPV6_IPV4_PAY = 96, + SXE2_FLOW_MAC_IPV6_IPV4_UDP_PAY = 97, + SXE2_FLOW_MAC_IPV6_IPV4_TCP_PAY = 99, + SXE2_FLOW_MAC_IPV6_IPV4_SCTP_PAY = 100, + SXE2_FLOW_MAC_IPV6_IPV6_FRAG_PAY = 102, + SXE2_FLOW_MAC_IPV6_IPV6_PAY = 103, + SXE2_FLOW_MAC_IPV6_IPV6_UDP_PAY = 104, + SXE2_FLOW_MAC_IPV6_IPV6_TCP_PAY = 106, + SXE2_FLOW_MAC_IPV6_IPV6_SCTP_PAY = 107, + SXE2_FLOW_MAC_IPV6_GRE_PAY = 109, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_FRAG_PAY = 110, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_PAY = 111, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_UDP_PAY = 112, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_TCP_PAY = 114, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_SCTP_PAY = 115, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_FRAG_PAY = 117, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_PAY = 118, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_UDP_PAY = 119, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_TCP_PAY = 121, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_SCTP_PAY = 122, + SXE2_FLOW_MAC_IPV6_GRE_MAC_PAY = 124, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_FRAG_PAY = 125, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_PAY = 126, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_UDP_PAY = 127, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_TCP_PAY = 129, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_SCTP_PAY = 130, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_FRAG_PAY = 132, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_PAY = 133, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_UDP_PAY = 134, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_TCP_PAY = 136, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_SCTP_PAY = 137, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_PAY = 139, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_FRAG_PAY = 140, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_PAY = 141, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_UDP_PAY = 142, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_TCP_PAY = 144, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_SCTP_PAY = 145, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_FRAG_PAY = 147, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_PAY = 148, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_UDP_PAY = 149, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_TCP_PAY = 151, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_SCTP_PAY = 152, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_PAY = 329, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_PAY = 330, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_FRAG_PAY = 331, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_PAY = 332, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_UDP_PAY = 333, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_TCP_PAY = 334, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_SCTP_PAY = 335, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_FRAG_PAY = 336, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_PAY = 337, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_UDP_PAY = 338, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_TCP_PAY = 339, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_SCTP_PAY = 340, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_FRAG_PAY = 341, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_PAY = 342, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_UDP_PAY = 343, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_TCP_PAY = 344, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_SCTP_PAY = 345, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_FRAG_PAY = 346, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_PAY = 347, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_UDP_PAY = 348, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_TCP_PAY = 349, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_SCTP_PAY = 350, + SXE2_FLOW_MAC_IPV6_MAC_PAY = 820, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_FRAG_PAY = 821, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_PAY = 822, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_UDP_PAY = 823, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_TCP_PAY = 824, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_SCTP_PAY = 825, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_FRAG_PAY = 827, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_PAY = 828, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_UDP_PAY = 829, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_TCP_PAY = 830, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_SCTP_PAY = 831, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_PAY = 835, + 
SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_FRAG_PAY = 836, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_PAY = 837, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_UDP_PAY = 838, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_TCP_PAY = 839, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_SCTP_PAY = 840, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_FRAG_PAY = 842, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_PAY = 843, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_UDP_PAY = 844, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_TCP_PAY = 845, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_SCTP_PAY = 846, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_PAY = 878, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_FRAG_PAY = 877, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_PAY = 876, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_UDP_PAY = 879, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_TCP_PAY = 880, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_SCTP_PAY = 875, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_FRAG_PAY = 871, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_PAY = 870, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_UDP_PAY = 872, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_TCP_PAY = 873, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_SCTP_PAY = 869, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_PAY = 891, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_FRAG_PAY = 890, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_PAY = 889, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_UDP_PAY = 892, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_TCP_PAY = 893, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_SCTP_PAY = 888, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_FRAG_PAY = 884, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_PAY = 883, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_UDP_PAY = 885, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_TCP_PAY = 886, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_SCTP_PAY = 882, + SXE2_FLOW_MAC_IPV6_UDP_GRE_PAY = 904, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_FRAG_PAY = 903, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_PAY = 902, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_UDP_PAY = 905, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_TCP_PAY = 906, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_SCTP_PAY = 901, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_FRAG_PAY = 897, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_PAY = 896, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_UDP_PAY = 898, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_TCP_PAY = 899, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_SCTP_PAY = 895, + SXE2_FLOW_MAC_IPV4_UDP_GRE_PAY = 917, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_FRAG_PAY = 916, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_PAY = 915, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_UDP_PAY = 918, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_TCP_PAY = 919, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_SCTP_PAY = 914, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_FRAG_PAY = 910, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_PAY = 909, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_UDP_PAY = 911, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_TCP_PAY = 912, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_SCTP_PAY = 908, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_PAY = 930, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 929, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 928, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 931, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 932, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 927, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 923, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 922, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 924, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 925, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 921, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_PAY = 943, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 942, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 941, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 944, + 
SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 945, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 940, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 936, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 935, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 937, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 938, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 934, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_PAY = 956, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 955, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_PAY = 954, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 957, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 958, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 953, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 949, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_PAY = 948, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 950, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 951, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 947, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_PAY = 969, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 968, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_PAY = 967, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 970, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 971, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 966, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 962, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_PAY = 961, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 963, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 964, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 960, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_PAY = 982, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 981, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_PAY = 980, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_UDP_PAY = 983, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_TCP_PAY = 984, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 979, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 975, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_PAY = 974, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_UDP_PAY = 976, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_TCP_PAY = 977, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 973, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_PAY = 995, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 994, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_PAY = 993, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_UDP_PAY = 996, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_TCP_PAY = 997, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 992, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 988, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_PAY = 987, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_UDP_PAY = 989, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_TCP_PAY = 990, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 986, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_PAY = 1008, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_FRAG_PAY = 1007, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_PAY = 1006, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_UDP_PAY = 1009, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_TCP_PAY = 1010, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_SCTP_PAY = 1005, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_FRAG_PAY = 1001, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_PAY = 1000, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_UDP_PAY = 1002, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_TCP_PAY = 1003, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_SCTP_PAY = 999, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_PAY = 1021, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_FRAG_PAY = 1020, + 
SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_PAY = 1019, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_UDP_PAY = 1022, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_TCP_PAY = 1023, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_SCTP_PAY = 1018, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_FRAG_PAY = 1014, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_PAY = 1013, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_UDP_PAY = 1015, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_TCP_PAY = 1016, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_SCTP_PAY = 1012, + SXE2_FLOW_TYPE_MAX = 2048, +}; + +enum sxe2_rss_cfg_hdr_type { + SXE2_RSS_OUTER_HEADERS, + SXE2_RSS_INNER_HEADERS, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GTPU, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GTPU, + SXE2_RSS_ANY_HEADERS +}; + +enum sxe2_flow_hdr { + SXE2_FLOW_HDR_ETH = 0, + SXE2_FLOW_HDR_VLAN, + SXE2_FLOW_HDR_QINQ, + SXE2_FLOW_HDR_IPV4, + SXE2_FLOW_HDR_IPV6, + SXE2_FLOW_HDR_ICMP = 5, + SXE2_FLOW_HDR_TCP, + SXE2_FLOW_HDR_UDP, + SXE2_FLOW_HDR_SCTP, + SXE2_FLOW_HDR_GRE, + SXE2_FLOW_HDR_VXLAN = 10, + SXE2_FLOW_HDR_GENEVE, + SXE2_FLOW_HDR_GTPU, + SXE2_FLOW_HDR_IPV_FRAG, + SXE2_FLOW_HDR_IPV_OTHER, + SXE2_FLOW_HDR_ETH_NON_IP = 15, + SXE2_FLOW_HDR_MAX = 128, +}; + +enum sxe2_flow_fld_id { + SXE2_FLOW_FLD_ID_ETH_DA = 0, + SXE2_FLOW_FLD_ID_ETH_SA, + SXE2_FLOW_FLD_ID_S_TCI, + SXE2_FLOW_FLD_ID_C_TCI, + SXE2_FLOW_FLD_ID_S_TPID, + SXE2_FLOW_FLD_ID_C_TPID = 5, + SXE2_FLOW_FLD_ID_S_VID, + SXE2_FLOW_FLD_ID_C_VID, + SXE2_FLOW_FLD_ID_ETH_TYPE, + SXE2_FLOW_FLD_ID_IPV4_TOS, + SXE2_FLOW_FLD_ID_IPV6_DSCP = 10, + SXE2_FLOW_FLD_ID_IPV4_TTL, + SXE2_FLOW_FLD_ID_IPV4_PROT, + SXE2_FLOW_FLD_ID_IPV6_TTL, + SXE2_FLOW_FLD_ID_IPV6_PROT, + SXE2_FLOW_FLD_ID_IPV4_SA = 15, + SXE2_FLOW_FLD_ID_IPV4_DA, + SXE2_FLOW_FLD_ID_IPV6_SA, + SXE2_FLOW_FLD_ID_IPV6_DA, + SXE2_FLOW_FLD_ID_IPV4_CHKSUM, + SXE2_FLOW_FLD_ID_IPV4_ID = 20, + SXE2_FLOW_FLD_ID_IPV6_ID, + SXE2_FLOW_FLD_ID_IPV6_PRE32_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE32_DA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_DA = 25, + SXE2_FLOW_FLD_ID_IPV6_PRE64_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE64_DA, + SXE2_FLOW_FLD_ID_TCP_SRC_PORT, + SXE2_FLOW_FLD_ID_TCP_DST_PORT, + SXE2_FLOW_FLD_ID_UDP_SRC_PORT = 30, + SXE2_FLOW_FLD_ID_UDP_DST_PORT, + SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, + SXE2_FLOW_FLD_ID_SCTP_DST_PORT, + SXE2_FLOW_FLD_ID_TCP_FLAGS, + SXE2_FLOW_FLD_ID_TCP_CHKSUM = 35, + SXE2_FLOW_FLD_ID_UDP_CHKSUM, + SXE2_FLOW_FLD_ID_SCTP_CHKSUM, + SXE2_FLOW_FLD_ID_VXLAN_VNI, + SXE2_FLOW_FLD_ID_GENEVE_VNI, + SXE2_FLOW_FLD_ID_GTPU_TEID = 40, + SXE2_FLOW_FLD_ID_NVGRE_TNI, + + SXE2_FLOW_FLD_ID_MAX = 128, +}; + +struct sxe2_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be16 ether_type; +}; + +struct sxe2_vlan_hdr { + __be16 type; + __be16 vlan; +}; + +struct sxe2_ipv4_hdr { + u8 ver_ihl; + u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + u8 ttl; + u8 protocol; + __be16 check; + __be32 saddr; + __be32 daddr; +}; + +#define SXE2_IPV6_ADDR_LENGTH (16) +#define SXE2_IPV6_TC_SHIFT (20) +#define SXE2_IPV6_TC_MASK (0xFF) +struct sxe2_ipv6_hdr { + __be32 pri_ver_flow; + __be16 payload_len; + u8 nexthdr; + u8 
hop_limit; + union { + u8 saddr[16]; + __be16 saddr16[8]; + __be32 saddr32[4]; + }; + union { + u8 daddr[16]; + __be16 daddr16[8]; + __be32 daddr32[4]; + }; +}; + +struct sxe2_tcp_hdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __be16 flag; + __be16 window; + __be16 check; + __be16 urg_ptr; +}; + +struct sxe2_udp_hdr { + __be16 source; + __be16 dest; + __be16 len; + __be16 check; +}; + +struct sxe2_sctp_hdr { + __be16 src_port; + __be16 dst_port; +}; + +struct sxe2_nvgre_hdr { + __be16 flags; + __be16 protocol; + __be32 tni; +}; + +struct sxe2_geneve_hdr { + __be16 flags; + __be16 protocol; + __be32 vni; +}; + +struct sxe2_gtpu_hdr { + u8 flag; + u8 msg_type; + __be16 msg_len; + __be32 teid; +}; + +struct sxe2_vxlan_hdr { + u8 flag; + u8 resvd0; + u8 resvd1; + u8 protocol; + __be32 vni; +}; + +enum sxe2_flow_act_type { + SXE2_FLOW_ACTION_DROP = 0, + SXE2_FLOW_ACTION_TC_REDIRECT, + SXE2_FLOW_ACTION_TO_VSI, + SXE2_FLOW_ACTION_TO_VSI_LIST, + SXE2_FLOW_ACTION_PASSTHRU, + SXE2_FLOW_ACTION_QUEUE, + SXE2_FLOW_ACTION_Q_REGION, + SXE2_FLOW_ACTION_MARK, + SXE2_FLOW_ACTION_COUNT, + SXE2_FLOW_ACTION_RSS, + SXE2_FLOW_ACTION_MAX = 32, +}; + +enum sxe2_rss_hash_key_func { + SXE2_RSS_HASH_FUNC_TOEPLITZ = 0, + SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ = 1, + SXE2_RSS_HASH_FUNC_XOR = 2, + SXE2_RSS_HASH_FUNC_JEKINS = 3 +}; + +struct sxe2_flow_action_rss { + DECLARE_BITMAP(hdr_out, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hdr_in, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(fld, SXE2_FLOW_FLD_ID_MAX); + u8 is_inner; + u8 func; + u8 hdr_type; +}; + +struct sxe2_flow_action_queue { + u16 vsi_index; + u16 q_index; +}; + +struct sxe2_flow_action_queue_region { + u16 vsi_index; + u16 q_index; + u8 region; +}; + +struct sxe2_flow_action_passthru { + u16 vsi_index; +}; + +struct sxe2_flow_action_mark { + u32 mark_id; +}; + +#define SXE2_VSI_MAX (2048) +struct sxe2_flow_action_vsi { + u16 vsi_index; +}; + +struct sxe2_flow_action_vsi_list { + DECLARE_BITMAP(vsi_list_map, SXE2_VSI_MAX); + u16 vsi_cnt; +}; + +enum sxe2_fnav_stat_ctrl_type { + SXE2_FNAV_STAT_ENA_NONE = 0, + SXE2_FNAV_STAT_ENA_PKTS, + SXE2_FNAV_STAT_ENA_BYTES, + SXE2_FNAV_STAT_ENA_ALL, +}; + +struct sxe2_flow_action_count { + u32 user_id; + u32 driver_id; + u32 stat_index; + u32 stat_ctrl; +}; + +enum sxe2_flow_engine_type { + SXE2_FLOW_ENGINE_ACL, + SXE2_FLOW_ENGINE_SWITCH, + SXE2_FLOW_ENGINE_FNAV, + SXE2_FLOW_ENGINE_RSS, + SXE2_FLOW_ENGINE_MAX, +}; + +struct sxe2_flow_item { + struct sxe2_ether_hdr eth; + struct sxe2_vlan_hdr vlan; + struct sxe2_vlan_hdr qinq; + struct sxe2_ipv4_hdr ipv4; + struct sxe2_ipv6_hdr ipv6; + struct sxe2_udp_hdr udp; + struct sxe2_tcp_hdr tcp; + struct sxe2_sctp_hdr sctp; + struct sxe2_gtpu_hdr gtpu; + struct sxe2_vxlan_hdr vxlan; + struct sxe2_nvgre_hdr nvgre; + struct sxe2_geneve_hdr geneve; +}; + +enum sxe2_flow_sw_direct_type { + SXE2_FLOW_SW_DIRECT_TX, + SXE2_FLOW_SW_DIRECT_RX, + SXE2_FLOW_SW_DIRECT_MAX, +}; + +enum sxe2_flow_sw_pattern_type { + SXE2_FLOW_SW_PATTERN_ONLY, + SXE2_FLOW_SW_PATTERN_LAST, + SXE2_FLOW_SW_PATTERN_FIRST, + SXE2_FLOW_SW_PATTERN_MAX, +}; + +enum sxe2_flow_tunnel_type { + SXE2_FLOW_TUNNEL_TYPE_NONE, + SXE2_FLOW_TUNNEL_TYPE_PARENT, + SXE2_FLOW_TUNNEL_TYPE_VXLAN, + SXE2_FLOW_TUNNEL_TYPE_GTPU, + SXE2_FLOW_TUNNEL_TYPE_GENEVE, + SXE2_FLOW_TUNNEL_TYPE_GRE, + SXE2_FLOW_TUNNEL_TYPE_IPIP, +}; + +struct sxe2_flow_meta { + u8 switch_pattern_dup_allow; + u8 switch_src_direct; + u16 flow_src_vsi; + u16 flow_rule_vsi; + u32 flow_prio; + u16 flow_type; + u8 tunnel_type; + u8 rsv; +}; + +struct sxe2_flow_pattern 
{ + DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(map_spec, SXE2_FLOW_FLD_ID_MAX); + DECLARE_BITMAP(map_mask, SXE2_FLOW_FLD_ID_MAX); + struct sxe2_flow_item item_spec; + struct sxe2_flow_item item_mask; + u64 rss_type_allow; +}; + +struct sxe2_flow_action { + DECLARE_BITMAP(act_types, SXE2_FLOW_ACTION_MAX); + struct sxe2_flow_action_rss rss; + struct sxe2_flow_action_queue queue; + struct sxe2_flow_action_queue_region q_region; + struct sxe2_flow_action_passthru passthru; + struct sxe2_flow_action_vsi vsi; + struct sxe2_flow_action_vsi_list vsi_list; + struct sxe2_flow_action_mark mark; + struct sxe2_flow_action_count count; +}; +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.c b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.c new file mode 100644 index 0000000000000000000000000000000000000000..e7f345838b22226c2aee62a4285aa74938b0deea --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cli_drv_priv.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_common.h" +#include "sxe2_log.h" +#include "sxe2.h" +#include "sxe2_cli_drv_priv.h" +#include "sxe2_cli_drv_msg.h" + +STATIC s32 sxe2_cli_get_netdev_name_handler(struct sxe2_adapter *adapter, + struct sxe2_cmd_trans_info *param) +{ + struct drv_msg_info *rsp_hdr = SXE2_CLI_DRV_RSP_MSG_HDR(param); + struct sxe2_cli_drv_get_pname_rsp_msg *dev_name_rsp_msg = + (struct sxe2_cli_drv_get_pname_rsp_msg *)&rsp_hdr->body[0]; + u32 rsp_len = SXE2_DRV_MSG_INFO_SIZE + + sizeof(struct sxe2_cli_drv_get_pname_rsp_msg); + + if ((param->resp_len - SXE2_DRV_MSG_INFO_SIZE) < + sizeof(struct sxe2_cli_drv_get_pname_rsp_msg)) { + LOG_ERROR_BDF("params is invalid.\n"); + return -EINVAL; + } + + memset(dev_name_rsp_msg->netdev_name, 0, + sizeof(dev_name_rsp_msg->netdev_name)); + + rtnl_lock(); + strscpy(dev_name_rsp_msg->netdev_name, + adapter->vsi_ctxt.main_vsi->netdev->name, IFNAMSIZ); + rtnl_unlock(); + + rsp_hdr->ack_length = rsp_len; + return SXE2_CLI_DRV_SUCCESS; +} + +STATIC struct sxe2_cmd_handler_info sxe2_cli_drv_cmd_map[] = { + {SXE2_CLI_CMD_GET_NETDEV_NAME, sxe2_cli_get_netdev_name_handler}, +}; + +STATIC s32 sxe2_cmd_cli_drv_params_chk(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, + struct sxe2_cmd_trans_info *cmd_ctxt) +{ + s32 ret = SXE2_CLI_DRV_SUCCESS; + struct drv_msg_info *req_hdr; + struct sxe2_cmd_trans_info *trans_info = cmd_ctxt; + + if (!cmd_params->resp_data || !cmd_params->req_data) { + ret = -EINVAL; + goto end; + } + + if (cmd_params->req_len < SXE2_DRV_MSG_INFO_SIZE || + cmd_params->resp_len < SXE2_DRV_MSG_INFO_SIZE || + cmd_params->req_len > SXE2_DRV_MSG_MAX_SIZE || + cmd_params->resp_len > SXE2_DRV_MSG_MAX_SIZE) { + ret = -EINVAL; + goto end; + } + + trans_info->req_len = cmd_params->req_len; + trans_info->resp_len = cmd_params->resp_len; + + trans_info->req_buff = kzalloc(trans_info->req_len, GFP_KERNEL); + if (!trans_info->req_buff) { + LOG_ERROR_BDF("malloc failed: size %u.\n", trans_info->req_len); + ret = -ENOMEM; + trans_info->req_len = 0; + trans_info->resp_len = 0; + goto end; + } + if (copy_from_user(trans_info->req_buff, cmd_params->req_data, + cmd_params->req_len)) { + ret = -ENOMEM; + LOG_ERROR_BDF("req copy failed: size %u.\n", trans_info->req_len); + goto l_copy_failed; + } + + trans_info->resp_buff = 
kzalloc(trans_info->resp_len, GFP_KERNEL);
+	if (!trans_info->resp_buff) {
+		ret = -ENOMEM;
+		LOG_ERROR_BDF("malloc failed: size %u.\n", trans_info->resp_len);
+		trans_info->resp_len = 0;
+		goto l_copy_failed;
+	}
+
+	req_hdr = SXE2_CLI_DRV_REQ_MSG_HDR(trans_info);
+	if (req_hdr->magic != SXE2_DRV_MSG_MAGIC_CODE) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("magic invalid %x, opcode invalid %x, trace:0x%llx\n",
+			      req_hdr->magic, req_hdr->opcode, req_hdr->trace_id);
+		goto l_magic_failed;
+	}
+
+	req_hdr->ack_length = cmd_params->resp_len;
+	goto end;
+
+l_magic_failed:
+	kfree(trans_info->resp_buff);
+	trans_info->resp_buff = NULL;
+	trans_info->resp_len = 0;
+
+l_copy_failed:
+	kfree(trans_info->req_buff);
+	trans_info->req_buff = NULL;
+	trans_info->req_len = 0;
+
+end:
+	return ret;
+}
+
+STATIC void sxe2_cmd_cli_fill_rsp_hdr(struct drv_msg_info *rsp_hdr,
+				      struct drv_msg_info *req_hdr, s32 rc)
+{
+	rsp_hdr->magic = SXE2_DRV_MSG_MAGIC_CODE;
+	rsp_hdr->error = (u32)rc;
+	rsp_hdr->trace_id = req_hdr->trace_id;
+}
+
+STATIC s32 sxe2_cmd_free_trans_info(struct sxe2_adapter *adapter,
+				    struct sxe2_cmd_trans_info *trans_info,
+				    struct sxe2_cmd_params *params)
+{
+	s32 ret = 0;
+
+	kfree(trans_info->req_buff);
+	trans_info->req_buff = NULL;
+	trans_info->req_len = 0;
+
+	if (trans_info->resp_buff) {
+		if (trans_info->resp_len) {
+			if (copy_to_user((void __user *)params->resp_data,
+					 trans_info->resp_buff,
+					 trans_info->resp_len)) {
+				LOG_ERROR_BDF("cmd trace_id=0x%llx copy to user err\n",
+					      params->trace_id);
+				ret = -EFAULT;
+			}
+		}
+		kfree(trans_info->resp_buff);
+		trans_info->resp_buff = NULL;
+		trans_info->resp_len = 0;
+	}
+
+	return ret;
+}
+
+s32 sxe2_cmd_cli_drv_exec(struct sxe2_adapter *adapter,
+			  struct sxe2_cmd_params *cmd_params)
+{
+	u32 i;
+	sxe2_cb_func cb = NULL;
+	s32 rc;
+	struct drv_msg_info *rsp_hdr;
+	struct drv_msg_info *req_hdr;
+	u32 table_cnt = (sizeof(sxe2_cli_drv_cmd_map) /
+			 sizeof(struct sxe2_cmd_handler_info));
+	struct sxe2_cmd_trans_info trans_info = {0};
+
+	rc = sxe2_cmd_cli_drv_params_chk(adapter, cmd_params, &trans_info);
+	if (rc) {
+		LOG_INFO_BDF("cli cmd params check failed.(%d)\n", rc);
+		goto end;
+	}
+
+	req_hdr = SXE2_CLI_DRV_REQ_MSG_HDR((&trans_info));
+	for (i = 0; i < table_cnt; i++) {
+		if (sxe2_cli_drv_cmd_map[i].opcode == req_hdr->opcode)
+			cb = sxe2_cli_drv_cmd_map[i].handler;
+	}
+
+	if (cb) {
+		rsp_hdr = SXE2_CLI_DRV_RSP_MSG_HDR((&trans_info));
+		rc = cb(adapter, &trans_info);
+		sxe2_cmd_cli_fill_rsp_hdr(rsp_hdr, req_hdr, rc);
+
+		if (rc)
+			LOG_INFO_BDF("cli cmd failed.(%d)\n", rc);
+	} else {
+		rc = -EINVAL;
+		LOG_INFO_BDF("cli cmd is not supported.(%d)\n", rc);
+	}
+
+end:
+	if (sxe2_cmd_free_trans_info(adapter, &trans_info, cmd_params))
+		rc = -EFAULT;
+
+	return rc;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.h
new file mode 100644
index 0000000000000000000000000000000000000000..e39b90c8ea326362a4647d3bf6a3186813fcbaee
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cli_drv_priv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_cli_drv_priv.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_cmd.h" + +struct sxe2_adapter; +struct sxe2_hw; + +#define SXE2_DRV_MSG_INFO_SIZE (sizeof(struct drv_msg_info)) +#define SXE2_CLI_DRV_RSP_MSG_HDR(param) \ + ((struct drv_msg_info *)((param)->resp_buff)) + +#define SXE2_CLI_DRV_REQ_MSG_HDR(param) \ + ((struct drv_msg_info *)((param)->req_buff)) + +typedef s32 (*sxe2_cb_func)(struct sxe2_adapter *adapter, + struct sxe2_cmd_trans_info *param); + +struct sxe2_cmd_handler_info { + u32 opcode; + sxe2_cb_func handler; +}; + +s32 sxe2_cmd_cli_drv_exec(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params); diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.c b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.c new file mode 100644 index 0000000000000000000000000000000000000000..7eddca771871d54c96d3b1466ba9e7b74175ebd6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.c @@ -0,0 +1,2631 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cmd_channel.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef SXE2_DPDK_DRIVER +#include +#include +#include "sxe2.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_common.h" +#include "sxe2_internal_ver.h" +#include "sxe2_misc.h" +#include "sxe2_log.h" +#include "sxe2_monitor.h" +#include "sxe2_event.h" +#include "sxe2_mbx_msg.h" + +#define NS_TO_MS_UNIT (1000000) + +#define SXE2_WAIT_DONE_MIN (10) +#define SXE2_WAIT_DONE_MAX (20) + +#define SXE2_CMD_REQ_LEN(_cmd_hdr) ((_cmd_hdr)->tran_in_len) +#define SXE2_CMD_RESP_LEN(_cmd_hdr) ((_cmd_hdr)->tran_out_len) + +static DEFINE_PER_CPU(union sxe2_trace_info, sxe2_trace_id); + +#define TRACE_ID_CHIP_OUT_COUNT_MASK 0x0003FFFFFFFFFFFFLLU +#define TRACE_ID_CHIP_OUT_CPUID_MASK 0x3FFLLU +#define TRACE_ID_CHIP_OUT_TYPE_MASK 0xFLLU + +#ifndef SXE2_TEST +#ifdef SXE2_CFG_DEBUG +#define SXE2_CMD_TQ_WB_DFLT_TIMEOUT \ + (3) +#else +#define SXE2_CMD_TQ_WB_DFLT_TIMEOUT (1) +#endif +#define SXE2_CMD_WB_WAIT_INTERVAL 1 +#else +#define SXE2_CMD_TQ_WB_DFLT_TIMEOUT (0) +#define SXE2_CMD_WB_WAIT_INTERVAL 0 +#endif +#define SXE2_CMD_CHNL_RELEASE_CHECK_INTERVAL 1 +#define SXE2_CMD_RETRY_INTERVAL 1 +#define SXE2_CMD_RETRY_COUNT 3 +#define SXE2_CMD_POLLING_INTERVAL 10 +#define SXE2_CMD_CHNL_RECV_MULIT_PACK_INTERVAL 3 + +#define SXE2_CMD_WORK_NAME "SXE2-CMD" + +#define SXE2_POLLING_CMDQ_IDLE_INTERVAL (10) +#define SXE2_POLLING_CMDQ_IDLE_TIMEOUT (1000) + +#define SXE2_WAIT_INTERRUPTIBLE_INTERVAL (1) +#define SXE2_CMD_RECV_MULIT_DFLT_TIMEOUT 1 + +#ifndef secs_to_jiffies +#define secs_to_jiffies(_secs) (msecs_to_jiffies((_secs) * 1000)) +#endif + +#define SXE2_HDR_SIZE(type) \ + (type == SXE2_CMD_TYPE_CLI ? 
SXE2_CMD_HDR_SIZE \ + : (SXE2_CMD_HDR_SIZE + SXE2_DRV_MSG_HDR_SIZE)) + +#define SXE2_MSG_BODY(msg_hdr) \ + ({ \ + const typeof(*(msg_hdr)) *__mh = (msg_hdr); \ + (void *)__mh + le32_to_cpu(__mh->data_offset); \ + }) + +#define SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, chnl_type) \ + (&(adapter)->cmd_channel_ctxt.channel[(chnl_type)]) +#define SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type) \ + (&(adapter)->cmd_channel_ctxt.channel[(chnl_type)] \ + .queue[(q_type)]) + +#define SXE2_CMD_QUEUE_DESC(queue, i) \ + (&(((struct sxe2_cmd_desc *)((queue)->desc.va))[i])) +#define SXE2_CMD_QUEUE_BUF(queue, i) ((queue)->buf[i].va) + +#define SXE2_CMD_QUEUE_DESC_UNUSED(queue) \ +({ \ + const typeof(*(queue)) *__q = (queue); \ + u16 ntc = __q->ntc; \ + u16 ntu = __q->ntu; \ + u16 depth = __q->depth; \ + \ + (u16)(((ntc > ntu) ? 0 : depth) + ntc - ntu - 1); \ +}) + +#define SXE2_QUEUE_IDX_INC(i, depth) \ + do { \ + typeof(i) __i = (i); \ + __i++; \ + if (__i == (depth)) \ + __i = 0; \ + (i) = __i; \ + } while (0) + +#define SXE2_WB_DONE(desc) (le16_to_cpu((desc)->flags) & SXE2_CMD_DONE) +#define SXE2_CMD_RQ_WEIGHT 256 + +#define SXE2_MSG_HANDLING_MAX_CNT (1024) + +struct workqueue_struct *sxe2_rq_recv_wq; +struct workqueue_struct *sxe2_msg_handle_wq; + +STATIC atomic64_t g_cmd_session_id; + +enum sxe2_cmd_work_state { + SXE2_RQ_RECV_WORK_SCHED, + SXE2_RQ_RECV_WORK_STOPPED, + SXE2_MSG_HANDLE_WORK_SCHED, + SXE2_MSG_HANDLE_WORK_STOPPED, +}; + +enum sxe2_cmd_exec_mode { + SXE2_CMD_EXEC_NOTIFY, + SXE2_CMD_EXEC_POLLING, + SXE2_CMD_EXEC_NO_RESP, +}; + +static void sxe2_cmd_work_init(struct sxe2_adapter *adapter); + +STATIC void sxe2_cmd_work_exit(struct sxe2_adapter *adapter); + +STATIC void sxe2_msg_list_add(struct sxe2_adapter *adapter, + struct sxe2_recv_msg *msg); + +static void sxe2_msg_list_del(struct sxe2_adapter *adapter, + struct sxe2_recv_msg **msg); + +static void sxe2_msg_handle_work_schedule(struct sxe2_adapter *adapter); + +static s32 sxe2_cmd_drv_exec(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, + enum sxe2_cmd_type type); + +#ifdef SXE2_CFG_DEBUG +STATIC void sxe2_dump_fwc(struct sxe2_adapter *adapter) +{ + struct sxe2_hw *hw = &adapter->hw; + + LOG_DEBUG_BDF("ATQT: 0x%x, ATQH: 0x%x, ARQT: 0x%x, ARQH: 0x%x.\n", + sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQT), + sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQH), + sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ARQT), + sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ARQH)); +} +#endif + +static inline bool is_interrupt_signal(struct task_struct *p) +{ + if (sigismember(&p->pending.signal, SIGINT) || + sigismember(&p->pending.signal, SIGKILL) || + sigismember(&p->pending.signal, SIGQUIT)) { + return true; + } + return false; +} + +STATIC bool signal_pending_is_interrupt(void) +{ + if (!signal_pending(current)) + return false; + + return is_interrupt_signal(current); +} + +STATIC void sxe2_trace_id_init(void) +{ + u32 cpu; + union sxe2_trace_info *id; + + for_each_possible_cpu(cpu) { + id = (union sxe2_trace_info *)&per_cpu(sxe2_trace_id, cpu); + id->sxe2_trace_id_param.cpu_id = + (cpu & TRACE_ID_CHIP_OUT_CPUID_MASK); + id->sxe2_trace_id_param.count = 0; + id->sxe2_trace_id_param.type = (SXE2_CMD_TYPE_DRV_TO_FW & + TRACE_ID_CHIP_OUT_TYPE_MASK); + } +} + +void sxe2_trace_id_alloc(u64 *trace_id) +{ + union sxe2_trace_info *trace; + u64 trace_id_count; + + preempt_disable(); + trace = this_cpu_ptr(&sxe2_trace_id); + + trace_id_count = trace->sxe2_trace_id_param.count; + ++trace_id_count; + trace->sxe2_trace_id_param.count = + (trace_id_count & 
TRACE_ID_CHIP_OUT_COUNT_MASK); + + *trace_id = trace->id; + preempt_enable(); +} + +STATIC void sxe2_cmd_queue_ops_init(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + + if (chnl_type == SXE2_CHNL_FW && q_type == SXE2_CMD_TQ) { + queue->ops.enable = sxe2_hw_fw_tq_enable; + queue->ops.disable = sxe2_hw_fw_tq_disable; + queue->ops.is_idle = sxe2_hw_fw_tq_is_idle; + queue->ops.write_tail = sxe2_hw_fw_tq_write_tail; + queue->ops.read_head = sxe2_hw_fw_tq_read_head; + queue->ops.get_error = sxe2_hw_fw_tq_get_error; + } else if (chnl_type == SXE2_CHNL_FW && q_type == SXE2_CMD_RQ) { + queue->ops.enable = sxe2_hw_fw_rq_enable; + queue->ops.disable = sxe2_hw_fw_rq_disable; + queue->ops.is_idle = sxe2_hw_fw_rq_is_idle; + queue->ops.write_tail = sxe2_hw_fw_rq_write_tail; + queue->ops.read_head = sxe2_hw_fw_rq_read_head; + queue->ops.get_error = sxe2_hw_fw_rq_get_error; + } else if (chnl_type == SXE2_CHNL_MBX && q_type == SXE2_CMD_TQ) { + queue->ops.enable = sxe2_hw_mbx_tq_enable; + queue->ops.disable = sxe2_hw_mbx_tq_disable; + queue->ops.write_tail = sxe2_hw_mbx_tq_write_tail; + queue->ops.read_head = sxe2_hw_mbx_tq_read_head; + queue->ops.get_error = sxe2_hw_mbx_tq_get_error; + queue->ops.is_idle = NULL; + } else if (chnl_type == SXE2_CHNL_MBX && q_type == SXE2_CMD_RQ) { + queue->ops.enable = sxe2_hw_mbx_rq_enable; + queue->ops.disable = sxe2_hw_mbx_rq_disable; + queue->ops.write_tail = sxe2_hw_mbx_rq_write_tail; + queue->ops.read_head = sxe2_hw_mbx_rq_read_head; + queue->ops.get_error = sxe2_hw_mbx_rq_get_error; + queue->ops.is_idle = NULL; + } else { + LOG_ERROR_BDF("unknown chnl_type %d, q_type %d.\n", chnl_type, + q_type); + } +} + +STATIC void sxe2_cmd_channel_sw_init(struct sxe2_cmd_channel *channel) +{ + mutex_init(&channel->queue[SXE2_CMD_TQ].lock); + mutex_init(&channel->queue[SXE2_CMD_RQ].lock); + spin_lock_init(&channel->wq.lock); + init_waitqueue_head(&channel->wq.wq); + hash_init(channel->wq.table); +} + +STATIC void sxe2_channel_sw_deinit(struct sxe2_cmd_channel *channel) +{ + mutex_destroy(&channel->queue[SXE2_CMD_TQ].lock); + mutex_destroy(&channel->queue[SXE2_CMD_RQ].lock); +} + +STATIC void sxe2_cmd_session_id_init(void) +{ + s64 session_id; + + get_random_bytes(&session_id, sizeof(session_id)); + + atomic64_set(&g_cmd_session_id, (u64)session_id); +} + +STATIC void sxe2_cmd_channel_init_once(struct sxe2_adapter *adapter) +{ + sxe2_cmd_session_id_init(); + sxe2_trace_id_init(); + sxe2_cmd_channel_sw_init(SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, SXE2_CHNL_FW)); + sxe2_cmd_channel_sw_init(SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, + SXE2_CHNL_MBX)); + INIT_LIST_HEAD(&adapter->cmd_channel_ctxt.head); + mutex_init(&adapter->cmd_channel_ctxt.lock); + spin_lock_init(&adapter->cmd_channel_ctxt.recv_work_lock); + spin_lock_init(&adapter->cmd_channel_ctxt.handle_work_lock); + mutex_init(&adapter->cmd_channel_ctxt.event_lock); + mutex_init(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_FW].lock); + mutex_init(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_MBX].lock); +} + +STATIC void sxe2_cmd_channel_deinit_once(struct sxe2_adapter *adapter) +{ + mutex_destroy(&adapter->cmd_channel_ctxt.event_lock); + mutex_destroy(&adapter->cmd_channel_ctxt.lock); + mutex_destroy(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_FW].lock); + mutex_destroy(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_MBX].lock); + sxe2_channel_sw_deinit(SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, 
SXE2_CHNL_FW)); + sxe2_channel_sw_deinit(SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, SXE2_CHNL_MBX)); +} + +static inline s32 sxe2_dma_alloc_coherent(struct sxe2_adapter *adapter, + struct sxe2_dma_mem *dma, size_t size) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + dma->va = dmam_alloc_coherent(dev, size, &dma->pa, GFP_KERNEL | __GFP_ZERO); + if (!dma->va) { + LOG_DEV_ERR("alloc dma mem failed, size %zu.\n", size); + ret = -ENOMEM; + goto l_end; + } + + dma->size = size; +l_end: + return ret; +} + +static inline void sxe2_dma_free_coherent(struct sxe2_adapter *adapter, + struct sxe2_dma_mem *dma) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (!dma->va) + return; + + dmam_free_coherent(dev, dma->size, dma->va, dma->pa); + dma->va = NULL; + dma->pa = 0; + dma->size = 0; +} + +STATIC s32 sxe2_cmd_queue_desc_alloc(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue) +{ + struct sxe2_dma_mem *dma = &queue->desc; + size_t size = queue->depth * sizeof(struct sxe2_cmd_desc); + + return sxe2_dma_alloc_coherent(adapter, dma, size); +} + +STATIC void sxe2_cmd_queue_desc_free(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue) +{ + sxe2_dma_free_coherent(adapter, &queue->desc); +} + +STATIC void sxe2_cmd_queue_bufs_free(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_dma_mem *dma; + u16 i; + struct sxe2_recv_cache_buff *cache_buff = &queue->cache_buff; + + if (!queue->buf) + return; + + if (cache_buff->buf) { + kfree(cache_buff->buf); + cache_buff->buf = NULL; + } + + for (i = 0; i < queue->depth; i++) { + dma = &queue->buf[i]; + sxe2_dma_free_coherent(adapter, dma); + } + + devm_kfree(dev, queue->buf); + queue->buf = NULL; +} + +STATIC s32 sxe2_cmd_queue_buf_alloc(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue, u16 idx) +{ + struct sxe2_dma_mem *dma = &queue->buf[idx]; + size_t size = queue->buf_size; + + return sxe2_dma_alloc_coherent(adapter, dma, size); +} + +STATIC s32 sxe2_cmd_queue_bufs_alloc(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 i; + + queue->buf = devm_kcalloc(dev, queue->depth, sizeof(*queue->buf), + GFP_KERNEL); + if (!queue->buf) { + LOG_DEV_ERR("alloc buf_dma_list failed, cnt %d, size %zu.\n", + queue->depth, sizeof(*queue->buf)); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < queue->depth; i++) { + ret = sxe2_cmd_queue_buf_alloc(adapter, queue, i); + if (ret) + goto l_alloc_failed; + } + + queue->cache_buff.buf_offset = 0; + queue->cache_buff.finish = true; + queue->cache_buff.buf = kzalloc(queue->buf_size, GFP_KERNEL); + if (!queue->cache_buff.buf) { + LOG_DEV_ERR("alloc cache_buff failed, buf_size %d.\n", + queue->buf_size); + ret = -ENOMEM; + goto l_alloc_failed; + } + + return 0; + +l_alloc_failed: + sxe2_cmd_queue_bufs_free(adapter, queue); +l_end: + return ret; +} + +STATIC s32 sxe2_cmd_queue_enable(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + s32 ret; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + + ret = sxe2_err_code_trans_hw(queue->ops.enable(hw, queue->depth, + queue->desc.pa)); + if (ret) + LOG_DEV_ERR("enable channel %d, queue %d failed.\n", chnl_type, + q_type); + + return ret; +} + +STATIC void sxe2_cmd_queue_disable(struct sxe2_adapter *adapter, + enum 
sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + u32 total_delay = 0; + + queue->ops.disable(hw); + + if (!queue->ops.is_idle) + return; + + do { + if (queue->ops.is_idle(hw)) + break; + msleep(SXE2_POLLING_CMDQ_IDLE_INTERVAL); + total_delay++; + if (total_delay == SXE2_POLLING_CMDQ_IDLE_TIMEOUT) { + LOG_DEV_ERR("Cmd queue chnl_type %d, q_type %d disable\t" + "timeout\n", + chnl_type, q_type); + total_delay = 0; + } + } while (1); +} + +STATIC void sxe2_cmd_queue_param_init(enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type, + struct sxe2_cmd_queue *queue) +{ + if (chnl_type == SXE2_CHNL_FW && q_type == SXE2_CMD_TQ) { + queue->depth = SXE2_DEPTH_FW_TQ; + queue->buf_size = SXE2_BUF_SIZE_FW_TQ; + } else if (chnl_type == SXE2_CHNL_FW && q_type == SXE2_CMD_RQ) { + queue->depth = SXE2_DEPTH_FW_RQ; + queue->buf_size = SXE2_BUF_SIZE_FW_RQ; + } else if (chnl_type == SXE2_CHNL_MBX && q_type == SXE2_CMD_TQ) { + queue->depth = SXE2_DEPTH_MBX_TQ; + queue->buf_size = SXE2_BUF_SIZE_MBX_TQ; + } else if (chnl_type == SXE2_CHNL_MBX && q_type == SXE2_CMD_RQ) { + queue->depth = SXE2_DEPTH_MBX_RQ; + queue->buf_size = SXE2_BUF_SIZE_MBX_RQ; + } else { + LOG_ERROR("unknown chnl_type %d, q_type %d.\n", chnl_type, q_type); + } +} + +STATIC void sxe2_cmd_rq_desc_fill(enum sxe2_cmd_channel_type chnl_type, + struct sxe2_cmd_queue *queue, u16 i) +{ + struct sxe2_cmd_desc *desc; + struct sxe2_dma_mem *buf_dma; + + desc = SXE2_CMD_QUEUE_DESC(queue, i); + buf_dma = &queue->buf[i]; + + memset(desc, 0, sizeof(*desc)); + memset(buf_dma->va, 0, buf_dma->size); + + desc->flags |= cpu_to_le16(SXE2_CMD_BUF); + if (buf_dma->size > SXE2_CMD_LARGE_BUF_SIZE) + desc->flags |= cpu_to_le16(SXE2_CMD_LARGE_BUF); + + if (chnl_type == SXE2_CHNL_MBX) + desc->opcode = cpu_to_le16(SXE2_CMD_MBX_TO_PF); + + desc->data_len = cpu_to_le16((u16)buf_dma->size); + desc->buf_addr_h = cpu_to_le32(upper_32_bits(buf_dma->pa)); + desc->buf_addr_l = cpu_to_le32(lower_32_bits(buf_dma->pa)); +} + +STATIC void sxe2_cmd_rq_descs_fill(enum sxe2_cmd_channel_type chnl_type, + struct sxe2_cmd_queue *queue) +{ + u16 i; + + for (i = 0; i < queue->depth; i++) + sxe2_cmd_rq_desc_fill(chnl_type, queue, i); +} + +STATIC s32 sxe2_cmd_queue_init(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + s32 ret; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + + mutex_lock(&queue->lock); + + if (queue->is_enable) { + ret = 0; + goto l_end; + } + + queue->ntu = 0; + queue->ntc = 0; + + sxe2_cmd_queue_param_init(chnl_type, q_type, queue); + + ret = sxe2_cmd_queue_desc_alloc(adapter, queue); + if (ret) + goto l_end; + + ret = sxe2_cmd_queue_bufs_alloc(adapter, queue); + if (ret) + goto l_buf_alloc_failed; + + sxe2_cmd_queue_ops_init(adapter, chnl_type, q_type); + + ret = sxe2_cmd_queue_enable(adapter, chnl_type, q_type); + if (ret) + goto l_q_enable_failed; + + if (q_type == SXE2_CMD_RQ) { + sxe2_cmd_rq_descs_fill(chnl_type, queue); + queue->ops.write_tail(hw, queue->depth - 1); + } + + queue->is_enable = true; + + goto l_end; + +l_q_enable_failed: + sxe2_cmd_queue_bufs_free(adapter, queue); +l_buf_alloc_failed: + sxe2_cmd_queue_desc_free(adapter, queue); +l_end: + mutex_unlock(&queue->lock); + return ret; +} + +STATIC void sxe2_cmd_queue_deinit(struct sxe2_adapter *adapter, + 
enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + + mutex_lock(&queue->lock); + + if (!queue->is_enable) { + mutex_unlock(&queue->lock); + return; + } + + queue->is_enable = false; + + sxe2_cmd_queue_disable(adapter, chnl_type, q_type); + sxe2_cmd_queue_bufs_free(adapter, queue); + sxe2_cmd_queue_desc_free(adapter, queue); + mutex_unlock(&queue->lock); +} + +STATIC s32 sxe2_fwc_cmd_channel_handshake(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_channel_handshake_req req = {}; + struct sxe2_channel_handshake_resp resp = {}; + + req.drv_ver = cpu_to_le32(SXE2_FW_COMP_VER); + req.drv_mode = SXE2_NIC_MODE_NORMAL; + req.timestamp = cpu_to_le64(ktime_get_real_ns() / NS_TO_MS_UNIT); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_Q_HANDSHAKE, &req, sizeof(req), + &resp, sizeof(resp)); + cmd.is_interruptible = false; + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("cmd channel %d handshake failed: ret %d.\n", chnl_type, + ret); + ret = -EIO; + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_fwc_cmd_channel_close(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_Q_DISABLE, NULL, 0, NULL, 0); + cmd.is_interruptible = false; + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("cmd channel %d disable failed: ret %d.\n", chnl_type, + ret); + return ret; +} + +STATIC s32 sxe2_mbxc_cmd_channel_close(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_fill(&cmd, SXE2_CMD_Q_DISABLE, NULL, 0, NULL, 0, + SXE2_DRV_CMD_DFLT_TIMEOUT, false, true); + ret = sxe2_cmd_drv_exec(adapter, &cmd, SXE2_CMD_TYPE_DRV_TO_HW); + if (ret) + LOG_ERROR_BDF("cmd channel %d disable failed: ret %d.\n", chnl_type, + ret); + return ret; +} + +static s32 sxe2_fw_comp_version_check(struct sxe2_adapter *adapter) +{ + u32 fw_ver = 0; + s32 ret = 0; + + fw_ver = sxe2_fw_comp_ver_get(&adapter->hw); + if (fw_ver == SXE2_REG_INVALID_VALUE) { + ret = -EIO; + LOG_DEV_ERR("get fw comp ver fail\n"); + goto l_out; + } + + adapter->fw_ver.major = SXE2_MK_VER_MAJOR(fw_ver); + adapter->fw_ver.minor = SXE2_MK_VER_MINOR(fw_ver); + + if (adapter->fw_ver.major != SXE2_FW_COMP_MAJOR_VER) { + ret = -EINVAL; + LOG_DEV_ERR("unsupport fw version expected %d.%d received %d.%d\n", + SXE2_FW_COMP_MAJOR_VER, SXE2_FW_COMP_MINOR_VER, + adapter->fw_ver.major, adapter->fw_ver.minor); + } +l_out: + return ret; +} + +STATIC s32 sxe2_cmd_channel_init(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + s32 ret = 0; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, chnl_type); + + channel->chnl_type = chnl_type; + + mutex_lock(&channel->lock); + if (channel->is_enable) + goto l_end; + + ret = sxe2_cmd_queue_init(adapter, chnl_type, SXE2_CMD_TQ); + if (ret) + goto l_end; + + ret = sxe2_cmd_queue_init(adapter, chnl_type, SXE2_CMD_RQ); + if (ret) + goto l_rq_init_failed; + + if (chnl_type == SXE2_CHNL_FW) { + ret = sxe2_fw_comp_version_check(adapter); + if (ret) + goto l_post_init_failed; + + ret = sxe2_fwc_cmd_channel_handshake(adapter, SXE2_CHNL_FW); + if (ret) + goto l_post_init_failed; + } + + channel->is_enable = true; + + mutex_unlock(&channel->lock); + return 0; + 
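+	/* error unwind releases the queues in reverse order of init: RQ first, then TQ */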
+l_post_init_failed: + sxe2_cmd_queue_deinit(adapter, chnl_type, SXE2_CMD_RQ); +l_rq_init_failed: + sxe2_cmd_queue_deinit(adapter, chnl_type, SXE2_CMD_TQ); +l_end: + mutex_unlock(&channel->lock); + return ret; +} + +STATIC void sxe2_msg_list_clean(struct sxe2_adapter *adapter) +{ + struct sxe2_cmd_channel_context *ctxt = &adapter->cmd_channel_ctxt; + struct sxe2_recv_msg *msg; + + mutex_lock(&ctxt->lock); + while (!list_empty(&ctxt->head)) { + msg = list_first_entry(&ctxt->head, struct sxe2_recv_msg, node); + list_del(&msg->node); + kfree(msg); + } + mutex_unlock(&ctxt->lock); +} + +STATIC void sxe2_cmd_channel_deinit(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, chnl_type); + s32 ret; + + mutex_lock(&channel->lock); + if (!channel->is_enable) { + mutex_unlock(&channel->lock); + return; + } + + channel->is_enable = false; + if (chnl_type == SXE2_CHNL_FW) { + ret = sxe2_fwc_cmd_channel_close(adapter, chnl_type); + } else { + ret = sxe2_mbxc_cmd_channel_close(adapter, chnl_type); + if (ret && (ret != -EOWNERDEAD)) + sxe2_trigger_and_wait_resetting(adapter); + } + + sxe2_cmd_queue_deinit(adapter, chnl_type, SXE2_CMD_RQ); + sxe2_cmd_queue_deinit(adapter, chnl_type, SXE2_CMD_TQ); + + sxe2_wait_task_cancel(channel); + mutex_unlock(&channel->lock); +} + +s32 sxe2_cmd_channels_enable(struct sxe2_adapter *adapter) +{ + s32 ret; + + sxe2_cmd_work_init(adapter); + + ret = sxe2_cmd_channel_init(adapter, SXE2_CHNL_FW); + if (ret) { + LOG_DEV_ERR("sxe2_cmd_channel_init fw failed, ret=%d\n", ret); + goto l_fw_chnl_init_failed; + } + ret = sxe2_cmd_channel_init(adapter, SXE2_CHNL_MBX); + if (ret) { + LOG_DEV_ERR("sxe2_cmd_channel_init mbx failed, ret=%d\n", ret); + goto l_mbx_chnl_init_failed; + } + return 0; + +l_mbx_chnl_init_failed: + sxe2_cmd_channel_deinit(adapter, SXE2_CHNL_FW); + +l_fw_chnl_init_failed: + sxe2_cmd_work_exit(adapter); + return ret; +} + +s32 sxe2_cmd_channels_init(struct sxe2_adapter *adapter) +{ + s32 ret; + + sxe2_cmd_channel_init_once(adapter); + + ret = sxe2_cmd_channels_enable(adapter); + if (ret) + goto l_channels_enable_failed; + return 0; +l_channels_enable_failed: + sxe2_cmd_channel_deinit_once(adapter); + + return ret; +} + +void sxe2_cmd_channels_disable(struct sxe2_adapter *adapter) +{ + sxe2_cmd_channel_deinit(adapter, SXE2_CHNL_MBX); + sxe2_cmd_channel_deinit(adapter, SXE2_CHNL_FW); + sxe2_cmd_work_exit(adapter); + sxe2_msg_list_clean(adapter); +} + +s32 sxe2_mbx_channel_enable(struct sxe2_adapter *adapter) +{ + return sxe2_cmd_channel_init(adapter, SXE2_CHNL_MBX); +} + +STATIC void sxe2_mbx_msg_list_clean(struct sxe2_adapter *adapter) +{ + struct sxe2_cmd_channel_context *ctxt = &adapter->cmd_channel_ctxt; + struct sxe2_recv_msg *msg, *msg_tmp; + struct sxe2_cmd_hdr *cmd_hdr; + + mutex_lock(&ctxt->lock); + list_for_each_entry_safe(msg, msg_tmp, &ctxt->head, node) { + cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + if (cmd_hdr->cmd_type == SXE2_CMD_TYPE_VF_TO_PF) { + list_del(&msg->node); + kfree(msg); + } + } + mutex_unlock(&ctxt->lock); +} + +void sxe2_mbx_channel_disable(struct sxe2_adapter *adapter) +{ + sxe2_cmd_channel_deinit(adapter, SXE2_CHNL_MBX); + + sxe2_mbx_msg_list_clean(adapter); +} + +void sxe2_cmd_channels_deinit(struct sxe2_adapter *adapter) +{ + sxe2_cmd_channels_disable(adapter); + sxe2_cmd_channel_deinit_once(adapter); +} + +static inline void sxe2_cmd_session_id_alloc(u64 *session_id) +{ + *session_id = (u64)atomic64_add_return(1, &g_cmd_session_id); 
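+	/* the counter starts from a random seed (sxe2_cmd_session_id_init) and advances atomically, so concurrent callers get distinct session ids */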
+} + +static s32 sxe2_queue_head_read(struct sxe2_adapter *adapter, + struct sxe2_cmd_queue *queue, u16 *head) +{ + struct sxe2_hw *hw = &adapter->hw; + u32 val = queue->ops.read_head(hw); + + if (val > queue->depth) { + LOG_DEBUG_BDF("read_head invalid value %d.\n", val); + return -EIO; + } + + *head = (u16)val; + return 0; +} + +STATIC s32 sxe2_cmd_recv_packet_merge(struct sxe2_adapter *adapter, + struct sxe2_cmd_channel *channel, + struct sxe2_recv_msg *msg, bool *finish) +{ + s32 ret = 0; + struct sxe2_cmd_queue *rq = &channel->queue[SXE2_CMD_RQ]; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + struct sxe2_recv_cache_buff *cache_buff = &rq->cache_buff; + struct sxe2_cmd_hdr *last_hdr = (struct sxe2_cmd_hdr *)cache_buff->buf; + u8 curr_multi_packet = cmd_hdr->multi_packet; + u8 last_multi_packet = last_hdr->multi_packet; + + LOG_DEBUG_BDF("cache finish: %d offset: %d hdr.\n" + " cache trace: 0x%llx session: 0x%llx out_len:%d in_len:%d\t" + "mpacket:0x%x type:%d.\n" + " curr trace: 0x%llx session: 0x%llx out_len:%d in_len:%d\t" + "mpacket:0x%x type:%d.\n", + cache_buff->finish, cache_buff->buf_offset, last_hdr->trace_id, + last_hdr->session_id, last_hdr->tran_out_len, + last_hdr->cur_in_len, last_multi_packet, last_hdr->cmd_type, + cmd_hdr->trace_id, cmd_hdr->session_id, cmd_hdr->tran_out_len, + cmd_hdr->cur_in_len, curr_multi_packet, cmd_hdr->cmd_type); + + if (curr_multi_packet & SXE2_CMD_HDR_MULTI_START) { + if ((last_multi_packet != 0) && + !(last_multi_packet & SXE2_CMD_HDR_MULTI_END)) { + LOG_ERROR_BDF("data lost trace:0x%llx session:0x%llx data len: %d\t" + "current session_id:0x%llx.\n", + last_hdr->trace_id, last_hdr->session_id, + cache_buff->buf_offset, cmd_hdr->session_id); + } + + memset(cache_buff->buf, 0, rq->buf_size); + cache_buff->buf_offset = 0; + } else { + if (last_hdr->session_id != cmd_hdr->session_id) { + LOG_ERROR_BDF("last session_id: 0x%llx current session_id:0x%llx.\n", + last_hdr->session_id, cmd_hdr->session_id); + ret = -EBADSLT; + goto l_mulit_packet; + } + if (((last_multi_packet & SXE2_CMD_HDR_MULTI_CMD_ID_MASK) + 1) != + (curr_multi_packet & SXE2_CMD_HDR_MULTI_CMD_ID_MASK)) { + LOG_ERROR_BDF("last No: %d current No:%d.\n", + (last_multi_packet & + SXE2_CMD_HDR_MULTI_CMD_ID_MASK), + curr_multi_packet & + SXE2_CMD_HDR_MULTI_CMD_ID_MASK); + ret = -EBADRQC; + goto l_mulit_packet; + } + } + memcpy(last_hdr, cmd_hdr, cmd_hdr->hdr_len); + + if (!((curr_multi_packet & SXE2_CMD_HDR_MULTI_START) && + (curr_multi_packet & SXE2_CMD_HDR_MULTI_END))) { + last_hdr->multi_packet = curr_multi_packet; + if ((cache_buff->buf_offset + cmd_hdr->cur_in_len) > rq->buf_size) { + LOG_ERROR_BDF("Buffer overflow: current_len(%d) +\t" + "recv_len(%d) > buffer_size(%d).\n", + cmd_hdr->cur_in_len, cache_buff->buf_offset, + rq->buf_size); + ret = -ENOMSG; + goto l_mulit_packet; + } + memcpy(((u8 *)last_hdr + last_hdr->hdr_len) + cache_buff->buf_offset, + ((u8 *)cmd_hdr + cmd_hdr->hdr_len), cmd_hdr->cur_in_len); + cache_buff->buf_offset += cmd_hdr->cur_in_len; + if (curr_multi_packet & SXE2_CMD_HDR_MULTI_END) { + memcpy(&msg->buf[cmd_hdr->hdr_len], + ((u8 *)last_hdr + last_hdr->hdr_len), + cache_buff->buf_offset); + msg->buf_len = cache_buff->buf_offset + cmd_hdr->hdr_len; + cache_buff->finish = true; + } else { + cache_buff->finish = false; + } + } else { + cache_buff->finish = true; + } + +l_end: + *finish = cache_buff->finish; + return ret; +l_mulit_packet: + last_hdr->session_id = 0; + cache_buff->finish = true; + goto l_end; +} + +STATIC u8 
sxe2_xor8_checksum_get(u8 *buf, u32 len)
+{
+	u32 i = 0;
+	u8 sum = 0;
+
+	for (i = 0; i < len; i++)
+		sum ^= buf[i];
+	return sum;
+}
+
+STATIC s32 sxe2_cmd_recv_single(struct sxe2_adapter *adapter,
+				struct sxe2_cmd_channel *channel,
+				struct sxe2_recv_msg *msg)
+{
+	s32 ret = 0;
+	struct sxe2_hw *hw = &adapter->hw;
+	struct sxe2_cmd_queue *rq = &channel->queue[SXE2_CMD_RQ];
+	struct sxe2_cmd_desc *desc;
+	u16 data_len;
+	void *buf;
+	u8 checksum_recv = 0;
+	u8 checksum_calc = 0;
+
+	desc = SXE2_CMD_QUEUE_DESC(rq, rq->ntc);
+	buf = SXE2_CMD_QUEUE_BUF(rq, rq->ntc);
+
+	if (!(desc->flags & SXE2_CMD_DONE)) {
+		ret = -ENODATA;
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF("rq get the #%dth desc\n", rq->ntc);
+	DATA_DUMP(desc, sizeof(*desc), "rq cmd desc");
+
+	if (le16_to_cpu(desc->ret)) {
+		LOG_ERROR_BDF("rq recv msg failed, ret: %d.\n",
+			      le16_to_cpu(desc->ret));
+		ret = -EIO;
+		goto l_ntc_inc;
+	}
+
+	data_len = le16_to_cpu(desc->data_len);
+	if (data_len > rq->buf_size) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("rq recv msg failed, data_len: %d invalid.\n",
+			      data_len);
+		goto l_ntc_inc;
+	}
+
+	if (channel->chnl_type == SXE2_CHNL_FW) {
+		checksum_calc = sxe2_xor8_checksum_get(buf, data_len);
+		checksum_recv = desc->checksum;
+		if (checksum_calc != checksum_recv) {
+			ret = -EINVAL;
+			LOG_ERROR_BDF("rq recv msg failed, checksum recv:%d not equal calc:%d.\n",
+				      checksum_recv, checksum_calc);
+			goto l_ntc_inc;
+		}
+	}
+
+	DATA_DUMP(buf, data_len, "rq cmd buff");
+	memcpy(&msg->desc, desc, sizeof(msg->desc));
+	if (data_len) {
+		memcpy(msg->buf, buf, data_len);
+		msg->buf_len = data_len;
+	}
+
+	sxe2_cmd_rq_desc_fill(channel->chnl_type, rq, rq->ntc);
+	rq->ops.write_tail(hw, rq->ntc);
+
+l_ntc_inc:
+	SXE2_QUEUE_IDX_INC(rq->ntc, rq->depth);
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_cmd_recv(struct sxe2_adapter *adapter,
+			 struct sxe2_cmd_channel *channel, struct sxe2_recv_msg *msg)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_queue *rq = &channel->queue[SXE2_CMD_RQ];
+	unsigned long expired_time =
+		jiffies + secs_to_jiffies(SXE2_CMD_RECV_MULIT_DFLT_TIMEOUT);
+	bool finish = true;
+	u16 index = 0;
+
+	mutex_lock(&rq->lock);
+
+	if (!rq->is_enable) {
+		ret = -ENODATA;
+		goto l_end;
+	}
+
+	do {
+		ret = sxe2_cmd_recv_single(adapter, channel, msg);
+		if (ret) {
+			if (index == 0 || ret != -ENODATA) {
+				goto l_end;
+			} else {
+				if (!time_before(jiffies, expired_time)) {
+					LOG_ERROR_BDF("packet recv timed out, buf_len: %d\t"
+						      "data_len: %d opcode: %d invalid.\n",
+						      msg->buf_len,
+						      msg->desc.data_len,
+						      msg->desc.opcode);
+					ret = -ETIMEDOUT;
+					break;
+				}
+				udelay(SXE2_CMD_CHNL_RECV_MULIT_PACK_INTERVAL);
+				continue;
+			}
+		}
+		ret = sxe2_cmd_recv_packet_merge(adapter, channel, msg, &finish);
+		if (ret != 0) {
+			LOG_ERROR_BDF("rq recv msg multi packet, buf_len: %d\t"
+				      "data_len: %d opcode: %d invalid.\n",
+				      msg->buf_len, msg->desc.data_len,
+				      msg->desc.opcode);
+			break;
+		}
+		index++;
+	} while (!finish);
+
+l_end:
+	mutex_unlock(&rq->lock);
+	return ret;
+}
+
+STATIC void sxe2_cmd_event_handler(struct sxe2_adapter *adapter,
+				   struct sxe2_recv_msg *msg)
+{
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf;
+	struct sxe2_drv_msg_hdr *msg_hdr = SXE2_DRV_MSG_HDR_PTR(cmd_hdr);
+	u16 event_code = (u16)le32_to_cpu(msg_hdr->op_code);
+	s32 ret;
+
+	ret = (s32)le32_to_cpu(msg_hdr->err_code);
+	if (ret < 0) {
+		LOG_ERROR_BDF("event code %d report failed: %d.\n", event_code, ret);
+		return;
+	}
+
+	ret = sxe2_event_handle(adapter, event_code, SXE2_MSG_BODY(msg_hdr),
+				le32_to_cpu(msg_hdr->data_len));
+	if (ret)
LOG_ERROR_BDF("event code %d handler failed: %d.\n", event_code, + ret); + + LOG_DEBUG_BDF("event code %d receive, result: %d.\n", event_code, ret); +} + +STATIC void sxe2_cmd_rsp_handler(struct sxe2_adapter *adapter, + struct sxe2_cmd_channel *channel, + struct sxe2_recv_msg *msg) +{ + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + struct sxe2_cmd_wait_task *cmd_wait_elem; + bool found = false; + unsigned long flags = 0; + + spin_lock_irqsave(&channel->wq.lock, flags); + hash_for_each_possible(channel->wq.table, cmd_wait_elem, entry, + le64_to_cpu(cmd_hdr->session_id)) { + if (cmd_wait_elem->state != SXE2_CMD_STATE_WAITING || + le64_to_cpu(cmd_hdr->session_id) != cmd_wait_elem->session_id) { + continue; + } + + found = true; + + SXE2_BUG_ON(msg->buf_len > cmd_wait_elem->resp_len); + if (msg->buf_len > cmd_wait_elem->resp_len) { + LOG_ERROR_BDF("msg->buf_len %d more than out_len %d.\n", + msg->buf_len, cmd_wait_elem->resp_len); + cmd_wait_elem->state = SXE2_CMD_STATE_FAULT; + break; + } + memcpy(cmd_wait_elem->resp_data, msg->buf, msg->buf_len); + + cmd_wait_elem->state = SXE2_CMD_STATE_DONE; + } + spin_unlock_irqrestore(&channel->wq.lock, flags); + + if (found) + wake_up(&channel->wq.wq); +} + +STATIC s32 sxe2_recv_msg_check(struct sxe2_adapter *adapter, + struct sxe2_recv_msg *msg) +{ + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + struct sxe2_drv_msg_hdr *msg_hdr; + u32 hdr_len = 0; + + msg_hdr = (struct sxe2_drv_msg_hdr *)((u8 *)cmd_hdr + cmd_hdr->hdr_len); + + if (cmd_hdr->cmd_type == SXE2_CMD_TYPE_CLI) + hdr_len = cmd_hdr->hdr_len; + else + hdr_len = cmd_hdr->hdr_len + msg_hdr->data_offset; + + if (msg->buf_len < hdr_len) { + LOG_ERROR_BDF("recv cmd type %d, buf len %d hdr len :%u invalid.\n", + cmd_hdr->cmd_type, msg->buf_len, hdr_len); + return -EINVAL; + } + + if (le32_to_cpu(cmd_hdr->magic_code) != SXE2_CMD_MAGIC) { + LOG_ERROR_BDF("recv cmd magic check failed.\n"); + return -EINVAL; + } + + if (cmd_hdr->cmd_type != SXE2_CMD_TYPE_CLI) { + if ((u8 *)SXE2_MSG_BODY(msg_hdr) + msg_hdr->data_len > + msg->buf + msg->buf_len) { + LOG_ERROR_BDF("msg hdr buf len %d invalid.\n", + msg_hdr->data_len); + return -EINVAL; + } + } + return 0; +} + +STATIC void sxe2_msg_handle_async(struct sxe2_adapter *adapter, + struct sxe2_recv_msg *msg) +{ + struct sxe2_recv_msg *tmp_msg; + u32 len = sizeof(*tmp_msg) + msg->buf_len; + + tmp_msg = kzalloc(len, GFP_KERNEL); + if (!tmp_msg) { + LOG_DEV_ERR("malloc failed, size: %u.\n", len); + return; + } + memcpy(tmp_msg, msg, len); + INIT_LIST_HEAD(&tmp_msg->node); + + sxe2_msg_list_add(adapter, tmp_msg); + sxe2_msg_handle_work_schedule(adapter); +} + +STATIC void sxe2_cmd_rq_handle(struct sxe2_adapter *adapter, + struct sxe2_cmd_channel *channel, + struct sxe2_recv_msg *msg) +{ + s32 ret; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + + ret = sxe2_recv_msg_check(adapter, msg); + if (ret) + goto l_end; + + ret = (s32)le32_to_cpu(cmd_hdr->ret); + if (ret < 0) { + LOG_ERROR_BDF("recv cmd failed, ret: %d.\n", ret); + goto l_end; + } + + switch (cmd_hdr->cmd_type) { + case SXE2_CMD_TYPE_FW_NOTIFY: + case SXE2_CMD_TYPE_VF_TO_PF: + sxe2_msg_handle_async(adapter, msg); + break; + case SXE2_CMD_TYPE_CLI: + case SXE2_CMD_TYPE_DRV_TO_FW: + sxe2_cmd_rsp_handler(adapter, channel, msg); + break; + default: + LOG_ERROR_BDF("unknown cmd type: %d.\n", cmd_hdr->cmd_type); + SXE2_BUG(); + break; + } + +l_end: + return; +} + +STATIC bool sxe2_cmd_rq_clean(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type 
chnl_type) +{ + s32 ret = 0; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, chnl_type); + struct sxe2_cmd_queue *rq = &channel->queue[SXE2_CMD_RQ]; + struct sxe2_recv_msg *msg; + u16 cleaned = 0; + + msg = kzalloc(sizeof(*msg) + rq->buf_size, GFP_KERNEL); + if (!msg) { + LOG_ERROR_BDF("malloc failed, size: %zu.\n", + sizeof(*msg) + rq->buf_size); + ret = -ENOMEM; + goto l_end; + } + + do { + ret = sxe2_cmd_recv(adapter, channel, msg); + if (ret == -ENODATA) { + ret = 0; + break; + } + if (ret == 0) + sxe2_cmd_rq_handle(adapter, channel, msg); + + cleaned++; + } while (cleaned < SXE2_CMD_RQ_WEIGHT); + + kfree(msg); +l_end: + return !!ret || (cleaned == SXE2_CMD_RQ_WEIGHT); +} + +STATIC void sxe2_cmd_queue_check_hw_error(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type, + enum sxe2_cmd_queue_type q_type) +{ + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, q_type); + u32 hw_err; + + if (chnl_type == SXE2_CHNL_MBX) + goto l_out; + + hw_err = queue->ops.get_error(hw); + + if (hw_err & SXE2_CMD_REG_LEN_CRIT_M) + LOG_DEV_ERR("cmd channel %d queue %d critical error detected.\n", + chnl_type, q_type); + if (hw_err & SXE2_CMD_REG_LEN_VFE_M) + LOG_DEV_ERR("cmd channel %d queue %d VF error detected.\n", + chnl_type, q_type); + +l_out: + return; +} + +bool sxe2_cmd_channel_work(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + return sxe2_cmd_rq_clean(adapter, chnl_type); +} + +void sxe2_cmd_params_fill(struct sxe2_cmd_params *cmd, enum sxe2_drv_cmd_opcode opc, + void *req_data, u32 req_len, void *resp_data, u32 resp_len, + u32 timeout, bool is_interruptible, bool no_resp) +{ + cmd->opcode = opc; + cmd->req_data = req_data; + cmd->req_len = (u16)req_len; + cmd->resp_data = resp_data; + cmd->resp_len = (u16)resp_len; + cmd->is_interruptible = (u8)is_interruptible; + cmd->timeout = timeout; + cmd->no_resp = (u8)no_resp; + + sxe2_trace_id_alloc(&cmd->trace_id); +} + +void sxe2_cmd_params_dflt_fill(struct sxe2_cmd_params *cmd, + enum sxe2_drv_cmd_opcode opc, void *in_data, + u32 in_len, void *out_data, u32 out_len) +{ + sxe2_cmd_params_fill(cmd, opc, in_data, in_len, out_data, out_len, + SXE2_DRV_CMD_DFLT_TIMEOUT, false, false); +} + +void sxe2_cmd_params_no_interruptible_fill(struct sxe2_cmd_params *cmd, + enum sxe2_drv_cmd_opcode opc, + void *req_data, u32 req_len, + void *resp_data, u32 resp_len) +{ + cmd->opcode = opc; + cmd->req_data = req_data; + cmd->req_len = (u16)req_len; + cmd->resp_data = resp_data; + cmd->resp_len = (u16)resp_len; + cmd->is_interruptible = false; + cmd->timeout = SXE2_DRV_CMD_DFLT_TIMEOUT; + cmd->no_resp = false; + + sxe2_trace_id_alloc(&cmd->trace_id); +} + +STATIC void sxe2_cmd_trans(struct sxe2_cmd_params *cmd_params, + struct sxe2_cmd_hdr *cmd_hdr, enum sxe2_cmd_type type, + u64 session_id) +{ + cmd_hdr->magic_code = cpu_to_le32(SXE2_CMD_MAGIC); + cmd_hdr->timeout = cpu_to_le32(cmd_params->timeout); + cmd_hdr->trace_id = cpu_to_le64(cmd_params->trace_id); + cmd_hdr->session_id = cpu_to_le64(session_id); + SXE2_CMD_REQ_LEN(cmd_hdr) = cpu_to_le16(cmd_params->req_len); + SXE2_CMD_RESP_LEN(cmd_hdr) = cpu_to_le16(cmd_params->resp_len); + cmd_hdr->hdr_len = cpu_to_le16((u16)sizeof(*cmd_hdr)); + cmd_hdr->cmd_type = type; + cmd_hdr->no_resp = (u8)cmd_params->no_resp; + cmd_hdr->cur_in_len = cpu_to_le16(cmd_params->req_len); + cmd_hdr->multi_packet = SXE2_CMD_HDR_MULTI_END | SXE2_CMD_HDR_MULTI_START | + (0 & SXE2_CMD_HDR_MULTI_CMD_ID_MASK); 
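+	/* a driver request always fits one descriptor buffer, so the header marks a single packet: MULTI_START and MULTI_END both set, packet id 0 */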
+} + +STATIC void sxe2_cmd_wait_list_fill(struct sxe2_cmd_context *cmd_ctxt) +{ + struct sxe2_cmd_wait_task *wait_task = &cmd_ctxt->wait_task; + + wait_task->session_id = cmd_ctxt->session_id; + wait_task->state = SXE2_CMD_STATE_WAITING; + wait_task->resp_len = cmd_ctxt->trans_info.resp_len; + wait_task->resp_data = cmd_ctxt->trans_info.resp_buff; +} + +STATIC void sxe2_cmd_wait_list_add(struct sxe2_cmd_context *cmd_ctxt) +{ + unsigned long flags; + struct sxe2_cmd_wait_task *wait_task = &cmd_ctxt->wait_task; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(cmd_ctxt->adapter, cmd_ctxt->chnl_type); + + spin_lock_irqsave(&channel->wq.lock, flags); + hash_add(channel->wq.table, &wait_task->entry, wait_task->session_id); + spin_unlock_irqrestore(&channel->wq.lock, flags); +} + +STATIC void sxe2_cmd_wait_list_del(struct sxe2_cmd_context *cmd_ctxt) +{ + unsigned long flags; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(cmd_ctxt->adapter, cmd_ctxt->chnl_type); + + spin_lock_irqsave(&channel->wq.lock, flags); + hash_del(&cmd_ctxt->wait_task.entry); + spin_unlock_irqrestore(&channel->wq.lock, flags); +} + +STATIC s32 sxe2_cmd_check(struct sxe2_cmd_params *cmd_params, + enum sxe2_cmd_type type) +{ + s32 ret = 0; + u16 req_len = cmd_params->req_len; + u16 resp_len = cmd_params->resp_len; + + if (type != SXE2_CMD_TYPE_CLI && type != SXE2_CMD_TYPE_DRV_TO_FW && + type != SXE2_CMD_TYPE_PF_TO_VF && type != SXE2_CMD_TYPE_PF_REPLY_VF && + type != SXE2_CMD_TYPE_DRV_TO_HW) { + ret = -EINVAL; + goto l_end; + } + + if ((!req_len && cmd_params->req_data) || + (req_len && !cmd_params->req_data) || + (!resp_len && cmd_params->resp_data) || + (resp_len && !cmd_params->resp_data)) { + ret = -EINVAL; + goto l_end; + } + + if (type == SXE2_CMD_TYPE_CLI || type == SXE2_CMD_TYPE_DRV_TO_FW) { + if (req_len >= SXE2_CMD_MAX_TRANSMIT_DATA_SIZE || + resp_len >= SXE2_CMD_MAX_TRANSMIT_DATA_SIZE) { + ret = -EINVAL; + goto l_end; + } + } + + if (type == SXE2_CMD_TYPE_PF_TO_VF || type == SXE2_CMD_TYPE_PF_REPLY_VF) { + if (req_len >= SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX || + resp_len >= SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX) { + ret = -EINVAL; + goto l_end; + } + } + +l_end: + return ret; +} + +STATIC void sxe2_cmd_context_fill(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, + enum sxe2_cmd_type type, + struct sxe2_cmd_context *cmd_ctxt) +{ + u32 timeout = cmd_params->timeout ? 
cmd_params->timeout + : SXE2_DRV_CMD_DFLT_TIMEOUT; + + cmd_ctxt->adapter = adapter; + cmd_ctxt->type = type; + + cmd_ctxt->expired_time = jiffies + secs_to_jiffies(timeout); + + sxe2_cmd_session_id_alloc(&cmd_ctxt->session_id); + cmd_ctxt->params = cmd_params; + + switch (type) { + case SXE2_CMD_TYPE_CLI: + case SXE2_CMD_TYPE_DRV_TO_FW: + cmd_ctxt->chnl_type = SXE2_CHNL_FW; + break; + case SXE2_CMD_TYPE_PF_TO_VF: + case SXE2_CMD_TYPE_DRV_TO_HW: + cmd_ctxt->chnl_type = SXE2_CHNL_MBX; + break; + case SXE2_CMD_TYPE_PF_REPLY_VF: + cmd_ctxt->chnl_type = SXE2_CHNL_MBX; + cmd_ctxt->session_id = cmd_params->session_id; + break; + default: + LOG_ERROR_BDF("unknown cmd type: %d.\n", type); + break; + } +} + +STATIC s32 sxe2_cmd_add_hdr(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret = 0; + struct sxe2_cmd_hdr *cmd_hdr; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + struct sxe2_cmd_trans_info *trans_info = + &cmd_ctxt->trans_info; + struct sxe2_cmd_params *params = cmd_ctxt->params; + u64 trace_id = cmd_ctxt->params->trace_id; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + trans_info->req_len = params->req_len + SXE2_CMD_HDR_SIZE; + trans_info->resp_len = params->resp_len + SXE2_CMD_HDR_SIZE; + + trans_info->req_buff = kzalloc(cmd_ctxt->trans_info.req_len, GFP_KERNEL); + if (!trans_info->req_buff) { + ret = -ENOMEM; + trans_info->req_len = 0; + LOG_ERROR_TRACEID("malloc failed: size %u.\n", trans_info->req_len); + goto l_end; + } + cmd_hdr = trans_info->req_buff; + + sxe2_cmd_trans(params, cmd_hdr, cmd_ctxt->type, cmd_ctxt->session_id); + + if (params->req_len) { + if (cmd_ctxt->type == + SXE2_CMD_TYPE_CLI) { + if (copy_from_user(cmd_hdr->body, + (void __user *)params->req_data, + params->req_len)) { + LOG_ERROR_TRACEID( + "cmd trace_id=0x%llx copy from user " + "err\n", + cmd_ctxt->params->trace_id); + ret = -EFAULT; + goto l_copy_failed; + } + } else { + memcpy(cmd_hdr->body, params->req_data, params->req_len); + } + } + + trans_info->resp_buff = kzalloc(trans_info->resp_len, GFP_KERNEL); + if (!trans_info->resp_buff) { + ret = -ENOMEM; + trans_info->resp_len = 0; + LOG_ERROR_TRACEID("malloc failed: size %u.\n", trans_info->resp_len); + goto l_copy_failed; + } + return 0; + +l_copy_failed: + kfree(trans_info->req_buff); + trans_info->req_buff = NULL; + trans_info->req_len = 0; +l_end: + return ret; +} + +s32 sxe2_cmd_strip_hdr(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret = 0; + struct sxe2_cmd_hdr *cmd_hdr; + struct sxe2_cmd_trans_info *trans_info = &cmd_ctxt->trans_info; + struct sxe2_cmd_params *params = cmd_ctxt->params; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + u64 trace_id = cmd_ctxt->params->trace_id; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + + if (trans_info->req_buff) { + kfree(trans_info->req_buff); + trans_info->req_buff = NULL; + trans_info->req_len = 0; + } + + if (trans_info->resp_buff) { + cmd_hdr = trans_info->resp_buff; + if (params->resp_len) { + if (cmd_ctxt->type == + SXE2_CMD_TYPE_CLI) { + if (copy_to_user((void __user *)params->resp_data, + ((u8 *)cmd_hdr + cmd_hdr->hdr_len), + params->resp_len)) { + LOG_ERROR_TRACEID("cmd trace_id=0x%llx copy to user err\n", + cmd_ctxt->params->trace_id); + ret = -EFAULT; + } + } else { + memcpy(params->resp_data, + ((u8 *)cmd_hdr + cmd_hdr->hdr_len), + params->resp_len); + } + } + kfree(trans_info->resp_buff); + trans_info->resp_buff = NULL; + trans_info->resp_len = 0; + } + + return ret; +} + +STATIC s32 sxe2_cmd_check_and_fill(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, 
+				    enum sxe2_cmd_type type,
+				    struct sxe2_cmd_context *cmd_ctxt)
+{
+	s32 ret;
+#ifndef SXE2_TEST
+	u64 trace_id = cmd_params->trace_id;
+#ifdef SXE2_CFG_RELEASE
+	UNUSED(trace_id);
+#endif
+#endif
+
+	ret = sxe2_cmd_check(cmd_params, type);
+	if (ret)
+		goto l_end;
+
+	sxe2_cmd_context_fill(adapter, cmd_params, type, cmd_ctxt);
+
+l_end:
+#ifndef SXE2_TEST
+	LOG_INFO_TRACEID("send cmd: cmd_type:%d, session_id:0x%llx,\t"
+			 "trace_id:0x%llx, is_interruptible:%d\n"
+			 "timeout:%d, opcode:0x%x req_len:%d, out_len:%d, ret:%d.\n",
+			 cmd_ctxt->type, cmd_ctxt->session_id, cmd_params->trace_id,
+			 cmd_params->is_interruptible, cmd_params->timeout,
+			 cmd_params->opcode, cmd_params->req_len,
+			 cmd_params->resp_len, ret);
+#endif
+	return ret;
+}
+
+STATIC void sxe2_cmd_tq_desc_fill(struct sxe2_cmd_context *cmd_ctxt,
+				  struct sxe2_cmd_desc *desc)
+{
+	switch (cmd_ctxt->type) {
+	case SXE2_CMD_TYPE_PF_TO_VF:
+	case SXE2_CMD_TYPE_PF_REPLY_VF:
+		desc->opcode = cpu_to_le16(SXE2_CMD_MBX_TO_VF);
+		desc->custom2 = cpu_to_le32(cmd_ctxt->params->vf_idx);
+		break;
+	case SXE2_CMD_TYPE_VF_TO_PF:
+		desc->opcode = cpu_to_le16(SXE2_CMD_MBX_TO_PF);
+		break;
+	case SXE2_CMD_TYPE_DRV_TO_HW:
+	default:
+		desc->opcode = cpu_to_le16((u16)cmd_ctxt->params->opcode);
+		break;
+	}
+
+	desc->data_len = cpu_to_le16(cmd_ctxt->trans_info.req_len);
+
+	desc->flags |= cpu_to_le16(SXE2_CMD_NO_INTR);
+	if (cmd_ctxt->trans_info.req_len) {
+		desc->flags |= cpu_to_le16(SXE2_CMD_BUF);
+		desc->flags |= cpu_to_le16(SXE2_CMD_READ);
+		if (cmd_ctxt->trans_info.req_len > SXE2_CMD_LARGE_BUF_SIZE)
+			desc->flags |= cpu_to_le16(SXE2_CMD_LARGE_BUF);
+	}
+}
+
+STATIC bool sxe2_cmd_tq_pending(struct sxe2_adapter *adapter,
+				struct sxe2_cmd_queue *queue)
+{
+	u16 val;
+
+	if (sxe2_queue_head_read(adapter, queue, &val))
+		return false;
+
+	return queue->ntc != val;
+}
+
+STATIC u16 sxe2_cmd_tq_put_desc(struct sxe2_adapter *adapter,
+				struct sxe2_cmd_queue *tq)
+{
+	struct sxe2_cmd_desc *desc;
+	struct sxe2_dma_mem *buf_dma;
+
+	while (sxe2_cmd_tq_pending(adapter, tq)) {
+		desc = SXE2_CMD_QUEUE_DESC(tq, tq->ntc);
+		buf_dma = &tq->buf[tq->ntc];
+
+		memset(desc, 0, sizeof(*desc));
+		memset(buf_dma->va, 0, buf_dma->size);
+		SXE2_QUEUE_IDX_INC(tq->ntc, tq->depth);
+	}
+
+	return SXE2_CMD_QUEUE_DESC_UNUSED(tq);
+}
+
+STATIC void sxe2_cmd_tq_get_desc(struct sxe2_cmd_context *cmd_ctxt,
+				 struct sxe2_cmd_desc **desc)
+{
+	struct sxe2_cmd_queue *tq =
+		SXE2_ADAPTER_TO_CMD_QUEUE(cmd_ctxt->adapter,
+					  cmd_ctxt->chnl_type,
+					  SXE2_CMD_TQ);
+	struct sxe2_adapter *adapter = cmd_ctxt->adapter;
+	struct sxe2_dma_mem *buf_dma;
+	void *buf;
+	u64 trace_id = cmd_ctxt->params->trace_id;
+#ifdef SXE2_CFG_RELEASE
+	UNUSED(trace_id);
+#endif
+
+	LOG_DEBUG_TRACEID("tq get desc #%d\n", tq->ntu);
+
+	*desc = SXE2_CMD_QUEUE_DESC(tq, tq->ntu);
+	buf_dma = &tq->buf[tq->ntu];
+	buf = buf_dma->va;
+	SXE2_QUEUE_IDX_INC(tq->ntu, tq->depth);
+
+	sxe2_cmd_tq_desc_fill(cmd_ctxt, *desc);
+
+	if (cmd_ctxt->trans_info.req_buff) {
+		memcpy(buf, cmd_ctxt->trans_info.req_buff,
+		       cmd_ctxt->trans_info.req_len);
+		(*desc)->buf_addr_h = cpu_to_le32(upper_32_bits(buf_dma->pa));
+		(*desc)->buf_addr_l = cpu_to_le32(lower_32_bits(buf_dma->pa));
+		if (cmd_ctxt->chnl_type == SXE2_CHNL_FW)
+			(*desc)->checksum =
+				sxe2_xor8_checksum_get(buf,
+						       cmd_ctxt->trans_info.req_len);
+	}
+}
+
+STATIC s32 sxe2_cmd_wait_desc_wb(struct sxe2_cmd_context *cmd_ctxt,
+				 struct sxe2_cmd_desc *desc)
+{
+	s32 ret = 0;
+	unsigned long expired_time = cmd_ctxt->expired_time;
+	struct sxe2_adapter *adapter =
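+	/*
+	 * Ring-ownership sketch (illustrative): the TQ is a ring shared with
+	 * the device.  sxe2_cmd_tq_put_desc() above reclaims every slot the
+	 * hardware head has moved past, zeroing descriptor and bounce buffer,
+	 * and SXE2_CMD_QUEUE_DESC_UNUSED() is assumed to compute free space
+	 * as something like:
+	 *
+	 *	unused = (ntc > ntu) ? ntc - ntu - 1
+	 *			     : depth + ntc - ntu - 1;
+	 *
+	 * A return of 0 therefore means the ring is full, and the caller
+	 * backs off with -EAGAIN instead of overwriting in-flight entries.
+	 */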
cmd_ctxt->adapter; + u64 trace_id = cmd_ctxt->params->trace_id; + unsigned long time_out = 0; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + + udelay(6); + + time_out = (unsigned long)(jiffies + + secs_to_jiffies(SXE2_CMD_TQ_WB_DFLT_TIMEOUT)); + expired_time = min(expired_time, time_out); + do { + if (SXE2_WB_DONE(desc)) + break; + if (cmd_ctxt->params->is_interruptible) { + if (msleep_interruptible(SXE2_CMD_WB_WAIT_INTERVAL) && + signal_pending_is_interrupt()) { + ret = -ECANCELED; + LOG_DEV_INFO("[trace id 0x%llx] cmd interrupted,\t" + "exit polling\n", + trace_id); + goto l_end; + } + } else { + usleep_range(SXE2_WAIT_DONE_MIN, SXE2_WAIT_DONE_MAX); + } + } while (time_before(jiffies, expired_time)); + + if (!SXE2_WB_DONE(desc)) { + ret = -ETIMEDOUT; + sxe2_cmd_queue_check_hw_error(adapter, cmd_ctxt->chnl_type, + SXE2_CMD_TQ); + LOG_DEBUG_TRACEID("send cmd timeout, opcode: 0x%x, ret: %d.\n", + cmd_ctxt->params->opcode, ret); + } + +l_end: + return ret; +} + +STATIC s32 sxe2_cmd_desc_err_trans(struct sxe2_cmd_context *cmd_ctxt, + struct sxe2_cmd_desc *desc) +{ + s32 ret; + u16 desc_ret = le16_to_cpu(desc->ret); + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + u64 trace_id = cmd_ctxt->params->trace_id; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + + if (desc_ret) { + LOG_ERROR_TRACEID("send cmd failed, channel: %d, opcode: 0x%x, ret: %u.\n", + cmd_ctxt->chnl_type, cmd_ctxt->params->opcode, + desc_ret); + } + + switch (desc_ret) { + case 0: + ret = 0; + break; + case SXE2_CMD_DESC_ERR_DES_ERR: + case SXE2_CMD_DESC_ERR_BUF_ERR: + case SXE2_CMD_DESC_ERR_BUF_NUM_ERR: + case SXE2_CMD_DESC_ERR_SRC_BUSY: + ret = -EAGAIN; + break; + default: + ret = -EIO; + break; + } + + return ret; +} + +STATIC s32 sxe2_cmd_send_single(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_cmd_queue *tq = + SXE2_ADAPTER_TO_CMD_QUEUE(cmd_ctxt->adapter, + cmd_ctxt->chnl_type, + SXE2_CMD_TQ); + struct sxe2_cmd_desc *desc = NULL; + u16 head; + u64 trace_id = cmd_ctxt->params->trace_id; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + + if (!tq->is_enable) { + ret = -EBUSY; + goto l_end; + } + + if (sxe2_queue_head_read(adapter, tq, &head)) { + ret = -EIO; + goto l_end; + } + + if (sxe2_cmd_tq_put_desc(adapter, tq) == 0) { + LOG_DEV_ERR("cmd queue is full, head: %d, ntc: %d.\n", head, + tq->ntc); + ret = -EAGAIN; + LOG_ERROR_TRACEID("cmd queue full, head: %d.\n", head); + goto l_end; + } + + sxe2_cmd_tq_get_desc(cmd_ctxt, &desc); + + DATA_DUMP(desc, sizeof(*desc), "tq cmd desc before"); + DATA_DUMP(cmd_ctxt->trans_info.req_buff, cmd_ctxt->trans_info.req_len, + "tq cmd buff"); + + tq->ops.write_tail(hw, tq->ntu); + sxe2_flush(hw); + + ret = sxe2_cmd_wait_desc_wb(cmd_ctxt, desc); + if (ret) + goto l_end; + + ret = sxe2_cmd_desc_err_trans(cmd_ctxt, desc); + +l_end: + if (desc) + DATA_DUMP(desc, sizeof(*desc), "tq cmd desc after"); + return ret; +} + +STATIC s32 sxe2_cmd_send(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + struct sxe2_cmd_queue *tq = + SXE2_ADAPTER_TO_CMD_QUEUE(cmd_ctxt->adapter, + cmd_ctxt->chnl_type, + SXE2_CMD_TQ); + u16 trans_in_len = 0; + u16 current_offset = SXE2_CMD_HDR_SIZE; + u8 multi_packet = 0; + u8 packet_total_num = 1; + u8 packet_index = 0; + u16 retry_cnt = 0; + struct sxe2_cmd_trans_info *trans_info = &cmd_ctxt->trans_info; + struct sxe2_cmd_hdr *cmd_hdr = trans_info->req_buff; + + trans_in_len = 
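+	/*
+	 * Write-back wait sketch (illustrative): sxe2_cmd_wait_desc_wb() above
+	 * bounds its poll loop by the smaller of the command deadline and the
+	 * queue write-back timeout:
+	 *
+	 *	deadline = min(cmd_ctxt->expired_time,
+	 *		       jiffies + secs_to_jiffies(SXE2_CMD_TQ_WB_DFLT_TIMEOUT));
+	 *
+	 * and sxe2_cmd_desc_err_trans() folds descriptor status into errnos:
+	 * transient conditions (descriptor or buffer bookkeeping, source
+	 * busy) become -EAGAIN so the retry loop below can resend, anything
+	 * else becomes -EIO.
+	 */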
trans_info->req_len; + + if (cmd_ctxt->chnl_type == SXE2_CHNL_FW) + packet_total_num = (u8)DIV_ROUND_UP(trans_in_len - SXE2_CMD_HDR_SIZE, + SXE2_CMD_ATQ_SEND_APP_MAX_LEN); + + mutex_lock(&tq->lock); + do { + multi_packet = 0; + if (packet_index == 0) + multi_packet |= SXE2_CMD_HDR_MULTI_START; + if ((packet_index + 1) == packet_total_num) + multi_packet |= SXE2_CMD_HDR_MULTI_END; + multi_packet |= packet_index; + cmd_hdr->multi_packet = multi_packet; + + if ((current_offset + SXE2_CMD_ATQ_SEND_APP_MAX_LEN) > trans_in_len) + trans_info->req_len = trans_in_len - current_offset + + SXE2_CMD_HDR_SIZE; + else + trans_info->req_len = SXE2_CMD_ATQ_SEND_MAX_LEN; + cmd_hdr->cur_in_len = trans_info->req_len - SXE2_CMD_HDR_SIZE; + + if (packet_index != 0) { + (void)memmove(trans_info->req_buff + SXE2_CMD_HDR_SIZE, + trans_info->req_buff + current_offset, + trans_info->req_len - SXE2_CMD_HDR_SIZE); + } + + LOG_DEBUG_BDF("Tran Len:%u Packet Num:%u-%u Flag:0x%x offset:%u\t" + "current packet len:%u.\n", + trans_in_len, packet_index, packet_total_num, + multi_packet, current_offset, cmd_hdr->cur_in_len); + retry_cnt = 0; + do { + ret = sxe2_cmd_send_single(cmd_ctxt); + if (ret != -EAGAIN) + break; + mdelay(SXE2_CMD_RETRY_INTERVAL); + + } while (++retry_cnt < SXE2_CMD_RETRY_COUNT); + if (ret != 0) + break; + current_offset += (trans_info->req_len - SXE2_CMD_HDR_SIZE); + packet_index++; + } while (packet_index < packet_total_num); + + mutex_unlock(&tq->lock); + return ret; +} + +STATIC s32 sxe2_cmd_wait_rsp(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, cmd_ctxt->chnl_type); + s32 timeout; + u64 trace_id = cmd_ctxt->params->trace_id; + + while (1) { + timeout = (s32)(cmd_ctxt->expired_time - jiffies); + if (timeout < 0) + break; + + if (cmd_ctxt->params->is_interruptible) { + ret = + (s32)wait_event_interruptible_timeout(channel->wq.wq, + (cmd_ctxt->wait_task.state != + SXE2_CMD_STATE_WAITING), + timeout); + } else { + ret = (s32)wait_event_timeout(channel->wq.wq, + (cmd_ctxt->wait_task.state != + SXE2_CMD_STATE_WAITING), + timeout); + } + + if (ret == -ERESTARTSYS && signal_pending_is_interrupt()) { + ret = -ECANCELED; + LOG_DEV_INFO("[trace id 0x%llx] cmd interrupted, exit wait.\n", + trace_id); + goto l_end; + } else if (ret == -ERESTARTSYS && !signal_pending_is_interrupt()) { + msleep(SXE2_WAIT_INTERRUPTIBLE_INTERVAL); + continue; + } else { + break; + } + } + + switch (cmd_ctxt->wait_task.state) { + case SXE2_CMD_STATE_WAITING: + ret = -ETIMEDOUT; + cmd_ctxt->wait_task.state = SXE2_CMD_STATE_CANCELED; + LOG_WARN_TRACEID("cmd timeout, exit wait.\n"); + goto l_end; + case SXE2_CMD_STATE_CANCELED: + ret = -ECANCELED; + LOG_WARN_TRACEID("cmd canceled, exit wait.\n"); + goto l_end; + case SXE2_CMD_STATE_FAULT: + ret = -EFAULT; + LOG_WARN_TRACEID("cmd fault, exit wait.\n"); + goto l_end; + case SXE2_CMD_STATE_DONE: + ret = 0; + break; + default: + LOG_DEV_WARN("Unexpected wait queue state: %d.\n", + cmd_ctxt->wait_task.state); + SXE2_BUG(); + break; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_cmd_wait_rsp_polling(struct sxe2_cmd_context *cmd_ctxt) +{ + s32 ret; + struct sxe2_adapter *adapter = cmd_ctxt->adapter; + struct sxe2_cmd_channel *channel = + SXE2_ADAPTER_TO_CMD_CHANNEL(adapter, cmd_ctxt->chnl_type); + struct sxe2_recv_msg *event; + u64 trace_id = cmd_ctxt->params->trace_id; + + event = kzalloc(sizeof(*event) + channel->queue[SXE2_CMD_RQ].buf_size, + GFP_KERNEL); + 
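+	/*
+	 * Segmentation sketch (illustrative): for the FW channel,
+	 * sxe2_cmd_send() above splits a request that exceeds one descriptor
+	 * payload into packet_total_num segments, keeping a single header at
+	 * the front of req_buff and memmove()ing each body segment up behind
+	 * it.  The per-segment flag word is assembled as:
+	 *
+	 *	multi_packet = packet_index
+	 *		     | (first ? SXE2_CMD_HDR_MULTI_START : 0)
+	 *		     | (last  ? SXE2_CMD_HDR_MULTI_END   : 0);
+	 *
+	 * Each segment is retried up to SXE2_CMD_RETRY_COUNT times on -EAGAIN
+	 * before the whole send fails.
+	 */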
if (!event) { + LOG_ERROR_TRACEID("malloc failed, size: %zu.\n", + sizeof(*event) + channel->queue[SXE2_CMD_RQ] + .buf_size); + ret = -ENOMEM; + goto l_end; + } + + do { + ret = sxe2_cmd_recv(adapter, channel, event); + if (ret == 0) { + struct sxe2_cmd_hdr *cmd_hdr = + (struct sxe2_cmd_hdr *)event->buf; + + if (le32_to_cpu(cmd_hdr->magic_code) != SXE2_CMD_MAGIC) { + LOG_ERROR_TRACEID("recv cmd magic check failed.\n"); + ret = -EIO; + goto l_end; + } + ret = (s32)le32_to_cpu(cmd_hdr->ret); + if (ret < 0) { + LOG_ERROR_TRACEID("recv cmd failed, ret: %d.\n", + ret); + ret = -EIO; + goto l_end; + } + if (cmd_hdr->cmd_type != cmd_ctxt->type || + le64_to_cpu(cmd_hdr->session_id) != + cmd_ctxt->session_id) { + LOG_ERROR_TRACEID("recv invalid cmd, type: %d,\t" + "session id: 0x%llx cmd_ctxt session_id: 0x%llx .\n", + cmd_hdr->cmd_type, + le64_to_cpu(cmd_hdr->session_id), + le64_to_cpu(cmd_ctxt->session_id)); + ret = -ETIMEDOUT; + continue; + } + + SXE2_BUG_ON(event->buf_len > cmd_ctxt->trans_info.resp_len); + if (event->buf_len > cmd_ctxt->trans_info.resp_len) { + ret = -EIO; + LOG_DEV_ERR("[trace id 0x%llx] rq receive error,\t" + "buf_len %d, resp_len %d.\n", + trace_id, event->buf_len, + cmd_ctxt->trans_info.resp_len); + goto l_end; + } + memcpy(cmd_ctxt->trans_info.resp_buff, event->buf, + event->buf_len); + goto l_end; + } else if (ret != -ENODATA) { + LOG_DEV_INFO("[trace id 0x%llx] rq receive error cmd, ret %d.\n", + trace_id, ret); + break; + } + if (cmd_ctxt->params->is_interruptible) { + if (msleep_interruptible(SXE2_CMD_WB_WAIT_INTERVAL) && + signal_pending_is_interrupt()) { + ret = -ECANCELED; + LOG_DEV_INFO("[trace id 0x%llx] cmd interrupted,\t" + "exit polling\n", + trace_id); + goto l_end; + } + } else { + usleep_range(SXE2_WAIT_DONE_MIN, SXE2_WAIT_DONE_MAX); + } + } while (time_before(jiffies, (unsigned long)cmd_ctxt->expired_time)); + +l_end: + kfree(event); + if (ret == -ENODATA) + ret = -ETIMEDOUT; + return ret; +} + +static inline bool sxe2_cmd_exec_mode_get(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, + enum sxe2_cmd_exec_mode *mode) +{ + enum sxe2_dev_state state; + enum sxe2_reset_type reset_type; + bool is_resetting; + + sxe2_dev_state_get(adapter, &state, &reset_type); + + if (state == SXE2_DEVSTATE_RESETTING) { + is_resetting = true; + *mode = SXE2_CMD_EXEC_NO_RESP; + } else { + is_resetting = false; + if (cmd_params->no_resp) + *mode = SXE2_CMD_EXEC_NO_RESP; + else if (adapter->cmd_channel_ctxt.mode == SXE2_CMD_POLLING) + *mode = SXE2_CMD_EXEC_POLLING; + else + *mode = SXE2_CMD_EXEC_NOTIFY; + } + + return is_resetting; +} + +STATIC s32 sxe2_cmd_exec(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params, enum sxe2_cmd_type type) +{ + s32 ret = 0; + struct sxe2_cmd_context cmd_ctxt = {0}; + struct sxe2_cmd_hdr *cmd_hdr; + enum sxe2_cmd_exec_mode mode; + unsigned long flags; + u64 trace_id = cmd_params->trace_id; + bool is_resetting; + +#ifdef SXE2_CFG_RELEASE + UNUSED(trace_id); +#endif + + ret = sxe2_cmd_check_and_fill(adapter, cmd_params, type, &cmd_ctxt); + if (ret) + goto l_end; + + ret = sxe2_cmd_add_hdr(&cmd_ctxt); + if (ret) + goto l_end; + + sxe2_cmd_wait_list_fill(&cmd_ctxt); + + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.cmd_list_lock, flags); + is_resetting = sxe2_cmd_exec_mode_get(adapter, cmd_params, &mode); + if (is_resetting) { + ret = -EOWNERDEAD; + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.cmd_list_lock, flags); + goto l_strip_hdr; + } else { + if (mode == SXE2_CMD_EXEC_NOTIFY) + sxe2_cmd_wait_list_add(&cmd_ctxt); + 
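+		/*
+		 * Response-matching sketch (illustrative): in notify mode the
+		 * receive worker wakes the waiter hashed by session_id, and an
+		 * -ERESTARTSYS from wait_event_interruptible_timeout() only
+		 * cancels the command when the pending signal is a genuine
+		 * interrupt.  In polling mode the RQ is drained inline and
+		 * stale replies are skipped by an identity check:
+		 *
+		 *	if (hdr->cmd_type != ctxt->type ||
+		 *	    le64_to_cpu(hdr->session_id) != ctxt->session_id)
+		 *		continue;
+		 */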
spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.cmd_list_lock, flags); + } + + ret = sxe2_cmd_send(&cmd_ctxt); + if (ret == -EAGAIN) { + ret = -EBUSY; + goto l_list_del; + } else if (ret == -ECANCELED) { + goto l_cancel; + } else if (ret) { + goto l_list_del; + } + + if (mode != SXE2_CMD_EXEC_NO_RESP) { + if (mode == SXE2_CMD_EXEC_NOTIFY) + ret = sxe2_cmd_wait_rsp(&cmd_ctxt); + else + ret = sxe2_cmd_wait_rsp_polling(&cmd_ctxt); + if (ret == -ECANCELED || ret == -ETIMEDOUT) + goto l_cancel; + else if (ret) + goto l_list_del; + cmd_hdr = cmd_ctxt.trans_info.resp_buff; + ret = (s32)cmd_hdr->ret; + if (unlikely(ret < 0)) { + LOG_ERROR_TRACEID("cmd transmit failed, ret: %d.\n", ret); + ret = -EIO; + } + } + + goto l_list_del; + +l_cancel: +l_list_del: + if (mode == SXE2_CMD_EXEC_NOTIFY) + sxe2_cmd_wait_list_del(&cmd_ctxt); +l_strip_hdr: + if (sxe2_cmd_strip_hdr(&cmd_ctxt)) + ret = -EFAULT; +l_end: +#ifdef SXE2_CFG_DEBUG + if (ret && (type == SXE2_CMD_TYPE_CLI || type == SXE2_CMD_TYPE_DRV_TO_FW)) + sxe2_dump_fwc(adapter); +#endif + return ret; +} + +STATIC s32 sxe2_cmd_add_msg_hdr(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *params, + struct sxe2_cmd_params *params_with_hdr) +{ + s32 ret = 0; + struct sxe2_drv_msg_hdr *msg_hdr; + + params_with_hdr->vf_idx = params->vf_idx; + params_with_hdr->err_code = params->err_code; + params_with_hdr->req_len = params->req_len + SXE2_DRV_MSG_HDR_SIZE; + params_with_hdr->resp_len = params->resp_len + SXE2_DRV_MSG_HDR_SIZE; + params_with_hdr->session_id = params->session_id; + + params_with_hdr->req_data = kzalloc(params_with_hdr->req_len, GFP_KERNEL); + if (!params_with_hdr->req_data) { + ret = -ENOMEM; + params_with_hdr->req_len = 0; + LOG_ERROR_BDF("malloc failed: size %u.\n", params_with_hdr->req_len); + goto l_end; + } + msg_hdr = params_with_hdr->req_data; + + msg_hdr->err_code = cpu_to_le32((u32)params->err_code); + msg_hdr->op_code = cpu_to_le32(params->opcode); + msg_hdr->data_offset = cpu_to_le32(SXE2_DRV_MSG_HDR_SIZE); + msg_hdr->data_len = cpu_to_le32((u32)params->req_len); + msg_hdr->vf_id = cpu_to_le16(SXE2_VF_ID_INVAL); + if (params->req_len) + memcpy(msg_hdr->body, params->req_data, params->req_len); + + params_with_hdr->resp_data = kzalloc(params_with_hdr->resp_len, GFP_KERNEL); + if (!params_with_hdr->resp_data) { + ret = -ENOMEM; + params_with_hdr->resp_len = 0; + LOG_ERROR_BDF("malloc failed: size %u.\n", + params_with_hdr->resp_len); + goto l_malloc_failed; + } + return 0; + +l_malloc_failed: + kfree(params_with_hdr->req_data); + params_with_hdr->req_data = NULL; + params_with_hdr->req_len = 0; +l_end: + return ret; +} + +STATIC void sxe2_cmd_strip_msg_hdr(struct sxe2_cmd_params *params, + struct sxe2_cmd_params *params_with_hdr) +{ + struct sxe2_drv_msg_hdr *msg_hdr; + + if (params_with_hdr->req_data) { + kfree(params_with_hdr->req_data); + params_with_hdr->req_data = NULL; + params_with_hdr->req_len = 0; + } + + if (params_with_hdr->resp_data) { + msg_hdr = params_with_hdr->resp_data; + if (params->resp_len) + memcpy(params->resp_data, SXE2_MSG_BODY(msg_hdr), + params->resp_len); + kfree(params_with_hdr->resp_data); + params_with_hdr->resp_data = NULL; + params_with_hdr->resp_len = 0; + } +} + +s32 sxe2_err_code_trans_fw(struct sxe2_adapter *adapter, u64 trace_id, s32 err) +{ + s32 ret = 0; + + if (unlikely(err < 0)) + LOG_ERROR_TRACEID("drv cmd exec failed, err: %d.\n", err); + + if (err > -SXE2_CMD_DRV_HW_OP_ERR) + return err; + + switch (err) { + case SXE2_CMD_DRV_SUCCESS: + case SXE2_CMD_DRV_LINK_REBUILD_FAILED: + ret = 0; 
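+		/*
+		 * Mapping sketch (illustrative): firmware reports driver-level
+		 * status codes in-band, and sxe2_err_code_trans_fw() collapses
+		 * them into conventional errnos so callers never see raw
+		 * firmware values:
+		 *
+		 *	"no resource" family   -> -ENOMEM
+		 *	"unsupported" family   -> -EOPNOTSUPP
+		 *	"timeout" family       -> -ETIMEDOUT
+		 *	everything unmapped    -> -EIO
+		 */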
+		break;
+	case -SXE2_CMD_DRV_NO_FREE_VSI:
+	case -SXE2_CMD_DRV_HW_NOSPC:
+	case -SXE2_CMD_DRV_FW_NOMEM:
+	case -SXE2_CMD_DRV_HW_NO_RES:
+	case -SXE2_CMD_DRV_TXSCHED_TEID_ALLOC_FAILED:
+	case -SXE2_CMD_DRV_TXSCHED_CHILDIDX_ALLOC_FAILED:
+	case -SXE2_CMD_DRV_TXSCHED_ALLOC_FAILED:
+		ret = -ENOMEM;
+		break;
+	case -SXE2_CMD_DRV_HW_OP_ERR:
+	case -SXE2_CMD_DRV_UNSUPPORT:
+		ret = -EOPNOTSUPP;
+		break;
+	case -SXE2_CMD_DRV_HW_EXIST:
+	case -SXE2_CMD_DRV_HW_HID_EXIST:
+		ret = -EEXIST;
+		break;
+	case -SXE2_CMD_DRV_HW_TIMEOUT:
+	case -SXE2_CMD_DRV_TXSCHED_TIMEOUT:
+		ret = -ETIMEDOUT;
+		break;
+	case -SXE2_OPT_DEV_BUSY:
+		ret = -EBUSY;
+		break;
+	case -SXE2_CMD_DRV_PARAM_INVALID:
+	case -SXE2_CMD_DRV_UDP_TUNNEL_WRONG_PORT:
+		ret = -EINVAL;
+		break;
+	case -SXE2_CMD_DUMP_LOG_FAILED:
+	case -SXE2_CMD_DRV_RXQ_CFG_FAIL:
+	case -SXE2_CMD_DRV_TXQ_EN_FAIL:
+	case -SXE2_CMD_DRV_TXQ_DISA_FAIL:
+	case -SXE2_CMD_DRV_PFR_FAILED:
+	case -SXE2_CMD_DRV_VFR_FAILED:
+	case -SXE2_CMD_DRV_HW_RETURN:
+	case -SXE2_CMD_DRV_HW_MISMATCH:
+	case -SXE2_CMD_DRV_HW_NOENT:
+	case -SXE2_CMD_DRV_TLV_ERROR:
+	case -SXE2_CMD_DRV_TXSCHED_CFG_FAILED:
+	case -SXE2_CMD_DRV_LINK_UPDATE_FAILED:
+	default:
+		ret = -EIO;
+		break;
+	}
+
+	return ret;
+}
+
+s32 sxe2_cmd_drv_exec(struct sxe2_adapter *adapter,
+		      struct sxe2_cmd_params *cmd_params, enum sxe2_cmd_type type)
+{
+	s32 ret;
+	struct sxe2_cmd_params params_with_hdr = {0};
+	struct sxe2_drv_msg_hdr *msg_hdr;
+
+	if (type == SXE2_CMD_TYPE_PF_TO_VF || type == SXE2_CMD_TYPE_PF_REPLY_VF) {
+		if (cmd_params->req_len > SXE2_DRV_CMD_MAX_MSG_SIZE_MBX ||
+		    cmd_params->resp_len > SXE2_DRV_CMD_MAX_MSG_SIZE_MBX) {
+			ret = -EINVAL;
+			goto l_end;
+		}
+	} else if ((cmd_params->req_len > SXE2_DRV_CMD_MAX_MSG_SIZE) ||
+		   (cmd_params->resp_len > SXE2_DRV_CMD_MAX_MSG_SIZE)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	memcpy(&params_with_hdr, cmd_params, sizeof(*cmd_params));
+	ret = sxe2_cmd_add_msg_hdr(adapter, cmd_params, &params_with_hdr);
+	if (ret)
+		goto l_end;
+
+	ret = sxe2_cmd_exec(adapter, &params_with_hdr, type);
+	if (ret)
+		goto l_cmd_failed;
+
+	msg_hdr = params_with_hdr.resp_data;
+
+	ret = sxe2_err_code_trans_fw(adapter, cmd_params->trace_id,
+				     (s32)le32_to_cpu(msg_hdr->err_code));
+
+l_cmd_failed:
+	sxe2_cmd_strip_msg_hdr(cmd_params, &params_with_hdr);
+l_end:
+	return ret;
+}
+
+s32 sxe2_cmd_fw_exec(struct sxe2_adapter *adapter,
+		     struct sxe2_cmd_params *cmd_params)
+{
+	return sxe2_cmd_drv_exec(adapter, cmd_params, SXE2_CMD_TYPE_DRV_TO_FW);
+}
+
+s32 sxe2_cmd_mbx_reply(struct sxe2_adapter *adapter,
+		       struct sxe2_cmd_params *cmd_params)
+{
+	return sxe2_cmd_drv_exec(adapter, cmd_params, SXE2_CMD_TYPE_PF_REPLY_VF);
+}
+
+s32 sxe2_cmd_mbx_exec(struct sxe2_adapter *adapter,
+		      struct sxe2_cmd_params *cmd_params)
+{
+	return sxe2_cmd_drv_exec(adapter, cmd_params, SXE2_CMD_TYPE_PF_TO_VF);
+}
+
+s32 sxe2_cmd_cli_exec(struct sxe2_adapter *adapter,
+		      struct sxe2_cmd_params *cmd_params)
+{
+	return sxe2_cmd_exec(adapter, cmd_params, SXE2_CMD_TYPE_CLI);
+}
+
+void sxe2_wait_task_cancel(struct sxe2_cmd_channel *channel)
+{
+	struct sxe2_cmd_wait_task *cmd_wait_elem;
+	u32 bkt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->wq.lock, flags);
+	hash_for_each(channel->wq.table, bkt, cmd_wait_elem, entry)
+		cmd_wait_elem->state = SXE2_CMD_STATE_CANCELED;
+	spin_unlock_irqrestore(&channel->wq.lock, flags);
+
+	wake_up(&channel->wq.wq);
+}
+
+void sxe2_wait_task_cancel_all(struct sxe2_adapter *adapter)
+{
+	sxe2_wait_task_cancel(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_FW]);
+
sxe2_wait_task_cancel(&adapter->cmd_channel_ctxt.channel[SXE2_CHNL_MBX]); +} + +void sxe2_msg_list_add(struct sxe2_adapter *adapter, struct sxe2_recv_msg *msg) +{ + struct sxe2_cmd_channel_context *ctxt = &adapter->cmd_channel_ctxt; + + mutex_lock(&ctxt->lock); + list_add_tail(&msg->node, &ctxt->head); + mutex_unlock(&ctxt->lock); +} + +void sxe2_msg_list_del(struct sxe2_adapter *adapter, struct sxe2_recv_msg **msg) +{ + struct sxe2_cmd_channel_context *ctxt = &adapter->cmd_channel_ctxt; + + mutex_lock(&ctxt->lock); + + if (list_empty(&ctxt->head)) { + *msg = NULL; + mutex_unlock(&ctxt->lock); + return; + } + + *msg = list_first_entry(&ctxt->head, struct sxe2_recv_msg, node); + list_del(&(*msg)->node); + mutex_unlock(&ctxt->lock); +} + +bool sxe2_cmd_rq_pending(struct sxe2_adapter *adapter, + enum sxe2_cmd_channel_type chnl_type) +{ + u16 val; + struct sxe2_cmd_queue *queue = + SXE2_ADAPTER_TO_CMD_QUEUE(adapter, chnl_type, SXE2_CMD_RQ); + + if (sxe2_queue_head_read(adapter, queue, &val)) + return false; + + return queue->ntc != val; +} + +STATIC void sxe2_rq_recv_work_complete(struct sxe2_adapter *adapter) +{ + BUG_ON(!test_bit(SXE2_RQ_RECV_WORK_SCHED, + &adapter->cmd_channel_ctxt.recv_work_state)); + + smp_mb__before_atomic(); + clear_bit(SXE2_RQ_RECV_WORK_SCHED, + &adapter->cmd_channel_ctxt.recv_work_state); +} + +STATIC void sxe2_msg_handle_work_complete(struct sxe2_adapter *adapter) +{ + BUG_ON(!test_bit(SXE2_MSG_HANDLE_WORK_SCHED, + &adapter->cmd_channel_ctxt.handle_work_state)); + smp_mb__before_atomic(); + clear_bit(SXE2_MSG_HANDLE_WORK_SCHED, + &adapter->cmd_channel_ctxt.handle_work_state); +} + +STATIC bool sxe2_fw_channel_work(struct sxe2_adapter *adapter) +{ + if (sxe2_cmd_rq_pending(adapter, SXE2_CHNL_FW)) + return sxe2_cmd_channel_work(adapter, SXE2_CHNL_FW); + else + return false; +} + +bool sxe2_mbx_channel_work(struct sxe2_adapter *adapter) +{ + if (sxe2_cmd_rq_pending(adapter, SXE2_CHNL_MBX)) + return sxe2_cmd_channel_work(adapter, SXE2_CHNL_MBX); + else + return false; +} + +STATIC void sxe2_rq_recv_work_cb(struct work_struct *work) +{ + struct sxe2_cmd_channel_context *cmd_channel_ctxt = + container_of(work, struct sxe2_cmd_channel_context, recv_work); + struct sxe2_adapter *adapter = + container_of(cmd_channel_ctxt, struct sxe2_adapter, cmd_channel_ctxt); + + (void)sxe2_fw_channel_work(adapter); + (void)sxe2_mbx_channel_work(adapter); + + sxe2_rq_recv_work_complete(adapter); + + if (sxe2_fw_channel_work(adapter) || sxe2_mbx_channel_work(adapter)) + sxe2_rq_recv_work_schedule(adapter); +} + +void sxe2_rq_recv_work_schedule(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->cmd_channel_ctxt.recv_work_lock, flags); + if (!test_bit(SXE2_RQ_RECV_WORK_STOPPED, + &adapter->cmd_channel_ctxt.recv_work_state) && + !test_and_set_bit(SXE2_RQ_RECV_WORK_SCHED, + &adapter->cmd_channel_ctxt.recv_work_state)) { + sxe2_queue_work(adapter, sxe2_rq_recv_wq, + &adapter->cmd_channel_ctxt.recv_work); + } + spin_unlock_irqrestore(&adapter->cmd_channel_ctxt.recv_work_lock, flags); +} + +STATIC void sxe2_msg_handle(struct sxe2_adapter *adapter, struct sxe2_recv_msg *msg) +{ + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + + switch (cmd_hdr->cmd_type) { + case SXE2_CMD_TYPE_FW_NOTIFY: + sxe2_cmd_event_handler(adapter, msg); + break; + + case SXE2_CMD_TYPE_VF_TO_PF: + sxe2_cmd_vf_msg_handler(adapter, msg); + break; + + default: + LOG_ERROR_BDF("unknown cmd type: %d.\n", cmd_hdr->cmd_type); + SXE2_BUG(); + break; + } +} + +STATIC void 
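+/*
+ * Rescheduling sketch (illustrative): sxe2_rq_recv_work_cb() above uses the
+ * clear-then-recheck idiom so no event is lost between the final poll and
+ * clearing the SCHED bit:
+ *
+ *	do_channel_work();
+ *	clear_bit(SXE2_RQ_RECV_WORK_SCHED, &state);	// race window opens
+ *	if (channel_work_pending())
+ *		reschedule();				// close the window
+ *
+ * sxe2_msg_handle_work_cb() below drains the deferred message list with the
+ * same clear-then-recheck shape, re-polling after completion.
+ */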
sxe2_msg_handle_polling(struct sxe2_adapter *adapter) +{ + struct sxe2_recv_msg *msg; + int schedule_count_th = 0; + + while (1) { + sxe2_msg_list_del(adapter, &msg); + if (!msg) + break; + + sxe2_msg_handle(adapter, msg); + kfree(msg); + + schedule_count_th++; + if (schedule_count_th == SXE2_MSG_HANDLING_MAX_CNT) { + schedule_count_th = 0; + cond_resched(); + } + } +} + +STATIC void sxe2_msg_handle_work_cb(struct work_struct *work) +{ + struct sxe2_cmd_channel_context *cmd_channel_ctxt = + container_of(work, struct sxe2_cmd_channel_context, handle_work); + struct sxe2_adapter *adapter = + container_of(cmd_channel_ctxt, struct sxe2_adapter, cmd_channel_ctxt); + + sxe2_msg_handle_polling(adapter); + + sxe2_msg_handle_work_complete(adapter); + + sxe2_msg_handle_polling(adapter); +} + +void sxe2_msg_handle_work_schedule(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->cmd_channel_ctxt.handle_work_lock, flags); + if (!test_bit(SXE2_MSG_HANDLE_WORK_STOPPED, + &adapter->cmd_channel_ctxt.handle_work_state) && + !test_and_set_bit(SXE2_MSG_HANDLE_WORK_SCHED, + &adapter->cmd_channel_ctxt.handle_work_state)) { + sxe2_queue_work(adapter, sxe2_msg_handle_wq, + &adapter->cmd_channel_ctxt.handle_work); + } + spin_unlock_irqrestore(&adapter->cmd_channel_ctxt.handle_work_lock, flags); +} + +void sxe2_cmd_work_init(struct sxe2_adapter *adapter) +{ + INIT_WORK(&adapter->cmd_channel_ctxt.recv_work, sxe2_rq_recv_work_cb); + INIT_WORK(&adapter->cmd_channel_ctxt.handle_work, sxe2_msg_handle_work_cb); + + clear_bit(SXE2_RQ_RECV_WORK_STOPPED, + &adapter->cmd_channel_ctxt.recv_work_state); + clear_bit(SXE2_MSG_HANDLE_WORK_STOPPED, + &adapter->cmd_channel_ctxt.handle_work_state); +} + +void sxe2_cmd_work_exit(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->cmd_channel_ctxt.recv_work_lock, flags); + set_bit(SXE2_RQ_RECV_WORK_STOPPED, + &adapter->cmd_channel_ctxt.recv_work_state); + spin_unlock_irqrestore(&adapter->cmd_channel_ctxt.recv_work_lock, flags); + cancel_work_sync(&adapter->cmd_channel_ctxt.recv_work); + clear_bit(SXE2_RQ_RECV_WORK_SCHED, + &adapter->cmd_channel_ctxt.recv_work_state); + + spin_lock_irqsave(&adapter->cmd_channel_ctxt.handle_work_lock, flags); + set_bit(SXE2_MSG_HANDLE_WORK_STOPPED, + &adapter->cmd_channel_ctxt.handle_work_state); + spin_unlock_irqrestore(&adapter->cmd_channel_ctxt.handle_work_lock, flags); + cancel_work_sync(&adapter->cmd_channel_ctxt.handle_work); + clear_bit(SXE2_MSG_HANDLE_WORK_SCHED, + &adapter->cmd_channel_ctxt.handle_work_state); +} + +s32 sxe2_cmd_work_create(void) +{ + sxe2_rq_recv_wq = alloc_workqueue("%s-RQ-RECV", 0, 0, SXE2_DRV_NAME); + if (!sxe2_rq_recv_wq) { + LOG_PR_ERR("failed to create rq recv workqueue\n"); + return -ENOMEM; + } + + sxe2_msg_handle_wq = alloc_workqueue("%s-MSG-HANDLE", 0, 0, SXE2_DRV_NAME); + if (!sxe2_msg_handle_wq) { + LOG_PR_ERR("failed to create msg handle workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +void sxe2_cmd_work_destroy(void) +{ + destroy_workqueue(sxe2_rq_recv_wq); + sxe2_rq_recv_wq = NULL; + + destroy_workqueue(sxe2_msg_handle_wq); + sxe2_msg_handle_wq = NULL; +} + +struct mutex *sxe2_cmd_channel_get_event_lock(struct sxe2_adapter *adapter) +{ + return &adapter->cmd_channel_ctxt.event_lock; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.h new file mode 100644 index 
0000000000000000000000000000000000000000..2d63a862f120893a1218857186dde7730947849a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_cmd_channel.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cmd_channel.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CMD_CHANNEL_H__ +#define __SXE2_CMD_CHANNEL_H__ + +#include +#include +#include +#include + +#include "sxe2_cmd.h" +#include "sxe2_spec.h" + +struct sxe2_adapter; +struct sxe2_hw; + +#define SXE2_CMD_HASH_TABLE_ORDER 8 + +#define SXE2_DEPTH_FW_TQ 256 +#define SXE2_DEPTH_FW_RQ 256 +#define SXE2_DEPTH_MBX_TQ 64 +#define SXE2_DEPTH_MBX_RQ 512 + +#define SXE2_CMD_ATQ_SEND_MAX_LEN (512) +#define SXE2_CMD_ARQ_RECV_MAX_LEN (352) + +#define SXE2_CMD_ATQ_SEND_APP_MAX_LEN (SXE2_CMD_ATQ_SEND_MAX_LEN - SXE2_CMD_HDR_SIZE) +#define SXE2_CMD_ARQ_RECV_APP_MAX_LEN (SXE2_CMD_ARQ_RECV_MAX_LEN - SXE2_CMD_HDR_SIZE) + +#define SXE2_CMD_ATQ_SEND_PACKET_NUM(totalNum, cmd_params) \ + do { \ + if (SXE2_CMD_ATQ_SEND_MAX_LEN < \ + (SXE2_CMD_HDR_SIZE + (cmd_params)->req_len)) { \ + totalNum = ((cmd_params)->req_len + \ + SXE2_CMD_ATQ_SEND_APP_MAX_LEN) / \ + SXE2_CMD_ATQ_SEND_APP_MAX_LEN; \ + } \ + } while (0) + +#define SXE2_DRV_CMD_DFLT_TIMEOUT (30) + +#define LOG_DEBUG_TRACEID(fmt, ...) LOG_DEBUG_BDF("[trace id 0x%llx] " fmt, trace_id, ##__VA_ARGS__) +#define LOG_INFO_TRACEID(fmt, ...) LOG_INFO_BDF("[trace id 0x%llx] " fmt, trace_id, ##__VA_ARGS__) +#define LOG_WARN_TRACEID(fmt, ...) LOG_WARN_BDF("[trace id 0x%llx] " fmt, trace_id, ##__VA_ARGS__) +#define LOG_ERROR_TRACEID(fmt, ...) LOG_ERROR_BDF("[trace id 0x%llx] " fmt, trace_id, ##__VA_ARGS__) + +union sxe2_trace_info { + u64 id; + struct { + u64 count : 50; + u64 cpu_id : 10; + u64 type : 4; + } sxe2_trace_id_param; +}; + +enum sxe2_cmd_channel_type { + SXE2_CHNL_FW = 0, + SXE2_CHNL_MBX, + SXE2_CMD_CHANNEL_MAX, +}; + +enum sxe2_cmd_queue_type { + SXE2_CMD_TQ = 0, + SXE2_CMD_RQ, + SXE2_CMD_QUEUE_MAX, +}; + +enum sxe2_cmd_state { + SXE2_CMD_STATE_WAITING = 0, + SXE2_CMD_STATE_DONE, + SXE2_CMD_STATE_CANCELED, + SXE2_CMD_STATE_FAULT, +}; + +enum sxe2_cmd_channel_mode { + SXE2_CMD_POLLING = 0, + SXE2_CMD_NOTIFY, +}; + +struct sxe2_dma_mem { + void *va; + dma_addr_t pa; + size_t size; +}; + +struct sxe2_cmd_queue_operations { + s32 (*enable)(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + void (*disable)(struct sxe2_hw *hw); + s32 (*is_idle)(struct sxe2_hw *hw); + void (*write_tail)(struct sxe2_hw *hw, u32 value); + u32 (*read_head)(struct sxe2_hw *hw); + u32 (*get_error)(struct sxe2_hw *hw); +}; + +struct sxe2_recv_cache_buff { + u16 buf_offset; + bool finish; + u8 res; + u8 *buf; +}; + +struct sxe2_cmd_queue { + u16 depth; + u16 buf_size; + u16 ntu; + u16 ntc; + u8 is_enable; + struct sxe2_dma_mem desc; + struct sxe2_dma_mem *buf; + /* in order to protect the data */ + struct mutex lock; + struct sxe2_cmd_queue_operations ops; + struct sxe2_recv_cache_buff cache_buff; +}; + +struct sxe2_recv_msg { + struct list_head node; + u16 buf_len; + struct sxe2_cmd_desc desc; + u8 buf[]; +}; + +struct sxe2_cmd_wait_task { + struct hlist_node entry; + u64 session_id; + enum sxe2_cmd_state state; + u16 resp_len; + void *resp_data; +}; + +struct sxe2_cmd_wait_queue { + /* in order to protect the data */ + spinlock_t lock; + wait_queue_head_t wq; + DECLARE_HASHTABLE(table, SXE2_CMD_HASH_TABLE_ORDER); +}; + +struct sxe2_cmd_channel { + struct sxe2_cmd_queue 
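+	/*
+	 * Layout sketch (illustrative): one sxe2_cmd_channel_context per
+	 * adapter, two channels per context, and one TQ/RQ pair plus a wait
+	 * queue (tasks hashed by session id) per channel:
+	 *
+	 *	adapter->cmd_channel_ctxt
+	 *	    channel[SXE2_CHNL_FW]  : queue[TQ], queue[RQ], wq
+	 *	    channel[SXE2_CHNL_MBX] : queue[TQ], queue[RQ], wq
+	 */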
queue[SXE2_CMD_QUEUE_MAX]; + struct sxe2_cmd_wait_queue wq; + enum sxe2_cmd_channel_type chnl_type; + /* in order to protect the data */ + struct mutex lock; + u8 is_enable; +}; + +struct sxe2_cmd_channel_context { + enum sxe2_cmd_channel_mode mode; + struct work_struct recv_work; + unsigned long recv_work_state; + struct work_struct handle_work; + unsigned long handle_work_state; + /* in order to protect the data */ + spinlock_t recv_work_lock; + /* in order to protect the data */ + spinlock_t handle_work_lock; + struct list_head head; + /* in order to protect the data */ + struct mutex lock; + struct sxe2_cmd_channel channel[SXE2_CMD_CHANNEL_MAX]; + /* in order to protect the data */ + struct mutex event_lock; +}; + +struct sxe2_cmd_params { + u64 trace_id; + s32 err_code; + u16 vf_idx; + u8 is_interruptible; + bool no_resp; + u32 timeout; + u32 opcode; + u64 session_id; + u16 req_len; + u16 resp_len; + void *req_data; + void *resp_data; +}; + +struct sxe2_cmd_trans_info { + u16 req_len; + u16 resp_len; + void *req_buff; + void *resp_buff; +}; + +struct sxe2_cmd_context { + struct sxe2_adapter *adapter; + enum sxe2_cmd_type type; + enum sxe2_cmd_channel_type chnl_type; + u16 cancelable; + u64 session_id; + u64 expired_time; + struct sxe2_cmd_params *params; + struct sxe2_cmd_wait_task wait_task; + struct sxe2_cmd_trans_info trans_info; +}; + +s32 sxe2_cmd_channels_init(struct sxe2_adapter *adapter); + +void sxe2_cmd_channels_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_cmd_channels_enable(struct sxe2_adapter *adapter); + +void sxe2_cmd_channels_disable(struct sxe2_adapter *adapter); + +void sxe2_cmd_params_fill(struct sxe2_cmd_params *cmd, + enum sxe2_drv_cmd_opcode opc, + void *req_data, u32 req_len, + void *resp_data, u32 resp_len, + u32 timeout, + bool is_interruptible, + bool no_resp); + +void sxe2_cmd_params_no_interruptible_fill(struct sxe2_cmd_params *cmd, + enum sxe2_drv_cmd_opcode opc, + void *req_data, + u32 req_len, + void *resp_data, + u32 resp_len); + +void sxe2_cmd_params_dflt_fill(struct sxe2_cmd_params *cmd, + enum sxe2_drv_cmd_opcode opc, + void *in_data, u32 in_len, + void *out_data, u32 out_len); +s32 sxe2_cmd_fw_exec(struct sxe2_adapter *adapter, struct sxe2_cmd_params *cmd_params); + +s32 sxe2_cmd_mbx_reply(struct sxe2_adapter *adapter, struct sxe2_cmd_params *cmd_params); + +s32 sxe2_cmd_mbx_exec(struct sxe2_adapter *adapter, struct sxe2_cmd_params *cmd_params); + +s32 sxe2_cmd_cli_exec(struct sxe2_adapter *adapter, struct sxe2_cmd_params *cmd_params); + +void sxe2_wait_task_cancel(struct sxe2_cmd_channel *channel); + +void sxe2_wait_task_cancel_all(struct sxe2_adapter *adapter); + +bool sxe2_cmd_channel_work(struct sxe2_adapter *adapter, enum sxe2_cmd_channel_type chnl_type); + +bool sxe2_cmd_rq_pending(struct sxe2_adapter *adapter, enum sxe2_cmd_channel_type chnl_type); + +void sxe2_rq_recv_work_schedule(struct sxe2_adapter *adapter); + +s32 sxe2_cmd_work_create(void); + +void sxe2_cmd_work_destroy(void); + +s32 sxe2_cmd_strip_hdr(struct sxe2_cmd_context *cmd_ctxt); + +void sxe2_trace_id_alloc(u64 *trace_id); + +bool sxe2_mbx_channel_work(struct sxe2_adapter *adapter); + +s32 sxe2_err_code_trans_fw(struct sxe2_adapter *adapter, u64 trace_id, s32 err); + +struct mutex *sxe2_cmd_channel_get_event_lock(struct sxe2_adapter *adapter); + +void sxe2_mbx_channel_disable(struct sxe2_adapter *adapter); + +s32 sxe2_mbx_channel_enable(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_drv_aux.h 
b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_drv_aux.h new file mode 100644 index 0000000000000000000000000000000000000000..6de4bb332f48e4fce8853b5e3dcaba1fd25f6c8d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_drv_aux.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_drv_aux.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DRV_AUX_H_ +#define _SXE2_DRV_AUX_H_ + +#include +#include +#include +#include "sxe2_compat.h" +#include "sxe2_cmd.h" + +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif + +#define AUX_MAJOR_VER (1) +#define AUX_MINOR_VER (1) +#define SXE2_RDMA_INDEX (0x1) +#define AUX_RDMA_INVALID_PF_IDX (0xFF) +#define AUX_MAX_USER_PRIORITY (8) +#define AUX_MAX_APPS (64) +#define AUX_MAX_DSCP_MAPPING (64) +#define AUX_MAX_NUM_AUX (5) +#define SXE2_RDMA_VCHNL_Q_INVALID_IDX (0xFFFF) + +#define SXE2_RDMA_INVALID_PF 0xFF +#define SXE2_RDMA_PF0 BIT(0) +#define SXE2_RDMA_PF1 BIT(1) +#define SXE2_RDMA_BOTH_PF 0x3 + +#define SXE2_DRV_VER_STR_LEN 32 + +enum aux_rdma_opcode { + RDMA_MAC_RULE_ADD, + RDMA_MAC_RULE_DELETE, + RDMA_OPCODE_MAX +}; + +enum aux_reset_type { + AUX_PFR, + AUX_CORER, + AUX_GLOBR, +}; + +enum aux_function_type { + AUX_FUNCTION_TYPE_PF, + AUX_FUNCTION_TYPE_VF, +}; + +enum aux_rdma_gen { + AUX_RDMA_GEN_RESERVED = 0, + AUX_RDMA_GEN_1 = 1, + AUX_RDMA_GEN_2 = 2, + AUX_RDMA_GEN_3 = 3, +}; + +struct aux_rdma_caps { + u8 gen; +}; + +enum aux_event_type { + SXE2_EVENT_MTU_CHANGED, + SXE2_EVENT_NOTIFY_RESET, + SXE2_EVENT_VF_RESET, + SXE2_EVENT_AEQ_OVERFLOW, + SXE2_EVENT_FAILOVER, + SXE2_EVENT_TC_CHANGE, + SXE2_EVENT_MAX +}; + +struct aux_ver_info { + u16 major; + u16 minor; + u64 support; +}; + +struct aux_core_dev_info; + +struct aux_rdma_qset_params { + u16 teid; + u16 qset_id; + u16 vport_id; + u8 tc[2]; + u8 user_pri; + u8 qset_port; +}; + +struct aux_rdma_multi_qset_params { + u16 teid[2]; + u16 qset_id[2]; + u8 qset_port[2]; + u16 vport_id; + u8 tc[2]; + u8 num; + u8 rdma_port[2]; + u8 active_ports; + u8 user_pri; +}; + +struct aux_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +struct aux_dcb_app_info { + u8 priority; + u8 selector; + u16 prot_id; +}; + +struct aux_qos_params { + struct aux_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[AUX_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u32 num_apps; + u8 pfc_mode; + struct aux_dcb_app_info apps[AUX_MAX_APPS]; + u8 dscp_map[AUX_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +struct aux_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct aux_qvlist_info { + u32 num_vectors; + struct aux_qv_info qv_info[]; +}; + +struct aux_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +struct sxe2_core_ops { + int (*alloc_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*free_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*request_reset)(struct aux_core_dev_info *cdev_info, + enum aux_reset_type reset_type); + int (*update_vport_filter)(struct aux_core_dev_info *cdev_info, + u16 vport_id, bool enable); + int (*get_vf_info)(struct aux_core_dev_info *cdev_info, + u16 vf_id, struct aux_vf_port_info *vf_port_info); + int (*vc_send)(struct aux_core_dev_info *cdev_info, + u16 vf_id, u8 *msg, u16 len, u64 session_id); + int 
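+	/*
+	 * Usage sketch (illustrative; the local variable names are
+	 * assumptions): the sxe2rdma auxiliary driver reaches these callbacks
+	 * through the aux_core_dev_info handed over at auxiliary probe time:
+	 *
+	 *	struct aux_core_dev_info *cdev = sxe2_adev->cdev_info;
+	 *	struct aux_rdma_qset_params qset = { .vport_id = vport_id };
+	 *
+	 *	err = cdev->ops->alloc_res(cdev, &qset);
+	 *	if (err)
+	 *		return err;
+	 */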
(*vc_send_sync)(struct aux_core_dev_info *cdev_info, u8 *msg, + u16 len, u8 *recv_msg, u16 recv_len); + int (*rdma_send_cmd)(struct aux_core_dev_info *cdev_info, + enum sxe2_drv_cmd_opcode opcode, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len); + int (*rdma_drv_config)(struct aux_core_dev_info *cdev_info, u8 opcode, u8 *msg); + int (*vc_queue_vec_map_unmap)(struct aux_core_dev_info *cdev_info, + struct aux_qvlist_info *qvl_info, bool map); + int (*alloc_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*free_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*dump_pcap_cmd)(struct aux_core_dev_info *cdev_info, u8 *mac, bool is_add); + void (*notify_rdma_load)(struct aux_core_dev_info *cdev_info, bool loaded); + u32 (*rdma_get_link_speed)(struct aux_core_dev_info *cdev_info); +}; + +struct sxe2_rdma_event_info { + DECLARE_BITMAP(type, SXE2_EVENT_MAX); + u16 vf_id; + struct aux_qos_params port_qos; +}; + +struct aux_core_dev_info { + struct pci_dev *pdev; + struct auxiliary_device *adev; + u8 __iomem *hw_addr; + struct aux_ver_info ver; + char drv_ver[SXE2_DRV_VER_STR_LEN]; + enum aux_function_type ftype; + const struct sxe2_aux_ops *aux_ops; + struct sxe2_core_ops *ops; + int cdev_info_id; + u8 pf_id; + u8 pf_cnt; + u16 vfid_base; + u16 vport_id; + struct aux_qos_params qos_info[2]; + struct net_device *netdev; + struct msix_entry *msix_entries; + u32 msix_count; + struct aux_rdma_caps rdma_caps; + struct sxe2_adapter *adapter; + u8 bond_mode; + u8 rdma_pf_bitmap; + void *ext_ops; + void *ext_info; +}; + +struct sxe2_aux_ops { + void (*event_handler)(struct aux_core_dev_info *cdev_info, + struct sxe2_rdma_event_info *event); + int (*vc_receive)(struct aux_core_dev_info *cdev_info, + u32 vf_id, u8 *msg, u16 len, u64 session_id); +}; + +struct sxe2_auxiliary_device { + struct auxiliary_device adev; + struct aux_core_dev_info *cdev_info; +}; + +struct sxe2_auxiliary_drv { + struct auxiliary_driver adrv; + struct sxe2_aux_ops aux_ops; +}; + +void sxe2_rdma_aux_adev_release(struct device *dev); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.c b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.c new file mode 100644 index 0000000000000000000000000000000000000000..2b9ab3f20b63998ae4dc08cff0dda3992732465d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_event.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_dcb.h" +#include "sxe2_cmd.h" +#include "sxe2_event.h" +#include "sxe2_log_export.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_linkchg.h" +#include "sxe2_event.h" + +#define SXE2_FW_LOG_PATH "/var/log/sxe2_fw_log/" + +struct sxe2_event_handle { + u16 event_code; + s32 (*handler)(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + enum sxe2_cmd_event_status event_status; +}; + +STATIC struct sxe2_event_handle event_table[] = { + {SXE2_EVENT_CODE_AUTO_LOG, sxe2_event_log_export, + SXE2_CMD_EVENT_STATUS_INIT}, + {SXE2_EVENT_CODE_MIB_NOTIFY, sxe2_dcb_process_lldp_set_mib_change, + SXE2_CMD_EVENT_STATUS_INIT}, + {SXE2_EVENT_CODE_SFP_WHITE_LIST, sxe2_white_list_mib, + SXE2_CMD_EVENT_STATUS_INIT}, + {SXE2_EVENT_CODE_SFP_TX_FAULT, sxe2_tx_fault_mib, + SXE2_CMD_EVENT_STATUS_INIT}, + {SXE2_EVENT_CODE_QSFP_TX_FAULT_COUNT, sxe2_tx_fault_event_count_mib, + SXE2_CMD_EVENT_STATUS_INIT}, + {SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY, sxe2_lldp_fw_agent_change, + SXE2_CMD_EVENT_STATUS_INIT}, + { + SXE2_EVENT_CODE_INVAL, + } +}; + +s32 sxe2_event_handle(struct sxe2_adapter *adapter, u16 event_code, void *buf, + u32 buf_len) +{ + s32 ret = 0; + const struct sxe2_event_handle *event_handler = NULL; + struct mutex *event_lock = sxe2_cmd_channel_get_event_lock(adapter); + + mutex_lock(event_lock); + for (event_handler = &event_table[0]; event_handler->event_code; + event_handler++) { + if (event_handler->event_code == event_code) + break; + } + + if (!event_handler->handler || + event_handler->event_code == SXE2_EVENT_CODE_INVAL || + event_handler->event_status != SXE2_CMD_EVENT_STATUS_SUB) { + ret = -EINVAL; + LOG_ERROR_BDF("event %d buf_len %d, ret=%d\n", event_code, buf_len, + ret); + goto l_unlock; + } + + ret = event_handler->handler(adapter, buf, buf_len); +l_unlock: + mutex_unlock(event_lock); + return ret; +} + +s32 sxe2_fwc_event_subscribe(struct sxe2_adapter *adapter, + struct sxe2_fwc_event *subscribe) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_EVENT_SUBSCRIBE, subscribe, + sizeof(*subscribe), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("%d event %d subscribe failed, ret=%d\n", + subscribe->count, le16_to_cpu(subscribe->code[0]), + ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_fwc_event_unsubscribe(struct sxe2_adapter *adapter, + struct sxe2_fwc_event *unsubscribe) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_EVENT_UNSUBSCRIBE, unsubscribe, + sizeof(*unsubscribe), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("%d event %d unsubscribe failed, ret=%d\n", + unsubscribe->count, le16_to_cpu(unsubscribe->code[0]), + ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_set_event_status(struct sxe2_adapter *adapter, u16 event_code, + enum sxe2_cmd_event_status status) +{ + struct sxe2_event_handle *event_handler = NULL; + struct mutex *event_lock = sxe2_cmd_channel_get_event_lock(adapter); + s32 ret = 0; + + mutex_lock(event_lock); + for (event_handler = &event_table[0]; event_handler->event_code; + event_handler++) { + if (event_handler->event_code == event_code) + break; + } + + if (event_handler->event_code == SXE2_EVENT_CODE_INVAL) { + ret = -EINVAL; + LOG_ERROR_BDF("event %d status %d, ret=%d\n", event_code, status, + ret); + goto l_unlock; + } + + 
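+	/*
+	 * Lifecycle sketch (illustrative): sxe2_event_handle() only dispatches
+	 * events whose table entry has reached SXE2_CMD_EVENT_STATUS_SUB, so
+	 * consumers are expected to pair the firmware call with the local arm:
+	 *
+	 *	sxe2_fwc_event_subscribe(adapter, &ev);
+	 *	sxe2_set_event_status(adapter, code, SXE2_CMD_EVENT_STATUS_SUB);
+	 *	...events delivered via sxe2_event_handle()...
+	 *	sxe2_fwc_event_unsubscribe(adapter, &ev);
+	 */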
event_handler->event_status = status; + +l_unlock: + mutex_unlock(event_lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.h new file mode 100644 index 0000000000000000000000000000000000000000..9c76b75b041838ca03292c443130519841e21836 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_event.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_event.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_EVENT_H__ +#define __SXE2_EVENT_H__ + +enum sxe2_cmd_event_status { + SXE2_CMD_EVENT_STATUS_INIT = 0, + SXE2_CMD_EVENT_STATUS_SUB, + SXE2_CMD_EVENT_STATUS_UNSUB, +}; + +s32 sxe2_event_handle(struct sxe2_adapter *adapter, u16 event_code, void *buf, u32 buf_len); + +s32 sxe2_fwc_event_subscribe(struct sxe2_adapter *adapter, struct sxe2_fwc_event *subscribe); + +s32 sxe2_fwc_event_unsubscribe(struct sxe2_adapter *adapter, struct sxe2_fwc_event *unsubscribe); + +s32 sxe2_set_event_status(struct sxe2_adapter *adapter, + u16 event_code, + enum sxe2_cmd_event_status status); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.c b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..773b343f83f56025a12ec2a66265d3fe01bc9c7a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.c @@ -0,0 +1,1710 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_hw.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_ethdev.h" +#else +#include "sxe2.h" +#endif +#include "sxe2_hw.h" +#include "sxe2_log.h" +#include "sxe2_spec.h" + +#ifdef SXE2_CFG_DEBUG +extern int reg_log; +#endif + +#ifdef SXE2_DPDK_DRIVER +#define SXE2_HW_REG_WRITE(hw, reg, value) \ + do { \ + (void)hw; \ + rte_write32(value, reg); \ + } while (0) + +#define SXE2_HW_REG_READ(hw, reg) ((void)(hw), rte_read32(reg)) + +#define SXE2_HW_PCI_DEV(_ad) \ + (RTE_DEV_TO_PCI(rte_eth_devices[_ad->dev_info.dev_data->port_id].device)) +#else +#define SXE2_HW_REG_WRITE(hw, reg, value) hw->reg_write(value, reg) +#define SXE2_HW_REG_READ(hw, reg) hw->reg_read(reg) +#endif + +#define SXE2_SET_USED(x) ((void)(x)) + +void __iomem *sxe2_reg_addr_get(struct sxe2_hw *hw, resource_size_t reg) +{ +#ifdef SXE2_DPDK_DRIVER + return (u8 __iomem *)hw->hw_map + reg; +#else + u32 i; + struct sxe2_map_info *map; + struct sxe2_hw_map *hw_addr = (struct sxe2_hw_map *)hw->hw_map; + + if (WARN_ON(!hw_addr)) + return (void __iomem *)ERR_PTR(-SXE2_HW_ERR_FAULT); + + for (i = 0, map = hw_addr->maps; i < hw_addr->map_cnt; i++, map++) + if (reg >= map->start && reg < map->end) + return (u8 __iomem *)map->addr + (reg - map->start); + + LOG_WARN("Unable to map register address 0x%llx to kernel address", reg); + + return (void __iomem *)ERR_PTR(-SXE2_HW_ERR_FAULT); +#endif +} + +bool sxe2_hw_is_fault(struct sxe2_hw *hw) +{ + u32 val; + + val = sxe2_hw_read_pcie_sys_ready(hw); + if (val == SXE2_REG_INVALID_VALUE) + return true; + else + return false; +} + +u32 sxe2_read_reg(struct sxe2_hw *hw, u32 reg) +{ + u32 value; + struct sxe2_adapter *adapter = hw->adapter; + u8 __iomem *reg_addr = sxe2_reg_addr_get(hw, reg); + + SXE2_SET_USED(adapter); + + if (IS_ERR(reg_addr)) { + 
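+		/*
+		 * Mapping sketch (illustrative): sxe2_reg_addr_get() above
+		 * translates a register offset through a table of mapped BAR
+		 * windows,
+		 *
+		 *	kva = map->addr + (reg - map->start);
+		 *
+		 * and an all-ones read (SXE2_REG_INVALID_VALUE) doubles as
+		 * the surprise-removal probe used by sxe2_hw_is_fault().
+		 */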
LOG_DEBUG_BDF("reg addr:0x%x is error.\n", reg); + value = SXE2_REG_INVALID_VALUE; + goto l_ret; + } + + value = SXE2_HW_REG_READ(hw, reg_addr); + +#ifdef SXE2_CFG_DEBUG + if (reg_log) + LOG_DEBUG_BDF("reg: 0x%x, value: 0x%x\n", reg, value); +#endif + +l_ret: + return value; +} + +STATIC u32 sxe2_read_reg_valid(struct sxe2_hw *hw, u32 reg, u32 dflt_val) +{ + u32 val = sxe2_read_reg(hw, reg); + + return val == SXE2_REG_INVALID_VALUE ? dflt_val : val; +} + +u64 sxe2_read_reg64(struct sxe2_hw *hw, u32 reg) +{ + u32 low, high; + + low = sxe2_read_reg(hw, reg); + high = sxe2_read_reg(hw, reg + 4); + return low + ((u64)high << 32); +} + +void sxe2_write_reg(struct sxe2_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = sxe2_reg_addr_get(hw, reg); + struct sxe2_adapter *adapter = hw->adapter; + + SXE2_SET_USED(adapter); + + if (unlikely(sxe2_hw_is_fault(hw)) || IS_ERR(reg_addr)) + goto l_ret; + + SXE2_HW_REG_WRITE(hw, reg_addr, value); + +#ifdef SXE2_CFG_DEBUG + if (reg_log) + LOG_DEBUG_BDF("reg:0x%x write value:0x%x read value:0x%x.\n", reg, + value, SXE2_HW_REG_READ(hw, reg_addr)); +#endif + +l_ret: + return; +} + +static void sxe2_hw_evt_irq_cause_cfg(struct sxe2_hw *hw, u32 cause) +{ + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, 0); + (void)sxe2_read_reg(hw, SXE2_PF_INT_OICR); + (void)sxe2_hw_fw_irq_cause_get(hw); + + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, cause); +} + +static void sxe2_hw_evt_irq_cause_enable(struct sxe2_hw *hw, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = (irq_idx & SXE2_PF_INT_OICR_CTL_MSIX_IDX) | + (itr_idx << SXE2_PF_INT_OICR_CTL_ITR_IDX_S & + SXE2_PF_INT_OICR_CTL_ITR_IDX) | + SXE2_PF_INT_OICR_CTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_OICR_CTL, value); +} + +void sxe2_hw_evt_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_OICR_CTL, 0); + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, 0); + (void)sxe2_read_reg(hw, SXE2_PF_INT_OICR); +} + +u32 sxe2_hw_evt_irq_mask_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); +} + +u32 sxe2_hw_evt_irq_cause_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg_valid(hw, SXE2_PF_INT_OICR, 0); +} + +void sxe2_hw_evt_irq_cfg(struct sxe2_hw *hw, u32 value, u16 itr_idx, u16 irq_idx) +{ + sxe2_hw_evt_irq_cause_cfg(hw, value); + + sxe2_hw_evt_irq_cause_enable(hw, itr_idx, irq_idx); +} + +void sxe2_hw_fwq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx) +{ + u32 value; + (void)itr_idx; + value = (irq_idx & SXE2_PF_INT_FWQ_CTL_MSIX_IDX) | + SXE2_PF_INT_FWQ_CTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_FWQ_CTL, value); +} + +void sxe2_hw_mbxq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx) +{ + u32 value; + (void)itr_idx; + value = (irq_idx & SXE2_PF_INT_MBX_CTL_MSIX_IDX) | + SXE2_PF_INT_MBX_CTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_MBX_CTL, value); +} + +void sxe2_hw_fwq_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_FWQ_CTL, 0); +} + +void sxe2_hw_mbxq_irq_clear(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_PF_INT_MBX_CTL, 0); +} + +void sxe2_hw_irq_enable(struct sxe2_hw *hw, u16 irq_idx) +{ + u32 value = SXE2_VF_DYN_CTL_INTENABLE | + SXE2_VF_DYN_CTL_CLEARPBA | + (SXE2_ITR_IDX_NONE + << SXE2_VF_DYN_CTL_ITR_IDX_S); + + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_disable(struct sxe2_hw *hw, u16 irq_idx) +{ + u32 value = (SXE2_ITR_IDX_NONE << SXE2_VF_DYN_CTL_ITR_IDX_S); + + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_trigger(struct sxe2_hw *hw, u16 irq_idx) +{ + 
sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), + (SXE2_ITR_IDX_NONE << SXE2_VF_DYN_CTL_ITR_IDX_S) | + SXE2_VF_DYN_CTL_SWINT_TRIG | + SXE2_VF_DYN_CTL_INTENABLE_MSK); +} + +void sxe2_hw_irq_dyn_ctl(struct sxe2_hw *hw, u16 irq_idx, u32 value) +{ + sxe2_write_reg(hw, SXE2_VF_DYN_CTL(irq_idx), value); +} + +void sxe2_hw_irq_itr_set(struct sxe2_hw *hw, u16 irq_idx, u16 itr_idx, u16 interval) +{ + u32 value = (interval / hw->hw_cfg.itr_gran) & SXE2_VF_INT_ITR_INTERVAL; + + sxe2_write_reg(hw, SXE2_VF_INT_ITR(itr_idx, irq_idx), value); +} + +void sxe2_hw_irq_rate_limit_set(struct sxe2_hw *hw, u16 irq_idx, u16 rate_limit) +{ + u32 value, rate_limit_reg, credit_max_value_reg; + + if (rate_limit == 0) { + value = 0; + } else { + rate_limit_reg = (u32)FIELD_PREP( + SXE2_PF_INT_RATE_CREDIT_INTERVAL, + (rate_limit / + hw->hw_cfg.credit_interval_gran)); + credit_max_value_reg = + SXE2_PF_INT_RATE_CREDIT_MAX_VALUE; + + value = rate_limit_reg | credit_max_value_reg | + SXE2_PF_INT_RATE_INTRL_ENABLE; + } + + sxe2_write_reg(hw, SXE2_PF_INT_RATE(irq_idx), value); +} + +u32 sxe2_hw_irq_gran_info_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_PFG_INT_CTL); +} + +void sxe2_hw_txq_irq_cause_setup(struct sxe2_hw *hw, u16 txq_idx, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = FIELD_PREP(SXE2_PF_INT_TQCTL_MSIX_IDX, + irq_idx) | + FIELD_PREP(SXE2_PF_INT_TQCTL_ITR_IDX, + itr_idx) | + SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_txq_irq_cause_clear(struct sxe2_hw *hw, u16 txq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_TQCTL(txq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_txq_irq_cause_switch(struct sxe2_hw *hw, u16 txq_idx, bool enable) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_TQCTL(txq_idx)); + u32 value; + + if (enable) + value = old_value | SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + else + value = old_value & ~SXE2_PF_INT_TQCTL_CAUSE_ENABLE; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_TQCTL(txq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_setup(struct sxe2_hw *hw, u16 rxq_idx, u16 itr_idx, + u16 irq_idx) +{ + u32 value; + + value = FIELD_PREP(SXE2_PF_INT_RQCTL_MSIX_IDX, + irq_idx) | + FIELD_PREP(SXE2_PF_INT_RQCTL_ITR_IDX, + itr_idx) | + SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_clear(struct sxe2_hw *hw, u16 rxq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_idx_change(struct sxe2_hw *hw, u16 rxq_idx, u16 irq_idx) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + value = old_value & ~SXE2_PF_INT_RQCTL_MSIX_IDX; + value |= irq_idx; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +void sxe2_hw_rxq_irq_cause_switch(struct sxe2_hw *hw, u16 rxq_idx, bool enable) +{ + u32 old_value = sxe2_read_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx)); + u32 value; + + if (enable) + value = old_value | SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + else + value = old_value & ~SXE2_PF_INT_RQCTL_CAUSE_ENABLE; + + if (old_value != value) + sxe2_write_reg(hw, SXE2_PF_INT_RQCTL(rxq_idx), value); +} + +#define SXE2_HW_CMD_QUEUE_ENABLE(type) \ + do { \ + s32 ret = 0; \ + u32 value; \ + 
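+		/*
+		 * Expansion sketch (illustrative): the ##type## paste selects
+		 * one register family per queue, so for FW_ATQ the writes
+		 * below become SXE2_PF_CTRLQ_FW_ATQH/T/BAL/BAH/LEN.  The
+		 * read-back of BAL is a cheap check that the device latched
+		 * the base address; a mismatch turns into -SXE2_HW_ERR_IO.
+		 */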
sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAL, lower_32_bits(addr)); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAH, upper_32_bits(addr)); \ + value = FIELD_PREP(SXE2_CMD_REG_LEN_M, depth) | \ + SXE2_CMD_REG_LEN_ENABLE_M; \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + if (sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##BAL) != \ + lower_32_bits(addr)) { \ + ret = -SXE2_HW_ERR_IO; \ + } \ + return ret; \ + } while (0) + +#define SXE2_HW_CMD_QUEUE_DISABLE(type) \ + do { \ + bool is_admin = false; \ + bool old_tail = false; \ + bool old_head = false; \ + u32 value = sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##LEN); \ + value &= ~(SXE2_CMD_REG_LEN_VFE_M | SXE2_CMD_REG_LEN_OVFL_M | \ + SXE2_CMD_REG_LEN_CRIT_M | SXE2_CMD_REG_LEN_ENABLE_M); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + is_admin = ((SXE2_PF_CTRLQ_##type##LEN == \ + SXE2_PF_CTRLQ_FW_ARQLEN) || \ + (SXE2_PF_CTRLQ_##type##LEN == SXE2_PF_CTRLQ_FW_ATQLEN)) \ + ? true \ + : false; \ + if (!is_admin) { \ + if (SXE2_PF_CTRLQ_##type##LEN == \ + SXE2_PF_CTRLQ_MBX_ATQLEN) { \ + old_tail = sxe2_read_reg(hw, \ + SXE2_PF_CTRLQ_##type##T); \ + old_head = sxe2_read_reg(hw, \ + SXE2_PF_CTRLQ_##type##H); \ + if (old_tail >= old_head) { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + } else { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + old_tail); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, \ + 0); \ + } \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + } else { \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##H, 0); \ + } \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAL, 0); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##BAH, 0); \ + } \ + } while (0) + +#define SXE2_HW_CMD_QUEUE_WRITE_TAIL(type) \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##T, value &SXE2_CMD_REG_HEAD_M) +#define SXE2_HW_CMD_QUEUE_READ_HEAD(type) \ + ({ (sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##H) & SXE2_CMD_REG_HEAD_M); }) + +#define SXE2_HW_CMD_QUEUE_GET_ERR(type) \ + do { \ + u32 err = 0; \ + u32 value = sxe2_read_reg(hw, SXE2_PF_CTRLQ_##type##LEN); \ + if (value == SXE2_REG_INVALID_VALUE) \ + return 0; \ + if (value & SXE2_CMD_REG_LEN_VFE_M) { \ + err |= SXE2_CMD_REG_LEN_VFE_M; \ + } else if (value & SXE2_CMD_REG_LEN_CRIT_M) { \ + err |= SXE2_CMD_REG_LEN_CRIT_M; \ + } \ + if (err) { \ + value &= ~(SXE2_CMD_REG_LEN_VFE_M | \ + SXE2_CMD_REG_LEN_CRIT_M); \ + sxe2_write_reg(hw, SXE2_PF_CTRLQ_##type##LEN, value); \ + } \ + return err; \ + } while (0) + +s32 sxe2_hw_fw_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(FW_ATQ); +} + +void sxe2_hw_fw_tq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(FW_ATQ); +} + +s32 sxe2_hw_fw_tq_is_idle(struct sxe2_hw *hw) +{ + u32 val; + s32 ret; + u32 old_tail; + u32 old_head; + + val = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_HW_STS); + if (val == SXE2_REG_INVALID_VALUE) { + ret = true; + goto l_out; + } + ret = val & SXE2_PF_CTRLQ_FW_ATQ_IDLE_MASK; + +l_out: + if (ret) { + old_tail = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQT); + old_head = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_ATQH); + if (old_tail >= old_head) { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + } else { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 
old_tail); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQT, 0); + } + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQH, 0); + + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQBAL, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ATQBAH, 0); + } + + return ret; +} + +void sxe2_hw_fw_tq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(FW_ATQ); +} + +u32 sxe2_hw_fw_tq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(FW_ATQ); +} + +u32 sxe2_hw_fw_tq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(FW_ATQ); +} + +s32 sxe2_hw_fw_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(FW_ARQ); +} + +void sxe2_hw_fw_rq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(FW_ARQ); +} + +s32 sxe2_hw_fw_rq_is_idle(struct sxe2_hw *hw) +{ + u32 val; + s32 ret; + + SXE2_SET_USED(hw); + val = sxe2_read_reg(hw, SXE2_PF_CTRLQ_FW_HW_STS); + if (val == SXE2_REG_INVALID_VALUE) { + ret = true; + goto l_out; + } + ret = val & SXE2_PF_CTRLQ_FW_ARQ_IDLE_MASK; + +l_out: + if (ret) { + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQH, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQT, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQBAL, 0); + sxe2_write_reg(hw, SXE2_PF_CTRLQ_FW_ARQBAH, 0); + } + + return ret; +} + +void sxe2_hw_fw_rq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(FW_ARQ); +} + +u32 sxe2_hw_fw_rq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(FW_ARQ); +} + +u32 sxe2_hw_fw_rq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(FW_ARQ); +} + +s32 sxe2_hw_mbx_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(MBX_ATQ); +} + +void sxe2_hw_mbx_tq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(MBX_ATQ); +} + +void sxe2_hw_mbx_tq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(MBX_ATQ); +} + +u32 sxe2_hw_mbx_tq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(MBX_ATQ); +} + +u32 sxe2_hw_mbx_tq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(MBX_ATQ); +} + +s32 sxe2_hw_mbx_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(depth); + SXE2_SET_USED(addr); + + SXE2_HW_CMD_QUEUE_ENABLE(MBX_ARQ); +} + +void sxe2_hw_mbx_rq_disable(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_DISABLE(MBX_ARQ); +} + +void sxe2_hw_mbx_rq_write_tail(struct sxe2_hw *hw, u32 value) +{ + SXE2_SET_USED(hw); + SXE2_SET_USED(value); + + SXE2_HW_CMD_QUEUE_WRITE_TAIL(MBX_ARQ); +} + +u32 sxe2_hw_mbx_rq_read_head(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + return SXE2_HW_CMD_QUEUE_READ_HEAD(MBX_ARQ); +} + +u32 sxe2_hw_mbx_rq_get_error(struct sxe2_hw *hw) +{ + SXE2_SET_USED(hw); + + SXE2_HW_CMD_QUEUE_GET_ERR(MBX_ARQ); +} + +void sxe2_hw_rxq_ctxt_cfg(struct sxe2_hw *hw, struct sxe2_hw_rxq_ctxt *rxq_ctxt, + u16 rxq_idx) +{ + u8 i; + struct sxe2_adapter *adapter = hw->adapter; + u32 value[SXE2_RX_CTXT_CNT]; + u32 base_addr_h = rxq_ctxt->base_addr >> SXE2_RX_CTXT_BASE_L_W; + u16 depth_h = 
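/* like base_addr, the queue depth is split across two context words */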
rxq_ctxt->depth >> SXE2_RX_CTXT_DEPTH_L_W; + + SXE2_SET_USED(adapter); + + value[SXE2_RX_CTXT0] = SXE2_CTXT_REG_VALUE(rxq_ctxt->base_addr, + SXE2_RX_CTXT_BASE_L_S, + SXE2_RX_CTXT_BASE_L_W); + + value[SXE2_RX_CTXT1] = SXE2_CTXT_REG_VALUE( + base_addr_h, SXE2_RX_CTXT_BASE_H_S, SXE2_RX_CTXT_BASE_H_W); + value[SXE2_RX_CTXT1] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->depth, SXE2_RX_CTXT_DEPTH_L_S, + SXE2_RX_CTXT_DEPTH_L_W); + + value[SXE2_RX_CTXT2] = SXE2_CTXT_REG_VALUE(depth_h, SXE2_RX_CTXT_DEPTH_H_S, + SXE2_RX_CTXT_DEPTH_H_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->dbuff_len, + SXE2_RX_CTXT_DBUFF_S, + SXE2_RX_CTXT_DBUFF_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hbuff_len, + SXE2_RX_CTXT_HBUFF_S, + SXE2_RX_CTXT_HBUFF_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_type, + SXE2_RX_CTXT_HSPLT_TYPE_S, + SXE2_RX_CTXT_HSPLT_TYPE_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->desc_type, + SXE2_RX_CTXT_DESC_TYPE_S, + SXE2_RX_CTXT_DESC_TYPE_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->crc_strip, SXE2_RX_CTXT_CRC_S, SXE2_RX_CTXT_CRC_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->l2tag1_show, + SXE2_RX_CTXT_L2TAG_FLAG_S, + SXE2_RX_CTXT_L2TAG_FLAG_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_0, + SXE2_RX_CTXT_HSPLT_0_S, + SXE2_RX_CTXT_HSPLT_0_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->hsplit_1, + SXE2_RX_CTXT_HSPLT_1_S, + SXE2_RX_CTXT_HSPLT_1_W); + value[SXE2_RX_CTXT2] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->inner_vlan_strip, + SXE2_RX_CTXT_INVALN_STP_S, + SXE2_RX_CTXT_INVALN_STP_W); + + value[SXE2_RX_CTXT3] = SXE2_CTXT_REG_VALUE(rxq_ctxt->lro_enable, + SXE2_RX_CTXT_LRO_ENABLE_S, + SXE2_RX_CTXT_LRO_ENABLE_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->cpuid, SXE2_RX_CTXT_CPUID_S, SXE2_RX_CTXT_CPUID_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->max_frame_size, + SXE2_RX_CTXT_MAX_FRAME_SIZE_S, + SXE2_RX_CTXT_MAX_FRAME_SIZE_W); + value[SXE2_RX_CTXT3] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->lro_desc_max, + SXE2_RX_CTXT_LRO_DESC_MAX_S, + SXE2_RX_CTXT_LRO_DESC_MAX_W); + + value[SXE2_RX_CTXT4] = SXE2_CTXT_REG_VALUE(rxq_ctxt->tphrdesc_enable, + SXE2_RX_CTXT_THPRDESC_ENABLE_S, + SXE2_RX_CTXT_THPRDESC_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphwdesc_enable, + SXE2_RX_CTXT_THPWDESC_ENABLE_S, + SXE2_RX_CTXT_THPWDESC_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphdata_enable, + SXE2_RX_CTXT_THPRDATA_ENABLE_S, + SXE2_RX_CTXT_THPRDATA_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->tphhead_enable, + SXE2_RX_CTXT_THPHEAD_ENABLE_S, + SXE2_RX_CTXT_THPHEAD_ENABLE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE(rxq_ctxt->low_desc_waterline, + SXE2_RX_CTXT_LOW_DESC_LINE_S, + SXE2_RX_CTXT_LOW_DESC_LINE_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->vfid, SXE2_RX_CTXT_VF_ID_S, SXE2_RX_CTXT_VF_ID_W); + value[SXE2_RX_CTXT4] |= SXE2_CTXT_REG_VALUE( + rxq_ctxt->pfid, SXE2_RX_CTXT_PF_ID_S, SXE2_RX_CTXT_PF_ID_W); + value[SXE2_RX_CTXT4] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->vfen, SXE2_RX_CTXT_VF_ENABLE_S, + SXE2_RX_CTXT_VF_ENABLE_W); + value[SXE2_RX_CTXT4] |= + SXE2_CTXT_REG_VALUE(rxq_ctxt->vsi_id, SXE2_RX_CTXT_VSI_ID_S, + SXE2_RX_CTXT_VSI_ID_W); + + for (i = 0; i < SXE2_RX_CTXT_CNT; i++) { + SXE2_REG_WRITE(hw, SXE2_RXQ_CTXT(i, rxq_idx), value[i]); + LOG_INFO_BDF("rxq:%u ctxt[%u]:0x%x.\n", rxq_idx, i, value[i]); + } +} + +void sxe2_hw_vf_irq_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq) +{ + u32 
value; + u16 idx; + + value = (((vf_irq->first_in_dev << SXE2_PFVP_INT_ALLOC_FIRST_S) & + SXE2_PFVP_INT_ALLOC_FIRST_M) | + ((vf_irq->last_in_dev << SXE2_PFVP_INT_ALLOC_LAST_S) & + SXE2_PFVP_INT_ALLOC_LAST_M) | + SXE2_PFVP_INT_ALLOC_VALID); + SXE2_REG_WRITE(hw, SXE2_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), value); + + value = (((vf_irq->first_in_dev << SXE2_PCI_PFVP_INT_ALLOC_FIRST_S) & + SXE2_PCI_PFVP_INT_ALLOC_FIRST_M) | + ((vf_irq->last_in_dev << SXE2_PCI_PFVP_INT_ALLOC_LAST_S) & + SXE2_PCI_PFVP_INT_ALLOC_LAST_M) | + SXE2_PCI_PFVP_INT_ALLOC_VALID); + SXE2_REG_WRITE(hw, SXE2_PCI_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), value); + + for (idx = vf_irq->first_in_pf; idx <= vf_irq->last_in_pf; idx++) { + value = (((vf_irq->vfid_in_dev << SXE2_PCIEPROC_INT2FUNC_VF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_VF_NUM_M) | + ((vf_irq->pf_id << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_PF_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_PCIEPROC_INT2FUNC(idx), value); + } + + SXE2_REG_WRITE(hw, SXE2_VSI_PF(vf_irq->vfid_in_dev), + SXE2_VSI_PF_EN_M | (vf_irq->pf_id & SXE2_VSI_PF_ID_M)); + + SXE2_REG_WRITE(hw, SXE2_MBX_CTL(vf_irq->vfid_in_dev), + SXE2_MBX_CTL_CAUSE_ENA_M | (vf_irq->first_in_pf & + SXE2_MBX_CTL_MSIX_INDX_M)); +} + +void sxe2_hw_vf_queue_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue) +{ + u32 reg; + + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_MAPENA(vf_queue->vfid_in_pf), + SXE2_VF_TXQ_MAPENA_M); + reg = (((vf_queue->txq_first_in_pf << SXE2_VF_TXQ_BASE_FIRST_Q_S) & + SXE2_VF_TXQ_BASE_FIRST_Q_M) | + (((vf_queue->txq_cnt - 1) << SXE2_VF_TXQ_BASE_Q_NUM_S) & + SXE2_VF_TXQ_BASE_Q_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_BASE(vf_queue->vfid_in_pf), reg); + + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_MAPENA(vf_queue->vfid_in_pf), + SXE2_VF_RXQ_MAPENA_M); + + reg = (((vf_queue->rxq_first_in_pf << SXE2_VF_RXQ_BASE_FIRST_Q_S) & + SXE2_VF_RXQ_BASE_FIRST_Q_M) | + (((vf_queue->rxq_cnt - 1) << SXE2_VF_RXQ_BASE_Q_NUM_S) & + SXE2_VF_RXQ_BASE_Q_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_BASE(vf_queue->vfid_in_pf), reg); +} + +void sxe2_hw_vf_irq_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq) +{ + u16 idx; + u32 value; + + SXE2_REG_WRITE(hw, SXE2_MBX_CTL(vf_irq->vfid_in_dev), 0); + + SXE2_REG_WRITE(hw, SXE2_VSI_PF(vf_irq->vfid_in_dev), 0); + + for (idx = vf_irq->first_in_pf; idx <= vf_irq->last_in_pf; idx++) { + value = (((1 << SXE2_PCIEPROC_INT2FUNC_IS_PF_S) & + SXE2_PCIEPROC_INT2FUNC_IS_PF_M) | + ((vf_irq->pf_id << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) & + SXE2_PCIEPROC_INT2FUNC_PF_NUM_M)); + SXE2_REG_WRITE(hw, SXE2_PCIEPROC_INT2FUNC(idx), value); + } + + SXE2_REG_WRITE(hw, SXE2_PCI_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_PFVP_INT_ALLOC(vf_irq->vfid_in_pf), 0); +} + +void sxe2_hw_vf_queue_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue) +{ + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_BASE(vf_queue->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_VF_RXQ_MAPENA(vf_queue->vfid_in_pf), 0); + + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_BASE(vf_queue->vfid_in_pf), 0); + SXE2_REG_WRITE(hw, SXE2_VF_TXQ_MAPENA(vf_queue->vfid_in_pf), 0); +} + +s32 sxe2_hw_rxq_status_check(struct sxe2_hw *hw, u32 reg_idx, bool enable) +{ + struct sxe2_adapter *adapter = hw->adapter; + s32 ret; + u8 i; + u32 value; + + (void)adapter; + + for (i = 0; i < SXE2_QUEUE_WAIT_RETRY_CNT; i++) { + value = SXE2_REG_READ(hw, SXE2_RXQ_CTRL(reg_idx)); + if ((enable == !!(value & SXE2_RXQ_CTRL_STATUS_ACTIVE)) || + (value == SXE2_REG_INVALID_VALUE)) { + ret = 0; + LOG_DEBUG_BDF("rxq[%u] %s done.\n", reg_idx, + enable ? 
"enable" : "disable"); + goto l_out; + } + + usleep_range(20, 40); + } + + ret = -SXE2_HW_ERR_TIMEDOUT; + +l_out: + return ret; +} + +s32 sxe2_hw_rxq_ctrl(struct sxe2_hw *hw, u16 reg_idx, bool enable, bool wait, + bool cde) +{ + s32 ret = 0; + u32 ctrl_reg = SXE2_REG_READ(hw, SXE2_RXQ_CTRL(reg_idx)); + u32 value; + + if (enable == !!(ctrl_reg & SXE2_RXQ_CTRL_STATUS_ACTIVE)) { + LOG_WARN("rxq idx:%u status:%u already.\n", reg_idx, enable); + return ret; + } + + if (enable) { + value = cde ? (SXE2_RXQ_CTRL_ENABLED | SXE2_RXQ_CTRL_CDE_ENABLE) + : SXE2_RXQ_CTRL_ENABLED; + ctrl_reg |= value; + } else { + ctrl_reg &= ~(SXE2_RXQ_CTRL_ENABLED | SXE2_RXQ_CTRL_CDE_ENABLE); + } + + SXE2_REG_WRITE(hw, SXE2_RXQ_CTRL(reg_idx), ctrl_reg); + + if (!wait) + return ret; + + sxe2_flush(hw); + ret = sxe2_hw_rxq_status_check(hw, reg_idx, enable); + + sxe2_flush(hw); + + return ret; +} + +u32 sxe2_fw_state_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_STATE); +} + +u32 sxe2_fw_ver_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_VER); +} + +u32 sxe2_fw_comp_ver_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_COMP_VER_ADDR); +} + +u32 sxe2_fw_mode_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_MISC) & SXE2_FW_MISC_MODE_M; +} + +u32 sxe2_fw_pop_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_MISC) & SXE2_FW_MISC_POP_M; +} + +void sxe2_hw_l2tag_accept(struct sxe2_hw *hw, u16 vsi_hw_id) +{ + u32 val; + + val = (SXE2_PFP_L2TAGSEN_ALL_TAG << SXE2_VSI_TAR_UNTAGGED_SHIFT) | + SXE2_PFP_L2TAGSEN_ALL_TAG; + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); +} + +static s32 sxe2_tpid_to_bits(bool pvlan_exist, bool is_strip, u16 tpid, u32 *bits) +{ + if (!pvlan_exist) { + switch (tpid) { + case ETH_P_8021Q: + *bits = is_strip ? SXE2_VSI_TSR_ID_OUT_VLAN1 + : SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1; + break; + case ETH_P_8021AD: + *bits = is_strip ? SXE2_VSI_TSR_ID_STAG + : SXE2_VSI_L2TAGSTXVALID_ID_STAG; + break; + case ETH_P_QINQ1: + *bits = is_strip ? SXE2_VSI_TSR_ID_OUT_VLAN2 + : SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2; + break; + default: + return -SXE2_HW_ERR_INVAL; + } + } else { + if (tpid != ETH_P_8021Q) + return -SXE2_HW_ERR_INVAL; + *bits = is_strip ? 
SXE2_VSI_TSR_ID_VLAN + : SXE2_VSI_L2TAGSTXVALID_ID_VLAN; + } + + return 0; +} + +s32 sxe2_hw_desc_vlan_param_check(bool pvlan_exist, bool is_strip, u16 tpid) +{ + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, is_strip, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + return 0; +} + +s32 sxe2_hw_desc_vlan_strip_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, + bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, true, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TSR(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_TSR_STRIP_TAG_S) | + (bits << SXE2_VSI_TSR_SHOW_TAG_S); + else + val &= ~((bits << SXE2_VSI_TSR_STRIP_TAG_S) | + (bits << SXE2_VSI_TSR_SHOW_TAG_S)); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_desc_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, + bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, false, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID; + else + val &= ~((SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_M + << SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_desc_outer_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, + u16 tpid, bool pvlan_exist, bool en) +{ + u32 val; + u32 bits = 0; + + if (sxe2_tpid_to_bits(pvlan_exist, false, tpid, &bits)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id)); + if (en) + val |= (bits << SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID; + else + val &= ~((SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_M + << SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S) | + SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + + return 0; +} + +s32 sxe2_hw_port_vlan_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 vlan_info, + u16 tpid) +{ + u32 val; + u32 strip_bits = 0; + u32 insert_bits = 0; + + if (vlan_info) { + if (sxe2_tpid_to_bits(false, true, tpid, &strip_bits)) + return -SXE2_HW_ERR_INVAL; + if (sxe2_tpid_to_bits(false, false, tpid, &insert_bits)) + return -SXE2_HW_ERR_INVAL; + + val = strip_bits << SXE2_VSI_TSR_STRIP_TAG_S; + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), val); + + val = (insert_bits << SXE2_VSI_L2TAGSTXVALID_TIR0_ID_S) | + SXE2_VSI_L2TAGSTXVALID_TIR0_VALID; + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), val); + } else { + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_hw_id), 0); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi_hw_id), 0); + } + + sxe2_write_reg(hw, SXE2_VSI_TIR0(vsi_hw_id), vlan_info); + + return 0; +} + +s32 sxe2_hw_port_inner_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, + bool acceptedtagged, + bool accepteduntagged) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TAR(vsi_hw_id)); + if (acceptedtagged) + val |= (BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_TAGGED_S); + else + val &= ~(BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_TAGGED_S); + + if (accepteduntagged) + val |= (BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_UNTAGGED_S); + else + val &= ~(BIT(SXE2_VSI_L2TAGSTXVALID_ID_VLAN) + << SXE2_ACCEPT_RULE_UNTAGGED_S); + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); + + return 0; +} + +void 
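/* toggle per-VSI RX VLAN pruning in the RX switch control register */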
sxe2_hw_rx_vlan_filter_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_RX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE; + else + val &= ~SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE; + sxe2_write_reg(hw, SXE2_VSI_RX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_loopback_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN; + else + val &= ~SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_mac_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_MACAS_EN; + else + val &= ~SXE2_VSI_TX_SW_CTRL_MACAS_EN; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +void sxe2_hw_vsi_vlan_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id)); + if (en) + val |= SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE; + else + val &= ~SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE; + sxe2_write_reg(hw, SXE2_VSI_TX_SWITCH_CTRL(vsi_hw_id), val); +} + +u32 sxe2_hw_fw_irq_cause_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg_valid(hw, SXE2_PF_INT_FW_EVENT, 0); +} + +s32 sxe2_hw_corer_irq_cause_get(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + + if (val == SXE2_REG_INVALID_VALUE) + return 0; + if (val & SXE2_PCIE_SYS_READY_CORER_ASSERT) + pci_write_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, + SXE2_PCIE_SYS_READY_CORER_ASSERT); +#else + u32 val_wr; + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); + if (val & SXE2_PCIE_SYS_READY_CORER_ASSERT) { + val_wr = SXE2_PCIE_SYS_READY_CORER_ASSERT; + (void)rte_pci_write_config(SXE2_HW_PCI_DEV(adapter), &val_wr, + sizeof(val_wr), SXE2_PCIE_SYS_READY); + } +#endif + + return val & SXE2_PCIE_SYS_READY_CORER_ASSERT; +} + +void sxe2_hw_trigger_pfr(struct sxe2_hw *hw) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_PFGEN_CTRL); + + sxe2_write_reg(hw, SXE2_PFGEN_CTRL, (reg | SXE2_PFGEN_CTRL_PFSWR)); +} + +s32 sxe2_hw_pfr_done(struct sxe2_hw *hw) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_PFGEN_CTRL); + if (val == SXE2_REG_INVALID_VALUE) { + if (!sxe2_hw_stop_drop_done(hw)) + return -EBUSY; + + return 0; + } + + return !(val & SXE2_PFGEN_CTRL_PFSWR); +} + +void sxe2_hw_trigger_corer(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, SXE2_TOP_CFG_CORE, SXE2_TOP_CFG_CORE_RST_CODE); +} + +s32 sxe2_hw_corer_done(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + + if (val == SXE2_REG_INVALID_VALUE) + return 0; +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val & SXE2_PCIE_SYS_READY_R5; +} + +void sxe2_hw_stop_drop(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + pci_write_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, + (val | SXE2_PCIE_SYS_READY_STOP_DROP)); +#else + 
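/* DPDK build: PCI config space is accessed via the rte_pci_* helpers */ +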
rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); + val = val | SXE2_PCIE_SYS_READY_STOP_DROP; + (void)rte_pci_write_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif +} + +s32 sxe2_hw_stop_drop_done(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; + +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + if (val == SXE2_REG_INVALID_VALUE) + return 0; +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val & SXE2_PCIE_SYS_READY_STOP_DROP_DONE; +} + +u32 sxe2_hw_read_pcie_sys_ready(struct sxe2_hw *hw) +{ + u32 val; + struct sxe2_adapter *adapter = (struct sxe2_adapter *)hw->adapter; +#ifndef SXE2_DPDK_DRIVER + pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); +#else + rte_pci_read_config(SXE2_HW_PCI_DEV(adapter), &val, sizeof(val), + SXE2_PCIE_SYS_READY); +#endif + return val; +} + +u32 sxe2_hw_heartbeat_get(struct sxe2_hw *hw) +{ + return sxe2_read_reg(hw, SXE2_FW_HEARTBEAT); +} + +void sxe2_hw_trigger_vfr(struct sxe2_hw *hw, u16 vf_id) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_VFGEN_CTRL(vf_id)); + + sxe2_write_reg(hw, SXE2_VFGEN_CTRL(vf_id), (reg | SXE2_VFGEN_CTRL_VFSWR)); + + LOG_INFO("vf:%d vfr triggered.\n", vf_id); +} + +u32 sxe2_hw_vfr_done(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + struct sxe2_adapter *adapter = hw->adapter; + +#ifdef SXE2_DPDK_DRIVER + UNUSED(adapter); +#endif + + val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + if (val == SXE2_REG_INVALID_VALUE) { + LOG_WARN_BDF("core reset or pfr detected.\n"); + return SXE2_REG_UNACCESS; + } + + return (val & SXE2_VF_VRC_VFGEN_VFRSTAT_COMPLETE); +} + +void sxe2_hw_vf_active(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + u32 reg_val; + struct sxe2_adapter *adapter = hw->adapter; + +#ifdef SXE2_DPDK_DRIVER + UNUSED(adapter); +#endif + + reg_val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + + val = FIELD_PREP(SXE2_VF_VRC_VFGEN_VFRSTAT, + SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE) | + SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK | reg_val; + sxe2_write_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id), val); + + LOG_INFO_BDF("vf_id:%u activated 0x%x.\n", vf_id, + sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id))); +} + +void sxe2_hw_vf_deactive(struct sxe2_hw *hw, u16 vf_id) +{ + u32 val; + + val = sxe2_read_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id)); + val &= ~(SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE); + sxe2_write_reg(hw, SXE2_VF_VRC_VFGEN_RSTAT(vf_id), val); +} + +bool sxe2_hw_vflr_cause_get(struct sxe2_hw *hw, u16 vf_id_in_dev) +{ + u32 val; + u32 reg_idx, bit_idx; + + reg_idx = vf_id_in_dev / 32; + bit_idx = vf_id_in_dev % 32; + + val = sxe2_read_reg(hw, SXE2_GLGEN_VFLRSTAT(reg_idx)); + return !!(val & BIT(bit_idx)); +} + +void sxe2_hw_vflr_cause_clear(struct sxe2_hw *hw, u16 vf_id_in_dev) +{ + u32 reg_idx, bit_idx; + + reg_idx = vf_id_in_dev / 32; + bit_idx = vf_id_in_dev % 32; + + sxe2_write_reg(hw, (u32)SXE2_GLGEN_VFLRSTAT(reg_idx), (u32)BIT(bit_idx)); +} + +s32 sxe2_hw_port_outer_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, + u16 tpid, bool acceptedtagged, + bool accepteduntagged) +{ + u32 val; + u32 tag_id = 0; + + if (sxe2_tpid_to_bits(false, false, tpid, &tag_id)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TAR(vsi_hw_id)); + if (acceptedtagged) + val |= (u32)(BIT(tag_id) << SXE2_ACCEPT_RULE_TAGGED_S); + else + val &= (u32)(~(BIT(tag_id) << 
SXE2_ACCEPT_RULE_TAGGED_S)); + + if (accepteduntagged) + val |= (u32)(BIT(tag_id) << SXE2_ACCEPT_RULE_UNTAGGED_S); + else + val &= (u32)(~(BIT(tag_id) << SXE2_ACCEPT_RULE_UNTAGGED_S)); + + sxe2_write_reg(hw, SXE2_VSI_TAR(vsi_hw_id), val); + + return 0; +} + +void sxe2_hw_ptp_main_enable(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, GLTSYN, GLTSYN_ENA_M); +} + +void sxe2_hw_ptp_main_disable(struct sxe2_hw *hw) +{ + u32 val = sxe2_read_reg(hw, GLTSYN); + + val &= ~GLTSYN_ENA_M; + sxe2_write_reg(hw, GLTSYN, val); +} + +bool sxe2_hw_ptp_main_is_enabled(struct sxe2_hw *hw) +{ + u32 val = sxe2_read_reg(hw, GLTSYN); + + if (val & GLTSYN_ENA_M) + return true; + + return false; +} + +void sxe2_hw_ptp_init_incval(struct sxe2_hw *hw, u64 incval) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, upper_32_bits(incval)); + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, lower_32_bits(incval)); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_INIT_INCVAL); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_tsyn_switch(struct sxe2_hw *hw, bool on) +{ + u32 reg; + + reg = sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); + if (on) + reg |= SXE2_PF_INT_OICR_TSYN_TX; + else + reg &= ~SXE2_PF_INT_OICR_TSYN_TX; + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, reg); +} + +void sxe2_hw_ptp_tsyn_event_switch(struct sxe2_hw *hw, bool on) +{ + u32 oicr_value; + + oicr_value = sxe2_read_reg(hw, SXE2_PF_INT_OICR_ENABLE); + if (on) + oicr_value |= SXE2_PF_INT_OICR_TSYN_EVENT; + else + oicr_value &= ~SXE2_PF_INT_OICR_TSYN_EVENT; + sxe2_write_reg(hw, SXE2_PF_INT_OICR_ENABLE, oicr_value); +} + +void sxe2_hw_ptp_aux_in_set(struct sxe2_hw *hw, u32 index, u32 value) +{ + sxe2_write_reg(hw, (u32)GLTSYN_AUXIN(index), value); +} + +u64 sxe2_hw_ptp_get_event_second(struct sxe2_hw *hw, u32 index) +{ + u32 lo; + u32 hi; + + lo = sxe2_read_reg(hw, (u32)GLTSYN_EVENT_S_L(index)); + hi = sxe2_read_reg(hw, (u32)GLTSYN_EVENT_S_H(index)); + + return (((u64)(hi & GLTSYN_EVENT_S_H_MASK)) << 32) | lo; +} + +u64 sxe2_hw_ptp_get_event_nanosecond(struct sxe2_hw *hw, u32 index) +{ + return sxe2_read_reg(hw, (u32)GLTSYN_EVENT_NS(index)); +} + +static bool sxe2_hw_ptp_tx_tstamp_valid(struct sxe2_hw *hw, u8 port_id, u32 index) +{ + u32 val; + + val = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32)); + if (!(val & BIT(index % 32))) + return false; + + return true; +} + +static bool sxe2_hw_ptp_mac_tx_tstamp_valid(struct sxe2_hw *hw, u8 phy_id, u32 index) +{ + u32 val; + + val = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32)); + if (!(val & BIT(index % 32))) + return false; + + return true; +} + +#define SXE2_TSTAMP_TX_HI_SHIFT (24) +#define SXE2_TSTAMP_TX_LO_SHIFT (8) +bool sxe2_hw_ptp_tx_tstamp_read(struct sxe2_hw *hw, u8 port_id, u32 index, + u64 *timestamp) +{ + u32 lo; + u32 hi; + + if (!sxe2_hw_ptp_tx_tstamp_valid(hw, port_id, index)) + return false; + + hi = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TXHI(port_id, index)); + lo = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TXLO(port_id, index)); + + *timestamp = ((u64)hi << SXE2_TSTAMP_TX_HI_SHIFT | + (u64)lo >> SXE2_TSTAMP_TX_LO_SHIFT); + return true; +} + +bool sxe2_hw_ptp_mac_tx_tstamp_read(struct sxe2_hw *hw, u8 phy_id, u8 index, + u64 *timestamp) +{ + u32 lo; + u32 hi; + + if (!sxe2_hw_ptp_mac_tx_tstamp_valid(hw, phy_id, index)) + return false; + + lo = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TXLO(phy_id, index)); + hi = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TXHI(phy_id, index)) & 0x7F; + *timestamp = ((u64)hi << 24 | (u64)lo >> 8); + return true; +} + +void 
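/* drop a latched TX timestamp by clearing its valid bit in the TSMEM bitmap */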
sxe2_hw_ptp_tx_tstamp_discard(struct sxe2_hw *hw, u8 port_id, u32 index) +{ + u32 value = sxe2_read_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32)); + + value &= (u32)~BIT(index % 32); + sxe2_write_reg(hw, (u32)PFP_CGM_TX_TSMEM(port_id, index / 32), value); +} + +void sxe2_hw_ptp_mac_tx_tstamp_discard(struct sxe2_hw *hw, u8 phy_id, u32 index) +{ + u32 value = sxe2_read_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32)); + + value &= (u32)(~(1ULL << (index % 32))); + sxe2_write_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, index / 32), value); +} + +void sxe2_hw_ptp_mac_tx_tstamp_clear_all(struct sxe2_hw *hw, u8 phy_id, u32 reg_idx) +{ + sxe2_write_reg(hw, PFP_CGM_MAC_TX_TSMEM(phy_id, reg_idx), 0); +} + +bool sxe2_hw_ptp_acquire_1588_lock(struct sxe2_hw *hw) +{ + u32 value; + + value = sxe2_read_reg(hw, GLTSYN_SEM); + value = value & GLTSYN_SEM_BUSY_M; + return !(!!value); +} + +void sxe2_hw_ptp_release_1588_lock(struct sxe2_hw *hw) +{ + sxe2_write_reg(hw, GLTSYN_SEM, 0); +} + +void sxe2_hw_ptp_1588_timestamp_read(struct sxe2_hw *hw, u64 *second, + u64 *nanosecond) +{ + u32 sh_hi; + u32 sh_lo; + u32 sh_ns; + u32 sh_ns2; + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_LATCHING_SHTIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); + sh_ns = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + sh_hi = sxe2_read_reg(hw, GLTSYN_SHTIME_S_H); + sh_lo = sxe2_read_reg(hw, GLTSYN_SHTIME_S_L); + sh_ns2 = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + + if (sh_ns != sh_ns2) { + sh_hi = sxe2_read_reg(hw, GLTSYN_SHTIME_S_H); + sh_lo = sxe2_read_reg(hw, GLTSYN_SHTIME_S_L); + sh_ns = sxe2_read_reg(hw, GLTSYN_SHTIME_NS); + } + *nanosecond = sh_ns; + + *second = (((u64)(sh_hi & 0xFFFF)) << 32) | sh_lo; +} + +void sxe2_hw_ptp_1588_timestamp_write(struct sxe2_hw *hw, u64 second, u32 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_SHTIME_S_H, upper_32_bits(second)); + sxe2_write_reg(hw, GLTSYN_SHTIME_S_L, lower_32_bits(second)); + sxe2_write_reg(hw, GLTSYN_SHTIME_NS, nanosecond); + sxe2_write_reg(hw, GLTSYN_SHTIME_SUBNS, 0); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_INIT_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_1588_clockout_write(struct sxe2_hw *hw, u32 index, u64 period, + u64 second, u64 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_CLKO(index), (u32)period); + sxe2_write_reg(hw, GLTSYN_TGT_NS(index), (u32)nanosecond); + + sxe2_write_reg(hw, GLTSYN_TGT_S_L(index), (u32)second); + sxe2_write_reg(hw, GLTSYN_TGT_S_H(index), second >> 32); +} + +u32 sxe2_hw_ptp_auxout_get(struct sxe2_hw *hw, u32 index) +{ + return sxe2_read_reg(hw, GLTSYN_AUXOUT(index)); +} + +void sxe2_hw_ptp_auxout_set(struct sxe2_hw *hw, u32 index, u32 value) +{ + sxe2_write_reg(hw, GLTSYN_AUXOUT(index), value); +} + +#define PTP_GLTSYN_SHADJ_NS_POS (0x3fffffff) +#define PTP_GLTSYN_SHADJ_NS_NEG (0x80000000) +void sxe2_hw_ptp_1588_timestamp_adjust(struct sxe2_hw *hw, u32 nanosecond, bool neg) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, 0); + if (!neg) + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, + nanosecond & PTP_GLTSYN_SHADJ_NS_POS); + else + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, + nanosecond | PTP_GLTSYN_SHADJ_NS_NEG); + + sxe2_write_reg(hw, GLTSYN_CMD, GLTSYN_CMD_ADJ_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +void sxe2_hw_ptp_1588_timestamp_adjust_at_time(struct sxe2_hw *hw, u32 nanosecond) +{ + sxe2_write_reg(hw, GLTSYN_SHADJ_SUBNS, 0); + sxe2_write_reg(hw, GLTSYN_SHADJ_NS, nanosecond); + sxe2_write_reg(hw, GLTSYN_CMD, 
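/* presumably latches the SHADJ offset at the programmed target time, per the command name */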
GLTSYN_CMD_ADJ_TIME_AT_TIME); + sxe2_write_reg(hw, GLTSYN_SYNC, GLTSYN_SYNC_EXEC | GLTSYN_SYNC_GEN_PULSE); +} + +s32 sxe2_hw_ptp_stat_get(struct sxe2_hw *hw) +{ + return (s32)sxe2_read_reg(hw, GLTSYN_STAT); +} + +#define SXE2_IPSEC_RX_SPI_TBL (0x1) +#define SXE2_IPSEC_RX_KEY_TBL (0x2) + +void sxe2_hw_ipsec_tcam_clear(struct sxe2_hw *hw, u32 sa_index) +{ + u32 val = 0; + + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSIPID_ADDR, 0); + + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSSPI0_ADDR, 0); + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSSPI0_ADDR, + 0 ^ SXE2_IPSEC_RX_IPSSPI1_SPI_Y_MASK); + + val = (SXE2_IPSEC_RX_SPI_TBL << SXE2_IPSEC_RX_IPSIDX_TABLE_SHIFT) & + SXE2_IPSEC_RX_IPSIDX_TABLE_MASK; + val &= ~(BIT(SXE2_IPSEC_RX_IPSIDX_VBI_SHIFT)); + val |= (sa_index << SXE2_IPSEC_RX_IPSIDX_SA_IDX_SHIFT) & + SXE2_IPSEC_RX_IPSIDX_SA_IDX_MASK; + val |= BIT(SXE2_IPSEC_RX_IPSIDX_SWRITE_SHIFT); + sxe2_write_reg(hw, SXE2_IPSEC_RX_IPSIDX_ADDR, val); +} + +STATIC void sxe2_stat_update32(struct sxe2_hw *hw, u32 reg, u64 *prev_stat, + u64 *cur_stat, bool prev_stat_loaded) +{ + u32 new_data; + + new_data = sxe2_read_reg(hw, reg); + + if (new_data == BIT_ULL(32) - 1) + goto l_end; + + if (!prev_stat_loaded) { + *prev_stat = new_data; + return; + } + + if (new_data >= *prev_stat) + *cur_stat += new_data - *prev_stat; + else + *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; + + *prev_stat = new_data; + +l_end: + return; +} + +void sxe2_hw_pause_stats_update(struct sxe2_hw *hw, u8 port_idx, bool prev_loaded, + struct sxe2_pause_stats *cur, + struct sxe2_pause_stats *prev) +{ + u32 i; + + for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) { + sxe2_stat_update32(hw, SXE2_TXPFCXONFRAMES_LO(port_idx, i), + &prev->prio_xon_tx[i], &cur->prio_xon_tx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_TXPFCXOFFFRAMES_LO(port_idx, i), + &prev->prio_xoff_tx[i], &cur->prio_xoff_tx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_TXPFCXONTOXOFFFRAMES_LO(port_idx, i), + &prev->prio_xon_2_xoff[i], + &cur->prio_xon_2_xoff[i], prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPFCXONFRAMES_LO(port_idx, i), + &prev->prio_xon_rx[i], &cur->prio_xon_rx[i], + prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPFCXOFFFRAMES_LO(port_idx, i), + &prev->prio_xoff_rx[i], &cur->prio_xoff_rx[i], + prev_loaded); + } + + sxe2_stat_update32(hw, SXE2_TXPAUSEXOFFFRAMES_LO(port_idx), &prev->tx_pause, + &cur->tx_pause, prev_loaded); + + sxe2_stat_update32(hw, SXE2_RXPAUSEXOFFFRAMES_LO(port_idx), &prev->rx_pause, + &cur->rx_pause, prev_loaded); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..bddd7024b4d634708b17ccf3c0945709811b613b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_hw.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_hw.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HW_H__ +#define __SXE2_HW_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_osal.h" + +#ifndef IEEE_8021Q_MAX_PRIORITIES +#define IEEE_8021Q_MAX_PRIORITIES 8 +#endif +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif + +#else +#include <linux/types.h> /* assumed: original include targets lost in extraction */ +#include <linux/if_ether.h> /* assumed */ +#include <linux/io.h> /* assumed */ +#include "sxe2_cmd_channel.h" +#include "sxe2_mbx_public.h" +#endif + +#include "sxe2_cmd.h" +#include "sxe2_host_regs.h" + +#define SXE2_BAR_RDMA_WB_START 0x03F0000 +#define SXE2_BAR_RDMA_WB_END 0x0BFFFFF + +#define SXE2_REG_INVALID_VALUE 0xffffffffU +#define SXE2_REG_RETRY_CNT 5 + +#define SXE2_FW_STATE_MASK 0xF0000 +#define SXE2_FW_STATE_FINISH 0x20000 +#define SXE2_FW_STATE_ABNORMAL 0x30000 + +#define SXE2_REG_UNACCESS (2) +#define sxe2_flush(hw) ((void)sxe2_read_reg((hw), SXE2_STATUS)) +#define SXE2_REG_READ(hw, addr) sxe2_read_reg(hw, addr) +#define SXE2_REG_WRITE(hw, reg, value) sxe2_write_reg(hw, reg, value) + +struct sxe2_adapter; + +enum sxe2_hw_err_code { + SXE2_HW_ERR_SUCCESS = 0, + SXE2_HW_ERR_FAULT, + SXE2_HW_ERR_TIMEDOUT, + SXE2_HW_ERR_IO, + SXE2_HW_ERR_INVAL, +}; + +struct sxe2_hw_cfg { + u16 itr_gran; + u16 credit_interval_gran; +}; + +struct sxe2_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct sxe2_map_info { + void __iomem *addr; + resource_size_t start; + resource_size_t end; + u32 bar_idx; +}; + +struct sxe2_hw_map { + u32 map_cnt; + struct sxe2_map_info maps[]; +}; + +struct sxe2_hw { + u8 *hw_map; + struct sxe2_hw_cfg hw_cfg; + void *adapter; + struct sxe2_mac_info mac_info; + + u8 *pkg_copy; + u32 pkg_size; + +#ifndef SXE2_DPDK_DRIVER + u32 (*reg_read)(const __iomem void *reg); + void (*reg_write)(u32 value, __iomem void *reg); + struct sxe2_fw_ver_msg fw_ver; +#endif + bool is_pop_type; +}; + +struct sxe2_hw_vf_irq { + u16 first_in_pf; + u16 last_in_pf; + u16 first_in_dev; + u16 last_in_dev; + u16 vfid_in_pf; + u16 vfid_in_dev; + u16 pf_id; +}; + +struct sxe2_hw_vf_queue { + u16 txq_first_in_pf; + u16 txq_cnt; + u16 rxq_first_in_pf; + u16 rxq_cnt; + u16 vfid_in_pf; +}; + +struct sxe2_pause_stats { + __le64 prio_xoff_rx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_rx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_tx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xoff_tx[IEEE_8021Q_MAX_PRIORITIES]; + __le64 prio_xon_2_xoff[IEEE_8021Q_MAX_PRIORITIES]; + __le64 rx_pause; + __le64 tx_pause; +}; + +void sxe2_hw_pause_stats_update(struct sxe2_hw *hw, u8 port_idx, bool prev_loaded, struct sxe2_pause_stats *cur, + struct sxe2_pause_stats *prev); + +void __iomem *sxe2_reg_addr_get(struct sxe2_hw *hw, resource_size_t reg); + +#ifndef SXE2_DPDK_DRIVER +static inline void sxe2_hw_reg_handle_init(struct sxe2_hw *hw, u32 (*read)(const __iomem void *), + void (*write)(u32, __iomem void *)) +{ + hw->reg_read = read; + hw->reg_write = write; +} +#endif +void sxe2_write_reg(struct sxe2_hw *hw, u32 reg, u32 value); + +u32 sxe2_read_reg(struct sxe2_hw *hw, u32 reg); + +u64 sxe2_read_reg64(struct sxe2_hw *hw, u32 reg); + +bool sxe2_hw_is_fault(struct sxe2_hw *hw); + +u32 sxe2_hw_read_pcie_sys_ready(struct sxe2_hw *hw); + +u32 sxe2_hw_evt_irq_cause_get(struct sxe2_hw *hw); + +void sxe2_hw_irq_enable(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_disable(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_trigger(struct sxe2_hw *hw, u16 irq_idx); + +void sxe2_hw_irq_dyn_ctl(struct sxe2_hw *hw, u16 irq_idx, u32 value); + +void sxe2_hw_irq_itr_set(struct sxe2_hw *hw, u16 irq_idx, u16 itr_idx, u16 
interval); + +void sxe2_hw_irq_rate_limit_set(struct sxe2_hw *hw, u16 irq_idx, u16 rate_limit); + +u32 sxe2_hw_irq_gran_info_get(struct sxe2_hw *hw); + +void sxe2_hw_txq_irq_cause_setup(struct sxe2_hw *hw, u16 txq_idx, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_txq_irq_cause_clear(struct sxe2_hw *hw, u16 txq_idx); + +void sxe2_hw_txq_irq_cause_switch(struct sxe2_hw *hw, u16 txq_idx, bool enable); +void sxe2_hw_rxq_irq_cause_setup(struct sxe2_hw *hw, u16 rxq_idx, u16 itr_idx, u16 irq_idx); +void sxe2_hw_rxq_irq_idx_change(struct sxe2_hw *hw, u16 rxq_idx, u16 irq_idx); + +void sxe2_hw_rxq_irq_cause_clear(struct sxe2_hw *hw, u16 rxq_idx); + +void sxe2_hw_rxq_irq_cause_switch(struct sxe2_hw *hw, u16 rxq_idx, bool enable); + +void sxe2_hw_evt_irq_cfg(struct sxe2_hw *hw, u32 value, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_fwq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_mbxq_irq_cfg(struct sxe2_hw *hw, u16 itr_idx, u16 irq_idx); + +void sxe2_hw_evt_irq_clear(struct sxe2_hw *hw); + +u32 sxe2_hw_evt_irq_mask_get(struct sxe2_hw *hw); + +void sxe2_hw_fwq_irq_clear(struct sxe2_hw *hw); + +void sxe2_hw_mbxq_irq_clear(struct sxe2_hw *hw); + +struct sxe2_hw_rxq_ctxt { + u64 base_addr; + u16 depth; + + u16 dbuff_len; + u16 hbuff_len; + u8 hsplit_type; + u8 desc_type; + u8 crc_strip; + u8 l2tag1_show; + u8 hsplit_0; + u8 hsplit_1; + u8 inner_vlan_strip; + + u8 lro_enable; + u8 cpuid; + u16 max_frame_size; + u16 lro_desc_max; + u8 relax_data; + u8 relax_wb_desc; + u8 relax_rd_desc; + + u8 tphrdesc_enable; + u8 tphwdesc_enable; + u8 tphdata_enable; + u8 tphhead_enable; + u8 low_desc_waterline; + u16 vfid; + u8 pfid; + + u8 vfen; + u16 vsi_id; + + u8 pref_enable; + u16 head; +}; + +s32 sxe2_hw_fw_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_fw_tq_disable(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_tq_is_idle(struct sxe2_hw *hw); + +void sxe2_hw_fw_tq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_fw_tq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_fw_tq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_fw_rq_disable(struct sxe2_hw *hw); + +s32 sxe2_hw_fw_rq_is_idle(struct sxe2_hw *hw); + +void sxe2_hw_fw_rq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_fw_rq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_fw_rq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_mbx_tq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_mbx_tq_disable(struct sxe2_hw *hw); + +void sxe2_hw_mbx_tq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_mbx_tq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_mbx_tq_get_error(struct sxe2_hw *hw); + +s32 sxe2_hw_mbx_rq_enable(struct sxe2_hw *hw, u16 depth, dma_addr_t addr); + +void sxe2_hw_mbx_rq_disable(struct sxe2_hw *hw); + +void sxe2_hw_mbx_rq_write_tail(struct sxe2_hw *hw, u32 value); + +u32 sxe2_hw_mbx_rq_read_head(struct sxe2_hw *hw); + +u32 sxe2_hw_mbx_rq_get_error(struct sxe2_hw *hw); + +void sxe2_hw_rxq_ctxt_cfg(struct sxe2_hw *hw, struct sxe2_hw_rxq_ctxt *rxq_ctxt, u16 rxq_idx); + +s32 sxe2_hw_rxq_ctrl(struct sxe2_hw *hw, u16 reg_idx, bool enable, bool wait, bool cde); + +s32 sxe2_hw_rxq_status_check(struct sxe2_hw *hw, u32 reg_idx, bool enable); + +u32 sxe2_fw_state_get(struct sxe2_hw *hw); + +u32 sxe2_fw_ver_get(struct sxe2_hw *hw); + +u32 sxe2_fw_comp_ver_get(struct sxe2_hw *hw); + +u32 sxe2_fw_mode_get(struct sxe2_hw *hw); +u32 
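/* masks the POP bit out of SXE2_FW_MISC */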
sxe2_fw_pop_get(struct sxe2_hw *hw); + +void sxe2_hw_l2tag_accept(struct sxe2_hw *hw, u16 vsi_hw_id); + +s32 sxe2_hw_desc_vlan_param_check(bool pvlan_exist, bool is_strip, u16 tpid); + +s32 sxe2_hw_desc_vlan_strip_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_desc_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_port_vlan_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 vlan_info, u16 tpid); + +void sxe2_hw_rx_vlan_filter_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_loopback_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_mac_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +void sxe2_hw_vsi_vlan_spoofchk_switch(struct sxe2_hw *hw, u16 vsi_hw_id, bool en); + +u32 sxe2_hw_fw_irq_cause_get(struct sxe2_hw *hw); + +s32 sxe2_hw_corer_irq_cause_get(struct sxe2_hw *hw); + +void sxe2_hw_trigger_pfr(struct sxe2_hw *hw); + +s32 sxe2_hw_pfr_done(struct sxe2_hw *hw); + +void sxe2_hw_trigger_corer(struct sxe2_hw *hw); + +s32 sxe2_hw_corer_done(struct sxe2_hw *hw); + +void sxe2_hw_stop_drop(struct sxe2_hw *hw); + +s32 sxe2_hw_stop_drop_done(struct sxe2_hw *hw); + +u32 sxe2_hw_heartbeat_get(struct sxe2_hw *hw); + +void sxe2_hw_trigger_vfr(struct sxe2_hw *hw, u16 vf_id); + +u32 sxe2_hw_vfr_done(struct sxe2_hw *hw, u16 vf_id); + +void sxe2_hw_vf_active(struct sxe2_hw *hw, u16 vf_id); + +void sxe2_hw_vf_deactive(struct sxe2_hw *hw, u16 vf_id); + +bool sxe2_hw_vflr_cause_get(struct sxe2_hw *hw, u16 vf_id_in_dev); + +void sxe2_hw_vflr_cause_clear(struct sxe2_hw *hw, u16 vf_id_in_dev); + +s32 sxe2_hw_desc_outer_vlan_insert_switch(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool pvlan_exist, bool en); + +s32 sxe2_hw_port_inner_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, bool acceptedtagged, + bool accepteduntagged); + +s32 sxe2_hw_port_outer_vlan_acceptrule_setup(struct sxe2_hw *hw, u16 vsi_hw_id, u16 tpid, bool acceptedtagged, + bool accepteduntagged); + +void sxe2_hw_vf_irq_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq); + +void sxe2_hw_vf_queue_cfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue); + +void sxe2_hw_ptp_main_enable(struct sxe2_hw *hw); + +void sxe2_hw_ptp_main_disable(struct sxe2_hw *hw); + +bool sxe2_hw_ptp_main_is_enabled(struct sxe2_hw *hw); +void sxe2_hw_ptp_init_incval(struct sxe2_hw *hw, u64 incval); + +void sxe2_hw_ptp_tsyn_switch(struct sxe2_hw *hw, bool on); +void sxe2_hw_ptp_tsyn_event_switch(struct sxe2_hw *hw, bool on); +u64 sxe2_hw_ptp_get_event_second(struct sxe2_hw *hw, u32 index); +u64 sxe2_hw_ptp_get_event_nanosecond(struct sxe2_hw *hw, u32 index); +void sxe2_hw_ptp_aux_in_set(struct sxe2_hw *hw, u32 index, u32 value); + +bool sxe2_hw_ptp_tx_tstamp_read(struct sxe2_hw *hw, u8 port_id, u32 index, u64 *timestamp); +bool sxe2_hw_ptp_mac_tx_tstamp_read(struct sxe2_hw *hw, u8 phy_id, u8 index, u64 *timestamp); +void sxe2_hw_ptp_tx_tstamp_discard(struct sxe2_hw *hw, u8 port_id, u32 index); +void sxe2_hw_ptp_mac_tx_tstamp_discard(struct sxe2_hw *hw, u8 phy_id, u32 index); + +void sxe2_hw_ptp_mac_tx_tstamp_clear_all(struct sxe2_hw *hw, u8 phy_id, u32 reg_idx); +bool sxe2_hw_ptp_acquire_1588_lock(struct sxe2_hw *hw); +void sxe2_hw_ptp_release_1588_lock(struct sxe2_hw *hw); +void sxe2_hw_ptp_1588_timestamp_read(struct sxe2_hw *hw, u64 *second, u64 *nanosecond); +void sxe2_hw_ptp_1588_timestamp_write(struct sxe2_hw *hw, u64 second, u32 nanosecond); +void sxe2_hw_ptp_1588_clockout_write(struct 
sxe2_hw *hw, u32 index, u64 period, u64 second, u64 nanosecond); +void sxe2_hw_ptp_1588_timestamp_adjust(struct sxe2_hw *hw, u32 nanosecond, bool neg); +void sxe2_hw_ptp_1588_timestamp_adjust_at_time(struct sxe2_hw *hw, u32 nanosecond); +u32 sxe2_hw_ptp_auxout_get(struct sxe2_hw *hw, u32 index); +void sxe2_hw_ptp_auxout_set(struct sxe2_hw *hw, u32 index, u32 value); + +s32 sxe2_hw_ptp_stat_get(struct sxe2_hw *hw); + +void sxe2_hw_vf_queue_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_queue *vf_queue); + +void sxe2_hw_vf_irq_decfg(struct sxe2_hw *hw, struct sxe2_hw_vf_irq *vf_irq); + +void sxe2_hw_ipsec_tcam_clear(struct sxe2_hw *hw, u32 sa_index); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_linkchg.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_linkchg.h new file mode 100644 index 0000000000000000000000000000000000000000..e71f070ab43fdbcc185e3f19a038d6dd10611e4e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_linkchg.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_linkchg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_LINKCHG_H__ +#define __SXE2_LINKCHG_H__ +#include <linux/types.h> /* assumed: original include targets lost in extraction */ +#include <linux/netdevice.h> /* assumed */ +#include "sxe2_cmd.h" +#include "sxe2_drv_aux.h" + +s32 sxe2_white_list_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +s32 sxe2_tx_fault_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +s32 sxe2_tx_fault_event_count_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +void sxe2_link_get_info_config(struct sxe2_adapter *adapter, u8 *link_state, u32 *link_speed); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.c b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.c new file mode 100644 index 0000000000000000000000000000000000000000..00d0923695326be737d889d37f247229a19ebe8d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_log_export.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#define SXE2_DUMP_FILE_DIR "/var/log/" +#define SXE2_DUMP_FILE_SIZE_LIMIT (200 * 1024 * 1024) +#define SXE2_SINCE_YEAR (1900) +#define SXE2_SINCE_MONTH (1) +#define SXE2_MINUTE_TO_SECES (60) + +#include <linux/fs.h> /* assumed: original include targets lost in extraction */ +#include <linux/rtc.h> /* assumed */ + +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_event.h" +#include "sxe2_log_export.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_compat.h" + +static s32 sxe2_export_local_time(struct rtc_time *tm) +{ + struct timespec64 time; + time64_t local_time; + + ktime_get_real_ts64(&time); + local_time = (time64_t)(time.tv_sec - + (sys_tz.tz_minuteswest * SXE2_MINUTE_TO_SECES)); + rtc_time64_to_tm(local_time, tm); + + tm->tm_mon += SXE2_SINCE_MONTH; + tm->tm_year += SXE2_SINCE_YEAR; + return 0; +} + +static void sxe2_export_filename_build(struct sxe2_export_context *ctxt, + s8 *filename, u32 len) +{ + struct sxe2_adapter *adapter = ctxt->adapter; + struct rtc_time tm; + struct pci_dev *pdev = adapter->pdev; + s8 *p_str = filename; + + (void)sxe2_export_local_time(&tm); + p_str += snprintf(p_str, len - (p_str - filename), "%s", SXE2_DUMP_FILE_DIR); + p_str += snprintf(p_str, len - (p_str - filename), "sxe2-fw"); + p_str += snprintf(p_str, len - (p_str - filename), "-%04x:%02x:%02x.%x.log.", + pci_domain_nr(pdev->bus), pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + p_str += snprintf(p_str, len - (p_str - filename), + "%04d%02d%02d-%02d%02d%02d", tm.tm_year, tm.tm_mon, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); +} + +static void sxe2_export_file_close(struct sxe2_export_file_info *file_info) +{ + /* check file_info before dereferencing it */ + if (file_info && file_info->fp) { + SXE2_BUG_ON(IS_ERR(file_info->fp)); + (void)filp_close(file_info->fp, NULL); + + file_info->fp = NULL; + } +} + +static s32 sxe2_export_file_open(struct sxe2_export_context *ctxt) +{ + struct sxe2_adapter *adapter = ctxt->adapter; + struct sxe2_export_file_info *file_info = &ctxt->file; + s8 filename[SXE2_DUMP_FILE_NAME_LEN] = {0}; + s32 ret = 0; + struct file *filp = NULL; + + if (file_info->fp && file_info->file_size < ctxt->file_size_limit) + goto l_out; + + sxe2_export_file_close(file_info); + + memset(file_info, 0, sizeof(struct sxe2_export_file_info)); + + sxe2_export_filename_build(ctxt, filename, SXE2_DUMP_FILE_NAME_LEN); + + filp = (struct file *)filp_open(filename, + O_CREAT | O_RDWR | O_TRUNC | O_LARGEFILE, 0); + if (IS_ERR(filp)) { + LOG_ERROR_BDF("export file create: filp_open error filename %s\t" + "errno %d\n", + filename, (int)PTR_ERR(filp)); + ret = -EIO; + goto l_out; + } + + memcpy(file_info->filename, filename, sizeof(filename)); + file_info->fp = filp; + +l_out: + return ret; +} + +static s32 sxe2_export_file_write(struct sxe2_export_file_info *file_info, u8 *buf, + u32 len) +{ + struct file *filp = file_info->fp; + s32 ret = 0; + u32 pos = 0; + + while (pos < len) { + do { +#ifdef KERNEL_WRITE_POS_LOFF + ret = (s32)kernel_write(filp, buf + pos, len - pos, + filp->f_pos); +#else + ret = (s32)kernel_write(filp, buf + pos, len - pos, + &filp->f_pos); +#endif + } while (ret == -EINTR); + + if (ret < 0) + return ret; + if (ret == 0) + return -EIO; + + fsnotify_modify(filp); + /* account only the bytes actually written this pass */ + file_info->file_size += ret; + file_info->file_w_cnt++; + pos += ret; + } + + return 0; +} + +s32 sxe2_log_export_init(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_export_context *ctxt = &adapter->export_ctxt; + struct sxe2_fwc_event event = {}; + + event.count = 1; + event.code[0] = 
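/* event codes are carried little-endian on the wire */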
cpu_to_le16(SXE2_EVENT_CODE_AUTO_LOG); + ret = sxe2_fwc_event_subscribe(adapter, &event); + if (ret) + goto l_subscribe_failed; + + ctxt->adapter = adapter; + ctxt->file_size_limit = SXE2_DUMP_FILE_SIZE_LIMIT; + (void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_AUTO_LOG, + SXE2_CMD_EVENT_STATUS_SUB); + + return 0; + +l_subscribe_failed: + return ret; +} + +void sxe2_log_export_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_export_context *ctxt = &adapter->export_ctxt; + struct sxe2_fwc_event event = {}; + + (void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_AUTO_LOG, + SXE2_CMD_EVENT_STATUS_UNSUB); + + ctxt->adapter = NULL; + sxe2_export_file_close(&ctxt->file); + + event.count = 1; + event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_AUTO_LOG); + (void)sxe2_fwc_event_unsubscribe(adapter, &event); +} + +static s32 sxe2_fwc_log_export_ack(struct sxe2_adapter *adapter, + struct sxe2_fwc_fw_log_ack *ack) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_fill(&cmd, SXE2_CMD_EVENT_FW_LOG_ACK, ack, sizeof(*ack), + NULL, 0, SXE2_DRV_CMD_DFLT_TIMEOUT, false, true); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("export log ack failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_event_log_export(struct sxe2_adapter *adapter, void *buf, u32 buf_len) +{ + s32 ret; + struct sxe2_export_context *ctxt = &adapter->export_ctxt; + struct sxe2_fwc_fw_log_ack ack = {}; + s32 result; + + if (!ctxt->adapter) { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_export_file_open(ctxt); + if (ret) + goto l_ack; + + ret = sxe2_export_file_write(&ctxt->file, buf, buf_len); + if (ret) { + LOG_ERROR_BDF("file %s write %d failed: %d\n", ctxt->file.filename, + buf_len, ret); + } + +l_ack: + result = ret ? -SXE2_CMD_DUMP_LOG_FAILED : 0; + ack.result = cpu_to_le32((u32)result); + ret = sxe2_fwc_log_export_ack(adapter, &ack); +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.h b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.h new file mode 100644 index 0000000000000000000000000000000000000000..ca36e094b3d16a2956354da05492fbaf284324da --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/common/sxe2pf/sxe2_log_export.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_log_export.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_LOG_EXPORT_H__ +#define __SXE2_LOG_EXPORT_H__ + +#include +#include +#include +#include + +struct sxe2_adapter; + +#define SXE2_DUMP_FILE_DIR_LEN (128) +#define SXE2_DUMP_FILE_NAME_LEN (256) + +struct sxe2_export_file_info { + u8 filename[SXE2_DUMP_FILE_NAME_LEN]; + struct file *fp; + u64 file_size; + u32 file_w_cnt; + u32 file_status; + u8 reserved[4]; +}; + +struct sxe2_export_context { + struct sxe2_adapter *adapter; + struct sxe2_export_file_info file; + u32 file_size_limit; +}; + +s32 sxe2_log_export_init(struct sxe2_adapter *adapter); + +void sxe2_log_export_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_event_log_export(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/ps3_cfg_lanData.h b/drivers/net/ethernet/linkdata/sxe2/include/ps3_cfg_lanData.h new file mode 100644 index 0000000000000000000000000000000000000000..5f11c0be920f29909dc8ff567045c850c14e7ee4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/ps3_cfg_lanData.h @@ -0,0 +1,657 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: ps3_cfg_lanData.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __PS3_CFG_DP_INFO_H__ +#define __PS3_CFG_DP_INFO_H__ + +typedef struct PS3CfgDpHeader { + U32 magic ; + U32 version ; + U32 segment_cnt; + U32 reserved0[16]; +}__attribute__((packed))PS3CfgDpHeader_s; + +typedef struct PS3CfgDpInfo { + U32 magic ; + U16 cnt ; + U16 elem_size; + U8 type ; +}__attribute__((packed))PS3CfgDpInfo_s; + +typedef struct PS3CfgDpSwptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpSwptgentry_s; + +typedef struct PS3CfgDpSwtcam { + U32 data_l ; + U32 data_h :8; + U32 data_rsv :24; + U32 mask_l ; + U32 mask_h :8; + U32 valid :1; + U32 mask_rsv :23; +}__attribute__((packed))PS3CfgDpSwtcam_s; + +typedef struct PS3CfgDpSwextractor { + U32 protocol_id0 :8; + U32 offset0 :9; + U32 enable0 :1; + U32 rsv0 :14; + U32 protocol_id1 :8; + U32 offset1 :9; + U32 enable1 :1; + U32 rsv1 :14; + U32 protocol_id2 :8; + U32 offset2 :9; + U32 enable2 :1; + U32 rsv2 :14; + U32 protocol_id3 :8; + U32 offset3 :9; + U32 enable3 :1; + U32 rsv3 :14; + U32 protocol_id4 :8; + U32 offset4 :9; + U32 enable4 :1; + U32 rsv4 :14; + U32 protocol_id5 :8; + U32 offset5 :9; + U32 enable5 :1; + U32 rsv5 :14; + U32 protocol_id6 :8; + U32 offset6 :9; + U32 enable6 :1; + U32 rsv6 :14; + U32 protocol_id7 :8; + U32 offset7 :9; + U32 enable7 :1; + U32 rsv7 :14; + U32 protocol_id8 :8; + U32 offset8 :9; + U32 enable8 :1; + U32 rsv8 :14; + U32 protocol_id9 :8; + U32 offset9 :9; + U32 enable9 :1; + U32 rsv9 :14; + U32 protocol_id10 :8; + U32 offset10 :9; + U32 enable10 :1; + U32 rsv10 :14; + U32 protocol_id11 :8; + U32 offset11 :9; + U32 enable11 :1; + U32 rsv11 :14; + U32 protocol_id12 :8; + U32 offset12 :9; + U32 enable12 :1; + U32 rsv12 :14; + U32 protocol_id13 :8; + U32 offset13 :9; + U32 enable13 :1; + U32 rsv13 :14; + U32 protocol_id14 :8; + U32 offset14 :9; + U32 enable14 :1; + U32 rsv14 :14; + U32 protocol_id15 :8; + U32 offset15 :9; + U32 enable15 :1; + U32 rsv15 :14; + U32 protocol_id16 :8; + U32 offset16 :9; + U32 enable16 :1; + U32 rsv16 :14; + U32 protocol_id17 :8; + U32 offset17 :9; + U32 enable17 :1; + U32 rsv17 :14; + U32 protocol_id18 :8; + U32 offset18 :9; + U32 enable18 :1; + U32 
rsv18 :14; + U32 protocol_id19 :8; + U32 offset19 :9; + U32 enable19 :1; + U32 rsv19 :14; + U32 protocol_id20 :8; + U32 offset20 :9; + U32 enable20 :1; + U32 rsv20 :14; + U32 protocol_id21 :8; + U32 offset21 :9; + U32 enable21 :1; + U32 rsv21 :14; + U32 protocol_id22 :8; + U32 offset22 :9; + U32 enable22 :1; + U32 rsv22 :14; + U32 protocol_id23 :8; + U32 offset23 :9; + U32 enable23 :1; + U32 rsv23 :14; + U32 protocol_id24 :8; + U32 offset24 :9; + U32 enable24 :1; + U32 rsv24 :14; + U32 protocol_id25 :8; + U32 offset25 :9; + U32 enable25 :1; + U32 rsv25 :14; + U32 protocol_id26 :8; + U32 offset26 :9; + U32 enable26 :1; + U32 rsv26 :14; + U32 protocol_id27 :8; + U32 offset27 :9; + U32 enable27 :1; + U32 rsv27 :14; + U32 protocol_id28 :8; + U32 offset28 :9; + U32 enable28 :1; + U32 rsv28 :14; + U32 protocol_id29 :8; + U32 offset29 :9; + U32 enable29 :1; + U32 rsv29 :14; + U32 protocol_id30 :8; + U32 offset30 :9; + U32 enable30 :1; + U32 rsv30 :14; + U32 protocol_id31 :8; + U32 offset31 :9; + U32 enable31 :1; + U32 rsv31 :14; + U32 protocol_id32 :8; + U32 offset32 :9; + U32 enable32 :1; + U32 rsv32 :14; + U32 protocol_id33 :8; + U32 offset33 :9; + U32 enable33 :1; + U32 rsv33 :14; + U32 protocol_id34 :8; + U32 offset34 :9; + U32 enable34 :1; + U32 rsv34 :14; + U32 protocol_id35 :8; + U32 offset35 :9; + U32 enable35 :1; + U32 rsv35 :14; + U32 protocol_id36 :8; + U32 offset36 :9; + U32 enable36 :1; + U32 rsv36 :14; + U32 protocol_id37 :8; + U32 offset37 :9; + U32 enable37 :1; + U32 rsv37 :14; + U32 protocol_id38 :8; + U32 offset38 :9; + U32 enable38 :1; + U32 rsv38 :14; + U32 protocol_id39 :8; + U32 offset39 :9; + U32 enable39 :1; + U32 rsv39 :14; + U32 protocol_id40 :8; + U32 offset40 :9; + U32 enable40 :1; + U32 rsv40 :14; + U32 protocol_id41 :8; + U32 offset41 :9; + U32 enable41 :1; + U32 rsv41 :14; + U32 protocol_id42 :8; + U32 offset42 :9; + U32 enable42 :1; + U32 rsv42 :14; + U32 protocol_id43 :8; + U32 offset43 :9; + U32 enable43 :1; + U32 rsv43 :14; + U32 protocol_id44 :8; + U32 offset44 :9; + U32 enable44 :1; + U32 rsv44 :14; + U32 protocol_id45 :8; + U32 offset45 :9; + U32 enable45 :1; + U32 rsv45 :14; + U32 protocol_id46 :8; + U32 offset46 :9; + U32 enable46 :1; + U32 rsv46 :14; + U32 protocol_id47 :8; + U32 offset47 :9; + U32 enable47 :1; + U32 rsv47 :14; +}__attribute__((packed))PS3CfgDpSwextractor_s; + +typedef struct PS3CfgDpSwmap { + U32 map0 :8; + U32 map1 :8; + U32 map2 :8; + U32 map3 :8; +}__attribute__((packed))PS3CfgDpSwmap_s; + +typedef struct PS3CfgDpSwrcp { + U8 rid :6; + U8 rcp_rsv0 :1; + U8 is_root :1; + U8 lookup_index0 :7; + U8 lookup_index0_valid :1; + U8 lookup_index1 :7; + U8 lookup_index1_valid :1; + U8 lookup_index2 :7; + U8 lookup_index2_valid :1; + U8 lookup_index3 :7; + U8 lookup_index3_valid :1; + U8 lookup_index4 :7; + U8 lookup_index4_valid :1; + U8 join_priority; + U8 priority :3; + U8 need_pass_l2 :1; + U8 allow_pass_l2 :1; + U8 inverse_action :1; + U8 prune_idx :2; + U32 default_action :19; + U32 rcp_rsv1 :4; + U32 default_action_valid:1; + U32 rcp_rsv2 :8; + U32 fv4_bitmask :16; + U32 fv3_bitmask :16; + U32 fv2_bitmask :16; + U32 fv1_bitmask :16; + U32 fv0_bitmask :16; + U32 rcp_rsv3 :16; +}__attribute__((packed))PS3CfgDpSwrcp_s; + +typedef struct PS3CfgDpSwprofilercpbitmap { + U32 recipe_high; + U32 recipe_low; +}__attribute__((packed))PS3CfgDpSwprofilercpbitmap_s; + +typedef struct PS3CfgDpSwvsigentry { + U32 vsig0 :12; + U32 vsig_rsv0 :4; + U32 vsig1 :12; + U32 vsig_rsv1 :4; + U32 vsig2 :12; + U32 vsig_rsv2 :4; + U32 vsig3 :12; + U32 vsig_rsv3 :4; 
+}__attribute__((packed))PS3CfgDpSwvsigentry_s; + +typedef struct PS3CfgDpRssptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpRssptgentry_s; + +typedef struct PS3CfgDpRssvsigentry { + U32 vsig0 :12; + U32 vsig_rsv0 :4; + U32 vsig1 :12; + U32 vsig_rsv1 :4; + U32 vsig2 :12; + U32 vsig_rsv2 :4; + U32 vsig3 :12; + U32 vsig_rsv3 :4; +}__attribute__((packed))PS3CfgDpRssvsigentry_s; + +typedef struct PS3CfgDpRsstcam { + U32 data_l ; + U32 data_h :8; + U32 data_rsv :24; + U32 mask_l ; + U32 mask_h :8; + U32 valid :1; + U32 mask_rsv :23; +}__attribute__((packed))PS3CfgDpRsstcam_s; + +typedef struct PS3CfgDpRssextractor { + U32 protocol_id0 :8; + U32 offset0 :9; + U32 enable0 :1; + U32 rsv0 :14; + U32 protocol_id1 :8; + U32 offset1 :9; + U32 enable1 :1; + U32 rsv1 :14; + U32 protocol_id2 :8; + U32 offset2 :9; + U32 enable2 :1; + U32 rsv2 :14; + U32 protocol_id3 :8; + U32 offset3 :9; + U32 enable3 :1; + U32 rsv3 :14; + U32 protocol_id4 :8; + U32 offset4 :9; + U32 enable4 :1; + U32 rsv4 :14; + U32 protocol_id5 :8; + U32 offset5 :9; + U32 enable5 :1; + U32 rsv5 :14; + U32 protocol_id6 :8; + U32 offset6 :9; + U32 enable6 :1; + U32 rsv6 :14; + U32 protocol_id7 :8; + U32 offset7 :9; + U32 enable7 :1; + U32 rsv7 :14; + U32 protocol_id8 :8; + U32 offset8 :9; + U32 enable8 :1; + U32 rsv8 :14; + U32 protocol_id9 :8; + U32 offset9 :9; + U32 enable9 :1; + U32 rsv9 :14; + U32 protocol_id10 :8; + U32 offset10 :9; + U32 enable10 :1; + U32 rsv10 :14; + U32 protocol_id11 :8; + U32 offset11 :9; + U32 enable11 :1; + U32 rsv11 :14; + U32 protocol_id12 :8; + U32 offset12 :9; + U32 enable12 :1; + U32 rsv12 :14; + U32 protocol_id13 :8; + U32 offset13 :9; + U32 enable13 :1; + U32 rsv13 :14; + U32 protocol_id14 :8; + U32 offset14 :9; + U32 enable14 :1; + U32 rsv14 :14; + U32 protocol_id15 :8; + U32 offset15 :9; + U32 enable15 :1; + U32 rsv15 :14; + U32 protocol_id16 :8; + U32 offset16 :9; + U32 enable16 :1; + U32 rsv16 :14; + U32 protocol_id17 :8; + U32 offset17 :9; + U32 enable17 :1; + U32 rsv17 :14; + U32 protocol_id18 :8; + U32 offset18 :9; + U32 enable18 :1; + U32 rsv18 :14; + U32 protocol_id19 :8; + U32 offset19 :9; + U32 enable19 :1; + U32 rsv19 :14; + U32 protocol_id20 :8; + U32 offset20 :9; + U32 enable20 :1; + U32 rsv20 :14; + U32 protocol_id21 :8; + U32 offset21 :9; + U32 enable21 :1; + U32 rsv21 :14; + U32 protocol_id22 :8; + U32 offset22 :9; + U32 enable22 :1; + U32 rsv22 :14; + U32 protocol_id23 :8; + U32 offset23 :9; + U32 enable23 :1; + U32 rsv23 :14; +}__attribute__((packed))PS3CfgDpRssextractor_s; + +typedef struct PS3CfgDpRssmap { + U32 rssmap0 :7; + U32 rssrsv0 :1; + U32 rssmap1 :7; + U32 rssrsv1 :1; + U32 rssmap2 :7; + U32 rssrsv2 :1; + U32 rssmap3 :7; + U32 rssrsv3 :1; +}__attribute__((packed))PS3CfgDpRssmap_s; + +typedef struct PS3CfgDpRssipset { + U32 fwWordIndex0 :5; + U32 ipSetRsv0 :2; + U32 fwWordIndexEn0 :1; + U32 fwWordIndex1 :5; + U32 ipSetRsv1 :2; + U32 fwWordIndexEn1 :1; + U32 fwWordIndex2 :5; + U32 ipSetRsv2 :2; + U32 fwWordIndexEn2 :1; + U32 fwWordIndex3 :5; + U32 ipSetRsv3 :2; + U32 fwWordIndexEn3 :1; + U32 fwWordIndex4 :5; + U32 ipSetRsv4 :2; + U32 fwWordIndexEn4 :1; + U32 fwWordIndex5 :5; + U32 ipSetRsv5 :2; + U32 fwWordIndexEn5 :1; + U32 fwWordIndex6 :5; + U32 ipSetRsv6 :2; + U32 fwWordIndexEn6 :1; + U32 fwWordIndex7 :5; + U32 ipSetRsv7 :2; + U32 fwWordIndexEn7 :1; + U32 fwWordIndex8 :5; + U32 ipSetRsv8 :2; + U32 fwWordIndexEn8 :1; + U32 fwWordIndex9 :5; + U32 ipSetRsv9 :2; + U32 fwWordIndexEn9 :1; + U32 fwWordIndex10 :5; + U32 
ipSetRsv10 :2; + U32 fwWordIndexEn10 :1; + U32 fwWordIndex11 :5; + U32 ipSetRsv11 :2; + U32 fwWordIndexEn11 :1; + U32 fwWordIndex12 :5; + U32 ipSetRsv12 :2; + U32 fwWordIndexEn12 :1; + U32 fwWordIndex13 :5; + U32 ipSetRsv13 :2; + U32 fwWordIndexEn13 :1; + U32 fwWordIndex14 :5; + U32 ipSetRsv14 :2; + U32 fwWordIndexEn14 :1; + U32 fwWordIndex15 :5; + U32 ipSetRsv15 :2; + U32 fwWordIndexEn15 :1; + U32 fwWordIndex16 :5; + U32 ipSetRsv16 :2; + U32 fwWordIndexEn16 :1; + U32 fwWordIndex17 :5; + U32 ipSetRsv17 :2; + U32 fwWordIndexEn17 :1; + U32 fwWordIndex18 :5; + U32 ipSetRsv18 :2; + U32 fwWordIndexEn18 :1; + U32 fwWordIndex19 :5; + U32 ipSetRsv19 :2; + U32 fwWordIndexEn19 :1; + U32 fwWordIndex20 :5; + U32 ipSetRsv20 :2; + U32 fwWordIndexEn20 :1; + U32 fwWordIndex21 :5; + U32 ipSetRsv21 :2; + U32 fwWordIndexEn21 :1; + U32 fwWordIndex22 :5; + U32 ipSetRsv22 :2; + U32 fwWordIndexEn22 :1; + U32 fwWordIndex23 :5; + U32 ipSetRsv23 :2; + U32 fwWordIndexEn23 :1; +}__attribute__((packed))PS3CfgDpRssipset_s; + +typedef struct PS3CfgDpFnavptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpFnavptgentry_s; + +typedef struct PS3CfgDpFnavmask { + U32 val :16; + U32 rsv :16; + U32 fldbit_l; + U32 fldbit_h; +}__attribute__((packed))PS3CfgDpFnavmask_s; + +typedef struct PS3CfgDpRssmask { + U32 val :16; + U32 rsv :16; + U32 fldbit_l; + U32 fldbit_h; +}__attribute__((packed))PS3CfgDpRssmask_s; + +typedef struct PS3CfgDpAclptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpAclptgentry_s; + +typedef struct PS3CfgDpPagtpuexhdr { + U32 hdrLen :8; + U32 hdrrsv :24; +}__attribute__((packed))PS3CfgDpPagtpuexhdr_s; + +typedef struct PS3CfgDpPkghdr { + struct PS3CfgDpHeader dpHeader; +}__attribute__((packed))PS3CfgDpPkghdr_s; + +typedef struct PS3CfgDpSwptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwptgentry dpSwptgentry[256]; +}__attribute__((packed))PS3CfgDpSwptgcfg_s; + +typedef struct PS3CfgDpSwtcamcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwtcam dpSwtcam[1]; +}__attribute__((packed))PS3CfgDpSwtcamcfg_s; + +typedef struct PS3CfgDpSwextractorcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwextractor dpSwextractor[3]; +}__attribute__((packed))PS3CfgDpSwextractorcfg_s; + +typedef struct PS3CfgDpSwmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwmap dpSwmap[1]; +}__attribute__((packed))PS3CfgDpSwmapcfg_s; + +typedef struct PS3CfgDpSwrcpcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwrcp dpSwrcp[9]; +}__attribute__((packed))PS3CfgDpSwrcpcfg_s; + +typedef struct PS3CfgDpSwprofilercpbitmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwprofilercpbitmap dpSwprofilercpbitmap[3]; +}__attribute__((packed))PS3CfgDpSwprofilercpbitmapcfg_s; + +typedef struct PS3CfgDpSwvsigcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwvsigentry dpSwvsigentry[1]; +}__attribute__((packed))PS3CfgDpSwvsigcfg_s; + +typedef struct PS3CfgDpRssptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssptgentry dpRssptgentry[1]; +}__attribute__((packed))PS3CfgDpRssptgcfg_s; + +typedef struct PS3CfgDpRssvsigcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssvsigentry dpRssvsigentry[1]; +}__attribute__((packed))PS3CfgDpRssvsigcfg_s; + +typedef struct PS3CfgDpRsstcamcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRsstcam dpRsstcam[1]; +}__attribute__((packed))PS3CfgDpRsstcamcfg_s; + +typedef struct PS3CfgDpRssextractorcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssextractor 
dpRssextractor[1]; +}__attribute__((packed))PS3CfgDpRssextractorcfg_s; + +typedef struct PS3CfgDpRssmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssmap dpRssmap[1]; +}__attribute__((packed))PS3CfgDpRssmapcfg_s; + +typedef struct PS3CfgDpRssipsetcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssipset dpRssipset[1]; +}__attribute__((packed))PS3CfgDpRssipsetcfg_s; + +typedef struct PS3CfgDpFnavptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpFnavptgentry dpFnavptgentry[1]; +}__attribute__((packed))PS3CfgDpFnavptgcfg_s; + +typedef struct PS3CfgDpFnavmaskcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpFnavmask dpFnavmask[1]; +}__attribute__((packed))PS3CfgDpFnavmaskcfg_s; + +typedef struct PS3CfgDpRssmaskcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssmask dpRssmask[1]; +}__attribute__((packed))PS3CfgDpRssmaskcfg_s; + +typedef struct PS3CfgDpAclptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpAclptgentry dpAclptgentry[1]; +}__attribute__((packed))PS3CfgDpAclptgcfg_s; + +typedef struct PS3CfgDpPagtpuexhdrcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpPagtpuexhdr dpPagtpuexhdr[1]; +}__attribute__((packed))PS3CfgDpPagtpuexhdrcfg_s; + +typedef struct PS3CfgDpArea { + struct PS3CfgDpPkghdr dpPkghdr; + struct PS3CfgDpSwptgcfg dpSwptgcfg; + struct PS3CfgDpSwtcamcfg dpSwtcamcfg; + struct PS3CfgDpSwextractorcfg dpSwextractorcfg; + struct PS3CfgDpSwmapcfg dpSwmapcfg; + struct PS3CfgDpSwrcpcfg dpSwrcpcfg; + struct PS3CfgDpSwprofilercpbitmapcfg dpSwprofilercpbitmapcfg; + struct PS3CfgDpSwvsigcfg dpSwvsigcfg; + struct PS3CfgDpRssptgcfg dpRssptgcfg; + struct PS3CfgDpRssvsigcfg dpRssvsigcfg; + struct PS3CfgDpRsstcamcfg dpRsstcamcfg; + struct PS3CfgDpRssextractorcfg dpRssextractorcfg; + struct PS3CfgDpRssmapcfg dpRssmapcfg; + struct PS3CfgDpRssipsetcfg dpRssipsetcfg; + struct PS3CfgDpFnavptgcfg dpFnavptgcfg; + struct PS3CfgDpFnavmaskcfg dpFnavmaskcfg; + struct PS3CfgDpRssmaskcfg dpRssmaskcfg; + struct PS3CfgDpAclptgcfg dpAclptgcfg; + struct PS3CfgDpPagtpuexhdrcfg dpPagtpuexhdrcfg; +}__attribute__((aligned(8)))PS3CfgDpArea_s; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cli_drv_msg.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cli_drv_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..c035fcf6e33089b34aa6af2fe5d45e3ae2704c92 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cli_drv_msg.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
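+ * Defines the CLI <-> driver message channel: the shared header (struct drv_msg_info), opcode space, and driver error codes.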
+ * + * @file: sxe2_cli_drv_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CLI_DRV_MSG_H__ +#define __SXE2_CLI_DRV_MSG_H__ + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define SXE2_DRV_MSG_MAX_SIZE (8192) +#define SXE2_DRV_MSG_MAGIC_CODE (0x56781234) +#define SXE2_MAX_NETDEV_NAME_SIZE (128) + +#define SXE2_CLI_DRV_SUCCESS (0) +#define SXE2_MOD_DRV (1) +#define SXE2_SUB_MOD_DEV (1) + +#define MODULE_ID_SHIFT (24) +#define SUB_MODULE_ID_SHIFT (16) +#define ERROR_INDEX_MASK (0xFFFF0000) +#define SXE2_MAKE_ERR_CODE_INDEX(module, sub_module) \ + ((((u32)((module) << MODULE_ID_SHIFT)) | ((u32)((sub_module) << SUB_MODULE_ID_SHIFT))) & \ + ERROR_INDEX_MASK) + +enum sxe2_priv_drv_err_code { + SXE2_ERR_DRV_DEV = SXE2_MAKE_ERR_CODE_INDEX(SXE2_MOD_DRV, SXE2_SUB_MOD_DEV), + SXE2_ERR_DRV_DEV_PARAMS_INVAL, + SXE2_ERR_DRV_DEV_NULL_PTR, + SXE2_ERR_DRV_DEV_NOT_FOUND, + SXE2_ERR_DRV_DEV_NOT_SUPPORT, + SXE2_ERR_DRV_DEV_NO_MEM, + SXE2_ERR_DRV_DEV_FAULT, + SXE2_ERR_DRV_DEV_MAGIC_INVAL, +}; + +enum sxe2_cli_drv_cmd_opcode { + SXE2_CLI_CMD_GET_NETDEV_NAME = 0, + SXE2_CLI_CMD_MAX = 0xFFFF, +}; + +struct drv_msg_info { + u32 magic; + u32 opcode; + u32 error; + u32 timeout; + u32 runver; + u32 req_length; + u32 ack_length; + u16 hdr_len; + u8 reserved[2]; + u64 trace_id; + u8 pad[8]; + u8 body[]; +}; + +struct sxe2_cli_drv_get_pname_rsp_msg { + char netdev_name[SXE2_MAX_NETDEV_NAME_SIZE]; +}; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cmd.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..be85edad72d0e2b575cee3fa0144f69e405e877e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_cmd.h @@ -0,0 +1,3687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
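+ * Defines the driver <-> firmware command channel: command descriptor and header layouts, the opcode space built with SXE2_MK_CMD(), and per-command payload structures.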
+ * + * @file: sxe2_cmd.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CMD_H__ +#define __SXE2_CMD_H__ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#if defined(SXE2_SUPPORT_UEFI) || defined(SXE2_SUPPORT_IPXE) +#include "sxe2_uefi_type.h" +#include "sxe2_uefi_def.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "rte_os.h" +#include "sxe2_type.h" +#include "sxe2_common.h" +#include "sxe2_osal.h" +#endif + +#ifndef SXE2_DRIVER_SIM +#include "sxe2_spec.h" +#endif + +#pragma pack(4) + +#define SXE2_VSI_MAX_CNT (768) + +#define SXE2_INVAL_U8 (0xFF) +#define SXE2_INVAL_U16 (0xFFFF) +#define SXE2_INVAL_U32 (0xFFFFFFFF) +#define SXE2_VF_ID_INVAL (0xFFFF) + +#define SXE2_CMD_MAGIC (0xFEFEEFEF) + +#define SXE2_FW_COMP_MAJOR_VER (1) +#define SXE2_FW_COMP_MINOR_VER (1) +#define SXE2_FW_COMP_VER \ + (SXE2_FW_COMP_MAJOR_VER << 16 | \ + SXE2_FW_COMP_MINOR_VER) + +#define SXE2_CMD_LARGE_BUF_SIZE (512) +#define SXE2_CMD_MAX_BUF \ + (2 * 4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE \ + (SXE2_CMD_MAX_BUF - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_MAX_BUF_MBX \ + (4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX \ + (SXE2_CMD_MAX_BUF_MBX - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE_MBX \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_DD BIT(0) +#define SXE2_CMD_COMPLETE BIT(1) +#define SXE2_CMD_ERROR BIT(2) +#define SXE2_CMD_LARGE_BUF BIT(9) +#define SXE2_CMD_READ BIT(10) +#define SXE2_CMD_BUF BIT(12) +#define SXE2_CMD_NO_INTR BIT(13) + +#define SXE2_CMD_DONE (SXE2_CMD_DD | SXE2_CMD_COMPLETE | SXE2_CMD_ERROR) + +#define SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT \ + (16) +#define SXE2_TC_MAX_CNT (8) +#define SXE2_TXQ_CTXT_LEN (24) + +#define SXE2_CMD_HDR_SIZE sizeof(struct sxe2_cmd_hdr) +#define SXE2_DRV_MSG_HDR_SIZE \ + sizeof(struct sxe2_drv_msg_hdr) + +#define SXE2_MBX_DATA_PTR(type, msg_hdr_ptr) \ + ((type *)((u8 *)(msg_hdr_ptr) + (msg_hdr_ptr)->data_offset)) + +#define SXE2_MBX_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2vf_mbx_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_DRV_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2_drv_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_CMD_MODULE_S (8) +#define SXE2_MK_CMD(module, cmd) ((module) << SXE2_CMD_MODULE_S | (cmd)) + +#define SXE2_TCAM_KEY_VALUE_LEN (5) +#define SXE2_TCAM_KEY_LEN (2 * SXE2_TCAM_KEY_VALUE_LEN) + +#define SXE2_FULLKEY_DWORD_CNT (3) +#define SXE2_PACKET_INFO_DWORD_CNT (20) +#define SXE2_SWITCH_FV_CNT (48) +#define SXE2_PACKET_MAX_RECIPES (32) + +#define SXE2_MAX_NUM_RECIPES (64) +#define SXE2_MAX_NUM_RECIPES_PER_PROFILE (32) +#define SXE2_MAX_NUM_ROOT_RECIPES_PER_PROFILE (24) + +#if defined(SXE2_TEST) +#define SXE2_MAX_NUM_RECIPES_VER_I_O (64) +#else +#define SXE2_MAX_NUM_RECIPES_VER_I_O (24) +#endif + +#define SXE2_MAX_NUM_PROFILES (256) +#define SXE2_NUM_WORDS_RECIPE (4) +#define SXE2_MAX_REPLY_RECIPE (4) +#define SXE2_MAX_CHAIN_RECIPE (SXE2_MAX_REPLY_RECIPE + 1) +#define SXE2_MAX_CHAIN_WORDS (SXE2_NUM_WORDS_RECIPE * \ + SXE2_MAX_REPLY_RECIPE) +#define SXE2_VSI_LIST_DAT_LEN DIV_ROUND_UP(SXE2_VSI_MAX_CNT, \ + (BITS_PER_BYTE * sizeof(u32))) + +#define SXE2_ACTION_PRIORITY_HIGH (7) + +#define SXE2_CMD_SWITCH_RULE_FLAG_COMPLEX BIT(0) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE BIT(1) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_INC BIT(2) + +#define 
SXE2_SINGLE_ACT_LB_ENABLE BIT(16) +#define SXE2_SINGLE_ACT_LAN_ENABLE BIT(15) + +#define SXE2_SINGLE_ACT_VSI_TYPE_S (17) +#define SXE2_SINGLE_ACT_VSI_FORWARD (0x0 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_VSI_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_LIST_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST BIT(4) +#define SXE2_SINGLE_ACT_VALID_BIT BIT(1) +#define SXE2_SINGLE_ACT_DROP BIT(0) + +#define SXE2_SINGLE_ACT_TO_Q (0x1 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_Q_INDEX_S (4) +#define SXE2_SINGLE_ACT_Q_INDEX_M (0x7FF << SXE2_SINGLE_ACT_Q_INDEX_S) +#define SXE2_SINGLE_ACT_Q_REGION_S (1) +#define SXE2_SINGLE_ACT_Q_REGION_M (0x7 << SXE2_SINGLE_ACT_Q_REGION_S) +#define SXE2_SINGLE_ACT_Q_PRIORITY BIT(0) + +#define SXE2_SINGLE_ACT_PRUNE (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_EGRESS BIT(3) +#define SXE2_SINGLE_ACT_INGRESS BIT(2) +#define SXE2_SINGLE_ACT_PRUNET BIT(1) + +#define SXE2_SINGLE_ACT_MIRROR (0x3 << SXE2_SINGLE_ACT_VSI_TYPE_S) + +#define SXE2_SINGLE_ACT_POINTER (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_TO_LARGE BIT(0) +#define SXE2_SINGLE_ACT_HASFWD BIT(1) + +#define SXE2_MAC_NUM (4) + +#define SXE2_RSS_FV_CNT (24) + +#define SXE2_RSS_FV_TRACE_CNT (12) + +#define SXE2_OG_BUF_SIZE (4096) +#define SXE2_FV_CNT_MAX SXE2_SWITCH_FV_CNT +#define SXE2_FNAV_INPUT_CNT (30) + +#define SXE2_BFD_FV_CNT_MAX (32) + +#define SXE2_RXFT_PPE_INFO_REG_CNT (20) + +#define SXE2_FV_DIRECTION_OFFSET (10) +#define SXE2_FV_DIRECTION_MASK BIT(SXE2_FV_DIRECTION_OFFSET) +#define SXE2_FV_DIRECTION_TX (0) +#define SXE2_FV_DIRECTION_RX (1) + +#define SXE2_FV_CAST_OFFSET (0) +#define SXE2_FV_CAST_UNI (0) +#define SXE2_FV_CAST_MULTI (1) +#define SXE2_FV_CAST_BROAD (2) + +#define SXE2_FV_PKT_SRC_OFFSET (10) +#define SXE2_FV_PKT_SRC_MASK (0x3 << SXE2_FV_PKT_SRC_OFFSET) +#define SXE2_FV_PKT_SRC_TX (0x3) +#define SXE2_FV_PKT_SRC_RX (0x0) + +#define SXE2_FV_VSI_NUM_OFFSET (0) +#define SXE2_FV_VSI_NUM_MASK (0x3ff << SXE2_FV_VSI_NUM_OFFSET) + +#define SXE2_FV_PKT_TO_RDMA_OFFSET (8) +#define SXE2_FV_PKT_TO_RDMA_MASK (0x1 << SXE2_FV_PKT_TO_RDMA_OFFSET) +#define SXE2_FV_PKT_TO_RDMA (1) +#define SXE2_FV_PKT_TO_RDMA_NO (0) + +#define SXE2_SWITCH_RECIPE_PRIO_7 (7) +#define SXE2_SWITCH_RECIPE_PRIO_6 (6) + +#define SXE2_LLDP_FRAME_MAX_SIZE (1500) +#define SXE2_MAX_TRAFFIC_CLASS (8) +#define SXE2_MAX_USER_PRIORITY (8) +#define SXE2_DCBX_MAX_APPS (64) +#define SXE2_DSCP_MAX_NUM (64) + +#define SXE2_DSCP_OUI (0xFFFFFFU) +#define SXE2_DSCP_SUBTYPE_DSCP2UP (0x41U) +#define SXE2_DSCP_SUBTYPE_ENFORCE (0x42U) +#define SXE2_DSCP_SUBTYPE_TCBW (0x43U) +#define SXE2_DSCP_SUBTYPE_PFC (0x44U) +#define SXE2_DSCP_IPV6_OFFSET (80) +#define SXE2_DSCP_IPV4_UNTAG_OFFSET (64) +#define SXE2_DSCP_IPV6_UNTAG_OFFSET (144) + +#define SXE2_CMD_VSI_STATS_MAX_CNT (16) + +#define SXE2_SERIAL_NUM_LEN (20) + +#define SXE2_MDD_TYPE_TX (1) +#define SXE2_MDD_TYPE_RX (2) + +#define SXE2_FNAV_DEFAULT_MASK_CNT (6) + +#define SXE2_RSS_CORE_LUT_SIZE (32) + +#define SXE2_LARGE_ACTION_COUNT_IN_GROUP (4) +#define SXE2_FLM_VENDOR_LEN 16 +#define SXE2_FLM_VENDOR_PN_LEN 16 +#define SXE2_HOST_FLM_VENDOR_LEN 32 +#define SXE2_HOST_FLM_VENDOR_PN_LEN 32 + +#define SXE2_LLDP_FW_AGENT_DISABLE 0 +#define SXE2_LLDP_FW_AGENT_ENABLE 1 + +enum sxe2_txq_quanta_prof_cfg { + SXE2_TXQ_QUANTA_PROF_DEFAULT = 0, + SXE2_TXQ_QUANTA_PROF_SIMPLE, + SXE2_TXQ_QUANTA_PROF_COMPLEX, +}; + +enum 
sxe2_cmd_type { + SXE2_CMD_TYPE_CLI = 0, + SXE2_CMD_TYPE_DRV_TO_FW, + SXE2_CMD_TYPE_FW_NOTIFY, + SXE2_CMD_TYPE_PF_TO_VF, + SXE2_CMD_TYPE_VF_TO_PF, + SXE2_CMD_TYPE_DRV_TO_HW, + SXE2_CMD_TYPE_PF_REPLY_VF, +}; + +enum sxe2_cmd_module { + SXE2_CMD_MODULE_HANDSHAKE = 0, + SXE2_CMD_MODULE_CAPS = 1, + SXE2_CMD_MODULE_VSI = 2, + SXE2_CMD_MODULE_QUEUE = 3, + SXE2_CMD_MODULE_CFG = 4, + SXE2_CMD_MODULE_SWITCH = 5, + SXE2_CMD_MODULE_RULE = 6, + SXE2_CMD_MODULE_EVENT = 7, + SXE2_CMD_MODULE_MBX = 8, + SXE2_CMD_MODULE_TXSCHED = 9, + SXE2_CMD_MODULE_STATS = 11, + SXE2_CMD_MODULE_OPT = 12, + SXE2_CMD_MODULE_RSS = 13, + SXE2_CMD_MODULE_LED = 14, + SXE2_CMD_MODULE_OG = 15, + SXE2_CMD_MODULE_RDMA = 16, + SXE2_CMD_MODULE_IPSEC = 17, + SXE2_CMD_MODULE_FNAV = 18, + SXE2_CMD_MODULE_PXE = 19, + SXE2_CMD_MODULE_DCB = 20, + SXE2_CMD_MODULE_LLDP = 21, + SXE2_CMD_MODULE_PTP = 22, + SXE2_CMD_MODULE_MACADDR = 23, + SXE2_CMD_MODULE_MACSEC = 24, + SXE2_CMD_MODULE_UPGRADE = 25, + SXE2_CMD_MODULE_ETHTOOL = 26, + SXE2_CMD_MODULE_FLM = 27, + SXE2_CMD_MODULE_SFP = 28, + SXE2_CMD_MODULE_RWREG = 29, + SXE2_CMD_MODULE_UDPTUNEEL = 30, + SXE2_CMD_MODULE_NCD = 31, + SXE2_CMD_MODULE_BFD = 32, + SXE2_CMD_MODULE_NCD_UDF = 33, + SXE2_CMD_MODULE_QUEUE_STATS_MAP = 34, + SXE2_CMD_MODULE_ACL = 35, +}; + +enum sxe2_drv_cmd_opcode { + + SXE2_CMD_Q_HANDSHAKE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 1), + SXE2_CMD_Q_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 3), + + SXE2_CMD_DEV_CAPS = SXE2_MK_CMD(SXE2_CMD_MODULE_CAPS, 1), + SXE2_CMD_FUNC_CAPS, + SXE2_CMD_PF_CFG_CLEAR, + SXE2_CMD_PF_SRIOV_SET, + SXE2_CMD_PF_DDP_REF_CLR, + SXE2_CMD_PHY_PORT_INFO_GET, + SXE2_CMD_PF_SERIAL_GET, + SXE2_CMD_DRV_MODE_GET, + SXE2_CMD_DRV_MODE_SET, + + SXE2_CMD_VSI_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_VSI, 1), + SXE2_CMD_UPDATE_VSI, + SXE2_CMD_FREE_VSI, + SXE2_CMD_VSI_VLAN_FILTER, + SXE2_CMD_VSI_LOOPBACK, + SXE2_CMD_VSI_SPOOFCHK, + SXE2_CMD_VSI_SRC_PRUNE, + SXE2_CMD_VSI_MDD_CHECK, + SXE2_CMD_VSI_VF_QUEUE_SET, + SXE2_CMD_VSI_VF_QUEUE_CLEAR, + + SXE2_CMD_TXQ_CFG_AND_ENABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 1), + SXE2_CMD_RXQ_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 2), + SXE2_CMD_RX_FB = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 3), + SXE2_CMD_TXQ_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 4), + SXE2_CMD_TXQ_STATE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 5), + + SXE2_CMD_CFG_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_CFG, 1), + SXE2_CMD_CFG_UPDATE, + SXE2_CMD_DP_DLD_PRE, + SXE2_CMD_DP_DLD_PROC, + SXE2_CMD_DP_DLD_DONE, + SXE2_CMD_DP_DLD_STATE, + + SXE2_CMD_SWITCH_RULE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_SWITCH, 1), + SXE2_CMD_SWITCH_RULE_DEL, + SXE2_CMD_SWITCH_RULE_UPDATE, + SXE2_CMD_SWITCH_RULE_GET, + SXE2_CMD_SWITCH_VSI_LIST_ADD, + SXE2_CMD_SWITCH_VSI_LIST_DEL, + SXE2_CMD_SWITCH_VSI_LIST_GET, + SXE2_CMD_SWITCH_VSI_LIST_UPDATE, + SXE2_CMD_SWITCH_LARGE_ACTION_CFG, + SXE2_CMD_SWITCH_RULE_CPX_ADD, + SXE2_CMD_SWITCH_RULE_CPX_DEL, + SXE2_CMD_SWITCH_RULE_CPX_UPDATE, + SXE2_CMD_SWITCH_RULE_CPX_GET, + SXE2_CMD_SWITCH_TRACE_TRIGGER, + SXE2_CMD_SWITCH_TRACE_RECORDER, + SXE2_CMD_HW_DFX_SHOW, + SXE2_CMD_SWITCH_RECIPE_GET, + SXE2_CMD_SWITCH_PROFILE_RECIPE_MAP_GET, + SXE2_CMD_SWITCH_SHARE_ID_GET, + SXE2_CMD_SWITCH_DFX_IRQ, + + SXE2_CMD_PARSE_RULE = SXE2_MK_CMD(SXE2_CMD_MODULE_RULE, 1), + SXE2_CMD_UDP_TUNNEL_PORT, + + SXE2_CMD_EVENT_SUBSCRIBE = SXE2_MK_CMD(SXE2_CMD_MODULE_EVENT, 1), + SXE2_CMD_EVENT_UNSUBSCRIBE, + SXE2_CMD_EVENT_FW_LOG_ACK, + + SXE2_CMD_MBX_TO_PF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 1), + SXE2_CMD_MBX_TO_VF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 2), + + SXE2_CMD_TXSCHED_CAP_QUERY 
= SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 1), + SXE2_CMD_TXSCHED_DFLT_TOPO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 2), + SXE2_CMD_TX_SCHED_NODE_INFO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 3), + SXE2_CMD_TX_SCHED_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 4), + SXE2_CMD_TX_SCHED_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 5), + SXE2_CMD_TX_SCHED_NODE_SUSPEND = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 6), + SXE2_CMD_TX_SCHED_NODE_RESUME = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 7), + SXE2_CMD_TX_SCHED_LEAF_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 10), + SXE2_CMD_TX_SCHED_LEAF_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 11), + SXE2_CMD_TX_SCHED_NODE_RL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 12), + SXE2_CMD_TX_SCHED_Q_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 13), + SXE2_CMD_TX_SCHED_Q_STOP = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 14), + SXE2_CMD_TX_SCHED_ETS_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 15), + SXE2_CMD_TX_SCHED_LEAF_NODE_MOVE = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 16), + SXE2_CMD_TX_SCHED_QSET_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 17), + SXE2_CMD_TX_SCHED_QSET_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 18), + SXE2_CMD_TX_SCHED_PRIO_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 19), + SXE2_CMD_TX_SCHED_WEIGHT_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 20), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 21), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 22), + SXE2_CMD_TX_SCHED_NODE_SRL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 23), + SXE2_CMD_TX_SCHED_PROFILE_RL_PRE_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 24), + SXE2_CMD_TX_SCHED_PROFILE_SRL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 25), + SXE2_CMD_TX_SCHED_PROFILE_SRL_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 26), + SXE2_CMD_TX_SCHED_PROFILE_SRL_UPD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 27), + + SXE2_CMD_GET_PF_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 1), + SXE2_CMD_GET_VSI_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 2), + SXE2_CMD_GET_PPE_DFX, + + SXE2_CMD_OPT_EEP = SXE2_MK_CMD(SXE2_CMD_MODULE_OPT, 1), + + SXE2_CMD_RSS_VSI_HCTRL_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_RSS, 1), + SXE2_CMD_RSS_LUT_SET, + SXE2_CMD_RSS_LUT_GET, + SXE2_CMD_RSS_HKEY_SET, + SXE2_CMD_RSS_HKEY_GET, + SXE2_CMD_RSS_SYMM_FV_SET, + SXE2_CMD_RSS_TRACE_TRIGGER, + SXE2_CMD_RSS_TRACE_RECORDER, + + SXE2_CMD_LED_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_LED, 1), + + SXE2_CMD_OG_CFG_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_OG, 1), + SXE2_CMD_OG_TCAM_ENTRY_ALLOC, + SXE2_CMD_OG_TCAM_ENTRY_FREE, + SXE2_CMD_OG_TCAM_ENTRY_BATCH, + SXE2_CMD_OG_PROF_ID_ALLOC, + SXE2_CMD_OG_PROF_ID_FREE, + SXE2_CMD_OG_MASK_SEL_UPDATE, + + SXE2_CMD_RDMA_QP_ATTACH_MC = SXE2_MK_CMD(SXE2_CMD_MODULE_RDMA, 1), + SXE2_CMD_RDMA_QP_DETACH_MC, + SXE2_CMD_RDMA_QET_BIND_TC, + SXE2_CMD_RDMA_PF_FUNC_TABLE_INIT, + SXE2_CMD_RDMA_DESTROY_CC_QP, + SXE2_CMD_RDMA_GET_CC_QP_DFX, + SXE2_CMD_RDMA_NOTIFY_STATUS, + + SXE2_CMD_IPSEC_GET_CAPA = SXE2_MK_CMD(SXE2_CMD_MODULE_IPSEC, 1), + SXE2_CMD_IPSEC_TXSA_ADD, + SXE2_CMD_IPSEC_TXSA_DEL, + SXE2_CMD_IPSEC_TXSA_SET, + SXE2_CMD_IPSEC_RXSA_ADD, + SXE2_CMD_IPSEC_RXSA_DEL, + SXE2_CMD_IPSEC_RXSA_SET, + SXE2_CMD_IPSEC_STATS_SHOW, + SXE2_CMD_IPSEC_TXRX_SWITCH, + SXE2_CMD_IPSEC_DRV_CLEAR, + + SXE2_CMD_FNAV_FILTER_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_FNAV, 1), + SXE2_CMD_FNAV_TRACE_TRIGGER, + SXE2_CMD_FNAV_TRACE_RECORDER, + SXE2_CMD_FNAV_HW_STS, + SXE2_CMD_FNAV_HW_CLEAR, + SXE2_CMD_RXFT_PPE_INFO, + SXE2_CMD_VF_FNAV_FILTER_CLEAR, + SXE2_CMD_FNAV_STATS_GET, + SXE2_CMD_FNAV_DFLT_COMP_QIDX_SET, + 
SXE2_CMD_FNAV_SPACE_CNT_GET, + SXE2_CMD_FNAV_MATCH_GET_BATCH, + + SXE2_PXE_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_PXE, 1), + SXE2_UEFI_PRIV_DATA_SET, + SXE2_UEFI_PRIV_DATA_GET, + SXE2_UEFI_SOCINFO_GET, + + SXE2_CMD_QOS_MODE_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_DCB, 1), + SXE2_CMD_QOS_MODE_GET, + SXE2_CMD_LINK_FLOW_CONTROL_GET, + SXE2_CMD_LINK_FLOW_CONTROL_SET, + + SXE2_CMD_LLDP_MIB_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_LLDP, 1), + SXE2_CMD_LLDP_MIB_GET, + SXE2_CMD_LLDP_MIB_NOTIFY, + SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, + SXE2_CMD_LLDP_DCBX_FW_AGENT_GET, + SXE2_CMD_LLDP_FW_STATS, + SXE2_CMD_LLDP_REMOTE_MIBS_INFO, + SXE2_CMD_LLDP_REMOTE_MIBS_DUMP, + SXE2_CMD_LLDP_FW_AGENT_SET, + SXE2_CMD_LLDP_FW_AGENT_GET, + + SXE2_CMD_PTP_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_PTP, 1), + SXE2_CMD_PTP_RX_MODE_SET, + SXE2_CMD_PTP_SEM_CLEAN, + + SXE2_CMD_MAC_ADDR_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_MACADDR, 1), + SXE2_CMD_MAC_ADDR_SET, + SXE2_CMD_MAC_MTU_SET, + + SXE2_CMD_MACSEC_TXSC_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_MACSEC, 1), + SXE2_CMD_MACSEC_TXSA_CFG, + SXE2_CMD_MACSEC_RXSC_CFG, + SXE2_CMD_MACSEC_RXSA_CFG, + SXE2_CMD_MACSEC_FIX_CFG, + + SXE2_CMD_FW_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_UPGRADE, 1), + SXE2_CMD_FW_DOWNLOAD_PRE, + SXE2_CMD_FW_DOWNLOAD_OPEN, + SXE2_CMD_FW_DOWNLOAD_FLASH, + SXE2_CMD_FW_DOWNLOAD_CLOSE, + SXE2_CMD_FW_DOWNLOAD_END, + + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE_STATS_MAP, 1), + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_RES_REL, + SXE2_CMD_TXQUEUE_STATS_MAP_RES_REL, + + SXE2_CMD_ETHTOOL_LOOPBACK_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_ETHTOOL, 1), + + SXE2_CMD_FLM_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_FLM, 1), + SXE2_CMD_FLM_LINK_UP, + SXE2_CMD_FLM_LINK_DOWN, + SXE2_CMD_FLM_FEC_GET, + SXE2_CMD_FLM_FEC_SET, + SXE2_CMD_FLM_AN_SET, + SXE2_CMD_FLM_LINK_INFO_SET, + SXE2_CMD_FLM_LINK_INFO_GET, + SXE2_CMD_FLM_LINK_STATUS_SET, + SXE2_CMD_FLM_LINK_STATUS_SYNC, + SXE2_CMD_TEST_LINK_STATUS, + SXE2_CMD_FLM_LINK_UP_DOWN_SET, + + SXE2_CMD_SFP_WHITE_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_SFP, 1), + SXE2_CMD_SFP_TX_FAULT_CFG, + SXE2_CMD_SFP_SET_FEC_CFG, + SXE2_CMD_SFP_GET_FEC_CFG, + SXE2_CMD_SFP_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_SET_LINKINFO_CFG, + SXE2_CMD_PERSIST_GET_LINK_CFG, + SXE2_CMD_SUPPORT_SPEED_GET_CFG, + SXE2_CMD_CURRENT_SPEED_STATUS_GET_CFG, + SXE2_CMD_CURRENT_SPEED_GET_CFG, + SXE2_CMD_SFP_SET_LINK_CFG, + SXE2_CMD_GET_LINKST_CFG, + SXE2_CMD_GET_VENDOR_INFO_CHECK_WARNING, + SXE2_CMD_GET_OPT_DATA_INFO, + + SXE2_CMD_CLI_READ_REG = SXE2_MK_CMD(SXE2_CMD_MODULE_RWREG, 1), + SXE2_CMD_CLI_WRITE_REG, + + SXE2_CMD_UDPTUNNEL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_UDPTUNEEL, 1), + SXE2_CMD_UDPTUNNEL_DEL, + SXE2_CMD_UDPTUNNEL_GET, + + SXE2_CMD_BFD_INTRQ_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_BFD, 1), + SXE2_CMD_BFD_CAPA_GET, + SXE2_CMD_BFD_CFG_SET, + SXE2_CMD_BFD_SESS_CFG_SET, + SXE2_CMD_BFD_SESS_STATE_GET, + SXE2_CMD_BFD_PACK_PROF_SET, + SXE2_CMD_BFD_FLOW_RULE_SET, + SXE2_CMD_BFD_KEYLEN_SET, + + SXE2_CMD_NCD_CORE_NUM = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD, 1), + SXE2_CMD_NCD_CORE_FS_QUEUE_SET, + SXE2_CMD_NCD_CORE_FS_QUEUE_GET, + + SXE2_CMD_NCD_UDF_CAPA_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD_UDF, 1), + + SXE2_CMD_NCD_SWITCH_TAG_EN, + 
SXE2_CMD_NCD_SWITCH_TAG_SET, + SXE2_CMD_NCD_SWITCH_TAG_GET, + SXE2_CMD_NCD_TXLEN_ADJ_SET, + SXE2_CMD_NCD_TXLEN_ADJ_GET, + + SXE2_CMD_NCD_SDF_EN, + SXE2_CMD_NCD_SDF_SET, + SXE2_CMD_NCD_SDF_GET, + + SXE2_CMD_NCD_SDN_UDP_ADD, + SXE2_CMD_NCD_SDN_UDP_DEL, + SXE2_CMD_NCD_SDN_UDP_QUERY, + SXE2_CMD_NCD_SDN_ADD, + SXE2_CMD_NCD_SDN_DEL, + SXE2_CMD_NCD_SDN_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_UDP_ADD, + SXE2_CMD_NCD_SDN_IPSEC_UDP_DEL, + + SXE2_CMD_NCD_APP_PORT_SET, + + SXE2_CMD_NCD_PKT_PRI_SET, + + SXE2_CMD_ACL_LUT_ALLOC = SXE2_MK_CMD(SXE2_CMD_MODULE_ACL, 1), + SXE2_CMD_ACL_LUT_DEALLOC, + SXE2_CMD_ACL_PROF_SEL_BASE_SET, + SXE2_CMD_ACL_SCEN_ALLOC, + SXE2_CMD_ACL_SCEN_DEALLOC, + SXE2_CMD_ACL_LUT_ENTRY_SET, + SXE2_CMD_ACL_ACT_ENTRY_SET, + SXE2_CMD_ACL_TRACE_TRIGGER, + SXE2_CMD_ACL_TRACE_RECORDER, + SXE2_CMD_ACL_DFX_INFO_GET, + + SXE2_CMD_MAX = 0xFFFF, +}; + +enum sxe2_drv_event_code { + SXE2_EVENT_CODE_INVAL = 0, + SXE2_EVENT_CODE_AUTO_LOG, + SXE2_EVENT_CODE_MIB_NOTIFY, + SXE2_EVENT_CODE_SFP_WHITE_LIST, + SXE2_EVENT_CODE_SFP_TX_FAULT, + SXE2_EVENT_CODE_QSFP_TX_FAULT_COUNT, + SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY, + + SXE2_EVENT_CODE_MAX, + SXE2_EVENT_CODE_ALL = 255, +}; + +enum sxe2_desc_err_code { + SXE2_CMD_DESC_ERR_NONE = 0, + SXE2_CMD_DESC_ERR_DES_ERR, + SXE2_CMD_DESC_ERR_BUF_ERR, + SXE2_CMD_DESC_ERR_BUF_NUM_ERR, + SXE2_CMD_DESC_ERR_SRC_BUSY, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK2, + SXE2_CMD_DESC_ERR_SESSION_BUFFER_OV, + SXE2_CMD_DESC_ERR_CMD_BUFFER_OV, + SXE2_CMD_DESC_ERR_IN_OUT_LEN_LACK, + SXE2_CMD_DESC_ERR_UNKNOW_OPCODE, + SXE2_CMD_DESC_ERR_UNKNOW_CMD_TYPE, + SXE2_CMD_DESC_ERR_ADMINQ_STATE, + SXE2_CMD_DESC_ERR_FIND_JOB, + SXE2_CMD_DESC_ERR_NONE_START, + SXE2_CMD_DESC_ERR_JOB_DELIVERY, + SXE2_CMD_DESC_ERR_PF_FLR, + SXE2_CMD_DESC_ERR_OVER_FLOW, + SXE2_CMD_DESC_ERR_SEQ_ERR, + SXE2_CMD_DESC_ERR_NR, +}; + +enum sxe2_cmd_drv_err_code { + SXE2_CMD_DRV_SUCCESS = 0, + SXE2_CMD_DRV_HW_OP_ERR = 1024, + + SXE2_CMD_DRV_NO_FREE_VSI, + SXE2_CMD_DUMP_LOG_FAILED, + + SXE2_CMD_DRV_RXQ_CFG_FAIL, + SXE2_CMD_DRV_TXQ_EN_FAIL, + SXE2_CMD_DRV_TXQ_DISA_FAIL, + + SXE2_CMD_DRV_PFR_FAILED, + SXE2_CMD_DRV_VFR_FAILED, + SXE2_CMD_DRV_PARAM_INVALID, + SXE2_CMD_DRV_HW_RETURN, + SXE2_CMD_DRV_HW_TIMEOUT, + SXE2_CMD_DRV_HW_MISMATCH, + SXE2_CMD_DRV_HW_NOSPC, + SXE2_CMD_DRV_HW_EXIST, + SXE2_CMD_DRV_HW_HID_EXIST, + SXE2_CMD_DRV_HW_NOENT, + SXE2_CMD_DRV_FW_NOMEM, + SXE2_CMD_DRV_HW_NO_RES, + SXE2_CMD_DRV_TLV_ERROR, + SXE2_CMD_DRV_DCB_ERROR, + SXE2_CMD_DRV_LINK_REBUILD_FAILED, + + SXE2_CMD_DRV_UNSUPPORT, + SXE2_CMD_DRV_TXSCHED_CFG_FAILED, + SXE2_CMD_DRV_TXSCHED_TIMEOUT, + SXE2_CMD_DRV_TXSCHED_TEID_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_CHILDIDX_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_ALLOC_FAILED, + + SXE2_CMD_DRV_UDP_TUNNEL_WRONG_PORT, + + SXE2_CMD_DRV_NCD_UNSUPPORT, + SXE2_CMD_DRV_BFD_INTQ_NOP, + SXE2_CMD_DRV_BFD_FLOW_NOSPC, + SXE2_CMD_DRV_BFD_FLOW_HT_COLLISION, + + SXE2_CMD_DRV_LINK_UPDATE_FAILED, + SXE2_OPT_DEV_BUSY, + +}; + +enum sxe2_fwc_mapping_mode { + SXE2_MAPPING_CONTIG = 0, + SXE2_MAPPING_SCATTER, +}; + +enum sxe2_fwc_vsi_type_hw { + SXE2_VSI_HW_T_VF = 0, + SXE2_VSI_HW_T_VMDQ2 = 1, + SXE2_VSI_HW_T_PF = 2, + SXE2_VSI_HW_T_MNG = 3, +}; + +enum sxe2_cmd_buffer_st { + SXE2_CMD_BUFFER_ST_NORMAL = (s16)0, + SXE2_CMD_BUFFER_ST_OVERFLOW, + SXE2_CMD_BUFFER_ST_SEQ_ERR, + SXE2_CMD_BUFFER_ST_NR, +}; + +enum sxe2_cmd_queue_stats_map_add { + SXE2_CMD_QUEUE_STATS_MAP_ADD_SUCCEED = 0, + SXE2_CMD_QUEUE_STATS_MAP_ADD_FAIL = 1, +}; + +enum sxe2_weight_type { + SXE2_UNKNOWN_TYPE = 0, + 
SXE2_CIR_WEIGHT, + SXE2_PIR_WEIGHT, +}; + +struct sxe2_cmd_desc { + __le16 flags; + __le16 opcode; + __le16 data_len; + __le16 ret; + u8 checksum; + u8 rsvd[3]; + __le32 custom1; + __le32 custom2; + __le32 custom3; + __le32 buf_addr_h; + __le32 buf_addr_l; +}; + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F +struct sxe2_cmd_hdr { + __le32 magic_code; + __le16 tran_in_len; + __le16 tran_out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 no_resp; + u8 resv1; + __le16 cur_in_len; + u8 resv[24]; + u8 body[]; +}; + +struct sxe2_drv_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 mac_id; + u8 mac_id_valid; + u8 resv[12]; + u8 body[]; +}; + +struct sxe2_channel_handshake_req { + __le32 drv_ver; + u8 drv_mode; + u8 resv[3]; + __le64 timestamp; +}; + +struct sxe2_channel_handshake_resp { + __le32 fw_ver; +}; + +struct sxe2_fwc_serial_num_resp { + u8 serial_num[SXE2_SERIAL_NUM_LEN]; +}; + +struct sxe2_fwc_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_vf_caps { + __le16 cnt; + __le16 base_idx; + u8 sriov_cap; + u8 resv[27]; +}; + +struct sxe2_fwc_queue_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_msix_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_vsi_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_ppe_caps { + __le16 rss_lut_size; + __le16 fnav_space_bsize; + __le16 fnav_space_gsize; + __le16 fnav_counter_base; + __le16 fnav_counter_num; + __le16 bfd_sess_size; + __le16 rss_global_lut_base; + __le16 rss_global_lut_num; + u8 resv[16]; +}; + +struct sxe2_dev_common_caps { + u8 rdma_support; + u8 ipsec_support; + u8 macsec_support; + u8 rss_support; + u8 fnav_support; + u8 acl_support; + u8 switch_support; + u8 bfd_support; + u8 sdn_support; + u8 sdf_support; + u8 core_fs_support; + u8 switch_tag_support; + u8 mac_ts_support; + u8 resv[19]; +}; + +struct sxe2_fwc_dev_caps { + u8 pf_cnt; + u8 port_cnt; + __le16 vf_cnt; + struct sxe2_dev_common_caps dev_common_caps; + u8 pad[92]; +}; + +struct sxe2_common_caps { + u8 vmdq_support; + u8 ptp_owner; + u8 resv[30]; +}; + +struct sxe2_fwc_func_caps { + struct sxe2_fwc_vf_caps vf_caps; + struct sxe2_fwc_queue_caps tx_caps; + struct sxe2_fwc_queue_caps rx_caps; + struct sxe2_fwc_msix_caps msix_caps; + struct sxe2_fwc_vsi_caps vsi_caps; + struct sxe2_fwc_ppe_caps ppe_caps; + struct sxe2_common_caps common_caps; + u8 pf_idx; + u8 port_idx; + u8 mode; + u8 resv; +}; + +struct sxe2_fwc_sw_cfg_entry { + __le16 type; + __le16 idx; + __le16 sw_id; + __le16 pf_vf_id; + u8 resv[8]; +}; + +struct sxe2_fwc_phy_port_info { + u8 mac_to_phy_port[SXE2_MAC_NUM]; +}; + +struct sxe2_fwc_sw_cfg { + __le16 count; + __le16 remain; + struct sxe2_fwc_sw_cfg_entry caps_entry[]; +}; + +struct sxe2_fwc_tc_rxq_info { + __le16 pow; + __le16 offset; +}; + +struct sxe2_fwc_vsi_q_info { + u8 mapping_mode; + u8 resv[7]; + __le16 cnt; + u8 resv1[6]; + union { + __le16 base_idx; + __le16 q_id[SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT]; + }; + + struct sxe2_fwc_tc_rxq_info tc_q_map[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_fnav_info { + u8 fnav_enable; + u8 auto_evict; + u8 prog_enable; + u8 rsv0[1]; + __le16 gsize; + __le16 bsize; +}; + +struct sxe2_fwc_vsi_props { + u8 rxq_valid; + u8 
rsv[31]; + struct sxe2_fwc_vsi_q_info rxq_info; + struct sxe2_fwc_vsi_q_info txq_info; + struct sxe2_fwc_vsi_fnav_info fnav_info; + +}; + +struct sxe2_fwc_vsi_crud_info { + __le16 vsi_id; + __le16 vf_id; + u8 type; + u8 is_clear; + u8 resv[10]; + struct sxe2_fwc_vsi_props props; +}; + +struct sxe2_fwc_vsi_crud_resp { + __le16 vsi_id; + u8 resv[14]; +}; + +struct sxe2_fwc_ena_txq_entry { + __le16 q_id; + u8 resv[2]; + u8 txq_ctxt[SXE2_TXQ_CTXT_LEN]; +}; + +struct sxe2_fwc_ena_txqs { + __le16 cnt; + u8 resv[14]; + struct sxe2_fwc_ena_txq_entry txq[]; +}; + +struct sxe2_fwc_dis_txqs { + __le16 cnt; + u8 resv[2]; + __le16 q_id[]; +}; + +#define SXE2_TXSCHED_PROFIDX_INVALID U16_MAX +#define SXE2_TXSCHED_TEID_INVALID 0x7FFF +#define SXE2_TXSHCED_HW_DEFT_LAYER 2 +#define SXE2_TXSCHED_NODE_CHILD_MAX 8 + +#define SXE2_TXSCHED_MIN_BW 500 +#define SXE2_TXSCHED_MAX_BW 100000000 +#define SXE2_TXSCHED_BW_50G 50000000 +#define SXE2_TXSCHED_BW_25G 25000000 +#define SXE2_TXSCHED_BW_10G 10000000 + +#define SXE2_TXSCHED_DFLT_BW 0xFFFFFFFF +#define SXE2_TXSCHED_CLK_FREQ 500000000 +#define SXE2_TXSCHED_ARB_CREDIT_TOTAL 32768 +#define SXE2_TXSCHED_ARB_CREDIT_UNIT 328 +#define SXE2_TXSCHED_ARB_CREDIT_DFLT SXE2_TXSCHED_ARB_CREDIT_TOTAL + +#define SXE2_NODE_RL_TYPE_CIR BIT(0) +#define SXE2_NODE_RL_TYPE_EIR BIT(1) +#define SXE2_NODE_RL_TYPE_SRL BIT(2) + +#define SXE2_NODE_ARB_MODE_BPS 0 +#define SXE2_NODE_ARB_MODE_PPS 1 + +#define SXE2_NODE_STATUS_ENABLE 0x0 +#define SXE2_NODE_STATUS_SUSPEND 0x1 + +#define SXE2_TXSCHED_DFLT_RL_PROF_ID 0 + +enum sxe2_txsched_node_owner { + SXE2_TXSCHED_NODE_OWNER_LAN = 0, + SXE2_TXSCHED_NODE_OWNER_RDMA, + SXE2_TXSCHED_NODE_OWNER_USER, + SXE2_TXSCHED_NODE_OWNER_UNKOWN, +}; + +enum sxe2_txsched_hw_layer { + SXE2_TXSCHED_HW_LAYER_UNDEFINED = 0, + SXE2_TXSCHED_HW_LAYER_PORT, + SXE2_TXSCHED_HW_LAYER_TC, + SXE2_TXSCHED_HW_LAYER_SW_ENTRY, + SXE2_TXSCHED_HW_LAYER_4, + SXE2_TXSCHED_HW_LAYER_5, + SXE2_TXSCHED_HW_LAYER_6, + SXE2_TXSCHED_HW_LAYER_7, +}; + +struct sxe2_txsched_generic_props { + u8 layer_max; + + __le32 clk_freq; +}; + +struct sxe2_txsched_layer_props { + u8 hw_layer; + __le16 max_rl_cir_prof; + __le16 max_rl_pir_prof; + __le16 max_rl_srl_prof; +}; + +struct sxe2_fwc_txsched_cap_resp { + struct sxe2_txsched_generic_props generic; + + struct sxe2_txsched_layer_props layer[SXE2_TXSCHED_HW_LAYER_7]; +}; + +struct scbge_txsched_node_bw { + __le32 bw; + __le32 prof_id; + __le16 weight; + __le16 rsv; +}; + +struct sxe2_txsched_node_props { + u8 prio; + u8 status; + u8 arb_mode; + u8 rl_type; + enum sxe2_txsched_hw_layer hw_layer; + struct scbge_txsched_node_bw cir; + struct scbge_txsched_node_bw srlPir; + u8 adj_lvl; + u8 rsv[3]; +}; + +struct sxe2_txsched_node_info { + __le16 parent_teid; + __le16 node_teid; + __le32 sibling_idx; + struct sxe2_txsched_node_props data; +}; + +struct sxe2_fwc_txsched_dflt_topo_resp { + struct sxe2_txsched_node_info node_info[SXE2_TXSHCED_HW_DEFT_LAYER]; +}; + +struct sxe2_txsched_topo_upd_hdr { + __le16 parent_teid; + __le16 node_num; + __le16 start_child_idx; + __le16 rsv; +}; + +struct sxe2_fwc_txsched_del_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_move_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_query_node_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_txsched_query_node_resp { + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_txsched_pri_node_cfg_req { + __le16 parent_teid; + __le16 
node_teid; + u8 sibling_idx; + u8 prio; +}; + +struct sxe2_fwc_txsched_weight_node_cfg_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; + __le16 weight; + enum sxe2_weight_type type; +}; + +struct sxe2_fwc_txsched_add_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + struct sxe2_txsched_node_info node[]; +}; + +struct sxe2_fwc_txsched_add_nodes_resp { + __le32 add_node_num; + __le16 node_teid[SXE2_TXSCHED_NODE_CHILD_MAX]; + __le16 sibling_idx[SXE2_TXSCHED_NODE_CHILD_MAX]; +}; + +struct sxe2_fwc_txq_stats_map_pool_get_resp { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_pool_get_resp { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_pool_set_req { + u8 hw_index; + u32 cfg_info; +}; + +struct sxe2_fwc_rxq_stats_map_pool_set_req { + u8 hw_pool_idx; + u32 cfg_info; +}; + +struct sxe2_fwc_txq_stats_map_get_info_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_info_clear_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_info_clear_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_get_info_resp { + u32 txq_lan_pkt_cnt; + u32 txq_lan_byte_cnt; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_resp { + u64 rxq_lan_in_pkt_cnt; + u64 rxq_lan_in_byte_cnt; + + u64 rxq_fd_in_pkt_cnt; + + u64 rxq_mng_in_pkt_cnt; + u64 rxq_mng_in_byte_cnt; + u64 rxq_mng_out_pkt_cnt; +}; + +struct sxe2_fwc_rxlan_rxq_stats_map_get_info_resp { + u64 rxq_lan_out_pkt_cnt; + u64 rxq_lan_out_byte_cnt; +}; + +struct sxe2_txq_ctxt { + + __le16 q_idx_in_nic; + u8 rsv[2]; + + __le64 base_addr; + + __le16 cgd_idx; + __le16 vmvf_idx; + u8 port_idx; + u8 pf_idx; + u8 vmvf_type; + u8 tsyn_enable; + u8 alt_vlan; + u8 wb_mode; + u8 itr_notify_mode; + u8 legacy_enable; + u8 adv_sso; + u8 rsv1[3]; + + __le16 src_vsi; + __le16 cpuid; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; + u8 rsv2; + + __le16 q_idx_in_func; + u8 rd_desc_ro; + u8 wb_desc_ro; + __le32 qlen; + u8 ptp_en; + u8 rsv3[3]; + + u8 quanta_prof_idx; + + u8 is_tm; + u8 rsv4[2]; +}; + +struct sxe2_txsched_add_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_cfg_txq_req { + struct sxe2_txq_ctxt ctxt; + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_st_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; +}; + +struct sxe2_fwc_st_txq_resp { + u8 state; +}; + +struct sxe2_fwc_add_qset_req { + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_add_qset_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_cfg_txq_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_txsched_del_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + __le16 parent_teid; + __le16 sibling_idx; + __le16 node_teid; + __le16 rsv; +}; + +struct sxe2_txsched_rl_profile_pre_query_req { + u8 hw_layer; + u8 prof_type; + __le16 rsv; + u32 bw; +}; + +struct sxe2_txsched_rl_profile_pre_query_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_req { + u8 hw_layer; + u8 prof_type; + __le16 orig_prof_id; + + u32 bw; + __le16 teid; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_resp { + u8 hw_layer; + u8 prof_type; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_req { + u8 hw_layer; + u8 rsv; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_srl_req { + u8 hw_layer; + u8 attach; + __le16 prof_id; + __le16 teid; + 
__le16 rsv; +}; + +struct sxe2_txsched_tc_node { + __le16 teid; + __le16 parent_teid; + __le16 silbing_idx; + __le16 rsv; +}; + +struct sxe2_txsched_ets_query_rep { + u8 tc_cnt; + u8 rsv[3]; +}; + +struct sxe2_txsched_ets_query_resp { + u8 tc_cnt; + struct sxe2_txsched_tc_node tc_node[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_del_qset_req { + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_disable_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_txsched_suspend_node_req { + u8 port; + u8 tc; + __le16 node_teid; + __le16 parent_teid; + u8 child_idx; +}; + +struct sxe2_fwc_txsched_resume_node_req { + u8 port; + u8 tc; + struct sxe2_txsched_node_info node; +}; + +enum sxe2QosMode { + SXE2_QOS_MODE_VLAN = 0, + SXE2_QOS_MODE_DSCP, +}; + +enum sxe2_block_id { + SXE2_HW_BLOCK_ID_SWITCH = 0x1, + SXE2_HW_BLOCK_ID_ACL, + SXE2_HW_BLOCK_ID_RSS, + SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_BFD = SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_PE, + SXE2_HW_BLOCK_ID_MAX +}; + +enum sxe2_class_id { + SXE2_XLT0_CLASS_ID = 0x1, + SXE2_XLT2_CLASS_ID, + SXE2_EXTRACTOR_CLASS_ID, + SXE2_MAP_CLASS_ID, + SXE2_TCAM_CLASS_ID, + SXE2_RECIPE_CLASS_ID, +}; + +#define SXE2_CFG_ID(block_id, class_id) ((block_id) << 16 | (class_id)) +#define SXE2_CFG_CLASS_ID_MASK (0xFFFF) + +#define SXE2_CFG_GROUP_SIZE SXE2_DRV_CMD_MAX_MSG_SIZE + +enum { + SXE2_SWITCH_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_SWITCH, SXE2_XLT0_CLASS_ID), + SXE2_SWITCH_XLT2_CLASS_ID, + SXE2_SWITCH_EXTRACTOR_CLASS_ID, + SXE2_SWITCH_MAP_CLASS_ID, + SXE2_SWITCH_TCAM_CLASS_ID, + + SXE2_ACL_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_ACL, SXE2_XLT0_CLASS_ID), +}; + +enum sxe2_udp_tunnel_protocol { + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN = 0, + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN_GPE, + SXE2_UDP_TUNNEL_PROTOCOL_GENEVE, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_C = 4, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_U, + SXE2_UDP_TUNNEL_PROTOCOL_PFCP, + SXE2_UDP_TUNNEL_PROTOCOL_ECPRI, + SXE2_UDP_TUNNEL_PROTOCOL_MPLS, + SXE2_UDP_TUNNEL_PROTOCOL_NVGRE = 10, + SXE2_UDP_TUNNEL_PROTOCOL_L2TP, + SXE2_UDP_TUNNEL_PROTOCOL_TEREDO, + SXE2_UDP_TUNNEL_MAX, +}; + +struct sxe2_cfg_group_hdr { + __le16 class_cnt; + __le16 size; +}; + +struct sxe2_cfg_class { + __le32 class_id; + __le16 offset; + __le16 size; +}; + +struct sxe2_pipeline_group { + struct sxe2_cfg_group_hdr hdr; + struct sxe2_cfg_class class[]; +}; + +struct sxe2_es_fv { + u8 prot_id; + u8 rsv; + __le16 off; +}; + +struct sxe2_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; + __le64 rx_lan_engine_packets; +}; + +struct sxe2_pf_hw_stats { + + __le64 tx_frame_good; + __le64 rx_frame_good; + __le64 rx_crc_errors; + __le64 tx_bytes_good; + __le64 rx_bytes_good; + __le64 tx_multicast_good; + __le64 tx_broadcast_good; + __le64 rx_multicast_good; + __le64 rx_broadcast_good; + __le64 rx_len_errors; + __le64 rx_out_of_range_errors; + __le64 rx_symbol_err; + __le64 rx_pause_frame; + __le64 tx_pause_frame; + + __le64 rx_discards_phy; + __le64 tx_dropped_link_down; + __le64 tx_bytes_good_bad; + __le64 tx_frame_good_bad; + __le64 rx_size_64; + __le64 rx_size_65_127; + __le64 rx_size_128_255; + __le64 rx_size_256_511; + __le64 rx_size_512_1023; + __le64 rx_size_1024_1522; + __le64 rx_size_1523_max; + __le64 rx_illegal_bytes; + __le64 tx_unicast; + __le64 
tx_broadcast; + __le64 tx_multicast; + __le64 tx_vlan_packet_good; + __le64 tx_size_64; + __le64 tx_size_65_127; + __le64 tx_size_128_255; + __le64 tx_size_256_511; + __le64 tx_size_512_1023; + __le64 tx_size_1024_1522; + __le64 tx_size_1523_max; + __le64 tx_underflow_error; + __le64 rx_byte_good_bad; + __le64 rx_frame_good_bad; + __le64 rx_unicast_good; + __le64 rx_vlan_packets; + __le64 prio_xoff_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xoff_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_2_xoff[SXE2_MAX_USER_PRIORITY]; + __le64 rx_pause; + __le64 tx_pause; + __le64 rx_undersize_good; + __le64 rx_runt_error; + __le64 rx_oversize_good; + __le64 rx_jabbers; + __le64 rx_oversize_pkts_phy; + + __le64 rx_out_of_buffer; + __le64 rx_qblock_drop; + __le64 rx_discards_ips_phy; + + __le64 rx_pcs_symbol_err_phy; + __le64 rx_corrected_bits_phy; + + __le64 rx_err_lane_0_phy; + __le64 rx_err_lane_1_phy; + __le64 rx_err_lane_2_phy; + __le64 rx_err_lane_3_phy; + __le64 rx_prio_buf_discard[8]; + __le64 fnav_match; + __le64 spoof_mac_packets; + __le64 spoof_vlan_packets; +}; + +#define SXE2_FNAV_INVALID_STATS_IDX (0xFFFF) + +struct sxe2_fwc_pf_stats_req { + __le16 fnav_stats_idx; +}; + +struct sxe2_fwc_pf_stats_resp { + struct sxe2_pf_hw_stats stats; +}; + +struct sxe2_fwc_vsi_stats { + struct sxe2_vsi_hw_stats stats; + __le16 vsi_id; +}; + +struct sxe2_fwc_vsi_stats_req { + __le16 vsi_cnt; + __le16 vsi_ids[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_stats_resp { + __le16 vsi_cnt; + struct sxe2_fwc_vsi_stats vsi_stats[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_pxe_req { + u8 ena; +}; + +#define SXE2_EVENT_SUBSCRIBE_MAX_COUNT 32 + +struct sxe2_fwc_event { + u8 count; + u8 rsv[3]; + __le16 code[SXE2_EVENT_SUBSCRIBE_MAX_COUNT]; +}; + +struct sxe2_fwc_fw_log_ack { + __le32 result; +}; + +enum sxe2_default_recipe_id { + SXE2_DEFAULT_RECIPE_MAC = 0, + SXE2_DEFAULT_RECIPE_VLAN, + SXE2_DEFAULT_RECIPE_TX_ETYPE, + SXE2_DEFAULT_RECIPE_RX_ETYPE, + SXE2_DEFAULT_RECIPE_ALLMULTI, + SXE2_DEFAULT_RECIPE_PROMISC, + SXE2_DEFAULT_RECIPE_SRCVSI, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT, + SXE2_DEFAULT_RECIPE_SRCVSI_EXT, + SXE2_DEFAULT_RECIPE_MAX +}; + +union sxe2_switch_full_key_dw0 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv0 : 16; + u32 rid : 6; + u32 rsvd0 : 9; + u32 is_root : 1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 is_root : 1; + u32 rsvd0 : 9; + u32 rid : 6; + u32 fv0 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw1 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv2 : 16; + u32 fv1 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv1 : 16; + u32 fv2 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw2 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv4 : 16; + u32 fv3 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv3: 16; + u32 fv4: 16; +#endif + } field; +}; + +struct sxe2_fwc_switch_rule { + __le16 flag; + __le16 recipe_id; + __le32 act; + __le16 rule_id; + u8 recv[2]; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + u8 add_fkot; + u8 resv2[3]; +}; + +struct sxe2_fwc_switch_vsi_list { + __le16 flag; + __le16 vsi_list_id; + __le16 vsi_cnt; + __le16 vsi[]; +}; + +union sxe2_switch_large_action { + u32 val; + struct { + u32 rsv0 : 8; + u32 valid : 1; + u32 rsv1 : 2; + u32 list : 1; + u32 vsi_list : 10; + u32 fwd_vsi000 : 3; + u32 rsv2 : 8; + } reg; +}; + +struct 
sxe2_fwc_switch_large_action { + union sxe2_switch_large_action action[SXE2_LARGE_ACTION_COUNT_IN_GROUP]; + __le32 idx; +}; + +struct sxe2_fwc_switch_recipe { + u8 rid :6; + u8 rcp_rsv0 :1; + u8 is_root :1; + u8 lookup_index0 :7; + u8 lookup_index0_valid :1; + u8 lookup_index1 :7; + u8 lookup_index1_valid :1; + u8 lookup_index2 :7; + u8 lookup_index2_valid :1; + u8 lookup_index3 :7; + u8 lookup_index3_valid :1; + u8 lookup_index4 :7; + u8 lookup_index4_valid :1; + u8 join_priority; + u8 priority :3; + u8 need_pass_l2 :1; + u8 allow_pass_l2 :1; + u8 inverse_action :1; + u8 prune_idx :2; + __le32 default_action :19; + __le32 rcp_rsv1 :4; + __le32 default_action_valid:1; + __le32 rcp_rsv2 :8; + __le32 fv4_bitmask :16; + __le32 fv3_bitmask :16; + __le32 fv2_bitmask :16; + __le32 fv1_bitmask :16; + __le32 fv0_bitmask :16; + __le32 rcp_rsv3 :16; + __le16 ref_cnt; +}; + +struct sxe2_fwc_switch_profile_recipe_map { + __le16 profile_id; + __le32 map[2]; +}; + +struct sxe2_fwc_switch_share_id { + __le32 usage; + __le32 share_id[SXE2_MAX_NUM_RECIPES]; + __le32 bitmap[SXE2_MAX_NUM_RECIPES]; +}; + +struct sxe2_fwc_switch_rule_resp { + __le16 index; + u8 resv1[2]; + __le32 act; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + __le16 ref_cnt; + u8 resv2[2]; +}; + +struct sxe2_fwc_switch_vsi_list_resp { + __le16 index; + u8 resv1[2]; + __le32 vsi[SXE2_VSI_LIST_DAT_LEN]; + u8 resv2[4]; +}; + +struct sxe2_fwc_switch_mac_info { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fwc_switch_mac_info_resp { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fw_mtu_info { + __le32 mtu; + u8 is_set_hw; + u8 resv0; + __le16 resv1; +}; + +struct sxe2_fwc_switch_complex_rule { + __le16 flag; + __le32 act; + u8 priority; + u8 resv; + u8 add_fkot; + + __le16 word_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 profile_cnt; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_switch_complex_rule_resp { + __le32 act; + + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_og_trace_rcd { + u8 done; + u8 status; + u8 profile_id; + u8 resv; + __le16 fv[SXE2_SWITCH_FV_CNT]; +}; + +struct sxe2_recp_trace_rcd { + __le16 recipe_id; + u8 ht1_hit; + u8 ht2_hit; + u8 fkot_hit; + u8 kt_hit; + __le16 index; +}; + +struct sxe2_swe_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + struct sxe2_recp_trace_rcd recp[SXE2_PACKET_MAX_RECIPES]; +}; + +struct sxe2_rg_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + __le32 ppe_info[SXE2_PACKET_INFO_DWORD_CNT]; +}; + +struct sxe2_fwc_switch_trace_req { + u8 is_rx; + u8 resv[3]; +}; + +struct sxe2_fwc_switch_trace_resp { + struct sxe2_og_trace_rcd og; + struct sxe2_swe_trace_rcd swe; + struct sxe2_rg_trace_rcd rg; +}; + +struct sxe2_fwc_recipe_get_req { + __le16 recipe_id; + u8 resv[2]; +}; + +struct sxe2_fwc_recipe_get_resp { + u8 is_root; + u8 priority; + u8 is_inverse; + u8 resv; + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct 
sxe2_fwc_recipe_add_req { + u8 is_root; + u8 priority; + __le16 profile_cnt; + __le16 recipe_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct sxe2_fwc_recipe_add_resp { + __le16 recipe_cnt; + __le16 recipe_root; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_recipe_del_req { + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +enum sxe2_switch_dfx_stats_index { + SXE2_SW_DFX_PROFILE_ID_BYPASS = 0, + SXE2_SW_DFX_PROFILE_TCAM_HIT, + SXE2_SW_DFX_PROFILE_TCAM_MISS, + SXE2_SW_DFX_RX_FB_INPUT, + SXE2_SW_DFX_TX_PA_INPUT, + SXE2_SW_DFX_OG_PROCESS_RX, + SXE2_SW_DFX_OG_PROCESS_TX, + SXE2_SW_DFX_OUTPUT_TO_SWE, + SXE2_SW_DFX_OUTPUT_TO_RG, + SXE2_SW_DFX_MEMORY_HT1_IN, + SXE2_SW_DFX_MEMORY_HT1_OUT, + SXE2_SW_DFX_MEMORY_HT2_IN, + SXE2_SW_DFX_MEMORY_HT2_OUT, + SXE2_SW_DFX_MEMORY_KT_IN, + SXE2_SW_DFX_MEMORY_KT_OUT, + SXE2_SW_DFX_SWE_OG_IN, + SXE2_SW_DFX_SWE_TX_IN, + SXE2_SW_DFX_SWE_RX_IN, + SXE2_SW_DFX_SWE_OUTPUT_ACTION, + SXE2_SW_DFX_PIPE_HASH_MISS, + SXE2_SW_DFX_PIPE_HASH_HIT, + SXE2_SW_DFX_PIPE_KT_HIT, + SXE2_SW_DFX_PIPE_HI1_HIT, + SXE2_SW_DFX_PIPE_HI2_HIT, + SXE2_SW_DFX_PIPE_FKOT_HIT, + SXE2_SW_DFX_PIPE_HW_SEARCH_ERR, + + SXE2_SW_DFX_MAX, +}; + +struct sxe2_fwc_switch_dfx_stats { + __le32 stats[SXE2_SW_DFX_MAX]; +}; + +enum sxe2_ipsec_stats_index { + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC0, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC0, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC1, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC1, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC2, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC2, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC3, + 
SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC3, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC3, + + SXE2_IPSEC_STATS_MAX, +}; + +struct sxe2_ipsec_stats { + __le64 stats[SXE2_IPSEC_STATS_MAX]; +}; + +struct sxe2_fwc_vsi_vlan_filter { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_src_prune { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_loopback { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_spoofchk { + __le16 vsi_hw_id; + u8 mac_enable; + u8 vlan_enable; +}; + +struct sxe2_fwc_switch_dfx_irq { + u8 enable; + u8 resv[3]; +}; + +struct sxe2_rss_vsi_hctrl { + __le16 vsi_hw_id; + u8 hash_type; + u8 resv; +}; + +struct sxe2_rss_lut_cfg { + __le16 vsi_hw_id; + u8 lut_type; + u8 global_lut_id; + __le16 lut_size; + u8 resv[2]; + u8 lut[]; +}; + +struct sxe2_rss_core_lut_info { + u8 lut[SXE2_RSS_CORE_LUT_SIZE]; +}; + +struct sxe2_rss_hkey_cfg { + __le16 vsi_hw_id; + u8 resv[2]; + u8 key[]; +}; + +struct sxe2_rss_symm_fv { + u8 fv_idx : 5; + u8 rsv : 2; + u8 valid : 1; +}; + +struct sxe2_rss_symm_fv_cfg { + __le16 prof_id; + u8 rsv[2]; + struct sxe2_rss_symm_fv fv[SXE2_RSS_FV_CNT]; +}; + +struct sxe2_rss_trace_recorder { + u8 trace_status0; + u8 rsv0[3]; + __le32 profile_id0; + __le32 fv[SXE2_RSS_FV_TRACE_CNT]; + u8 trace_status1; + u8 rsv1[3]; + __le32 hash1; + u8 trace_status2; + u8 rsv2[3]; + __le32 hash2; + u8 profile_id2; + u8 bad_profile; + __le16 q_index; + u8 thread_id; + u8 rsv3[1]; + __le16 vsi; +}; + +struct sxe2_fwc_xlt2_entry { + __le16 vsi_hw_idx; + __le16 vsig; +}; + +struct sxe2_fwc_tcam_entry { + __le16 addr; + u8 key[SXE2_TCAM_KEY_LEN]; + u8 prof_id; + u8 rsv; +}; + +struct sxe2_fwc_es_entry { + u8 prof_id; + u8 cnt; + struct sxe2_es_fv fv[SXE2_FV_CNT_MAX]; +}; + +struct sxe2_fwc_prof_section { + u8 type; + u8 rsv[1]; + __le16 offset; + __le16 size; +}; + +struct sxe2_fwc_prof_buf { + __le16 entry_cnt; + __le16 data_end; + struct sxe2_fwc_prof_section sect[]; +}; + +struct sxe2_fwc_prof_pkg { + u8 blk; + u8 buf[]; +}; + +struct sxe2_fwc_tcam_idx { + u8 blk; + u8 rsv; + __le16 tcam_idx; +}; + +enum sxe2_fwc_tcam_action { + SXE2_FWC_TCAM_ACTION_ADD, + SXE2_FWC_TCAM_ACTION_DEL, +}; + +struct sxe2_fwc_tcam_info { + u8 action; + __le16 tcam_idx; +}; + +struct sxe2_fwc_tcam_idx_batch { + u8 blk; + u16 tcam_cnt; + struct sxe2_fwc_tcam_info tcam_info[]; +}; + +struct sxe2_fwc_prof_id { + u8 blk; + u8 rsv; + __le16 prof_id; +}; + +struct sxe2_fwc_mask_idx { + u8 blk; + u8 rsv; + __le16 mask_idx; +}; + +struct sxe2_fwc_mask_val { + u8 blk; + u8 rsv; + __le16 mask_idx; + __le16 fv_idx; + __le16 mask; +}; + +struct sxe2_fwc_mask_sel { + u8 blk; + u8 rsv; + __le16 prof_id; + __le32 mask_sel; +}; + +struct sxe2_fwc_fnav_kt_entry { + u8 is_add; + u8 prof_id; + u8 fdid_prio; + u8 toq_prio; + u8 drop; + u8 stat_ena; + u8 to_queue; + u8 inputset[SXE2_FNAV_INPUT_CNT * 2]; + u8 fd_space; + __le16 ori_vsi; + __le16 vsi; + __le16 flow_id; + __le16 qindex; + __le16 stat_cnt; + u8 rsv1[2]; + __le32 fdid; +}; + +struct sxe2_fwc_fnav_kt_resp { + __le32 
gcnt_pf; + __le32 bcnt_global; +}; + +struct sxe2_fnav_hit_info { + + __le32 hit_flg : 4; + __le32 ck1 : 13; + __le32 ht_index1 : 11; + __le32 ck2_3_0 : 4; + + __le32 ck2_12_4 : 9; + __le32 ht_index2 : 11; + __le32 ht1_avl : 8; + __le32 ht2_avl_3_0 : 4; + + __le32 ht2_avl_7_4 : 4; + __le32 kt_index : 15; + __le32 entry_vld : 1; + __le32 qindex : 11; + __le32 stat_0 : 1; + + __le32 stat_13_1 : 13; + __le32 stat_ena : 2; + __le32 evict_ena : 1; + __le32 toqueue : 3; + __le32 toqueue_prio : 3; + __le32 ad_drop : 1; + __le32 fdid_8_0 : 9; + + __le32 fdid_31_9 : 23; + __le32 fdid_prio : 3; + __le32 flow_id_5_0 : 6; + + __le32 flow_id_15_6 : 10; + __le32 ad_fd_vsi : 10; + __le32 gl_space : 1; + __le32 pf_space : 1; + __le32 vsi_space : 1; + __le32 ad2 : 4; + __le32 inset_511_507 : 5; + + __le32 inset_506_475; + + __le32 inset_474_443; + + __le32 inset_442_411; + + __le32 inset_410_379; + + __le32 inset_378_347; + + __le32 inset_346_315; + + __le32 inset_314_283; + + __le32 inset_282_251; + + __le32 inset_250_219; + + __le32 inset_218_187; + + __le32 inset_186_155; + + __le32 inset_154_123; + + __le32 inset_122_91; + + __le32 inset_90_59; + + __le32 inset_58_27; + + __le32 inset_26_0 : 27; + __le32 profile_id_4_0 : 5; + + __le32 profile_id_6_5 : 2; + __le32 rsv0 : 1; + __le32 vsi : 10; + __le32 rsv1 : 6; + __le32 fail_sts : 3; + __le32 cmd : 4; + __le32 thread_id_5_0 : 6; + + __le32 thread_id_6 : 1; + __le32 pf : 3; + __le32 vf_vm : 10; + __le32 function_type : 2; + __le32 bypass_ft : 1; + __le32 pcmd : 2; + __le32 comp_report : 2; + __le32 fd_vsi : 10; + __le32 comp_queue : 1; + + __le32 not_enabled : 1; + __le32 bad_profile_id : 1; + __le32 drop : 1; + __le32 round_drop : 1; + __le32 round_cnt : 4; + __le32 rsv2 : 24; +}; + +struct sxe2_fnav_addition_info { + + __le32 fd_profile_id : 7; + __le32 hit_flg : 4; + __le32 rlt_sel : 2; + __le32 dst_vsi : 10; + __le32 rlt_queue_8_0 : 9; + + __le32 rlt_queue_10_9 : 2; + __le32 rlt_toqueue : 3; + __le32 rlt_toqueue_pri : 3; + __le32 drop : 1; + __le32 cmd : 4; + __le32 bypass_absq : 1; + __le32 fd_search_ena : 1; + __le32 pkt_id : 7; + __le32 deflt_qindx_pri : 3; + __le32 sa_toqueue_pri : 3; + __le32 rsv : 3; + __le32 search_rss_fs_hit : 1; + + __le32 bypass_rss : 1; + __le32 rsv1 : 31; +}; + +struct sxe2_fnav_trace_recorder { + u8 trace_status0; + u8 trace_status1; + struct sxe2_fnav_hit_info hit_info; + struct sxe2_fnav_addition_info addition_info; + u8 trace_status2; +}; + +struct sxe2_fnav_glspace_cnt { + __le32 bcnt; + __le32 gcnt; +}; + +#define SXE2_FNAV_MAX_COUNTER_BANK_NUM (2) + +enum sxe2_fnav_counter_bank_type { + SXE2_FNAV_COUNTER_BANK_0, + SXE2_FNAV_COUNTER_BANK_1, + SXE2_FNAV_COUNTER_BANK_ALL, + SXE2_FNAV_COUNTER_BANK_MAX, +}; + +struct sxe2_fwc_fnav_stats_req { + u8 is_clear; + u8 bank_type; + __le16 counter_idx; +}; + +struct sxe2_fwc_fnav_stats_resp { + __le64 stats[SXE2_FNAV_MAX_COUNTER_BANK_NUM]; +}; + +struct sxe2_fwc_fnav_match_req { + __le16 vsi_id; + __le16 stat_idx; +}; + +struct sxe2_fwc_fnav_match_req_batch { + u8 is_clear; + u8 bank_type; + __le16 stat_cnt; + struct sxe2_fwc_fnav_match_req match_req[]; +}; + +struct sxe2_fwc_fnav_match_rsp_batch { + __le16 stat_cnt; + __le64 fnav_match[]; +}; + +struct sxe2_fwc_fnav_dlft_compq_req { + __le16 vsi_idx_in_dev; + __le16 rxq_idx_in_func; +}; + +struct sxe2_fnav_vsispace_cnt { + __le32 bcnt; + __le32 gcnt; + __le16 vsi_id; +}; + +struct sxe2_fnav_space_cnt { + __le32 bcnt_global; + __le32 gcnt_global; + __le32 bcnt_pf; + __le32 gcnt_pf; + __le32 bcnt_vsi; + __le32 gcnt_vsi; + __le16 
vsi_id; +}; + +struct sxe2_vf_fnav_clear_ctxt { + __le16 vf_indev; + u8 io_close; +}; + +enum sxe2_rxft_dbg_ppe_info_type { + SXE2_RXFT_PPE_INFO_TX_IN, + SXE2_RXFT_PPE_INFO_TX_EX, + SXE2_RXFT_PPE_INFO_RX_IN, + SXE2_RXFT_PPE_INFO_RX_EX, + SXE2_RXFT_PPE_INFO_LP_IN, + SXE2_RXFT_PPE_INFO_LP_EX, + SXE2_RXFT_PPE_INFO_TYPE_MAX, +}; + +struct sxe2_rxft_ppe_info { + struct { + __le32 data[SXE2_RXFT_PPE_INFO_REG_CNT]; + } info[SXE2_RXFT_PPE_INFO_TYPE_MAX]; +}; + +struct sxe2_rxq_ctxt { + __le64 base_addr; + __le16 depth; + + __le16 dbuff_len; + __le16 hbuff_len; + u8 hsplit_type; + u8 desc_type; + u8 crc_strip; + u8 l2tag1_show; + u8 hsplit_0; + u8 hsplit_1; + u8 inner_vlan_strip; + + u8 lro_enable; + u8 cpuid; + __le16 max_frame_size; + __le16 lro_desc_max; + u8 relax_data; + u8 relax_wb_desc; + u8 relax_rd_desc; + + u8 tphrdesc_enable; + u8 tphwdesc_enable; + u8 tphdata_enable; + u8 tphhead_enable; + + u8 low_desc_waterline; + __le16 vfid; + u8 pfid; + u8 vfen; + __le16 vsi_id; + + u8 pref_enable; + __le16 head; +}; + +struct sxe2_fwc_cfg_rxq_req { + u8 pf_idx; + __le16 idx_in_dev; + struct sxe2_rxq_ctxt rxq_ctxt; +}; + +struct sxe2_fwc_local_mib_set { + __le16 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_local_mib_get { + u8 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_fw_agent { + u8 enable; + u8 resv[3]; +}; + +#ifndef FW_LLDP_STATE +#define FW_LLDP_STATE +enum sxe2LldpStatus { + sxe2_lldp_enabled_rx_tx = 0, + sxe2_lldp_enabled_tx_only, + sxe2_lldp_enabled_rx_only, + sxe2_lldp_disabled, +}; +#endif + +struct sxe2_fwc_lldp_fw_agent { + u8 status; + u8 resv[3]; +}; + +struct sxe2_fwc_notify_lldp_fw_agent { + u8 stats; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_stats { + u8 rx_state; + u8 tx_state; + u8 lldp_enable; + u8 admin_status; + __le32 tx_failed; + __le32 tx_frames_out_total; + __le32 tx_lldpdu_length_errors; + __le32 rx_ageouts_total; + __le32 rx_frames_discarded_total; + __le32 rx_frames_in_errors_total; + __le32 rx_frames_in_total; + __le32 rx_tlvs_discarded_total; + __le32 rx_tlvs_unrecognized_total; +}; + +struct sxe2_fwc_lldp_mibs_info { + u8 count; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_mibs_dump_req { + u8 index; + u8 resv[3]; +}; + +struct sxe2_lldp_mibs_tl { + __le16 offset; + __le16 length; +}; + +struct sxe2_lldp_mibs_ets { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioTable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tsatable[SXE2_MAX_TRAFFIC_CLASS]; +}; + +struct sxe2_lldp_mibs_pfc { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcena; +}; + +struct sxe2_lldp_mibs_app { + __le16 protId; + u8 priority; + u8 selector; +}; + +struct sxe2_fwc_lldp_mibs_dump_resp { + u8 index; + u8 resv1[3]; + u8 buffer[SXE2_LLDP_FRAME_MAX_SIZE]; + __le16 size; + u8 num_apps; + u8 resv2[3]; + struct sxe2_lldp_mibs_ets ets_cfg; + struct sxe2_lldp_mibs_ets ets_rec; + struct sxe2_lldp_mibs_pfc pfc_cfg; + struct sxe2_lldp_mibs_app app_cfg[SXE2_DCBX_MAX_APPS]; +}; + +enum sxe2FlowCtrlMode { + SXE2_FC_MODE_DISABLE, + SXE2_FC_MODE_LFC, + SXE2_FC_MODE_PFC, + SXE2_FC_MDDE_COUNT, +}; + +struct sxe2_fwc_lfc_info { + u8 rx_en; + u8 tx_en; + u8 tc_num; + u8 fc_mode; + __le32 port_size; + __le32 high_water[SXE2_MAX_TRAFFIC_CLASS]; + __le32 low_water[SXE2_MAX_TRAFFIC_CLASS]; + __le16 pause_time[SXE2_MAX_TRAFFIC_CLASS]; + u8 priority; + u8 resv1; +}; + +struct sxe2_mdd_vf_req { + __le16 vf_idx; + u8 q_mapping_mode; + u8 reserve; +}; + +struct sxe2_fwc_mdd_req { + __le16 vf_cnt; + u8 mdd_check; + u8 reserve; + struct sxe2_mdd_vf_req vfs[]; +}; + +struct sxe2_mdd_vf_resp { + 
__le16 vf_idx; + u8 mdd; + u8 reserve; +}; + +struct sxe2_fwc_mdd_resp { + __le32 vf_mdd_tx_event; + __le32 pf_mdd_tx_event; + + u8 vf_mdd_rx_event; + u8 pf_mdd_rx_event; + + __le16 mdd_vf_cnt; + struct sxe2_mdd_vf_resp mdd_vfs[]; +}; + +struct sxe2_fwc_ptp_filter_addr { + u8 filter_type; + __le32 ipv4; + __le32 ipv6[4]; + __le32 mac[2]; +}; + +struct sxe2_fwc_ptp_correction { + __le32 ingress_corr_nanosec; + __le32 ingress_corr_subnanosec; + __le32 egress_corr_nanosec; + __le32 egress_corr_subnanosec; + __le32 ingress_sync_corr; + __le32 egress_sync_corr; +}; + +struct sxe2_fwc_ptp_init_req { + u8 sample_type; + u8 threshold; + struct sxe2_fwc_ptp_filter_addr filter_addr; + struct sxe2_fwc_ptp_correction corr; +}; + +enum sxe2_rx_timestamp_mode { + SXE2_RX_TIMESTAMP_MODE_PTP = 0, + SXE2_RX_TIMESTAMP_MODE_ALL_1024, + SXE2_RX_TIMESTAMP_MODE_ALL_2048, + SXE2_RX_TIMESTAMP_MODE_ALL_4096, + SXE2_RX_TIMESTAMP_MODE_ALL_8192, + SXE2_RX_TIMESTAMP_MODE_ALL_16384, + SXE2_RX_TIMESTAMP_MODE_MAX, +}; + +struct sxe2_fwc_ptp_mode_set_req { + u8 mode; +}; + +#define SXE2_IPSEC_KEY_LEN (32) +#define SXE2_IPV6_ADDR_LEN (4) +struct sxe2_fwc_ipsec_txsa_add_req { + __le32 mode; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_set_req { + __le32 mode; + __le32 sa_index; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_add_resp { + __le16 index; +}; + +struct sxe2_fwc_ipsec_rxsa_add_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 is_over_sdn; + u8 sdn_group_id; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_set_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 sa_index; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_add_resp { + u8 ip_id; + u8 udp_group_id; + __le16 sa_idx; +}; + +struct sxe2_fwc_ipsec_txsa_del_req { + __le16 sa_idx; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_del_req { + u8 ip_id; + u8 group_id; + __le16 sa_idx; + __le32 spi; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_switch_op_req { + u8 dir; + u8 op; + __le16 mac_id; +}; + +struct sxe2_fwc_ipsec_drv_clr_req { + u8 func_type; + u8 func_id; + u8 drv_id; + u8 rsv; +}; + +#define SXE2_IPSEC_WDRR_COUNT (4) +struct sxe2_fwc_ipsec_wdrr_req { + __le16 tx_wdrr[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_iqm[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_oqm[SXE2_IPSEC_WDRR_COUNT]; +}; + +struct sxe2_fwc_ipsec_capa_resq { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; + __le16 ip_id_cnt; + __le16 udp_group_cnt; +}; + +#define MACSEC_PN_LEN_MAX (2) +#define MACSEC_SALT_COUNT (3) +#define MACSEC_KEY_LEN (4) + +enum sxe2_macsec_validate_mode { + SXE2_MACSEC_VALIDATE_DISABLED = 0, + SXE2_MACSEC_VALIDATE_CHECK = 1, + SXE2_MACSEC_VALIDATE_STRICT = 2, + SXE2_MACSEC_VALIDATE_END, +}; + +struct sxe2_fw_macsec_sa { + u8 active; + u8 an_value; + __le32 pn[MACSEC_PN_LEN_MAX]; + __le32 ssci; + __le32 salt[MACSEC_SALT_COUNT]; + __le32 key[MACSEC_KEY_LEN]; +}; + +struct sxe2_fw_macsec_txsc { + u8 active; + u8 xpn; + u8 aisci; + u8 es; + u8 encrypt; + __le64 
sci; +}; + +struct sxe2_fw_macsec_rxsc { + u8 active; + u8 xpn; + u8 protect; + u8 validate_mode; + __le64 sci; +}; + +struct sxe2_fwc_ddp_state { + u8 act_pfid; + u8 pad; + __le16 ver; + __le32 state; +}; + +struct flm_link_cap { + u32 speed; + u32 fecMode; + + u8 an; + u8 lscEn; +}; + +enum sxe2_fec_mode { + SXE2_ETHTOOL_FEC_NONE = 0, + SXE2_ETHTOOL_FEC_OFF = 1, + SXE2_ETHTOOL_FEC_BASER = 2, + SXE2_ETHTOOL_FEC_RS = 3, + SXE2_ETHTOOL_FEC_AUTO = 15, + SXE2_ETHTOOL_FEC_MAX, +}; + +enum flm_link_speed_fec { + FEC_MOD_UNNKOW = 0x0, + FEC_MOD_10G = 0x7, + FEC_MOD_50G = 0xC, + FEC_MOD_25G = 0xF, + FEC_MOD_100G = 0XD, +}; + +enum flm_link_speed_info { + FEC_MOD_SPEED_UNNKOW = 0x0, + FEC_MOD_SPEED_10G = 0x2, + FEC_MOD_SPEED_25G = 0x4, + FEC_MOD_SPEED_50G = 0x8, + FEC_MOD_SPEED_100G = 0X10, +}; + +enum sxe2_speed_mode { + SXE2_ETHTOOL_SPEED_UNKNOWN = 0, + SXE2_ETHTOOL_SPEED_10GB = 10000, + SXE2_ETHTOOL_SPEED_25GB = 25000, + SXE2_ETHTOOL_SPEED_50GB = 50000, + SXE2_ETHTOOL_SPEED_100GB = 100000, + SXE2_ETHTOOL_SPEED_AUTO = 200000, +}; + +struct flm_link_fec_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct configure_fc { + u8 rx_en; + u8 tx_en; + u8 resv[2]; +}; + +struct configure_an { + u32 port; + u32 speed; + u32 fec_mode; + u32 lt_en; + struct configure_fc fc_mode; + u32 an_en; +}; + +enum sxe2_fw_connect_mode { + SXE2_FW_CONNECT_MODE_DAC, + SXE2_FW_CONNECT_MODE_AOC, + SXE2_FW_CONNECT_MODE_TRANSCEIVER, + SXE2_FW_CONNECT_MODE_BACKPLANE, + SXE2_FW_CONNECT_MDDE_UNKNOW, +}; + +enum flm_link_speed { + FLM_FW_SPEED_10G = 0, + FLM_FW_SPEED_25G = 1, + FLM_FW_SPEED_50G = 2, + FLM_FW_SPEED_100G = 3, + FLM_FW_SPEED_AUTO = 15, + FLM_FW_SPEED_MAX +}; + +struct flm_link_ret { + u32 speed; + u32 module_type; + u32 link_status; + u32 fec_mode; + struct configure_fc fc_mode; +}; + +struct ethtool_flm_link_info { + s32 speed; + u32 link_status; +}; + +struct flm_link_info_pasist { + u8 speed; + u8 link_status; + u8 fec_mode; + u8 resv; + struct configure_fc fc_mode; +}; + +struct sxe2_fw_loop_back_config { + u8 enable; + u8 resv[3]; +}; + +struct flm_link_info { + u32 port_num; + u32 is_link_up; + u32 module_type; + u32 is_an_enable; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct flm_link_config { + u32 port_num; + u32 speed; + u32 fec; + u32 port; +}; + +struct flm_link_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct flm_ethtool_get_link_req { + u32 port_num; +}; + +enum sxe2_support_fec_mode { + SXE2_SUPPORTR_FEC_NONE = 0, + SXE2_SUPPORTR_FEC_BASER = 1, + SXE2_SUPPORTR_FEC_RS = 2, + SXE2_SUPPORTR_FEC_AUTO = 15, + SXE2_SUPPORTR_FEC_MAX, +}; + +enum sxe2_advertis_fec_mode { + SXE2_ADVERTIS_FEC_NONE = 0, + SXE2_ADVERTIS_FEC_BASER = 1, + SXE2_ADVERTIS_FEC_RS = 2, + SXE2_ADVERTIS_FEC_AUTO = 15, + SXE2_ADVERTIS_FEC_MAX, +}; + +enum sxe2_support_speed_duplex_mode { + SXE2_SUPPORTR_SPEED_10G, + SXE2_SUPPORTR_SPEED_25G, + SXE2_SUPPORTR_SPEED_50G, + SXE2_SUPPORTR_SPEED_100G, + SXE2_SUPPORTR_SPEED_AUTO = 15, + SXE2_SUPPORTR_SPEED_MAX, +}; + +enum sxe2_support_duplex { + HALF_DUPLEX = 0, + FULL_DUPLEX = 1, + MAX_DUPLEX, +}; + +enum sxe2_duplex_type { + CURRENT_HALF_DUPLEX = 0, + CURRENT_FULL_DUPLEX = 1, + CURRENT_MAX_DUPLEX, +}; + +enum sxe2_support_media_type { + SXE2_MEDIA_UNKNOWN = 0, + SXE2_MEDIA_FIBER, + SXE2_MEDIA_BASET, + SXE2_MEDIA_BACKPLANE, + SXE2_MEDIA_DA, +}; + +enum sxe2_support_pause_frame { + SCGEB_EN_TX_LINK_PAUSE, + SCGEB_EN_RX_LINK_PAUSE, + SCGEB_EN_TX_RX_LINK_PAUSE, + SCGEB_DIS_EN_LINK_PAUSE, +}; + +enum sxe2_an_status { + SXE2_AN_ENABLE = 0, + 
SXE2_AN_TRANSMIT_DISABLE = 1, + SXE2_AN_ABILITY_DETECT = 2, + SXE2_AN_ACKNOWLEDGE_DETECT = 3, + SXE2_AN_COMPLETE_ACKNOWLEDGE = 4, + SXE2_AN_NEXT_PAGE_WAIT = 5, + SXE2_AN_LINK_STATUS_CHECK = 6, + SXE2_AN_PARALLET_DETECT_FAULT = 7, + SXE2_AN_GOOD_CHECK = 8, + SXE2_AN_GOOD = 9, +}; + +struct sxe2_pause_publicity_ability { + u8 bit_pause; + u8 bit_asym; + u8 resv[2]; +}; + +struct sxe2_local_suppet_advertis_an_en { + u8 suppert_an; + u8 advertis_an; + u8 resv[2]; +}; + +struct sxe2_peer_suppet_an_en { + u8 suppert_an; + u8 resv[3]; +}; + +enum sxe2_current_media_type { + CURRENT_MEDIA_UNKNOWN = 0, + CURRENT_MEDIA_FIBER, + CURRENT_MEDIA_BASET, + CURRENT_MEDIA_BACKPLANE, + CURRENT_MEDIA_DA, +}; + +struct sxe2_current_an_en { + u8 current_an; + u8 resv[3]; +}; + +enum flm_fec_mode { + FLM_FEC_NONE = 0, + FLM_FEC_BSFEC = 1, + FLM_FEC_528 = 2, + FLM_FEC_544 = 3, + FLM_FEC_AUTO = 15, + FLM_FEC_MAX +}; + +enum flm_link_status { + FLM_PORT_DOWN = 0, + FLM_PORT_UP = 1, + FLM_PORT_MAX = 2 +}; + +struct fec_ability_supported { + u32 fec_br : 1; + u32 fec_528 : 1; + u32 fec_544 : 1; + u32 rec : 29; +}; + +struct spec_entry { + u8 spec_id; + u8 reserved[3]; + enum flm_link_speed speed; + s8 spec_name[16]; +}; + +struct optical_module { + char vendor[SXE2_FLM_VENDOR_LEN]; + char vendor_pn[SXE2_FLM_VENDOR_PN_LEN]; + u8 module_type; + u8 current_connection; +}; + +struct optical_warning_info { + u8 vendor[SXE2_HOST_FLM_VENDOR_LEN]; + u8 vendor_pn[SXE2_HOST_FLM_VENDOR_PN_LEN]; + bool is_warning; +}; + +struct __an_mode { + u32 pause; + u32 speed_ability_10Gkr; + u32 speed_ability_25Gkrcr; + u32 speed_ability_25Gkrcr_s; + u32 speed_ability_100Gcr4; + u32 speed_ability_100Gkr4; + u32 fec_ability_10g; + + u32 fec_en_10g; + u32 fec_bsfec_25g; + u32 fec_rs528_25g; + u8 Consortium_25g_50g_en; +}; + +struct __an_np_mode { + u32 speed_ability_25Gkr; + u32 speed_ability_25Gcr; + u32 speed_ability_50Gkr2; + u32 speed_ability_50Gcr2; + u32 fec_ability_rs528; + u32 fec_ability_bsfec; + u32 fec_en_rs528; + u32 fec_en_bsfec; +}; + +struct __an_orig_speed_fec { + u32 orig_speed; + u32 orig_fec; +}; + +struct sxe2_an_publicity { + + struct __an_mode an_mode; + + struct __an_np_mode an_np_mode; +}; + +struct support_speed_ability_mode { + u32 ability_speed_25Gkr; + u32 ability_speed_25Gcr; + u32 ability_speed_50Gkr2; + u32 ability_speed_50Gcr2; + u32 ability_speed_10Gkr; + u32 ability_speed_25Gkrcr; + u32 ability_speed_25Gkrcr_s; + u32 ability_speed_100Gcr4; + u32 ability_speed_100Gkr4; + u32 ability_speed_100Gsr4; +}; + +struct sxe2_fwc_link_state_resp { + u8 link; + u8 resv[3]; +}; + +struct flm_ethtool_get_link_resp { + u8 specs_list[32]; + u32 sxe2_ana_fsm; + struct optical_module optical_module; + struct configure_fc configed_pause_result; + struct configure_fc partner_pause_result; + struct sxe2_pause_publicity_ability local_pause; + + struct sxe2_local_suppet_advertis_an_en local_an_en; + enum flm_fec_mode local_fec_mode; + struct sxe2_pause_publicity_ability partner_pause; + struct sxe2_peer_suppet_an_en partner_an_en; + enum sxe2_duplex_type support_duplx; + enum sxe2_current_media_type current_media; + struct sxe2_current_an_en current_an_en; + struct fec_ability_supported advertis_fec; + struct fec_ability_supported partner_fec; + struct sxe2_an_publicity an_publicity; +}; + +struct sxe2_msg_ethtool_info { + struct flm_ethtool_get_link_resp cfg; + struct support_speed_ability_mode ability; + u8 usr_link_speed; +}; + +struct sxe2_fwc_udp_tunnel_ref_add_req { + u8 type; + __le16 port; +}; + +struct 
sxe2_fwc_udp_tunnel_ref_delete_req { + u8 type; + u8 clear; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_req { + u8 type; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_resp { + u8 type; + u8 enable; + u8 dst; + u8 src; + __le16 port; + u8 used; + u8 rsvd; +}; + +struct sxe2_fw_ncd_core_num_config_req { + u8 core_num; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_core_pri_queue { + u8 core_id; + u8 pri; + __le16 queue_id; +}; + +struct sxe2_fw_ncd_switch_tag_req { + u8 loc; + u8 len; + u8 en; + u8 mac_id; +}; + +struct sxe2_fw_ncd_switch_tag_resp { + u8 loc; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_req { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_resp { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdf_req { + __le16 eth_type; + __le16 mask; + u8 en; + u8 resv; +}; + +struct sxe2_fw_ncd_sdf_resp { + __le16 eth_type; + __le16 mask; +}; + +struct sxe2_fw_ncd_sdn_udp_req_resp { + __le16 start_port; + __le16 end_port; + u8 ph_len; + u8 udp_grp_id; + u8 used_count; + u8 resv; +}; + +struct sxe2_fw_ncd_sdn_req_resp { + union { + __le32 ipv4_addr; + __le32 ipv6_addr[4]; + } dest_addr; + __le16 used_count; + u8 is_ipv4; + u8 udp_grp_id; + u8 ip_id; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_req { + __le16 udp_port; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_resp { + u8 find; + u8 udp_grp_id; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_udp_req { + __le32 port_bmp; + __le16 start_port; + u8 udp_grp_id; + u8 resv; +}; + +struct sxe2_fw_ncd_app_port_req { + u8 idx; + u8 is_tcp; + u8 sport_en; + u8 dport_en; +}; + +struct sxe2_fw_ncd_pkt_pri_req { + u8 idx; + u8 pri; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_udf_capa_get_resp { + __le32 sdn_ip_addr; + __le32 sdn_udp_ipsec_bm; + __le16 sdn_udp_ipsec_sp; + __le16 sdn_udp_start_port; + __le16 sdn_udp_end_port; + __le16 sdf_eth_type; + __le16 sdf_mask; + u8 sdf_en; + u8 switch_tag_en; + u8 switch_tag_len; + u8 switch_tag_loc; + u8 txlen_adj_len[4]; + u8 sdn_udp_ph_len; + u8 sdn_ip_type; + u8 sdn_ip_udp_grp_id; + u8 spec_proto_port_type; + u8 spec_ptype_pri_level; + u8 resv; +}; + +#define BFD_INTQ_CMD_SRC_IRQ (0) +#define BFD_INTQ_CMD_SRC_POLLING (1) +#define BFD_INTQ_CMD_BUF_LEN (64) + +struct sxe2_fwc_bfd_config_set_req { + __le16 max_sess; + u8 scan_interval; + u8 bfd_en; +}; + +struct sxe2_fwc_bfd_intq_info_get_req { + u8 src; + u8 sess_num; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_intq_info_get_resp { + u8 buf_empty; + u8 valid_len; + u8 resv[2]; + struct { + __le16 sess_id; + u8 reason; + u8 sess_state; + } data[BFD_INTQ_CMD_BUF_LEN]; +}; + +struct sxe2_fwc_bfd_kt_entry { + u8 is_add; + u8 prof_id; + u8 rsv[2]; + u8 inputset[SXE2_BFD_FV_CNT_MAX * 2]; + __le16 vsi; + __le16 sess_id; + __le32 fdid; +}; + +struct sxe2_fwc_bfd_kt_entry_resp { + __le16 kt_index; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_sess_cfg_set_req { + __le16 sess_id; + u8 valid; + u8 sess_mode; + __le16 kt_index; + u8 mac_id; + u8 tc_id; + u8 tx_en; + u8 rx_en; + __le16 tx_interval; + __le32 rx_interval; + __le16 ppt_id; + u8 state; + u8 rsv; +}; + +struct sxe2_fwc_bfd_sess_state_get_req { + __le16 sess_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_bfd_sess_state_get_resp { + u8 state; + u8 aging_state; + __le16 prof_id; + __le16 rx_cnt; + __le16 tx_cnt; +}; + +struct sxe2_fwc_bfd_capability_get_resp { + __le16 max_sess; + __le16 max_package_profile; + __le16 min_scan_interval; + __le16 bfde_vsi; + __le16 max_pkt_buf; + __le16 max_edit_cmd; + __le16 max_csum_desc; + u8 rsv[2]; +}; +
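+/* + * BFD packet-buffer and edit-command sizing: the constants below give + * each packet profile up to 16 buffer entries of 32 bytes (a 512-byte + * packet buffer) plus up to 16 eight-byte edit commands, grouped four + * per 32-byte entry. + */ +#define 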
SXE2_BYTES_PER_PKT_BUF_ENT (32) +#define SXE2_PKT_BUF_MAX_PER_PROF (16) +#define SXE2_PKT_BUF_SIZE_MAX \ + (SXE2_BYTES_PER_PKT_BUF_ENT * SXE2_PKT_BUF_MAX_PER_PROF) + +#define SXE2_BYTES_PER_EDIT_CMD (8) +#define SXE2_EDIT_CMD_PER_ENT (4) +#define SXE2_EDIT_CMD_MAX (16) +#define SXE2_EDIT_CMD_ENT_MAX (SXE2_EDIT_CMD_MAX / SXE2_EDIT_CMD_PER_ENT) +#define SXE2_BYTES_PER_EDIT_CMD_ENT (SXE2_BYTES_PER_EDIT_CMD * SXE2_EDIT_CMD_PER_ENT) +#define SXE2_EDIT_CMD_SIZE_MAX (SXE2_EDIT_CMD_MAX * SXE2_BYTES_PER_EDIT_CMD) + +struct sxe2_bfd_edit_cmd_req { + struct { + u8 code; + u8 size; + __le16 loc; + } instr[SXE2_EDIT_CMD_PER_ENT]; +}; + +struct sxe2_fwc_bfd_pack_prof_set_req { + __le16 prof_id; + u8 rsv[2]; + + struct { + __le16 pkt_addr; + u8 pkt_len; + u8 pkt_lbo; + u8 data[SXE2_PKT_BUF_SIZE_MAX]; + } pkt_buffer; + + struct { + __le16 edit_cmd_addr; + u8 edit_cmd_len; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_MAX]; + } edit_cmd; + + __le16 csum_desc_addr; + u8 csum_desc_len; + u8 rsv0; + struct { + __le16 start_addr; + __le16 csum_loc; + __le16 csum_len; + u8 result_negate; + u8 rsv1; + __le32 chk_data; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_PER_ENT]; + } csum_desc[2]; +}; + +struct sxe2_fwc_bfd_meta_key_len_req { + __le16 prof_id; + __le16 key_len; +}; + +struct sxe2_tx_fault_count_mib { + u64 tx_fault_count; +}; + +struct single_link_result { + u32 port_num; + u32 is_link_up; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct configure_fixed { + u32 port; + u32 speed; + u32 fec_mode; + u32 port_mode; + u32 an_en; + u32 lt_en; + struct configure_fc fc_mode; + s32 pcsRet; +}; + +struct all_link_status { + u32 dut_status; + u32 sb_status; +}; + +struct sxe2_fwc_txpa_dfx { + u32 txpa_in_all; + u32 txpa_out_all; + u32 txpa_in_drop; + u32 txpa_out_drop; + u32 txpa_in_err; + u32 txpa_out_err; +}; + +struct sxe2_fwc_txfb_dfx { + u32 txfb_in_all; + u32 txfb_in_drop; + u32 txfb_out_all; + u32 txfb_out_drop; + u32 txfb_internal_drop; +}; + +struct sxe2_fwc_rxpa_dfx { + u32 rxpa_in_all; + u32 rxpa_out_all; + u32 rxpa_in_drop; + u32 rxpa_out_drop; + u32 rxpa_in_err; + u32 rxpa_out_err; +}; + +struct sxe2_fwc_rxfb_dfx { + u32 rxfb_tx_in_all; + u32 rxfb_rx_in_all; + u32 rxfb_tx_in_drop; + u32 rxfb_rx_in_drop; + u32 rxfb_out_all; + u32 rxfb_out_drop; + u32 rxfb_internal_drop; +}; + +struct sxe2_fwc_switch_dfx { + u32 tx_all; + u32 tx_drop; + u32 rx_all; + u32 rx_drop; +}; + +struct sxe2_fwc_rxft_dfx { + u32 tx_in_all; + u32 tx_in_drop; + u32 tx_out_all; + u32 tx_out_drop; + u32 rx_in_all; + u32 rx_in_drop; + u32 rx_out_all; + u32 rx_out_drop; + u32 lp_in_all; + u32 lp_in_drop; + u32 lp_out_all; + u32 lp_out_drop; +}; + +struct sxe2_fwc_ppe_dfx { + struct sxe2_fwc_txpa_dfx txpa[4]; + struct sxe2_fwc_txfb_dfx txfb; + struct sxe2_fwc_rxpa_dfx rxpa[4]; + struct sxe2_fwc_rxfb_dfx rxfb; + struct sxe2_fwc_switch_dfx sw; + struct sxe2_fwc_rxft_dfx rxft; +}; + +#define SXE2_ACL_LUT_ENTRY_WIDTH (5) +#define SXE2_ACL_ACTION_TCAM_CNT (16) +#define SXE2_ACL_ACTION_MEM_CNT (20) +#define SXE2_ACL_ACTION_NUM_PER_ENTRY (2) +#define SXE2_ACL_ACTION_TCAM_DEPTH (512) +#define SXE2_ACL_ACTION_MEM_DEPTH (512) + +struct sxe2_fwc_acl_lut_alloc_req { + __le16 width; + __le16 depth; + u8 act_pairs_per_entry; + + u8 concurr; + u8 num_dependent_alloc_ids; + __le16 alloc_ids[SXE2_ACL_ACTION_TCAM_CNT - 1]; +}; + +struct sxe2_fwc_acl_lut_alloc_resp { + + __le16 alloc_id; + + __le16 first_entry; + __le16 last_entry; + + u8 first_tcam; + u8 last_tcam; + + u8 act_mem[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct 
sxe2_fwc_acl_lut_dealloc_req { + __le16 alloc_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_sel_base_req { + __le16 prof_id; + + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_entry_data { + struct { + u8 val[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; + u8 reserved[2]; + } entry_key, entry_key_invert; +}; + +struct sxe2_fwc_acl_lut_entry_set_req { + u8 tcam_idx; + __le16 entry_idx; + u8 rsv; + + struct sxe2_acl_entry_data data; +}; + +struct sxe2_acl_act_entry_data { + u8 prio; + u8 mdid; + __le16 value; +}; + +struct sxe2_fwc_acl_act_entry_set_req { + u8 act_mem_idx; + __le16 act_entry_idx; + u8 rsv; + + struct sxe2_acl_act_entry_data data[SXE2_ACL_ACTION_NUM_PER_ENTRY]; +}; + +struct sxe2_fwc_acl_scen_alloc_req { + struct { + u8 tcam_select[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; +#define SXE2_ACL_ALLOC_SCEN_START_CMP BIT(0) +#define SXE2_ACL_ALLOC_SCEN_START_SET BIT(1) + u8 start_cmp_set; + u8 rsv; + } tcam_cfg[SXE2_ACL_ACTION_TCAM_CNT]; + +#define SXE2_ACL_ACT_MEM_EN BIT(4) + u8 act_mem_cfg[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct sxe2_fwc_acl_scen_alloc_resp { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_scen_dealloc_req { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_querey_req { + __le16 prof_id; +}; + +struct sxe2_fwc_acl_prof_querey_resp { + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_hit_info { + + __le32 profile_id : 7; + __le32 rsv0 : 25; + + __le32 fv1 : 16; + __le32 fv0 : 16; + + __le32 fv3 : 16; + __le32 fv2 : 16; + + __le32 fv5 : 16; + __le32 fv4 : 16; + + __le32 fv7 : 16; + __le32 fv6 : 16; + + __le32 fv9 : 16; + __le32 fv8 : 16; + + __le32 fv11 : 16; + __le32 fv10 : 16; + + __le32 fv13 : 16; + __le32 fv12 : 16; + + __le32 fv15 : 16; + __le32 fv14 : 16; + + __le32 fv17 : 16; + __le32 fv16 : 16; + + __le32 fv19 : 16; + __le32 fv18 : 16; + + __le32 fv21 : 16; + __le32 fv20 : 16; + + __le32 fv23 : 16; + __le32 fv22 : 16; + + __le32 fv25 : 16; + __le32 fv24 : 16; + + __le32 fv27 : 16; + __le32 fv26 : 16; + + __le32 fv29 : 16; + __le32 fv28 : 16; + + __le32 fv31 : 16; + __le32 fv30 : 16; +}; + +struct sxe2_acl_dfx_info { + + __le32 og_inbuf_hdr_cnt; + __le32 og_inbuf_info_cnt; + __le32 og_proc_hdr_cnt; + __le32 og_proc_info_cnt; + __le32 og_to_engine_cnt; + __le32 og_in_rg_cnt; + __le32 og_out_rg_cnt; + __le32 sel_base_cnt; + __le32 key_gen_cnt; + __le32 key_gen_to_lkt_cnt; + __le32 act_mem_cnt; + __le32 osc_act_cnt; + __le32 osc_pkt_cnt; + __le32 acl_rxft_cnt; + __le32 acl_recv_drop_cnt; + __le32 acl_action_drop_cnt; + __le32 acl_vsi_disable_drop_cnt; + __le32 prfl_tcam_hit_cnt; + __le32 prfl_tcam_miss_cnt; + __le32 prfl_tcam_bypss_cnt; + __le32 act_tcam_hit_cnt[16]; + __le32 act_tcam_miss_cnt[16]; + + __le16 act_idx_first[16]; + __le16 act_idx_last[16]; + __le32 act_key_first_low[16]; + __le32 act_key_first_high[16]; + __le32 act_key_last_low[16]; + __le32 act_key_last_high[16]; + + __le64 key_first; + __le64 key_last; + + u8 first_prfl_id; + u8 last_prfl_id; + u8 first_scen_id; + u8 last_scen_id; + __le16 first_prfl_tcam_idx; + __le16 last_prfl_tcam_idx; + + __le16 first_cascade; + __le16 last_cascade; + __le16 first_stack; + __le16 last_stack; + __le16 first_tcam_en; + __le16 last_tcam_en; +}; + +struct sxe2_acl_trace_recorder { + u8 trace_status0; + u8 trace_status2; + u8 rsv[2]; + struct sxe2_acl_hit_info hit_info; +}; + +struct sxe2_vf_queue_info { + __le16 rxq_base; + __le16 rxq_cnt; + 
__le16 txq_base; + __le16 txq_cnt; +}; + +struct sxe2_fwc_vf_queue_info { + u8 pf_id; + u16 vf_cnt; + u8 rsv[1]; + struct sxe2_vf_queue_info queue_info[]; +}; + +#pragma pack() + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ddp_common.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ddp_common.h new file mode 100644 index 0000000000000000000000000000000000000000..1dae61f83a801c7a5be3062927650444eb00d5f8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ddp_common.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ddp_common.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DDP_COMMON_H_ +#define _SXE2_DDP_COMMON_H_ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include <linux/types.h> +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_type.h" +#include "sxe2_common.h" +#endif + +#define SXE2_DDP_DRV_VER_MAJ 1 +#define SXE2_DDP_DRV_VER_MNR 0 + +#define SXE2_DDP_FW_VER_MAJ 1 +#define SXE2_DDP_FW_VER_MNR 0 + +enum sxe2_ddp_error { + + SXE2_DDP_PKG_SUCCESS = 0, + + SXE2_DDP_PKG_ALREADY_LOADED = 1, + + SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED = 2, + + SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = 3, + + SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED = 4, + + SXE2_DDP_PKG_FW_MISMATCH = 5, + + SXE2_DDP_PKG_INVALID_FILE = 6, + + SXE2_DDP_PKG_FILE_VERSION_TOO_HIGH = 7, + + SXE2_DDP_PKG_FILE_VERSION_TOO_LOW = 8, + + SXE2_DDP_PKG_NO_SEC_MANIFEST = 9, + + SXE2_DDP_PKG_MANIFEST_INVALID = 10, + + SXE2_DDP_PKG_BUFFER_INVALID = 11, + + SXE2_DDP_PKG_BUSY = 12, + + SXE2_DDP_PKG_ERR = 13, +}; + +enum sxe2_ddp_state { + SXE2_DDP_STATE_UNINIT, + SXE2_DDP_STATE_PROC, + SXE2_DDP_STATE_FINISH, + SXE2_DDP_STATE_ERROR, + SXE2_DDP_STATE_INVALID = 0xFFFFFFFF, +}; + +struct sxe2_ddp_pkg_ver { + __le16 major; + __le16 minor; +}; + +union sxe2_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct sxe2_device_id_entry { + union sxe2_device_id device; + union sxe2_device_id sub_device; +}; + +struct sxe2_pkg_hdr { + struct sxe2_ddp_pkg_ver pkg_drv_ver; + struct sxe2_ddp_pkg_ver pkg_fw_ver; + struct sxe2_device_id_entry dev_vend_id; + __le32 seg_count; + + __le32 seg_offset[]; +}; + +#define SEGMENT_SIGN_TYPE_NONE 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 +#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 + +struct sxe2_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_SXE2_DDP 0x00000010 +#define SEGMENT_TYPE_SXE2_RUN_TIME_CFG 0x00000020 + __le32 seg_id; + __le32 seg_type; + __le32 seg_size; +}; + +struct sxe2_buf { +#define SXE2_PKG_BUF_SIZE 4096 + u8 buf[SXE2_PKG_BUF_SIZE]; +}; + +struct sxe2_buf_table { + __le32 buf_count; + struct sxe2_buf buf_array[]; +}; + +struct sxe2_seg { + struct sxe2_generic_seg_hdr hdr; + u8 rsvd[8]; + struct sxe2_buf_table buf_table; +}; + +#define SXE2_MIN_S_OFF 12 +#define SXE2_MAX_S_OFF 4095 +#define SXE2_MIN_S_SZ 1 +#define SXE2_MAX_S_SZ 4084 +#define SXE2_MIN_CFG_SZ (sizeof(struct sxe2_pkg_hdr) + sizeof(struct sxe2_seg)) + +struct sxe2_section_entry { + __le16 type; + __le16 unit_size; + + __le16 offset; + __le16 size; +}; + +#define SXE2_MIN_SECT_COUNT 1 +#define SXE2_MAX_SECT_COUNT 512 +#define SXE2_MIN_SECT_DATA_END 12 +#define SXE2_MAX_SECT_DATA_END 4096 +
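+/* + * Header at the start of each 4 KB DDP package buffer (SXE2_PKG_BUF_SIZE): + * a section-entry count, the end offset of valid data in the buffer, a + * running buffer index and a CRC word, followed by the flexible section + * table bounded by the SXE2_MIN/MAX_SECT_* limits above. + */ +struct 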
sxe2_buf_hdr { + __le16 section_count; + + __le16 data_end; + __le32 buf_idx; + + __le32 crc; + struct sxe2_section_entry section_entry[]; +}; + +enum sxe2_segment_type { + SXE2_SGM_BLK_DP = 0, + SXE2_SGM_BLK_MAX +}; + +enum sxe2_section_type { + SXE2_SECT_SWPTG_TYPE = 0, + SXE2_SECT_SWVSIG_TYPE, + SXE2_SECT_SWTCAM_TYPE, + SXE2_SECT_SWEXTRACTOR_TYPE, + SXE2_SECT_SWMAP_TYPE, + SXE2_SECT_SWRCP_TYPE, + SXE2_SECT_SWPROFILERCPBITMAP_TYPE, + SXE2_SECT_RSSPTG_TYPE, + SXE2_SECT_RSSVSIG_TYPE, + SXE2_SECT_RSSTCAM_TYPE, + SXE2_SECT_RSSEXTRACTOR_TYPE, + SXE2_SECT_RSSMAP_TYPE, + SXE2_SECT_RSSIPSET_TYPE, + SXE2_SECT_FNAVPTG_TYPE, + SXE2_SECT_FNAVMASK_TYPE, + SXE2_SECT_ACLPTG_TYPE = 16, + SXE2_SECT_TYPE_MAX, +}; +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_drv_type.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_drv_type.h new file mode 100644 index 0000000000000000000000000000000000000000..3131b33f57845130e0f08b5ef0d1b1e55ed155ba --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_drv_type.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_drv_type.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_TYPEDEF_H__ +#define __SXE2_DRV_TYPEDEF_H__ + +#include "ps3_types.h" + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#endif + +typedef U8 u8; +typedef U16 u16; +typedef U32 u32; +typedef U64 u64; + +#ifndef SXE2_SUPPORT_IPXE +typedef S8 s8; +#endif + +typedef S16 s16; +typedef S32 s32; +typedef S64 s64; + +typedef U16 __le16; +typedef U32 __le32; +typedef U64 __le64; + +#ifndef true +#define true (1) +#endif + +#ifndef false +#define false (0) +#endif + +#ifndef bool +#define bool Ps3Bool_t +#endif + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_host_regs.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_host_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..165167d4f384dc89bb25b663de9f1b8a7664a0fe --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_host_regs.h @@ -0,0 +1,717 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_host_regs.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HOST_REGS_H__ +#define __SXE2_HOST_REGS_H__ + +#define SXE2_BITS_MASK(m, s) ((m ## UL) << (s)) + +#define SXE2_RXQ_CTXT(_i, _QRX) (0x0050000 + ((_i) * 4 + (_QRX) * 0x20)) +#define SXE2_RXQ_HEAD(_QRX) (0x0060000 + ((_QRX) * 4)) +#define SXE2_RXQ_TAIL(_QRX) (0x0070000 + ((_QRX) * 4)) +#define SXE2_RXQ_CTRL(_QRX) (0x006d000 + ((_QRX) * 4)) +#define SXE2_RXQ_WB(_QRX) (0x006B000 + ((_QRX) * 4)) + +#define SXE2_RXQ_CTRL_STATUS_ACTIVE 0x00000004 +#define SXE2_RXQ_CTRL_ENABLED 0x00000001 +#define SXE2_RXQ_CTRL_CDE_ENABLE BIT(3) + +#define SXE2_PCIEPROC_BASE 0x002d6000 +#define SXE2_PF_INT_BASE 0x00260000 +#define SXE2_PF_INT_ALLOC (SXE2_PF_INT_BASE + 0x0000) +#define SXE2_PF_INT_ALLOC_FIRST 0x7FF +#define SXE2_PF_INT_ALLOC_LAST_S 12 +#define SXE2_PF_INT_ALLOC_LAST \ + (0x7FF << SXE2_PF_INT_ALLOC_LAST_S) +#define SXE2_PF_INT_ALLOC_VALID BIT(31) + +#define SXE2_PF_INT_OICR (SXE2_PF_INT_BASE + 0x0040) +#define SXE2_PF_INT_OICR_PCIE_TIMEOUT BIT(0) +#define SXE2_PF_INT_OICR_UR BIT(1) +#define SXE2_PF_INT_OICR_CA BIT(2) +#define SXE2_PF_INT_OICR_VFLR BIT(3) +#define SXE2_PF_INT_OICR_VFR_DONE BIT(4) +#define SXE2_PF_INT_OICR_LAN_TX_ERR BIT(5) +#define SXE2_PF_INT_OICR_BFDE BIT(6) +#define SXE2_PF_INT_OICR_LAN_RX_ERR BIT(7) +#define SXE2_PF_INT_OICR_ECC_ERR BIT(8) +#define SXE2_PF_INT_OICR_GPIO BIT(9) +#define SXE2_PF_INT_OICR_TSYN_TX BIT(11) +#define SXE2_PF_INT_OICR_TSYN_EVENT BIT(12) +#define SXE2_PF_INT_OICR_TSYN_TGT BIT(13) +#define SXE2_PF_INT_OICR_EXHAUST BIT(14) +#define SXE2_PF_INT_OICR_FW BIT(15) +#define SXE2_PF_INT_OICR_SWINT BIT(16) +#define SXE2_PF_INT_OICR_LINKSEC_CHG BIT(17) +#define SXE2_PF_INT_OICR_INT_CFG_ADDR_ERR BIT(18) +#define SXE2_PF_INT_OICR_INT_CFG_DATA_ERR BIT(19) +#define SXE2_PF_INT_OICR_INT_CFG_ADR_UNRANGE BIT(20) +#define SXE2_PF_INT_OICR_INT_RAM_CONFLICT BIT(21) +#define SXE2_PF_INT_OICR_GRST BIT(22) +#define SXE2_PF_INT_OICR_FWQ_INT BIT(29) +#define SXE2_PF_INT_OICR_FWQ_TOOL_INT BIT(30) +#define SXE2_PF_INT_OICR_MBXQ_INT BIT(31) + +#define SXE2_PF_INT_OICR_ENABLE (SXE2_PF_INT_BASE + 0x0020) + +#define SXE2_PF_INT_FW_EVENT (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_FW_ABNORMAL BIT(0) +#define SXE2_PF_INT_RDMA_AEQ_OVERFLOW BIT(1) +#define SXE2_PF_INT_CGMAC_LINK_CHG BIT(18) +#define SXE2_PF_INT_VFLR_DONE BIT(2) + +#define SXE2_PF_INT_OICR_CTL (SXE2_PF_INT_BASE + 0x0060) +#define SXE2_PF_INT_OICR_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_OICR_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_OICR_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_OICR_CTL_ITR_IDX_S) +#define SXE2_PF_INT_OICR_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_FWQ_CTL (SXE2_PF_INT_BASE + 0x00C0) +#define SXE2_PF_INT_FWQ_CTL_MSIX_IDX 0x7FFF +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_FWQ_CTL_ITR_IDX_S) +#define SXE2_PF_INT_FWQ_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_MBX_CTL (SXE2_PF_INT_BASE + 0x00A0) +#define SXE2_PF_INT_MBX_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_MBX_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_MBX_CTL_ITR_IDX (0x3 << SXE2_PF_INT_MBX_CTL_ITR_IDX_S) +#define SXE2_PF_INT_MBX_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_GPIO_ENA (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_GPIO_X_ENA(x) BIT(x) + +#define SXE2_PFG_INT_CTL (SXE2_PF_INT_BASE + 0x0120) +#define SXE2_PFG_INT_CTL_ITR_GRAN 0x7 +#define SXE2_PFG_INT_CTL_ITR_GRAN_0 (2) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN BIT(4) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN_0 (4) 
+#define SXE2_PFG_INT_CTL_CREDIT_GRAN_1 (8) + +#define SXE2_VFG_RAM_INIT_DONE \ + (SXE2_PF_INT_BASE + 0x0128) +#define SXE2_VFG_RAM_INIT_DONE_0 BIT(0) +#define SXE2_VFG_RAM_INIT_DONE_1 BIT(1) +#define SXE2_VFG_RAM_INIT_DONE_2 BIT(2) + +#define SXE2_LINK_REG_GET_10G_VALUE 4 +#define SXE2_LINK_REG_GET_25G_VALUE 1 +#define SXE2_LINK_REG_GET_50G_VALUE 2 +#define SXE2_LINK_REG_GET_100G_VALUE 3 + +#define SXE2_PORT0_CNT 0 +#define SXE2_PORT1_CNT 1 +#define SXE2_PORT2_CNT 2 +#define SXE2_PORT3_CNT 3 + +#define SXE2_LINK_STATUS_BASE (0x002ac200) +#define SXE2_LINK_STATUS_PORT0_POS 3 +#define SXE2_LINK_STATUS_PORT1_POS 11 +#define SXE2_LINK_STATUS_PORT2_POS 19 +#define SXE2_LINK_STATUS_PORT3_POS 27 +#define SXE2_LINK_STATUS_MASK 1 + +#define SXE2_LINK_SPEED_BASE (0x002ac200) +#define SXE2_LINK_SPEED_PORT0_POS 0 +#define SXE2_LINK_SPEED_PORT1_POS 8 +#define SXE2_LINK_SPEED_PORT2_POS 16 +#define SXE2_LINK_SPEED_PORT3_POS 24 +#define SXE2_LINK_SPEED_MASK 7 + +#define SXE2_PFVP_INT_ALLOC(vf_idx) (SXE2_PF_INT_BASE + 0x012C + ((vf_idx) * 4)) +#define SXE2_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PFVP_INT_ALLOC_LAST_S 12 +#define SXE2_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCI_PFVP_INT_ALLOC(vf_idx) (SXE2_PCIEPROC_BASE + 0x5800 + ((vf_idx) * 4)) +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_S 12 + +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCIEPROC_INT2FUNC(_INT) (SXE2_PCIEPROC_BASE + 0xe000 + ((_INT) * 4)) +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_S 0 +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_M (0xFF << SXE2_PCIEPROC_INT2FUNC_VF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_S 12 +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_M (0x7 << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_S 16 +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_M BIT(16) + +#define SXE2_VSI_PF(vf_idx) (SXE2_PF_INT_BASE + 0x14000 + ((vf_idx) * 4)) +#define SXE2_VSI_PF_ID_S 0 +#define SXE2_VSI_PF_ID_M (0x7 << SXE2_VSI_PF_ID_S) +#define SXE2_VSI_PF_EN_M BIT(3) + +#define SXE2_MBX_CTL(_VSI) (0x0026692C + ((_VSI) * 4)) +#define SXE2_MBX_CTL_MSIX_INDX_S 0 +#define SXE2_MBX_CTL_MSIX_INDX_M (0x7FF << SXE2_MBX_CTL_MSIX_INDX_S) +#define SXE2_MBX_CTL_CAUSE_ENA_M BIT(30) + +#define SXE2_PF_INT_TQCTL(q_idx) (SXE2_PF_INT_BASE + 0x092C + 4 * (q_idx)) +#define SXE2_PF_INT_TQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_TQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_TQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_TQCTL_ITR_IDX_S) +#define SXE2_PF_INT_TQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RQCTL(q_idx) (SXE2_PF_INT_BASE + 0x292C + 4 * (q_idx)) +#define SXE2_PF_INT_RQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_RQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_RQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_RQCTL_ITR_IDX_S) +#define SXE2_PF_INT_RQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RATE(irq_idx) (SXE2_PF_INT_BASE + 0x7530 + 4 * (irq_idx)) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL (0x3F) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX \ + (0x3F) +#define SXE2_PF_INT_RATE_INTRL_ENABLE (BIT(6)) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT (7) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE \ + (0x3F << SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT) + +#define SXE2_VF_INT_ITR(itr_idx, irq_idx) \ + 
(SXE2_PF_INT_BASE + 0xB530 + 0x2000 * (itr_idx) + 4 * (irq_idx)) +#define SXE2_VF_INT_ITR_INTERVAL 0xFFF + +#define SXE2_VF_DYN_CTL(irq_idx) (SXE2_PF_INT_BASE + 0x9530 + 4 * (irq_idx)) +#define SXE2_VF_DYN_CTL_INTENABLE BIT(0) +#define SXE2_VF_DYN_CTL_CLEARPBA BIT(1) +#define SXE2_VF_DYN_CTL_SWINT_TRIG BIT(2) +#define SXE2_VF_DYN_CTL_ITR_IDX_S \ + 3 +#define SXE2_VF_DYN_CTL_ITR_IDX_M 0x3 +#define SXE2_VF_DYN_CTL_INTERVAL_S 5 +#define SXE2_VF_DYN_CTL_INTERVAL_M 0xFFF +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_ENABLE BIT(24) +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_S 25 +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_M 0x3 + +#define SXE2_VF_DYN_CTL_INTENABLE_MSK \ + BIT(31) + +#define SXE2_BAR4_MSIX_BASE 0 +#define SXE2_BAR4_MSIX_CTL(_idx) (SXE2_BAR4_MSIX_BASE + 0xC + ((_idx) * 0x10)) +#define SXE2_BAR4_MSIX_ENABLE 0 +#define SXE2_BAR4_MSIX_DISABLE 1 + +#define SXE2_TXQ_LEGACY_DBLL(_DBQM) (0x1000 + ((_DBQM) * 4)) + +#define SXE2_TXQ_CONTEXT0(_pf_idx) (0x10040 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT1(_pf_idx) (0x10044 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT2(_pf_idx) (0x10048 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT3(_pf_idx) (0x1004C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT4(_pf_idx) (0x10050 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7(_pf_idx) (0x1005C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7_HEAD_S 0 +#define SXE2_TXQ_CONTEXT7_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_HEAD_S) +#define SXE2_TXQ_CONTEXT7_READ_HEAD_S 16 +#define SXE2_TXQ_CONTEXT7_READ_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_READ_HEAD_S) + +#define SXE2_TXQ_CTRL(_pf_idx) (0x10064 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CTXT_CTRL(_pf_idx) (0x100C8 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_DIS_CNT(_pf_idx) (0x100D0 + ((_pf_idx) * 0x100)) + +#define SXE2_TXQ_CTXT_CTRL_USED_MASK 0x00000800 +#define SXE2_TXQ_CTRL_SW_EN_M BIT(0) +#define SXE2_TXQ_CTRL_HW_EN_M BIT(1) + +#define SXE2_TXQ_CTXT2_PROT_IDX_S 0 +#define SXE2_TXQ_CTXT2_PROT_IDX_M SXE2_BITS_MASK(0x7, 0) +#define SXE2_TXQ_CTXT2_CGD_IDX_S 4 +#define SXE2_TXQ_CTXT2_CGD_IDX_M SXE2_BITS_MASK(0x1F, 4) +#define SXE2_TXQ_CTXT2_PF_IDX_S 9 +#define SXE2_TXQ_CTXT2_PF_IDX_M SXE2_BITS_MASK(0x7, 9) +#define SXE2_TXQ_CTXT2_VMVF_IDX_S 12 +#define SXE2_TXQ_CTXT2_VMVF_IDX_M SXE2_BITS_MASK(0x3FF, 12) +#define SXE2_TXQ_CTXT2_VMVF_TYPE_S 23 +#define SXE2_TXQ_CTXT2_VMVF_TYPE_M SXE2_BITS_MASK(0x3, 23) +#define SXE2_TXQ_CTXT2_TSYN_ENA_S 25 +#define SXE2_TXQ_CTXT2_TSYN_ENA_M BIT(25) +#define SXE2_TXQ_CTXT2_ALT_VLAN_S 26 +#define SXE2_TXQ_CTXT2_ALT_VLAN_M BIT(26) +#define SXE2_TXQ_CTXT2_WB_MODE_S 27 +#define SXE2_TXQ_CTXT2_WB_MODE_M BIT(27) +#define SXE2_TXQ_CTXT2_ITR_WB_S 28 +#define SXE2_TXQ_CTXT2_ITR_WB_M BIT(28) +#define SXE2_TXQ_CTXT2_LEGACY_EN_S 29 +#define SXE2_TXQ_CTXT2_LEGACY_EN_M BIT(29) +#define SXE2_TXQ_CTXT2_SSO_EN_S 30 +#define SXE2_TXQ_CTXT2_SSO_EN_M BIT(30) + +#define SXE2_TXQ_CTXT3_SRC_VSI_S 0 +#define SXE2_TXQ_CTXT3_SRC_VSI_M SXE2_BITS_MASK(0x3FF, 0) +#define SXE2_TXQ_CTXT3_CPU_ID_S 12 +#define SXE2_TXQ_CTXT3_CPU_ID_M SXE2_BITS_MASK(0xFF, 12) +#define SXE2_TXQ_CTXT3_TPH_RDDESC_S 20 +#define SXE2_TXQ_CTXT3_TPH_RDDESC_M BIT(20) +#define SXE2_TXQ_CTXT3_TPH_RDDATA_S 21 +#define SXE2_TXQ_CTXT3_TPH_RDDATA_M BIT(21) +#define SXE2_TXQ_CTXT3_TPH_WRDESC_S 22 +#define SXE2_TXQ_CTXT3_TPH_WRDESC_M BIT(22) + +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_S 0 +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_TXQ_CTXT3_RDDESC_RO_S 13 +#define SXE2_TXQ_CTXT3_RDDESC_RO_M BIT(13) +#define SXE2_TXQ_CTXT3_WRDESC_RO_S 14 +#define 
SXE2_TXQ_CTXT3_WRDESC_RO_M BIT(14) +#define SXE2_TXQ_CTXT3_RDDATA_RO_S 15 +#define SXE2_TXQ_CTXT3_RDDATA_RO_M BIT(15) +#define SXE2_TXQ_CTXT3_QLEN_S 16 +#define SXE2_TXQ_CTXT3_QLEN_M SXE2_BITS_MASK(0x1FFF, 16) + +#define SXE2_RX_BUF_CHAINED_MAX 10 +#define SXE2_RX_DESC_BASE_ADDR_UNIT 7 +#define SXE2_RX_HBUF_LEN_UNIT 6 +#define SXE2_RX_DBUF_LEN_UNIT 7 +#define SXE2_RX_DBUF_LEN_MASK (~0x7F) +#define SXE2_RX_HWTAIL_VALUE_MASK (~0x7) + +enum { + SXE2_RX_CTXT0 = 0, + SXE2_RX_CTXT1, + SXE2_RX_CTXT2, + SXE2_RX_CTXT3, + SXE2_RX_CTXT4, + SXE2_RX_CTXT_CNT, +}; + +#define SXE2_RX_CTXT_BASE_L_S 0 +#define SXE2_RX_CTXT_BASE_L_W 32 + +#define SXE2_RX_CTXT_BASE_H_S 0 +#define SXE2_RX_CTXT_BASE_H_W 25 +#define SXE2_RX_CTXT_DEPTH_L_S 25 +#define SXE2_RX_CTXT_DEPTH_L_W 7 + +#define SXE2_RX_CTXT_DEPTH_H_S 0 +#define SXE2_RX_CTXT_DEPTH_H_W 6 + +#define SXE2_RX_CTXT_DBUFF_S 6 +#define SXE2_RX_CTXT_DBUFF_W 7 + +#define SXE2_RX_CTXT_HBUFF_S 13 +#define SXE2_RX_CTXT_HBUFF_W 5 + +#define SXE2_RX_CTXT_HSPLT_TYPE_S 18 +#define SXE2_RX_CTXT_HSPLT_TYPE_W 2 + +#define SXE2_RX_CTXT_DESC_TYPE_S 20 +#define SXE2_RX_CTXT_DESC_TYPE_W 1 + +#define SXE2_RX_CTXT_CRC_S 21 +#define SXE2_RX_CTXT_CRC_W 1 + +#define SXE2_RX_CTXT_L2TAG_FLAG_S 23 +#define SXE2_RX_CTXT_L2TAG_FLAG_W 1 + +#define SXE2_RX_CTXT_HSPLT_0_S 24 +#define SXE2_RX_CTXT_HSPLT_0_W 4 + +#define SXE2_RX_CTXT_HSPLT_1_S 28 +#define SXE2_RX_CTXT_HSPLT_1_W 2 + +#define SXE2_RX_CTXT_INVALN_STP_S 31 +#define SXE2_RX_CTXT_INVALN_STP_W 1 + +#define SXE2_RX_CTXT_LRO_ENABLE_S 0 +#define SXE2_RX_CTXT_LRO_ENABLE_W 1 + +#define SXE2_RX_CTXT_CPUID_S 3 +#define SXE2_RX_CTXT_CPUID_W 8 + +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_S 11 +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_W 14 + +#define SXE2_RX_CTXT_LRO_DESC_MAX_S 25 +#define SXE2_RX_CTXT_LRO_DESC_MAX_W 4 + +#define SXE2_RX_CTXT_RELAX_DATA_S 29 +#define SXE2_RX_CTXT_RELAX_DATA_W 1 + +#define SXE2_RX_CTXT_RELAX_WB_S 30 +#define SXE2_RX_CTXT_RELAX_WB_W 1 + +#define SXE2_RX_CTXT_RELAX_RD_S 31 +#define SXE2_RX_CTXT_RELAX_RD_W 1 + +#define SXE2_RX_CTXT_THPRDESC_ENABLE_S 1 +#define SXE2_RX_CTXT_THPRDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPWDESC_ENABLE_S 2 +#define SXE2_RX_CTXT_THPWDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPRDATA_ENABLE_S 3 +#define SXE2_RX_CTXT_THPRDATA_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPHEAD_ENABLE_S 4 +#define SXE2_RX_CTXT_THPHEAD_ENABLE_W 1 + +#define SXE2_RX_CTXT_LOW_DESC_LINE_S 6 +#define SXE2_RX_CTXT_LOW_DESC_LINE_W 3 + +#define SXE2_RX_CTXT_VF_ID_S 9 +#define SXE2_RX_CTXT_VF_ID_W 8 + +#define SXE2_RX_CTXT_PF_ID_S 17 +#define SXE2_RX_CTXT_PF_ID_W 3 + +#define SXE2_RX_CTXT_VF_ENABLE_S 20 +#define SXE2_RX_CTXT_VF_ENABLE_W 1 + +#define SXE2_RX_CTXT_VSI_ID_S 21 +#define SXE2_RX_CTXT_VSI_ID_W 10 + +#define SXE2_PF_CTRLQ_FW_BASE 0x00312000 +#define SXE2_PF_CTRLQ_FW_ATQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0000) +#define SXE2_PF_CTRLQ_FW_ARQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0080) +#define SXE2_PF_CTRLQ_FW_ATQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0100) +#define SXE2_PF_CTRLQ_FW_ARQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0180) +#define SXE2_PF_CTRLQ_FW_ATQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0200) +#define SXE2_PF_CTRLQ_FW_ARQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0280) +#define SXE2_PF_CTRLQ_FW_ATQH (SXE2_PF_CTRLQ_FW_BASE + 0x0300) +#define SXE2_PF_CTRLQ_FW_ARQH (SXE2_PF_CTRLQ_FW_BASE + 0x0380) +#define SXE2_PF_CTRLQ_FW_ATQT (SXE2_PF_CTRLQ_FW_BASE + 0x0400) +#define SXE2_PF_CTRLQ_FW_ARQT (SXE2_PF_CTRLQ_FW_BASE + 0x0480) + +#define SXE2_PF_CTRLQ_MBX_BASE 0x00316000 +#define SXE2_PF_CTRLQ_MBX_ATQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE100) +#define SXE2_PF_CTRLQ_MBX_ATQBAH 
(SXE2_PF_CTRLQ_MBX_BASE + 0xE180) +#define SXE2_PF_CTRLQ_MBX_ATQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE200) +#define SXE2_PF_CTRLQ_MBX_ATQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE280) +#define SXE2_PF_CTRLQ_MBX_ATQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE300) +#define SXE2_PF_CTRLQ_MBX_ARQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE380) +#define SXE2_PF_CTRLQ_MBX_ARQBAH (SXE2_PF_CTRLQ_MBX_BASE + 0xE400) +#define SXE2_PF_CTRLQ_MBX_ARQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE480) +#define SXE2_PF_CTRLQ_MBX_ARQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE500) +#define SXE2_PF_CTRLQ_MBX_ARQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE580) + +#define SXE2_CMD_REG_LEN_M 0x3FF +#define SXE2_CMD_REG_LEN_VFE_M BIT(28) +#define SXE2_CMD_REG_LEN_OVFL_M BIT(29) +#define SXE2_CMD_REG_LEN_CRIT_M BIT(30) +#define SXE2_CMD_REG_LEN_ENABLE_M BIT(31) + +#define SXE2_CMD_REG_HEAD_M 0x3FF + +#define SXE2_PF_CTRLQ_FW_HW_STS (SXE2_PF_CTRLQ_FW_BASE + 0x0500) +#define SXE2_PF_CTRLQ_FW_ATQ_IDLE_MASK BIT(0) +#define SXE2_PF_CTRLQ_FW_ARQ_IDLE_MASK BIT(1) + +#define SXE2_TOP_CFG_BASE 0x00292000 +#define SXE2_HW_VER (SXE2_TOP_CFG_BASE + 0x48c) +#define SXE2_HW_FPGA_VER_M SXE2_BITS_MASK(0xFFF, 0) + +#define SXE2_FW_VER (SXE2_TOP_CFG_BASE + 0x214) +#define SXE2_FW_VER_BUILD_M SXE2_BITS_MASK(0xFF, 0) +#define SXE2_FW_VER_FIX_M SXE2_BITS_MASK(0xFF, 8) +#define SXE2_FW_VER_SUB_M SXE2_BITS_MASK(0xFF, 16) +#define SXE2_FW_VER_MAIN_M SXE2_BITS_MASK(0xFF, 24) +#define SXE2_FW_VER_FIX_SHIFT (8) +#define SXE2_FW_VER_SUB_SHIFT (16) +#define SXE2_FW_VER_MAIN_SHIFT (24) + +#define SXE2_FW_COMP_VER_ADDR (SXE2_TOP_CFG_BASE + 0x20c) + +#define SXE2_STATUS SXE2_FW_VER + +#define SXE2_FW_STATE (SXE2_TOP_CFG_BASE + 0x210) + +#define SXE2_FW_HEARTBEAT (SXE2_TOP_CFG_BASE + 0x218) + +#define SXE2_FW_MISC (SXE2_TOP_CFG_BASE + 0x21c) +#define SXE2_FW_MISC_MODE_M SXE2_BITS_MASK(0xF, 0) +#define SXE2_FW_MISC_POP_M SXE2_BITS_MASK(0x80000000, 0) + +#define SXE2_TX_OE_BASE 0x00030000 +#define SXE2_RX_OE_BASE 0x00050000 + +#define SXE2_PFP_L2TAGSEN(_i) (SXE2_TX_OE_BASE + 0x00300 + ((_i) * 4)) +#define SXE2_VSI_L2TAGSTXVALID(_i) \ + (SXE2_TX_OE_BASE + 0x01000 + ((_i) * 4)) +#define SXE2_VSI_TIR0(_i) (SXE2_TX_OE_BASE + 0x01C00 + ((_i) * 4)) +#define SXE2_VSI_TIR1(_i) (SXE2_TX_OE_BASE + 0x02800 + ((_i) * 4)) +#define SXE2_VSI_TAR(_i) (SXE2_TX_OE_BASE + 0x04C00 + ((_i) * 4)) +#define SXE2_VSI_TSR(_i) (SXE2_RX_OE_BASE + 0x18000 + ((_i) * 4)) + +#define SXE2_STATS_TX_LAN_CONFIG(_i) (SXE2_TX_OE_BASE + 0x08300 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_PKT_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08340 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_BYTE_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08380 + ((_i) * 4)) + +#define SXE2_STATS_RX_CONFIG(_i) (SXE2_RX_OE_BASE + 0x230B0 + ((_i) * 4)) +#define SXE2_STATS_RX_LAN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230C0 + ((_i) * 8)) +#define SXE2_STATS_RX_LAN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23120 + ((_i) * 8)) +#define SXE2_STATS_RX_FD_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230E0 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23100 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23140 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_OUT_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23160 + ((_i) * 8)) + +#define SXE2_L2TAG_ID_STAG 0 +#define SXE2_L2TAG_ID_OUT_VLAN1 1 +#define SXE2_L2TAG_ID_OUT_VLAN2 2 +#define SXE2_L2TAG_ID_VLAN 3 + +#define SXE2_PFP_L2TAGSEN_ALL_TAG 0xFF +#define SXE2_PFP_L2TAGSEN_DVM BIT(10) + +#define SXE2_VSI_TSR_STRIP_TAG_S 0 +#define SXE2_VSI_TSR_SHOW_TAG_S 4 + +#define SXE2_VSI_TSR_ID_STAG BIT(0) +#define SXE2_VSI_TSR_ID_OUT_VLAN1 
BIT(1) +#define SXE2_VSI_TSR_ID_OUT_VLAN2 BIT(2) +#define SXE2_VSI_TSR_ID_VLAN BIT(3) + +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S 0 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID BIT(3) +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S 4 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID BIT(7) +#define SXE2_VSI_L2TAGSTXVALID_TIR0_ID_S 16 +#define SXE2_VSI_L2TAGSTXVALID_TIR0_VALID BIT(19) +#define SXE2_VSI_L2TAGSTXVALID_TIR1_ID_S 20 +#define SXE2_VSI_L2TAGSTXVALID_TIR1_VALID BIT(23) + +#define SXE2_VSI_L2TAGSTXVALID_ID_STAG 0 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 2 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 3 +#define SXE2_VSI_L2TAGSTXVALID_ID_VLAN 4 + +#define SXE2_SWITCH_OG_BASE 0x00140000 +#define SXE2_SWITCH_SWE_BASE 0x00150000 +#define SXE2_SWITCH_RG_BASE 0x00160000 + +#define SXE2_VSI_RX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01074 + ((_i) * 4)) +#define SXE2_VSI_TX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01C74 + ((_i) * 4)) + +#define SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN BIT(1) +#define SXE2_VSI_TX_SW_CTRL_LAN_EN BIT(2) +#define SXE2_VSI_TX_SW_CTRL_MACAS_EN BIT(3) +#define SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TAR_UNTAGGED_SHIFT (16) + +#define SXE2_PCIE_SYS_READY 0x38c +#define SXE2_PCIE_SYS_READY_CORER_ASSERT BIT(0) +#define SXE2_PCIE_SYS_READY_STOP_DROP_DONE BIT(2) +#define SXE2_PCIE_SYS_READY_R5 BIT(3) +#define SXE2_PCIE_SYS_READY_STOP_DROP BIT(16) + +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS 0x78 +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS_TRANS_PENDING BIT(21) + +#define SXE2_TOP_CFG_CORE (SXE2_TOP_CFG_BASE + 0x0630) +#define SXE2_TOP_CFG_CORE_RST_CODE 0x09FBD586 + +#define SXE2_PFGEN_CTRL (0x00336000) +#define SXE2_PFGEN_CTRL_PFSWR BIT(0) + +#define SXE2_VFGEN_CTRL(_vf) (0x00337000 + ((_vf) * 4)) +#define SXE2_VFGEN_CTRL_VFSWR BIT(0) + +#define SXE2_VF_VRC_VFGEN_RSTAT(_vf) (0x00338000 + (_vf) * 4) +#define SXE2_VF_VRC_VFGEN_VFRSTAT (0x3) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VFR (0) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_COMPLETE (BIT(0)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE (BIT(1)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_MASK \ + (BIT(2)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF \ + (0x300) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_NO_VFR \ + (0) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_VFR (1) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK \ + (BIT(10)) + +#define SXE2_GLGEN_VFLRSTAT(_reg) (0x0033A000 + ((_reg) * 4)) + +#define SXE2_ACCEPT_RULE_TAGGED_S 0 +#define SXE2_ACCEPT_RULE_UNTAGGED_S 16 + +#define SXE2_VF_RXQ_BASE(_VF) (0x000b0800 + ((_VF) * 4)) +#define SXE2_VF_RXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_RXQ_BASE_FIRST_Q_M (0x7FF << SXE2_VF_RXQ_BASE_FIRST_Q_S) +#define SXE2_VF_RXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_RXQ_BASE_Q_NUM_M (0x7FF << SXE2_VF_RXQ_BASE_Q_NUM_S) + +#define SXE2_VF_RXQ_MAPENA(_VF) (0x000b0400 + ((_VF) * 4)) +#define SXE2_VF_RXQ_MAPENA_M BIT(0) + +#define SXE2_VF_TXQ_BASE(_VF) (0x00040400 + ((_VF) * 4)) +#define SXE2_VF_TXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_TXQ_BASE_FIRST_Q_M (0x3FFF << SXE2_VF_TXQ_BASE_FIRST_Q_S) +#define SXE2_VF_TXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_TXQ_BASE_Q_NUM_M (0xFF << SXE2_VF_TXQ_BASE_Q_NUM_S) + +#define SXE2_VF_TXQ_MAPENA(_VF) (0x00045000 + ((_VF) * 4)) +#define SXE2_VF_TXQ_MAPENA_M BIT(0) + +#define PRI_PTP_BASEADDR 0x2a8000 + +#define GLTSYN (PRI_PTP_BASEADDR + 0x0) +#define GLTSYN_ENA_M BIT(0) + +#define GLTSYN_CMD (PRI_PTP_BASEADDR + 0x4) +#define GLTSYN_CMD_INIT_TIME 0x01 
+#define GLTSYN_CMD_INIT_INCVAL 0x02 +#define GLTSYN_CMD_ADJ_TIME 0x04 +#define GLTSYN_CMD_ADJ_TIME_AT_TIME 0x0C +#define GLTSYN_CMD_LATCHING_SHTIME 0x80 + +#define GLTSYN_SYNC (PRI_PTP_BASEADDR + 0x8) +#define GLTSYN_SYNC_PLUS_1NS 0x1 +#define GLTSYN_SYNC_MINUS_1NS 0x2 +#define GLTSYN_SYNC_EXEC 0x3 +#define GLTSYN_SYNC_GEN_PULSE 0x4 + +#define GLTSYN_SEM (PRI_PTP_BASEADDR + 0xC) +#define GLTSYN_SEM_BUSY_M BIT(0) + +#define GLTSYN_STAT (PRI_PTP_BASEADDR + 0x10) +#define GLTSYN_STAT_EVENT0_M BIT(0) +#define GLTSYN_STAT_EVENT1_M BIT(1) +#define GLTSYN_STAT_EVENT2_M BIT(2) + +#define GLTSYN_TIME_SUBNS (PRI_PTP_BASEADDR + 0x20) +#define GLTSYN_TIME_NS (PRI_PTP_BASEADDR + 0x24) +#define GLTSYN_TIME_S_H (PRI_PTP_BASEADDR + 0x28) +#define GLTSYN_TIME_S_L (PRI_PTP_BASEADDR + 0x2C) + +#define GLTSYN_SHTIME_SUBNS (PRI_PTP_BASEADDR + 0x30) +#define GLTSYN_SHTIME_NS (PRI_PTP_BASEADDR + 0x34) +#define GLTSYN_SHTIME_S_H (PRI_PTP_BASEADDR + 0x38) +#define GLTSYN_SHTIME_S_L (PRI_PTP_BASEADDR + 0x3C) + +#define GLTSYN_SHADJ_SUBNS (PRI_PTP_BASEADDR + 0x40) +#define GLTSYN_SHADJ_NS (PRI_PTP_BASEADDR + 0x44) + +#define GLTSYN_INCVAL_NS (PRI_PTP_BASEADDR + 0x50) +#define GLTSYN_INCVAL_SUBNS (PRI_PTP_BASEADDR + 0x54) + +#define GLTSYN_TGT_NS(_i) \ + (PRI_PTP_BASEADDR + 0x60 + ((_i) * 16)) +#define GLTSYN_TGT_S_H(_i) (PRI_PTP_BASEADDR + 0x64 + ((_i) * 16)) +#define GLTSYN_TGT_S_L(_i) (PRI_PTP_BASEADDR + 0x68 + ((_i) * 16)) + +#define GLTSYN_EVENT_NS(_i) \ + (PRI_PTP_BASEADDR + 0xA0 + ((_i) * 16)) + +#define GLTSYN_EVENT_S_H(_i) (PRI_PTP_BASEADDR + 0xA4 + ((_i) * 16)) +#define GLTSYN_EVENT_S_H_MASK (0xFFFF) + +#define GLTSYN_EVENT_S_L(_i) (PRI_PTP_BASEADDR + 0xA8 + ((_i) * 16)) + +#define GLTSYN_AUXOUT(_i) \ + (PRI_PTP_BASEADDR + 0xD0 + ((_i) * 4)) +#define GLTSYN_AUXOUT_OUT_ENA BIT(0) +#define GLTSYN_AUXOUT_OUT_MOD (0x03 << 1) +#define GLTSYN_AUXOUT_OUTLVL BIT(3) +#define GLTSYN_AUXOUT_INT_ENA BIT(4) +#define GLTSYN_AUXOUT_PULSEW (0x1fff << 3) + +#define GLTSYN_CLKO(_i) \ + (PRI_PTP_BASEADDR + 0xE0 + ((_i) * 4)) + +#define GLTSYN_AUXIN(_i) (PRI_PTP_BASEADDR + 0xF4 + ((_i) * 4)) +#define GLTSYN_AUXIN_RISING_EDGE BIT(0) +#define GLTSYN_AUXIN_FALLING_EDGE BIT(1) +#define GLTSYN_AUXIN_ENABLE BIT(4) + +#define CGMAC_CSR_BASE 0x2B4000 + +#define CGMAC_PORT_OFFSET 0x00004000 + +#define PFP_CGM_TX_TSMEM(_port, _i) \ + (CGMAC_CSR_BASE + 0x100 + \ + CGMAC_PORT_OFFSET * (_port) + ((_i) * 4)) + +#define PFP_CGM_TX_TXHI(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x108 + ((_i) * 8)) +#define PFP_CGM_TX_TXLO(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x10C + ((_i) * 8)) + +#define CGMAC_CSR_MAC0_OFFSET 0x2B4000 +#define CGMAC_CSR_MAC_OFFSET(_i) (CGMAC_CSR_MAC0_OFFSET + ((_i) * 0x4000)) + +#define PFP_CGM_MAC_TX_TSMEM(_phy, _i) \ + (CGMAC_CSR_MAC_OFFSET(_phy) + 0x100 + \ + ((_i) * 4)) + +#define PFP_CGM_MAC_TX_TXHI(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x108 + ((_i) * 8)) +#define PFP_CGM_MAC_TX_TXLO(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x10C + ((_i) * 8)) + +#define SXE2_VF_GLINT_CEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_S 11 +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_GLINT_CEQCTL_CAUSE_ENA_M BIT(30) +#define SXE2_VF_GLINT_CEQCTL(_INT) (0x0026492C + ((_INT) * 4)) + +#define SXE2_VF_PFINT_AEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_S 11 +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_VPINT_AEQCTL_CAUSE_ENA_M BIT(30) +#define 
SXE2_VF_VPINT_AEQCTL(_VF) (0x0026052c + ((_VF) * 4)) + +#define SXE2_IPSEC_TX_BASE (0x2A0000) +#define SXE2_IPSEC_RX_BASE (0x2A2000) + +#define SXE2_IPSEC_RX_IPSIDX_ADDR (SXE2_IPSEC_RX_BASE + 0x0084) +#define SXE2_IPSEC_RX_IPSIDX_RST (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_VBI_SHIFT (18) +#define SXE2_IPSEC_RX_IPSIDX_VBI_MASK (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_SHIFT (17) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_MASK (0x00020000) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_SHIFT (4) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_MASK (0x0000fff0) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_SHIFT (2) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_MASK (0x0000000c) + +#define SXE2_IPSEC_RX_IPSIPID_ADDR (SXE2_IPSEC_RX_BASE + 0x0088) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_MASK (0x000000ff) + +#define SXE2_IPSEC_RX_IPSSPI0_ADDR (SXE2_IPSEC_RX_BASE + 0x008c) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_MASK (0xffffffff) + +#define SXE2_IPSEC_RX_IPSSPI1_ADDR (SXE2_IPSEC_RX_BASE + 0x0090) +#define SXE2_IPSEC_RX_IPSSPI1_SPI_Y_MASK (0xffffffff) + +#define SXE2_PAUSE_STATS_BASE(port) (0x002b2000 + (port) * 0x4000) +#define SXE2_TXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0894) +#define SXE2_TXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0a18) +#define SXE2_TXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a20 + 8 * (pri))) +#define SXE2_TXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a60 + 8 * (pri))) +#define SXE2_TXPFCXONTOXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0aa0 + 8 * (pri))) +#define SXE2_RXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0988) +#define SXE2_RXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0b28) +#define SXE2_RXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b30 + 8 * (pri))) +#define SXE2_RXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b70 + 8 * (pri))) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_internal_ver.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_internal_ver.h new file mode 100644 index 0000000000000000000000000000000000000000..b786433c5fb477b368408c4bac3e6281588cea77 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_internal_ver.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_internal_ver.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_INTERNAL_VER_H__ +#define __SXE2_INTERNAL_VER_H__ + +#define SXE2_VER_MAJOR_OFFSET (16) +#define SXE2_MK_VER(major, minor) \ + ((major) << SXE2_VER_MAJOR_OFFSET | (minor)) +#define SXE2_MK_VER_MAJOR(ver) (((ver) >> SXE2_VER_MAJOR_OFFSET) & 0xff) +#define SXE2_MK_VER_MINOR(ver) ((ver) & 0xff) + +#define SXE2_ITR_VER_MAJOR_V100 1 +#define SXE2_ITR_VER_MAJOR_V200 2 + +#define SXE2_ITR_VER_MAJOR 1 +#define SXE2_ITR_VER_MINOR 1 +#define SXE2_ITR_VER SXE2_MK_VER(SXE2_ITR_VER_MAJOR, SXE2_ITR_VER_MINOR) + +#define SXE2_CTRL_VER_IS_V100(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V100) +#define SXE2_CTRL_VER_IS_V200(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V200) + +#define SXE2LIB_ITR_VER_MAJOR 1 +#define SXE2LIB_ITR_VER_MINOR 1 +#define SXE2LIB_ITR_VER SXE2_MK_VER(SXE2LIB_ITR_VER_MAJOR, SXE2LIB_ITR_VER_MINOR) + +#define SXE2_DRV_CLI_VER_MAJOR 1 +#define SXE2_DRV_CLI_VER_MINOR 1 +#define SXE2_DRV_CLI_VER \ + SXE2_MK_VER(SXE2_DRV_CLI_VER_MAJOR, SXE2_DRV_CLI_VER_MINOR) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ioctl.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..defd0b7ab3250490d5591a88920bbf6da70eaccb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_ioctl.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ioctl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_IOCTL_H__ +#define __SXE2_IOCTL_H__ + +#include "sxe2_internal_ver.h" + +struct sxe2_ioctl_sync_cmd { + u32 ver; + u32 resv; + u64 trace_id; + u32 timeout; + u8 resv1[4]; + void *in_data; + u32 in_len; + u8 resv2[4]; + void *out_data; + u32 out_len; + u8 resv3[4]; +}; + +#define SXE2_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxe2_ioctl_sync_cmd) +#define SXE2_CMD_IOCTL_SYNC_DRV_CMD _IOWR('M', 2, struct sxe2_ioctl_sync_cmd) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_misc.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_misc.h new file mode 100644 index 0000000000000000000000000000000000000000..9a9124fc7a74fe99f5752733fb99da84edadeeaa --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_misc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_MISC_H__ +#define __SXE2_MISC_H__ + +#define REG_FW_MISC_MASK_MODE (0xF) +#define REG_FW_MISC_MASK_RSV0 (0xF0) +#define REG_FW_MISC_MASK_RSV1 (0xF00) +#define REG_FW_MISC_MASK_RSV2 (0xF000) +#define REG_FW_MISC_MASK_POP (0x80000000) + +#define REG_FW_MISC_MASK_MODE_OFST (0) +#define REG_FW_MISC_MASK_RSV0_OFST (4) +#define REG_FW_MISC_MASK_RSV1_OFST (8) +#define REG_FW_MISC_MASK_RSV2_OFST (12) +#define REG_FW_MISC_MASK_POP_OFST (31) + +enum sxe2_nic_mode { + SXE2_NIC_MODE_NORMAL = 0, + SXE2_NIC_MODE_NCD = 1, + + SXE2_NIC_MODE_MAX = 0xF, +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_msg.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..6fb5055f51621a5ea2e75cb876dfd31bc5ee2462 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_msg.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MSG_H__ +#define __SXE2_MSG_H__ + +#if defined(SXE2_FW) || defined(SXE2_SUPPORT_UEFI) + +#include "sxe2_drv_type.h" +#endif + +#ifdef PS3_CLI_SXE2 +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +enum sfp_type_identifier { + SXE2_SFP_TYPE_UNKNOW = 0x00, + SXE2_SFP_TYPE_SFP = 0x03, + + SXE2_SFP_TYPE_QSFP_PLUS = 0x0D, + SXE2_SFP_TYPE_QSFP28 = 0x11, + + SXE2_SFP_TYPE_MAX = 0xFF, +}; + +#ifndef SFP_DEFINE +#define SFP_DEFINE + +#define SXE2_SFP_EEP_WR 0x1 +#define SXE2_SFP_EEP_QSFP 0x1 + +enum sfp_bus_addr { + SXE2_SFP_EEP_I2C_ADDR0 = 0xA0, + SXE2_SFP_EEP_I2C_ADDR1 = 0xA2, + + SXE2_SFP_EEP_I2C_ADDR_NR = 0xFFFF, +}; + +struct sxe2_sfp_req { + u8 is_wr; + u8 is_qsfp; + __le16 bus_addr; + __le16 page_cnt; + __le16 offset; + __le16 data_len; + __le16 rvd; + u8 data[]; +}; + +struct sxe2_sfp_resp { + u8 is_wr; + u8 is_qsfp; + __le16 data_len; + u8 data[]; +}; + +enum sfp_page_cnt { + SXE2_SFP_EEP_PAGE_CNT0 = 0, + SXE2_SFP_EEP_PAGE_CNT1, + SXE2_SFP_EEP_PAGE_CNT2, + SXE2_SFP_EEP_PAGE_CNT3, + + SXE2_SFP_EEP_PAGE_CNT20 = 20, + SXE2_SFP_EEP_PAGE_CNT21 = 21, + + SXE2_SFP_EEP_PAGE_CNT_NR = 0xFFFF, +}; + +#define SXE2_SFP_E2P_I2C_7BIT_ADDR0 (SXE2_SFP_EEP_I2C_ADDR0 >> 1) +#define SXE2_SFP_E2P_I2C_7BIT_ADDR1 (SXE2_SFP_EEP_I2C_ADDR1 >> 1) + +#define SXE2_QSFP_PAGE_OFST_START 128 +#define SXE2_SFP_EEP_OFST_MAX 255 +#define SXE2_SFP_EEP_LEN_MAX 256 +#endif + +#ifndef FW_STATE_DEFINE +#define FW_STATE_DEFINE + +#define SXE2_FW_STATUS_MAIN_SHIF (16) +#define SXE2_FW_STATUS_MAIN_MASK (0xFF0000) +#define SXE2_FW_STATUS_SUB_MASK (0xFFFF) +enum Sxe2FwStateMain { + SXE2_FW_STATE_MAIN_UNDEFINED = 0x00, + SXE2_FW_STATE_MAIN_INIT = 0x10000, + SXE2_FW_STATE_MAIN_RUN = 0x20000, + SXE2_FW_STATE_MAIN_ABNOMAL = 0x30000, +}; + +enum Sxe2FwState { + SXE2_FW_START_STATE_UNDEFINED = SXE2_FW_STATE_MAIN_UNDEFINED, + SXE2_FW_START_STATE_INIT_BASE = (SXE2_FW_STATE_MAIN_INIT + 0x1), + SXE2_FW_START_STATE_SCAN_DEVICE = (SXE2_FW_STATE_MAIN_INIT + 0x20), + SXE2_FW_START_STATE_FINISHED = (SXE2_FW_STATE_MAIN_RUN + 0x0), + SXE2_FW_START_STATE_UPGRADE = (SXE2_FW_STATE_MAIN_RUN + 0x1), + SXE2_FW_START_STATE_SYNC = (SXE2_FW_STATE_MAIN_RUN + 0x2), + SXE2_FW_RUNNING_STATE_ABNOMAL = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x1), + SXE2_FW_RUNNING_STATE_ABNOMAL_CORE1 = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x2), + 
SXE2_FW_RUNNING_STATE_ABNOMAL_HEART = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x3), + + SXE2_FW_START_STATE_MASK = (SXE2_FW_STATUS_MAIN_MASK | SXE2_FW_STATUS_SUB_MASK), +}; +#endif + +#ifndef LED_DEFINE +#define LED_DEFINE +enum sxe2_led_mode { + SXE2_IDENTIFY_LED_BLINK_ON = 0, + SXE2_IDENTIFY_LED_BLINK_OFF, + SXE2_IDENTIFY_LED_ON, + SXE2_IDENTIFY_LED_OFF, + SXE2_IDENTIFY_LED_RESET, +}; + +struct sxe2_led_ctrl { + u32 mode; + u32 duration; + +}; + +struct sxe2_led_ctrl_resp { + u32 ack; +}; +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_spec.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..20338dcd57c87077dc415a03c92db084a10845cb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_spec.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_spec.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_SPEC__ +#define __SXE2_SPEC__ + +#define SXE2_TXSCHED_LAYER_MAX_7 7 +#define SXE2_TXSCHED_LAYER_MAX_4 4 +#define SXE2_TXSCHED_LAYER_MAX_3 3 +#define SXE2_TXSCHED_LEAF_MAX_3072 3072 +#define SXE2_TXSCHED_LEAF_MAX_512 512 +#define SXE2_TXSCHED_LEAF_MAX_256 256 +#define SXE2_TXSCHED_LEAF_MAX_128 128 +#define SXE2_TXSCHED_LEAF_MAX_64 64 + +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 + +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#define SXE2_XDP_TX_Q_NUM 8 + +#ifndef SXE2_TXSCHED_LAYER_MAX +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#endif + +#ifndef SXE2_TXSCHED_LEAF_MAX +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 +#endif + +#ifndef SXE2_VSI_PF_ASSURED_NUM +#define SXE2_VSI_PF_ASSURED_NUM 256 +#endif + +#ifndef SXE2_PF_NUM +#define SXE2_PF_NUM 8 +#endif + +#ifndef SXE2_VSI_NUM +#define SXE2_VSI_NUM 768 +#endif + +#ifndef SXE2_QUEUE_NUM +#define SXE2_QUEUE_NUM 2048 +#endif + +#ifndef SXE2_IRQ_NUM +#define SXE2_IRQ_NUM 2048 +#endif + +#ifndef SXE2_VF_NUM +#define SXE2_VF_NUM 256 +#endif + +#ifndef SXE2_MAX_MACVLANS +#define SXE2_MAX_MACVLANS 16 +#endif + +#define SXE2_BUF_SIZE_FW_TQ (8 * 1024) +#define SXE2_BUF_SIZE_FW_RQ (8 * 1024) + +#ifndef SXE2_BUF_SIZE_MBX_TQ +#define SXE2_BUF_SIZE_MBX_TQ (4 * 1024) +#endif + +#ifndef SXE2_BUF_SIZE_MBX_RQ +#define SXE2_BUF_SIZE_MBX_RQ (4 * 1024) +#endif + +#ifndef SXE2_DFLT_IRQS_MAX_CNT +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#endif + +#ifndef SXE2_DFLT_IRQS_MIN_CNT +#define SXE2_DFLT_IRQS_MIN_CNT 8 + +#endif + +#ifndef SXE2_VF_RSS_Q_NUM +#define SXE2_VF_RSS_Q_NUM 16 +#endif + +#ifndef SXE2_IPSEC_RX_SA_DEPTH +#define SXE2_IPSEC_RX_SA_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_RX_DCAM_DEPTH +#define SXE2_IPSEC_RX_DCAM_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_TX_SA_DEPTH +#define SXE2_IPSEC_TX_SA_DEPTH 4096 +#endif + +#define SXE2_MACSEC_ENABLE + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_type.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_type.h new file mode 100644 index 0000000000000000000000000000000000000000..bb2dbd8f323a37b78424a64503505907c1906ea4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_type.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_type.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_TYPES_H__ +#define __SXE2_TYPES_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#if defined __BYTE_ORDER__ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif defined __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#elif defined RTE_TOOLCHAIN_MSVC +#define __LITTLE_ENDIAN_BITFIELD +#else +#error "Unknown endianness." +#endif +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef char s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +typedef s8 S8; +typedef s16 S16; +typedef s32 S32; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#define STATIC static + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_uefi_def.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_uefi_def.h new file mode 100644 index 0000000000000000000000000000000000000000..332d0d2e4b154aaf24f08caecf0f82562d952e3d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_uefi_def.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_uefi_def.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_UEFI_DEF_H__ +#define __SXE2_UEFI_DEF_H__ + +#ifdef UEFI_SUPPORT_MIPS +#include +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define __BIG_ENDIAN_BITFIELD +#endif + +#if BYTE_ORDER == LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#else +#define __BIG_ENDIAN_BITFIELD +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/include/sxe2_version.h b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_version.h new file mode 100644 index 0000000000000000000000000000000000000000..61d6311f3bf3f05b1b19a3bd9ba267dc089e0b49 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/include/sxe2_version.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_version.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_VER_H__ +#define __SXE2_VER_H__ + +#define SXE2_VERSION "0.0.0.0" +#define SXE2_COMMIT_ID "75c3d1c" +#define SXE2_BRANCH "feature/develop-rc-euler6.6-test" +#define SXE2_BUILD_TIME "2026-04-30 02:40:04" + +#define SXE2_DRV_ARCH "x86_64" +#define SXE2_DRV_NAME "sxe2" +#define SXE2VF_DRV_NAME "sxe2vf" +#define SXE2_DRV_LICENSE "GPL v2" +#define SXE2_DRV_AUTHOR "SXE2" +#define SXE2_DRV_DESCRIPTION "SXE2 Linux Driver" +#define SXE2VF_DRV_DESCRIPTION "SXE2 Virtual Function Linux Driver" + +#define SXE2_FW_NAME "soc" +#define SXE2_FW_ARCH "arm32" + +#ifndef SXE2_CFG_RELEASE +#define SXE2_FW_BUILD_MODE "debug" +#else +#define SXE2_FW_BUILD_MODE "release" +#endif + +#define SXE2_FW_RUN_MODE 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2.cfg b/drivers/net/ethernet/linkdata/sxe2/sxe2.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c02bad7ab037bc0037fbd0b5a346be3b6424493b Binary files /dev/null and b/drivers/net/ethernet/linkdata/sxe2/sxe2.cfg differ diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2_dkms_script.sh b/drivers/net/ethernet/linkdata/sxe2/sxe2_dkms_script.sh new file mode 100755 index 0000000000000000000000000000000000000000..1e44604dd98c65a46d1b96e3fe25646e5aa596a2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2_dkms_script.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Argument check +if [ $# -eq 0 ]; then + echo "Usage: $0 [hook_type]" + exit 1 +fi + +case "$1" in + pre_build) + # Create the firmware directory + mkdir -p /lib/firmware/sxe2/ddp + chmod 755 /lib/firmware/sxe2/ddp + echo "Created /lib/firmware/sxe2/ddp directory" + ;; + post_build) + # Copy the configuration file + if [ -f ./sxe2.cfg ]; then + cp -rf ./sxe2.cfg /lib/firmware/sxe2/ddp/ + if [ -f "/etc/.kyinfo" ] || [ -f "/etc/kylin-release" ]; then + setfattr -n security.kysec -v "type:fw_file" /lib/firmware/sxe2/ddp/sxe2.cfg + fi + else + echo "Warning: sxe2.cfg not found in build directory" + fi + ;; + post_install) + # Rebuild the initramfs + mkdir -p /etc/dracut.conf.d + if [ ! -e /etc/dracut.conf.d/sxe2-depmod.conf ]; then + echo 'install_items+=" /lib/firmware/sxe2/ddp/sxe2.cfg "' > /etc/dracut.conf.d/sxe2-depmod.conf + fi + + if [ -x /usr/bin/dracut ]; then + dracut -v -f + else + echo "Error: dracut not found in PATH" + fi + ;; + post_add|pre_install|post_remove) + # Reserved hooks: no operation + echo "Hook $1 executed (no operation)" + ;; + *) + echo "Error: Unknown hook type '$1'" + exit 1 + ;; +esac + +exit 0 \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.c new file mode 100644 index 0000000000000000000000000000000000000000..8ce512de7c343bbe56ed9d7bd2f1f4df7fa3fb98 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.c @@ -0,0 +1,1448 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_flow.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_flow.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_mbx_public.h" +#include "sxe2_switch.h" + +static u32 sxe2_com_flow_rss_flow_id; + +static s32 sxe2_com_flow_vsi_id_change(struct sxe2_adapter *adapter, u16 id_in_dev, + u16 *id_in_pf) +{ + s32 ret = 0; + struct sxe2_vsi *vsi = NULL; + *id_in_pf = 0xffff; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, id_in_dev); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", id_in_dev); + ret = -EINVAL; + mutex_unlock(&adapter->vsi_ctxt.lock); + goto l_end; + } + *id_in_pf = vsi->id_in_pf; + mutex_unlock(&adapter->vsi_ctxt.lock); +l_end: + return ret; +} + +static s32 sxe2_com_flow_switch_meta(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_user_cpx_fltr *fltr) +{ + s32 ret = 0; + + if (req->meta.switch_src_direct == SXE2_FLOW_SW_DIRECT_RX) + fltr->src_type = SXE2_SRC_TYPE_RX; + else + fltr->src_type = SXE2_SRC_TYPE_TX; + + if (req->meta.switch_pattern_dup_allow == SXE2_FLOW_SW_PATTERN_ONLY) + fltr->backup_type = SXE2_RULE_BACKUP_T_NO; + else if (req->meta.switch_pattern_dup_allow == SXE2_FLOW_SW_PATTERN_FIRST) + fltr->backup_type = SXE2_RULE_BACKUP_T_FIRST; + else + fltr->backup_type = SXE2_RULE_BACKUP_T_LAST; + + fltr->prio = req->meta.flow_prio; + + fltr->src_vsi_id = req->meta.flow_src_vsi; + + fltr->rule_vsi_id = rule_vsi_id; + + if (req->meta.tunnel_type == SXE2_FLOW_TUNNEL_TYPE_NONE) { + fltr->tunnel_type = SXE2_TNL_NONE; + } else if (req->meta.tunnel_type == SXE2_FLOW_TUNNEL_TYPE_VXLAN) { + fltr->tunnel_type = SXE2_TNL_VXLAN; + } else if (req->meta.tunnel_type == SXE2_FLOW_TUNNEL_TYPE_GENEVE) { + fltr->tunnel_type = SXE2_TNL_GENEVE; + } else if (req->meta.tunnel_type == SXE2_FLOW_TUNNEL_TYPE_GRE) { + fltr->tunnel_type = SXE2_TNL_GRETAP; + } else { + ret = -EINVAL; + goto l_end; + } + + LOG_DEBUG_BDF("meta src_type[%d],backup_type[%d],prio[%d],src_vsi[%d],rule_vsi[%d]\n", + fltr->src_type, fltr->backup_type, fltr->prio, fltr->src_vsi_id, + fltr->rule_vsi_id); + +l_end: + return ret; +} + +static s32 sxe2_com_flow_switch_pattern_outer(struct sxe2_adapter *adapter, + struct sxe2_flow_pattern *pattern_out, + struct sxe2_user_cpx_fltr *fltr) +{ + s32 ret = 0; + struct sxe2_tcf_key_item *item; + DECLARE_BITMAP(map_spec, SXE2_FLOW_FLD_ID_MAX); + + bitmap_zero(map_spec, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_HDR_ETH, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_ETH_DA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_DMAC]; + memcpy(item->value.hdr.eth_hdr.dst_addr, + pattern_out->item_spec.eth.dst_addr, ETH_ALEN); + memcpy(item->mask.hdr.eth_hdr.dst_addr, + pattern_out->item_mask.eth.dst_addr, ETH_ALEN); + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, dmac[%pM], mask[%pM]\n", item->type, + pattern_out->item_spec.eth.dst_addr, + pattern_out->item_mask.eth.dst_addr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_ETH_SA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_SMAC]; + memcpy(item->value.hdr.eth_hdr.src_addr, + pattern_out->item_spec.eth.src_addr, ETH_ALEN); + memcpy(item->mask.hdr.eth_hdr.src_addr, + pattern_out->item_mask.eth.src_addr, ETH_ALEN); + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d, smac[%pM], mask[%pM]\n", item->type, + pattern_out->item_spec.eth.src_addr, + 
pattern_out->item_mask.eth.src_addr); + } + if (test_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_ETYPE]; + item->value.hdr.ethertype.ethtype_id = + pattern_out->item_spec.eth.ether_type; + item->mask.hdr.ethertype.ethtype_id = + pattern_out->item_mask.eth.ether_type; + set_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, map_spec); + LOG_DEBUG_BDF("prot_type %d, ether_type[%d], mask[%d]\n", + item->type, pattern_out->item_spec.eth.ether_type, + pattern_out->item_mask.eth.ether_type); + } + } + if (test_bit(SXE2_FLOW_HDR_VLAN, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_S_TPID, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN_EX]; + item->value.hdr.vlan_hdr.type = pattern_out->item_spec.vlan.type; + item->mask.hdr.vlan_hdr.type = pattern_out->item_mask.vlan.type; + set_bit(SXE2_FLOW_FLD_ID_S_TPID, map_spec); + LOG_DEBUG_BDF("prot_type %d, vlan_tpid[%d], mask[%d]\n", + item->type, pattern_out->item_spec.vlan.type, + pattern_out->item_mask.vlan.type); + } + + if (test_bit(SXE2_FLOW_FLD_ID_S_VID, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN_EX]; + item->value.hdr.vlan_hdr.vlan = pattern_out->item_spec.vlan.vlan; + item->mask.hdr.vlan_hdr.vlan = pattern_out->item_mask.vlan.vlan; + set_bit(SXE2_FLOW_FLD_ID_S_VID, map_spec); + LOG_DEBUG_BDF("prot_type %d, vlan_id[%d], mask[%d]\n", item->type, + pattern_out->item_spec.vlan.vlan, + pattern_out->item_mask.vlan.vlan); + } + + if (test_bit(SXE2_FLOW_FLD_ID_S_TCI, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN_EX]; + item->value.hdr.vlan_hdr.vlan = pattern_out->item_spec.vlan.vlan; + item->mask.hdr.vlan_hdr.vlan = pattern_out->item_mask.vlan.vlan; + set_bit(SXE2_FLOW_FLD_ID_S_TCI, map_spec); + LOG_DEBUG_BDF("prot_type %d, vlan_tci[%d], mask[%d]\n", + item->type, pattern_out->item_spec.vlan.vlan, + pattern_out->item_mask.vlan.vlan); + } + } + if (test_bit(SXE2_FLOW_HDR_QINQ, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_C_TPID, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN]; + item->value.hdr.vlan_hdr.type = pattern_out->item_spec.qinq.type; + item->mask.hdr.vlan_hdr.type = pattern_out->item_mask.qinq.type; + set_bit(SXE2_FLOW_FLD_ID_C_TPID, map_spec); + LOG_DEBUG_BDF("prot_type %d, cvlan_tpid[%d], mask[%d]\n", + item->type, pattern_out->item_spec.qinq.type, + pattern_out->item_mask.qinq.type); + } + + if (test_bit(SXE2_FLOW_FLD_ID_C_VID, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN]; + item->value.hdr.vlan_hdr.vlan = pattern_out->item_spec.qinq.vlan; + item->mask.hdr.vlan_hdr.vlan = pattern_out->item_mask.qinq.vlan; + set_bit(SXE2_FLOW_FLD_ID_C_VID, map_spec); + LOG_DEBUG_BDF("prot_type %d, cvlan_id[%d], mask[%d]\n", + item->type, pattern_out->item_spec.qinq.vlan, + pattern_out->item_mask.qinq.vlan); + } + + if (test_bit(SXE2_FLOW_FLD_ID_C_TCI, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_VLAN]; + item->value.hdr.vlan_hdr.vlan = pattern_out->item_spec.qinq.vlan; + item->mask.hdr.vlan_hdr.vlan = pattern_out->item_mask.qinq.vlan; + set_bit(SXE2_FLOW_FLD_ID_C_TCI, map_spec); + LOG_DEBUG_BDF("prot_type %d, cvlan_tci[%d], mask[%d]\n", + item->type, pattern_out->item_spec.qinq.vlan, + pattern_out->item_mask.qinq.vlan); + } + } + + if (test_bit(SXE2_FLOW_HDR_IPV4, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV4_SADDR]; + item->value.hdr.ipv4_hdr.saddr = + pattern_out->item_spec.ipv4.saddr; + item->mask.hdr.ipv4_hdr.saddr = 
pattern_out->item_mask.ipv4.saddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d, saddr[%d], mask[0x%x]\n", item->type, + pattern_out->item_spec.ipv4.saddr, + pattern_out->item_mask.ipv4.saddr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV4_DADDR]; + item->value.hdr.ipv4_hdr.daddr = + pattern_out->item_spec.ipv4.daddr; + item->mask.hdr.ipv4_hdr.daddr = pattern_out->item_mask.ipv4.daddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, daddr[%d], mask[0x%x]\n", item->type, + pattern_out->item_spec.ipv4.daddr, + pattern_out->item_mask.ipv4.daddr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV4_TOS]; + item->value.hdr.ipv4_hdr.tos = pattern_out->item_spec.ipv4.tos; + item->mask.hdr.ipv4_hdr.tos = pattern_out->item_mask.ipv4.tos; + set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, map_spec); + LOG_DEBUG_BDF("prot_type %d, tos[%d], mask[0x%x]\n", item->type, + pattern_out->item_spec.ipv4.tos, + pattern_out->item_mask.ipv4.tos); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV4_TTL]; + item->value.hdr.ipv4_hdr.ttl = pattern_out->item_spec.ipv4.ttl; + item->mask.hdr.ipv4_hdr.ttl = pattern_out->item_mask.ipv4.ttl; + set_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, map_spec); + LOG_DEBUG_BDF("prot_type %d, ttl[%d], mask[0x%x]\n", item->type, + pattern_out->item_spec.ipv4.ttl, + pattern_out->item_mask.ipv4.ttl); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV4_PROT]; + item->value.hdr.ipv4_hdr.protocol = + pattern_out->item_spec.ipv4.protocol; + item->mask.hdr.ipv4_hdr.protocol = + pattern_out->item_mask.ipv4.protocol; + set_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, map_spec); + LOG_DEBUG_BDF("prot_type %d, ipv4_protocol[%d], mask[0x%x]\n", + item->type, pattern_out->item_spec.ipv4.protocol, + pattern_out->item_mask.ipv4.protocol); + } + } + + if (test_bit(SXE2_FLOW_HDR_IPV6, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_SA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV6_SADDR]; + memcpy(item->value.hdr.ipv6_hdr.saddr, + pattern_out->item_spec.ipv6.saddr, SXE2_IPV6_ADDR_LENGTH); + memcpy(item->mask.hdr.ipv6_hdr.saddr, + pattern_out->item_mask.ipv6.saddr, SXE2_IPV6_ADDR_LENGTH); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d,saddr[0x%x:%x:%x:%x],mask[0x%x:%x:%x:%x]\n", + item->type, pattern_out->item_spec.ipv6.saddr32[0], + pattern_out->item_spec.ipv6.saddr32[1], + pattern_out->item_spec.ipv6.saddr32[2], + pattern_out->item_spec.ipv6.saddr32[3], + pattern_out->item_mask.ipv6.saddr32[0], + pattern_out->item_mask.ipv6.saddr32[1], + pattern_out->item_mask.ipv6.saddr32[2], + pattern_out->item_mask.ipv6.saddr32[3]); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_DA, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_IPV6_DADDR]; + memcpy(item->value.hdr.ipv6_hdr.daddr, + pattern_out->item_spec.ipv6.daddr, SXE2_IPV6_ADDR_LENGTH); + memcpy(item->mask.hdr.ipv6_hdr.daddr, + pattern_out->item_mask.ipv6.daddr, SXE2_IPV6_ADDR_LENGTH); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, daddr[0x%x:%x:%x:%x],mask[0x%x:%x:%x:%x]\n", + item->type, pattern_out->item_spec.ipv6.daddr32[0], + pattern_out->item_spec.ipv6.daddr32[1], + pattern_out->item_spec.ipv6.daddr32[2], + pattern_out->item_spec.ipv6.daddr32[3], + pattern_out->item_mask.ipv6.daddr32[0], + 
pattern_out->item_mask.ipv6.daddr32[1], + pattern_out->item_mask.ipv6.daddr32[2], + pattern_out->item_mask.ipv6.daddr32[3]); + } + } + + if (test_bit(SXE2_FLOW_HDR_TCP, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, pattern_out->map_spec)) { + item = &fltr->items[SXE2_LAST_TCP_SPORT]; + item->value.hdr.tcp_hdr.source = + pattern_out->item_spec.tcp.source; + item->mask.hdr.tcp_hdr.source = pattern_out->item_mask.tcp.source; + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, tcp sport[%d], mask[0x%x]\n", + item->type, pattern_out->item_spec.tcp.source, + pattern_out->item_mask.tcp.source); + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, pattern_out->map_spec)) { + item = &fltr->items[SXE2_LAST_TCP_DPORT]; + item->value.hdr.tcp_hdr.dest = pattern_out->item_spec.tcp.dest; + item->mask.hdr.tcp_hdr.dest = pattern_out->item_mask.tcp.dest; + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, tcp dport[%d], mask[0x%x]\n", + item->type, pattern_out->item_spec.tcp.dest, + pattern_out->item_mask.tcp.dest); + } + } + if (test_bit(SXE2_FLOW_HDR_UDP, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_UDP_SPORT]; + item->value.hdr.udp_hdr.source = + pattern_out->item_spec.udp.source; + item->mask.hdr.udp_hdr.source = pattern_out->item_mask.udp.source; + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, udp sport[%d], mask[0x%x]\n", + item->type, pattern_out->item_spec.udp.source, + pattern_out->item_mask.udp.source); + } + + if (test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, pattern_out->map_spec)) { + item = &fltr->items[SXE2_OUTER_UDP_DPORT]; + item->value.hdr.udp_hdr.dest = pattern_out->item_spec.udp.dest; + item->mask.hdr.udp_hdr.dest = pattern_out->item_mask.udp.dest; + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, udp dport[%d], mask[0x%x]\n", + item->type, pattern_out->item_spec.udp.dest, + pattern_out->item_mask.udp.dest); + } + } + + if (test_bit(SXE2_FLOW_HDR_GENEVE, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_GENEVE_VNI, pattern_out->map_spec)) { + item = &fltr->items[SXE2_GENEVE_ENC_ID]; + item->value.hdr.udp_tnl_hdr.vni = + pattern_out->item_spec.geneve.vni; + item->mask.hdr.udp_tnl_hdr.vni = + pattern_out->item_mask.geneve.vni; + set_bit(SXE2_FLOW_FLD_ID_GENEVE_VNI, map_spec); + LOG_DEBUG_BDF("prot_type %d, geneve vni[%u], mask[0x%x]\n", + item->type, pattern_out->item_spec.geneve.vni, + pattern_out->item_mask.geneve.vni); + } + } + + if (test_bit(SXE2_FLOW_HDR_VXLAN, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_VXLAN_VNI, pattern_out->map_spec)) { + item = &fltr->items[SXE2_VXLAN_ENC_ID]; + item->value.hdr.udp_tnl_hdr.vni = + pattern_out->item_spec.vxlan.vni; + item->mask.hdr.udp_tnl_hdr.vni = pattern_out->item_mask.vxlan.vni; + set_bit(SXE2_FLOW_FLD_ID_VXLAN_VNI, map_spec); + LOG_DEBUG_BDF("prot_type %d, vxlan vni[%u], mask[0x%x]\n", + item->type, pattern_out->item_spec.vxlan.vni, + pattern_out->item_mask.vxlan.vni); + } + } + + if (test_bit(SXE2_FLOW_HDR_GRE, pattern_out->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_NVGRE_TNI, pattern_out->map_spec)) { + item = &fltr->items[SXE2_NVGRE_ENC_ID]; + item->value.hdr.nvgre_hdr.tni = pattern_out->item_spec.nvgre.tni; + item->mask.hdr.nvgre_hdr.tni = pattern_out->item_mask.nvgre.tni; + set_bit(SXE2_FLOW_FLD_ID_NVGRE_TNI, map_spec); + LOG_DEBUG_BDF("prot_type %d, nvgre vni[%u], mask[0x%x]\n", + item->type, 
pattern_out->item_spec.nvgre.tni, + pattern_out->item_mask.nvgre.tni); + } + } + if (!bitmap_equal(pattern_out->map_spec, map_spec, SXE2_FLOW_FLD_ID_MAX)) { + ret = -EINVAL; + goto l_end; + } +l_end: + return ret; +} + +static s32 sxe2_com_flow_switch_pattern_inner(struct sxe2_adapter *adapter, + struct sxe2_flow_pattern *pattern_in, + struct sxe2_user_cpx_fltr *fltr) +{ + s32 ret = 0; + struct sxe2_tcf_key_item *item; + DECLARE_BITMAP(map_spec, SXE2_FLOW_FLD_ID_MAX); + + bitmap_zero(map_spec, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_HDR_ETH, pattern_in->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_ETH_DA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_DMAC]; + memcpy(item->value.hdr.eth_hdr.dst_addr, + pattern_in->item_spec.eth.dst_addr, ETH_ALEN); + memcpy(item->mask.hdr.eth_hdr.dst_addr, + pattern_in->item_mask.eth.dst_addr, ETH_ALEN); + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, dmac[%pM], mask[%pM]\n", item->type, + pattern_in->item_spec.eth.dst_addr, + pattern_in->item_mask.eth.dst_addr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_ETH_SA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_SMAC]; + memcpy(item->value.hdr.eth_hdr.src_addr, + pattern_in->item_spec.eth.src_addr, ETH_ALEN); + memcpy(item->mask.hdr.eth_hdr.src_addr, + pattern_in->item_mask.eth.src_addr, ETH_ALEN); + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d, smac[%pM], mask[%pM]\n", item->type, + pattern_in->item_spec.eth.src_addr, + pattern_in->item_mask.eth.src_addr); + } + if (test_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_ETYPE]; + item->value.hdr.ethertype.ethtype_id = + pattern_in->item_spec.eth.ether_type; + item->mask.hdr.ethertype.ethtype_id = + pattern_in->item_mask.eth.ether_type; + set_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, map_spec); + LOG_DEBUG_BDF("prot_type %d, ether_type[%d], mask[%d]\n", + item->type, pattern_in->item_spec.eth.ether_type, + pattern_in->item_mask.eth.ether_type); + } + } + + if (test_bit(SXE2_FLOW_HDR_IPV4, pattern_in->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV4_SADDR]; + item->value.hdr.ipv4_hdr.saddr = pattern_in->item_spec.ipv4.saddr; + item->mask.hdr.ipv4_hdr.saddr = pattern_in->item_mask.ipv4.saddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d, saddr[%d], mask[0x%x]\n", item->type, + pattern_in->item_spec.ipv4.saddr, + pattern_in->item_mask.ipv4.saddr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV4_DADDR]; + item->value.hdr.ipv4_hdr.daddr = pattern_in->item_spec.ipv4.daddr; + item->mask.hdr.ipv4_hdr.daddr = pattern_in->item_mask.ipv4.daddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, daddr[%d], mask[0x%x]\n", item->type, + pattern_in->item_spec.ipv4.daddr, + pattern_in->item_mask.ipv4.daddr); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV4_TOS]; + item->value.hdr.ipv4_hdr.tos = pattern_in->item_spec.ipv4.tos; + item->mask.hdr.ipv4_hdr.tos = pattern_in->item_mask.ipv4.tos; + set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, map_spec); + LOG_DEBUG_BDF("prot_type %d, tos[%d], mask[0x%x]\n", item->type, + pattern_in->item_spec.ipv4.tos, + pattern_in->item_mask.ipv4.tos); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV4_TTL]; + item->value.hdr.ipv4_hdr.ttl = 
pattern_in->item_spec.ipv4.ttl; + item->mask.hdr.ipv4_hdr.ttl = pattern_in->item_mask.ipv4.ttl; + set_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, map_spec); + LOG_DEBUG_BDF("prot_type %d, ttl[%d], mask[0x%x]\n", item->type, + pattern_in->item_spec.ipv4.ttl, + pattern_in->item_mask.ipv4.ttl); + } + } + + if (test_bit(SXE2_FLOW_HDR_IPV6, pattern_in->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_SA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV6_SADDR]; + memcpy(item->value.hdr.ipv6_hdr.saddr, + pattern_in->item_spec.ipv6.saddr, SXE2_IPV6_ADDR_LENGTH); + memcpy(item->mask.hdr.ipv6_hdr.saddr, + pattern_in->item_mask.ipv6.saddr, SXE2_IPV6_ADDR_LENGTH); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, map_spec); + LOG_DEBUG_BDF("prot_type %d, saddr[0x%x:%x:%x:%x],mask[0x%x:%x:%x:%x]\n", + item->type, pattern_in->item_spec.ipv6.saddr32[0], + pattern_in->item_spec.ipv6.saddr32[1], + pattern_in->item_spec.ipv6.saddr32[2], + pattern_in->item_spec.ipv6.saddr32[3], + pattern_in->item_mask.ipv6.saddr32[0], + pattern_in->item_mask.ipv6.saddr32[1], + pattern_in->item_mask.ipv6.saddr32[2], + pattern_in->item_mask.ipv6.saddr32[3]); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_DA, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_IPV6_DADDR]; + memcpy(item->value.hdr.ipv6_hdr.daddr, + pattern_in->item_spec.ipv6.daddr, SXE2_IPV6_ADDR_LENGTH); + memcpy(item->mask.hdr.ipv6_hdr.daddr, + pattern_in->item_mask.ipv6.daddr, SXE2_IPV6_ADDR_LENGTH); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, map_spec); + LOG_DEBUG_BDF("prot_type %d, daddr[0x%x:%x:%x:%x],mask[0x%x:%x:%x:%x]\n", + item->type, pattern_in->item_spec.ipv6.daddr32[0], + pattern_in->item_spec.ipv6.daddr32[1], + pattern_in->item_spec.ipv6.daddr32[2], + pattern_in->item_spec.ipv6.daddr32[3], + pattern_in->item_mask.ipv6.daddr32[0], + pattern_in->item_mask.ipv6.daddr32[1], + pattern_in->item_mask.ipv6.daddr32[2], + pattern_in->item_mask.ipv6.daddr32[3]); + } + } + + if (test_bit(SXE2_FLOW_HDR_TCP, pattern_in->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, pattern_in->map_spec)) { + item = &fltr->items[SXE2_LAST_TCP_SPORT]; + item->value.hdr.tcp_hdr.source = pattern_in->item_spec.tcp.source; + item->mask.hdr.tcp_hdr.source = pattern_in->item_mask.tcp.source; + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, tcp sport[%d], mask[0x%x]\n", + item->type, pattern_in->item_spec.tcp.source, + pattern_in->item_mask.tcp.source); + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, pattern_in->map_spec)) { + item = &fltr->items[SXE2_LAST_TCP_DPORT]; + item->value.hdr.tcp_hdr.dest = pattern_in->item_spec.tcp.dest; + item->mask.hdr.tcp_hdr.dest = pattern_in->item_mask.tcp.dest; + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, tcp dport[%d], mask[0x%x]\n", + item->type, pattern_in->item_spec.tcp.dest, + pattern_in->item_mask.tcp.dest); + } + } + + if (test_bit(SXE2_FLOW_HDR_UDP, pattern_in->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_UDP_SPORT]; + item->value.hdr.udp_hdr.source = pattern_in->item_spec.udp.source; + item->mask.hdr.udp_hdr.source = pattern_in->item_mask.udp.source; + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, udp sport[%d], mask[0x%x]\n", + item->type, pattern_in->item_spec.udp.source, + pattern_in->item_mask.udp.source); + } + + if (test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, pattern_in->map_spec)) { + item = &fltr->items[SXE2_INNER_UDP_DPORT]; + item->value.hdr.udp_hdr.dest = 
pattern_in->item_spec.udp.dest; + item->mask.hdr.udp_hdr.dest = pattern_in->item_mask.udp.dest; + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, map_spec); + LOG_DEBUG_BDF("prot_type %d, udp dport[%d], mask[0x%x]\n", + item->type, pattern_in->item_spec.udp.dest, + pattern_in->item_mask.udp.dest); + } + } + + if (!bitmap_equal(pattern_in->map_spec, map_spec, SXE2_FLOW_FLD_ID_MAX)) { + ret = -EINVAL; + goto l_end; + } +l_end: + return ret; +} + +static s32 sxe2_com_flow_switch_pattern_post_proc(struct sxe2_user_cpx_fltr *fltr) +{ + struct sxe2_tcf_key_item *item; + u16 i, j; + + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + item = &fltr->items[i]; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) + item->value.raw[j] = item->value.raw[j] & item->mask.raw[j]; + } + return 0; +} + +static s32 sxe2_com_flow_switch_action(struct sxe2_adapter *adapter, + struct sxe2_flow_action *flow_action, + u16 rule_vsi_id, struct sxe2_user_cpx_fltr *fltr) +{ + s32 ret = 0; + u8 cnt = 0; + + fltr->dst_vsi_id = rule_vsi_id; + if (test_bit(SXE2_FLOW_ACTION_DROP, flow_action->act_types)) { + fltr->action = SXE2_DROP_PACKET; + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_QUEUE, flow_action->act_types)) { + fltr->action = SXE2_FWD_TO_Q; + fltr->dst_vsi_id = flow_action->queue.vsi_index; + fltr->dst_queue_id = flow_action->queue.q_index; + fltr->dst_queue_high = true; + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_Q_REGION, flow_action->act_types)) { + fltr->action = SXE2_FWD_TO_QGRP; + fltr->dst_vsi_id = flow_action->q_region.vsi_index; + fltr->dst_queue_id = flow_action->q_region.q_index; + fltr->dst_queue_group = flow_action->q_region.region; + fltr->dst_queue_high = true; + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_TO_VSI, flow_action->act_types)) { + fltr->dst_vsi_id = flow_action->vsi.vsi_index; + fltr->action = SXE2_FWD_TO_VSI; + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_TO_VSI_LIST, flow_action->act_types)) { + fltr->action = SXE2_FWD_TO_VSI_LIST; + memcpy(fltr->dst_vsi_map, flow_action->vsi_list.vsi_list_map, + sizeof(fltr->dst_vsi_map)); + cnt++; + } + if (cnt > 1) + ret = -EINVAL; + + LOG_DEBUG_BDF("action %d,dst_vsi_id[%d],dst_queue_id[%d],dst_queue_group[%d]\n", + fltr->action, fltr->dst_vsi_id, fltr->dst_queue_id, + fltr->dst_queue_group); + return ret; +} + +static s32 sxe2_com_flow_switch_filter(struct sxe2_adapter *adapter, u16 rule_vsi_id, + bool is_add, struct sxe2_drv_flow_filter_req *req) +{ + struct sxe2_user_cpx_fltr *user_cpx_fltr; + s32 ret = 0; + + user_cpx_fltr = kzalloc(sizeof(*user_cpx_fltr), GFP_KERNEL); + if (!user_cpx_fltr) { + LOG_ERROR_BDF("alloc memory failed, size %ld\n", sizeof(*user_cpx_fltr)); + ret = -ENOMEM; + goto l_end; + } + user_cpx_fltr->adapter = adapter; + + ret = sxe2_com_flow_switch_meta(adapter, rule_vsi_id, req, user_cpx_fltr); + if (ret) { + LOG_ERROR_BDF("switch meta error, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_com_flow_switch_pattern_outer(adapter, &req->pattern_outer, + user_cpx_fltr); + if (ret) { + LOG_ERROR_BDF("switch pattern outer error, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_com_flow_switch_pattern_inner(adapter, &req->pattern_inner, + user_cpx_fltr); + if (ret) { + LOG_ERROR_BDF("switch pattern inner error, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_com_flow_switch_pattern_post_proc(user_cpx_fltr); + if (ret) { + LOG_ERROR_BDF("switch pattern post proc error, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_com_flow_switch_action(adapter, &req->action, rule_vsi_id, + user_cpx_fltr); + if (ret) { + LOG_ERROR_BDF("switch action 
error, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_ucmd_complex_fltr_proc(user_cpx_fltr, is_add); + if (ret) + LOG_ERROR_BDF("switch complex rule config failed, ret=%d\n", ret); + +l_end: + kfree(user_cpx_fltr); + return ret; +} + +static s32 sxe2_com_flow_switch_filter_add(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_drv_flow_filter_resp *resp) +{ + s32 ret = 0; + static u32 flow_id; + + ret = sxe2_com_flow_switch_filter(adapter, rule_vsi_id, true, req); + if (!ret) { + resp->flow_id = flow_id; + resp->engine_type = SXE2_FLOW_ENGINE_SWITCH; + flow_id++; + } + + return ret; +} + +static s32 sxe2_com_flow_switch_filter_del(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req) +{ + return sxe2_com_flow_switch_filter(adapter, rule_vsi_id, false, req); +} + +static void sxe2_com_fnav_fld_convert_msg(unsigned long *flds, + struct sxe2_fnav_comm_proto_hdr *proto_hdr) +{ + u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + u32 i = 0; + + bitmap_to_arr32(tmp_flds, flds, SXE2_FLOW_FLD_ID_MAX); + + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++) + proto_hdr->flds[i] = cpu_to_le32(tmp_flds[i]); +} + +static s32 +sxe2_com_flow_fnav_filter_add_pattern(struct sxe2_fnav_comm_proto_hdr *proto_hdr, + struct sxe2_flow_pattern *pattern, u8 is_inner) +{ + u8 cnt = 0; + u8 tunnel_level = is_inner ? SXE2_FNAV_TUNNEL_INNER : SXE2_FNAV_TUNNEL_OUTER; + struct sxe2_fnav_comm_proto_hdr *hdr = &proto_hdr[cnt]; + DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX); + + if (test_bit(SXE2_FLOW_HDR_ETH, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_ETH; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_ETH_DA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, flds); + memcpy(hdr->eth.dst, pattern->item_spec.eth.dst_addr, + SXE2_FNAV_ETH_ADDR_LEN); + } + if (test_bit(SXE2_FLOW_FLD_ID_ETH_SA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, flds); + memcpy(hdr->eth.src, pattern->item_spec.eth.src_addr, + SXE2_FNAV_ETH_ADDR_LEN); + } + if (test_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, flds); + hdr->eth.etype = pattern->item_spec.eth.ether_type; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_VLAN, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_VLAN; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_S_TPID, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_S_TPID, flds); + hdr->vlan.vlan_type = pattern->item_spec.vlan.type; + } + if (test_bit(SXE2_FLOW_FLD_ID_S_TCI, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_S_TCI, flds); + hdr->vlan.vlan_tci = pattern->item_spec.vlan.vlan; + } + if (test_bit(SXE2_FLOW_FLD_ID_S_VID, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_S_VID, flds); + hdr->vlan.vlan_vid = pattern->item_spec.vlan.vlan; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + + if (test_bit(SXE2_FLOW_HDR_QINQ, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_VLAN; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_C_TPID, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_C_TPID, flds); + hdr->vlan.vlan_type = pattern->item_spec.qinq.type; + } + if (test_bit(SXE2_FLOW_FLD_ID_C_TCI, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_C_TCI, flds); + 
hdr->vlan.vlan_tci = pattern->item_spec.qinq.vlan; + } + if (test_bit(SXE2_FLOW_FLD_ID_C_VID, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_C_VID, flds); + hdr->vlan.vlan_vid = pattern->item_spec.qinq.vlan; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + + if (test_bit(SXE2_FLOW_HDR_IPV4, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_IPV4; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, flds); + hdr->ipv4.saddr = pattern->item_spec.ipv4.saddr; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, flds); + hdr->ipv4.daddr = pattern->item_spec.ipv4.daddr; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, flds); + hdr->ipv4.tos = pattern->item_spec.ipv4.tos; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_TTL, flds); + hdr->ipv4.ttl = pattern->item_spec.ipv4.ttl; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, flds); + hdr->ipv4.proto = pattern->item_spec.ipv4.protocol; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + + if (test_bit(SXE2_FLOW_HDR_IPV6, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_IPV6; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_SA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, flds); + memcpy(hdr->ipv6.src_ip, pattern->item_spec.ipv6.saddr, + SXE2_IPV6_ADDR_LENGTH); + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_DA, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, flds); + memcpy(hdr->ipv6.dst_ip, pattern->item_spec.ipv6.daddr, + SXE2_IPV6_ADDR_LENGTH); + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_DSCP, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_DSCP, flds); + hdr->ipv6.tc = (be32_to_cpu(pattern->item_spec.ipv6.pri_ver_flow) >> + SXE2_IPV6_TC_SHIFT) & + SXE2_IPV6_TC_MASK; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_TTL, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_TTL, flds); + hdr->ipv6.hlim = pattern->item_spec.ipv6.hop_limit; + } + if (test_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, flds); + hdr->ipv6.proto = pattern->item_spec.ipv6.nexthdr; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_IPV_FRAG, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_IPV_FRAG; + hdr->tunnel_level = tunnel_level; + } + if (test_bit(SXE2_FLOW_HDR_IPV_OTHER, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_IPV_OTHER; + hdr->tunnel_level = tunnel_level; + } + + if (test_bit(SXE2_FLOW_HDR_TCP, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_TCP; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, flds); + hdr->l4.src_port = pattern->item_spec.tcp.source; + } + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, flds); + hdr->l4.dst_port = pattern->item_spec.tcp.dest; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_UDP, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = 
SXE2_FLOW_HDR_UDP; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, flds); + hdr->l4.src_port = pattern->item_spec.udp.source; + } + if (test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, flds); + hdr->l4.dst_port = pattern->item_spec.udp.dest; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_SCTP, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_SCTP; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, flds); + hdr->l4.src_port = pattern->item_spec.sctp.src_port; + } + if (test_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, flds); + hdr->l4.dst_port = pattern->item_spec.sctp.dst_port; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_GENEVE, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_GENEVE; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_GENEVE_VNI, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_GENEVE_VNI, flds); + hdr->geneve.vni = pattern->item_spec.geneve.vni; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_VXLAN, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_VXLAN; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_VXLAN_VNI, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_VXLAN_VNI, flds); + hdr->vxlan.vni = pattern->item_spec.vxlan.vni; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_GTPU, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_GTPU; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_GTPU_TEID, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_GTPU_TEID, flds); + hdr->gtpu.teid = pattern->item_spec.gtpu.teid; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + if (test_bit(SXE2_FLOW_HDR_GRE, pattern->hdrs)) { + hdr = &proto_hdr[cnt]; + cnt++; + hdr->type = SXE2_FLOW_HDR_GRE; + hdr->tunnel_level = tunnel_level; + bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX); + if (test_bit(SXE2_FLOW_FLD_ID_NVGRE_TNI, pattern->map_spec)) { + set_bit(SXE2_FLOW_FLD_ID_NVGRE_TNI, flds); + hdr->gre.tni = pattern->item_spec.nvgre.tni; + } + sxe2_com_fnav_fld_convert_msg(flds, hdr); + } + return cnt; +} + +static s32 sxe2_com_flow_fnav_filter_add_action(struct sxe2_fnav_comm_action *action, + struct sxe2_flow_action *flow_action, + u16 *dst_vsi_id) +{ + u8 cnt = 0; + + if (test_bit(SXE2_FLOW_ACTION_QUEUE, flow_action->act_types)) { + *dst_vsi_id = flow_action->queue.vsi_index; + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_QUEUE); + action[cnt].act_queue.q_index = cpu_to_le16(flow_action->queue.q_index); + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_Q_REGION, flow_action->act_types)) { + *dst_vsi_id = flow_action->q_region.vsi_index; + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_Q_REGION); + action[cnt].act_q_region.q_index = cpu_to_le16(flow_action->q_region.q_index); + action[cnt].act_q_region.region = flow_action->q_region.region; + cnt++; + } + if 
(test_bit(SXE2_FLOW_ACTION_MARK, flow_action->act_types)) { + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_MARK); + action[cnt].act_mark.mark_id = cpu_to_le32(flow_action->mark.mark_id); + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_COUNT, flow_action->act_types)) { + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_COUNT); + action[cnt].act_count.stat_ctrl = cpu_to_le32(flow_action->count.stat_ctrl); + action[cnt].act_count.stat_index = cpu_to_le32(flow_action->count.stat_index); + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_PASSTHRU, flow_action->act_types)) { + *dst_vsi_id = flow_action->passthru.vsi_index; + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_PASSTHRU); + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_DROP, flow_action->act_types)) { + action[cnt].type = cpu_to_le32(SXE2_FNAV_ACTION_DROP); + cnt++; + } + if (test_bit(SXE2_FLOW_ACTION_TO_VSI, flow_action->act_types)) + *dst_vsi_id = flow_action->vsi.vsi_index; + + return cnt; +} + +static s32 sxe2_com_flow_fnav_filter_add(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_drv_flow_filter_resp *resp) +{ + u16 src_vsi_id = cpu_to_le16(req->meta.flow_src_vsi); + u16 dst_vsi_id = cpu_to_le16(req->meta.flow_src_vsi); + struct sxe2_fnav_comm_full_msg filter_msg = {0}; + u32 flow_id = 0; + u8 hdr_cnt_out = 0; + u8 hdr_cnt_in = 0; + u8 action_cnt = 0; + s32 ret = 0; + + filter_msg.filter_loc = cpu_to_le32(0xffffffff); + filter_msg.flow_type = cpu_to_le16(req->meta.flow_type); + + if (bitmap_weight(req->pattern_inner.hdrs, SXE2_FLOW_HDR_MAX) + + bitmap_weight(req->pattern_outer.hdrs, SXE2_FLOW_HDR_MAX) > + SXE2_FNAV_MAX_NUM_PROTO_HDRS) { + LOG_ERROR_BDF("too many proto hdrs, outer hdrs=%u, inner hdrs=%u\n", + bitmap_weight(req->pattern_outer.hdrs, SXE2_FLOW_HDR_MAX), + bitmap_weight(req->pattern_inner.hdrs, SXE2_FLOW_HDR_MAX)); + ret = -EINVAL; + goto l_end; + } + + hdr_cnt_out = sxe2_com_flow_fnav_filter_add_pattern(filter_msg.proto_hdr, + &req->pattern_outer, false); + hdr_cnt_in = sxe2_com_flow_fnav_filter_add_pattern(&filter_msg.proto_hdr[hdr_cnt_out], + &req->pattern_inner, true); + if (hdr_cnt_in + hdr_cnt_out > SXE2_FNAV_MAX_NUM_PROTO_HDRS) { + LOG_ERROR_BDF("too many proto hdrs, hdr_cnt_out=%u, hdr_cnt_in=%u\n", + hdr_cnt_out, hdr_cnt_in); + ret = -EINVAL; + goto l_end; + } + if (hdr_cnt_in > 0) + filter_msg.tunn_flag = cpu_to_le32(SXE2_FNAV_TUN_FLAG_TUNNEL); + else + filter_msg.tunn_flag = cpu_to_le32(SXE2_FNAV_TUN_FLAG_NO_TUNNEL); + + filter_msg.proto_cnt = hdr_cnt_out + hdr_cnt_in; + + action_cnt = sxe2_com_flow_fnav_filter_add_action(filter_msg.action, &req->action, + &dst_vsi_id); + if (action_cnt > SXE2_FNAV_MAX_NUM_ACTIONS) { + LOG_ERROR_BDF("too many actions, action_cnt=%u\n", action_cnt); + ret = -EINVAL; + goto l_end; + } + filter_msg.action_cnt = action_cnt; + + ret = sxe2_com_flow_vsi_id_change(adapter, src_vsi_id, &src_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + ret = sxe2_com_flow_vsi_id_change(adapter, dst_vsi_id, &dst_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + ret = sxe2_com_flow_vsi_id_change(adapter, rule_vsi_id, &rule_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + ret = sxe2_comm_add_fnav_filter(adapter, src_vsi_id, dst_vsi_id, rule_vsi_id, + &filter_msg, &flow_id); + if (ret) { + LOG_ERROR_BDF("sxe2_comm_add_fnav_filter failed, ret=%d\n", ret); + goto l_end; + } 
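+	/* fnav rule accepted by firmware: return the assigned flow id and engine type to the caller */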
+ resp->flow_id = flow_id; + resp->engine_type = SXE2_FLOW_ENGINE_FNAV; +l_end: + return ret; +} + +static s32 sxe2_com_flow_rss_filter_add(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_drv_flow_filter_resp *resp) +{ + s32 ret = 0; + struct sxe2_rss_hash_cfg hash_cfg; + struct sxe2_flow_action_rss *rss = &req->action.rss; + struct sxe2_vsi *vsi; + u8 hash_type_old; + + (void)memset(&hash_cfg, 0, sizeof(hash_cfg)); + + if (!test_bit(SXE2_FLOW_ACTION_RSS, req->action.act_types)) { + ret = -EINVAL; + LOG_ERROR_BDF("rss action is not set\n"); + goto l_end; + } + if (rss->func == SXE2_RSS_HASH_FUNC_XOR) { + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->meta.flow_src_vsi); + if (!vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->meta.flow_src_vsi); + ret = -EFAULT; + goto l_end; + } + hash_type_old = vsi->rss_ctxt.hash_type; + vsi->rss_ctxt.hash_type = rss->func; + ret = sxe2_fwc_rss_hash_ctrl_set(vsi); + if (ret != 0) + vsi->rss_ctxt.hash_type = hash_type_old; + + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_DEBUG_BDF("vsi[%u] rss hash type[%u] set ret[%d]\n", + req->meta.flow_src_vsi, rss->func, ret); + } else { + hash_cfg.hdr_type = rss->hdr_type; + if (rss->func == SXE2_RSS_HASH_FUNC_TOEPLITZ) { + hash_cfg.symm = 0; + } else if (rss->func == SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ) { + hash_cfg.symm = 1; + } else { + ret = -EINVAL; + LOG_ERROR_BDF("invalid rss hash func %u\n", rss->func); + goto l_end; + } + if (rss->is_inner) { + bitmap_copy(hash_cfg.headers, rss->hdr_in, SXE2_FLOW_HDR_MAX); + } else { + bitmap_copy(hash_cfg.headers, rss->hdr_out, SXE2_FLOW_HDR_MAX); + clear_bit(SXE2_FLOW_HDR_QINQ, hash_cfg.headers); + } + bitmap_copy(hash_cfg.hash_flds, rss->fld, SXE2_FLOW_FLD_ID_MAX); + + ret = sxe2_com_flow_vsi_id_change(adapter, rule_vsi_id, &rule_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", + ret); + goto l_end; + } + + ret = sxe2_add_rss_flow(&adapter->rss_flow_ctxt, rule_vsi_id, &hash_cfg); + if (ret) { + LOG_ERROR_BDF("sxe2_add_rss_flow failed, ret=%d\n", ret); + goto l_end; + } + } + + sxe2_com_flow_rss_flow_id++; + resp->flow_id = sxe2_com_flow_rss_flow_id; + resp->engine_type = SXE2_FLOW_ENGINE_RSS; + +l_end: + return ret; +} + +s32 sxe2_com_flow_filter_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_flow_filter_resp resp = {0}; + u16 rule_vsi_id = 0; + s32 ret = 0; + struct sxe2_drv_flow_filter_req *req; + + req = (struct sxe2_drv_flow_filter_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + rule_vsi_id = req->meta.flow_rule_vsi; + + if (req->engine_type == SXE2_FLOW_ENGINE_RSS) { + ret = sxe2_com_flow_rss_filter_add(adapter, rule_vsi_id, req, &resp); + } else if (req->engine_type == SXE2_FLOW_ENGINE_FNAV) { + ret = sxe2_com_flow_fnav_filter_add(adapter, rule_vsi_id, req, &resp); + } else if (req->engine_type == SXE2_FLOW_ENGINE_SWITCH) { + ret = sxe2_com_flow_switch_filter_add(adapter, rule_vsi_id, req, &resp); + } else if (req->engine_type == SXE2_FLOW_ENGINE_ACL) { + ret = sxe2_com_flow_acl_filter_add(adapter, rule_vsi_id, req, &resp); + } else { + LOG_ERROR_BDF("invalid flow engine type %d\n", req->engine_type); + ret = -EINVAL; + goto l_end; + } + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) 
{ + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(resp)); + ret = -EFAULT; + goto l_end; + } +l_end: + kfree(req); + return ret; +} + +static s32 sxe2_com_flow_fnav_filter_del(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req) +{ + s32 ret = 0; + + ret = sxe2_com_flow_vsi_id_change(adapter, rule_vsi_id, &rule_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + ret = sxe2_fnav_del_filter_by_flow_id(adapter, rule_vsi_id, req->flow_id); + if (ret) { + LOG_ERROR_BDF("sxe2_fnav_del_filter_by_flow_id failed, ret=%d\n", ret); + goto l_end; + } +l_end: + return ret; +} + +static s32 sxe2_com_flow_rss_filter_del(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req) +{ + s32 ret = 0; + struct sxe2_rss_hash_cfg hash_cfg; + struct sxe2_flow_action_rss *rss = &req->action.rss; + struct sxe2_vsi *vsi; + u8 hash_type_old; + + (void)memset(&hash_cfg, 0, sizeof(hash_cfg)); + + if (!test_bit(SXE2_FLOW_ACTION_RSS, req->action.act_types)) { + ret = -EINVAL; + LOG_ERROR_BDF("rss action is not set\n"); + goto l_end; + } + if (rss->func == SXE2_RSS_HASH_FUNC_XOR) { + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->meta.flow_src_vsi); + if (!vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->meta.flow_src_vsi); + ret = -EFAULT; + goto l_end; + } + hash_type_old = vsi->rss_ctxt.hash_type; + vsi->rss_ctxt.hash_type = SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ; + ret = sxe2_fwc_rss_hash_ctrl_set(vsi); + if (ret != 0) + vsi->rss_ctxt.hash_type = hash_type_old; + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_DEBUG_BDF("vsi[%u] rss hash type[%u] set ret[%d]\n", + req->meta.flow_src_vsi, SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ, + ret); + } else { + hash_cfg.hdr_type = rss->hdr_type; + if (rss->func == SXE2_RSS_HASH_FUNC_TOEPLITZ) { + hash_cfg.symm = 0; + } else if (rss->func == SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ) { + hash_cfg.symm = 1; + } else { + ret = -EINVAL; + LOG_ERROR_BDF("invalid rss hash func %u\n", rss->func); + goto l_end; + } + + if (rss->is_inner) { + bitmap_copy(hash_cfg.headers, rss->hdr_in, SXE2_FLOW_HDR_MAX); + } else { + bitmap_copy(hash_cfg.headers, rss->hdr_out, SXE2_FLOW_HDR_MAX); + clear_bit(SXE2_FLOW_HDR_QINQ, hash_cfg.headers); + } + bitmap_copy(hash_cfg.hash_flds, rss->fld, SXE2_FLOW_FLD_ID_MAX); + + ret = sxe2_com_flow_vsi_id_change(adapter, rule_vsi_id, &rule_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + ret = sxe2_rss_rem_cfg(&adapter->rss_flow_ctxt, rule_vsi_id, &hash_cfg); + if (ret) { + if (ret == -ENOENT) { + ret = 0; + LOG_INFO_BDF("rss cfg not found\n"); + goto l_end; + } + LOG_ERROR_BDF("sxe2_rss_rem_cfg failed, ret=%d\n", ret); + goto l_end; + } + } +l_end: + return ret; +} + +s32 sxe2_com_flow_filter_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + u16 rule_vsi_id = 0; + s32 ret = 0; + struct sxe2_drv_flow_filter_req *req; + + req = (struct sxe2_drv_flow_filter_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + rule_vsi_id = req->meta.flow_rule_vsi; + + if (req->engine_type == SXE2_FLOW_ENGINE_RSS) { + ret = sxe2_com_flow_rss_filter_del(adapter, rule_vsi_id, req); + } else if (req->engine_type == SXE2_FLOW_ENGINE_FNAV) { + ret = 
sxe2_com_flow_fnav_filter_del(adapter, rule_vsi_id, req); + } else if (req->engine_type == SXE2_FLOW_ENGINE_SWITCH) { + ret = sxe2_com_flow_switch_filter_del(adapter, rule_vsi_id, req); + } else if (req->engine_type == SXE2_FLOW_ENGINE_ACL) { + ret = sxe2_com_flow_acl_filter_del(adapter, rule_vsi_id, req); + } else { + LOG_ERROR_BDF("invalid flow engine type %d\n", req->engine_type); + ret = -EINVAL; + goto l_end; + } +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_flow_fnav_stat_alloc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + struct sxe2_drv_flow_fnav_get_stat_id_resp resp = {0}; + u16 rule_vsi_id; + u16 stat_index = 0; + bool need_update = false; + struct sxe2_drv_flow_fnav_get_stat_id_req *req; + + req = (struct sxe2_drv_flow_fnav_get_stat_id_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + + ret = sxe2_com_flow_vsi_id_change(adapter, cmd_buf->vsi_id, &rule_vsi_id); + if (ret) { + LOG_ERROR_BDF("sxe2_com_flow_vsi_id_change failed, ret=%d\n", ret); + goto l_end; + } + + need_update = req->need_update; + ret = sxe2_fnav_stat_idx_alloc_with_lock(adapter, rule_vsi_id, &stat_index, + need_update); + if (ret) { + LOG_ERROR_BDF("sxe2_fnav_stat_idx_alloc_with_lock failed, ret=%d\n", ret); + goto l_end; + } + resp.stat_id = stat_index; + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(resp)); + ret = -EFAULT; + goto l_end; + } +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_flow_fnav_stat_free(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + struct sxe2_drv_flow_fnav_free_stat_id_req *req; + + req = (struct sxe2_drv_flow_fnav_free_stat_id_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + + ret = sxe2_fnav_stat_idx_free_with_lock(adapter, req->stat_id); + if (ret) { + LOG_ERROR_BDF("sxe2_fnav_stat_idx_free_with_lock failed, ret=%d\n", ret); + goto l_end; + } +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_flow_fnav_stat_query(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + struct sxe2_drv_flow_fnav_query_stat_resp resp = {0}; + + u64 hits = 0; + u64 bytes = 0; + + struct sxe2_cmd_params cmd_fw = {}; + struct sxe2_fwc_fnav_stats_req req_fw = {}; + struct sxe2_fwc_fnav_stats_resp resp_fw = {}; + struct sxe2_drv_flow_fnav_query_stat_req *req; + + req = (struct sxe2_drv_flow_fnav_query_stat_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + + req_fw.is_clear = (u8)req->is_clear; + req_fw.counter_idx = cpu_to_le16(req->stat_id); + req_fw.bank_type = SXE2_FNAV_COUNTER_BANK_ALL; + + sxe2_cmd_params_dflt_fill(&cmd_fw, SXE2_CMD_FNAV_STATS_GET, &req_fw, + sizeof(req_fw), &resp_fw, sizeof(resp_fw)); + ret = sxe2_cmd_fw_exec(adapter, &cmd_fw); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav get state failed, stat_id=%u, ret=%d", + req->stat_id, ret); + } else { + switch (req->stat_ctrl) { + case SXE2_FNAV_STAT_ENA_PKTS: + hits += le64_to_cpu(resp_fw.stats[0]); + break; + case SXE2_FNAV_STAT_ENA_BYTES: + bytes += le64_to_cpu(resp_fw.stats[0]); + break; + 
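+		/*
+		 * ENA_ALL: the firmware response appears to carry the packet
+		 * hit count in stats[0] and the byte count in stats[1]; the
+		 * single-counter cases above only consume stats[0].
+		 */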
case SXE2_FNAV_STAT_ENA_ALL: + hits += le64_to_cpu(resp_fw.stats[0]); + bytes += le64_to_cpu(resp_fw.stats[1]); + break; + default: + break; + } + } + resp.stat_hits = hits; + resp.stat_bytes = bytes; + resp.stat_index = req->stat_id; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(resp)); + ret = -EFAULT; + goto l_end; + } +l_end: + kfree(req); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..1b1e0b7ea7602ceaea4ab7ed75b511522d68daf8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_flow.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_flow.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_FLOW_H__ +#define __SXE2_COM_FLOW_H__ + +s32 sxe2_com_flow_filter_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); +s32 sxe2_com_flow_filter_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_flow_fnav_stat_alloc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_flow_fnav_stat_free(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_flow_fnav_stat_query(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.c new file mode 100644 index 0000000000000000000000000000000000000000..1ef3dd3b69adc05a61ca0127298f819114a5a3c2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.c @@ -0,0 +1,1679 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_com_ioctl.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_com_ioctl.h"
+#include "sxe2_ioctl_chnl.h"
+#include "sxe2_queue.h"
+#include "sxe2_tx.h"
+#include "sxe2_rx.h"
+#include "sxe2_cmd.h"
+#include "sxe2_vsi.h"
+#include "sxe2_log.h"
+#include "sxe2_drv_cmd.h"
+#include "sxe2_com_cdev.h"
+#include "sxe2_linkchg.h"
+#include "sxe2_com_l2_filter.h"
+#include "sxe2_com_rss.h"
+#include "sxe2_com_switchdev.h"
+#include "sxe2_com_flow.h"
+#include "sxe2_common.h"
+#include "sxe2_sriov.h"
+#include "sxe2_eswitch.h"
+#include "sxe2_com_stats.h"
+#include "sxe2_acl.h"
+#include "sxe2_com_ipsec.h"
+#include "sxe2_com_vlan.h"
+#include "sxe2_netdev.h"
+#include "sxe2_ethtool.h"
+
+STATIC s32 sxe2_com_handshake_disable(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				      struct sxe2_drv_cmd_params *cmd_buf)
+{
+	return 0;
+}
+
+STATIC void sxe2_txsch_cap_get(struct sxe2_adapter *adapter,
+			       struct sxe2_drv_dev_caps_resp *resp)
+{
+	resp->txsch_caps.layer_cap = 4;
+	resp->txsch_caps.prio_num = 8;
+	resp->txsch_caps.tm_mid_node_num = 8;
+}
+
+STATIC s32 sxe2_com_cap_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+			    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_fwc_func_caps caps = {};
+	struct sxe2_drv_dev_caps_resp *resp;
+	s32 ret = 0;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	ret = sxe2_dpdk_pf_caps_get(adapter, &caps);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_dpdk_pf_caps_get failed\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	resp->dev_type = SXE2_DEV_T_PF;
+
+	resp->queue_caps.base_idx_in_pf = caps.tx_caps.base_idx;
+	resp->queue_caps.queues_cnt = caps.tx_caps.cnt;
+
+	resp->msix_caps.msix_vectors_cnt = caps.msix_caps.cnt;
+	resp->msix_caps.base_idx_in_func = caps.msix_caps.base_idx;
+
+	resp->vsi_caps.vsi_type = SXE2_VSI_T_DPDK_PF;
+	resp->vsi_caps.kernel_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev;
+	resp->vsi_caps.dpdk_vsi_id = 0xFFFF;
+	resp->vsi_caps.func_id = adapter->pf_idx;
+
+	resp->rss_hash_caps.hash_key_size = SXE2_RSS_HASH_KEY_SIZE;
+	if (adapter->q_ctxt.rxq_layout.dpdk == SXE2_DPDK_QUEUE_DFLT_CNT)
+		resp->rss_hash_caps.lut_key_size = SXE2_RSS_LUT_SIZE_512;
+	else
+		resp->rss_hash_caps.lut_key_size = adapter->caps_ctxt.max_rss_lut_size;
+
+	resp->pf_idx = adapter->pf_idx;
+	resp->port_idx = adapter->port_idx;
+
+	resp->repr_caps.cnt_repr_vf = sxe2_vf_num_get(adapter);
+	sxe2_vfs_vsi_id_get(adapter, resp->repr_caps.repr_vf_id);
+
+	sxe2_txsch_cap_get(adapter, resp);
+
+	resp->cap_flags = SXE2_DEV_CAPS_OFFLOAD_L2 | SXE2_DEV_CAPS_OFFLOAD_VLAN |
+			  SXE2_DEV_CAPS_OFFLOAD_RSS | SXE2_DEV_CAPS_OFFLOAD_FNAV |
+			  SXE2_DEV_CAPS_OFFLOAD_TM | SXE2_DEV_CAPS_OFFLOAD_Q_MAP |
+			  SXE2_DEV_CAPS_OFFLOAD_FC_STATE;
+	if (adapter->ipsec_ctxt.max_tx_sa_cnt && adapter->ipsec_ctxt.max_rx_sa_cnt)
+		resp->cap_flags |= SXE2_DEV_CAPS_OFFLOAD_IPSEC;
+
+	if (sxe2_com_resp_copy_to_user(cmd_buf, resp, sizeof(*resp), obj) != 0) {
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = sizeof(*resp);
+
+l_end:
+	kfree(resp);
+	return ret;
+}
+
+STATIC s32 sxe2_com_switchdev_info_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				       struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_switchdev_info *resp;
+	s32 ret = 0;
+
+	resp = kmalloc(sizeof(*resp),
+		       GFP_KERNEL);
+	if (!resp) {
+		ret = -ENOMEM;
+		goto l_end;
+	}
+	memset(resp, 0, sizeof(*resp));
+	if (sxe2_eswitch_is_offload(adapter) && sxe2_vf_is_exist(adapter)) {
+		resp->is_switchdev = true;
+		resp->port_name_type = SXE2_PHYS_PORT_NAME_TYPE_PFVF;
+		resp->representor = 1;
+		resp->master = 0;
+	} else {
+		resp->is_switchdev = false;
+		resp->port_name_type = SXE2_PHYS_PORT_NAME_TYPE_LEGACY;
+		resp->representor = 0;
+		resp->master = 1;
+	}
+
+	LOG_DEBUG_BDF("adapter->eswitch_ctxt.mode = %d\n", adapter->eswitch_ctxt.mode);
+	if (sxe2_com_resp_copy_to_user(cmd_buf, resp, sizeof(*resp), obj) != 0) {
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = sizeof(*resp);
+
+l_end:
+	kfree(resp);
+	return ret;
+}
+
+STATIC s32 sxe2_com_main_vsi_create(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	s32 ret = 0;
+	struct sxe2_drv_vsi_create_req_resp *req_resp;
+	struct sxe2_fwc_vsi_crud_resp fwc_resp;
+	struct sxe2_vsi_cfg_params params = {0};
+
+	req_resp = (struct sxe2_drv_vsi_create_req_resp *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req_resp) {
+		LOG_ERROR_BDF("vsi create req is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	params.type = SXE2_VSI_T_DPDK_PF;
+	ret = sxe2_dpdk_vsi_create(adapter, &params, &fwc_resp);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_dpdk_vsi_create failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	req_resp->vsi_id = fwc_resp.vsi_id;
+	req_resp->vsi_type = SXE2_VSI_T_DPDK_PF;
+	if (sxe2_com_resp_copy_to_user(cmd_buf, req_resp, sizeof(*req_resp), obj) != 0) {
+		LOG_ERROR_BDF("resp_copy_to_user failed, len=%zu\n", sizeof(*req_resp));
+		ret = -EFAULT;
+		goto l_free_vsi;
+	}
+
+	cmd_buf->resp_len = sizeof(*req_resp);
+	goto l_end;
+
+l_free_vsi:
+	params.vsi_id = req_resp->vsi_id;
+	(void)sxe2_dpdk_vsi_destroy(adapter, &params);
+
+l_end:
+	kfree(req_resp);
+	return ret;
+}
+
+static s32 sxe2_com_free_vsi(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+			     struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_vsi_cfg_params params = {0};
+	struct sxe2_drv_vsi_free_req *req;
+	s32 ret;
+
+	req = (struct sxe2_drv_vsi_free_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("vsi free req is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+	params.vsi_id = req->vsi_id;
+	ret = sxe2_dpdk_vsi_destroy(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_dpdk_vsi_destroy failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	kfree(req);
+	return ret;
+}
+
+static s32 sxe2_com_vsi_fc_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+			       struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_vsi_fc_get_req *req;
+	struct sxe2_drv_vsi_fc_get_resp resp = {0};
+	s32 ret;
+
+	req = (struct sxe2_drv_vsi_fc_get_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("vsi fc get req is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+	ret = sxe2_fc_get(adapter, req->vsi_id, &resp.fc_enable);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_fc_get failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) {
+		LOG_ERROR_BDF("copy_to_user failed, len=%zu\n", sizeof(resp));
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	cmd_buf->resp_len = sizeof(resp);
+
+l_end:
+	kfree(req);
+	return ret;
+}
+
+static s32 sxe2_com_vsi_info_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				 struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_vsi_info_get_req *req;
+	struct sxe2_drv_vsi_info_get_resp resp = {0};
+	struct sxe2_fwc_func_caps caps;
+	s32 ret;
+
+	req = (struct sxe2_drv_vsi_info_get_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("vsi info get null\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+	ret = sxe2_user_vsi_info_get(adapter, req->vsi_id, &caps);
+	if (ret) {
+		LOG_ERROR_BDF("vsi info get failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	resp.used_queues.base_idx_in_pf = caps.tx_caps.base_idx;
+	resp.used_queues.queues_cnt = caps.tx_caps.cnt;
+	resp.used_msix.msix_vectors_cnt = caps.msix_caps.cnt;
+	resp.used_msix.base_idx_in_func = caps.msix_caps.base_idx;
+
+	if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) {
+		LOG_ERROR_BDF("copy_to_user failed, len=%zu\n", sizeof(resp));
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	cmd_buf->resp_len = sizeof(resp);
+l_end:
+	kfree(req);
+	return ret;
+}
+
+static s32 sxe2_com_rxq_cfg_enable(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				   struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_rxq_cfg_req *req;
+	struct sxe2_rxq_cfg_params *rxq_params = NULL;
+	s32 ret;
+	u32 len;
+	u16 q_idx;
+
+	req = (struct sxe2_drv_rxq_cfg_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("rxq cfg req is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (req->q_cnt == 0) {
+		LOG_ERROR_BDF("dpdk rxq q_cnt is 0\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	len = sizeof(*rxq_params) + sizeof(struct sxe2_ctxt_elem) * req->q_cnt;
+	rxq_params = kzalloc(len, GFP_KERNEL);
+	if (!rxq_params) {
+		ret = -SXE2_VF_ERR_NO_MEMORY;
+		LOG_ERROR_BDF("rxq msg mem %uB alloc failed.\n", len);
+		goto l_end;
+	}
+	rxq_params->q_cnt = req->q_cnt;
+	rxq_params->vsi_id = req->vsi_id;
+	rxq_params->max_frame_size = req->max_frame_size;
+	for (q_idx = 0; q_idx < req->q_cnt; q_idx++) {
+		rxq_params->cfg[q_idx].dma_addr = req->cfg[q_idx].dma_addr;
+		rxq_params->cfg[q_idx].max_lro_size = req->cfg[q_idx].max_lro_size;
+		rxq_params->cfg[q_idx].split_type_mask = req->cfg[q_idx].split_type_mask;
+		rxq_params->cfg[q_idx].hdr_len = req->cfg[q_idx].hdr_len;
+		rxq_params->cfg[q_idx].queue_id = req->cfg[q_idx].queue_id;
+		rxq_params->cfg[q_idx].depth = req->cfg[q_idx].depth;
+		rxq_params->cfg[q_idx].buf_len = req->cfg[q_idx].buf_len;
+		rxq_params->cfg[q_idx].lro_en = req->cfg[q_idx].lro_en;
+		rxq_params->cfg[q_idx].keep_crc_en = req->cfg[q_idx].keep_crc_en;
+		rxq_params->cfg[q_idx].split_en = req->cfg[q_idx].split_en;
+		rxq_params->cfg[q_idx].desc_size = req->cfg[q_idx].desc_size;
+	}
+
+	ret = sxe2_rxq_cfg_ena_common_handle(adapter, rxq_params);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_rxq_cfg_ena_common_handle failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	kfree(rxq_params);
+	kfree(req);
+	return ret;
+}
+
+static s32 sxe2_com_rxq_cfg_disable(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_q_switch_req *req;
+	struct sxe2_rxq_dis_params params;
+	s32 ret;
+
+	req = (struct sxe2_drv_q_switch_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("rxq switch req is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	params.q_idx = req->q_idx;
+	params.vsi_id = req->vsi_id;
+	ret = sxe2_rxq_disable_common_handle(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2_rxq_disable_common_handle failed, ret: %d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	kfree(req);
+	return ret;
+}
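+
+/*
+ * The Tx queue enable/disable handlers below mirror the Rx pair above: copy
+ * the user request in, translate it into the kernel-side params struct and
+ * hand it to the common queue handler. A minimal sketch of the expected
+ * user-space ordering (hypothetical send_cmd() wrapper around the cmd
+ * channel, not an API defined in this file):
+ *
+ *	txq_req.vsi_id = vsi_id;
+ *	txq_req.q_cnt = 1;
+ *	txq_req.cfg[0].queue_id = q;
+ *	txq_req.cfg[0].dma_addr = ring_dma;
+ *	txq_req.cfg[0].depth = ring_depth;
+ *	send_cmd(SXE2_DRV_CMD_TXQ_CFG_ENABLE, &txq_req);   // start I/O
+ *	...
+ *	send_cmd(SXE2_DRV_CMD_TXQ_DISABLE, &q_switch_req); // stop before free
+ */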
+static s32 sxe2_com_txq_cfg_disable(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_q_switch_req *req; + struct sxe2_txq_ucmd_dis_params txq_params; + s32 ret; + + req = (struct sxe2_drv_q_switch_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("usr txq cfg req=NULL\n"); + ret = -EINVAL; + goto l_end; + } + + txq_params.q_idx = req->q_idx; + txq_params.vsi_id = req->vsi_id; + txq_params.sched_mode = req->sched_mode; + ret = sxe2_txq_dis_common_handle(adapter, &txq_params); + if (ret) { + LOG_ERROR_BDF("usr vsi[%d] txq[%d] dis failed\n", txq_params.vsi_id, + txq_params.q_idx); + } else { + LOG_INFO_BDF("usr vsi[%d] txq[%d] dis success\n", txq_params.vsi_id, + txq_params.q_idx); + } + +l_end: + kfree(req); + return ret; +} + +static s32 sxe2_com_txq_cfg_enable(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret; + u32 i, len; + struct sxe2_drv_txq_ctxt *ctxt; + struct sxe2_drv_txq_cfg_req *req; + struct sxe2_txq_ucmd_en_params *txq_params; + + req = (struct sxe2_drv_txq_cfg_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("usr txq cfg req is NULL\n"); + ret = -EINVAL; + goto l_end; + } + + len = sizeof(*txq_params) + sizeof(*ctxt) * req->q_cnt; + txq_params = kzalloc(len, GFP_KERNEL); + if (!txq_params) { + LOG_ERROR_BDF("txq msg mem %u alloc failed.\n", len); + ret = -ENOMEM; + goto l_end; + } + + txq_params->q_cnt = req->q_cnt; + txq_params->vsi_idx = req->vsi_id; + for (i = 0; i < txq_params->q_cnt; i++) { + ctxt = &req->cfg[i]; + txq_params->ctxts[i].depth = ctxt->depth; + txq_params->ctxts[i].dma_addr = ctxt->dma_addr; + txq_params->ctxts[i].queue_id = ctxt->queue_id; + txq_params->ctxts[i].sched_mode = ctxt->sched_mode; + } + ret = sxe2_txq_cfg_ena_common_handle(adapter, txq_params); + if (ret) { + LOG_ERROR_BDF("usr vsi[%d] txq[%d] num[%u] cfg failed\n", + txq_params->vsi_idx, txq_params->ctxts[0].queue_id, + txq_params->q_cnt); + } else { + LOG_INFO_BDF("usr vsi[%d] txq[%d] num[%u] cfg success\n", + txq_params->vsi_idx, txq_params->ctxts[0].queue_id, + txq_params->q_cnt); + } + + kfree(txq_params); +l_end: + kfree(req); + return ret; +} + +STATIC bool sxe2_com_txq_mapping_check(struct sxe2_adapter *adapter, u16 hw_queue_id, + u8 *map_pool_idx, u8 *queues_idx) +{ + struct sxe2_stats_map *stats_map = (struct sxe2_stats_map *)&adapter->stats_map; + struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map; + struct sxe2_stats_txq_map_pool *map_pool; + bool is_config = false; + u8 i; + u8 j; + + for (i = 0; i < SXE2_TXQ_STATS_MAP_MAX_NUM; i++) { + map_pool = &stats_txq_map->txq_map_pool[i]; + for (j = 0; j < SXE2_TXQ_STATS_MAP_MAX_NUM; j++) { + if (map_pool->queue_id_pool[j] == hw_queue_id) { + is_config = true; + goto l_end; + } + } + } + +l_end: + if (is_config) { + if (map_pool_idx) + *map_pool_idx = i; + + if (queues_idx) + *queues_idx = j; + + } + + return is_config; +} + +STATIC s32 sxe2_com_cmd_txq_stat_map_set(struct sxe2_adapter *adapter, u16 hw_queue_id) +{ + struct sxe2_stats_map *stats_map = &adapter->stats_map; + struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_txq_stats_map_pool_get_resp get_resp = { 0 }; + struct sxe2_fwc_txq_stats_map_pool_set_req set_req = { 0 }; + s32 ret = 0; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_POOL_GET, NULL, 0, + &get_resp, sizeof(get_resp)); + ret = 
sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("fail to get txqueue stats mapping, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	stats_txq_map->hw_txq_map.hw_txq_map_pool[get_resp.hw_index].txq_id = hw_queue_id;
+
+	stats_txq_map->hw_txq_map.curr_map_idx++;
+
+	set_req.cfg_info = cpu_to_le16(hw_queue_id);
+	set_req.hw_index = get_resp.hw_index;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_POOL_SET, &set_req,
+				  sizeof(set_req), NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("fail to set txqueue stats mapping, ret=%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_com_rx_map_info_get(struct sxe2_adapter *adapter, u8 pool_idx,
+				    struct sxe2_rxq_map_info *rxq_stats_map_info)
+{
+	struct sxe2_stats_map *stats_map = &adapter->stats_map;
+	struct sxe2_stats_rxq_map *stats_rxq_map = &stats_map->rxq_map;
+	struct sxe2_cmd_params cmd = { 0 };
+	struct sxe2_fwc_rxq_stats_map_get_info_resp resp = {0};
+	struct sxe2_fwc_rxq_stats_map_get_info_req req = {0};
+	u8 curr_map_idx;
+	s32 ret = 0;
+
+	if (pool_idx >= SXE2_RXQ_STATS_MAP_MAX_NUM) {
+		LOG_ERROR_BDF("RX invalid pool ID[%u]: exceeds supported range.\n", pool_idx);
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	curr_map_idx = stats_rxq_map->rxq_map_pool[pool_idx].curr_map_idx;
+	if (curr_map_idx == 0) {
+		LOG_ERROR_BDF("The current mapping pool has not been configured.\n");
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	req.hw_pool_idx = stats_rxq_map->rxq_map_pool[pool_idx].pool_id;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_INFO_GET, &req,
+				  sizeof(req), &resp, sizeof(resp));
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("fail to get rxqueue stats mapping, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	/* the firmware response is little-endian; convert to CPU order */
+	rxq_stats_map_info->rxq_fd_in_pkt_cnt = le64_to_cpu(resp.rxq_fd_in_pkt_cnt);
+	rxq_stats_map_info->rxq_lan_in_byte_cnt = le64_to_cpu(resp.rxq_lan_in_byte_cnt);
+	rxq_stats_map_info->rxq_lan_in_pkt_cnt = le64_to_cpu(resp.rxq_lan_in_pkt_cnt);
+	rxq_stats_map_info->rxq_mng_in_byte_cnt = le64_to_cpu(resp.rxq_mng_in_byte_cnt);
+	rxq_stats_map_info->rxq_mng_in_pkt_cnt = le64_to_cpu(resp.rxq_mng_in_pkt_cnt);
+	rxq_stats_map_info->rxq_mng_out_pkt_cnt = le64_to_cpu(resp.rxq_mng_out_pkt_cnt);
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_com_tx_map_info_get(struct sxe2_adapter *adapter, u8 index,
+				    struct sxe2_txq_map_info *txq_stats_map_info)
+{
+	struct sxe2_stats_map *stats_map = &adapter->stats_map;
+	struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map;
+	struct sxe2_cmd_params cmd = { 0 };
+	struct sxe2_fwc_txq_stats_map_get_info_resp resp = {0};
+	struct sxe2_fwc_txq_stats_map_get_info_req req = {0};
+	struct sxe2_txq_map_info stats_map_info = {0};
+	u8 curr_map_idx;
+	u8 i;
+	u8 j;
+	u16 hw_queue_id;
+	s32 ret = 0;
+
+	if (index >= SXE2_TXQ_STATS_MAP_MAX_NUM) {
+		LOG_ERROR_BDF("TX invalid pool ID[%u]: exceeds supported range.\n", index);
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	curr_map_idx = stats_txq_map->txq_map_pool[index].curr_map_idx;
+	if (curr_map_idx == 0) {
+		LOG_ERROR_BDF("The current mapping pool has not been configured.\n");
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	for (i = 0; i < curr_map_idx; i++) {
+		hw_queue_id = stats_txq_map->txq_map_pool[index].queue_id_pool[i];
+
+		for (j = 0; j < SXE2_TXQ_STATS_MAP_MAX_NUM; j++) {
+			if (stats_txq_map->hw_txq_map.hw_txq_map_pool[j].txq_id ==
+			    hw_queue_id) {
+				break;
+			}
+		}
+
+		if (j >= SXE2_TXQ_STATS_MAP_MAX_NUM) {
+			LOG_ERROR_BDF("get tx map stats info fail!\n");
+			ret = -EFAULT;
+			goto l_end;
+		}
+
+		req.hw_index = j;
+		sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_INFO_GET, &req,
+					  sizeof(req), &resp, sizeof(resp));
+		ret = sxe2_cmd_fw_exec(adapter, &cmd);
+		if (ret) {
+			LOG_ERROR_BDF("fail to get txqueue stats mapping, ret=%d\n", ret);
+			goto l_end;
+		}
+
+		/* accumulate in CPU order; the firmware response is little-endian */
+		stats_map_info.txq_lan_byte_cnt += le64_to_cpu(resp.txq_lan_byte_cnt);
+		stats_map_info.txq_lan_pkt_cnt += le64_to_cpu(resp.txq_lan_pkt_cnt);
+	}
+
+	txq_stats_map_info->txq_lan_byte_cnt = stats_map_info.txq_lan_byte_cnt;
+	txq_stats_map_info->txq_lan_pkt_cnt = stats_map_info.txq_lan_pkt_cnt;
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_com_tx_rx_queue_info_get(struct sxe2_adapter *adapter,
+					 struct sxe2_obj *obj,
+					 struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_stats_map *stats_map = &adapter->stats_map;
+	struct sxe2_stats_rxq_map *stats_rxq_map = &stats_map->rxq_map;
+	struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map;
+	struct sxe2_rxq_map_info *rxq_info = stats_map->q_info.rxq_stats_map_info;
+	struct sxe2_txq_map_info *txq_info = stats_map->q_info.txq_stats_map_info;
+	u8 pool_idx;
+	u8 index;
+	u8 curr_map_idx;
+	s32 ret = 0;
+
+	for (pool_idx = 0; pool_idx < SXE2_RXQ_STATS_MAP_MAX_NUM; pool_idx++) {
+		curr_map_idx = stats_rxq_map->rxq_map_pool[pool_idx].curr_map_idx;
+		if (curr_map_idx == 0)
+			continue;
+
+		ret = sxe2_com_rx_map_info_get(adapter, pool_idx, &rxq_info[pool_idx]);
+		if (ret)
+			goto l_end;
+	}
+
+	for (index = 0; index < SXE2_TXQ_STATS_MAP_MAX_NUM; index++) {
+		curr_map_idx = stats_txq_map->txq_map_pool[index].curr_map_idx;
+		if (curr_map_idx == 0)
+			continue;
+
+		ret = sxe2_com_tx_map_info_get(adapter, index, &txq_info[index]);
+		if (ret)
+			goto l_end;
+	}
+
+	if (sxe2_com_resp_copy_to_user(cmd_buf, &stats_map->q_info,
+				       sizeof(stats_map->q_info), obj) != 0) {
+		LOG_ERROR_BDF("copy_to_user failed, len=%zu, resp_len=%d\n",
+			      sizeof(stats_map->q_info), cmd_buf->resp_len);
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = sizeof(stats_map->q_info);
+l_end:
+	return ret;
+}
+
+s32 sxe2_dpdk_q_map_resource_release(struct sxe2_adapter *adapter, struct sxe2_obj *obj)
+{
+	struct sxe2_cmd_params cmd = { 0 };
+	s32 ret = 0;
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_RES_REL, NULL, 0, NULL,
+				  0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("RxQueue stats mapping config release fail.");
+		goto l_end;
+	}
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_RES_REL, NULL, 0, NULL,
+				  0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("TxQueue stats mapping config release fail.
"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_com_tx_rx_map_reset(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_stats_map *stats_map = &adapter->stats_map; + struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map; + struct sxe2_stats_rxq_map *stats_rxq_map = &stats_map->rxq_map; + struct sxe2_stats_hw_txq_map *hw_txq_map; + struct sxe2_stats_txq_map_pool *tx_map_pool; + struct sxe2_stats_rxq_map_pool *rx_map_pool; + struct sxe2_cmd_params cmd = { 0 }; + u32 i; + u32 j; + s32 ret = 0; + + memset(stats_map, 0, sizeof(*stats_map)); + + for (i = 0; i < SXE2_TXQ_STATS_MAP_MAX_NUM; i++) { + tx_map_pool = &stats_txq_map->txq_map_pool[i]; + for (j = 0; j < SXE2_TXQ_STATS_MAP_MAX_NUM; j++) + tx_map_pool->queue_id_pool[j] = SXE2_STAT_MAP_INVALID_QID; + } + + hw_txq_map = &stats_txq_map->hw_txq_map; + for (i = 0; i < SXE2_TXQ_STATS_MAP_MAX_NUM; i++) + hw_txq_map->hw_txq_map_pool[i].txq_id = SXE2_STAT_MAP_INVALID_QID; + + for (i = 0; i < SXE2_RXQ_STATS_MAP_MAX_NUM; i++) { + rx_map_pool = &stats_rxq_map->rxq_map_pool[i]; + for (j = 0; j < SXE2_RXQ_MAP_Q_MAX_NUM; j++) + rx_map_pool->queue_id_pool[j] = SXE2_STAT_MAP_INVALID_QID; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_RES_REL, NULL, 0, NULL, + 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("Queue stats mapping config release fail. "); + goto l_end; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_RES_REL, NULL, 0, NULL, + 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("Queue stats mapping config release fail. "); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_com_queue_map_info_clear(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_stats_map *stats_map = &adapter->stats_map; + struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map; + struct sxe2_stats_rxq_map *stats_rxq_map = &stats_map->rxq_map; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_rxq_stats_map_info_clear_req rx_req = { 0 }; + struct sxe2_fwc_txq_stats_map_info_clear_req tx_req = { 0 }; + struct sxe2_rxq_map_info *rxq_info = stats_map->q_info.rxq_stats_map_info; + struct sxe2_txq_map_info *txq_info = stats_map->q_info.txq_stats_map_info; + u8 curr_map_idx; + u8 i; + s32 ret = 0; + + rxq_info->rxq_lan_in_byte_cnt = 0; + rxq_info->rxq_lan_in_pkt_cnt = 0; + txq_info->txq_lan_byte_cnt = 0; + txq_info->txq_lan_pkt_cnt = 0; + + for (i = 0; i < SXE2_TXQ_STATS_MAP_MAX_NUM; i++) { + if (stats_txq_map->hw_txq_map.hw_txq_map_pool[i].txq_id + == SXE2_STAT_MAP_INVALID_QID) + continue; + + tx_req.hw_index = i; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQUEUE_STATS_MAP_INFO_CLEAR, + &tx_req, sizeof(tx_req), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fail to clr txqueue stats mapping, ret=%d\n", ret); + goto l_end; + } + } + + for (i = 0; i < SXE2_RXQ_STATS_MAP_MAX_NUM; i++) { + curr_map_idx = stats_rxq_map->rxq_map_pool[i].curr_map_idx; + if (curr_map_idx == 0) + continue; + + rx_req.hw_pool_idx = stats_rxq_map->rxq_map_pool[i].pool_id; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_INFO_CLEAR, + &rx_req, sizeof(rx_req), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fail to clr rxqueue stats mapping, ret=%d\n", ret); + goto l_end; + } + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_CLEAR, NULL, + 0, NULL, 0); + 
ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("fail to clr rxlan stats mapping, ret=%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_com_txq_mapping_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_tx_map_req *req;
+	struct sxe2_stats_map *stats_map = (struct sxe2_stats_map *)&adapter->stats_map;
+	struct sxe2_stats_txq_map *stats_txq_map = &stats_map->txq_map;
+	struct sxe2_stats_txq_map_pool *map_pool;
+	bool is_config;
+	u8 map_pool_idx;
+	u8 queues_idx;
+	s32 ret = 0;
+	struct sxe2_q_id_transe params = {0};
+
+	req = (struct sxe2_drv_tx_map_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("copy_from_user failed, len=%zu\n", sizeof(*req));
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	params.q_id = req->queue_id;
+	params.is_tx = true;
+	params.vsi_id = cmd_buf->vsi_id;
+	ret = sxe2_dpdk_abs_qid_get(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("get hw_queue_id failed.\n");
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF("dpdk q_id:%u params.q_id_in_dev:%u\n", req->queue_id,
+		      params.q_id_in_dev);
+	if (req->pool_idx >= SXE2_TXQ_STATS_MAP_MAX_NUM) {
+		LOG_ERROR_BDF("Txq mapping: pool_idx supports up to %u, but %u was provided.\n",
+			      SXE2_TXQ_STATS_MAP_MAX_NUM - 1, req->pool_idx);
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	is_config = sxe2_com_txq_mapping_check(adapter, params.q_id_in_dev, &map_pool_idx,
+					       &queues_idx);
+	if (is_config) {
+		if (map_pool_idx == req->pool_idx) {
+			LOG_ERROR_BDF("pool_idx%u: queue_id = %u already exists.\n",
+				      req->pool_idx, params.q_id_in_dev);
+			ret = -EFAULT;
+			goto l_end;
+		}
+	} else {
+		if (stats_txq_map->hw_txq_map.curr_map_idx >= SXE2_TXQ_STATS_MAP_MAX_NUM) {
+			LOG_ERROR_BDF("Mapping failed: exceeds the limit of %u.\n",
+				      SXE2_TXQ_STATS_MAP_MAX_NUM);
+			ret = -EFAULT;
+			goto l_end;
+		}
+
+		ret = sxe2_com_cmd_txq_stat_map_set(adapter, params.q_id_in_dev);
+		if (ret)
+			goto l_end;
+	}
+
+	map_pool = &stats_txq_map->txq_map_pool[req->pool_idx];
+	map_pool->queue_id_pool[map_pool->curr_map_idx++] = params.q_id_in_dev;
+
+	LOG_DEBUG_BDF("CREATE:TX pool [%u] cfg: %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
+		      req->pool_idx,
+		      map_pool->queue_id_pool[0], map_pool->queue_id_pool[1],
+		      map_pool->queue_id_pool[2], map_pool->queue_id_pool[3],
+		      map_pool->queue_id_pool[4], map_pool->queue_id_pool[5],
+		      map_pool->queue_id_pool[6], map_pool->queue_id_pool[7],
+		      map_pool->queue_id_pool[8], map_pool->queue_id_pool[9],
+		      map_pool->queue_id_pool[10], map_pool->queue_id_pool[11],
+		      map_pool->queue_id_pool[12], map_pool->queue_id_pool[13],
+		      map_pool->queue_id_pool[14], map_pool->queue_id_pool[15]);
+
+l_end:
+	kfree(req);
+	return ret;
+}
+
+STATIC s32 sxe2_com_rxq_mapping_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_rx_map_req *req;
+	struct sxe2_stats_map *stats_map = &adapter->stats_map;
+	struct sxe2_stats_rxq_map *stats_rxq_map = &stats_map->rxq_map;
+	struct sxe2_stats_rxq_map_pool *map_pool;
+	struct sxe2_cmd_params cmd = { 0 };
+	struct sxe2_fwc_rxq_stats_map_pool_get_resp get_resp = { 0 };
+	struct sxe2_fwc_rxq_stats_map_pool_set_req set_req = { 0 };
+	u16 last_q_id;
+	s32 ret = 0;
+	u32 qid_cfg;
+	struct sxe2_q_id_transe params = {0};
+
+	req = (struct sxe2_drv_rx_map_req *)
+			sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	if (!req) {
+		LOG_ERROR_BDF("copy_from_user failed, len=%zu\n", sizeof(*req));
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	params.q_id = req->queue_id;
+	params.is_tx = false;
+	params.vsi_id = cmd_buf->vsi_id;
+	ret = sxe2_dpdk_abs_qid_get(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("get hw_queue_id failed.\n");
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF("dpdk q_id:%u params.q_id_in_dev:%u\n", req->queue_id,
+		      params.q_id_in_dev);
+	if (req->pool_idx >= SXE2_RXQ_STATS_MAP_MAX_NUM) {
+		LOG_ERROR_BDF("Rxq mapping: supports up to %u, but %u was provided.\n",
+			      SXE2_RXQ_STATS_MAP_MAX_NUM - 1, req->pool_idx);
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	map_pool = &stats_rxq_map->rxq_map_pool[req->pool_idx];
+	if (map_pool->curr_map_idx == 0) {
+		sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_POOL_GET, NULL,
+					  0, &get_resp, sizeof(get_resp));
+		ret = sxe2_cmd_fw_exec(adapter, &cmd);
+		if (ret) {
+			LOG_ERROR_BDF("failed to get rxq stats map pool, ret=%d\n", ret);
+			goto l_end;
+		}
+
+		map_pool->pool_id = get_resp.hw_pool_idx;
+
+	} else {
+		last_q_id = map_pool->queue_id_pool[map_pool->curr_map_idx - 1];
+		if (last_q_id + 1 != params.q_id_in_dev) {
+			LOG_ERROR_BDF("Mapping failed: rxq %u is not contiguous with mapping pool[%u].\n",
+				      params.q_id_in_dev, req->pool_idx);
+			ret = -EFAULT;
+			goto l_end;
+		}
+
+		if (map_pool->curr_map_idx >= SXE2_RXQ_MAP_Q_MAX_NUM) {
+			LOG_ERROR_BDF("mapping config limit reached, max:%d\n",
+				      SXE2_RXQ_MAP_Q_MAX_NUM);
+			ret = -EFAULT;
+			goto l_end;
+		}
+	}
+
+	map_pool->queue_id_pool[map_pool->curr_map_idx++] = params.q_id_in_dev;
+
+	/* the hw pool config word packs the last and first mapped queue id */
+	qid_cfg = ((map_pool->queue_id_pool[map_pool->curr_map_idx - 1] & 0x7FF) << 11) |
+		  (map_pool->queue_id_pool[0] & 0x7FF);
+
+	set_req.cfg_info = cpu_to_le32(qid_cfg);
+	set_req.hw_pool_idx = map_pool->pool_id;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQUEUE_STATS_MAP_POOL_SET, &set_req,
+				  sizeof(set_req), NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("failed to set rxq stats map pool, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF("CREATE:RX mapping pool [%u] configured qrange: %u ~ %u\n",
+		      req->pool_idx,
+		      map_pool->queue_id_pool[0],
+		      map_pool->queue_id_pool[map_pool->curr_map_idx - 1]);
+l_end:
+	kfree(req);
+	return ret;
+}
+
+static s32 sxe2_user_mac_addr_get(struct sxe2_vsi *vsi, u16 vf_idx,
+				  struct sxe2_drv_dev_info_resp *resp)
+{
+	struct sxe2_adapter *adapter;
+	struct sxe2_vf_node *vf = NULL;
+	enum sxe2_vsi_type type;
+	s32 ret = 0;
+
+	if (!vsi || !resp)
+		return -EINVAL;
+
+	adapter = vsi->adapter;
+	type = vsi->type;
+	switch (type) {
+	case SXE2_VSI_T_DPDK_PF:
+		memcpy(resp->mac_addr, adapter->hw.mac_info.perm_addr, ETH_ALEN);
+		break;
+	case SXE2_VSI_T_DPDK_VF:
+		vf = vsi->vf_node;
+		if (!vf) {
+			LOG_ERROR_BDF("vf not found, vsi_id=%u\n", vsi->idx_in_dev);
+			ret = -EINVAL;
+			goto l_end;
+		}
+
+		ret = sxe2_check_vf_ready_for_cfg(vf);
+		if (ret)
+			LOG_ERROR_BDF("VF %u not ready for mac cfg.\n", vf->vf_idx);
+		else
+			memcpy(resp->mac_addr, vf->mac_addr.addr, ETH_ALEN);
+		break;
+	default:
+		LOG_ERROR_BDF("vsi type %u not supported\n", type);
+		ret = -EINVAL;
+		break;
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe2_user_repr_mac_addr_get(struct sxe2_adapter *adapter, u16 vf_idx,
+				       struct sxe2_drv_dev_info_resp *resp)
+{
+	struct sxe2_vf_node *vf = NULL;
+	s32 ret = 0;
+
+	if (!resp)
+		return -EINVAL;
+
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+
+	vf = sxe2_vf_node_get(adapter, vf_idx);
+	if (!vf) {
+		LOG_ERROR_BDF("vf not found, vf_idx=%u\n", vf_idx);
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2_check_vf_ready_for_cfg(vf);
+	if (ret)
+		LOG_ERROR_BDF("VF %u not ready for mac 
cfg.\n", vf_idx); + else + memcpy(resp->mac_addr, vf->mac_addr.addr, ETH_ALEN); + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + +l_end: + return ret; +} + +STATIC s32 sxe2_com_info_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_dev_info_resp resp; + struct pci_dev *pdev = adapter->pdev; + struct sxe2_vsi *vsi = NULL; + u64 dsn = 0; + s32 ret = 0; + u32 dsn_low, dsn_high; + u16 pos; + + memset(&resp, 0, sizeof(resp)); + if (obj->func_type == SXE2_PF && cmd_buf->repr_id < SXE2_VF_NUM) { + ret = sxe2_user_repr_mac_addr_get(adapter, cmd_buf->repr_id, &resp); + if (ret) { + LOG_ERROR_BDF("get user mac addr failed, ret=%d\n", ret); + goto l_end; + } + } else { + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("get vsi:%d failed.\n", cmd_buf->vsi_id); + ret = -EIO; + goto l_end; + } + ret = sxe2_user_mac_addr_get(vsi, cmd_buf->repr_id, &resp); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("get user mac addr failed, ret=%d\n", ret); + goto l_end; + } + } + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + if (!pos) { + LOG_WARN_BDF("Failed to find device serial number capability\n"); + } else { + pci_read_config_dword(pdev, pos + 4, &dsn_low); + pci_read_config_dword(pdev, pos + 8, &dsn_high); + dsn = ((u64)dsn_high << 22) | dsn_low; + } + resp.dsn = dsn; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%zu\n", sizeof(resp)); + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(resp); + +l_end: + return ret; +} + +STATIC s32 sxe2_com_fw_info_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_fw_ver_msg *fw_ver = &adapter->hw.fw_ver; + struct sxe2_drv_dev_fw_info_resp resp; + s32 ret = 0; + + memset(&resp, 0, sizeof(resp)); + + resp.main_version_id = fw_ver->main_version_id; + resp.sub_version_id = fw_ver->sub_version_id; + resp.fix_version_id = fw_ver->fix_version_id; + resp.build_id = fw_ver->build_id; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", + sizeof(struct sxe2_drv_dev_info_resp)); + ret = -EFAULT; + } + cmd_buf->resp_len = sizeof(resp); + + return ret; +} + +STATIC s32 sxe2_com_link_info_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_link_info_resp resp; + s32 ret = 0; + + memset(&resp, 0, sizeof(resp)); + sxe2_link_get_info_config(adapter, &resp.status, &resp.speed); + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", + sizeof(struct sxe2_drv_dev_info_resp)); + ret = -EFAULT; + } + cmd_buf->resp_len = sizeof(resp); + + return ret; +} + +STATIC s32 sxe2_com_sched_root_tree_alloc(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret; + struct sxe2_vsi *usr_vsi; + struct sxe2_tm_res tm_res = {0}; + + mutex_lock(&adapter->vsi_ctxt.lock); + usr_vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!usr_vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("sxe2_vsi_get_with_id failed.\n"); + ret = -EIO; + goto l_end; + } + + ret = sxe2_txsch_ucmd_root_vsi_cfg(usr_vsi, &tm_res.teid); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + 
LOG_ERROR_BDF("sxe2_txsch_ucmd_dflt_vsi_topo_cfg failed.\n"); + goto l_end; + } + + if (sxe2_com_resp_copy_to_user(cmd_buf, &tm_res, sizeof(tm_res), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(tm_res)); + ret = -EFAULT; + goto l_end; + } + + cmd_buf->resp_len = sizeof(tm_res); + +l_end: + return ret; +} + +STATIC s32 sxe2_com_sched_root_tree_release(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret; + struct sxe2_tm_res *tm_res; + + tm_res = (struct sxe2_tm_res *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!tm_res) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(tm_res)); + ret = -EFAULT; + goto l_end; + } + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_txsch_ucmd_subtree_del(adapter, cmd_buf->vsi_id, tm_res->teid, true); + mutex_unlock(&adapter->vsi_ctxt.lock); + + if (ret) { + LOG_ERROR_BDF("sxe2_txsch_ucmd_subtree_del failed.\n"); + ret = -EIO; + goto l_end; + } + +l_end: + kfree(tm_res); + return ret; +} + +STATIC s32 sxe2_com_sched_root_children_del(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret; + struct sxe2_tm_res *tm_res; + + tm_res = (struct sxe2_tm_res *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!tm_res) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(tm_res)); + ret = -EFAULT; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_txsch_ucmd_subtree_del(adapter, cmd_buf->vsi_id, tm_res->teid, false); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("sxe2_txsch_ucmd_subtree_del failed.\n"); + goto l_end; + } + +l_end: + kfree(tm_res); + return ret; +} + +STATIC s32 sxe2_com_sched_tm_mid_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret; + struct sxe2_tm_res tm_res; + struct sxe2_vsi *usr_vsi; + struct sxe2_txsched_ucmd_node_params ucmd_node_param; + struct sxe2_tm_add_mid_msg *msg; + + msg = (struct sxe2_tm_add_mid_msg *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!msg) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*msg)); + ret = -EFAULT; + goto l_end; + } + + ucmd_node_param.parent_teid = msg->parent_teid; + ucmd_node_param.adj_lvl = msg->adj_lvl; + ucmd_node_param.peak = msg->info.peak; + ucmd_node_param.committed = msg->info.committed; + ucmd_node_param.priority = msg->info.priority; + ucmd_node_param.weight = msg->info.weight; + + mutex_lock(&adapter->vsi_ctxt.lock); + usr_vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!usr_vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("sxe2_vsi_get_by_idx failed.\n"); + ret = -EIO; + goto l_end; + } + + ret = sxe2_txsched_ucmd_node_add(usr_vsi, &ucmd_node_param); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("sxe2_txsched_ucmd_node_add failed.\n"); + ret = -EIO; + return ret; + } + + tm_res.teid = ucmd_node_param.node_teid; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &tm_res, sizeof(tm_res), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(tm_res)); + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(tm_res); + +l_end: + kfree(msg); + return ret; +} + +STATIC s32 sxe2_com_sched_tm_queue_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *usr_vsi; + struct sxe2_txsch_ucmd_qnode_params ucmd_node_param; + struct sxe2_tm_add_queue_msg *msg; + struct sxe2_tm_res tm_res; + s32 ret; 
+ + msg = (struct sxe2_tm_add_queue_msg *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!msg) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*msg)); + ret = -EFAULT; + goto l_end; + } + + ucmd_node_param.parent_teid = msg->parent_teid; + ucmd_node_param.queue_id = msg->queue_id; + ucmd_node_param.adj_lvl = msg->adj_lvl; + ucmd_node_param.committed = msg->info.committed; + ucmd_node_param.peak = msg->info.peak; + ucmd_node_param.priority = msg->info.priority; + ucmd_node_param.weight = msg->info.weight; + + mutex_lock(&adapter->vsi_ctxt.lock); + usr_vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!usr_vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("sxe2_vsi_get_by_idx failed.\n"); + ret = -EIO; + goto l_end; + } + + ret = sxe2_txsched_ucmd_qnode_add(usr_vsi, &ucmd_node_param); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("sxe2_txsched_ucmd_qnode_add failed.\n"); + ret = -EIO; + goto l_end; + } + + tm_res.teid = ucmd_node_param.node_teid; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &tm_res, sizeof(tm_res), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(tm_res)); + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(tm_res); + +l_end: + kfree(msg); + return ret; +} + +STATIC s32 sxe2_com_irq_band_rxq(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_queue *rxq; + struct sxe2_vsi *vsi; + struct sxe2_drv_queue_irq_bind_req *req = NULL; + s32 ret = 0; + + if (sizeof(*req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %zu != %u\n", sizeof(*req), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + req = (struct sxe2_drv_queue_irq_bind_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + + vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", cmd_buf->vsi_id); + ret = -EFAULT; + goto l_end; + } + + rxq = vsi->rxqs.q[req->q_idx]; + if (!rxq) { + LOG_ERROR_BDF("failed to get vsi[%u] rxq[%u].\n", cmd_buf->vsi_id, + req->q_idx); + ret = -EFAULT; + goto l_end; + } + + LOG_DEBUG("irq band rxq, vsi[%d] rxq[%u/%u] bind irq[%u->%u] itr[%u].\n", + cmd_buf->vsi_id, rxq->idx_in_vsi, rxq->idx_in_pf, req->msix_idx, + req->msix_idx + vsi->irqs.base_idx_in_pf, req->itr_idx); + + if (req->bind) { + sxe2_hw_rxq_irq_cause_setup(&adapter->hw, rxq->idx_in_pf, req->itr_idx, + req->msix_idx + vsi->irqs.base_idx_in_pf); + LOG_DEBUG("vsi[%d] rxq[%u/%u] bind irq[%u] itr[%u] setup.\n", + cmd_buf->vsi_id, rxq->idx_in_vsi, rxq->idx_in_pf, req->msix_idx, + req->itr_idx); + } else { + sxe2_hw_rxq_irq_cause_clear(&adapter->hw, rxq->idx_in_pf); + LOG_DEBUG("vsi[%d] rxq[%u/%u] bind irq[%u] clear.\n", cmd_buf->vsi_id, + rxq->idx_in_vsi, rxq->idx_in_pf, req->msix_idx); + } + +l_end: + kfree(req); + return ret; +} + +STATIC s32 sxe2_com_switch_srcvsi_ext_prune(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_srcvsi_ext_cfg_req *srcvsi_ext_cfg_req; + s32 ret = 0; + u16 vsi_id; + u16 idx; + u16 srcvsi_list[SXE2_SRCVSI_PRUNE_MAX_NUM]; + u16 srcvsi_cnt; + u8 is_add; + + srcvsi_ext_cfg_req = (struct sxe2_srcvsi_ext_cfg_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!srcvsi_ext_cfg_req) { + LOG_ERROR_BDF("srcvsi list cfg req is NULL\n"); + ret = -EFAULT; + goto l_end; + } + + is_add = srcvsi_ext_cfg_req->is_add; 
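+	/*
+	 * The request carries a variable-length source-VSI list; srcvsi_cnt is
+	 * validated against SXE2_SRCVSI_PRUNE_MAX_NUM below before it is used
+	 * to fill the fixed-size on-stack srcvsi_list[].
+	 */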
+	srcvsi_cnt = srcvsi_ext_cfg_req->srcvsi_cnt;
+	if (srcvsi_cnt > SXE2_SRCVSI_PRUNE_MAX_NUM) {
+		LOG_ERROR_BDF("srcvsi cnt %u exceeds max %u\n", srcvsi_cnt,
+			      SXE2_SRCVSI_PRUNE_MAX_NUM);
+		ret = -EINVAL;
+		goto l_end;
+	}
+	vsi_id = le16_to_cpu(srcvsi_ext_cfg_req->vsi_id);
+	for (idx = 0; idx < srcvsi_cnt; idx++)
+		srcvsi_list[idx] = le16_to_cpu(srcvsi_ext_cfg_req->srcvsi_list[idx]);
+
+	if (is_add)
+		ret = sxe2_ucmd_srcvsi_ext_add(adapter, vsi_id, srcvsi_list, srcvsi_cnt);
+	else
+		ret = sxe2_ucmd_srcvsi_ext_del(adapter, vsi_id);
+
+	if (ret) {
+		LOG_ERROR_BDF("user driver(vsi=%u) %s srcvsi ext fail, ret=%d\n", vsi_id,
+			      is_add ? "set" : "clear", ret);
+	} else {
+		LOG_DEBUG_BDF("user driver(vsi=%u) %s srcvsi ext\n", vsi_id,
+			      is_add ? "set" : "clear");
+	}
+
+l_end:
+	kfree(srcvsi_ext_cfg_req);
+	return ret;
+}
+
+STATIC s32 sxe2_com_sfp_eeprom_read(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+				    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	s32 ret = 0;
+	struct sxe2_drv_sfp_req *req =
+		(struct sxe2_drv_sfp_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+	u32 resp_len;
+	struct sxe2_drv_sfp_resp *resp = NULL;
+	struct sxe2_sfp_resp *sff_value = NULL;
+
+	if (!req) {
+		LOG_ERROR_BDF("sfp eeprom read req is NULL\n");
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+	resp_len = sizeof(struct sxe2_drv_sfp_resp) + req->data_len;
+	resp = kzalloc(resp_len, GFP_KERNEL);
+	if (!resp) {
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	sff_value = kzalloc(sizeof(*sff_value) + req->data_len, GFP_KERNEL);
+	if (!sff_value) {
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	ret = sxe2_fwc_sff_eeprom_get(adapter, req->is_qsfp, req->bus_addr, req->page_cnt,
+				      req->offset, req->data_len, sff_value);
+	if (ret) {
+		LOG_ERROR_BDF("get eeprom failed, ret=%d.\n", ret);
+		goto l_end;
+	}
+
+	memcpy(resp->data, sff_value->data, req->data_len);
+	if (sxe2_com_resp_copy_to_user(cmd_buf, resp, resp_len, obj) != 0) {
+		LOG_ERROR_BDF("copy_to_user failed.\n");
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = resp_len;
+
+l_end:
+	kfree(req);
+	kfree(resp);
+	kfree(sff_value);
+	return ret;
+}
+
+struct sxe2_ioctl_cmd_table driver_cmd_table[] = {
+	{SXE2_DRV_CMD_HANDSHAKE_DISABLE, sxe2_com_handshake_disable},
+
+	{SXE2_DRV_CMD_DEV_GET_CAPS, sxe2_com_cap_get},
+	{SXE2_DRV_CMD_DEV_GET_SWITCHDEV_INFO, sxe2_com_switchdev_info_get},
+
+	{SXE2_DRV_CMD_DEV_GET_INFO, sxe2_com_info_get},
+	{SXE2_DRV_CMD_DEV_GET_FW_INFO, sxe2_com_fw_info_get},
+
+	{SXE2_DRV_CMD_RX_MAP_SET, sxe2_com_rxq_mapping_set},
+	{SXE2_DRV_CMD_TX_MAP_SET, sxe2_com_txq_mapping_set},
+	{SXE2_DRV_CMD_TX_RX_MAP_GET, sxe2_com_tx_rx_queue_info_get},
+	{SXE2_DRV_CMD_TX_RX_MAP_RESET, sxe2_com_tx_rx_map_reset},
+	{SXE2_DRV_CMD_TX_RX_MAP_INFO_CLEAR, sxe2_com_queue_map_info_clear},
+
+	{SXE2_DRV_CMD_SCHED_ROOT_TREE_ALLOC, sxe2_com_sched_root_tree_alloc},
+	{SXE2_DRV_CMD_SCHED_ROOT_TREE_RELEASE, sxe2_com_sched_root_tree_release},
+	{SXE2_DRV_CMD_SCHED_ROOT_CHILDREN_DELETE, sxe2_com_sched_root_children_del},
+	{SXE2_DRV_CMD_SCHED_TM_ADD_MID_NODE, sxe2_com_sched_tm_mid_add},
+	{SXE2_DRV_CMD_SCHED_TM_ADD_QUEUE_NODE, sxe2_com_sched_tm_queue_add},
+
+	{SXE2_DRV_CMD_RSS_KEY_SET, sxe2_com_rss_key_set},
+	{SXE2_DRV_CMD_RSS_LUT_SET, sxe2_com_rss_lut_set},
+	{SXE2_DRV_CMD_RSS_FUNC_SET, sxe2_com_rss_func_set},
+	{SXE2_DRV_CMD_RSS_HF_ADD, sxe2_com_rss_hf_add},
+	{SXE2_DRV_CMD_RSS_HF_DEL, sxe2_com_rss_hf_del},
+	{SXE2_DRV_CMD_RSS_HF_CLEAR, sxe2_com_rss_hf_clear},
+
+	{SXE2_DRV_CMD_VSI_CREATE, sxe2_com_main_vsi_create},
+	{SXE2_DRV_CMD_VSI_FREE, sxe2_com_free_vsi},
+	{SXE2_DRV_CMD_VSI_FC_GET, sxe2_com_vsi_fc_get},
+	{SXE2_DRV_CMD_VSI_INFO_GET, sxe2_com_vsi_info_get},
+	{SXE2_DRV_CMD_RXQ_CFG_ENABLE, sxe2_com_rxq_cfg_enable},
+	{SXE2_DRV_CMD_RXQ_DISABLE, sxe2_com_rxq_cfg_disable},
+
{SXE2_DRV_CMD_TXQ_CFG_ENABLE, sxe2_com_txq_cfg_enable}, + {SXE2_DRV_CMD_TXQ_DISABLE, sxe2_com_txq_cfg_disable}, + + {SXE2_DRV_CMD_MAC_ADDR_UC, sxe2_com_switch_filter_uc}, + {SXE2_DRV_CMD_MAC_ADDR_MC, sxe2_com_switch_filter_mc}, + {SXE2_DRV_CMD_VLAN_FILTER_SWITCH, sxe2_com_switch_filter_vlan_control}, + {SXE2_DRV_CMD_VLAN_FILTER_ADD_DEL, sxe2_com_switch_filter_vlan_rule}, + {SXE2_DRV_CMD_PROMISC_CFG, sxe2_com_switch_filter_promisc}, + {SXE2_DRV_CMD_ALLMULTI_CFG, sxe2_com_switch_filter_allmulti}, + + {SXE2_DRV_CMD_VSI_STATS_GET, sxe2_com_vsi_stat_get}, + {SXE2_DRV_CMD_VSI_STATS_CLEAR, sxe2_com_vsi_stat_clear}, + {SXE2_DRV_CMD_MAC_STATS_GET, sxe2_com_mac_stat_get}, + {SXE2_DRV_CMD_MAC_STATS_CLEAR, sxe2_com_mac_stat_clear}, + + {SXE2_DRV_CMD_EVT_IRQ_BAND_RXQ, sxe2_com_irq_band_rxq}, + + {SXE2_DRV_CMD_SWITCH_UPLINK, sxe2_com_switch_uplink}, + {SXE2_DRV_CMD_SWITCH_REPR, sxe2_com_switch_repr}, + {SXE2_DRV_CMD_SWITCH_MODE, sxe2_com_switch_mode}, + {SXE2_DRV_CMD_SWITCH_CPVSI, sxe2_com_switch_cp_vsi}, + + {SXE2_DRV_CMD_UDPTUNNEL_ADD, sxe2_com_udptunnel_handler}, + {SXE2_DRV_CMD_UDPTUNNEL_DEL, sxe2_com_udptunnel_handler}, + {SXE2_DRV_CMD_UDPTUNNEL_GET, sxe2_com_udptunnel_handler}, + + {SXE2_DRV_CMD_IPSEC_CAP_GET, sxe2_ipsec_cap_get}, + {SXE2_DRV_CMD_IPSEC_TXSA_ADD, sxe2_ipsec_txsa_add}, + {SXE2_DRV_CMD_IPSEC_RXSA_ADD, sxe2_ipsec_rxsa_add}, + {SXE2_DRV_CMD_IPSEC_TXSA_DEL, sxe2_ipsec_txsa_del}, + {SXE2_DRV_CMD_IPSEC_RXSA_DEL, sxe2_ipsec_rxsa_del}, + {SXE2_DRV_CMD_IPSEC_RESOURCE_CLEAR, sxe2_ipsec_resource_clear}, + + {SXE2_DRV_CMD_FLOW_FILTER_ADD, sxe2_com_flow_filter_add}, + {SXE2_DRV_CMD_FLOW_FILTER_DEL, sxe2_com_flow_filter_del}, + {SXE2_DRV_CMD_FLOW_FNAV_STAT_ALLOC, sxe2_com_flow_fnav_stat_alloc}, + {SXE2_DRV_CMD_FLOW_FNAV_STAT_FREE, sxe2_com_flow_fnav_stat_free}, + {SXE2_DRV_CMD_FLOW_FNAV_STAT_QUERY, sxe2_com_flow_fnav_stat_query}, + + {SXE2_DRV_CMD_LINK_STATUS_GET, sxe2_com_link_info_get}, + + {SXE2_DRV_CMD_VLAN_OFFLOAD_CFG, sxe2_com_vlan_offload_cfg}, + {SXE2_DRV_CMD_VLAN_CFG_QUERY, sxe2_com_vlan_cfg_query}, + + {SXE2_DRV_CMD_VSI_SRCVSI_PRUNE, sxe2_com_switch_srcvsi_ext_prune}, + + {SXE2_DRV_CMD_OPT_EEP_GET, sxe2_com_sfp_eeprom_read}, +}; + +STATIC s32 sxe2_drv_cmd_len_check(struct sxe2_drv_cmd_params *cmd_buf) +{ + return 0; +} + +static s32 sxe2_pf_status_check(struct sxe2_adapter *adapter, + struct sxe2_drv_cmd_params *cmd_buf) +{ + return 0; +} + +STATIC s32 sxe2_drv_msg_check(struct sxe2_adapter *adapter, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + + ret = sxe2_drv_cmd_len_check(cmd_buf); + if (ret) + return ret; + + ret = sxe2_pf_status_check(adapter, cmd_buf); + if (ret) + return ret; + + return ret; +} + +STATIC struct sxe2_ioctl_cmd_table *sxe2_drv_cmd_handle_get(u32 opcode) +{ + u32 i; + struct sxe2_ioctl_cmd_table *cmd_func = NULL; + + for (i = 0; i < ARRAY_SIZE(driver_cmd_table); i++) { + if (driver_cmd_table[i].opcode == opcode) { + cmd_func = &driver_cmd_table[i]; + break; + } + } + + return cmd_func; +} + +s32 sxe2_com_cmd_send(void *ad, struct sxe2_obj *obj, struct sxe2_drv_cmd_params *param) +{ + s32 ret; + struct sxe2_ioctl_cmd_table *cmd_table; + struct sxe2_adapter *adapter = ad; + + if (sxe2_drv_msg_check(adapter, param)) { + ret = -EINVAL; + goto l_end; + } + + LOG_DEBUG_BDF("com cmd opcode:0x%x vsi_id:%u trace_id:0x%llx\n.", param->opcode, + param->vsi_id, param->trace_id); + cmd_table = sxe2_drv_cmd_handle_get(param->opcode); + if (cmd_table && cmd_table->func) { + ret = cmd_table->func(adapter, obj, param); + } else { + LOG_ERROR_BDF("Can't find cmd 
opcode:0x%x vsi_id:%u trace_id:0x%llx\n.", + param->opcode, param->vsi_id, param->trace_id); + ret = -EINVAL; + } +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..e9a43db37a64dbeacb1cfd2da86a9bd409415a84 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ioctl.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_ioctl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_IOCTL_H__ +#define __SXE2_COM_IOCTL_H__ + +#include "sxe2.h" + +struct sxe2_drv_cmd_params; + +struct sxe2_ioctl_cmd_table { + u32 opcode; + s32 (*func)(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *param); +}; + +static inline void *sxe2_com_req_data_copy_to_kernel(struct sxe2_drv_cmd_params *param, + struct sxe2_obj *obj) +{ + void *k_buffer = NULL; + + if (param->req_len > 0) { + k_buffer = kzalloc(param->req_len, GFP_KERNEL); + if (!k_buffer) + goto l_end; + if (obj->func_type == SXE2_PF) { + if (copy_from_user(k_buffer, param->req_data, param->req_len)) { + kfree(k_buffer); + return NULL; + } + } else { + memcpy(k_buffer, param->req_data, param->req_len); + } + } + +l_end: + return k_buffer; +} + +static inline s32 sxe2_com_resp_copy_to_user(struct sxe2_drv_cmd_params *param, + void *k_buffer, u32 copy_len, + struct sxe2_obj *obj) +{ + if (!param || !k_buffer || param->resp_len < copy_len) + return -EINVAL; + + if (obj->func_type == SXE2_PF) { + if (copy_to_user(param->resp_data, k_buffer, copy_len)) + return -EFAULT; + } else { + memcpy(param->resp_data, k_buffer, copy_len); + } + + return 0; +} + +s32 sxe2_com_cmd_send(void *ad, struct sxe2_obj *obj, struct sxe2_drv_cmd_params *param); + +s32 sxe2_dpdk_q_map_resource_release(struct sxe2_adapter *adapter, struct sxe2_obj *obj); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.c new file mode 100644 index 0000000000000000000000000000000000000000..84edf5e18ac5013215c6b8b8e5cd4cb037c8710a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
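+ *
+ * Illustrative caller-side sketch (an assumption for documentation, not
+ * part of this patch): requests reach the handlers in this file through
+ * the common dispatch in sxe2_com_cmd_send(); user_req/user_resp below
+ * are hypothetical buffers.
+ *
+ *   struct sxe2_drv_cmd_params params = {
+ *           .opcode = SXE2_DRV_CMD_IPSEC_TXSA_ADD,
+ *           .req_data = user_req, .req_len = sizeof(*user_req),
+ *           .resp_data = user_resp, .resp_len = sizeof(*user_resp),
+ *   };
+ *   ret = sxe2_com_cmd_send(adapter, obj, &params);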
+ * + * @file: sxe2_com_ipsec.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_cmd.h" +#include "sxe2_log.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_ipsec.h" +#include "sxe2_ipsec.h" + +s32 sxe2_ipsec_cap_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_fwc_ipsec_capa_resq resp; + s32 ret = 0; + + memset(&resp, 0, sizeof(resp)); + + LOG_INFO_BDF("ipsec capacity get\n"); + ret = sxe2_ipsec_fwc_get_ipsec_capa(adapter, &resp); + if (ret) { + LOG_ERROR_BDF("failed to get ipsec capacity, ret=%d\n", ret); + ret = -EFAULT; + goto l_end; + } + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(resp); + +l_end: + return ret; +} + +s32 sxe2_ipsec_resource_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + + LOG_INFO_BDF("ipsec resource clear.\n"); + ret = sxe2_dpdk_ipsec_resource_release(adapter, obj); + if (ret) { + LOG_ERROR_BDF("failed to get ipsec capacity, ret=%d\n", ret); + ret = -EFAULT; + goto l_end; + } + +l_end: + return ret; +} + +s32 sxe2_ipsec_txsa_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_fwc_ipsec_txsa_add_req *req = (struct sxe2_fwc_ipsec_txsa_add_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + struct sxe2_fwc_ipsec_txsa_add_resp resp; + s32 ret = 0; + + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + memset(&resp, 0, sizeof(resp)); + + LOG_INFO_BDF("ipsec tx sa add."); + + req->func_type = obj->func_type; + req->drv_id = (u8)((obj->drv_type << 6) | obj->drv_id); + req->func_id = (obj->func_type == SXE2_VF ? obj->vf_id : obj->pf_id); + ret = sxe2_fwc_ipsec_tx_sa_add(adapter, req, &resp); + if (ret) { + LOG_ERROR_BDF("failed to add tx sa, ret=%d\n", ret); + ret = -EFAULT; + goto l_end; + } + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + ret = -EFAULT; + goto l_end; + } + + cmd_buf->resp_len = sizeof(resp); + +l_end: + kfree(req); + return ret; +} + +s32 sxe2_ipsec_txsa_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_fwc_ipsec_txsa_del_req *req = (struct sxe2_fwc_ipsec_txsa_del_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + s32 ret = 0; + + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req)); + ret = -EFAULT; + goto l_end; + } + LOG_INFO_BDF("ipsec tx sa del, id=%u.", req->sa_idx); + + req->func_type = obj->func_type; + req->drv_id = (u8)((obj->drv_type << 6) | obj->drv_id); + req->func_id = (obj->func_type == SXE2_VF ? 
obj->vf_id : obj->pf_id);
+ ret = sxe2_fwc_ipsec_tx_sa_del(adapter, req);
+ if (ret) {
+ LOG_ERROR_BDF("failed to del tx sa, ret=%d\n", ret);
+ ret = -EFAULT;
+ }
+
+l_end:
+ kfree(req);
+ return ret;
+}
+
+s32 sxe2_ipsec_rxsa_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+ struct sxe2_drv_cmd_params *cmd_buf)
+{
+ struct sxe2_fwc_ipsec_rxsa_add_req *req = (struct sxe2_fwc_ipsec_rxsa_add_req *)
+ sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+ struct sxe2_fwc_ipsec_rxsa_add_resp resp;
+ s32 ret = 0;
+
+ if (!req) {
+ LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req));
+ ret = -EFAULT;
+ goto l_end;
+ }
+ memset(&resp, 0, sizeof(resp));
+
+ req->func_type = obj->func_type;
+ req->drv_id = (u8)((obj->drv_type << 6) | obj->drv_id);
+ req->func_id = (obj->func_type == SXE2_VF ? obj->vf_id : obj->pf_id);
+ ret = sxe2_fwc_ipsec_rx_sa_add(adapter, req, &resp);
+ if (ret) {
+ LOG_ERROR_BDF("failed to add rx sa, ret=%d\n", ret);
+ ret = -EFAULT;
+ goto l_end;
+ }
+
+ if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) {
+ ret = -EFAULT;
+ goto l_end;
+ }
+
+ cmd_buf->resp_len = sizeof(resp);
+
+l_end:
+ kfree(req);
+ return ret;
+}
+
+s32 sxe2_ipsec_rxsa_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+ struct sxe2_drv_cmd_params *cmd_buf)
+{
+ struct sxe2_fwc_ipsec_rxsa_del_req *req = (struct sxe2_fwc_ipsec_rxsa_del_req *)
+ sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+ s32 ret = 0;
+
+ if (!req) {
+ LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", sizeof(*req));
+ ret = -EFAULT;
+ goto l_end;
+ }
+ LOG_DEBUG_BDF("ipsec rx sa del, id=%u.", req->sa_idx);
+
+ req->func_type = obj->func_type;
+ req->drv_id = (u8)((obj->drv_type << 6) | obj->drv_id);
+ req->func_id = (obj->func_type == SXE2_VF ? obj->vf_id : obj->pf_id);
+ ret = sxe2_fwc_ipsec_rx_sa_del(adapter, req);
+ if (ret) {
+ LOG_ERROR_BDF("failed to del rx sa, ret=%d\n", ret);
+ ret = -EFAULT;
+ goto l_end;
+ }
+
+l_end:
+ kfree(req);
+ return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9a9910add7d419374d9e0a55d536a38bd41ff7c
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_ipsec.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
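+ *
+ * Every SA add/del handler stamps the caller identity into the firmware
+ * request before sending (pattern taken from sxe2_com_ipsec.c):
+ *
+ *   req->func_type = obj->func_type;
+ *   req->drv_id = (u8)((obj->drv_type << 6) | obj->drv_id);
+ *   req->func_id = (obj->func_type == SXE2_VF ? obj->vf_id : obj->pf_id);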
+ * + * @file: sxe2_com_ipsec.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_ADAPTER_IPSEC_H__ +#define __SXE2_ADAPTER_IPSEC_H__ + +#include "sxe2_drv_cmd.h" +#include "sxe2_cmd.h" + +s32 sxe2_ipsec_cap_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_ipsec_resource_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_ipsec_txsa_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_ipsec_txsa_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_ipsec_rxsa_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_ipsec_rxsa_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..348e466420f70e8b17e077cbb44022300b2f56c9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_l2_filter.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_l2_filter.h" +#include "sxe2_switch.h" + +s32 sxe2_com_switch_filter_uc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_mac_filter_cfg_req mac_filter_cfg_req = {0}; + s32 ret = 0; + s32 i; + u16 vsi_id; + u8 addr[ETH_ALEN]; + u8 is_add; + + if (sizeof(struct sxe2_mac_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_mac_filter_cfg_req), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&mac_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(mac_filter_cfg_req.vsi_id); + is_add = mac_filter_cfg_req.is_add; + for (i = 0; i < ETH_ALEN; i++) + addr[i] = mac_filter_cfg_req.addr[i]; + + if (is_add) + ret = sxe2_ucmd_unicast_mac_add(adapter, vsi_id, addr); + else + ret = sxe2_ucmd_unicast_mac_del(adapter, vsi_id, addr); + + if (ret) { + LOG_ERROR_BDF("user driver(vsi=%u) %s unicast mac addr:%pM fail, ret=%d\n", + vsi_id, is_add ? "add" : "del", addr, ret); + } else { + LOG_DEBUG_BDF("user driver(vsi=%u) %s unicast mac addr:%pM\n", vsi_id, + is_add ? 
"add" : "del", addr); + } + +l_end: + return ret; +} + +s32 sxe2_com_switch_filter_mc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_mac_filter_cfg_req mac_filter_cfg_req = {0}; + s32 ret = 0; + s32 i; + u16 vsi_id; + u8 addr[ETH_ALEN]; + u8 is_add; + + if (sizeof(struct sxe2_mac_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_mac_filter_cfg_req), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&mac_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(mac_filter_cfg_req.vsi_id); + is_add = mac_filter_cfg_req.is_add; + for (i = 0; i < ETH_ALEN; i++) + addr[i] = mac_filter_cfg_req.addr[i]; + + if (is_add) + ret = sxe2_ucmd_multi_broad_mac_add(adapter, vsi_id, addr); + else + ret = sxe2_ucmd_multi_broad_mac_del(adapter, vsi_id, addr); + + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s multi mac addr:%pM fail, ret=%d\n", + vsi_id, is_add ? "add" : "del", addr, ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s multi mac addr:%pM\n", vsi_id, + is_add ? "add" : "del", addr); +l_end: + return ret; +} + +s32 sxe2_com_switch_filter_vlan_control(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vlan_filter_switch_req vlan_filter_switch_req = {0}; + s32 ret = 0; + u16 vsi_id; + bool is_oper_enable; + + if (sizeof(struct sxe2_vlan_filter_switch_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_vlan_filter_switch_req), + cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&vlan_filter_switch_req, cmd_buf->req_data, + cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(vlan_filter_switch_req.vsi_id); + is_oper_enable = (bool)vlan_filter_switch_req.is_oper_enable; + ret = sxe2_ucmd_vlan_filter_control(adapter, vsi_id, is_oper_enable); + if (ret) { + LOG_ERROR_BDF("user driver(vsi=%u) %s vlan filter control fail, ret=%d\n", + vsi_id, is_oper_enable ? "enable" : "disable", ret); + } else { + LOG_DEBUG_BDF("user driver(vsi=%u) %s vlan filter control.\n", vsi_id, + is_oper_enable ? "enable" : "disable"); + } +l_end: + return ret; +} + +s32 sxe2_com_switch_filter_vlan_rule(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vlan_filter_cfg_req vlan_filter_cfg_req = {0}; + struct sxe2_vlan vlan = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_vlan_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_vlan_filter_cfg_req), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&vlan_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(vlan_filter_cfg_req.vsi_id); + vlan.vid = le16_to_cpu(vlan_filter_cfg_req.vlan_id); + vlan.tpid = le16_to_cpu(vlan_filter_cfg_req.tpid_id); + vlan.prio = vlan_filter_cfg_req.prio; + is_add = vlan_filter_cfg_req.is_add; + ret = sxe2_ucmd_vlan_rule_process(adapter, vsi_id, &vlan, is_add); + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s vlan tpid:%u vid:%u prio:%u fail, ret=%d\n", + vsi_id, is_add ? 
"add" : "del", vlan.tpid, vlan.vid, vlan.prio, ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s vlan tpid:%u vid:%u prio:%u\n", + vsi_id, is_add ? "add" : "del", vlan.tpid, vlan.vid, vlan.prio); + +l_end: + return ret; +} + +s32 sxe2_com_switch_filter_promisc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_promisc_filter_cfg_req promisc_filter_cfg_req = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_promisc_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_promisc_filter_cfg_req), + cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&promisc_filter_cfg_req, cmd_buf->req_data, + cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(promisc_filter_cfg_req.vsi_id); + is_add = promisc_filter_cfg_req.is_add; + if (is_add) + ret = sxe2_ucmd_promisc_rule_add(adapter, vsi_id); + else + ret = sxe2_ucmd_promisc_rule_del(adapter, vsi_id); + + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s promisc fail, ret=%d\n", vsi_id, + is_add ? "set" : "clear", ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s promisc.\n", vsi_id, + is_add ? "set" : "clear"); +l_end: + return ret; +} + +s32 sxe2_com_switch_filter_allmulti(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_promisc_filter_cfg_req promisc_filter_cfg_req = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_promisc_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_promisc_filter_cfg_req), + cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&promisc_filter_cfg_req, cmd_buf->req_data, + cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(promisc_filter_cfg_req.vsi_id); + is_add = promisc_filter_cfg_req.is_add; + if (is_add) + ret = sxe2_ucmd_allmulti_rule_add(adapter, vsi_id); + else + ret = sxe2_ucmd_allmulti_rule_del(adapter, vsi_id); + + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s promisc fail, ret=%d\n", vsi_id, + is_add ? "set" : "clear", ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s promisc.\n", vsi_id, + is_add ? "set" : "clear"); +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..4184e4f24303f0376b1b10453435820836f1845c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_l2_filter.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_l2_filter.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_L2_FILTER_H__ +#define __SXE2_COM_L2_FILTER_H__ + +#include "sxe2.h" +s32 sxe2_com_switch_filter_uc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_filter_mc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_filter_vlan_control(struct sxe2_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_filter_vlan_rule(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_filter_promisc(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_filter_allmulti(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.c new file mode 100644 index 0000000000000000000000000000000000000000..ef59a852f56dfff74678df638c08b1306c9d9527 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_rss.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_common.h" +#include "sxe2_rss.h" +#include "sxe2_com_rss.h" + +s32 sxe2_com_rss_key_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_rss_key_req *req = NULL; + s32 ret = 0; + + req = (struct sxe2_rss_key_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("sxe2_com_req_data_copy_to_kernel failed\n"); + ret = -EFAULT; + goto l_end; + } + + if (req->key_size != SXE2_RSS_HASH_KEY_SIZE) { + LOG_ERROR_BDF("rss hash key len err %u != %u\n", req->key_size, + SXE2_RSS_HASH_KEY_SIZE); + ret = -EINVAL; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + ret = sxe2_fwc_rss_hkey_set(vsi, req->key); + LOG_DEBUG_BDF("vsi[%u] rss hkey set ret[%d]\n", req->vsi_id, ret); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_rss_lut_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_rss_lut_req *req = NULL; + s32 ret = 0; + + req = (struct sxe2_rss_lut_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("sxe2_com_req_data_copy_to_kernel failed\n"); + ret = -EFAULT; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + if (vsi->rss_ctxt.lut_size != req->lut_size) { + LOG_ERROR_BDF("vsi[%u] lut size[%u] not [%u].\n", req->vsi_id, + vsi->rss_ctxt.lut_size, req->lut_size); + ret = -EINVAL; + goto l_unlock; + 
} + + ret = sxe2_fwc_rss_lut_set(vsi, req->lut, req->lut_size); + LOG_DEBUG_BDF("vsi[%u] rss lut[%u] set ret[%d]\n", req->vsi_id, req->lut_size, + ret); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_rss_func_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_rss_func_req *req = NULL; + s32 ret = 0; + u8 hash_type_old; + + req = (struct sxe2_rss_func_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("sxe2_com_req_data_copy_to_kernel failed\n"); + ret = -EFAULT; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + hash_type_old = vsi->rss_ctxt.hash_type; + vsi->rss_ctxt.hash_type = req->func; + ret = sxe2_fwc_rss_hash_ctrl_set(vsi); + if (ret != 0) + vsi->rss_ctxt.hash_type = hash_type_old; + + LOG_DEBUG_BDF("vsi[%u] rss hash type[%u] set ret[%d]\n", req->vsi_id, req->func, + ret); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + kfree(req); + return ret; +} + +STATIC void sxe2_com_rss_hf_req_convert_hash_cfg(struct sxe2_adapter *adapter, + struct sxe2_rss_hash_cfg *hash_cfg, + struct sxe2_rss_hf_req *req) +{ + u32 tmp_headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + u32 i = 0; + + (void)memset(hash_cfg, 0, sizeof(*hash_cfg)); + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_HDR_MAX); i++) + tmp_headers[i] = le32_to_cpu(req->headers[i]); + + bitmap_from_arr32(hash_cfg->headers, tmp_headers, SXE2_FLOW_HDR_MAX); + + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++) + tmp_flds[i] = le32_to_cpu(req->hash_flds[i]); + + bitmap_from_arr32(hash_cfg->hash_flds, tmp_flds, SXE2_FLOW_FLD_ID_MAX); + + hash_cfg->hdr_type = le32_to_cpu(req->hdr_type); + hash_cfg->symm = req->symm == 1 ? true : false; +} + +STATIC s32 sxe2_com_rss_hf_cfg(struct sxe2_adapter *adapter, struct sxe2_rss_hf_req *req, + bool add) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_rss_hash_cfg cfg; + s32 ret = SXE2_VF_ERR_SUCCESS; + + if (sxe2_is_safe_mode(adapter)) { + LOG_ERROR_BDF("sxe2 rss is in safe mode, not support.\n"); + ret = -EPERM; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, req->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", req->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + sxe2_com_rss_hf_req_convert_hash_cfg(adapter, &cfg, req); + + if (bitmap_empty(cfg.headers, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", vsi->type, + vsi->id_in_pf); + ret = -EINVAL; + goto l_unlock; + } + + if (bitmap_empty(cfg.hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR_BDF("invalid flds type! 
vsi type: %u, idx: %u\n", vsi->type, + vsi->id_in_pf); + ret = -EINVAL; + goto l_unlock; + } + + if (add) { + ret = sxe2_add_rss_flow(&adapter->rss_flow_ctxt, vsi->id_in_pf, &cfg); + } else { + ret = sxe2_rss_rem_cfg(&adapter->rss_flow_ctxt, vsi->id_in_pf, &cfg); + if (ret == -ENOENT) { + ret = 0; + LOG_INFO_BDF("rss cfg not found\n"); + } + } + if (ret != 0) { + LOG_ERROR_BDF("Failed to cfg[%u] rss cfg vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf, vsi->idx_in_dev); + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + return ret; +} + +s32 sxe2_com_rss_hf_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_rss_hf_req *req = NULL; + s32 ret = 0; + + req = (struct sxe2_rss_hf_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("sxe2_com_req_data_copy_to_kernel failed\n"); + ret = -EFAULT; + goto l_end; + } + + ret = sxe2_com_rss_hf_cfg(adapter, req, true); + LOG_DEBUG_BDF("vsi[%u] add rss hf cfg, ret[%d].\n", req->vsi_id, ret); + +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_rss_hf_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_rss_hf_req *req = NULL; + s32 ret = 0; + + req = (struct sxe2_rss_hf_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + if (!req) { + LOG_ERROR_BDF("sxe2_com_req_data_copy_to_kernel failed\n"); + ret = -EFAULT; + goto l_end; + } + + ret = sxe2_com_rss_hf_cfg(adapter, req, false); + LOG_DEBUG_BDF("vsi[%u] del rss hf cfg, ret[%d].\n", req->vsi_id, ret); + +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_rss_hf_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *vsi = NULL; + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", cmd_buf->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + sxe2_rss_vsi_flow_clean(vsi); + LOG_DEBUG_BDF("vsi[%u] rss hf clear.\n", cmd_buf->vsi_id); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.h new file mode 100644 index 0000000000000000000000000000000000000000..e741a148ef6048f06253aae6f83f97687d56ac9f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_rss.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
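+ *
+ * All RSS handlers run under adapter->vsi_ctxt.lock and validate user
+ * input (key length against SXE2_RSS_HASH_KEY_SIZE, LUT size against the
+ * VSI's rss_ctxt.lut_size, non-empty header/field bitmaps) before any
+ * hardware or firmware access.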
+ * + * @file: sxe2_com_rss.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_RSS_H__ +#define __SXE2_COM_RSS_H__ + +#include "sxe2.h" + +s32 sxe2_com_rss_key_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_rss_lut_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_rss_func_set(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_rss_hf_add(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_rss_hf_del(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_rss_hf_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..3ea95ef66eae752d448088c40b33f433640b2789 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.c @@ -0,0 +1,579 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_stats.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_log.h" +#include "sxe2_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_stats.h" +#include "sxe2_ethtool.h" + +#define SXE2_COM_CALC_READCLEAR_STATS(S, N, O) ((S) = (N) - (O)) +#define SXE2_COM_CALC_ADD_STATS(S, N, O) ((S) = (N) + (O)) + +STATIC inline void sxe2_com_calc_nonclear_stats(u64 *s, u64 n, u64 *o) +{ + if (n >= *o) + *s += n - *o; + else + *s = n; + *o = n; +} + +STATIC void sxe2_com_vsi_stats_calc(const struct sxe2_vsi_hw_stats *new_stats, + struct sxe2_vsi_hw_stats *old_stats, + struct sxe2_vsi_hw_stats *stats) +{ + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_vsi_unicast_packets, + new_stats->rx_vsi_unicast_packets, + old_stats->rx_vsi_unicast_packets); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_vsi_bytes, new_stats->rx_vsi_bytes, + old_stats->rx_vsi_bytes); + SXE2_COM_CALC_READCLEAR_STATS(stats->tx_vsi_unicast_packets, + new_stats->tx_vsi_unicast_packets, + old_stats->tx_vsi_unicast_packets); + SXE2_COM_CALC_READCLEAR_STATS(stats->tx_vsi_bytes, new_stats->tx_vsi_bytes, + old_stats->tx_vsi_bytes); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_vsi_multicast_packets, + new_stats->rx_vsi_multicast_packets, + old_stats->rx_vsi_multicast_packets); + SXE2_COM_CALC_READCLEAR_STATS(stats->tx_vsi_multicast_packets, + new_stats->tx_vsi_multicast_packets, + old_stats->tx_vsi_multicast_packets); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_vsi_broadcast_packets, + new_stats->rx_vsi_broadcast_packets, + old_stats->rx_vsi_broadcast_packets); + SXE2_COM_CALC_READCLEAR_STATS(stats->tx_vsi_broadcast_packets, + new_stats->tx_vsi_broadcast_packets, + old_stats->tx_vsi_broadcast_packets); +} + +STATIC void sxe2_com_cp_vsi_stats_accumulate(const struct sxe2_vsi_hw_stats *k_stats, + struct sxe2_vsi_hw_stats *u_stats, + struct sxe2_vsi_hw_stats *stats) +{ + if (!k_stats || !u_stats || !stats) + return; + + SXE2_COM_CALC_ADD_STATS(stats->rx_vsi_unicast_packets, + k_stats->rx_vsi_unicast_packets, + u_stats->rx_vsi_unicast_packets); + 
SXE2_COM_CALC_ADD_STATS(stats->rx_vsi_bytes, k_stats->rx_vsi_bytes,
+ u_stats->rx_vsi_bytes);
+ SXE2_COM_CALC_ADD_STATS(stats->tx_vsi_unicast_packets,
+ k_stats->tx_vsi_unicast_packets,
+ u_stats->tx_vsi_unicast_packets);
+ SXE2_COM_CALC_ADD_STATS(stats->tx_vsi_bytes, k_stats->tx_vsi_bytes,
+ u_stats->tx_vsi_bytes);
+ SXE2_COM_CALC_ADD_STATS(stats->rx_vsi_multicast_packets,
+ k_stats->rx_vsi_multicast_packets,
+ u_stats->rx_vsi_multicast_packets);
+ SXE2_COM_CALC_ADD_STATS(stats->tx_vsi_multicast_packets,
+ k_stats->tx_vsi_multicast_packets,
+ u_stats->tx_vsi_multicast_packets);
+ SXE2_COM_CALC_ADD_STATS(stats->rx_vsi_broadcast_packets,
+ k_stats->rx_vsi_broadcast_packets,
+ u_stats->rx_vsi_broadcast_packets);
+ SXE2_COM_CALC_ADD_STATS(stats->tx_vsi_broadcast_packets,
+ k_stats->tx_vsi_broadcast_packets,
+ u_stats->tx_vsi_broadcast_packets);
+}
+
+STATIC void sxe2_com_vsi_stats_copy_to_user(struct sxe2_drv_vsi_stats_resp *resp,
+ struct sxe2_vsi_hw_stats *stats)
+{
+ resp->rx_vsi_unicast_packets = stats->rx_vsi_unicast_packets;
+ resp->rx_vsi_bytes = stats->rx_vsi_bytes;
+ resp->tx_vsi_unicast_packets = stats->tx_vsi_unicast_packets;
+ resp->tx_vsi_bytes = stats->tx_vsi_bytes;
+ resp->rx_vsi_multicast_packets = stats->rx_vsi_multicast_packets;
+ resp->tx_vsi_multicast_packets = stats->tx_vsi_multicast_packets;
+ resp->rx_vsi_broadcast_packets = stats->rx_vsi_broadcast_packets;
+ resp->tx_vsi_broadcast_packets = stats->tx_vsi_broadcast_packets;
+}
+
+s32 sxe2_com_vsi_stat_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+ struct sxe2_drv_cmd_params *cmd_buf)
+{
+ struct sxe2_drv_vsi_stats_req *req =
+ (struct sxe2_drv_vsi_stats_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, obj);
+ struct sxe2_drv_vsi_stats_resp resp = {0};
+ struct sxe2_vsi *vsi = NULL;
+ struct sxe2_vsi_hw_stats stats = {0};
+ struct sxe2_vsi_hw_stats *last_stats = NULL;
+ struct sxe2_vsi_hw_stats k_stats = {0};
+ struct sxe2_vsi_hw_stats u_stats = {0};
+ struct sxe2_vf_node *vf = NULL;
+ s32 ret = 0;
+ u16 vf_idx = cmd_buf->repr_id;
+ u16 vsi_id;
+
+ if (!req) {
+ LOG_ERROR_BDF("invalid vsi stats get req.\n");
+ ret = -EINVAL;
+ goto l_end;
+ }
+
+ vsi_id = req->vsi_id;
+
+ if (obj->func_type == SXE2_PF && vf_idx < SXE2_VF_NUM) {
+ mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+ vf = sxe2_vf_node_get(adapter, vf_idx);
+ if (!vf) {
+ mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+ LOG_ERROR_BDF("vf not found, vf_idx=%u\n", vf_idx);
+ ret = -EINVAL;
+ goto l_end;
+ }
+ ret = sxe2_check_vf_ready_for_cfg(vf);
+ if (ret) {
+ LOG_ERROR_BDF("VF %u not ready for stats get.\n", vf_idx);
+ } else {
+ if (vf->vsi) {
+ last_stats = &vf->vsi->vsi_stats.parse_vsi_hw_stats;
+ sxe2_hw_vsi_stats_update(vf->vsi);
+ sxe2_com_vsi_stats_calc(&vf->vsi->vsi_stats.vsi_hw_stats,
+ last_stats, &k_stats);
+ } else {
+ LOG_INFO_BDF("vf %u vsi not found.\n", vf_idx);
+ }
+
+ if (vf->dpdk_vf_vsi) {
+ last_stats = &vf->dpdk_vf_vsi->vsi_stats.parse_vsi_hw_stats;
+ sxe2_hw_vsi_stats_update(vf->dpdk_vf_vsi);
+ sxe2_com_vsi_stats_calc(&vf->dpdk_vf_vsi->vsi_stats.vsi_hw_stats,
+ last_stats, &u_stats);
+ } else {
+ LOG_ERROR_BDF("vf %u dpdk vsi not found.\n", vf_idx);
+ }
+
+ sxe2_com_cp_vsi_stats_accumulate(&k_stats, &u_stats, &stats);
+ }
+ mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+ } else {
+ mutex_lock(&adapter->vsi_ctxt.lock);
+ vsi = sxe2_vsi_get_by_idx(adapter, vsi_id);
+ if (!vsi) {
+ mutex_unlock(&adapter->vsi_ctxt.lock);
+ LOG_ERROR_BDF("invalid vsi id:%d.\n", vsi_id);
+ ret = -EINVAL;
+ goto l_end;
+ }
+
+ if (vsi->type == 
SXE2_VSI_T_DPDK_PF) { + last_stats = &vsi->vsi_stats.parse_vsi_hw_stats; + sxe2_hw_vsi_stats_update(vsi); + sxe2_com_vsi_stats_calc(&vsi->vsi_stats.vsi_hw_stats, last_stats, + &stats); + } + mutex_unlock(&adapter->vsi_ctxt.lock); + } + + sxe2_com_vsi_stats_copy_to_user(&resp, &stats); + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed.\n"); + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(resp); + LOG_INFO_BDF("sxe2 com vsi[%d] stats get is completed.\n", vsi_id); + +l_end: + kfree(req); + return ret; +} + +s32 sxe2_com_vsi_stat_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_vsi_hw_stats *last_stats = NULL; + struct sxe2_vf_node *vf = NULL; + s32 ret = 0; + u16 vsi_id = cmd_buf->vsi_id; + u16 vf_idx = cmd_buf->repr_id; + + if (obj->func_type == SXE2_PF && vf_idx < SXE2_VF_NUM) { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + vf = sxe2_vf_node_get(adapter, vf_idx); + if (!vf) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + LOG_ERROR_BDF("vf not found, vf_idx=%u\n", vf_idx); + ret = -EINVAL; + goto l_end; + } + ret = sxe2_check_vf_ready_for_cfg(vf); + if (ret) { + LOG_ERROR_BDF("VF %u not ready for mac cfg.\n", vf_idx); + } else { + if (vf->vsi) { + sxe2_hw_vsi_stats_update(vf->vsi); + last_stats = &vf->vsi->vsi_stats.parse_vsi_hw_stats; + (void)memcpy(last_stats, &vf->vsi->vsi_stats.vsi_hw_stats, + sizeof(*last_stats)); + } else { + LOG_INFO_BDF("vf %u vsi not found.\n", vf_idx); + } + + if (vf->dpdk_vf_vsi) { + sxe2_hw_vsi_stats_update(vf->dpdk_vf_vsi); + last_stats = &vf->dpdk_vf_vsi->vsi_stats.parse_vsi_hw_stats; + (void)memcpy(last_stats, + &vf->dpdk_vf_vsi->vsi_stats.vsi_hw_stats, + sizeof(*last_stats)); + } else { + LOG_INFO_BDF("vf %u dpdk vsi not found.\n", vf_idx); + } + } + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } else { + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("invalid vsi id:%d.\n", vsi_id); + ret = -EINVAL; + goto l_end; + } + + if (vsi->type == SXE2_VSI_T_DPDK_PF) { + sxe2_hw_vsi_stats_update(vsi); + last_stats = &vsi->vsi_stats.parse_vsi_hw_stats; + (void)memcpy(last_stats, &vsi->vsi_stats.vsi_hw_stats, + sizeof(*last_stats)); + } + mutex_unlock(&adapter->vsi_ctxt.lock); + } + LOG_INFO_BDF("sxe2 com vsi[%d] stats clear is completed.\n", vsi_id); + +l_end: + return ret; +} + +STATIC void sxe2_com_mac_nonclear_stats_calc(const struct sxe2_pf_hw_stats *new_stats, + struct sxe2_pf_hw_stats *old_stats, + struct sxe2_pf_hw_stats *stats) +{ + u8 i = 0; + + sxe2_com_calc_nonclear_stats(&stats->rx_oversize_good, new_stats->rx_oversize_good, + &old_stats->rx_oversize_good); + sxe2_com_calc_nonclear_stats(&stats->rx_discards_phy, new_stats->rx_discards_phy, + &old_stats->rx_discards_phy); + sxe2_com_calc_nonclear_stats(&stats->rx_undersize_good, + new_stats->rx_undersize_good, + &old_stats->rx_undersize_good); + sxe2_com_calc_nonclear_stats(&stats->rx_runt_error, new_stats->rx_runt_error, + &old_stats->rx_runt_error); + sxe2_com_calc_nonclear_stats(&stats->rx_jabbers, new_stats->rx_jabbers, + &old_stats->rx_jabbers); + sxe2_com_calc_nonclear_stats(&stats->tx_frame_good, new_stats->tx_frame_good, + &old_stats->tx_frame_good); + sxe2_com_calc_nonclear_stats(&stats->rx_frame_good, new_stats->rx_frame_good, + &old_stats->rx_frame_good); + 
sxe2_com_calc_nonclear_stats(&stats->rx_crc_errors, new_stats->rx_crc_errors, + &old_stats->rx_crc_errors); + sxe2_com_calc_nonclear_stats(&stats->tx_bytes_good, new_stats->tx_bytes_good, + &old_stats->tx_bytes_good); + sxe2_com_calc_nonclear_stats(&stats->rx_bytes_good, new_stats->rx_bytes_good, + &old_stats->rx_bytes_good); + sxe2_com_calc_nonclear_stats(&stats->tx_multicast_good, + new_stats->tx_multicast_good, + &old_stats->tx_multicast_good); + sxe2_com_calc_nonclear_stats(&stats->tx_broadcast_good, + new_stats->tx_broadcast_good, + &old_stats->tx_broadcast_good); + sxe2_com_calc_nonclear_stats(&stats->rx_multicast_good, + new_stats->rx_multicast_good, + &old_stats->rx_multicast_good); + sxe2_com_calc_nonclear_stats(&stats->rx_broadcast_good, + new_stats->rx_broadcast_good, + &old_stats->rx_broadcast_good); + sxe2_com_calc_nonclear_stats(&stats->rx_len_errors, new_stats->rx_len_errors, + &old_stats->rx_len_errors); + sxe2_com_calc_nonclear_stats(&stats->rx_out_of_range_errors, + new_stats->rx_out_of_range_errors, + &old_stats->rx_out_of_range_errors); + sxe2_com_calc_nonclear_stats(&stats->rx_oversize_pkts_phy, + new_stats->rx_oversize_pkts_phy, + &old_stats->rx_oversize_pkts_phy); + sxe2_com_calc_nonclear_stats(&stats->rx_symbol_err, new_stats->rx_symbol_err, + &old_stats->rx_symbol_err); + sxe2_com_calc_nonclear_stats(&stats->rx_pause_frame, new_stats->rx_pause_frame, + &old_stats->rx_pause_frame); + sxe2_com_calc_nonclear_stats(&stats->tx_pause_frame, new_stats->tx_pause_frame, + &old_stats->tx_pause_frame); + sxe2_com_calc_nonclear_stats(&stats->tx_dropped_link_down, + new_stats->tx_dropped_link_down, + &old_stats->tx_dropped_link_down); + sxe2_com_calc_nonclear_stats(&stats->tx_bytes_good_bad, + new_stats->tx_bytes_good_bad, + &old_stats->tx_bytes_good_bad); + sxe2_com_calc_nonclear_stats(&stats->tx_frame_good_bad, + new_stats->tx_frame_good_bad, + &old_stats->tx_frame_good_bad); + sxe2_com_calc_nonclear_stats(&stats->rx_size_64, new_stats->rx_size_64, + &old_stats->rx_size_64); + sxe2_com_calc_nonclear_stats(&stats->rx_size_65_127, new_stats->rx_size_65_127, + &old_stats->rx_size_65_127); + sxe2_com_calc_nonclear_stats(&stats->rx_size_128_255, new_stats->rx_size_128_255, + &old_stats->rx_size_128_255); + sxe2_com_calc_nonclear_stats(&stats->rx_size_256_511, new_stats->rx_size_256_511, + &old_stats->rx_size_256_511); + sxe2_com_calc_nonclear_stats(&stats->rx_size_512_1023, new_stats->rx_size_512_1023, + &old_stats->rx_size_512_1023); + sxe2_com_calc_nonclear_stats(&stats->rx_size_1024_1522, + new_stats->rx_size_1024_1522, + &old_stats->rx_size_1024_1522); + sxe2_com_calc_nonclear_stats(&stats->rx_size_1523_max, new_stats->rx_size_1523_max, + &old_stats->rx_size_1523_max); + sxe2_com_calc_nonclear_stats(&stats->rx_illegal_bytes, new_stats->rx_illegal_bytes, + &old_stats->rx_illegal_bytes); + sxe2_com_calc_nonclear_stats(&stats->tx_unicast, new_stats->tx_unicast, + &old_stats->tx_unicast); + sxe2_com_calc_nonclear_stats(&stats->tx_broadcast, new_stats->tx_broadcast, + &old_stats->tx_broadcast); + sxe2_com_calc_nonclear_stats(&stats->tx_multicast, new_stats->tx_multicast, + &old_stats->tx_multicast); + sxe2_com_calc_nonclear_stats(&stats->tx_vlan_packet_good, + new_stats->tx_vlan_packet_good, + &old_stats->tx_vlan_packet_good); + sxe2_com_calc_nonclear_stats(&stats->tx_size_64, new_stats->tx_size_64, + &old_stats->tx_size_64); + sxe2_com_calc_nonclear_stats(&stats->tx_size_65_127, new_stats->tx_size_65_127, + &old_stats->tx_size_65_127); + 
sxe2_com_calc_nonclear_stats(&stats->tx_size_128_255, new_stats->tx_size_128_255, + &old_stats->tx_size_128_255); + sxe2_com_calc_nonclear_stats(&stats->tx_size_256_511, new_stats->tx_size_256_511, + &old_stats->tx_size_256_511); + sxe2_com_calc_nonclear_stats(&stats->tx_size_512_1023, new_stats->tx_size_512_1023, + &old_stats->tx_size_512_1023); + sxe2_com_calc_nonclear_stats(&stats->tx_size_1024_1522, + new_stats->tx_size_1024_1522, + &old_stats->tx_size_1024_1522); + sxe2_com_calc_nonclear_stats(&stats->tx_size_1523_max, new_stats->tx_size_1523_max, + &old_stats->tx_size_1523_max); + sxe2_com_calc_nonclear_stats(&stats->tx_underflow_error, + new_stats->tx_underflow_error, + &old_stats->tx_underflow_error); + sxe2_com_calc_nonclear_stats(&stats->rx_byte_good_bad, new_stats->rx_byte_good_bad, + &old_stats->rx_byte_good_bad); + sxe2_com_calc_nonclear_stats(&stats->rx_frame_good_bad, + new_stats->rx_frame_good_bad, + &old_stats->rx_frame_good_bad); + sxe2_com_calc_nonclear_stats(&stats->rx_unicast_good, new_stats->rx_unicast_good, + &old_stats->rx_unicast_good); + sxe2_com_calc_nonclear_stats(&stats->rx_vlan_packets, new_stats->rx_vlan_packets, + &old_stats->rx_vlan_packets); + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + sxe2_com_calc_nonclear_stats(&stats->prio_xoff_rx[i], + new_stats->prio_xoff_rx[i], + &old_stats->prio_xoff_rx[i]); + sxe2_com_calc_nonclear_stats(&stats->prio_xoff_tx[i], + new_stats->prio_xoff_tx[i], + &old_stats->prio_xoff_tx[i]); + sxe2_com_calc_nonclear_stats(&stats->prio_xon_rx[i], + new_stats->prio_xon_rx[i], + &old_stats->prio_xon_rx[i]); + sxe2_com_calc_nonclear_stats(&stats->prio_xon_tx[i], + new_stats->prio_xon_tx[i], + &old_stats->prio_xon_tx[i]); + sxe2_com_calc_nonclear_stats(&stats->prio_xon_2_xoff[i], + new_stats->prio_xon_2_xoff[i], + &old_stats->prio_xon_2_xoff[i]); + } +} + +STATIC void sxe2_com_mac_readclear_stats_calc(const struct sxe2_pf_hw_stats *new_stats, + struct sxe2_pf_hw_stats *old_stats, + struct sxe2_pf_hw_stats *stats) +{ + u8 i = 0; + + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_discards_ips_phy, + new_stats->rx_discards_ips_phy, + old_stats->rx_discards_ips_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_out_of_buffer, + new_stats->rx_out_of_buffer, + old_stats->rx_out_of_buffer); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_qblock_drop, new_stats->rx_qblock_drop, + old_stats->rx_qblock_drop); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_pcs_symbol_err_phy, + new_stats->rx_pcs_symbol_err_phy, + old_stats->rx_pcs_symbol_err_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_corrected_bits_phy, + new_stats->rx_corrected_bits_phy, + old_stats->rx_corrected_bits_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_err_lane_0_phy, + new_stats->rx_err_lane_0_phy, + old_stats->rx_err_lane_0_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_err_lane_1_phy, + new_stats->rx_err_lane_1_phy, + old_stats->rx_err_lane_1_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_err_lane_2_phy, + new_stats->rx_err_lane_2_phy, + old_stats->rx_err_lane_2_phy); + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_err_lane_3_phy, + new_stats->rx_err_lane_3_phy, + old_stats->rx_err_lane_3_phy); + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + SXE2_COM_CALC_READCLEAR_STATS(stats->rx_prio_buf_discard[i], + new_stats->rx_prio_buf_discard[i], + old_stats->rx_prio_buf_discard[i]); + } +} + +STATIC void sxe2_com_mac_stats_copy_to_user(struct sxe2_drv_mac_stats_resp *resp, + struct sxe2_pf_hw_stats *stats) +{ + u8 i = 0; + + resp->rx_out_of_buffer = stats->rx_out_of_buffer; + resp->rx_qblock_drop = 
stats->rx_qblock_drop; + resp->tx_frame_good = stats->tx_frame_good; + resp->rx_frame_good = stats->rx_frame_good; + resp->rx_crc_errors = stats->rx_crc_errors; + resp->tx_bytes_good = stats->tx_bytes_good; + resp->rx_bytes_good = stats->rx_bytes_good; + resp->tx_multicast_good = stats->tx_multicast_good; + resp->tx_broadcast_good = stats->tx_broadcast_good; + resp->rx_multicast_good = stats->rx_multicast_good; + resp->rx_broadcast_good = stats->rx_broadcast_good; + resp->rx_len_errors = stats->rx_len_errors; + resp->rx_out_of_range_errors = stats->rx_out_of_range_errors; + resp->rx_oversize_pkts_phy = stats->rx_oversize_pkts_phy; + resp->rx_symbol_err = stats->rx_symbol_err; + resp->rx_pause_frame = stats->rx_pause_frame; + resp->tx_pause_frame = stats->tx_pause_frame; + resp->rx_discards_phy = stats->rx_discards_phy; + resp->rx_discards_ips_phy = stats->rx_discards_ips_phy; + resp->tx_dropped_link_down = stats->tx_dropped_link_down; + resp->rx_undersize_good = stats->rx_undersize_good; + resp->rx_runt_error = stats->rx_runt_error; + resp->tx_bytes_good_bad = stats->tx_bytes_good_bad; + resp->tx_frame_good_bad = stats->tx_frame_good_bad; + resp->rx_jabbers = stats->rx_jabbers; + resp->rx_size_64 = stats->rx_size_64; + resp->rx_size_65_127 = stats->rx_size_65_127; + resp->rx_size_128_255 = stats->rx_size_128_255; + resp->rx_size_256_511 = stats->rx_size_256_511; + resp->rx_size_512_1023 = stats->rx_size_512_1023; + resp->rx_size_1024_1522 = stats->rx_size_1024_1522; + resp->rx_size_1523_max = stats->rx_size_1523_max; + resp->rx_pcs_symbol_err_phy = stats->rx_pcs_symbol_err_phy; + resp->rx_corrected_bits_phy = stats->rx_corrected_bits_phy; + resp->rx_err_lane_0_phy = stats->rx_err_lane_0_phy; + resp->rx_err_lane_1_phy = stats->rx_err_lane_1_phy; + resp->rx_err_lane_2_phy = stats->rx_err_lane_2_phy; + resp->rx_err_lane_3_phy = stats->rx_err_lane_3_phy; + resp->rx_illegal_bytes = stats->rx_illegal_bytes; + resp->rx_oversize_good = stats->rx_oversize_good; + resp->tx_unicast = stats->tx_unicast; + resp->tx_broadcast = stats->tx_broadcast; + resp->tx_multicast = stats->tx_multicast; + resp->tx_vlan_packet_good = stats->tx_vlan_packet_good; + resp->tx_size_64 = stats->tx_size_64; + resp->tx_size_65_127 = stats->tx_size_65_127; + resp->tx_size_128_255 = stats->tx_size_128_255; + resp->tx_size_256_511 = stats->tx_size_256_511; + resp->tx_size_512_1023 = stats->tx_size_512_1023; + resp->tx_size_1024_1522 = stats->tx_size_1024_1522; + resp->tx_size_1523_max = stats->tx_size_1523_max; + resp->tx_underflow_error = stats->tx_underflow_error; + resp->rx_byte_good_bad = stats->rx_byte_good_bad; + resp->rx_frame_good_bad = stats->rx_frame_good_bad; + resp->rx_unicast_good = stats->rx_unicast_good; + resp->rx_vlan_packets = stats->rx_vlan_packets; + for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) { + resp->rx_prio_buf_discard[i] = stats->rx_prio_buf_discard[i]; + resp->prio_xoff_rx[i] = stats->prio_xoff_rx[i]; + resp->prio_xoff_tx[i] = stats->prio_xoff_tx[i]; + resp->prio_xon_rx[i] = stats->prio_xon_rx[i]; + resp->prio_xon_tx[i] = stats->prio_xon_tx[i]; + resp->prio_xon_2_xoff[i] = stats->prio_xon_2_xoff[i]; + } +} + +s32 sxe2_com_mac_stat_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_mac_stats_resp *resp = NULL; + struct sxe2_pf_hw_stats *hw_stats = &adapter->pf_stats.parse_pf_hw_stats; + struct sxe2_pf_hw_stats *last_hw_stats = + &adapter->pf_stats.parse_last_pf_hw_stats; + s32 ret = 0; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if 
(!resp) {
+ LOG_ERROR_BDF("kzalloc resp mem failed.\n");
+ ret = -ENOMEM;
+ goto l_end;
+ }
+
+ sxe2_hw_pf_stats_update(adapter);
+ sxe2_com_mac_nonclear_stats_calc(&adapter->pf_stats.last_pf_hw_stats,
+ last_hw_stats, hw_stats);
+ sxe2_com_mac_readclear_stats_calc(&adapter->pf_stats.pf_hw_stats, last_hw_stats,
+ hw_stats);
+
+ sxe2_com_mac_stats_copy_to_user(resp, hw_stats);
+ if (sxe2_com_resp_copy_to_user(cmd_buf, resp, sizeof(*resp), obj) != 0) {
+ LOG_ERROR_BDF("copy_to_user failed.\n");
+ ret = -EFAULT;
+ goto l_end;
+ }
+ cmd_buf->resp_len = sizeof(*resp);
+
+ LOG_INFO_BDF("sxe2 com pf[%d] stats get is completed.\n", adapter->pf_idx);
+
+l_end:
+ kfree(resp);
+ return ret;
+}
+
+s32 sxe2_com_mac_stat_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+ struct sxe2_drv_cmd_params *cmd_buf)
+{
+ struct sxe2_pf_hw_stats *hw_stats = &adapter->pf_stats.parse_pf_hw_stats;
+ struct sxe2_pf_hw_stats *last_hw_stats =
+ &adapter->pf_stats.parse_last_pf_hw_stats;
+ u8 i = 0;
+
+ sxe2_hw_pf_stats_update(adapter);
+
+ (void)memcpy(last_hw_stats, &adapter->pf_stats.last_pf_hw_stats,
+ sizeof(*last_hw_stats));
+ (void)memset(hw_stats, 0, sizeof(*hw_stats));
+
+ last_hw_stats->rx_discards_ips_phy =
+ adapter->pf_stats.pf_hw_stats.rx_discards_ips_phy;
+ last_hw_stats->rx_out_of_buffer = adapter->pf_stats.pf_hw_stats.rx_out_of_buffer;
+ last_hw_stats->rx_qblock_drop = adapter->pf_stats.pf_hw_stats.rx_qblock_drop;
+ last_hw_stats->rx_pcs_symbol_err_phy =
+ adapter->pf_stats.pf_hw_stats.rx_pcs_symbol_err_phy;
+ last_hw_stats->rx_corrected_bits_phy =
+ adapter->pf_stats.pf_hw_stats.rx_corrected_bits_phy;
+ last_hw_stats->rx_err_lane_0_phy =
+ adapter->pf_stats.pf_hw_stats.rx_err_lane_0_phy;
+ last_hw_stats->rx_err_lane_1_phy =
+ adapter->pf_stats.pf_hw_stats.rx_err_lane_1_phy;
+ last_hw_stats->rx_err_lane_2_phy =
+ adapter->pf_stats.pf_hw_stats.rx_err_lane_2_phy;
+ last_hw_stats->rx_err_lane_3_phy =
+ adapter->pf_stats.pf_hw_stats.rx_err_lane_3_phy;
+ for (i = 0; i < SXE2_MAX_USER_PRIORITY; i++) {
+ last_hw_stats->rx_prio_buf_discard[i] =
+ adapter->pf_stats.pf_hw_stats.rx_prio_buf_discard[i];
+ }
+
+ LOG_INFO_BDF("sxe2 com pf[%d] stats clear is completed.\n", adapter->pf_idx);
+ return 0;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc858c920bcf4eade167eb6593f5fdbf4a598b89
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_stats.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
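+ *
+ * Counter model (see sxe2_com_stats.c): read-clear counters report the
+ * delta from the last snapshot,
+ *
+ *   SXE2_COM_CALC_READCLEAR_STATS(s, new, old);  s = new - old
+ *
+ * while free-running counters accumulate and fall back to the raw value
+ * on hardware counter wrap (new < old), see sxe2_com_calc_nonclear_stats().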
+ * + * @file: sxe2_com_stats.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_STATS_H__ +#define __SXE2_COM_STATS_H__ + +#include "sxe2.h" + +s32 sxe2_com_vsi_stat_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_vsi_stat_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_mac_stat_get(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_mac_stat_clear(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.c new file mode 100644 index 0000000000000000000000000000000000000000..8492cd43364aad8d57ee319e81230e9dad380e7d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_switchdev.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_switchdev.h" +#include "sxe2_eswitch.h" +#include "sxe2_sriov.h" + +s32 sxe2_com_switch_uplink(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_switchdev_uplink_info switchdev_repr_info_req = {0}; + s32 ret = 0; + u8 pf_id; + u8 is_set; + + if (sizeof(struct sxe2_switchdev_uplink_info) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_switchdev_uplink_info), + cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&switchdev_repr_info_req, cmd_buf->req_data, + cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + is_set = switchdev_repr_info_req.is_set; + pf_id = switchdev_repr_info_req.pf_id; + + if (pf_id != adapter->pf_idx) + goto l_end; + + ret = sxe2_eswitch_ucmd_uplink_set(adapter, is_set); + if (ret) { + LOG_ERROR_BDF("user driver %s uplink pf %d fail, ret=%d\n", + is_set ? "set" : "clear", pf_id, ret); + } else { + LOG_DEBUG_BDF("user driver %s uplink pf %d\n", is_set ? 
"set" : "clear", + pf_id); + } +l_end: + return ret; +} + +s32 sxe2_com_switch_repr(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vf_node *vf_node; + struct sxe2_vsi *esw_vsi; + struct sxe2_switchdev_repr_info switchdev_repr_info_req = {0}; + s32 ret = 0; + u8 pf_id; + u16 cp_vsi_id; + u16 repr_vf_id; + u16 repr_pf_id; + u8 is_set; + + if (sizeof(struct sxe2_switchdev_repr_info) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_switchdev_repr_info), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&switchdev_repr_info_req, cmd_buf->req_data, + cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + is_set = switchdev_repr_info_req.is_set; + pf_id = switchdev_repr_info_req.pf_id; + cp_vsi_id = le16_to_cpu(switchdev_repr_info_req.cp_vsi_id); + repr_vf_id = le16_to_cpu(switchdev_repr_info_req.repr_vf_id); + repr_pf_id = le16_to_cpu(switchdev_repr_info_req.repr_pf_id); + + vf_node = sxe2_vf_node_get(adapter, repr_vf_id); + if (!vf_node) + goto l_end; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, repr_vf_id)); + esw_vsi = vf_node->adapter->eswitch_ctxt.user_esw_vsi; + if (!esw_vsi) { + LOG_ERROR_BDF("esw vsi null\n"); + goto l_unlock; + } + + if (cp_vsi_id != esw_vsi->idx_in_dev) { + LOG_ERROR_BDF("esw vsi is not cp vsi\n"); + goto l_unlock; + } + + if (pf_id != adapter->pf_idx) + goto l_unlock; + + if (repr_pf_id != adapter->pf_idx) + goto l_unlock; + + ret = sxe2_eswitch_ucmd_repr_cfg(vf_node, is_set); + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, repr_vf_id)); + if (ret) { + LOG_ERROR_BDF("user driver %s repr vf %d fail, ret=%d\n", + is_set ? "set" : "clear", repr_vf_id, ret); + } else { + LOG_DEBUG_BDF("user driver %s repr vf %d\n", is_set ? 
"set" : "clear", + repr_vf_id); + } +l_end: + return ret; +} + +s32 sxe2_com_switch_mode(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_switchdev_mode_info switchdev_mode_req = {0}; + struct sxe2_switchdev_mode_info switchdev_mode_resp = {0}; + s32 ret = 0; + u8 pf_id; + bool is_switchdev = false; + + if (sizeof(struct sxe2_switchdev_mode_info) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_switchdev_mode_info), cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + if (sizeof(struct sxe2_switchdev_mode_info) != cmd_buf->resp_len) { + LOG_ERROR_BDF("cmd resp len err %lu != %u\n", + sizeof(struct sxe2_switchdev_mode_info), cmd_buf->resp_len); + ret = -EFAULT; + goto l_end; + } + + if (copy_from_user(&switchdev_mode_req, cmd_buf->req_data, cmd_buf->req_len)) { + LOG_ERROR_BDF("copy_from_user failed, len=%u\n", cmd_buf->req_len); + ret = -EFAULT; + goto l_end; + } + + pf_id = switchdev_mode_req.pf_id; + if (pf_id != adapter->pf_idx) + goto l_end; + + ret = sxe2_eswitch_ucmd_mode_get(adapter, &is_switchdev); + if (ret) { + LOG_ERROR_BDF("user driver get pf %d fail, ret=%d\n", pf_id, ret); + } else { + switchdev_mode_resp.pf_id = pf_id; + switchdev_mode_resp.is_switchdev = (u8)is_switchdev; + LOG_DEBUG_BDF("user driver get pf %d\n", pf_id); + if (sxe2_com_resp_copy_to_user(cmd_buf, &switchdev_mode_resp, + cmd_buf->resp_len, obj) != 0) { + ret = -EFAULT; + LOG_ERROR_BDF("copy_to_user failed, len=%u\n", cmd_buf->resp_len); + } + } +l_end: + return ret; +} + +s32 sxe2_com_switch_cp_vsi(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_switchdev_cpvsi_info switchdev_cpvsi_resp = {0}; + s32 ret = 0; + u16 cp_vsi = 0xFFFF; + + if (sizeof(struct sxe2_switchdev_cpvsi_info) != cmd_buf->resp_len) { + LOG_ERROR_BDF("cmd resp len err %lu != %u\n", + sizeof(struct sxe2_switchdev_cpvsi_info), + cmd_buf->resp_len); + ret = -EFAULT; + goto l_end; + } + + ret = sxe2_eswitch_ucmd_eswvsi_get(adapter, &cp_vsi); + if (ret) { + LOG_ERROR_BDF("user driver get pf %d fail, ret=%d\n", adapter->pf_idx, + ret); + } else { + switchdev_cpvsi_resp.cp_vsi_id = cpu_to_le16(cp_vsi); + LOG_DEBUG_BDF("user driver get pf %d cp vsi %d.\n", adapter->pf_idx, + cp_vsi); + if (sxe2_com_resp_copy_to_user(cmd_buf, &switchdev_cpvsi_resp, + cmd_buf->resp_len, obj) != 0) { + ret = -EFAULT; + LOG_ERROR_BDF("copy_to_user failed, len=%u\n", cmd_buf->resp_len); + } + } +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.h new file mode 100644 index 0000000000000000000000000000000000000000..e1c73425a93b5c3bd8f86738f155245b0dcc7735 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_switchdev.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_switchdev.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_SWITCHDEV_H__ +#define __SXE2_COM_SWITCHDEV_H__ + +#include "sxe2.h" +s32 sxe2_com_switch_uplink(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_repr(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_mode(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_switch_cp_vsi(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.c new file mode 100644 index 0000000000000000000000000000000000000000..4bfc3e6fc31afe56c2dea23ce12b7ba06942d58f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_vlan.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_vlan.h" +#include "sxe2_switch.h" +#include "sxe2_sriov.h" +#include "sxe2_netdev.h" +#include "sxe2_hw.h" + +STATIC s32 sxe2_user_vlan_offload_set_outer_strip(struct sxe2_hw *hw, u16 vsi, + u8 outer_strip) +{ + u32 val; + + if (outer_strip & ~(SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK)) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TSR(vsi)); + val &= ~(SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK | + (SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK << SXE2_DPDK_OFFLOAD_STRIP_OFFSET)); + val = val | outer_strip | (outer_strip << SXE2_DPDK_OFFLOAD_STRIP_OFFSET); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi), val); + return 0; +} + +STATIC s32 sxe2_user_vlan_offload_set_inner_strip(struct sxe2_hw *hw, u16 vsi, + u8 inner_strip) +{ + u32 val; + + if (inner_strip & ~SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1) + return -SXE2_HW_ERR_INVAL; + + val = sxe2_read_reg(hw, SXE2_VSI_TSR(vsi)); + val &= ~(SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1 | + (SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1 << SXE2_DPDK_OFFLOAD_STRIP_OFFSET)); + val = val | inner_strip | (inner_strip << SXE2_DPDK_OFFLOAD_STRIP_OFFSET); + sxe2_write_reg(hw, SXE2_VSI_TSR(vsi), val); + + return 0; +} + +s32 sxe2_user_vlan_offload_strip_paramcheck(struct sxe2_user_vlan_offload_cfg *vlan_cfg, + bool port_vlan_exist) +{ + u8 outer_strip = vlan_cfg->outer_strip; + u8 inner_strip = vlan_cfg->inner_strip; + + if (outer_strip & ~(SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK)) + return -SXE2_HW_ERR_INVAL; + + if (inner_strip & ~SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1) + return -SXE2_HW_ERR_INVAL; + + if ((outer_strip & SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK) == 0 && + (inner_strip & SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1)) { + if (port_vlan_exist) + return 0; + else + return -SXE2_HW_ERR_INVAL; + } + + return 0; +} + +STATIC s32 sxe2_user_vlan_offload_set_insert(struct sxe2_hw *hw, struct sxe2_vsi *vsi, + u8 outer_insert, u8 inner_insert) +{ + u32 tmpval = 0; + u32 val; + u32 vall2tagsen; + s32 ret = SXE2_VF_ERR_SUCCESS; + u16 vsi_id = vsi->idx_in_dev; + struct sxe2_adapter *adapter = vsi->adapter; + u8 portid = adapter->port_idx; + + val = sxe2_read_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id)); + if (!(outer_insert 
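+	/*
+	 * The branch below is a four-way decision on the (outer, inner)
+	 * insert-enable pair:
+	 *   neither set -> clear both L2TAG valid bits;
+	 *   both set    -> inner must use the VLAN tag field and outer one of
+	 *                  8021Q/8021AD/QINQ1, else -SXE2_VF_ERR_PARAM;
+	 *   inner only  -> allowed only when a port VLAN supplies the outer tag;
+	 *   outer only  -> program the outer tag field alone.
+	 */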
& SXE2_DPDK_OFFLOAD_INSERT_ENABLE) && + !(inner_insert & SXE2_DPDK_OFFLOAD_INSERT_ENABLE)) { + tmpval = val & (~((SXE2_DPDK_OFFLOAD_INSERT_ENABLE << 4) | + SXE2_DPDK_OFFLOAD_INSERT_ENABLE)); + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id), tmpval); + } else if ((outer_insert & SXE2_DPDK_OFFLOAD_INSERT_ENABLE) && + (inner_insert & SXE2_DPDK_OFFLOAD_INSERT_ENABLE)) { + if (((outer_insert & SXE2_DPDK_OFFLOAD_FIELD) == + (inner_insert & SXE2_DPDK_OFFLOAD_FIELD)) || + ((inner_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) != + SXE2_DPDK_OFFLOAD_OUTER_INSERT_VLAN) || + (((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) != + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD) && + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) != + SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1) && + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) != + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q))) { + LOG_ERROR_BDF("vlan insert set failed vsi[%u], inner:%u, outer:%u\n", + vsi_id, inner_insert, outer_insert); + ret = -SXE2_VF_ERR_PARAM; + } else if ((((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD) || + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1))) { + tmpval = (inner_insert & SXE2_DPDK_OFFLOAD_FIELD) | + ((outer_insert & SXE2_DPDK_OFFLOAD_FIELD) << 4); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id), tmpval); + } else if (((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q)) { + vall2tagsen = sxe2_read_reg(hw, SXE2_PFP_L2TAGSEN(portid)); + if (!((vall2tagsen >> 8) & BIT(2))) { + vall2tagsen |= (BIT(2) << 8); + sxe2_write_reg(hw, SXE2_PFP_L2TAGSEN(portid), + vall2tagsen); + } + tmpval = (inner_insert & SXE2_DPDK_OFFLOAD_FIELD) | + ((outer_insert & SXE2_DPDK_OFFLOAD_FIELD) << 4); + + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id), tmpval); + } else { + LOG_ERROR_BDF("failed to insert, vsi:%u, inner:%u, outer:%u\n", + vsi_id, inner_insert, outer_insert); + ret = -SXE2_VF_ERR_PARAM; + } + } else if (inner_insert & SXE2_DPDK_OFFLOAD_INSERT_ENABLE) { + if (vsi->user_vlan.port_vlan_exsit || + (vsi->vf_node && vsi->vf_node->vlan_info.port_vlan_exsit)) { + val |= (SXE2_VSI_L2TAGSTXVALID_ID_VLAN + << SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S); + val |= SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID; + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id), val); + } else { + LOG_ERROR_BDF("failed to insert, vsi:%u, set inner but not set outer\n", + vsi_id); + ret = -SXE2_VF_ERR_PARAM; + } + } else { + if (((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q) || + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD) || + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1) || + ((outer_insert & SXE2_DPDK_OFFLOAD_TAGID_FIELD) == + SXE2_DPDK_OFFLOAD_OUTER_INSERT_VLAN)) { + tmpval = ((val & (~(SXE2_DPDK_OFFLOAD_INSERT_ENABLE << 4))) | + outer_insert); + sxe2_write_reg(hw, SXE2_VSI_L2TAGSTXVALID(vsi_id), tmpval); + } else { + LOG_ERROR_BDF("failed to insert, vsi[%u], outer param :%u\n", + vsi_id, outer_insert); + ret = -SXE2_VF_ERR_PARAM; + } + } + + return ret; +} + +STATIC s32 sxe2_vlan_hw_sync_and_update(struct sxe2_hw *hw, struct sxe2_vsi *vsi, + struct sxe2_user_vlan_offload_cfg *new_cfg, + struct sxe2_user_vlan_offload_cfg *curr_cfg, + bool is_port_vlan_check) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = hw->adapter; + + if (new_cfg->inner_insert != curr_cfg->inner_insert || + new_cfg->outer_insert != curr_cfg->outer_insert) 
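+	/*
+	 * Delta-programming pattern: hardware is only touched for fields that
+	 * differ from the cached configuration, and each field of curr_cfg is
+	 * committed only after its register write succeeds, so the cache
+	 * always mirrors what the hardware actually holds.
+	 */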
{ + ret = sxe2_user_vlan_offload_set_insert(hw, vsi, new_cfg->outer_insert, + new_cfg->inner_insert); + if (ret) { + LOG_ERROR_BDF("Failed to set vlan offload insert, vsi_id:%d\n", + vsi->idx_in_dev); + ret = -EINVAL; + goto l_end; + } + + curr_cfg->inner_insert = new_cfg->inner_insert; + curr_cfg->outer_insert = new_cfg->outer_insert; + } + + ret = sxe2_user_vlan_offload_strip_paramcheck(new_cfg, is_port_vlan_check); + if (ret) { + LOG_ERROR_BDF("Failed to check vlan strip, vsi_id:%d\n", vsi->idx_in_dev); + ret = -EINVAL; + goto l_end; + } + + if (new_cfg->outer_strip != curr_cfg->outer_strip) { + ret = sxe2_user_vlan_offload_set_outer_strip(hw, vsi->idx_in_dev, + new_cfg->outer_strip); + if (ret) { + LOG_ERROR_BDF("Failed to set vlan outer strip, vsi_id:%d\n", + vsi->idx_in_dev); + ret = -EINVAL; + goto l_end; + } + curr_cfg->outer_strip = new_cfg->outer_strip; + } + + if (new_cfg->inner_strip != curr_cfg->inner_strip) { + ret = sxe2_user_vlan_offload_set_inner_strip(hw, vsi->idx_in_dev, + new_cfg->inner_strip); + if (ret) { + LOG_ERROR_BDF("Failed to set vlan inner strip, vsi_id:%d\n", + vsi->idx_in_dev); + ret = -EINVAL; + goto l_end; + } + curr_cfg->inner_strip = new_cfg->inner_strip; + } + +l_end: + return ret; +} + +STATIC s32 +sxe2_user_vlan_offload_cfg_common_handle(struct sxe2_vsi *vsi, + struct sxe2_user_vlan_offload_cfg *vlan_cfg) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_vf_node *vf = vsi->vf_node; + s32 ret; + + if (vf) { + ret = sxe2_check_vf_ready_for_cfg(vf); + if (ret) { + LOG_ERROR_BDF("VF %u not ready for VLAN cfg.\n", vf->vf_idx); + goto l_vf_unlock; + } + + if (sxe2_port_vlan_is_exist(vf)) { + if (vlan_cfg->outer_insert != 0 || vlan_cfg->outer_strip != 0) { + LOG_ERROR_BDF("VF %u, vsi_id: %u Port enabled, outer forbidden.\n", + vf->vf_idx, vsi->idx_in_dev); + ret = -EINVAL; + goto l_vf_unlock; + } + } + + ret = sxe2_vlan_hw_sync_and_update(hw, vsi, vlan_cfg, + &vf->vlan_info.vlan_offload, true); + if (ret) { + LOG_ERROR_BDF("Failed to sync vlan offload, vsi_id:%d\n", + vsi->idx_in_dev); + goto l_vf_unlock; + } + +l_vf_unlock: + return ret; + } + + return sxe2_vlan_hw_sync_and_update(hw, vsi, vlan_cfg, + &vsi->user_vlan.vlan_offload, false); +} + +s32 sxe2_com_vlan_offload_cfg(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_vlan_offload_cfg_req *vlan_offload = + (struct sxe2_drv_vlan_offload_cfg_req *) + sxe2_com_req_data_copy_to_kernel(cmd_buf, obj); + struct sxe2_vsi *vsi = NULL; + struct sxe2_user_vlan_offload_cfg vlan_cfg = {0}; + s32 ret = 0; + u16 idx = 0; + + if (!vlan_offload) { + LOG_ERROR_BDF("vlan offload cfg req is NULL\n"); + ret = -EINVAL; + goto l_end; + } + + idx = vlan_offload->vsi_id; + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, idx); + if (!vsi) { + LOG_ERROR_BDF("vsi is NULL\n"); + ret = -EINVAL; + goto l_unlock; + } + + vlan_cfg.outer_insert = vlan_offload->outer_insert; + vlan_cfg.outer_strip = vlan_offload->outer_strip; + vlan_cfg.inner_insert = vlan_offload->inner_insert; + vlan_cfg.inner_strip = vlan_offload->inner_strip; + ret = sxe2_user_vlan_offload_cfg_common_handle(vsi, &vlan_cfg); + if (ret != 0) { + LOG_ERROR_BDF("failed to cfg vlan, ret:%d\n", ret); + ret = -EINVAL; + goto l_unlock; + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + kfree(vlan_offload); + return ret; +} + +STATIC s32 sxe2_user_vlan_cfg_query_common_handle(struct sxe2_vsi *vsi, + enum sxe2_func_type 
func_type,
+					  struct sxe2_drv_vlan_cfg_query_resp *vlan_cfg)
+{
+	struct sxe2_vf_node *vf = vsi->vf_node;
+	struct sxe2_user_vlan_offload_cfg vlan_offload = {0};
+	struct sxe2_vlan port_vlan = {0};
+
+	if (vf) {
+		memcpy(&vlan_offload, &vf->vlan_info.vlan_offload, sizeof(vlan_offload));
+		memcpy(&port_vlan, &vf->vlan_info.port_vlan, sizeof(port_vlan));
+	} else {
+		memcpy(&vlan_offload, &vsi->user_vlan.vlan_offload, sizeof(vlan_offload));
+		memcpy(&port_vlan, &vsi->user_vlan.port_vlan, sizeof(port_vlan));
+	}
+
+	vlan_cfg->outer_insert = vlan_offload.outer_insert;
+	vlan_cfg->outer_strip = vlan_offload.outer_strip;
+	vlan_cfg->inner_insert = vlan_offload.inner_insert;
+	vlan_cfg->inner_strip = vlan_offload.inner_strip;
+
+	vlan_cfg->port_vlan_exist = (port_vlan.vid != 0 || port_vlan.prio != 0) ? 1 : 0;
+
+	vlan_cfg->tpid = port_vlan.tpid;
+	vlan_cfg->vid = port_vlan.vid;
+
+	return 0;
+}
+
+s32 sxe2_com_vlan_cfg_query(struct sxe2_adapter *adapter, struct sxe2_obj *obj,
+			    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_vlan_cfg_query_resp resp = {0};
+	u16 vsi_id = cmd_buf->vsi_id;
+	struct sxe2_vsi *vsi = NULL;
+	s32 ret = 0;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	vsi = sxe2_vsi_get_by_idx(adapter, vsi_id);
+	if (!vsi) {
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		LOG_ERROR_BDF("vsi is NULL\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	(void)sxe2_user_vlan_cfg_query_common_handle(vsi, obj->func_type, &resp);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj)) {
+		ret = -EFAULT;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_user_vlan_destroy(struct sxe2_vsi *vsi)
+{
+	struct sxe2_user_vlan_offload_cfg vlan_cfg = {0};
+	struct sxe2_adapter *adapter = vsi->adapter;
+	s32 ret = 0;
+
+	ret = sxe2_user_vlan_offload_cfg_common_handle(vsi, &vlan_cfg);
+	if (ret) {
+		LOG_ERROR_BDF("Failed to clean vlan offload cfg, vsi_id:%d\n",
+			      vsi->idx_in_dev);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.h
new file mode 100644
index 0000000000000000000000000000000000000000..9192128a19b32ceb7a7a1d04f3db2f8bc3f8f12f
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/com_parse/sxe2_com_vlan.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_com_vlan.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_VLAN_H__ +#define __SXE2_COM_VLAN_H__ + +#include "sxe2.h" + +s32 sxe2_com_vlan_offload_cfg(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_port_vlan_cfg(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_com_vlan_cfg_query(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2_user_vlan_destroy(struct sxe2_vsi *vsi); + +s32 sxe2_user_vlan_offload_strip_paramcheck(struct sxe2_user_vlan_offload_cfg *vlan_cfg, + bool port_vlan_exist); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2.h new file mode 100644 index 0000000000000000000000000000000000000000..d0734912b215e213c30311f7d84bef14e5cecf41 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_H__ +#define __SXE2_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_version.h" +#include "sxe2_hw.h" +#include "sxe2_vsi.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_switch.h" +#include "sxe2_monitor.h" +#include "sxe2_ptp.h" +#include "sxe2_msglevel.h" +#include "sxe2_host_cli.h" +#include "sxe2_dcb.h" +#include "sxe2_dev_ctrl.h" +#include "sxe2_log_export.h" +#include "sxe2_aux_driver.h" +#include "sxe2_macvlan.h" +#include "sxe2_flow.h" +#include "sxe2_fnav.h" +#include "sxe2_sriov.h" +#include "sxe2_rss.h" +#include "sxe2_txsched.h" +#include "sxe2_eswitch.h" +#include "sxe2_macsec.h" +#include "sxe2_ipsec.h" +#include "sxe2_arfs.h" +#include "sxe2_trace.h" +#include "sxe2_udp_tunnel.h" +#include "sxe2_com_cdev.h" +#include "sxe2_acl.h" +#include "sxe2_drv_cmd.h" + +#define SXE2_CTXT_REG_VALUE(value, shift, width) \ + (((value) << (shift)) & (((1UL << (width)) - 1) << (shift))) + +#define SXE2_ETH_DEAD_LOAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN) +#define SXE2_MAX_FRAME_SIZE 9832 +#define SXE2_ETH_MAX_FRAME_SIZE (9728) +#define SXE2_MAX_MTU (SXE2_ETH_MAX_FRAME_SIZE - SXE2_ETH_DEAD_LOAD) + +#define DEV_NAME_LEN (16) + +#define SXE2_PCI_VENDOR_ID_1 0x1ff2 +#define SXE2_PCI_DEVICE_ID_1 0x10b1 + +#define SXE2_PCI_VENDOR_ID_2 0x1d94 +#define SXE2_PCI_DEVICE_ID_2 0x1260 + +#define SXE2_PCI_DEVICE_ID_10B3 0x10b3 + +#define SXE2_PCI_VENDOR_ID_206F 0x206f + +#define SXE2_QUEUE_WAIT_RETRY_CNT (50) + +#define SXE2_CLI_DEV_MGR_DATA_SIZE (128) +#define SXE2_CLI_DEV_MGR_DATA_STEP (1) +#define SXE2_CLI_DEV_MGR_DATA_CNT (1) + +enum sxe2_cli_dev_status { + SXE2_CDEV_STATUS_NORMAL, + SXE2_CDEV_STATUS_UNACCESS, +}; + +enum sxe2_adapter_flags { + SXE2_FLAG_LEGACY_RX_ENABLE = 0, + SXE2_FLAG_SKB_PRIO, + SXE2_FLAG_LRO_CAPABLE, + SXE2_FLAG_SRIOV_CAPABLE, + SXE2_FLAG_SRIOV_VFS_DISABLED, + SXE2_FLAG_ADV_MODE_ENABLE, + SXE2_FLAG_DCB_CAPABLE, + SXE2_FLAG_DCB_ENABLE, + SXE2_FLAG_FW_DCBX_AGENT, + SXE2_FLAG_FNAV_ENABLE, + SXE2_FLAG_MTU_CHANGED, + SXE2_FLAG_ADVANCE_MODE, + SXE2_FLAG_FLTR_SYNC, + SXE2_FLAG_SUSPEND, + SXE2_FLAG_VFLR_PENDING, + SXE2_FLAG_VMDQ_CAPABLE, + SXE2_FLAG_MACVLAN_ENABLE, + SXE2_FLAG_MDD_PENDING, + SXE2_FLAG_MDD_TX_PENDING, + SXE2_FLAG_MDD_RX_PENDING, 
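+	/*
+	 * These enumerators are bit indexes into the adapter's
+	 * DECLARE_BITMAP(flags, SXE2_PF_FLAGS_NBITS) and are used through
+	 * test_bit()/set_bit()/clear_bit(), e.g.:
+	 *
+	 *	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+	 *		return 0;
+	 */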
+ SXE2_FLAG_MDD_AUTO_RESET_VF, + SXE2_FLAG_RDMA_AEQ_OVERFLOW, + SXE2_FLAG_LINK_CHECK, + SXE2_FLAG_FNAV_TUNNEL_ENABLE, + SXE2_FLAG_SWITCHDEV_CAPABLE, + SXE2_FLAG_SWITCHDEV_ENABLE, + SXE2_FLAG_VFSWSTATS_ENABLE, + SXE2_FLAG_FW_DCBX_DIS_PENDING, + SXE2_DATAPATH_LOG_ENABLE, + SXE2_FLAG_LINK_DOWN_ON_CLOSE, + SXE2_FLAG_RDMA_LOADED, + SXE2_FLAG_ACL_CAPABLE, + SXE2_PF_FLAGS_NBITS +}; + +struct sxe2_cli_dev_mgr_data { + u16 id; + atomic_t ref_count; + struct sxe2_adapter *adapter; + wait_queue_head_t waitq; + enum sxe2_cli_dev_status status; + struct sxe2_cdev_info cdev_info; +}; + +struct sxe2_cli_dev_mgr { + DECLARE_BITMAP(map, + SXE2_CLI_DEV_MGR_DATA_SIZE); + struct mutex lock; + struct sxe2_cli_dev_mgr_data cdev_mgr[SXE2_CLI_DEV_MGR_DATA_SIZE]; +}; + +struct sxe2_dcb_stats { + struct sxe2_pause_stats curr_pause_stats; + struct sxe2_pause_stats perv_pause_stats; + bool prev_stat_loaded; +}; + +struct sxe2_caps_context { + u16 max_rss_lut_size; + u16 fnav_space_bsize; + u16 fnav_space_gsize; + u16 fnav_stat_base; + u16 fnav_stat_num; + u16 global_lut_base; + u16 global_lut_num; +}; + +struct sxe2_rss_ctxt { + struct sxe2_ppp_common_ctxt ppp; + struct list_head rss_cfgs; + struct mutex rss_cfgs_lock; +}; + +struct sxe2_pf_sw_stats { + u64 fnav_prgm_err; +}; + +struct sxe2_pf_stats { + struct sxe2_pf_hw_stats pf_hw_stats; + struct sxe2_pf_hw_stats last_pf_hw_stats; + struct sxe2_pf_hw_stats parse_pf_hw_stats; + struct sxe2_pf_hw_stats parse_last_pf_hw_stats; + u8 stat_prev_loaded : 1; + struct sxe2_pf_sw_stats pf_sw_stats; + + struct sxe2_dcb_stats dcb_stats; +}; + +struct sxe2_fw_comp_ver { + u16 major; + u16 minor; + u32 resv; +}; + +struct sxe2_udp_tunnel_context { + struct mutex lock; + DECLARE_BITMAP(vsi_map, SXE2_MAX_VSI_NUM); +}; + +struct sxe2_repr_vf_stats { + struct rtnl_link_stats64 repr_link_stats64[SXE2_VF_NUM]; +}; + +struct sxe2_adapter { + char dev_name[DEV_NAME_LEN]; + u8 pf_idx; + u8 port_idx; + u8 pf_cnt; + u8 pad; + struct pci_dev *pdev; + struct sxe2_hw hw; + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + struct sxe2_irq_context irq_ctxt; + struct sxe2_queue_context q_ctxt; + struct sxe2_vsi_context vsi_ctxt; + struct sxe2_txsched_context tx_sched_ctxt; + struct sxe2_cmd_channel_context cmd_channel_ctxt; + struct sxe2_monitor_context monitor_ctxt; + struct sxe2_msglevel_context msglvl_ctxt; + struct sxe2_pf_stats pf_stats; + struct sxe2_repr_vf_stats repr_vf_stats; + struct sxe2_vf_context vf_ctxt; + struct sxe2_dcb_context dcb_ctxt; + struct sxe2_dev_ctrl_context dev_ctrl_ctxt; + struct sxe2_export_context export_ctxt; + struct sxe2_ptp_context ptp_ctxt; + struct sxe2_rdma_aux_context aux_ctxt; + struct sxe2_cmd_link_context link_ctxt; + struct sxe2_lfc_context lfc_ctxt; + DECLARE_BITMAP(flags, SXE2_PF_FLAGS_NBITS); + struct sxe2_switch_context switch_ctxt; + struct sxe2_eswitch_context eswitch_ctxt; + struct sxe2_caps_context caps_ctxt; + struct sxe2_macvlan_context macvlan_ctxt; + struct sxe2_fnav_context fnav_ctxt; + struct sxe2_arfs_ctxt arfs_ctxt; + struct sxe2_rss_ctxt rss_flow_ctxt; +#ifdef HAVE_MACSEC_SUPPORT + struct sxe2_macsec_context macsec_ctxt; +#endif + struct sxe2_ipsec_context ipsec_ctxt; + struct sxe2_acl_context acl_ctxt; + + u8 serial_num[SXE2_SERIAL_NUM_LEN]; + struct sxe2_lag_context *lag_ctxt; + +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + struct dentry *sxe2_debugfs_pf; +#endif + struct sxe2_cli_dev_mgr_data *cdev_mgr; + struct sxe2_user_context user_pf_ctxt; + struct sxe2_fw_comp_ver fw_ver; + 
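+	/*
+	 * struct sxe2_adapter is an aggregate of per-feature *_context
+	 * members rather than a flat field list; each feature module (PTP,
+	 * DCB, eswitch, ACL, ...) owns its own context and its own locking,
+	 * and reaches the adapter back through a pointer where needed (see
+	 * hw->adapter in the VLAN helpers).
+	 */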
struct sxe2_udp_tunnel_context udp_tunnel_ctxt; + + struct sxe2_com_context com_ctxt; + struct sxe2_stats_map stats_map; +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +#ifdef HAVE_UDP_TUNNEL_NIC_SHARED + struct udp_tunnel_nic_shared udp_tunnel_shared; +#endif + struct udp_tunnel_nic_info *udp_tunnel_nic; +#endif + + enum sxe2_com_module drv_mode; +}; + +void sxe2_fw_version_get(struct sxe2_adapter *adapter); + +int sxe2_g_com_mode_get(void); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.c new file mode 100644 index 0000000000000000000000000000000000000000..ed116b983b67cd9414d4e5ca5c94adbc185f8418 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.c @@ -0,0 +1,2204 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_acl.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "linux/device.h" +#include "linux/list.h" +#include "linux/mutex.h" +#include "linux/slab.h" +#include "sxe2.h" +#include "sxe2_acl.h" +#include "sxe2_log.h" +#include "sxe2_common.h" +#include "sxe2_flow_public.h" +#include "sxe2_ethtool.h" + +s32 sxe2_fwc_acl_trace_trigger(struct sxe2_adapter *adapter) +{ + struct sxe2_cmd_params cmd = {0}; + s32 ret = 0; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_TRACE_TRIGGER, NULL, 0, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("acl trace trigger cmd fail, ret=%d", ret); + + return ret; +} + +STATIC void sxe2_acl_trace_hit_info_print(struct sxe2_acl_hit_info *hit_info) +{ + LOG_INFO("profile_id: %u\n", hit_info->profile_id); + LOG_INFO("fv 0-3 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv0, hit_info->fv1, + hit_info->fv2, hit_info->fv3); + LOG_INFO("fv 4-7 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv4, hit_info->fv5, + hit_info->fv6, hit_info->fv7); + LOG_INFO("fv 8-11 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv8, hit_info->fv9, + hit_info->fv10, hit_info->fv11); + LOG_INFO("fv 12-15 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv12, hit_info->fv13, + hit_info->fv14, hit_info->fv15); + LOG_INFO("fv 16-19 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv16, hit_info->fv17, + hit_info->fv18, hit_info->fv19); + LOG_INFO("fv 20-23 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv20, hit_info->fv21, + hit_info->fv22, hit_info->fv23); + LOG_INFO("fv 24-27 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv24, hit_info->fv25, + hit_info->fv26, hit_info->fv27); + LOG_INFO("fv 28-31 0x%x 0x%x 0x%x 0x%x\n", hit_info->fv28, hit_info->fv29, + hit_info->fv30, hit_info->fv31); +} + +s32 sxe2_fwc_acl_trace_recorder(struct sxe2_adapter *adapter) +{ + struct sxe2_cmd_params cmd = {0}; + struct sxe2_acl_trace_recorder recorder = {0}; + s32 ret = 0; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_TRACE_RECORDER, NULL, 0, &recorder, + sizeof(struct sxe2_acl_trace_recorder)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("acl trace recorder cmd fail, ret %d", ret); + goto l_end; + } + + LOG_INFO("****acl trace recorder start****\n"); + LOG_INFO("status0: %u\n", recorder.trace_status0); + if (recorder.trace_status0 == 0) + sxe2_acl_trace_hit_info_print(&recorder.hit_info); + + LOG_INFO("****acl trace recorder end****\n"); +l_end: + return ret; +} + +STATIC void sxe2_acl_dfx_info_print(struct sxe2_acl_dfx_info *dfx_info) +{ + s32 i = 0; + + if (!dfx_info) { + LOG_INFO("Error: dfx_info is NULL\n"); + return; + } + + LOG_INFO("===== ACL DFX Info =====\n"); + LOG_INFO("\n--- Statistics ---\n"); + LOG_INFO("og_inbuf_hdr_cnt: 
%u\n", le32_to_cpu(dfx_info->og_inbuf_hdr_cnt)); + LOG_INFO("og_inbuf_info_cnt: %u\n", le32_to_cpu(dfx_info->og_inbuf_info_cnt)); + LOG_INFO("og_proc_hdr_cnt: %u\n", le32_to_cpu(dfx_info->og_proc_hdr_cnt)); + LOG_INFO("og_proc_info_cnt: %u\n", le32_to_cpu(dfx_info->og_proc_info_cnt)); + LOG_INFO("og_to_engine_cnt: %u\n", le32_to_cpu(dfx_info->og_to_engine_cnt)); + LOG_INFO("og_in_rg_cnt: %u\n", le32_to_cpu(dfx_info->og_in_rg_cnt)); + LOG_INFO("og_out_rg_cnt: %u\n", le32_to_cpu(dfx_info->og_out_rg_cnt)); + LOG_INFO("sel_base_cnt: %u\n", le32_to_cpu(dfx_info->sel_base_cnt)); + LOG_INFO("key_gen_cnt: %u\n", le32_to_cpu(dfx_info->key_gen_cnt)); + LOG_INFO("key_gen_to_lkt_cnt: %u\n", + le32_to_cpu(dfx_info->key_gen_to_lkt_cnt)); + LOG_INFO("act_mem_cnt: %u\n", le32_to_cpu(dfx_info->act_mem_cnt)); + LOG_INFO("osc_act_cnt: %u\n", le32_to_cpu(dfx_info->osc_act_cnt)); + LOG_INFO("osc_pkt_cnt: %u\n", le32_to_cpu(dfx_info->osc_pkt_cnt)); + LOG_INFO("acl_rxft_cnt: %u\n", le32_to_cpu(dfx_info->acl_rxft_cnt)); + LOG_INFO("acl_recv_drop_cnt: %u\n", le32_to_cpu(dfx_info->acl_recv_drop_cnt)); + LOG_INFO("acl_action_drop_cnt: %u\n", + le32_to_cpu(dfx_info->acl_action_drop_cnt)); + LOG_INFO("acl_vsi_disable_drop_cnt: %u\n", + le32_to_cpu(dfx_info->acl_vsi_disable_drop_cnt)); + LOG_INFO("prfl_tcam_hit_cnt: %u\n", le32_to_cpu(dfx_info->prfl_tcam_hit_cnt)); + LOG_INFO("prfl_tcam_miss_cnt: %u\n", + le32_to_cpu(dfx_info->prfl_tcam_miss_cnt)); + LOG_INFO("prfl_tcam_bypss_cnt: %u\n", + le32_to_cpu(dfx_info->prfl_tcam_bypss_cnt)); + + LOG_INFO("\n--- TCAM Hit/Miss Count ---\n"); + for (i = 0; i < SXE2_ACL_ACTION_TCAM_CNT; i++) { + LOG_INFO("act_tcam_hit_cnt[%d]: %u\n", i, + le32_to_cpu(dfx_info->act_tcam_hit_cnt[i])); + LOG_INFO("act_tcam_miss_cnt[%d]: %u\n", i, + le32_to_cpu(dfx_info->act_tcam_miss_cnt[i])); + } + + LOG_INFO("\n--- ACL DFX ---\n"); + for (i = 0; i < SXE2_ACL_ACTION_TCAM_CNT; i++) { + LOG_INFO("act_idx_first[%d]: %u\n", i, + le16_to_cpu(dfx_info->act_idx_first[i])); + LOG_INFO("act_idx_last[%d]: %u\n", i, + le16_to_cpu(dfx_info->act_idx_last[i])); + LOG_INFO("act_key_first_low[%d]: 0x%x\n", i, + le32_to_cpu(dfx_info->act_key_first_low[i])); + LOG_INFO("act_key_first_high[%d]: 0x%x\n", i, + le32_to_cpu(dfx_info->act_key_first_high[i])); + LOG_INFO("act_key_last_low[%d]: 0x%x\n", i, + le32_to_cpu(dfx_info->act_key_last_low[i])); + LOG_INFO("act_key_last_high[%d]: 0x%x\n", i, + le32_to_cpu(dfx_info->act_key_last_high[i])); + } + + LOG_INFO("key_first: 0x%llx\n", le64_to_cpu(dfx_info->key_first)); + LOG_INFO("key_last: 0x%llx\n", le64_to_cpu(dfx_info->key_last)); + + LOG_INFO("\n--- IDs and Indexes ---\n"); + LOG_INFO("first_prfl_id: %u\n", dfx_info->first_prfl_id); + LOG_INFO("last_prfl_id: %u\n", dfx_info->last_prfl_id); + LOG_INFO("first_scen_id: %u\n", dfx_info->first_scen_id); + LOG_INFO("last_scen_id: %u\n", dfx_info->last_scen_id); + LOG_INFO("first_prfl_tcam_idx: %u\n", + le16_to_cpu(dfx_info->first_prfl_tcam_idx)); + LOG_INFO("last_prfl_tcam_idx: %u\n", + le16_to_cpu(dfx_info->last_prfl_tcam_idx)); + + LOG_INFO("\n--- Flags ---\n"); + LOG_INFO("first_cascade: %u\n", dfx_info->first_cascade); + LOG_INFO("last_cascade: %u\n", dfx_info->last_cascade); + LOG_INFO("first_stack: %u\n", dfx_info->first_stack); + LOG_INFO("last_stack: %u\n", dfx_info->last_stack); + LOG_INFO("first_tcam_en: %u\n", dfx_info->first_tcam_en); + LOG_INFO("last_tcam_en: %u\n", dfx_info->last_tcam_en); + + LOG_INFO("========================\n"); +} + +s32 sxe2_fwc_acl_dfx_get(struct sxe2_adapter *adapter) +{ + struct 
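+	/*
+	 * Firmware interaction in this file follows one two-step pattern:
+	 * sxe2_cmd_params_dflt_fill() binds an opcode to optional request and
+	 * response buffers, then sxe2_cmd_fw_exec() issues the command
+	 * synchronously. A read-only query therefore reduces to:
+	 *
+	 *	sxe2_cmd_params_dflt_fill(&cmd, opcode, NULL, 0, &resp, sizeof(resp));
+	 *	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	 */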
sxe2_acl_dfx_info dfx_info = {0}; + struct sxe2_cmd_params cmd = {0}; + s32 ret = 0; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_DFX_INFO_GET, NULL, 0, &dfx_info, + sizeof(struct sxe2_acl_dfx_info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("acl dfx info get cmd fail, ret %d", ret); + goto l_end; + } + sxe2_acl_dfx_info_print(&dfx_info); + +l_end: + return ret; +} + +s32 sxe2_fwc_acl_set_scen_prof(struct sxe2_adapter *adapter, + struct sxe2_fwc_acl_prof_sel_base_req *prof_sel_req) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_PROF_SEL_BASE_SET, prof_sel_req, + sizeof(*prof_sel_req), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR("Failed to set acl profile, ret=%d", ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_fwc_acl_lut_alloc(struct sxe2_adapter *adapter, + struct sxe2_acl_tbl_params *tbl_params, + struct sxe2_acl_tbl_info *acl_tbl_info) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_acl_lut_alloc_req acl_lut_alloc_req = {0}; + struct sxe2_fwc_acl_lut_alloc_resp acl_lut_alloc_resp = {0}; + u8 i = 0; + + acl_lut_alloc_req.width = cpu_to_le16(tbl_params->width); + acl_lut_alloc_req.depth = cpu_to_le16(tbl_params->depth); + acl_lut_alloc_req.act_pairs_per_entry = tbl_params->entry_act_pairs; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_LUT_ALLOC, &acl_lut_alloc_req, + sizeof(acl_lut_alloc_req), &acl_lut_alloc_resp, + sizeof(acl_lut_alloc_resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR("Failed to add acl entry, ret=%d\n", ret); + goto l_end; + } + + acl_tbl_info->id = le16_to_cpu(acl_lut_alloc_resp.alloc_id); + acl_tbl_info->first_tcam = acl_lut_alloc_resp.first_tcam; + acl_tbl_info->last_tcam = acl_lut_alloc_resp.last_tcam; + acl_tbl_info->first_entry = le16_to_cpu(acl_lut_alloc_resp.first_entry); + acl_tbl_info->last_entry = le16_to_cpu(acl_lut_alloc_resp.last_entry); + + acl_tbl_info->table_info.width = tbl_params->width; + acl_tbl_info->table_info.depth = tbl_params->depth; + acl_tbl_info->table_info.entry_act_pairs = tbl_params->entry_act_pairs; + + for (i = 0; i < SXE2_ACL_ACTION_MEM_CNT; i++) { + acl_tbl_info->act_mems[i].act_mem = acl_lut_alloc_resp.act_mem[i]; + LOG_DEBUG("dump acl entry msg,act_mems[%u]:%u\n", i, + acl_tbl_info->act_mems[i].act_mem); + } + +l_end: + return ret; +} + +STATIC s32 sxe2_fwc_acl_lut_dealloc(struct sxe2_adapter *adapter, u16 alloc_id) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_acl_lut_dealloc_req act_lut_dealloc_req = {0}; + + act_lut_dealloc_req.alloc_id = cpu_to_le16(alloc_id); + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_LUT_DEALLOC, &act_lut_dealloc_req, + sizeof(act_lut_dealloc_req), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR("Failed to dealloc acl lut, ret=%d", ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_fwc_acl_act_entry(struct sxe2_adapter *adapter, u8 act_mem_idx, + u16 act_entry_idx, + struct sxe2_acl_act_entry_data *act_entry_data) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_acl_act_entry_set_req act_entry_req = {}; + + act_entry_req.act_mem_idx = act_mem_idx; + act_entry_req.act_entry_idx = cpu_to_le16(act_entry_idx); + act_entry_req.data[0].prio = act_entry_data->prio; + act_entry_req.data[0].mdid = act_entry_data->mdid; + act_entry_req.data[0].value = cpu_to_le16(act_entry_data->value); + + sxe2_cmd_params_dflt_fill(&cmd, 
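+	/*
+	 * Every multi-byte field crossing the driver/firmware boundary is
+	 * converted with cpu_to_le16()/cpu_to_le32() on the way out and
+	 * le*_to_cpu() on the way back: the command structs are little-endian
+	 * wire formats, not native-endian structs.
+	 */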
SXE2_CMD_ACL_ACT_ENTRY_SET, &act_entry_req,
+				  sizeof(act_entry_req), NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR("Failed to add acl entry, ret=%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_fwc_acl_lut_entry(struct sxe2_adapter *adapter, u8 tcam_idx,
+				  u16 entry_idx,
+				  struct sxe2_acl_entry_data *lut_entry_data)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+	struct sxe2_fwc_acl_lut_entry_set_req acl_lut_entry_req = {};
+
+	acl_lut_entry_req.tcam_idx = tcam_idx;
+	acl_lut_entry_req.entry_idx = cpu_to_le16(entry_idx);
+	acl_lut_entry_req.data = *lut_entry_data;
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_LUT_ENTRY_SET, &acl_lut_entry_req,
+				  sizeof(acl_lut_entry_req), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR("Failed to add acl entry, ret=%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_fwc_acl_scen_alloc(struct sxe2_adapter *adapter,
+				   struct sxe2_fwc_acl_scen_alloc_req *req, u16 *scen_id)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+	struct sxe2_fwc_acl_scen_alloc_resp act_scen_alloc_rsp = {0};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_SCEN_ALLOC, req, sizeof(*req),
+				  &act_scen_alloc_rsp, sizeof(act_scen_alloc_rsp));
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR("Failed to alloc scen, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	*scen_id = le16_to_cpu(act_scen_alloc_rsp.scen_id);
+
+	LOG_DEBUG("acl scen alloc id:%u\n", *scen_id);
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_fwc_acl_scen_dealloc(struct sxe2_adapter *adapter, u16 scen_id)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+	struct sxe2_fwc_acl_scen_dealloc_req act_scen_dealloc_req = {0};
+
+	act_scen_dealloc_req.scen_id = cpu_to_le16(scen_id);
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ACL_SCEN_DEALLOC, &act_scen_dealloc_req,
+				  sizeof(act_scen_dealloc_req), NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR("Failed to dealloc scen, ret=%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+struct sxe2_acl_flow_cfg *
+sxe2_acl_find_flow_cfg_by_flow_type(struct sxe2_vsi *vsi,
+				    enum sxe2_fnav_flow_type flow_type)
+{
+	struct sxe2_acl_flow_cfg *flow_cfg = NULL;
+	struct sxe2_acl_flow_cfg *flow_cfg_find = NULL;
+
+	list_for_each_entry(flow_cfg, &vsi->acl.flow_cfg_list, l_node) {
+		if (flow_type == flow_cfg->flow_type) {
+			flow_cfg_find = flow_cfg;
+			break;
+		}
+		if (flow_type < flow_cfg->flow_type)
+			break;
+	}
+
+	return flow_cfg_find;
+}
+
+void sxe2_acl_flow_cfg_add_list(struct sxe2_vsi *vsi, struct sxe2_acl_flow_cfg *flow_cfg)
+{
+	struct sxe2_acl_flow_cfg *flow_tmp;
+	struct sxe2_vsi_acl *vsi_acl = &vsi->acl;
+	struct list_head *head = &vsi_acl->flow_cfg_list;
+
+	list_for_each_entry(flow_tmp, head, l_node) {
+		if (flow_tmp->flow_type == flow_cfg->flow_type)
+			return;
+
+		if (flow_tmp->flow_type > flow_cfg->flow_type) {
+			list_add_tail(&flow_cfg->l_node, &flow_tmp->l_node);
+			return;
+		}
+	}
+
+	list_add_tail(&flow_cfg->l_node, head);
+}
+
+STATIC s32 sxe2_acl_add_act(struct sxe2_adapter *adapter, struct sxe2_acl_scen_info *scen,
+			    u16 entry_idx, struct sxe2_acl_act_entry_data *act_entry_data)
+{
+	s32 ret = -EINVAL;
+	struct sxe2_acl_tbl_info *acl_tbl_info = adapter->acl_ctxt.acl_tbl_info;
+	struct sxe2_acl_act_mem *act_mem = NULL;
+	u16 cnt_cascade, first_tcam_id, last_tcam_id;
+	u16 absolute_line_idx, line_in_mem;
+	u16 i = 0;
+
+	absolute_line_idx = scen->start + entry_idx;
+	line_in_mem = absolute_line_idx % SXE2_FW_ACL_TCAM_DEPTH;
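+	/*
+	 * Scenario entry indexes are relative: rebase by scen->start, then
+	 * split into a TCAM row (absolute / depth) and a line within that row
+	 * (absolute % depth). With a hypothetical TCAM depth of 512, absolute
+	 * line 700 lands in row 1, line 188; the row index is then multiplied
+	 * by the cascade count to find the first physical TCAM of the row.
+	 */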
+
+	cnt_cascade = DIV_ROUND_UP(scen->width, SXE2_FW_ACL_KEY_WIDTH_BYTES);
+	first_tcam_id = (absolute_line_idx / SXE2_FW_ACL_TCAM_DEPTH) * cnt_cascade;
+	last_tcam_id = first_tcam_id + cnt_cascade;
+	for_each_set_bit(i, scen->acl_act_mem_bitmap, SXE2_FW_MAX_ACTION_MEMORIES) {
+		act_mem = &acl_tbl_info->act_mems[i];
+		if (act_mem->member_of_tcam >= first_tcam_id &&
+		    act_mem->member_of_tcam < last_tcam_id) {
+			ret = sxe2_fwc_acl_act_entry(adapter, i, line_in_mem,
+						     act_entry_data);
+			if (ret) {
+				LOG_ERROR_BDF("Acl add flow act entry cmd failed, ret:%d\n", ret);
+				goto l_end;
+			}
+		}
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC void
+sxe2_acl_assign_act_mems_res_to_tcams(struct sxe2_acl_tbl_info *acl_table_info,
+				      u8 cur_tcam_idx, u8 *cur_mem_idx, u8 num_mem)
+{
+	u8 i = 0;
+	struct sxe2_acl_act_mem *act_mem = NULL;
+
+	for (i = 0; *cur_mem_idx < SXE2_FW_MAX_ACTION_MEMORIES && i < num_mem;
+	     (*cur_mem_idx)++) {
+		act_mem = &acl_table_info->act_mems[*cur_mem_idx];
+
+		if (act_mem->act_mem == 0xff)
+			continue;
+
+		act_mem->member_of_tcam = cur_tcam_idx;
+		i++;
+	}
+}
+
+STATIC void
+sxe2_acl_divide_act_mems_res_to_tcams(struct sxe2_acl_tbl_info *acl_table_info)
+{
+	u16 num_depth = 0;
+	u16 num_width = 0;
+	u8 act_mem = 0;
+	u8 act_mem_remainder = 0;
+	u16 i = 0;
+	u16 j = 0;
+	u16 total_act_mem = 0;
+	u8 tcam_idx = 0;
+	u8 current_mem_idx = 0;
+
+	num_depth = DIV_ROUND_UP(acl_table_info->table_info.depth,
+				 SXE2_FW_ACL_TCAM_DEPTH);
+	num_width = DIV_ROUND_UP(acl_table_info->table_info.width,
+				 SXE2_FW_ACL_KEY_WIDTH_BYTES);
+
+	act_mem = acl_table_info->table_info.entry_act_pairs / num_width;
+	act_mem_remainder = acl_table_info->table_info.entry_act_pairs % num_width;
+
+	tcam_idx = acl_table_info->first_tcam;
+	for (i = 0; i < num_depth; i++) {
+		for (j = 0; j < num_width; j++) {
+			total_act_mem = act_mem;
+			if (j < act_mem_remainder)
+				total_act_mem += 1;
+
+			sxe2_acl_assign_act_mems_res_to_tcams(acl_table_info, tcam_idx,
+							      &current_mem_idx,
+							      total_act_mem);
+			tcam_idx++;
+		}
+	}
+}
+
+STATIC s32 sxe2_acl_create_table(struct sxe2_adapter *adapter,
+				 struct sxe2_acl_tbl_params *params)
+{
+	s32 ret = 0;
+	u16 width = 0;
+	u16 depth = 0;
+	struct sxe2_acl_tbl_params tbl_params = {0};
+	struct sxe2_acl_tbl_info *acl_tbl_info = NULL;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	u16 first_entry_in_pf = 0;
+	u16 last_entry_in_pf = 0;
+	u16 i = 0;
+	u16 num_cascades;
+	u16 num_stacks;
+
+	width = (u16)roundup((s32)params->width, SXE2_FW_ACL_KEY_WIDTH_BYTES);
+	depth = ALIGN(params->depth, SXE2_ACL_ENTRY_ALLOC_UNIT);
+
+	if (params->entry_act_pairs < width / SXE2_FW_ACL_KEY_WIDTH_BYTES) {
+		params->entry_act_pairs = (u8)(width / SXE2_FW_ACL_KEY_WIDTH_BYTES);
+
+		if (params->entry_act_pairs > SXE2_AQC_TBL_MAX_ACTION_PAIRS)
+			params->entry_act_pairs = SXE2_AQC_TBL_MAX_ACTION_PAIRS;
+	}
+
+	num_cascades = width / SXE2_FW_ACL_KEY_WIDTH_BYTES;
+	num_stacks = DIV_ROUND_UP(depth, SXE2_FW_ACL_TCAM_DEPTH);
+	if (num_stacks * num_cascades > SXE2_FW_ACL_LUT_NUM) {
+		LOG_ERROR_BDF("Requested ACL exceeds hardware limit (Total TCAMs needed: %u).\n",
+			      num_cascades * num_stacks);
+		ret = -ENOSPC;
+		goto l_end;
+	}
+
+	memset(&tbl_params, 0, sizeof(tbl_params));
+	tbl_params.depth = depth;
+	tbl_params.width = width;
+	tbl_params.entry_act_pairs = params->entry_act_pairs;
+
+	acl_tbl_info = devm_kzalloc(dev, sizeof(*acl_tbl_info), GFP_KERNEL);
+	if (!acl_tbl_info) {
+		LOG_ERROR("Failed to alloc acl tbl info.\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	ret = sxe2_fwc_acl_lut_alloc(adapter, &tbl_params, acl_tbl_info);
+	if (ret) {
+		LOG_ERROR("acl table create cmd fail, ret:%d.\n", ret);
+		goto l_free_acl_tbl_info;
+	}
+
+	sxe2_acl_divide_act_mems_res_to_tcams(acl_tbl_info);
+
+	first_entry_in_pf = acl_tbl_info->first_tcam * SXE2_FW_MAX_TCAM_ALLOC_UNITS +
+			    acl_tbl_info->first_entry / SXE2_ACL_ENTRY_ALLOC_UNIT;
+	last_entry_in_pf = acl_tbl_info->last_tcam * SXE2_FW_MAX_TCAM_ALLOC_UNITS +
+			   acl_tbl_info->last_entry / SXE2_ACL_ENTRY_ALLOC_UNIT;
+
+	for (i = first_entry_in_pf; i < last_entry_in_pf + 1; i++)
+		set_bit(i, acl_tbl_info->avail);
+
+	acl_tbl_info->max_slot_cnt = depth;
+
+	INIT_LIST_HEAD(&acl_tbl_info->scens);
+
+	adapter->acl_ctxt.acl_tbl_info = acl_tbl_info;
+	ret = 0;
+	goto l_end;
+
+l_free_acl_tbl_info:
+	adapter->acl_ctxt.acl_tbl_info = NULL;
+	devm_kfree(dev, acl_tbl_info);
+l_end:
+	return ret;
+}
+
+STATIC void sxe2_acl_tbl_param_calc(struct sxe2_adapter *adapter,
+				    struct sxe2_acl_tbl_params *params)
+{
+	u8 pf_cnt = adapter->pf_cnt;
+
+	if (pf_cnt == 0 || pf_cnt > 4)
+		pf_cnt = 4;
+
+	params->width = SXE2_FW_ACL_KEY_WIDTH_BYTES * SXE2_ACL_MAX_CASCADE_WIDTH;
+	params->depth = (SXE2_FW_ACL_TCAM_DEPTH *
+			 (SXE2_FW_ACL_LUT_NUM / SXE2_ACL_MAX_CASCADE_WIDTH)) /
+			pf_cnt;
+	params->entry_act_pairs = 1;
+}
+
+STATIC inline u16 sxe2_acl_calc_tbl_end_idx(u16 start, u16 num_entries, u16 width)
+{
+	u16 end_idx = 0;
+	u16 add_entries = 0;
+	u16 num_stack_level = 0;
+
+	end_idx = start + (num_entries - 1);
+
+	if (width > 1) {
+		num_stack_level = (start % SXE2_FW_ACL_TCAM_DEPTH) + num_entries;
+
+		num_stack_level = DIV_ROUND_UP(num_stack_level, SXE2_FW_ACL_TCAM_DEPTH);
+
+		add_entries = (width - 1) * num_stack_level * SXE2_FW_ACL_TCAM_DEPTH;
+	}
+
+	return end_idx + add_entries;
+}
+
+STATIC s32 sxe2_acl_alloc_partition_from_tbl(struct sxe2_adapter *adapter,
+					     struct sxe2_acl_scen_info *scen)
+{
+	struct sxe2_acl_tbl_info *acl_tbl_info = adapter->acl_ctxt.acl_tbl_info;
+	s32 ret = 0;
+	u16 width = 0;
+	u16 num_entry = 0;
+	u16 first_tcam_index = 0;
+	bool is_cascade = false;
+	bool is_ser_finish = false;
+	u16 i = 0;
+	u16 j = 0;
+	u16 cnt_entry = 0;
+	u16 start = 0;
+	bool slice_avail = true;
+	u16 slice_pos_in_tcams = 0;
+	u16 slice_pos_in_tcam = 0;
+	u16 offset = 0;
+
+	width = DIV_ROUND_UP(scen->width, SXE2_FW_ACL_KEY_WIDTH_BYTES);
+	if (width > acl_tbl_info->last_tcam - acl_tbl_info->first_tcam + 1) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	num_entry = ALIGN(scen->num_entry, SXE2_ACL_ENTRY_ALLOC_UNIT);
+	if (width == 1) {
+		is_cascade = false;
+		first_tcam_index = acl_tbl_info->first_tcam;
+	} else {
+		is_cascade = true;
+		first_tcam_index = acl_tbl_info->last_tcam + 1 - width;
+	}
+
+	do {
+		for (i = 0; i < SXE2_FW_MAX_TCAM_ALLOC_UNITS && cnt_entry < num_entry;
+		     i++) {
+			slice_avail = true;
+			slice_pos_in_tcam = !is_cascade ?
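+	/*
+	 * Search strategy: candidate rows are probed until num_entry
+	 * contiguous free allocation units are found. A single-width
+	 * scenario scans units forward from the first TCAM while a cascaded
+	 * one scans backward from the last, which keeps narrow and wide
+	 * scenarios packed toward opposite ends of the table.
+	 */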
i + : SXE2_FW_MAX_TCAM_ALLOC_UNITS - + i - 1; + for (j = first_tcam_index; + j < first_tcam_index + width && slice_avail; j++) { + slice_pos_in_tcams = (j * SXE2_FW_MAX_TCAM_ALLOC_UNITS) + + slice_pos_in_tcam; + + slice_avail = slice_avail && + (!!(test_bit(slice_pos_in_tcams, + acl_tbl_info->avail))); + } + + if (!slice_avail) { + cnt_entry = 0; + } else { + if (cnt_entry == 0 || is_cascade) { + start = (first_tcam_index * + SXE2_FW_ACL_TCAM_DEPTH) + + (slice_pos_in_tcam * + SXE2_ACL_ENTRY_ALLOC_UNIT); + } + cnt_entry += SXE2_ACL_ENTRY_ALLOC_UNIT; + } + } + + if (cnt_entry >= num_entry) { + scen->start = start; + scen->num_entry = num_entry; + scen->end = sxe2_acl_calc_tbl_end_idx(start, num_entry, width); + } + + first_tcam_index = (!is_cascade) ? first_tcam_index + width + : first_tcam_index - width; + if (first_tcam_index > acl_tbl_info->last_tcam || + first_tcam_index < acl_tbl_info->first_tcam) { + offset++; + + if (offset >= width) { + is_ser_finish = true; + } else { + first_tcam_index = + (!is_cascade) ? offset + : acl_tbl_info->last_tcam + + 1 - + offset - + width; + } + } + } while (!is_ser_finish); + +l_end: + return ret; +} + +STATIC void sxe2_acl_fill_scen_chunk_mask(struct sxe2_fwc_acl_scen_alloc_req *req, + struct sxe2_acl_scen_info *scen) +{ + u16 tcam_idx = 0; + u8 chunk_offset = 0; + u16 cnt_slice = 0; + u16 cnt_cascade = 0; + u16 i = 0; + u16 j = 0; + + tcam_idx = scen->start / SXE2_FW_ACL_TCAM_DEPTH; + + chunk_offset = (u8)((scen->start % SXE2_FW_ACL_TCAM_DEPTH) / + SXE2_ACL_ENTRY_ALLOC_UNIT); + + cnt_slice = scen->num_entry / SXE2_ACL_ENTRY_ALLOC_UNIT; + + cnt_cascade = scen->width / SXE2_FW_ACL_KEY_WIDTH_BYTES; + + for (i = 0; i < cnt_slice; i++) { + for (j = tcam_idx; + j < tcam_idx + cnt_cascade && j < SXE2_ACL_ACTION_TCAM_CNT; j++) + req->tcam_cfg[j].enable |= BIT(chunk_offset); + + chunk_offset++; + chunk_offset %= SXE2_FW_MAX_TCAM_ALLOC_UNITS; + + if (chunk_offset == 0) + tcam_idx += cnt_cascade; + } +} + +STATIC void sxe2_acl_fill_scen_tcam_select(struct sxe2_fwc_acl_scen_alloc_req *req, + u16 tcam_idx, u16 tcam_idx_in_cascade) +{ + u16 idx = 0; + u16 i = 0; + u8 val = 0; + + idx = tcam_idx_in_cascade * SXE2_FW_ACL_KEY_WIDTH_BYTES; + + for (i = 0; i < SXE2_FW_ACL_KEY_WIDTH_BYTES; i++) { + val = (u8)(SXE2_FW_ACL_BYTE_SEL_BASE + idx + i); + if (val > SXE2_FW_ACL_BYTE_SEL_BASE_RNG_CHK) + continue; + + req->tcam_cfg[tcam_idx].tcam_select[i] = val; + } +} + +STATIC void sxe2_acl_fill_scen_act_mem(struct sxe2_acl_tbl_info *tbl_info, + struct sxe2_fwc_acl_scen_alloc_req *req, + struct sxe2_acl_scen_info *scen, + u8 current_tcam_idx, u8 target_tcam_idx) +{ + struct sxe2_acl_act_mem *act_mem = NULL; + u8 i = 0; + + for (i = 0; i < SXE2_FW_MAX_ACTION_MEMORIES; i++) { + act_mem = &tbl_info->act_mems[i]; + + if (act_mem->act_mem == 0xff || + act_mem->member_of_tcam != current_tcam_idx) { + continue; + } + + req->act_mem_cfg[i] = target_tcam_idx; + req->act_mem_cfg[i] |= SXE2_ACL_ACT_MEM_EN; + + set_bit(i, scen->acl_act_mem_bitmap); + } +} + +STATIC void sxe2_acl_update_tbl_avail_sign(struct sxe2_acl_tbl_info *tbl_info, + struct sxe2_acl_scen_info *scen, bool is_avail) +{ + u16 tcam_idx = 0; + u16 cnt_cascade = 0; + u16 offset = 0; + u16 cnt_alloc_uint = 0; + u16 i = 0; + u16 j = 0; + u32 b_avail = 0; + + tcam_idx = scen->start / SXE2_FW_ACL_TCAM_DEPTH; + offset = (scen->start % SXE2_FW_ACL_TCAM_DEPTH) / SXE2_ACL_ENTRY_ALLOC_UNIT; + + cnt_alloc_uint = scen->num_entry / SXE2_ACL_ENTRY_ALLOC_UNIT; + + cnt_cascade = scen->width / SXE2_FW_ACL_KEY_WIDTH_BYTES; + + for (i = 0; i 
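+	/*
+	 * The loop below walks the scenario one allocation unit at a time
+	 * and flips the matching bit in tbl_info->avail for every TCAM in
+	 * the cascade, advancing one TCAM row whenever the unit offset
+	 * wraps. sxe2_acl_alloc_partition_from_tbl() searches the same
+	 * bitmap, so set and clear here must stay symmetric with that walk.
+	 */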
< cnt_alloc_uint; i++) { + for (j = 0; j < cnt_cascade; j++) { + b_avail = (tcam_idx + j) * SXE2_FW_MAX_TCAM_ALLOC_UNITS + offset; + if (is_avail) + set_bit(b_avail, tbl_info->avail); + else + clear_bit(b_avail, tbl_info->avail); + } + + offset += 1; + offset %= SXE2_FW_MAX_TCAM_ALLOC_UNITS; + + if (!offset) + tcam_idx += cnt_cascade; + } +} + +STATIC void sxe2_acl_init_entry_idx(struct sxe2_acl_scen_info *scen) +{ + scen->entry_first_index[SXE2_ACL_LUT_ENTRY_PRIO_NORMAL] = 0; + scen->entry_last_index[SXE2_ACL_LUT_ENTRY_PRIO_NORMAL] = scen->num_entry - 1; +} + +STATIC s32 sxe2_acl_create_scen_info(struct sxe2_adapter *adapter, + struct sxe2_acl_scen_info *scen) +{ + struct sxe2_acl_tbl_info *tbl_info = adapter->acl_ctxt.acl_tbl_info; + u8 first_tcam = 0; + u8 last_tcam = 0; + u16 cnt_cascade = 0; + s32 ret = 0; + struct sxe2_fwc_acl_scen_alloc_req req = {}; + u16 i = 0; + u16 j = 0; + u16 last_tcam_idx_cascade = 0; + u16 scen_id = 0; + + cnt_cascade = DIV_ROUND_UP(scen->width, SXE2_FW_ACL_KEY_WIDTH_BYTES); + first_tcam = scen->start / SXE2_FW_ACL_TCAM_DEPTH; + last_tcam = scen->end / SXE2_FW_ACL_TCAM_DEPTH; + + scen->avail_width = cnt_cascade * SXE2_FW_ACL_KEY_WIDTH_BYTES - 3; + + scen->rnage_chk_idx = (cnt_cascade - 1) * SXE2_FW_ACL_KEY_WIDTH_BYTES + 4; + scen->pid_idx = (cnt_cascade - 1) * SXE2_FW_ACL_KEY_WIDTH_BYTES + 3; + scen->pkt_dir_idx = (cnt_cascade - 1) * SXE2_FW_ACL_KEY_WIDTH_BYTES + 2; + + sxe2_acl_fill_scen_chunk_mask(&req, scen); + req.tcam_cfg[first_tcam].start_cmp_set |= SXE2_ACL_ALLOC_SCEN_START_SET; + i = first_tcam; + while (i <= last_tcam) { + last_tcam_idx_cascade = i + cnt_cascade - 1; + req.tcam_cfg[i].start_cmp_set |= SXE2_ACL_ALLOC_SCEN_START_CMP; + for (j = 0; j < cnt_cascade; j++) { + sxe2_acl_fill_scen_tcam_select(&req, i + j, j); + sxe2_acl_fill_scen_act_mem(tbl_info, &req, scen, i + j, + last_tcam_idx_cascade); + } + + i += cnt_cascade; + } + + i = 0; + while (i < first_tcam) { + req.tcam_cfg[i++].start_cmp_set = SXE2_ACL_ALLOC_SCEN_START_CMP | + SXE2_ACL_ALLOC_SCEN_START_SET; + } + + i = last_tcam + 1; + while (i < SXE2_FW_ACL_LUT_NUM) { + req.tcam_cfg[i++].start_cmp_set = SXE2_ACL_ALLOC_SCEN_START_CMP | + SXE2_ACL_ALLOC_SCEN_START_SET; + } + + ret = sxe2_fwc_acl_scen_alloc(adapter, &req, &scen_id); + if (ret) { + LOG_ERROR("Alloc scen cmd failed, ret:%d.\n", ret); + goto l_end; + } + + scen->scen_id = scen_id; + sxe2_acl_update_tbl_avail_sign(tbl_info, scen, false); + sxe2_acl_init_entry_idx(scen); + +l_end: + return ret; +} + +STATIC s32 sxe2_acl_create_scenario(struct sxe2_adapter *adapter, + struct sxe2_acl_tbl_params *params) +{ + struct sxe2_acl_scen_info *scen = NULL; + struct sxe2_acl_tbl_info *acl_tbl_info = adapter->acl_ctxt.acl_tbl_info; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + + scen = devm_kzalloc(dev, sizeof(*scen), GFP_KERNEL); + if (!scen) { + LOG_ERROR("Failed to alloc scen mem.\n"); + ret = -ENOMEM; + goto l_end; + } + INIT_LIST_HEAD(&scen->l_entry); + scen->start = acl_tbl_info->first_entry; + scen->width = SXE2_FW_ACL_KEY_WIDTH_BYTES * + DIV_ROUND_UP(params->width, SXE2_FW_ACL_KEY_WIDTH_BYTES); + scen->num_entry = params->depth; + + ret = sxe2_acl_alloc_partition_from_tbl(adapter, scen); + if (ret) { + LOG_ERROR("Failed to alloc scen mem.\n"); + goto l_free_scen; + } + + ret = sxe2_acl_create_scen_info(adapter, scen); + if (ret) { + LOG_ERROR("Failed to init scen.\n"); + goto l_free_scen; + } + list_add(&scen->l_entry, &acl_tbl_info->scens); + goto l_end; + +l_free_scen: + if (scen) { + devm_kfree(dev, scen); + 
scen = NULL;
+	}
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_acl_init(struct sxe2_adapter *adapter)
+{
+	struct sxe2_acl_tbl_params params = {};
+	struct sxe2_acl_context *acl_ctx = &adapter->acl_ctxt;
+	s32 ret = 0;
+
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		return 0;
+
+	mutex_init(&acl_ctx->filter_lock);
+	bitmap_fill(adapter->acl_ctxt.slots, SXE2_ACL_MAX_NUM_ENTRY);
+	sxe2_flow_ppp_comm_ctxt_init(&acl_ctx->ppp, adapter, SXE2_HW_BLOCK_ID_ACL);
+
+	sxe2_acl_tbl_param_calc(adapter, &params);
+
+	ret = sxe2_acl_create_table(adapter, &params);
+	if (ret) {
+		LOG_ERROR("Failed to create acl table, ret=%d\n", ret);
+		goto l_deinit;
+	}
+
+	ret = sxe2_acl_create_scenario(adapter, &params);
+	if (ret) {
+		LOG_ERROR("Failed to create acl scenario, ret=%d\n", ret);
+		goto l_deinit;
+	}
+
+	goto l_end;
+
+l_deinit:
+	sxe2_acl_deinit(adapter);
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_acl_hw_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_acl_context *acl_ctx = &adapter->acl_ctxt;
+	struct sxe2_acl_tbl_info *acl_tbl_info = acl_ctx->acl_tbl_info;
+	struct sxe2_acl_scen_info *scen, *tmp_scen;
+	s32 ret = 0;
+
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		goto l_end;
+
+	if (!acl_tbl_info)
+		goto l_end;
+
+	list_for_each_entry_safe(scen, tmp_scen, &acl_tbl_info->scens, l_entry) {
+		ret = sxe2_fwc_acl_scen_dealloc(adapter, scen->scen_id);
+		if (ret) {
+			LOG_ERROR_BDF("Dealloc scen cmd failed, ret:%d.\n", ret);
+			goto l_end;
+		}
+	}
+
+	ret = sxe2_fwc_acl_lut_dealloc(adapter, acl_tbl_info->id);
+	if (ret) {
+		LOG_ERROR("Acl tbl dealloc cmd failed, ret:%d\n", ret);
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC void sxe2_acl_flow_ctxt_deinit(struct sxe2_adapter *adapter)
+{
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		return;
+
+	sxe2_flow_ppp_comm_ctxt_deinit(&adapter->acl_ctxt.ppp);
+}
+
+STATIC void sxe2_acl_sw_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_acl_context *acl_ctx = &adapter->acl_ctxt;
+	struct sxe2_acl_tbl_info *acl_tbl_info = acl_ctx->acl_tbl_info;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_acl_scen_info *scen, *tmp_scen;
+
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		return;
+
+	if (!acl_tbl_info)
+		return;
+
+	list_for_each_entry_safe(scen, tmp_scen, &acl_tbl_info->scens, l_entry) {
+		list_del(&scen->l_entry);
+		devm_kfree(dev, scen);
+	}
+
+	devm_kfree(dev, acl_tbl_info);
+	acl_ctx->acl_tbl_info = NULL;
+	mutex_destroy(&acl_ctx->filter_lock);
+}
+
+void sxe2_acl_deinit(struct sxe2_adapter *adapter)
+{
+	(void)sxe2_acl_hw_deinit(adapter);
+
+	sxe2_acl_flow_ctxt_deinit(adapter);
+
+	sxe2_acl_sw_deinit(adapter);
+}
+
+s32 sxe2_acl_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id,
+				struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	u16 i = 0;
+	u16 j = 0;
+	u16 table_idx = 0;
+	u16 per_size = 0;
+	u16 ddp_max_cnt = 0;
+	u8 port_idx = adapter->port_idx;
+
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		return -EOPNOTSUPP;
+
+	per_size = sizeof(struct sxe2_ddp_acl_ptg);
+	ddp_max_cnt = (SXE2_MAX_PTYPE_NUM * SXE2_MAX_CDID_NUM) / per_size;
+	if (!data || base_id >= ddp_max_cnt || cnt > ddp_max_cnt) {
+		LOG_ERROR("sxe2 acl ptg parse from ddp failed, port_idx=%u !\n",
+			  port_idx);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	table_idx = base_id * per_size;
+	for (i = 0; i < cnt; i++) {
+		for (j = 0; j < per_size; j++) {
+			if (table_idx >= (port_idx * SXE2_MAX_PTYPE_NUM) &&
+			    table_idx < ((port_idx + 1) * SXE2_MAX_PTYPE_NUM)) {
+				adapter->acl_ctxt.ppp
+					.pt_to_grp[table_idx % SXE2_MAX_PTYPE_NUM]
+					.idx = *data;
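+				/*
+				 * Only the slice of the DDP table owned by
+				 * this port is consumed; entries for other
+				 * ports are skipped but still advance
+				 * table_idx, keeping the walk aligned with
+				 * the blob layout.
+				 */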
+ } + table_idx++; + data++; + } + } + LOG_INFO_BDF("sxe2 acl ptg parse from ddp, port_idx=%u !\n", port_idx); + +l_end: + return ret; +} + +STATIC u16 sxe2_acl_get_avail_entry_idx(struct sxe2_acl_scen_info *scen, + enum sxe2_acl_lut_entry_priorty priority) +{ + u16 first_idx = 0; + u16 last_idx = 0; + u16 i = 0; + s8 step = 0; + u16 entry_idx = 0xffff; + + first_idx = scen->entry_first_index[priority]; + last_idx = scen->entry_last_index[priority]; + step = (first_idx < last_idx) ? 1 : -1; + + for (i = first_idx; i != last_idx + step; i += step) { + if (!test_bit(i, scen->acl_entry_bitmap)) { + set_bit(i, scen->acl_entry_bitmap); + entry_idx = i; + goto l_ret; + } + } + +l_ret: + return entry_idx; +} + +STATIC s32 sxe2_acl_remove_flow_lut_entry(struct sxe2_adapter *adapter, + struct sxe2_acl_scen_info *scen, u16 entry_idx) +{ + struct sxe2_acl_tbl_info *acl_tbl_info = adapter->acl_ctxt.acl_tbl_info; + struct sxe2_acl_entry_data lut_entry_data = {}; + struct sxe2_acl_act_entry_data act_entry_data = {0}; + struct sxe2_acl_act_mem *act_mem = NULL; + s32 ret = 0, last_err = 0; + u16 cnt_cascade = 0; + u16 first_tcam = 0; + u16 idx_in_tcam = 0; + u16 i = 0; + u16 absolute_idx = 0; + + if (!scen || scen->num_entry <= entry_idx) { + LOG_ERROR_BDF("Invalid entry_idx %d\n", entry_idx); + last_err = -EINVAL; + goto l_end; + } + + if (!test_bit(entry_idx, scen->acl_entry_bitmap)) { + LOG_WARN_BDF("Entry %u already cleared in bitmap\n", entry_idx); + last_err = 0; + goto l_end; + } + + absolute_idx = scen->start + entry_idx; + first_tcam = (u16)(absolute_idx / SXE2_FW_ACL_TCAM_DEPTH); + idx_in_tcam = (u16)(absolute_idx % SXE2_FW_ACL_TCAM_DEPTH); + cnt_cascade = DIV_ROUND_UP(scen->width, SXE2_FW_ACL_KEY_WIDTH_BYTES); + for (i = 0; i < cnt_cascade; i++) { + ret = sxe2_fwc_acl_lut_entry(adapter, (u8)(first_tcam * cnt_cascade + i), + idx_in_tcam, &lut_entry_data); + if (ret) { + LOG_ERROR_BDF("Acl remove flow lut entry cmd failed, ret:%d\n", + ret); + last_err = ret; + } + } + + for_each_set_bit(i, scen->acl_act_mem_bitmap, SXE2_FW_MAX_ACTION_MEMORIES) { + act_mem = &acl_tbl_info->act_mems[i]; + if (act_mem->member_of_tcam >= first_tcam * cnt_cascade && + act_mem->member_of_tcam < first_tcam * cnt_cascade + cnt_cascade) { + ret = sxe2_fwc_acl_act_entry(adapter, (u8)i, idx_in_tcam, + &act_entry_data); + if (ret) { + LOG_ERROR_BDF("Acl remove flow act cmd failed, ret:%d\n", ret); + last_err = ret; + } + } + } + + clear_bit(entry_idx, scen->acl_entry_bitmap); + +l_end: + return last_err; +} + +STATIC s32 sxe2_acl_add_flow_lut_entry(struct sxe2_adapter *adapter, + struct sxe2_acl_scen_info *scen, u16 *entry_idx, + enum sxe2_acl_lut_entry_priorty priority, u8 *keys, + u8 *inverts, + struct sxe2_acl_flow_action *act_entry_data) +{ + struct sxe2_acl_entry_data lut_entry_data = {}; + s32 ret = 0; + u16 cnt_cascade = 0; + u16 i = 0; + u8 cascade_ver_idx = 0; + u16 absolute_idx = 0; + u16 tcam_block_offset = 0; + u16 line_idx_in_tcam = 0; + u16 target_tcam_id = 0; + + if (!scen) { + LOG_ERROR_BDF("Invalid scen\n"); + ret = -EINVAL; + goto l_end; + } + + *entry_idx = sxe2_acl_get_avail_entry_idx(scen, priority); + if (*entry_idx >= scen->num_entry) { + LOG_ERROR_BDF("Invalid entry_idx %d, max entry_idx is %d\n", + *entry_idx, scen->num_entry); + ret = -ENOSPC; + goto l_end; + } + + absolute_idx = scen->start + *entry_idx; + tcam_block_offset = (u16)(absolute_idx / SXE2_FW_ACL_TCAM_DEPTH); + line_idx_in_tcam = absolute_idx % SXE2_FW_ACL_TCAM_DEPTH; + cnt_cascade = DIV_ROUND_UP(scen->width, 
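+	/*
+	 * The add path is transactional in effect: key and invert material
+	 * is written one cascade slice at a time, and any failure (including
+	 * a failed action write) unwinds through
+	 * sxe2_acl_remove_flow_lut_entry(), which also releases the entry
+	 * bit, so a partially programmed rule is never left in the TCAM.
+	 */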
SXE2_FW_ACL_KEY_WIDTH_BYTES); + for (i = 0; i < cnt_cascade; i++) { + cascade_ver_idx = (u8)(cnt_cascade - i - 1); + target_tcam_id = tcam_block_offset * cnt_cascade + cascade_ver_idx; + + memcpy(&lut_entry_data.entry_key.val, + &keys[cascade_ver_idx * SXE2_FW_ACL_KEY_WIDTH_BYTES], + SXE2_FW_ACL_KEY_WIDTH_BYTES); + memcpy(&lut_entry_data.entry_key_invert.val, + &inverts[cascade_ver_idx * SXE2_FW_ACL_KEY_WIDTH_BYTES], + SXE2_FW_ACL_KEY_WIDTH_BYTES); + + lut_entry_data.entry_key.enable = 1; + lut_entry_data.entry_key_invert.enable = 1; + LOG_DEBUG_BDF("Acl add flow lut entry, idx:%d, tcam_id:%d.\n", + line_idx_in_tcam, target_tcam_id); + ret = sxe2_fwc_acl_lut_entry(adapter, target_tcam_id, line_idx_in_tcam, + &lut_entry_data); + if (ret) { + LOG_ERROR_BDF("LUT Entry write failed at TCAM:%u, Line:%u, ret:%d\n", + target_tcam_id, line_idx_in_tcam, ret); + goto l_rem_flow_entry; + } + } + ret = sxe2_acl_add_act(adapter, scen, *entry_idx, &act_entry_data->data.acl_act); + if (ret) { + LOG_ERROR_BDF("Acl add flow act cmd failed, ret:%d\n", ret); + goto l_rem_flow_entry; + } + +l_rem_flow_entry: + if (ret) { + (void)sxe2_acl_remove_flow_lut_entry(adapter, scen, *entry_idx); + *entry_idx = 0; + } + +l_end: + return ret; +} + +STATIC void sxe2_flow_acl_set_diss_fld(struct sxe2_flow_dissector_info *dissector, + u16 fld_id) +{ + u16 val_loc = 0; + u16 mask_loc = 0; + + switch (fld_id) { + case SXE2_FLOW_FLD_ID_ETH_DA: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, eth.h_dest); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, eth_mask.h_dest); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + case SXE2_FLOW_FLD_ID_ETH_SA: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, eth.h_source); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, eth_mask.h_source); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + case SXE2_FLOW_FLD_ID_IPV4_SA: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, ip.v4.src_ip); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, mask.v4.src_ip); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + case SXE2_FLOW_FLD_ID_IPV4_DA: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, ip.v4.dst_ip); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, mask.v4.dst_ip); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + case SXE2_FLOW_FLD_ID_TCP_SRC_PORT: + case SXE2_FLOW_FLD_ID_UDP_SRC_PORT: + case SXE2_FLOW_FLD_ID_SCTP_SRC_PORT: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, l4.src_port); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, l4_mask.src_port); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + case SXE2_FLOW_FLD_ID_TCP_DST_PORT: + case SXE2_FLOW_FLD_ID_UDP_DST_PORT: + case SXE2_FLOW_FLD_ID_SCTP_DST_PORT: + val_loc = offsetof(struct sxe2_fnav_filter_full_key, l4.dst_port); + mask_loc = offsetof(struct sxe2_fnav_filter_full_key, l4_mask.dst_port); + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)fld_id, val_loc, + mask_loc, SXE2_U16_MASK); + break; + default: + break; + } +} + +STATIC void sxe2_acl_gen_dissector_info(struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, struct sxe2_fnav_flow_seg *seg) +{ + u64 i = 0; + struct sxe2_flow_dissector_info *dissector = NULL; + + dissector = 
&dissectors[dissectors_cnt - 1]; + + for_each_set_bit(i, seg->fields, SXE2_FLOW_FLD_ID_MAX) { + sxe2_flow_acl_set_diss_fld(dissector, (u16)i); + } + + bitmap_or(dissector->headers, dissector->headers, seg->headers, + SXE2_FLOW_HDR_MAX); + + for (i = 0; i < seg->raw_cnt; i++) { + sxe2_flow_add_diss_raw(dissector, seg->raw[i].offset, SXE2_U16_MASK, + SXE2_U16_MASK, seg->raw[i].len); + } +} + +STATIC struct sxe2_flow_info_node *sxe2_acl_hw_flow_add(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_seg *segs) +{ + s32 ret = 0; + struct sxe2_ppp_common_ctxt *ppp_ctxt = &adapter->acl_ctxt.ppp; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_dissector_info *dissectors = NULL; + bool is_tunnel = segs->is_tunnel; + u8 dissectors_cnt = is_tunnel ? 2 : 1; + struct sxe2_flow_info_node *flow = NULL; + u16 i = 0; + struct sxe2_fnav_flow_seg *seg = NULL; + + dissectors = devm_kcalloc(dev, dissectors_cnt, sizeof(*dissectors), GFP_KERNEL); + if (!dissectors) + goto l_end; + + for (i = 0; i < dissectors_cnt; i++) { + seg = &segs[i]; + LOG_DEV_DEBUG("add a flow, header:0x%lX, field[0]:0x%lX, is_tun:%d.\n", + seg->headers[0], seg->fields[0], seg->is_tunnel); + sxe2_acl_gen_dissector_info(dissectors, (u8)(i + 1), seg); + } + + flow = sxe2_find_flow(ppp_ctxt, dissectors, dissectors_cnt); + if (flow) { + LOG_DEBUG_BDF("find a flow with seg cfg.\n"); + goto l_end; + } + + ret = sxe2_flow_creat(ppp_ctxt, dissectors, dissectors_cnt, &flow); + if (ret) + LOG_ERROR_BDF("create a flow with seg cfg failed, ret:%d\n", ret); + +l_end: + if (dissectors) + devm_kfree(dev, dissectors); + + return flow; +} + +STATIC s32 sxe2_acl_hw_flow_del(struct sxe2_vsi *vsi, struct sxe2_flow_info_node *flow) +{ + s32 ret = 0; + struct sxe2_ppp_common_ctxt *ppp_ctxt = &vsi->adapter->acl_ctxt.ppp; + + if (!flow) + return 0; + + if (bitmap_empty((unsigned long *)flow->used_vsi, SXE2_MAX_VSI_NUM)) + ret = sxe2_flow_delete(ppp_ctxt, flow); + + return ret; +} + +STATIC s32 sxe2_acl_flow_cfg_del(struct sxe2_vsi *vsi, struct sxe2_acl_flow_cfg *flow_cfg) +{ + s32 ret = 0; + u64 vsi_sw_id = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_ppp_common_ctxt *ppp_ctxt = &adapter->acl_ctxt.ppp; + struct sxe2_flow_info_node *flow = NULL; + + if (!flow_cfg->seg) + return 0; + + if (!flow_cfg->seg->flow_ptr) + return 0; + + list_del(&flow_cfg->l_node); + flow = flow_cfg->seg->flow_ptr; + + for_each_set_bit(vsi_sw_id, flow_cfg->seg->vsis, SXE2_MAX_VSI_NUM) { + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, (u16)vsi_sw_id); + if (ret) { + LOG_ERROR_BDF("fnav hw flow disassociate vsi failed, vsi_sw_id:%u ret:%d\n", + (u16)vsi_sw_id, ret); + } + clear_bit((u16)vsi_sw_id, flow_cfg->seg->vsis); + } + + ret = sxe2_acl_hw_flow_del(vsi, flow); + if (ret) { + LOG_ERROR_BDF("fnav hw flow del failed, vsi_sw_id:%u ret:%d\n", + (u16)vsi_sw_id, ret); + } + + return ret; +} + +s32 sxe2_acl_flow_cfg_add(struct sxe2_vsi *vsi, struct sxe2_acl_flow_cfg *flow_cfg, + struct sxe2_fnav_flow_seg *seg) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_ppp_common_ctxt *ppp_ctxt = &adapter->acl_ctxt.ppp; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_seg *seg_old = flow_cfg->seg; + struct sxe2_flow_info_node *flow = NULL; + + if (seg_old) { + if (bitmap_equal(seg_old->headers, seg->headers, SXE2_FLOW_HDR_MAX)) { + LOG_INFO_BDF("both segs are same, flow_type:%d, tun:%d.\n", + flow_cfg->flow_type, seg->is_tunnel); + devm_kfree(dev, seg); + return -EEXIST; + } + } + + flow = 
sxe2_acl_hw_flow_add(adapter, seg); + if (!flow) { + LOG_ERROR_BDF("fnav hw flow add failed, ret:%d\n", ret); + ret = -EIO; + goto l_flow_add_failed; + } + + ret = sxe2_flow_assoc_vsi(ppp_ctxt, flow, vsi->id_in_pf); + if (ret) { + LOG_ERROR_BDF("fnav hw flow associate main vsi failed, ret:%d\n", ret); + (void)sxe2_acl_hw_flow_del(vsi, flow); + goto l_flow_add_failed; + } + + if (seg_old) + devm_kfree(dev, seg_old); + + INIT_LIST_HEAD(&flow->acl_entry); + mutex_init(&flow->acl_entry_lock); + set_bit(vsi->id_in_pf, seg->vsis); + seg->flow_ptr = flow; + flow_cfg->seg = seg; + + return 0; + +l_flow_add_failed: + if (seg) { + devm_kfree(dev, seg); + seg = NULL; + } + return ret; +} + +STATIC s32 sxe2_acl_slot_id_alloc(unsigned long *slots, u32 *slot_id) +{ + u32 pos = 0; + s32 ret = 0; + + pos = find_next_bit(slots, SXE2_ACL_MAX_NUM_ENTRY, 0); + if (pos >= SXE2_ACL_MAX_NUM_ENTRY) { + ret = -EINVAL; + goto l_end; + } + clear_bit(pos, slots); + + *slot_id = pos; + +l_end: + return ret; +} + +STATIC void sxe2_acl_slot_id_free(unsigned long *slots, u32 slot_id) +{ + if (slot_id >= SXE2_ACL_MAX_NUM_ENTRY) + return; + + set_bit(slot_id, slots); +} + +STATIC s32 sxe2_acl_slot_id_set(unsigned long *slots, u32 max_bits) +{ + s32 pos = -1; + s32 ret = 0; + s32 i; + + for (i = max_bits - 1; i >= 0; i--) { + if (!test_bit(i, slots)) { + pos = i; + break; + } + } + + if (pos < 0) { + ret = -EINVAL; + goto l_end; + } + + if (test_and_set_bit(pos, slots)) { + ret = -EAGAIN; + goto l_end; + } + +l_end: + return ret; +} + +STATIC struct sxe2_acl_flow_entry * +sxe2_flow_acl_scen_entry_cond_compare(struct sxe2_flow_info_node *flow_node, + struct sxe2_acl_flow_entry *flow_entry, + bool *do_chg_action, bool *do_add_entry, + bool *do_rem_entry) +{ + struct sxe2_acl_flow_entry *return_node = NULL; + struct sxe2_acl_flow_entry *node = NULL; + struct sxe2_acl_flow_entry *tmp = NULL; + + *do_chg_action = false; + *do_add_entry = true; + *do_rem_entry = false; + + mutex_lock(&flow_node->acl_entry_lock); + list_for_each_entry_safe(node, tmp, &flow_node->acl_entry, l_entry) { + if (node->entry_size != flow_entry->entry_size || + memcmp(node->entry, flow_entry->entry, node->entry_size)) { + continue; + } + *do_add_entry = false; + return_node = node; + + if (memcmp(node->action, flow_entry->action, sizeof(*node->action))) { + *do_chg_action = true; + *do_rem_entry = true; + break; + } + } + mutex_unlock(&flow_node->acl_entry_lock); + + return return_node; +} + +STATIC s32 sxe2_flow_acl_add_scen_entry(struct sxe2_adapter *adapter, + struct sxe2_acl_flow_entry *flow_entry, + struct sxe2_flow_info_node *flow) +{ + struct sxe2_acl_flow_entry *node = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 ret = -EEXIST; + bool do_add_entry = false; + bool do_rem_entry = false; + bool do_chg_action = false; + u8 *keys = NULL; + u8 *inverts = NULL; + u16 entry_idx = 0; + + node = sxe2_flow_acl_scen_entry_cond_compare(flow, flow_entry, &do_chg_action, + &do_add_entry, &do_rem_entry); + + if (do_rem_entry && node) { + ret = sxe2_acl_remove_flow_lut_entry(adapter, node->flow->cfg.scen, + node->scen_entry_idx); + if (ret) { + LOG_ERROR_BDF("Failed to remove flow entry, ret:%d\n", ret); + goto l_end; + } + + list_del(&node->l_entry); + if (node->action) { + devm_kfree(dev, node->action); + node->action = NULL; + } + if (node->entry) { + devm_kfree(dev, node->entry); + node->entry = NULL; + } + devm_kfree(dev, node); + } + + if (do_add_entry) { + keys = (u8 *)flow_entry->entry; + inverts = keys + (flow_entry->entry_size / 2); + 
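/* The entry buffer packs the TCAM match keys in its first
+		 * half and the matching invert bytes in the second half;
+		 * sxe2_acl_add_flow_lut_entry() consumes them per cascade
+		 * segment.
+		 */
+		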
ret = sxe2_acl_add_flow_lut_entry(adapter, flow->cfg.scen, &entry_idx, + SXE2_ACL_LUT_ENTRY_PRIO_NORMAL, keys, + inverts, flow_entry->action); + if (ret) { + LOG_ERROR_BDF("Failed to add flow entry, ret:%d\n", ret); + goto l_end; + } + flow_entry->scen_entry_idx = entry_idx; + mutex_lock(&flow->acl_entry_lock); + list_add_tail(&flow_entry->l_entry, &flow->acl_entry); + mutex_unlock(&flow->acl_entry_lock); + } else { + if (do_chg_action) { + LOG_INFO_BDF("flow entry is exist, do change action.\n"); + goto l_end; + } + } + +l_end: + return ret; +} + +s32 sxe2_acl_lut_entry_add(struct sxe2_vsi *vsi, struct sxe2_acl_filter *filter, + struct sxe2_acl_flow_action *acts) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_acl_flow_cfg *flow_cfg = NULL; + struct sxe2_flow_info_node *flow = NULL; + struct sxe2_acl_flow_entry *acl_flow_entry = NULL; + u16 max_filter_cnt = adapter->acl_ctxt.acl_tbl_info->max_slot_cnt; + u32 slot_id = 0; + + mutex_lock(&adapter->acl_ctxt.filter_lock); + + ret = sxe2_acl_slot_id_alloc(adapter->acl_ctxt.slots, &slot_id); + if (ret) { + LOG_ERROR_BDF("failed to alloc slot id, ret:%d\n", ret); + goto l_unlock; + } + + if (slot_id >= max_filter_cnt) { + LOG_ERROR_BDF("Exceed the max entry number:%d of HW support\n", + max_filter_cnt); + ret = -EOPNOTSUPP; + goto l_free_slot_id; + } + + flow_cfg = sxe2_acl_find_flow_cfg_by_flow_type(vsi, filter->flow_type); + if (!flow_cfg) { + LOG_ERROR_BDF("flow node is not exist, flow_type: %d.\n", + filter->flow_type); + ret = -EINVAL; + goto l_free_slot_id; + } + flow = flow_cfg->seg->flow_ptr; + + acl_flow_entry = devm_kzalloc(dev, sizeof(*acl_flow_entry), GFP_KERNEL); + if (!acl_flow_entry) { + LOG_ERROR_BDF("Failed to alloc acl flow entry\n"); + ret = -ENOMEM; + goto l_free_slot_id; + } + + acl_flow_entry->flow = flow; + + ret = sxe2_flow_acl_format_lut_act_entry(adapter, acl_flow_entry, flow, acts, + (u8 *)&filter->full_key); + if (ret) { + LOG_ERROR_BDF("Failed to format lut ands act entry, ret %d.\n", ret); + goto l_err_free_flow_struct; + } + + ret = sxe2_flow_acl_add_scen_entry(adapter, acl_flow_entry, flow); + if (ret) { + LOG_ERROR_BDF("Failed to add scen entry, ret %d.\n", ret); + goto l_err_free_flow_members; + } + + filter->flow_entry = acl_flow_entry; + list_add(&filter->l_node, &vsi->acl.filter_list); + flow_cfg->filter_cnt++; + mutex_unlock(&adapter->acl_ctxt.filter_lock); + return 0; + +l_err_free_flow_members: + if (acl_flow_entry->action) { + devm_kfree(dev, acl_flow_entry->action); + acl_flow_entry->action = NULL; + } + + if (acl_flow_entry->entry) { + devm_kfree(dev, acl_flow_entry->entry); + acl_flow_entry->entry = NULL; + } + +l_err_free_flow_struct: + devm_kfree(dev, acl_flow_entry); + +l_free_slot_id: + sxe2_acl_slot_id_free(adapter->acl_ctxt.slots, slot_id); + +l_unlock: + mutex_unlock(&adapter->acl_ctxt.filter_lock); + return ret; +} + +STATIC s32 sxe2_acl_del_entry(struct sxe2_adapter *adapter, + struct sxe2_acl_flow_entry *flow_entry) +{ + s32 ret = 0; + struct sxe2_acl_scen_info *scen = NULL; + struct sxe2_flow_info_node *flow = NULL; + u16 entry_idx = 0; + + if (!flow_entry) { + LOG_DEBUG_BDF("flow_entry is NULL, nothing to delete\n"); + return 0; + } + + flow = flow_entry->flow; + if (flow) { + scen = flow->cfg.scen; + if (!scen) { + LOG_ERROR_BDF("scen is NULL, nothing to delete\n"); + } else { + entry_idx = flow_entry->scen_entry_idx; + ret = sxe2_acl_remove_flow_lut_entry(adapter, scen, entry_idx); + if (ret) { + 
LOG_ERROR_BDF("Failed to delete entry,scen_entry_idx:%u\n", + entry_idx); + } + } + } + + if (!list_empty(&flow_entry->l_entry)) + list_del(&flow_entry->l_entry); + else + LOG_WARN_BDF("flow_entry %p not in any list\n", flow_entry); + + return ret; +} + +STATIC s32 sxe2_acl_filter_del(struct sxe2_vsi *vsi, struct sxe2_acl_filter *filter) +{ + s32 ret1 = 0; + s32 ret2 = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_acl_flow_cfg *flow_cfg; + struct sxe2_acl_flow_entry *acl_flow_entry = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 max_filter_cnt = adapter->acl_ctxt.acl_tbl_info->max_slot_cnt; + + flow_cfg = sxe2_acl_find_flow_cfg_by_flow_type(vsi, filter->flow_type); + if (!flow_cfg) { + LOG_ERROR_BDF("Flow cfg not found, filter_id:%llu, flow_type:%d\n", + filter->filter_id, filter->flow_type); + if (!list_empty(&filter->l_node)) + list_del(&filter->l_node); + else + LOG_ERROR_BDF("filter %p not in any list\n", filter); + + devm_kfree(dev, filter); + return -EINVAL; + } + + if (!list_empty(&filter->l_node)) { + list_del(&filter->l_node); + acl_flow_entry = filter->flow_entry; + ret1 = sxe2_acl_del_entry(adapter, acl_flow_entry); + if (ret1) { + LOG_ERROR_BDF("Failed to delete, filter_id:%llu, flow_type:%d,ret:%d\n", + filter->filter_id, filter->flow_type, ret1); + } + + if (flow_cfg->filter_cnt > 0) { + flow_cfg->filter_cnt--; + } else { + LOG_WARN_BDF("filter_cnt already 0 for flow_type %d, filter_id=%llu\n", + filter->flow_type, filter->filter_id); + } + } + + if (flow_cfg->filter_cnt == 0) { + LOG_DEBUG_BDF("flow cfg is empty, flow_type:%d.\n", filter->flow_type); + ret2 = sxe2_acl_flow_cfg_del(vsi, flow_cfg); + if (ret2) { + LOG_WARN_BDF("delete hw failed, filter_id:%llu, flow_type:%d, ret:%d\n", + filter->filter_id, filter->flow_type, ret2); + } + + if (flow_cfg->seg) { + devm_kfree(dev, flow_cfg->seg); + flow_cfg->seg = NULL; + } + + devm_kfree(dev, flow_cfg); + } + + if (acl_flow_entry) { + if (acl_flow_entry->action) { + devm_kfree(dev, acl_flow_entry->action); + acl_flow_entry->action = NULL; + } + + if (acl_flow_entry->entry) { + devm_kfree(dev, acl_flow_entry->entry); + acl_flow_entry->entry = NULL; + } + devm_kfree(dev, acl_flow_entry); + filter->flow_entry = NULL; + } + + devm_kfree(dev, filter); + (void)sxe2_acl_slot_id_set(adapter->acl_ctxt.slots, max_filter_cnt); + + return ret1 ? 
ret1 : ret2; +} + +s32 sxe2_acl_del_filter_by_vsi(struct sxe2_vsi *rule_vsi) +{ + s32 ret = 0; + s32 error = 0; + struct sxe2_adapter *adapter = rule_vsi->adapter; + struct sxe2_acl_filter *filter = NULL; + struct sxe2_acl_filter *tmp = NULL; + + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags)) + return 0; + + mutex_lock(&adapter->acl_ctxt.filter_lock); + list_for_each_entry_safe(filter, tmp, &rule_vsi->acl.filter_list, l_node) { + ret = sxe2_acl_filter_del(rule_vsi, filter); + if (ret) { + LOG_ERROR_BDF("sxe2 acl delete filter failed, rule_vsi_id=%u, ret:%d\n", + rule_vsi->idx_in_dev, ret); + error = ret; + } + } + + mutex_unlock(&adapter->acl_ctxt.filter_lock); + + return error; +} + +void sxe2_vsi_acl_init(struct sxe2_vsi *vsi) +{ + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, vsi->adapter->flags)) + return; + + memset(&vsi->acl, 0, sizeof(vsi->acl)); + mutex_init(&vsi->acl.flow_cfg_lock); + INIT_LIST_HEAD(&vsi->acl.filter_list); + INIT_LIST_HEAD(&vsi->acl.flow_cfg_list); + bitmap_fill(vsi->acl.filter_ids, SXE2_ACL_MAX_NUM_ENTRY); +} + +void sxe2_vsi_acl_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_acl *vsi_acl = &vsi->acl; + + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, vsi->adapter->flags)) + return; + + (void)sxe2_acl_del_filter_by_vsi(vsi); + + mutex_destroy(&vsi_acl->flow_cfg_lock); + bitmap_zero(vsi->acl.filter_ids, SXE2_ACL_MAX_NUM_ENTRY); +} + +STATIC struct sxe2_acl_filter *sxe2_acl_find_filter_by_id_unlock(struct sxe2_vsi *vsi, + u64 loc) +{ + struct sxe2_acl_filter *filter_tmp = NULL; + struct sxe2_acl_filter *filter_find = NULL; + struct sxe2_adapter *adapter = vsi->adapter; + + list_for_each_entry(filter_tmp, &vsi->acl.filter_list, l_node) { + LOG_DEBUG_BDF("filter_id:%llu\n", filter_tmp->filter_id); + if (loc == filter_tmp->filter_id) { + filter_find = filter_tmp; + break; + } + } + + return filter_find; +} + +s32 sxe2_acl_del_filter_by_id(struct sxe2_vsi *vsi, u64 filter_id) +{ + s32 ret = -ENOENT; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_acl_filter *filter = NULL; + + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, vsi->adapter->flags)) { + LOG_INFO_BDF("acl not supported.\n"); + return -EOPNOTSUPP; + } + + mutex_lock(&adapter->acl_ctxt.filter_lock); + filter = sxe2_acl_find_filter_by_id_unlock(vsi, filter_id); + if (filter) { + ret = sxe2_acl_filter_del(vsi, filter); + if (ret) { + LOG_ERROR_BDF("delete filter failed, filter_id:%llu, ret:%d\n", + filter_id, ret); + } + } else { + LOG_ERROR_BDF("filter not found, filter_id:%llu.\n", filter_id); + } + + mutex_unlock(&adapter->acl_ctxt.filter_lock); + return ret; +} + +STATIC s32 sxe2_com_acl_input_parse(struct sxe2_acl_filter *filter, + struct sxe2_fnav_flow_seg *seg, + struct sxe2_flow_pattern *pattern, u16 flow_type) +{ + struct sxe2_fnav_filter_full_key *full_key = &filter->full_key; + bool has_l4 = false; + u32 weight; + DECLARE_BITMAP(fld_spec, SXE2_FLOW_FLD_ID_MAX); + DECLARE_BITMAP(head_spec, SXE2_FLOW_HDR_MAX); + + bitmap_zero(fld_spec, SXE2_FLOW_FLD_ID_MAX); + bitmap_zero(head_spec, SXE2_FLOW_HDR_MAX); + + if (test_bit(SXE2_FLOW_HDR_ETH, pattern->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_ETH_DA, pattern->map_spec)) { + memcpy(&full_key->eth.h_dest, &pattern->item_spec.eth.dst_addr, + ETH_ALEN * sizeof(u8)); + memcpy(&full_key->eth_mask.h_dest, + &pattern->item_mask.eth.dst_addr, ETH_ALEN * sizeof(u8)); + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, fld_spec); + } + + if (test_bit(SXE2_FLOW_FLD_ID_ETH_SA, pattern->map_spec)) { + memcpy(&full_key->eth.h_source, &pattern->item_spec.eth.src_addr, + ETH_ALEN * sizeof(u8)); + 
memcpy(&full_key->eth_mask.h_source, + &pattern->item_mask.eth.src_addr, ETH_ALEN * sizeof(u8)); + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, fld_spec); + } + + set_bit(SXE2_FLOW_HDR_ETH, head_spec); + } + + if (test_bit(SXE2_FLOW_HDR_IPV4, pattern->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, pattern->map_spec)) { + full_key->ip.v4.dst_ip = pattern->item_spec.ipv4.daddr; + full_key->mask.v4.dst_ip = pattern->item_mask.ipv4.daddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, fld_spec); + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, pattern->map_spec)) { + full_key->ip.v4.src_ip = pattern->item_spec.ipv4.saddr; + full_key->mask.v4.src_ip = pattern->item_mask.ipv4.saddr; + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, fld_spec); + } + + set_bit(SXE2_FLOW_HDR_IPV4, head_spec); + } + + if (test_bit(SXE2_FLOW_HDR_TCP, pattern->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, pattern->map_spec)) { + full_key->l4.src_port = pattern->item_spec.tcp.source; + full_key->l4_mask.src_port = pattern->item_mask.tcp.source; + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, fld_spec); + } + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, pattern->map_spec)) { + full_key->l4.dst_port = pattern->item_spec.tcp.dest; + full_key->l4_mask.dst_port = pattern->item_mask.tcp.dest; + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, fld_spec); + } + + set_bit(SXE2_FLOW_HDR_TCP, head_spec); + full_key->ip.v4.proto = IPPROTO_TCP; + has_l4 = true; + } + + if (test_bit(SXE2_FLOW_HDR_UDP, pattern->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, pattern->map_spec)) { + full_key->l4.src_port = pattern->item_spec.udp.source; + full_key->l4_mask.src_port = pattern->item_mask.udp.source; + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, fld_spec); + } + + if (test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, pattern->map_spec)) { + full_key->l4.dst_port = pattern->item_spec.udp.dest; + full_key->l4_mask.dst_port = pattern->item_mask.udp.dest; + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, fld_spec); + } + + set_bit(SXE2_FLOW_HDR_UDP, head_spec); + full_key->ip.v4.proto = IPPROTO_UDP; + has_l4 = true; + } + + if (test_bit(SXE2_FLOW_HDR_SCTP, pattern->hdrs)) { + if (test_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, pattern->map_spec)) { + full_key->l4.src_port = pattern->item_spec.sctp.src_port; + full_key->l4_mask.src_port = pattern->item_mask.sctp.src_port; + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, fld_spec); + } + + if (test_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, pattern->map_spec)) { + full_key->l4.dst_port = pattern->item_spec.sctp.dst_port; + full_key->l4_mask.dst_port = pattern->item_mask.sctp.dst_port; + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, fld_spec); + } + + set_bit(SXE2_FLOW_HDR_SCTP, head_spec); + full_key->ip.v4.proto = IPPROTO_SCTP; + has_l4 = true; + } + + if (!bitmap_equal(pattern->map_spec, fld_spec, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR("map_spec not match.\n"); + return -EINVAL; + } + + if (flow_type != SXE2_FLOW_MAC_PAY && flow_type != SXE2_FLOW_MAC_IPV4_TCP_PAY && + flow_type != SXE2_FLOW_MAC_IPV4_UDP_PAY && + flow_type != SXE2_FLOW_MAC_IPV4_SCTP_PAY && + flow_type != SXE2_FLOW_MAC_IPV4_PAY) { + LOG_ERROR("flow_type:%d not support.\n", flow_type); + return -EOPNOTSUPP; + } + + weight = bitmap_weight(head_spec, SXE2_FLOW_HDR_MAX); + if (weight > 3 || weight == 0) { + LOG_ERROR("head_spec weight is %d, max is 3.\n", weight); + return -EINVAL; + } + + if (test_bit(SXE2_FLOW_HDR_IPV4, head_spec) && !has_l4) { + set_bit(SXE2_FLOW_HDR_IPV_OTHER, head_spec); + full_key->ip.v4.proto = 0; + } + + bitmap_copy(seg->headers, head_spec, SXE2_FLOW_HDR_MAX); + bitmap_copy(seg->fields, 
fld_spec, SXE2_FLOW_FLD_ID_MAX); + filter->flow_type = flow_type; + return 0; +} + +STATIC void sxe2_com_acl_action_parse(struct sxe2_acl_flow_action *action, + struct sxe2_flow_action *flow_action) +{ + if (test_bit(SXE2_FLOW_ACTION_QUEUE, flow_action->act_types)) { + action[0].type = SXE2_ACL_ACT_QINDEX; + action[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_RX_DST_Q; + action[0].data.acl_act.prio = 3; + action[0].data.acl_act.value = cpu_to_le16(flow_action->queue.q_index); + } + if (test_bit(SXE2_FLOW_ACTION_DROP, flow_action->act_types)) { + action[0].type = SXE2_ACL_ACT_DROP; + action[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_PKT_DROP; + action[0].data.acl_act.prio = 3; + action[0].data.acl_act.value = cpu_to_le16(0x1); + } + if (test_bit(SXE2_FLOW_ACTION_Q_REGION, flow_action->act_types)) { + action[0].type = SXE2_ACL_ACT_QGROUP; + action[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_RX_DST_Q_REGION; + action[0].data.acl_act.prio = 3; + action[0].data.acl_act.value = + ((cpu_to_le16(flow_action->q_region.q_index) & 0x7ff) + << 3) | + (cpu_to_le16(flow_action->q_region.region) & 0x7); + } + if (test_bit(SXE2_FLOW_ACTION_TO_VSI, flow_action->act_types)) { + action[0].type = SXE2_ACL_ACT_VSI; + action[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_RX_DST_VSI; + action[0].data.acl_act.prio = 3; + action[0].data.acl_act.value = cpu_to_le16(flow_action->vsi.vsi_index); + } +} + +STATIC s32 sxe2_com_acl_filter_add_internal(struct sxe2_vsi *vsi, u16 flow_type, + struct sxe2_flow_pattern *pattern, + struct sxe2_flow_action *action, + u32 filter_id) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_acl_flow_action acts[1] = {}; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_seg *seg = NULL; + struct sxe2_acl_flow_cfg *flow_cfg = NULL; + struct sxe2_acl_filter *filter = NULL; + bool new_alloc_flow = false; + + flow_cfg = sxe2_acl_find_flow_cfg_by_flow_type(vsi, flow_type); + if (!flow_cfg) { + flow_cfg = devm_kzalloc(dev, sizeof(*flow_cfg), GFP_KERNEL); + if (!flow_cfg) { + LOG_ERROR_BDF("no memory for flow cfg.\n"); + ret = -ENOMEM; + goto l_end; + } + flow_cfg->flow_type = flow_type; + new_alloc_flow = true; + } + + sxe2_com_acl_action_parse(acts, action); + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) { + LOG_ERROR_BDF("no memory for seg.\n"); + ret = -ENOMEM; + goto l_free_cfg; + } + + filter = devm_kzalloc(dev, sizeof(*filter), GFP_KERNEL); + if (!filter) { + LOG_ERROR_BDF("no memory for input.\n"); + ret = -ENOMEM; + goto l_free_seg; + } + + ret = sxe2_com_acl_input_parse(filter, seg, pattern, flow_type); + if (ret) { + LOG_ERROR_BDF("acl input parse failed, ret:%d\n", ret); + goto l_free_filter; + } + + ret = sxe2_acl_flow_cfg_add(vsi, flow_cfg, seg); + if (ret == -EEXIST) { + seg = NULL; + LOG_DEBUG_BDF("acl rule exist.\n"); + if (new_alloc_flow) + new_alloc_flow = false; + ret = 0; + } else if (ret) { + seg = NULL; + LOG_ERROR_BDF("outer rule add failed, ret:%d\n", ret); + goto l_free_filter; + } + + if (ret == 0 && new_alloc_flow) + sxe2_acl_flow_cfg_add_list(vsi, flow_cfg); + + INIT_LIST_HEAD(&filter->l_node); + ret = sxe2_acl_lut_entry_add(vsi, filter, acts); + if (ret) { + LOG_ERROR_BDF("acl lut entry add failed, ret:%d\n", ret); + (void)sxe2_acl_filter_del(vsi, filter); + goto l_end; + } + filter->filter_id = SXE2_GEN_FILTER_ID(vsi->idx_in_dev, filter_id); + LOG_DEBUG_BDF("filter_id:%llu\n", filter->filter_id); + goto l_end; + +l_free_filter: + if (filter) + devm_kfree(dev, filter); +l_free_seg: + if (seg) 
+ devm_kfree(dev, seg); +l_free_cfg: + if (new_alloc_flow) { + if (flow_cfg) + devm_kfree(dev, flow_cfg); + } +l_end: + return ret; +} + +s32 sxe2_com_flow_acl_filter_add(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_drv_flow_filter_resp *resp) +{ + s32 ret = 0; + u32 pos = 0; + struct sxe2_vsi *rule_vsi; + + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags)) { + LOG_INFO_BDF("acl not supported.\n"); + return -EOPNOTSUPP; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + rule_vsi = sxe2_vsi_get_by_idx(adapter, rule_vsi_id); + if (!rule_vsi) { + LOG_ERROR_BDF("vsi not found, vsi_id:%u.\n", rule_vsi_id); + ret = -EINVAL; + goto l_end; + } + + pos = find_next_bit(rule_vsi->acl.filter_ids, SXE2_ACL_MAX_NUM_ENTRY, 0); + if (pos >= SXE2_ACL_MAX_NUM_ENTRY) { + ret = -EINVAL; + goto l_end; + } + clear_bit(pos, rule_vsi->acl.filter_ids); + resp->flow_id = pos; + + ret = sxe2_com_acl_filter_add_internal(rule_vsi, req->meta.flow_type, + &req->pattern_outer, &req->action, pos); + if (ret) { + set_bit(pos, rule_vsi->acl.filter_ids); + LOG_ERROR_BDF("failed to add acl filter, ret:%d\n", ret); + goto l_end; + } + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_com_flow_acl_filter_del(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req) +{ + s32 ret = 0; + struct sxe2_vsi *rule_vsi; + + if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags)) { + LOG_INFO_BDF("acl not supported.\n"); + return -EOPNOTSUPP; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + rule_vsi = sxe2_vsi_get_by_idx(adapter, rule_vsi_id); + if (!rule_vsi) { + LOG_ERROR_BDF("vsi not found, vsi_id:%u.\n", rule_vsi_id); + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_acl_del_filter_by_id(rule_vsi, + SXE2_GEN_FILTER_ID(rule_vsi->idx_in_dev, req->flow_id)); + if (ret) + LOG_ERROR_BDF("delete acl filter failed, ret:%d\n", ret); + + set_bit(req->flow_id, rule_vsi->acl.filter_ids); + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.h new file mode 100644 index 0000000000000000000000000000000000000000..eaef0390b188917abcd0012033713fb1022d3e01 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_acl.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_acl.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2_ACL_H__
+#define __SXE2_ACL_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include "sxe2_cmd.h"
+#include "sxe2_mbx_public.h"
+#include "sxe2_mbx_msg.h"
+#include "sxe2_flow.h"
+#include "sxe2_fnav.h"
+#include "sxe2_drv_cmd.h"
+
+#define SXE2_FW_MAX_ACTION_MEMORIES 20
+
+#define SXE2_FW_ACL_KEY_WIDTH 40
+#define SXE2_FW_ACL_KEY_WIDTH_BYTES 5
+#define SXE2_FW_ACL_TCAM_DEPTH 512
+#define SXE2_ACL_ENTRY_ALLOC_UNIT 512
+#define SXE2_FW_MAX_CONCURRENT_ACL_TBL 15
+#define SXE2_FW_MAX_ACTION_ENTRIES 512
+#define SXE2_FW_ACL_LUT_NUM 16
+#define SXE2_AQC_ALLOC_ID_LESS_THAN_4K 0x1000
+#define SXE2_AQC_TBL_MAX_ACTION_PAIRS 4
+#define SXE2_ACL_MAX_CASCADE_WIDTH 4
+
+#define SXE2_FW_MAX_TCAM_ALLOC_UNITS (SXE2_FW_ACL_TCAM_DEPTH / SXE2_ACL_ENTRY_ALLOC_UNIT)
+
+#define SXE2_FW_ACL_ALLOC_UNITS (SXE2_FW_ACL_LUT_NUM * SXE2_FW_MAX_TCAM_ALLOC_UNITS)
+
+#define SXE2_FW_MAX_ACL_TCAM_ENTRY (SXE2_FW_ACL_TCAM_DEPTH * SXE2_FW_ACL_LUT_NUM)
+
+#define SXE2_ACL_MAX_NUM_ENTRY 2048
+
+#define SXE2_ACL_SCEN_PKT_DIR_IDX_IN_TCAM 0x2
+#define SXE2_ACL_SCEN_PID_IDX_IN_TCAM 0x3
+#define SXE2_ACL_SCEN_RNG_CHK_IDX_IN_TCAM 0x4
+
+#define SXE2_FW_ACL_BYTE_SEL_BASE 0x20
+#define SXE2_FW_ACL_BYTE_SEL_BASE_PKT_DIR 0x20
+#define SXE2_FW_ACL_BYTE_SEL_BASE_PID 0x3E
+#define SXE2_FW_ACL_BYTE_SEL_BASE_RNG_CHK 0x3F
+
+#define SXE2_ACL_INVALID_PF_SCEN_NUM (0x3f)
+#define SXE2_ACL_PROF_BYTE_SEL_ELEMS (30)
+#define SXE2_ACL_PROF_BYTE_SEL_START_IDX (0)
+
+#define SXE2_GEN_FILTER_ID(mark, id) (((u64)(mark) << 32) | ((u64)(id) & 0xFFFFFFFFULL))
+
+struct sxe2_mbx_msg_info;
+
+enum sxe2_acl_fw_action_mdid {
+	SXE2_ACL_ACTION_MDID_FLOW_ID = 0,
+	SXE2_ACL_ACTION_MDID_PKT_DROP,
+	SXE2_ACL_ACTION_MDID_RX_DST_Q,
+	SXE2_ACL_ACTION_MDID_RX_DST_Q_REGION,
+	SXE2_ACL_ACTION_MDID_RX_DST_VSI,
+	SXE2_ACL_ACTION_MDID_CNT_PKT,
+	SXE2_ACL_ACTION_MDID_CNT_BYTE,
+	SXE2_ACL_ACTION_MDID_CNT_PKT_BYTE,
+	SXE2_ACL_ACTION_MDID_NOP,
+	SXE2_ACL_ACTION_MDID_MAX,
+};
+
+enum sxe2_acl_act_type {
+	SXE2_ACL_ACT_DROP,
+	SXE2_ACL_ACT_QINDEX,
+	SXE2_ACL_ACT_QGROUP,
+	SXE2_ACL_ACT_VSI,
+	SXE2_ACL_ACT_OTHER,
+};
+
+enum sxe2_acl_lut_entry_priorty {
+	SXE2_ACL_LUT_ENTRY_PRIO_LOW,
+	SXE2_ACL_LUT_ENTRY_PRIO_NORMAL,
+	SXE2_ACL_LUT_ENTRY_PRIO_HIGH,
+	SXE2_ACL_LUT_ENTRY_PRIO_MAX,
+};
+
+struct sxe2_acl_tbl_params {
+	u16 width;
+	u16 depth;
+
+	u8 entry_act_pairs;
+};
+
+struct sxe2_acl_act_mem {
+	u8 act_mem;
+	u8 member_of_tcam;
+};
+
+struct sxe2_acl_tbl_info {
+	u8 first_tcam;
+	u8 last_tcam;
+	u16 first_entry;
+	u16 last_entry;
+
+	struct list_head scens;
+	struct sxe2_acl_tbl_params table_info;
+	struct sxe2_acl_act_mem act_mems[SXE2_FW_MAX_ACTION_MEMORIES];
+
+	DECLARE_BITMAP(avail, SXE2_FW_ACL_ALLOC_UNITS);
+
+	u16 id;
+	u16 max_slot_cnt;
+};
+
+struct sxe2_acl_scen_info {
+	struct list_head l_entry;
+
+	DECLARE_BITMAP(acl_act_mem_bitmap, SXE2_FW_MAX_ACTION_MEMORIES);
+
+	DECLARE_BITMAP(acl_entry_bitmap, SXE2_FW_MAX_ACL_TCAM_ENTRY);
+
+	u16 entry_first_index[SXE2_ACL_LUT_ENTRY_PRIO_MAX];
+	u16 entry_last_index[SXE2_ACL_LUT_ENTRY_PRIO_MAX];
+
+	u16 scen_id;
+
+	u16 start;
+	u16 end;
+
+	u16 width;
+	u16 num_entry;
+	u8 avail_width;
+
+	u8 pid_idx;
+	u8 rnage_chk_idx;
+	u8 pkt_dir_idx;
+};
+
+struct sxe2_acl_flow_action {
+	enum sxe2_acl_act_type type;
+	union {
+		struct sxe2_acl_act_entry_data acl_act;
+		u32 dummy;
+	} data;
+};
+
+struct sxe2_acl_flow_entry {
+	struct list_head l_entry;
+	struct sxe2_flow_info_node *flow;
+	struct
sxe2_acl_flow_action *action; + + void *entry; + u16 entry_size; + u16 scen_entry_idx; +}; + +struct sxe2_acl_flow_cfg { + struct list_head l_node; + struct sxe2_fnav_flow_seg *seg; + u32 filter_cnt; + enum sxe2_fnav_flow_type flow_type; +}; + +struct sxe2_acl_context { + struct sxe2_acl_tbl_info *acl_tbl_info; + DECLARE_BITMAP(slots, SXE2_ACL_MAX_NUM_ENTRY); + struct mutex filter_lock; + struct sxe2_ppp_common_ctxt ppp; +}; + +struct sxe2_acl_filter { + struct list_head l_node; + struct sxe2_fnav_filter_full_key full_key; + enum sxe2_fnav_flow_type flow_type; + struct sxe2_acl_flow_entry *flow_entry; + u64 filter_id; +}; + +s32 sxe2_acl_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter); + +s32 sxe2_acl_init(struct sxe2_adapter *adapter); + +void sxe2_acl_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_acl_set_scen_prof(struct sxe2_adapter *adapter, + struct sxe2_fwc_acl_prof_sel_base_req *prof_sel_req); + +s32 sxe2_acl_flow_cfg_add(struct sxe2_vsi *vsi, struct sxe2_acl_flow_cfg *flow_cfg, + struct sxe2_fnav_flow_seg *seg); + +s32 sxe2_acl_del_filter_by_id(struct sxe2_vsi *vsi, u64 filter_id); + +s32 sxe2_acl_lut_entry_add(struct sxe2_vsi *vsi, struct sxe2_acl_filter *filter, + struct sxe2_acl_flow_action *acts); + +s32 sxe2_com_flow_acl_filter_del(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req); + +s32 sxe2_com_flow_acl_filter_add(struct sxe2_adapter *adapter, u16 rule_vsi_id, + struct sxe2_drv_flow_filter_req *req, + struct sxe2_drv_flow_filter_resp *resp); +s32 sxe2_fwc_acl_trace_trigger(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_acl_trace_recorder(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_acl_dfx_get(struct sxe2_adapter *adapter); + +struct sxe2_acl_flow_cfg * +sxe2_acl_find_flow_cfg_by_flow_type(struct sxe2_vsi *vsi, + enum sxe2_fnav_flow_type flow_type); + +s32 sxe2_acl_del_filter_by_vsi(struct sxe2_vsi *rule_vsi); + +void sxe2_acl_flow_cfg_add_list(struct sxe2_vsi *vsi, struct sxe2_acl_flow_cfg *flow_cfg); + +void sxe2_vsi_acl_init(struct sxe2_vsi *vsi); + +void sxe2_vsi_acl_deinit(struct sxe2_vsi *vsi); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f89f0e31d8a511741e67ca0ee87e2121150b3899 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_arfs.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/cpu_rmap.h>
+#include <net/flow_dissector.h>
+
+#include "sxe2_compat.h"
+#include "sxe2_common.h"
+#include "sxe2_log.h"
+#include "sxe2_netdev.h"
+#include "sxe2_arfs.h"
+#include "sxe2_tx.h"
+
+#ifdef NEED_INCLUDE_NET_RPS_H
+#include <net/rps.h>
+#endif
+#ifdef NEED_INCLUDE_NETDEV_RX_QUEUE_H
+#include <net/netdev_rx_queue.h>
+#endif
+
+#ifdef CONFIG_RFS_ACCEL
+#define SXE2_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000)
+
+static void sxe2_arfs_filter_print(struct sxe2_adapter *adapter,
+				   struct sxe2_arfs_filter *arfs_filter)
+{
+	bool is_v4;
+	__be16 *ipv6_src, *ipv6_dst;
+	struct sxe2_fnav_filter *fltr_info;
+
+	if (!arfs_filter)
+		return;
+
+	fltr_info = &arfs_filter->filter_info;
+	LOG_ARFS("filter state %u\n", arfs_filter->filter_state);
+
+	is_v4 = (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP ||
+		 fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP);
+
+	if (is_v4) {
+		LOG_ARFS("ipv4 src_ip 0x%X, dst_ip 0x%X, ip_proto %u, src_port %u,\t"
+			 "dst_port %u\n",
+			 fltr_info->full_key.ip.v4.src_ip,
+			 fltr_info->full_key.ip.v4.dst_ip,
+			 fltr_info->full_key.ip.v4.proto,
+			 be16_to_cpu(fltr_info->full_key.l4.src_port),
+			 be16_to_cpu(fltr_info->full_key.l4.dst_port));
+	} else {
+		ipv6_src = (__be16 *)fltr_info->full_key.ip.v6.src_ip;
+		ipv6_dst = (__be16 *)fltr_info->full_key.ip.v6.dst_ip;
+		LOG_ARFS("ipv6 src_ip %x:%x:%x:%x:%x:%x:%x:%x, dst_ip\t"
+			 "%x:%x:%x:%x:%x:%x:%x:%x, ip_proto %u, src_port %u,\t"
+			 "dst_port %u\n",
+			 be16_to_cpu(ipv6_src[0]), be16_to_cpu(ipv6_src[1]),
+			 be16_to_cpu(ipv6_src[2]), be16_to_cpu(ipv6_src[3]),
+			 be16_to_cpu(ipv6_src[4]), be16_to_cpu(ipv6_src[5]),
+			 be16_to_cpu(ipv6_src[6]), be16_to_cpu(ipv6_src[7]),
+			 be16_to_cpu(ipv6_dst[0]), be16_to_cpu(ipv6_dst[1]),
+			 be16_to_cpu(ipv6_dst[2]), be16_to_cpu(ipv6_dst[3]),
+			 be16_to_cpu(ipv6_dst[4]), be16_to_cpu(ipv6_dst[5]),
+			 be16_to_cpu(ipv6_dst[6]), be16_to_cpu(ipv6_dst[7]),
+			 fltr_info->full_key.ip.v6.proto,
+			 be16_to_cpu(fltr_info->full_key.l4.src_port),
+			 be16_to_cpu(fltr_info->full_key.l4.dst_port));
+	}
+	LOG_ARFS("flow_id %u, q_index %u, loc %u\n", arfs_filter->flow_id,
+		 fltr_info->q_index, arfs_filter->filter_idx);
+}
+
+STATIC void sxe2_arfs_filter_print_screen(struct sxe2_adapter *adapter,
+					  struct sxe2_arfs_filter *arfs_filter)
+{
+	bool is_v4;
+	__be16 *ipv6_src, *ipv6_dst;
+	struct sxe2_fnav_filter *fltr_info;
+
+	if (!arfs_filter)
+		return;
+
+	fltr_info = &arfs_filter->filter_info;
+	LOG_DEV_INFO("\tfilter state %u\n", arfs_filter->filter_state);
+
+	is_v4 = (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP ||
+		 fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP);
+
+	if (is_v4) {
+		LOG_DEV_INFO("ipv4 src_ip 0x%X, dst_ip 0x%X, ip_proto %u, src_port\t"
+			     "%u, dst_port %u\n",
+			     fltr_info->full_key.ip.v4.src_ip,
+			     fltr_info->full_key.ip.v4.dst_ip,
+			     fltr_info->full_key.ip.v4.proto,
+			     be16_to_cpu(fltr_info->full_key.l4.src_port),
+			     be16_to_cpu(fltr_info->full_key.l4.dst_port));
+	} else {
+		ipv6_src = (__be16 *)fltr_info->full_key.ip.v6.src_ip;
+		ipv6_dst = (__be16 *)fltr_info->full_key.ip.v6.dst_ip;
+		LOG_DEV_INFO("ipv6 src_ip %x:%x:%x:%x:%x:%x:%x:%x, dst_ip\t"
+			     "%x:%x:%x:%x:%x:%x:%x:%x, ip_proto %u, src_port %u,\t"
+			     "dst_port %u\n",
+			     be16_to_cpu(ipv6_src[0]), be16_to_cpu(ipv6_src[1]),
+			     be16_to_cpu(ipv6_src[2]), be16_to_cpu(ipv6_src[3]),
+			     be16_to_cpu(ipv6_src[4]), be16_to_cpu(ipv6_src[5]),
+			     be16_to_cpu(ipv6_src[6]), be16_to_cpu(ipv6_src[7]),
+			     be16_to_cpu(ipv6_dst[0]), be16_to_cpu(ipv6_dst[1]),
+			     be16_to_cpu(ipv6_dst[2]), be16_to_cpu(ipv6_dst[3]),
+			     be16_to_cpu(ipv6_dst[4]), be16_to_cpu(ipv6_dst[5]),
+			     be16_to_cpu(ipv6_dst[6]), be16_to_cpu(ipv6_dst[7]),
+			     fltr_info->full_key.ip.v6.proto,
+			     be16_to_cpu(fltr_info->full_key.l4.src_port),
+			     be16_to_cpu(fltr_info->full_key.l4.dst_port));
+	}
+	LOG_DEV_INFO("flow_id %u, q_index %u, loc %u\n", arfs_filter->flow_id,
+		     fltr_info->q_index, arfs_filter->filter_idx);
+}
+
+s32 sxe2_arfs_enable(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	u32 i;
+
+	adapter->arfs_ctxt.filter_list = kcalloc(SXE2_MAX_ARFS_LIST,
+						 sizeof(struct hlist_head),
+						 GFP_KERNEL);
+	if (!adapter->arfs_ctxt.filter_list)
+		return -ENOMEM;
+
+	for (i = 0; i < SXE2_MAX_ARFS_LIST; i++)
+		INIT_HLIST_HEAD(&adapter->arfs_ctxt.filter_list[i]);
+
+	adapter->arfs_ctxt.last_filter_id = 0;
+	memset(&adapter->arfs_ctxt.filter_cnt, 0,
+	       sizeof(struct sxe2_arfs_active_filter_cnt));
+
+	return ret;
+}
+
+void sxe2_arfs_disable(struct sxe2_adapter *adapter)
+{
+	u32 i = 0;
+	s32 ret;
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_arfs_filter *filter;
+	struct hlist_node *node;
+	HLIST_HEAD(del_list);
+
+	mutex_lock(&adapter->arfs_ctxt.update_list_lock);
+	spin_lock_bh(&adapter->arfs_ctxt.filter_lock);
+	if (likely(adapter->arfs_ctxt.filter_list)) {
+		for (i = 0; i < SXE2_MAX_ARFS_LIST; i++) {
+			hlist_for_each_entry_safe(filter, node,
+						  &adapter->arfs_ctxt.filter_list[i],
+						  hl_node) {
+				hlist_del(&filter->hl_node);
+				hlist_add_head(&filter->hl_node, &del_list);
+			}
+		}
+		kfree(adapter->arfs_ctxt.filter_list);
+		adapter->arfs_ctxt.filter_list = NULL;
+	}
+	spin_unlock_bh(&adapter->arfs_ctxt.filter_lock);
+
+	hlist_for_each_entry_safe(filter, node, &del_list, hl_node) {
+		if (filter->filter_state != SXE2_ARFS_FILTER_INACTIVE &&
+		    filter->filter_state != SXE2_ARFS_FILTER_CONFLICT) {
+			ret = sxe2_pf_fnav_hw_filter_update(vsi,
+							    &filter->filter_info,
+							    false, false,
+							    SXE2_FNAV_FILTER_UPDATE_PKT);
+			if (ret) {
+				LOG_DEV_WARN("unable to delete aRFS filter, ret:%d\t"
+					     "filter_state:%d filter_loc:%d\t"
+					     "flow_id:%d queue:%d\n",
+					     ret, filter->filter_state,
+					     filter->filter_idx, filter->flow_id,
+					     filter->filter_info.q_index);
+			}
+		}
+		hlist_del(&filter->hl_node);
+		devm_kfree(dev, filter);
+	}
+
+	adapter->arfs_ctxt.last_filter_id = 0;
+	memset(&adapter->arfs_ctxt.filter_cnt, 0,
+	       sizeof(struct sxe2_arfs_active_filter_cnt));
+	mutex_unlock(&adapter->arfs_ctxt.update_list_lock);
+}
+
+s32 sxe2_arfs_init(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+
+	adapter->arfs_ctxt.vsi_id_in_pf = SXE2_INVAL_U16;
+	mutex_init(&adapter->arfs_ctxt.update_list_lock);
+	spin_lock_init(&adapter->arfs_ctxt.filter_lock);
+
+	if (test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags))
+		ret = sxe2_arfs_enable(adapter);
+
+	return ret;
+}
+
+void sxe2_arfs_deinit(struct sxe2_adapter *adapter)
+{
+	sxe2_arfs_disable(adapter);
+	mutex_destroy(&adapter->arfs_ctxt.update_list_lock);
+}
+
+void sxe2_arfs_clean(struct sxe2_adapter *adapter)
+{
+	u32 i = 0;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_arfs_filter *filter;
+	struct hlist_node *node;
+
+	mutex_lock(&adapter->arfs_ctxt.update_list_lock);
+	spin_lock_bh(&adapter->arfs_ctxt.filter_lock);
+	if (likely(adapter->arfs_ctxt.filter_list)) {
+		for (i = 0; i < SXE2_MAX_ARFS_LIST; i++) {
+			hlist_for_each_entry_safe(filter, node,
+						  &adapter->arfs_ctxt.filter_list[i],
+						  hl_node) {
+				hlist_del(&filter->hl_node);
+				devm_kfree(dev, filter);
+			}
+		}
+	}
+	spin_unlock_bh(&adapter->arfs_ctxt.filter_lock);
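+
+	/* Unlike sxe2_arfs_disable(), the filter hash table itself stays
+	 * allocated here; only its entries are freed, so just reset the
+	 * id and counter bookkeeping for reuse.
+	 */
+	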
adapter->arfs_ctxt.last_filter_id = 0; + memset(&adapter->arfs_ctxt.filter_cnt, 0, + sizeof(struct sxe2_arfs_active_filter_cnt)); + mutex_unlock(&adapter->arfs_ctxt.update_list_lock); +} + +s32 sxe2_cpu_rx_rmap_set(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = NULL; + struct net_device *netdev = NULL; + u32 i; + + if (!vsi || vsi->type != SXE2_VSI_T_PF) + return 0; + + netdev = vsi->netdev; + if (!netdev || !vsi->irqs.cnt) + return -EINVAL; + + adapter = vsi->adapter; + LOG_NETDEV_DEBUG("setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", + vsi->type, netdev->name, vsi->irqs.cnt); + + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->irqs.cnt); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + sxe2_for_each_vsi_irq(vsi, i) + { + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, + adapter->irq_ctxt + .msix_entries[vsi->irqs.irq_data[i] + ->idx_in_pf] + .vector)) { + sxe2_cpu_rx_rmap_free(vsi); + return -EINVAL; + } + } + + return 0; +} + +void sxe2_cpu_rx_rmap_free(struct sxe2_vsi *vsi) +{ + struct net_device *netdev = NULL; + + if (!vsi || vsi->type != SXE2_VSI_T_PF) + return; + + netdev = vsi->netdev; + if (!netdev || !netdev->rx_cpu_rmap) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +STATIC bool sxe2_arfs_filter_cmp(struct sxe2_fnav_filter *fltr_info, + const struct flow_keys *fk) +{ + bool is_v4; + + if (!fltr_info || !fk) + return false; + + is_v4 = (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP || + fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP); + + if (fk->basic.n_proto == htons(ETH_P_IP) && is_v4) { + return (fltr_info->full_key.ip.v4.proto == fk->basic.ip_proto && + fltr_info->full_key.ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->full_key.ip.v4.dst_ip == fk->addrs.v4addrs.dst && + fltr_info->full_key.l4.src_port == fk->ports.src && + fltr_info->full_key.l4.dst_port == fk->ports.dst); + } else if (fk->basic.n_proto == htons(ETH_P_IPV6) && !is_v4) { + return (fltr_info->full_key.ip.v6.proto == fk->basic.ip_proto && + !memcmp(&fltr_info->full_key.ip.v6.src_ip, + &fk->addrs.v6addrs.src, sizeof(struct in6_addr)) && + !memcmp(&fltr_info->full_key.ip.v6.dst_ip, + &fk->addrs.v6addrs.dst, sizeof(struct in6_addr)) && + fltr_info->full_key.l4.src_port == fk->ports.src && + fltr_info->full_key.l4.dst_port == fk->ports.dst); + } + + return false; +} + +STATIC void sxe2_arfs_active_filter_cnt_update(struct sxe2_adapter *adapter, + struct sxe2_arfs_filter *filter, + bool add) +{ + struct sxe2_arfs_active_filter_cnt *filter_cnt = + &adapter->arfs_ctxt.filter_cnt; + + switch (filter->filter_info.flow_type) { + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP: + if (add) + filter_cnt->tcp4_cnt++; + else + filter_cnt->tcp4_cnt--; + break; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP: + if (add) + filter_cnt->udp4_cnt++; + else + filter_cnt->udp4_cnt--; + break; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP: + if (add) + filter_cnt->tcp6_cnt++; + else + filter_cnt->tcp6_cnt--; + break; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP: + if (add) + filter_cnt->udp6_cnt++; + else + filter_cnt->udp6_cnt--; + break; + default: + LOG_ERROR_BDF("aRFS: failed to update filter counters, invalid\t" + "filter type %d\n", + filter->filter_info.flow_type); + break; + } +} + +STATIC struct sxe2_arfs_filter *sxe2_arfs_filter_build(struct sxe2_adapter *adapter, + const struct flow_keys *fk, + u16 rxq_idx, u32 flow_id, + u32 hash) +{ + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct 
sxe2_arfs_filter *arfs_filter = NULL; + struct sxe2_fnav_filter *fltr_info; + u8 ip_proto; + + arfs_filter = devm_kzalloc(dev, sizeof(*arfs_filter), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_filter) { + LOG_ARFS("alloc arfs_filter memory failed\n"); + return NULL; + } + + arfs_filter->filter_idx = + adapter->arfs_ctxt.last_filter_id++ % RPS_NO_FILTER; + fltr_info = &arfs_filter->filter_info; + ip_proto = fk->basic.ip_proto; + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->flow_type = + (ip_proto == IPPROTO_TCP) + ? SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP + : SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP; + fltr_info->full_key.ip.v4.proto = ip_proto; + fltr_info->full_key.ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->full_key.ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->full_key.l4.src_port = fk->ports.src; + fltr_info->full_key.l4.dst_port = fk->ports.dst; + } else { + fltr_info->flow_type = + (ip_proto == IPPROTO_TCP) + ? SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP + : SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP; + fltr_info->full_key.ip.v6.proto = ip_proto; + memcpy(&fltr_info->full_key.ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->full_key.ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->full_key.l4.src_port = fk->ports.src; + fltr_info->full_key.l4.dst_port = fk->ports.dst; + } + fltr_info->filter_loc = hash; + fltr_info->fdid_prio = SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_THREE; + fltr_info->ori_vsi_sw = vsi->id_in_pf; + fltr_info->ori_vsi_hw = vsi->idx_in_dev; + fltr_info->dst_vsi_hw = vsi->idx_in_dev; + fltr_info->act_type = SXE2_FNAV_ACT_QINDEX; + fltr_info->origin_q_index = rxq_idx; + fltr_info->q_index = rxq_idx; + fltr_info->q_region = 0; + fltr_info->act_prio = SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_THREE; + fltr_info->complete_report = SXE2_FNAV_TX_DESC_QW0_COMP_RPT_FAIL; + fltr_info->stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_PKTS; + if (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP) { + fltr_info->stat_index = + adapter->fnav_ctxt.fnav_stat_ctxt + .stat_rsv_idx[SXE2_ARFS_STAT_TCP4]; + } else if (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP) { + fltr_info->stat_index = + adapter->fnav_ctxt.fnav_stat_ctxt + .stat_rsv_idx[SXE2_ARFS_STAT_UDP4]; + } else if (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP) { + fltr_info->stat_index = + adapter->fnav_ctxt.fnav_stat_ctxt + .stat_rsv_idx[SXE2_ARFS_STAT_TCP6]; + } else if (fltr_info->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) { + fltr_info->stat_index = + adapter->fnav_ctxt.fnav_stat_ctxt + .stat_rsv_idx[SXE2_ARFS_STAT_UDP6]; + } + fltr_info->tunn_flag = SXE2_FNAV_TUN_FLAG_NO_TUNNEL; + + arfs_filter->flow_id = flow_id; + arfs_filter->filter_state = SXE2_ARFS_FILTER_INACTIVE; + arfs_filter->time_activated = 0; + INIT_HLIST_NODE(&arfs_filter->hl_node); + LOG_ARFS("build new filter\n"); + sxe2_arfs_filter_print(adapter, arfs_filter); + + return arfs_filter; +} + +STATIC bool sxe2_arfs_flow_cfg_full_match(struct sxe2_adapter *adapter, + __be16 l3_proto, u8 l4_proto) +{ + enum sxe2_fnav_flow_type flow_type = SXE2_FNAV_FLOW_TYPE_NONE; + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + + if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP) + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP) + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP) + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == 
IPPROTO_UDP) + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + else + return false; + + flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(adapter->vsi_ctxt.main_vsi, + flow_type); + if (!flow_cfg || flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0 || + flow_cfg->full_match) { + return true; + } + return false; +} + +int sxe2_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct flow_keys fk; + __be16 n_proto; + u8 ip_proto; + u16 idx; + struct sxe2_arfs_filter *arfs_filter; + u32 hash; + int ret = -EOPNOTSUPP; + + if (unlikely(!adapter->arfs_ctxt.filter_list)) { + LOG_ARFS("filter list is NULL.\n"); + return -ENODEV; + } + + if (skb->encapsulation) { + LOG_ARFS("skb is encapsulation.\n"); + return -EPROTONOSUPPORT; + } + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + LOG_ARFS("unsupport flow key, l3_proto 0x%X, l4_proto 0x%X\n", + ntohs(skb->protocol), + skb->protocol == htons(ETH_P_IP) + ? ip_hdr(skb)->protocol + : skb->protocol == htons(ETH_P_IPV6) + ? ipv6_hdr(skb)->nexthdr + : 0); + return -EPROTONOSUPPORT; + } + + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + LOG_ARFS("unsupport fragment\n"); + return -EPROTONOSUPPORT; + } + + n_proto = fk.basic.n_proto; + if ((n_proto == htons(ETH_P_IP)) || n_proto == htons(ETH_P_IPV6)) { + ip_proto = fk.basic.ip_proto; + } else { + LOG_ARFS("unsupport l3_proto 0x%X\n", ntohs(n_proto)); + return -EPROTONOSUPPORT; + } + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + LOG_ARFS("unsupport l4_proto 0x%X\n", ip_proto); + return -EPROTONOSUPPORT; + } + + if (!sxe2_arfs_flow_cfg_full_match(adapter, n_proto, ip_proto)) { + LOG_ARFS("flow is not full match, l3_proto 0x%X, l4_proto 0x%X\n", + ntohs(n_proto), ip_proto); + return -EOPNOTSUPP; + } + + hash = skb_get_hash_raw(skb); + idx = hash & SXE2_ARFS_LIST_MASK; + + spin_lock_bh(&adapter->arfs_ctxt.filter_lock); + if (unlikely(!adapter->arfs_ctxt.filter_list)) { + LOG_ARFS("filter list is NULL.\n"); + ret = -ENODEV; + goto l_out; + } + hlist_for_each_entry(arfs_filter, &adapter->arfs_ctxt.filter_list[idx], + hl_node) { + struct sxe2_fnav_filter *filter_info = &arfs_filter->filter_info; + + if (!sxe2_arfs_filter_cmp(filter_info, &fk)) + continue; + + ret = arfs_filter->filter_idx; + + if (arfs_filter->filter_state != SXE2_ARFS_FILTER_ACTIVE) + goto l_out; + + filter_info->origin_q_index = rxq_idx; + filter_info->q_index = rxq_idx; + arfs_filter->filter_state = SXE2_ARFS_FILTER_MODIFY; + LOG_ARFS("modify q, ori_q %u, new_q %u, state %u\n", + filter_info->origin_q_index, rxq_idx, + arfs_filter->filter_state); + + goto schedule_work; + } + + arfs_filter = sxe2_arfs_filter_build(adapter, &fk, rxq_idx, flow_id, hash); + if (!arfs_filter) { + ret = -ENOMEM; + goto l_out; + } + ret = arfs_filter->filter_idx; + hlist_add_head(&arfs_filter->hl_node, &adapter->arfs_ctxt.filter_list[idx]); + +schedule_work: + sxe2_monitor_work_schedule(adapter); +l_out: + spin_unlock_bh(&adapter->arfs_ctxt.filter_lock); + return ret; +} + +STATIC bool sxe2_arfs_filter_expired(struct sxe2_adapter *adapter, + struct sxe2_arfs_filter *filter) +{ + struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev; + + if (rps_may_expire_flow(netdev, filter->filter_info.q_index, filter->flow_id, + filter->filter_idx)) { + return true; + } + + if (filter->filter_info.flow_type != SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP && + 
filter->filter_info.flow_type != SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) {
+		return false;
+	}
+
+	return time_in_range64(filter->time_activated +
+			       SXE2_ARFS_TIME_DELTA_EXPIRATION,
+			       filter->time_activated, get_jiffies_64());
+}
+
+STATIC void sxe2_arfs_filter_sync_process_inactive(struct sxe2_adapter *adapter,
+						   struct sxe2_arfs_filter *filter,
+						   struct hlist_head *update_list,
+						   struct hlist_head *del_list)
+{
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	enum sxe2_fnav_flow_type flow_type = filter->filter_info.flow_type;
+	enum sxe2_fnav_flow_type fnav_flow_type =
+		sxe2_arfs_flow_to_fnav_flow(flow_type);
+	struct sxe2_arfs_entry_ptr *ep = NULL;
+
+	if (!sxe2_fnav_flow_cfg_full_match(adapter, fnav_flow_type)) {
+		hlist_del(&filter->hl_node);
+		hlist_add_head(&filter->hl_node, del_list);
+		LOG_ARFS("full match change, del inactive filter[%u]\n",
+			 filter->filter_idx);
+	} else {
+		ep = devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC | __GFP_NOWARN);
+		if (!ep) {
+			LOG_ARFS("alloc inactive ep memory failed\n");
+			return;
+		}
+		INIT_HLIST_NODE(&ep->hl_node);
+		ep->arfs_filter = filter;
+		hlist_add_head(&ep->hl_node, update_list);
+		if (flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP ||
+		    flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) {
+			filter->time_activated = get_jiffies_64();
+		}
+		LOG_ARFS("add inactive filter[%u]\n", filter->filter_idx);
+	}
+}
+
+STATIC void sxe2_arfs_filter_sync_process_modify(struct sxe2_adapter *adapter,
+						 struct sxe2_arfs_filter *filter,
+						 struct hlist_head *update_list,
+						 struct hlist_head *del_list)
+{
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	enum sxe2_fnav_flow_type flow_type = filter->filter_info.flow_type;
+	struct sxe2_arfs_entry_ptr *ep = NULL;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC | __GFP_NOWARN);
+	if (!ep) {
+		LOG_ARFS("alloc modify ep memory failed\n");
+		return;
+	}
+	INIT_HLIST_NODE(&ep->hl_node);
+	ep->arfs_filter = filter;
+	hlist_add_head(&ep->hl_node, update_list);
+	if (flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP ||
+	    flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) {
+		filter->time_activated = get_jiffies_64();
+	}
+	LOG_ARFS("add modify filter[%u]\n", filter->filter_idx);
+}
+
+STATIC void sxe2_arfs_filter_sync_process_conflict(struct sxe2_adapter *adapter,
+						   struct sxe2_arfs_filter *filter,
+						   struct hlist_head *update_list,
+						   struct hlist_head *del_list)
+{
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+#ifndef SXE2_CFG_RELEASE
+	struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev;
+#endif
+	enum sxe2_fnav_flow_type flow_type = filter->filter_info.flow_type;
+	enum sxe2_fnav_flow_type fnav_flow_type =
+		sxe2_arfs_flow_to_fnav_flow(flow_type);
+	struct sxe2_arfs_entry_ptr *ep = NULL;
+
+	if (!sxe2_fnav_flow_cfg_full_match(adapter, fnav_flow_type)) {
+		hlist_del(&filter->hl_node);
+		hlist_add_head(&filter->hl_node, del_list);
+		LOG_ARFS("del conflict filter[%u], expire flow %d, timeout %d\n",
+			 filter->filter_idx,
+			 rps_may_expire_flow(netdev, filter->filter_info.q_index,
+					     filter->flow_id, filter->filter_idx),
+			 time_in_range64(filter->time_activated +
+					 SXE2_ARFS_TIME_DELTA_EXPIRATION,
+					 filter->time_activated,
+					 get_jiffies_64()));
+	} else {
+		if (sxe2_arfs_filter_expired(adapter, filter)) {
+			hlist_del(&filter->hl_node);
+			hlist_add_head(&filter->hl_node, del_list);
+			LOG_ARFS("del conflict filter[%u], expire flow %d, timeout %d\n",
+				 filter->filter_idx,
+				 rps_may_expire_flow(netdev,
+						     filter->filter_info.q_index,
+						     filter->flow_id,
+						     filter->filter_idx),
+				 time_in_range64(filter->time_activated +
+						 SXE2_ARFS_TIME_DELTA_EXPIRATION,
+						 filter->time_activated,
+						 get_jiffies_64()));
+		} else {
+			ep = devm_kzalloc(dev, sizeof(*ep),
+					  GFP_ATOMIC | __GFP_NOWARN);
+			if (!ep) {
+				LOG_ARFS("alloc conflict ep memory failed\n");
+				return;
+			}
+			INIT_HLIST_NODE(&ep->hl_node);
+			ep->arfs_filter = filter;
+			hlist_add_head(&ep->hl_node, update_list);
+			if (flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP ||
+			    flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) {
+				filter->time_activated = get_jiffies_64();
+			}
+			LOG_ARFS("add conflict filter[%u]\n", filter->filter_idx);
+		}
+	}
+}
+
+STATIC void sxe2_arfs_filter_sync_process_active(struct sxe2_adapter *adapter,
+						 struct sxe2_arfs_filter *filter,
+						 struct hlist_head *update_list,
+						 struct hlist_head *del_list)
+{
+#ifndef SXE2_CFG_RELEASE
+	struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev;
+#endif
+
+	if (sxe2_arfs_filter_expired(adapter, filter)) {
+		hlist_del(&filter->hl_node);
+		filter->filter_state = SXE2_ARFS_FILTER_TODEL;
+		hlist_add_head(&filter->hl_node, del_list);
+		LOG_ARFS("del active filter[%u], expire flow %d, timeout %d\n",
+			 filter->filter_idx,
+			 rps_may_expire_flow(netdev, filter->filter_info.q_index,
+					     filter->flow_id, filter->filter_idx),
+			 time_in_range64(filter->time_activated +
+					 SXE2_ARFS_TIME_DELTA_EXPIRATION,
+					 filter->time_activated,
+					 get_jiffies_64()));
+	}
+}
+
+STATIC void sxe2_arfs_filter_sync_process(struct sxe2_adapter *adapter,
+					  struct sxe2_arfs_filter *filter,
+					  struct hlist_head *update_list,
+					  struct hlist_head *del_list)
+{
+	if (filter->filter_state == SXE2_ARFS_FILTER_INACTIVE) {
+		sxe2_arfs_filter_sync_process_inactive(adapter, filter,
+						       update_list, del_list);
+	} else if (filter->filter_state == SXE2_ARFS_FILTER_MODIFY) {
+		sxe2_arfs_filter_sync_process_modify(adapter, filter,
+						     update_list, del_list);
+	} else if (filter->filter_state == SXE2_ARFS_FILTER_CONFLICT) {
+		sxe2_arfs_filter_sync_process_conflict(adapter, filter,
+						       update_list, del_list);
+	} else if (filter->filter_state == SXE2_ARFS_FILTER_ACTIVE) {
+		sxe2_arfs_filter_sync_process_active(adapter, filter,
+						     update_list, del_list);
+	}
+}
+
+static bool sxe2_arfs_filter_compare(struct sxe2_fnav_filter *filter_a,
+				     enum sxe2_fnav_flow_type flow_type_a,
+				     struct sxe2_fnav_filter *filter_b,
+				     enum sxe2_fnav_flow_type flow_type_b)
+{
+	bool is_v4, is_v6;
+
+	if (!filter_a || !filter_b)
+		return false;
+
+	if (flow_type_a != flow_type_b)
+		return false;
+
+	is_v4 = (flow_type_a == SXE2_FNAV_FLOW_TYPE_IPV4_TCP ||
+		 flow_type_a == SXE2_FNAV_FLOW_TYPE_IPV4_UDP);
+	is_v6 = (flow_type_a == SXE2_FNAV_FLOW_TYPE_IPV6_TCP ||
+		 flow_type_a == SXE2_FNAV_FLOW_TYPE_IPV6_UDP);
+
+	if (is_v4) {
+		return (filter_a->full_key.ip.v4.src_ip ==
+			filter_b->full_key.ip.v4.src_ip &&
+			filter_a->full_key.ip.v4.dst_ip ==
+			filter_b->full_key.ip.v4.dst_ip &&
+			filter_a->full_key.l4.src_port ==
+			filter_b->full_key.l4.src_port &&
+			filter_a->full_key.l4.dst_port ==
+			filter_b->full_key.l4.dst_port);
+	} else if (is_v6) {
+		return (!memcmp(filter_a->full_key.ip.v6.src_ip,
+				filter_b->full_key.ip.v6.src_ip,
+				sizeof(struct in6_addr)) &&
+			!memcmp(filter_a->full_key.ip.v6.dst_ip,
+				filter_b->full_key.ip.v6.dst_ip,
+				sizeof(struct in6_addr)) &&
+			filter_a->full_key.l4.src_port ==
+			filter_b->full_key.l4.src_port &&
+			filter_a->full_key.l4.dst_port ==
+			filter_b->full_key.l4.dst_port);
+	}
+
+	return false;
+}
+
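+/*
+ * A "conflict" means another fnav entry with an identical full key already
+ * sits in the hash bucket: programming the new aRFS filter would clobber
+ * that hardware slot, so callers park the filter in
+ * SXE2_ARFS_FILTER_CONFLICT and retry it on a later sync pass instead.
+ */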
+STATIC bool sxe2_arfs_filter_conflict(struct sxe2_adapter *adapter,
+				      struct sxe2_fnav_filter *filter_info)
+{
+	bool ret = false;
+	u32 idx;
+	struct sxe2_fnav_filter *filter_tmp;
+
+	sxe2_fnav_filter_hash(filter_info);
+
+	idx = filter_info->hash_val & SXE2_FNAV_FLTR_HLIST_MASK;
+	hlist_for_each_entry(filter_tmp, &adapter->fnav_ctxt.filter_hlist[idx],
+			     hl_node) {
+		ret = sxe2_arfs_filter_compare(filter_info,
+			sxe2_arfs_flow_to_fnav_flow(filter_info->flow_type),
+			filter_tmp, filter_tmp->flow_type);
+		if (ret) {
+			filter_tmp->conflict = true;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+STATIC void sxe2_arfs_filters_del(struct sxe2_adapter *adapter,
+				  struct hlist_head *del_list)
+{
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+	struct sxe2_arfs_filter *filter;
+	struct hlist_node *node;
+	s32 ret;
+	bool conflict;
+
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+	mutex_lock(&vsi->fnav.flow_cfg_lock);
+	hlist_for_each_entry_safe(filter, node, del_list, hl_node) {
+		if (filter->filter_state != SXE2_ARFS_FILTER_INACTIVE &&
+		    filter->filter_state != SXE2_ARFS_FILTER_CONFLICT) {
+			LOG_ARFS("del filter\n");
+			sxe2_arfs_filter_print(adapter, filter);
+			conflict = sxe2_arfs_filter_conflict(adapter,
+							     &filter->filter_info);
+			if (!conflict) {
+				ret = sxe2_pf_fnav_hw_filter_update(vsi,
+						&filter->filter_info,
+						false,
+						false,
+						SXE2_FNAV_FILTER_UPDATE_PKT);
+				if (!ret) {
+					sxe2_arfs_active_filter_cnt_update(adapter,
+							filter, false);
+				} else {
+					LOG_DEV_WARN("unable to delete aRFS filter, "
+						     "ret:%d filter_state:%d "
+						     "filter_loc:%d flow_id:%d "
+						     "queue:%d\n",
+						     ret, filter->filter_state,
+						     filter->filter_idx,
+						     filter->flow_id,
+						     filter->filter_info.q_index);
+				}
+			} else {
+				sxe2_arfs_active_filter_cnt_update(adapter, filter,
+								   false);
+			}
+		}
+		hlist_del(&filter->hl_node);
+		devm_kfree(dev, filter);
+	}
+	mutex_unlock(&vsi->fnav.flow_cfg_lock);
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+}
+
+STATIC void sxe2_arfs_filters_update(struct sxe2_adapter *adapter,
+				     struct hlist_head *update_list)
+{
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+	struct sxe2_arfs_filter *filter;
+	struct sxe2_arfs_entry_ptr *ep;
+	struct hlist_node *node;
+	s32 ret;
+	bool conflict;
+	bool is_update;
+
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+	hlist_for_each_entry_safe(ep, node, update_list, hl_node) {
+		is_update = false;
+		filter = ep->arfs_filter;
+		LOG_ARFS("update filter\n");
+		sxe2_arfs_filter_print(adapter, filter);
+		conflict = sxe2_arfs_filter_conflict(adapter, &filter->filter_info);
+		if (filter->filter_state == SXE2_ARFS_FILTER_INACTIVE) {
+			if (conflict)
+				filter->filter_state = SXE2_ARFS_FILTER_CONFLICT;
+		} else if (filter->filter_state == SXE2_ARFS_FILTER_MODIFY) {
+			if (conflict) {
+				filter->filter_state = SXE2_ARFS_FILTER_CONFLICT;
+				sxe2_arfs_active_filter_cnt_update(adapter, filter,
+								   false);
+			}
+		}
+		if (!conflict) {
+			if (filter->filter_state == SXE2_ARFS_FILTER_MODIFY)
+				is_update = true;
+			ret = sxe2_pf_fnav_hw_filter_update(vsi,
+					&filter->filter_info,
+					true,
+					is_update,
+					SXE2_FNAV_FILTER_UPDATE_PKT);
+			if (!ret) {
+				if (filter->filter_state !=
+				    SXE2_ARFS_FILTER_MODIFY) {
+					sxe2_arfs_active_filter_cnt_update(adapter,
+							filter, true);
+				}
+				filter->filter_state = SXE2_ARFS_FILTER_ACTIVE;
+			} else {
+				LOG_DEV_WARN("unable to add aRFS filter, ret:%d "
+					     "filter_state:%d filter_loc:%d "
+					     "flow_id:%d queue:%d\n",
+					     ret, filter->filter_state,
+					     filter->filter_idx, filter->flow_id,
+					     filter->filter_info.q_index);
+			}
+		}
+		hlist_del(&ep->hl_node);
+		devm_kfree(dev, ep);
+	}
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+}
+
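+/*
+ * Classification happens below under the bh-safe filter_lock only; the
+ * hardware writes are deferred to sxe2_arfs_filters_del()/_update() above,
+ * which take mutexes (fnav filter_lock / flow_cfg_lock) and therefore must
+ * run only after the spinlock has been dropped.
+ */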
+void sxe2_arfs_filters_sync(struct sxe2_adapter *adapter)
+{
+	HLIST_HEAD(tmp_del_list);
+	HLIST_HEAD(tmp_update_list);
+	u32 i;
+	struct sxe2_arfs_filter *filter;
+	struct hlist_node *node;
+
+	if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		return;
+
+	mutex_lock(&adapter->arfs_ctxt.update_list_lock);
+	mutex_lock(&adapter->vsi_ctxt.main_vsi->fnav.flow_cfg_lock);
+	spin_lock_bh(&adapter->arfs_ctxt.filter_lock);
+	if (likely(adapter->arfs_ctxt.filter_list)) {
+		for (i = 0; i < SXE2_MAX_ARFS_LIST; i++) {
+			hlist_for_each_entry_safe(filter, node,
+						  &adapter->arfs_ctxt.filter_list[i],
+						  hl_node) {
+				sxe2_arfs_filter_sync_process(adapter, filter,
+							      &tmp_update_list,
+							      &tmp_del_list);
+			}
+		}
+	}
+	spin_unlock_bh(&adapter->arfs_ctxt.filter_lock);
+	mutex_unlock(&adapter->vsi_ctxt.main_vsi->fnav.flow_cfg_lock);
+
+	sxe2_arfs_filters_del(adapter, &tmp_del_list);
+	sxe2_arfs_filters_update(adapter, &tmp_update_list);
+
+	mutex_unlock(&adapter->arfs_ctxt.update_list_lock);
+}
+
+bool sxe2_arfs_flow_cfg_used(struct sxe2_adapter *adapter, u16 vsi_id,
+			     enum sxe2_fnav_flow_type flow_type)
+{
+	if (vsi_id != adapter->arfs_ctxt.vsi_id_in_pf)
+		return false;
+
+	if (flow_type == SXE2_FNAV_FLOW_TYPE_IPV4_TCP)
+		return adapter->arfs_ctxt.filter_cnt.tcp4_cnt > 0;
+	else if (flow_type == SXE2_FNAV_FLOW_TYPE_IPV4_UDP)
+		return adapter->arfs_ctxt.filter_cnt.udp4_cnt > 0;
+	else if (flow_type == SXE2_FNAV_FLOW_TYPE_IPV6_TCP)
+		return adapter->arfs_ctxt.filter_cnt.tcp6_cnt > 0;
+	else if (flow_type == SXE2_FNAV_FLOW_TYPE_IPV6_UDP)
+		return adapter->arfs_ctxt.filter_cnt.udp6_cnt > 0;
+	else
+		return false;
+}
+
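+/*
+ * Debug helper that open-codes the core rps_may_expire_flow() test (flow
+ * still owned by this filter, steering CPU valid, and fewer than
+ * 10 * table-size packets enqueued since the flow was last seen) so that
+ * each factor can be logged individually.
+ */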
+STATIC bool sxe2_rps_expire_flow(struct sxe2_adapter *adapter,
+				 struct net_device *dev,
+				 struct sxe2_arfs_filter *filter)
+{
+	struct netdev_rx_queue *rxqueue = dev->_rx + filter->filter_info.q_index;
+	struct rps_dev_flow_table *flow_table;
+	struct rps_dev_flow *rflow;
+	bool expire = true;
+	unsigned int cpu;
+
+	LOG_DEV_INFO("\trps expire flow detect:");
+
+	rcu_read_lock();
+	flow_table = rcu_dereference(rxqueue->rps_flow_table);
+	if (flow_table) {
+		LOG_DEV_INFO("\tflow_id %u, mask %u\n", filter->flow_id,
+			     flow_table->mask);
+	}
+	if (flow_table && filter->flow_id <= flow_table->mask) {
+		rflow = &flow_table->flows[filter->flow_id];
+		cpu = READ_ONCE(rflow->cpu);
+		LOG_DEV_INFO("\tfilter_id %u, cpu %u, nr_cpu_ids %u, last_qtail %d\n",
+			     filter->filter_idx, cpu, nr_cpu_ids,
+			     (int)(rflow->last_qtail));
+		if (cpu < nr_cpu_ids) {
+			LOG_DEV_INFO("\tinput_queue_head %d",
+				     (int)(per_cpu(softnet_data, cpu)
+						   .input_queue_head));
+		}
+		if (rflow->filter == filter->filter_idx && cpu < nr_cpu_ids &&
+		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+			   rflow->last_qtail) < (int)(10 * flow_table->mask)))
+			expire = false;
+	}
+	rcu_read_unlock();
+
+	LOG_DEV_INFO("\tfilter expire: %d\n", expire);
+
+	return expire;
+}
+
+void sxe2_arfs_stats_dump(struct sxe2_adapter *adapter)
+{
+	u32 i = 0, j = 0;
+	struct sxe2_arfs_filter *filter;
+	struct hlist_node *node;
+
+	LOG_DEV_INFO("active filter cnt:\n");
+
+	mutex_lock(&adapter->arfs_ctxt.update_list_lock);
+	LOG_DEV_INFO("\ttcp4: %u\n", adapter->arfs_ctxt.filter_cnt.tcp4_cnt);
+	LOG_DEV_INFO("\tudp4: %u\n", adapter->arfs_ctxt.filter_cnt.udp4_cnt);
+	LOG_DEV_INFO("\ttcp6: %u\n", adapter->arfs_ctxt.filter_cnt.tcp6_cnt);
+	LOG_DEV_INFO("\tudp6: %u\n", adapter->arfs_ctxt.filter_cnt.udp6_cnt);
+	LOG_DEV_INFO("list filter id: %u\n", adapter->arfs_ctxt.last_filter_id);
+
+	spin_lock_bh(&adapter->arfs_ctxt.filter_lock);
+	if (likely(adapter->arfs_ctxt.filter_list)) {
+		for (i = 0; i < SXE2_MAX_ARFS_LIST; i++) {
+			hlist_for_each_entry_safe(filter, node,
+						  &adapter->arfs_ctxt.filter_list[i],
+						  hl_node) {
+				LOG_DEV_INFO("filter[%u]\n", j);
+				sxe2_arfs_filter_print_screen(adapter, filter);
+				sxe2_rps_expire_flow(adapter,
+						     adapter->vsi_ctxt.main_vsi->netdev,
+						     filter);
+				j++;
+			}
+		}
+	}
+	spin_unlock_bh(&adapter->arfs_ctxt.filter_lock);
+
+	mutex_unlock(&adapter->arfs_ctxt.update_list_lock);
+
+	if (adapter->vsi_ctxt.main_vsi->netdev->rx_cpu_rmap) {
+		for (i = 0; i < num_online_cpus(); i++) {
+			LOG_DEV_INFO("cpu[%u] --> %u, dist: %u\n", i,
+				     adapter->vsi_ctxt.main_vsi->netdev
+					     ->rx_cpu_rmap->near[i].index,
+				     adapter->vsi_ctxt.main_vsi->netdev
+					     ->rx_cpu_rmap->near[i].dist);
+		}
+	}
+}
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..653cc5bed4dbd636675e74b2efa7ac1bb6485ecc
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_arfs.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_arfs.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_ARFS_H__
+#define __SXE2_ARFS_H__
+
+#include "sxe2_fnav.h"
+
+#define SXE2_MAX_RFS_FILTERS (0xFFFF)
+#define SXE2_MAX_ARFS_LIST (1024)
+#define SXE2_ARFS_LIST_MASK (SXE2_MAX_ARFS_LIST - 1)
+
+#define LOG_ARFS(fmt, ...) LOG_DEBUG_BDF("ARFS_LOG: " fmt, ##__VA_ARGS__)
+
+enum sxe2_arfs_filter_state {
+	SXE2_ARFS_FILTER_INACTIVE,
+	SXE2_ARFS_FILTER_ACTIVE,
+	SXE2_ARFS_FILTER_MODIFY,
+	SXE2_ARFS_FILTER_CONFLICT,
+	SXE2_ARFS_FILTER_TODEL,
+};
+
+struct sxe2_arfs_filter {
+	struct hlist_node hl_node;
+	struct sxe2_fnav_filter filter_info;
+	u64 time_activated;
+	u32 filter_idx;
+	u32 flow_id;
+	enum sxe2_arfs_filter_state filter_state;
+};
+
+struct sxe2_arfs_entry_ptr {
+	struct sxe2_arfs_filter *arfs_filter;
+	struct hlist_node hl_node;
+};
+
+struct sxe2_arfs_active_filter_cnt {
+	u32 tcp4_cnt;
+	u32 tcp6_cnt;
+	u32 udp4_cnt;
+	u32 udp6_cnt;
+};
+
+struct sxe2_arfs_ctxt {
+	struct hlist_head *filter_list;
+	spinlock_t filter_lock;
+	u32 last_filter_id;
+	struct sxe2_arfs_active_filter_cnt filter_cnt;
+	struct mutex update_list_lock;
+	u16 vsi_id_in_pf;
+};
+
+#ifdef CONFIG_RFS_ACCEL
+s32 sxe2_arfs_init(struct sxe2_adapter *adapter);
+
+void sxe2_arfs_deinit(struct sxe2_adapter *adapter);
+
+void sxe2_arfs_clean(struct sxe2_adapter *adapter);
+
+s32 sxe2_arfs_enable(struct sxe2_adapter *adapter);
+
+void sxe2_arfs_disable(struct sxe2_adapter *adapter);
+
+s32 sxe2_cpu_rx_rmap_set(struct sxe2_vsi *vsi);
+
+void sxe2_cpu_rx_rmap_free(struct sxe2_vsi *vsi);
+
+int sxe2_rx_flow_steer(struct net_device *netdev,
+		       const struct sk_buff *skb, u16 rxq_idx, u32 flow_id);
+
+void sxe2_arfs_filters_sync(struct sxe2_adapter *adapter);
+
+bool sxe2_arfs_flow_cfg_used(struct sxe2_adapter *adapter, u16 vsi_id,
+			     enum sxe2_fnav_flow_type flow_type);
+
+void sxe2_arfs_stats_dump(struct sxe2_adapter *adapter);
+#else
+static inline s32 sxe2_arfs_init(struct sxe2_adapter *adapter)
+{
+	return 0;
+}
+
+static inline void sxe2_arfs_deinit(struct sxe2_adapter *adapter)
+{
+}
+
+static inline void sxe2_arfs_clean(struct sxe2_adapter *adapter)
+{
+}
+
+static inline s32 sxe2_arfs_enable(struct sxe2_adapter *adapter)
+{
+	return 0;
+}
+
+static inline void sxe2_arfs_disable(struct sxe2_adapter *adapter)
+{
+}
+
+static inline s32 sxe2_cpu_rx_rmap_set(struct sxe2_vsi *vsi)
+{
+	return 0;
+}
+
+static inline void sxe2_cpu_rx_rmap_free(struct sxe2_vsi *vsi)
+{
+}
+
+static inline int sxe2_rx_flow_steer(struct net_device *netdev,
+				     const struct sk_buff *skb, u16 rxq_idx,
+				     u32 flow_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void sxe2_arfs_filters_sync(struct sxe2_adapter *adapter)
+{
+}
+
+static inline bool sxe2_arfs_flow_cfg_used(struct sxe2_adapter *adapter, u16 vsi_id,
+					   enum sxe2_fnav_flow_type flow_type)
+{
+	return false;
+}
+
+static inline void sxe2_arfs_stats_dump(struct sxe2_adapter *adapter)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.c
new file mode 100644
index 0000000000000000000000000000000000000000..dea5ac664edf2cd42182269233c50bf0293ea335
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_aux_driver.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+
+#include "sxe2.h"
+#include "sxe2_common.h"
+#include "sxe2_aux_driver.h"
+#include "sxe2_log.h"
+#include "sxe2_mbx_msg.h"
+#include "sxe2_netdev.h"
+#include "sxe2_lag.h"
+#include "sxe2_tc.h"
+#include "sxe2_version.h"
+
+#define MAX_RDMA_MSG_SIZE (4096)
+#define PRIMARY_PF_INDEX 0
+#define REDUNDANT_PF_INDEX 1
+static DEFINE_IDA(sxe2_aux_ida);
+
+u8 sxe2_rdma_aux_get_qset_tc(struct sxe2_adapter *adapter,
+			     struct aux_rdma_qset_params *qset)
+{
+	if (sxe2_lag_is_bonded(adapter))
+		return qset->tc[adapter->pf_idx];
+
+	return qset->tc[PRIMARY_PF_INDEX];
+}
+
+static bool sxe2_rdma_aux_is_in_vf(struct sxe2_adapter *adapter,
+				   struct aux_rdma_qset_params *qset)
+{
+	bool is_vf;
+
+	is_vf = (qset->vport_id != adapter->vsi_ctxt.main_vsi->idx_in_dev);
+
+	return is_vf;
+}
+
+static int sxe2_alloc_res(struct aux_core_dev_info *cdev_info,
+			  struct aux_rdma_qset_params *qset)
+{
+	int ret;
+	u8 tc;
+	struct sxe2_lag_context *lag;
+	struct sxe2_adapter *adapter = NULL;
+	struct sxe2_vsi *vsi = NULL;
+	bool is_primary = false;
+	bool is_vf = false;
+
+	if (!cdev_info || !qset)
+		return -EINVAL;
+
+	adapter = cdev_info->adapter;
+	if (!sxe2_txsched_support_chk(adapter))
+		return 0;
+
+	if (qset->vport_id == adapter->vsi_ctxt.main_vsi->idx_in_dev) {
+		ret = sxe2_txsched_rdma_vsi_cfg(adapter->vsi_ctxt.main_vsi, 0);
+		if (ret) {
+			LOG_ERROR("rdma vsi cfg failed, ret=%d\n", ret);
+			return ret;
+		}
+		vsi = adapter->vsi_ctxt.main_vsi;
+	} else {
+		vsi = sxe2_vsi_get_by_idx(adapter, qset->vport_id);
+		if (!vsi) {
+			LOG_ERROR("rdma vsi cfg failed, find vf vsi by vsi_id %d failed.\n",
+				  qset->vport_id);
+			return -EINVAL;
+		}
+	}
+
+	tc = qset->tc[PRIMARY_PF_INDEX];
+	ret = sxe2_txsched_qset_node_add(adapter, vsi, qset, tc);
+	if (ret) {
+		LOG_ERROR("rdma qset node add failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	lag = adapter->lag_ctxt;
+	is_primary = (adapter->pf_idx == PRIMARY_PF_INDEX);
+	is_vf = sxe2_rdma_aux_is_in_vf(adapter, qset);
+	if (sxe2_lag_support(adapter) && lag && is_primary && !is_vf) {
+		mutex_lock(&lag->lock);
+		qset->qset_port = SXE2_RDMA_PF0;
+		lag->rdma_qset[qset->user_pri] = *qset;
+		if (sxe2_lag_is_bonded(adapter)) {
+			sxe2_lag_alloced_node_move(cdev_info, qset->user_pri, false);
+			qset->teid = lag->rdma_qset[qset->user_pri].teid;
+			qset->qset_port = lag->rdma_qset[qset->user_pri].qset_port;
+		}
+		mutex_unlock(&lag->lock);
+	}
+	LOG_INFO("rdma add sched teid %#x.\n", qset->teid);
+l_end:
+	return ret;
+}
+
+static s32 sxe2_rdma_aux_free_qset(struct aux_core_dev_info *cdev_info,
+				   struct aux_rdma_qset_params *dqset, u8 tc,
+				   bool is_primary)
+{
+	s32 ret;
+	struct sxe2_lag_context *lag;
+	bool is_vf = false;
+
+	is_vf = sxe2_rdma_aux_is_in_vf(cdev_info->adapter, dqset);
+
+	lag = cdev_info->adapter->lag_ctxt;
+	if (sxe2_lag_support(cdev_info->adapter) && lag && is_primary && !is_vf) {
+		mutex_lock(&lag->lock);
+		ret = sxe2_txsched_qset_node_del(cdev_info->adapter, dqset, tc);
+		if (!ret)
+			lag->rdma_qset[dqset->user_pri].teid = 0;
+		mutex_unlock(&lag->lock);
+	} else {
+		ret = sxe2_txsched_qset_node_del(cdev_info->adapter, dqset, tc);
+	}
+
+	return ret;
+}
+
+static int sxe2_free_res(struct aux_core_dev_info *cdev_info,
+			 struct aux_rdma_qset_params *qset)
+{
+	int ret;
+	u8 tc;
+	struct sxe2_adapter *adapter = NULL;
+	struct sxe2_lag_context *lag = NULL;
+	bool is_primary = false;
+	bool is_vf = false;
+
+	if (!cdev_info || !qset)
+		return -EINVAL;
+
+	if (!sxe2_txsched_support_chk(cdev_info->adapter))
+		return 0;
+
+	adapter = cdev_info->adapter;
+	lag = cdev_info->adapter->lag_ctxt;
+	is_primary = (adapter->pf_idx == PRIMARY_PF_INDEX);
+	is_vf = sxe2_rdma_aux_is_in_vf(adapter, qset);
+
+	LOG_INFO("rdma del sched teid %#x.\n", qset->teid);
+
+	if (sxe2_lag_support(cdev_info->adapter) && lag && is_primary && !is_vf) {
+		mutex_lock(&lag->lock);
+		qset->teid = lag->rdma_qset[qset->user_pri].teid;
+		if (sxe2_lag_is_bonded(cdev_info->adapter) &&
+		    cdev_info->bond_mode == SXE2_LAG_MODE_ACTIVE_BACKUP) {
+			sxe2_lag_ab_reclaim_node(lag, cdev_info, qset->user_pri);
+			qset->teid = lag->rdma_qset[qset->user_pri].teid;
+			qset->qset_port = lag->rdma_qset[qset->user_pri].qset_port;
+		}
+		mutex_unlock(&lag->lock);
+	}
+
+	tc = qset->tc[PRIMARY_PF_INDEX];
+	ret = sxe2_rdma_aux_free_qset(cdev_info, qset, tc, is_primary);
+	if (ret)
+		LOG_ERROR("rdma qset node del failed, ret=%d\n", ret);
+
+	if (qset->vport_id == adapter->vsi_ctxt.main_vsi->idx_in_dev &&
+	    sxe2_txsched_qset_left(adapter, qset->vport_id) == false) {
+		ret = sxe2_txsched_rdma_vsi_rm(cdev_info->adapter->vsi_ctxt.main_vsi);
+		if (ret)
+			LOG_ERROR("rdma vsi rm failed, ret=%d\n", ret);
+	}
+	return ret;
+}
+
+static int sxe2_request_reset(struct aux_core_dev_info *cdev_info,
+			      enum aux_reset_type reset_type)
+{
+	struct sxe2_adapter *adapter;
+
+	if (reset_type != AUX_PFR)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+	if (!adapter)
+		return -EINVAL;
+
+	return sxe2_reset_async(adapter, SXE2_RESET_PFR);
+}
+
+STATIC void sxe2_rdma_aux_qsets_info_update(struct aux_rdma_multi_qset_params *qset,
+					    struct sxe2_lag_context *lag)
+{
+	qset->teid[PRIMARY_PF_INDEX] =
+		lag->rdma_qsets[qset->user_pri].teid[PRIMARY_PF_INDEX];
+	qset->teid[REDUNDANT_PF_INDEX] =
+		lag->rdma_qsets[qset->user_pri].teid[REDUNDANT_PF_INDEX];
+	qset->qset_port[PRIMARY_PF_INDEX] =
+		lag->rdma_qsets[qset->user_pri].qset_port[PRIMARY_PF_INDEX];
+	qset->qset_port[REDUNDANT_PF_INDEX] =
+		lag->rdma_qsets[qset->user_pri].qset_port[REDUNDANT_PF_INDEX];
+}
+
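+/*
+ * Multi-qset path used with active-active bonding: teid[], qset_port[] and
+ * tc[] carry one slot per PF (PRIMARY_PF_INDEX / REDUNDANT_PF_INDEX), so
+ * the same user priority can be scheduled on both ports at once.
+ */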
+static int sxe2_alloc_rdma_multi_qsets(struct aux_core_dev_info *cdev_info,
+				       struct aux_rdma_multi_qset_params *qset)
+{
+	int ret;
+	int i;
+	struct sxe2_lag_context *lag;
+	struct aux_rdma_qset_params aqset;
+
+	if (!cdev_info || !qset)
+		return -EINVAL;
+
+	if (!sxe2_txsched_support_chk(cdev_info->adapter))
+		return 0;
+
+	ret = sxe2_txsched_rdma_vsi_cfg(cdev_info->adapter->vsi_ctxt.main_vsi, 1);
+	if (ret) {
+		LOG_ERROR("rdma vsi cfg failed, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < qset->num; i++) {
+		aqset.teid = qset->teid[i];
+		aqset.tc[PRIMARY_PF_INDEX] = qset->tc[PRIMARY_PF_INDEX];
+
+		aqset.qset_id = qset->qset_id[i];
+		aqset.user_pri = qset->user_pri;
+		ret = sxe2_txsched_qset_node_add(cdev_info->adapter,
+				cdev_info->adapter->vsi_ctxt.main_vsi, &aqset,
+				qset->tc[PRIMARY_PF_INDEX]);
+		if (ret) {
+			LOG_ERROR("rdma qset node add qg %d failed, ret=%d\n", i,
+				  ret);
+			goto l_end;
+		}
+		qset->teid[i] = aqset.teid;
+		LOG_INFO("rdma add sched i %d teid %#x.\n", i, qset->teid[i]);
+	}
+
+	for (i = 0; i < qset->num; i++)
+		qset->qset_port[i] = SXE2_RDMA_PF0;
+
+	lag = cdev_info->adapter->lag_ctxt;
+	if (sxe2_lag_support(cdev_info->adapter) && lag) {
+		mutex_lock(&lag->lock);
+		lag->rdma_qsets[qset->user_pri] = *qset;
+		if (sxe2_lag_is_bonded(cdev_info->adapter) &&
+		    cdev_info->bond_mode == SXE2_LAG_MODE_ACTIVE_ACTIVE) {
+			sxe2_lag_alloced_node_move(cdev_info, qset->user_pri, true);
+			sxe2_rdma_aux_qsets_info_update(qset, lag);
+		}
+		mutex_unlock(&lag->lock);
+	}
+
+l_end:
+	return ret;
+}
+
+static s32 sxe2_rdma_aux_bond_aa_free_qset(struct aux_core_dev_info *cdev_info,
+					   struct aux_rdma_qset_params *dqset, u8 tc,
+					   int i)
+{
+	s32 ret;
+	struct sxe2_lag_context *lag;
+
+	lag = cdev_info->adapter->lag_ctxt;
+	if (sxe2_lag_support(cdev_info->adapter) && lag) {
+		mutex_lock(&lag->lock);
+		ret = sxe2_txsched_qset_node_del(cdev_info->adapter, dqset, tc);
+		if (!ret)
+			lag->rdma_qsets[dqset->user_pri].teid[i] = 0;
+
+		mutex_unlock(&lag->lock);
+	} else {
+		ret = sxe2_txsched_qset_node_del(cdev_info->adapter, dqset, tc);
+	}
+
+	return ret;
+}
+
+static int sxe2_free_rdma_multi_qsets(struct aux_core_dev_info *cdev_info,
+				      struct aux_rdma_multi_qset_params *qset)
+{
+	struct aux_rdma_qset_params dqset;
+	struct sxe2_lag_context *lag;
+	int i;
+	int ret = 0;
+
+	if (!cdev_info || !qset)
+		return -EINVAL;
+	if (!sxe2_txsched_support_chk(cdev_info->adapter))
+		return 0;
+
+	lag = cdev_info->adapter->lag_ctxt;
+	if (sxe2_lag_support(cdev_info->adapter) && lag) {
+		mutex_lock(&lag->lock);
+		sxe2_rdma_aux_qsets_info_update(qset, lag);
+		if (sxe2_lag_is_bonded(cdev_info->adapter) &&
+		    cdev_info->bond_mode == SXE2_LAG_MODE_ACTIVE_ACTIVE) {
+			sxe2_lag_aa_reclaim_node(lag, cdev_info, qset->user_pri);
+			sxe2_rdma_aux_qsets_info_update(qset, lag);
+		}
+		mutex_unlock(&lag->lock);
+	}
+
+	for (i = 0; i < qset->num; i++) {
+		dqset.teid = qset->teid[i];
+		dqset.user_pri = qset->user_pri;
+		dqset.tc[PRIMARY_PF_INDEX] = qset->tc[PRIMARY_PF_INDEX];
+		dqset.qset_id = qset->qset_id[i];
+		ret = sxe2_rdma_aux_bond_aa_free_qset(cdev_info, &dqset,
+						      qset->tc[PRIMARY_PF_INDEX], i);
+		if (ret) {
+			LOG_ERROR("rdma qset node del failed, ret=%d\n", ret);
+			goto l_end;
+		}
+	}
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_get_vf_info(struct aux_core_dev_info *cdev_info, u16 vf_id,
+			    struct aux_vf_port_info *vf_port_info)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter;
+	struct sxe2_vf_node *vf;
+	struct sxe2_vsi *vsi;
+
+	if (!cdev_info || !vf_port_info) {
+		LOG_INFO("params err dev(%p) vf port(%p).\n", (void *)cdev_info,
+			 (void *)vf_port_info);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+	if (!adapter) {
+		LOG_INFO("invalid vf_id:%u adapter err.\n", vf_id);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (sxe2_vf_id_check(adapter, vf_id)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_id));
+	vf = sxe2_vf_node_get(adapter, vf_id);
+	if (!vf) {
+		ret = -EAGAIN;
+		LOG_INFO_BDF("invalid vf_id:%u vf nullptr.\n", vf_id);
+		goto l_unlock;
+	}
+
+	ret = sxe2_check_vf_ready_for_cfg(vf);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%u pf flags:0x%lx vf states:0x%lx not ready.\n",
+			      vf_id, *adapter->flags, *vf->states);
+		goto l_unlock;
+	}
+
+	vsi = vf->vsi;
+	if (!vsi) {
+		LOG_INFO_BDF("invalid vf_id:%u vsi nullptr.\n", vf_id);
+		ret = -EAGAIN;
+		goto l_unlock;
+	}
+
+	vf_port_info->vf_id = vf->vf_idx;
+	vf_port_info->vport_id = vsi->idx_in_dev;
+
+	if (sxe2_port_vlan_is_exist(vf)) {
+		vf_port_info->port_vlan_id = sxe2_vf_port_vid_get(vf);
+		vf_port_info->port_vlan_tpid = sxe2_vf_port_tpid_get(vf);
+	}
+
+l_unlock:
+	mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_id));
+
+l_end:
+	return ret;
+}
+
+static int sxe2_vc_send(struct aux_core_dev_info *cdev_info, u16 vf_id, u8 *msg,
+			u16 len, u64 session_id)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter;
+
+	if (!cdev_info || !msg || !len || len > MAX_RDMA_MSG_SIZE)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+
+	ret = sxe2_aux_reply_rdma_msg_to_vf(adapter, vf_id, msg, len, session_id);
+	return ret;
+}
+
+int sxe2_rdma_msg_send(struct sxe2_adapter *adapter, enum sxe2_drv_cmd_opcode opcode,
+		       u8 *msg, u16 len, u8 *recv_msg, u16 recv_len)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, opcode, msg, len, recv_msg, recv_len);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("failed to exec rdma op=%u, ret=%d\n", opcode, ret);
+		ret = -EIO;
+	}
+	return ret;
+}
+
+static s32 sxe2_rdma_mac_rule_cmd(struct sxe2_adapter *adapter, u8 *mac, bool is_add)
+{
+	s32 ret = 0;
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+
+	if (!is_multicast_ether_addr(mac))
+		return -EADDRNOTAVAIL;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (is_add)
+		ret = sxe2_mac_addr_add(vsi, mac, SXE2_MAC_OWNER_ROCE);
+	else
+		ret = sxe2_mac_addr_del(vsi, mac, SXE2_MAC_OWNER_ROCE);
+
+	if (ret) {
+		LOG_ERROR_BDF("roce %s mac addr %pM failed, ret %d\n",
+			      is_add ? "add" : "del", mac, ret);
+	}
+
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	return ret;
+}
+
+static s32 sxe2_rdma_send_cmd(struct aux_core_dev_info *cdev_info,
+			      enum sxe2_drv_cmd_opcode op, u8 *msg, u16 len,
+			      u8 *recv_msg, u16 recv_len)
+{
+	s32 ret = -EINVAL;
+	struct sxe2_adapter *adapter;
+
+	if (!cdev_info || !msg || !len || len > MAX_RDMA_MSG_SIZE)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+	ret = sxe2_rdma_msg_send(adapter, op, msg, len, recv_msg, recv_len);
+
+	return ret;
+}
+
+static s32 sxe2_rdma_drv_config(struct aux_core_dev_info *cdev_info, u8 op, u8 *msg)
+{
+	s32 ret = -EINVAL;
+	struct sxe2_adapter *adapter;
+	enum aux_rdma_opcode opcode = op;
+
+	if (!cdev_info || !msg)
+		return -EINVAL;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+	switch (opcode) {
+	case RDMA_MAC_RULE_ADD:
+		ret = sxe2_rdma_mac_rule_cmd(adapter, msg, true);
+		break;
+	case RDMA_MAC_RULE_DELETE:
+		ret = sxe2_rdma_mac_rule_cmd(adapter, msg, false);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static void sxe2_rdma_loaded(struct aux_core_dev_info *cdev_info, bool loaded)
+{
+	struct sxe2_adapter *adapter;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+
+	if (loaded)
+		set_bit(SXE2_FLAG_RDMA_LOADED, adapter->flags);
+	else
+		clear_bit(SXE2_FLAG_RDMA_LOADED, adapter->flags);
+
+	LOG_INFO_BDF("roce loaded %d.\n", loaded);
+}
+
+static int sxe2_rdma_dump_pcap_cmd(struct aux_core_dev_info *cdev_info, u8 *mac,
+				   bool is_add)
+{
+	struct sxe2_adapter *adapter;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+
+	return sxe2_rdma_dump_pcap_setup(adapter->vsi_ctxt.main_vsi, mac, is_add);
+}
+
+static u32 sxe2_rdma_get_link_speed(struct aux_core_dev_info *cdev_info)
+{
+	struct sxe2_adapter *adapter;
+
+	adapter = pci_get_drvdata(cdev_info->pdev);
+
+	return sxe2_get_link_speed(adapter);
+}
+
+static struct sxe2_core_ops core_ops = {
+	.alloc_res = sxe2_alloc_res,
+	.free_res = sxe2_free_res,
+	.request_reset = sxe2_request_reset,
+	.get_vf_info = sxe2_get_vf_info,
+	.vc_send = sxe2_vc_send,
+	.rdma_send_cmd = sxe2_rdma_send_cmd,
+	.alloc_multi_res = sxe2_alloc_rdma_multi_qsets,
+	.free_multi_res = sxe2_free_rdma_multi_qsets,
+	.dump_pcap_cmd = sxe2_rdma_dump_pcap_cmd,
+	.rdma_drv_config = sxe2_rdma_drv_config,
+	.notify_rdma_load = sxe2_rdma_loaded,
+	.rdma_get_link_speed = sxe2_rdma_get_link_speed,
+};
+
+struct sxe2_auxiliary_drv *sxe2_rdma_aux_drv_get(struct aux_core_dev_info *cdev_info)
+{
+	struct auxiliary_device *adev = cdev_info->adev;
+
+	if (!adev || !adev->dev.driver) {
+		LOG_WARN("adev null.\n");
+		return NULL;
+	}
+	return container_of(adev->dev.driver, struct sxe2_auxiliary_drv,
+			    adrv.driver);
+}
+
+static int sxe2_rdma_aux_send_event(struct sxe2_adapter *adapter,
+				    struct sxe2_rdma_event_info *event)
+{
+	struct sxe2_auxiliary_drv *iadrv;
+	struct aux_core_dev_info *cdev_info;
+
+	if (!adapter || !event)
+		return -EINVAL;
+
+	if (sxe2_is_safe_mode(adapter))
+		return -EPERM;
+
+	cdev_info = &adapter->aux_ctxt.cdev_info;
+
+	mutex_lock(&adapter->aux_ctxt.adev_mutex);
+	if (!cdev_info->adev) {
+		mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+		LOG_WARN_BDF("adev null, event:0x%lx no need send.\n", *event->type);
+		return 0;
+	}
+
+	device_lock(&cdev_info->adev->dev);
+	iadrv = sxe2_rdma_aux_drv_get(cdev_info);
+	if (iadrv && iadrv->aux_ops.event_handler)
+		iadrv->aux_ops.event_handler(cdev_info, event);
+	device_unlock(&cdev_info->adev->dev);
+	mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+
+	return 0;
+}
+
+void sxe2_rdma_aux_adev_release(struct device *dev)
+{
+	struct sxe2_auxiliary_device *iadev;
+
+	iadev = container_of(dev, struct sxe2_auxiliary_device, adev.dev);
+	kfree(iadev);
+}
+
+STATIC s32 sxe2_rdma_aux_msix_get(struct sxe2_adapter *adapter)
+{
+	u16 irq_num = adapter->irq_ctxt.irq_layout.rdma;
+	s32 index = sxe2_irq_offset_get(adapter, irq_num, SXE2_VSI_T_RDMA);
+
+	if (index < 0) {
+		LOG_DEV_ERR("failed to get rdma irqs. err:%d\n", index);
+		return index;
+	}
+
+	adapter->irq_ctxt.rdma_base_idx = (u16)index;
+	return 0;
+}
+
+STATIC void sxe2_rdma_aux_add_qos_set(struct sxe2_adapter *adapter)
+{
+	struct sxe2_adapter *redundant_adapter;
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+	struct aux_core_dev_info *aux_info = &adapter->aux_ctxt.cdev_info;
+
+	(void)memset(aux_info->qos_info, 0x0, sizeof(aux_info->qos_info));
+	sxe2_setup_dcb_qos_info(adapter, &aux_info->qos_info[PRIMARY_PF_INDEX]);
+
+	if (lag && sxe2_lag_is_bonded(adapter)) {
+		redundant_adapter = sxe2_lag_role_find(lag, REDUNDANT_PF_INDEX);
+		if (!redundant_adapter) {
+			LOG_ERROR("qos info set, find redundant failed.\n");
+			return;
+		}
+		sxe2_setup_dcb_qos_info(redundant_adapter,
+					&aux_info->qos_info[REDUNDANT_PF_INDEX]);
+	}
+}
+
+int sxe2_rdma_aux_add(struct sxe2_adapter *adapter)
+{
+	int ret = 0;
+	struct auxiliary_device *adev;
+	struct sxe2_auxiliary_device *sadev;
+	struct aux_core_dev_info *cdev_info = &adapter->aux_ctxt.cdev_info;
+
+	if (cdev_info->adev) {
+		LOG_DEV_WARN("rdma adev exists, duplicate create.\n");
+		goto out;
+	}
+
+	sadev = kzalloc(sizeof(*sadev), GFP_KERNEL);
+	if (!sadev) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	adev = &sadev->adev;
+
+	mutex_lock(&adapter->aux_ctxt.adev_mutex);
+	cdev_info->adev = adev;
+	sadev->cdev_info = cdev_info;
+	mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+
+	cdev_info->adapter = adapter;
+	cdev_info->pdev = adapter->pdev;
+	cdev_info->netdev = adapter->vsi_ctxt.main_vsi->netdev;
+	cdev_info->pf_id = adapter->pf_idx;
+	cdev_info->ops = &core_ops;
+	cdev_info->ftype = AUX_FUNCTION_TYPE_PF;
+	cdev_info->ver.major = AUX_MAJOR_VER;
+	cdev_info->ver.minor = AUX_MINOR_VER;
+	cdev_info->rdma_caps.gen = AUX_RDMA_GEN_1;
+	cdev_info->vfid_base = adapter->vf_ctxt.vfid_base;
+	cdev_info->hw_addr = sxe2_reg_addr_get(&adapter->hw, 0);
+	cdev_info->cdev_info_id = SXE2_RDMA_INDEX;
+	cdev_info->vport_id = adapter->vsi_ctxt.main_vsi->idx_in_dev;
+	cdev_info->msix_count = adapter->irq_ctxt.irq_layout.rdma;
+	cdev_info->rdma_pf_bitmap = SXE2_RDMA_INVALID_PF;
+	(void)strscpy(cdev_info->drv_ver, SXE2_VERSION, SXE2_DRV_VER_STR_LEN);
+	sxe2_rdma_aux_add_qos_set(adapter);
+
+	cdev_info->msix_entries =
+		&adapter->irq_ctxt.msix_entries[adapter->irq_ctxt.rdma_base_idx];
+
+	adev->id = (u32)adapter->aux_ctxt.aux_idx;
+	adev->dev.release = sxe2_rdma_aux_adev_release;
+	adev->dev.parent = &cdev_info->pdev->dev;
+	adev->name = SXE2_RDMA_NAME;
+
+	ret = auxiliary_device_init(adev);
+	if (ret)
+		goto err_out;
+
+	ret = auxiliary_device_add(adev);
+	if (ret) {
+		auxiliary_device_uninit(adev);
+		goto err_out;
+	}
+
+	goto out;
+
+err_out:
+	kfree(sadev);
+	cdev_info->adev = NULL;
+out:
+	return ret;
+}
+
+void sxe2_rdma_aux_delete(struct aux_core_dev_info *cdev_info)
+{
+	struct sxe2_adapter *adapter;
+	struct auxiliary_device *adev;
+
+	if (cdev_info->pdev)
+		adapter = pci_get_drvdata(cdev_info->pdev);
+	else
+		return;
+
+	LOG_DEBUG_BDF("release aux device!adev is :0x%pK\n", cdev_info->adev);
+
+	mutex_lock(&adapter->aux_ctxt.adev_mutex);
+	if (!cdev_info->adev) {
+		mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+		return;
+	}
+	adev = cdev_info->adev;
+	cdev_info->adev = NULL;
+	mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+
+	if (adev) {
+		auxiliary_device_delete(adev);
+		auxiliary_device_uninit(adev);
+	}
+}
+
+int sxe2_rdma_aux_init(struct sxe2_adapter *adapter)
+{
+	int ret;
+
+	ret = sxe2_rdma_aux_msix_get(adapter);
+	if (ret < 0) {
+		LOG_DEV_ERR("failed to init aux driver, no irq to use.\n");
+		goto l_end;
+	}
+
+	adapter->aux_ctxt.aux_idx = ida_alloc(&sxe2_aux_ida, GFP_KERNEL);
+	if (adapter->aux_ctxt.aux_idx < 0) {
+		LOG_DEV_ERR("failed to allocate device ID for aux drvs\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	adapter->aux_ctxt.init = true;
+
+l_end:
+	return ret;
+}
+
+void sxe2_rdma_aux_deinit(struct sxe2_adapter *adapter)
+{
+	struct aux_core_dev_info *cdev_info = &adapter->aux_ctxt.cdev_info;
+
+	if (adapter->aux_ctxt.init) {
+		if (cdev_info->adev)
+			sxe2_rdma_aux_delete(cdev_info);
+
+		ida_free(&sxe2_aux_ida, adapter->aux_ctxt.aux_idx);
+
+		adapter->aux_ctxt.init = false;
+	}
+}
+
+s32 sxe2_rdma_aux_rebuild(struct sxe2_adapter *adapter)
+{
+	LOG_DEBUG_BDF("aux driver rebuild:index=%d\n", adapter->aux_ctxt.aux_idx);
+
+	if (sxe2_lag_support(adapter)) {
+		mutex_lock(&adapter->lag_ctxt->lock);
+		if (sxe2_lag_is_bonded(adapter)) {
+			mutex_unlock(&adapter->lag_ctxt->lock);
+			LOG_WARN_BDF("running in bond mode, rdma does not need rebuild.\n");
+			goto end;
+		}
+		mutex_unlock(&adapter->lag_ctxt->lock);
+	}
+
+	if (!sxe2_is_safe_mode(adapter))
+		return sxe2_rdma_aux_add(adapter);
+	LOG_WARN_BDF("running in safe mode, rdma does not need rebuild.\n");
+
+end:
+	return 0;
+}
+
+void sxe2_rdma_aux_send_reset_event(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rdma_event_info event;
+
+	(void)memset(&event, 0, sizeof(event));
+	set_bit(SXE2_EVENT_NOTIFY_RESET, event.type);
+	(void)sxe2_rdma_aux_send_event(adapter, &event);
+}
+
+int sxe2_rdma_aux_send_mtu_changed_event(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rdma_event_info event;
+
+	(void)memset(&event, 0x0, sizeof(event));
+	set_bit(SXE2_EVENT_MTU_CHANGED, event.type);
+	return sxe2_rdma_aux_send_event(adapter, &event);
+}
+
+int sxe2_rdma_aux_send_vf_reset_event(struct sxe2_adapter *adapter, u16 vf_id)
+{
+	struct sxe2_rdma_event_info event;
+
+	(void)memset(&event, 0x0, sizeof(event));
+	event.vf_id = vf_id;
+	set_bit(SXE2_EVENT_VF_RESET, event.type);
+	return sxe2_rdma_aux_send_event(adapter, &event);
+}
+
+int sxe2_rdma_aux_send_aeq_overflow_event(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rdma_event_info event;
+	int ret = 0;
+
+	(void)memset(&event, 0x0, sizeof(event));
+	set_bit(SXE2_EVENT_AEQ_OVERFLOW, event.type);
+	ret = sxe2_rdma_aux_send_event(adapter, &event);
+	LOG_DEBUG_BDF("aux send--aeq overflow err:ret %d\n", ret);
+	return ret;
+}
+
+void sxe2_aux_aeq_overflow_handler(struct sxe2_adapter *adapter)
+{
+	if (test_and_clear_bit(SXE2_FLAG_RDMA_AEQ_OVERFLOW, adapter->flags))
+		(void)sxe2_rdma_aux_send_aeq_overflow_event(adapter);
+}
+
+int sxe2_rdma_aux_send_failover_event(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rdma_event_info event;
+
+	(void)memset(&event, 0x0, sizeof(event));
+	set_bit(SXE2_EVENT_FAILOVER, event.type);
+	LOG_INFO_BDF("lag send failover event.\n");
+	return sxe2_rdma_aux_send_event(adapter, &event);
+}
+
+STATIC struct sxe2_adapter *sxe2_rdma_aux_qos_set(struct sxe2_adapter *adapter)
+{
+	struct aux_core_dev_info *aux_info = &adapter->aux_ctxt.cdev_info;
+
+	(void)memset(aux_info->qos_info, 0x0, sizeof(aux_info->qos_info));
+
+	sxe2_setup_dcb_qos_info(adapter, &aux_info->qos_info[PRIMARY_PF_INDEX]);
+
+	return adapter;
+}
+
+STATIC struct sxe2_adapter *sxe2_rdma_aux_bond_qos_set(struct sxe2_adapter *adapter,
+						       struct sxe2_lag_context *lag)
+{
+	struct sxe2_adapter *primary_adapter;
+	struct aux_core_dev_info *send_aux_info;
+	struct sxe2_adapter *send_adapter = NULL;
+
+	primary_adapter = sxe2_lag_role_find(lag, PRIMARY_PF_INDEX);
+	if (!primary_adapter) {
+		LOG_ERROR("qos info set, find primary failed.\n");
+		goto l_end;
+	}
+
+	send_adapter = primary_adapter;
+	send_aux_info = &primary_adapter->aux_ctxt.cdev_info;
+
+	if (primary_adapter->pf_idx == adapter->pf_idx) {
+		sxe2_setup_dcb_qos_info(send_adapter,
+					&send_aux_info->qos_info[PRIMARY_PF_INDEX]);
+	} else {
+		sxe2_setup_dcb_qos_info(adapter,
+					&send_aux_info->qos_info[REDUNDANT_PF_INDEX]);
+	}
+
+l_end:
+	return send_adapter;
+}
+
+STATIC struct sxe2_adapter *
+sxe2_rdma_aux_support_lag_qos_set(struct sxe2_adapter *adapter)
+{
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+	struct sxe2_adapter *send_adapter = adapter;
+	struct aux_core_dev_info *aux_info = &adapter->aux_ctxt.cdev_info;
+
+	if (lag && sxe2_lag_is_bonded(adapter)) {
+		send_adapter = sxe2_rdma_aux_bond_qos_set(adapter, lag);
+	} else {
+		(void)memset(aux_info->qos_info, 0x0, sizeof(aux_info->qos_info));
+		sxe2_setup_dcb_qos_info(send_adapter,
+					&aux_info->qos_info[PRIMARY_PF_INDEX]);
+	}
+
+	return send_adapter;
+}
+
+STATIC struct sxe2_adapter *
+sxe2_rdma_aux_change_qos_update(struct sxe2_adapter *adapter)
+{
+	struct sxe2_adapter *send_adapter = NULL;
+
+	if (sxe2_lag_support(adapter) && adapter->lag_ctxt) {
+		mutex_lock(&adapter->lag_ctxt->lock);
+		send_adapter = sxe2_rdma_aux_support_lag_qos_set(adapter);
+		mutex_unlock(&adapter->lag_ctxt->lock);
+	} else {
+		send_adapter = sxe2_rdma_aux_qos_set(adapter);
+	}
+
+	return send_adapter;
+}
+
+int sxe2_rdma_aux_send_tc_change_event(struct sxe2_adapter *adapter)
+{
+	int ret = -1;
+	struct sxe2_rdma_event_info event;
+	struct sxe2_adapter *send_adapter = NULL;
+
+	(void)memset(&event, 0x0, sizeof(event));
+
+	send_adapter = sxe2_rdma_aux_change_qos_update(adapter);
+	if (!send_adapter) {
+		LOG_ERROR("rdma aux send tc change event failed, pf id = %d\n",
+			  adapter->pf_idx);
+		return ret;
+	}
+
+	set_bit(SXE2_EVENT_TC_CHANGE, event.type);
+	return sxe2_rdma_aux_send_event(send_adapter, &event);
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b9ebdf37e5f7898da1b0d568f771b36095e887b
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_aux_driver.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_aux_driver.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_AUX_DRIVER_H__
+#define __SXE2_AUX_DRIVER_H__
+
+#include "sxe2_drv_aux.h"
+
+#define SXE2_RDMA_NAME "roce"
+
+struct sxe2_adapter;
+struct sxe2_rdma_mac_entry {
+	struct list_head list_entry;
+	u8 mac_addr[ETH_ALEN];
+};
+
+struct sxe2_rdma_aux_context {
+	struct aux_core_dev_info cdev_info;
+	int aux_idx;
+	struct mutex adev_mutex;
+	bool init;
+};
+
+u8 sxe2_rdma_aux_get_qset_tc(struct sxe2_adapter *adapter,
+			     struct aux_rdma_qset_params *qset);
+
+struct sxe2_auxiliary_drv *
+sxe2_rdma_aux_drv_get(struct aux_core_dev_info *cdev_info);
+
+int sxe2_rdma_aux_init(struct sxe2_adapter *adapter);
+
+void sxe2_rdma_aux_deinit(struct sxe2_adapter *adapter);
+int sxe2_rdma_aux_add(struct sxe2_adapter *adapter);
+
+void sxe2_rdma_aux_delete(struct aux_core_dev_info *cdev_info);
+
+s32 sxe2_rdma_aux_rebuild(struct sxe2_adapter *adapter);
+
+void sxe2_rdma_aux_send_reset_event(struct sxe2_adapter *adapter);
+
+int sxe2_rdma_aux_send_mtu_changed_event(struct sxe2_adapter *adapter);
+
+int sxe2_rdma_aux_send_vf_reset_event(struct sxe2_adapter *adapter, u16 vf_id);
+
+int sxe2_rdma_aux_send_aeq_overflow_event(struct sxe2_adapter *adapter);
+
+void sxe2_aux_aeq_overflow_handler(struct sxe2_adapter *adapter);
+
+int sxe2_rdma_msg_send(struct sxe2_adapter *adapter,
+		       enum sxe2_drv_cmd_opcode opcode, u8 *msg, u16 len,
+		       u8 *recv_msg, u16 recv_len);
+int sxe2_rdma_aux_send_failover_event(struct sxe2_adapter *adapter);
+
+int sxe2_rdma_aux_send_tc_change_event(struct sxe2_adapter *adapter);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.c
new file mode 100644
index 0000000000000000000000000000000000000000..d4767ce757fc9388673e81855cdab6cc319648c8
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_common.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_common.h"
+#include "sxe2_cmd_channel.h"
+#include "sxe2_log.h"
+#include "sxe2_spec.h"
+
+STATIC void sxe2_dev_caps_set(struct sxe2_adapter *adapter,
+			      struct sxe2_fwc_dev_caps *dev_caps)
+{
+	adapter->aux_ctxt.cdev_info.pf_cnt = dev_caps->pf_cnt;
+	adapter->pf_cnt = dev_caps->pf_cnt;
+
+	if (dev_caps->dev_common_caps.acl_support)
+		set_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags);
+
+	set_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags);
+
+	LOG_INFO_BDF("pf_cnt:%u\n", dev_caps->pf_cnt);
+}
+
+STATIC s32 sxe2_fwc_dev_caps_get(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_fwc_dev_caps dev_caps = {};
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DEV_CAPS, NULL, 0, &dev_caps,
+				  sizeof(dev_caps));
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("get dev caps failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	sxe2_dev_caps_set(adapter, &dev_caps);
+
+l_end:
+	return ret;
+}
+
+static u16 __sxe2_min_msix_num_calc(struct sxe2_adapter *adapter)
+{
+	u16 cnt;
+	u32 mode = (u32)sxe2_com_mode_get(adapter);
+
+	if (mode == SXE2_COM_MODULE_KERNEL)
+		cnt = SXE2_MSIX_MIN_CNT + SXE2_RDMA_MSIX_MIN_CNT +
+		      SXE2_FNAV_MSIX_CNT + SXE2_ESWITCH_MSIX_CNT;
+	else if (mode == SXE2_COM_MODULE_DPDK)
+		cnt = SXE2_DPDK_MSIX_MIN_CNT + SXE2_EVENT_MSIX_CNT +
+		      SXE2_ESWITCH_MSIX_CNT + SXE2_DPDK_ESWITCH_MSIX_CNT;
+	else
+		cnt = SXE2_MSIX_MIN_CNT + SXE2_RDMA_MSIX_MIN_CNT +
+		      SXE2_FNAV_MSIX_CNT + SXE2_ESWITCH_MSIX_CNT +
+		      SXE2_DPDK_ESWITCH_MSIX_CNT + SXE2_DPDK_MSIX_MIN_CNT;
+
+	LOG_INFO_BDF("mode:%d min irq cnt:%u\n", mode, cnt);
+
+	return cnt;
+}
+
+u16 sxe2_min_msix_num_calc(struct sxe2_adapter *adapter)
+{
+	u16 min_msix;
+
+	if (sxe2_is_safe_mode(adapter))
+		min_msix = SXE2_MSIX_MIN_CNT;
+	else
+		min_msix = __sxe2_min_msix_num_calc(adapter);
+	return min_msix;
+}
+
+u16 sxe2_min_queue_num_calc(struct sxe2_adapter *adapter)
+{
+	u16 min_msix;
+
+	if (sxe2_is_safe_mode(adapter))
+		min_msix = SXE2_SAFE_MODE_TXQ_CNT;
+	else
+		min_msix = SXE2_NON_SAFEMODE_MIN_TXQ_CNT;
+	return min_msix;
+}
+
+STATIC s32 sxe2_func_caps_check(struct sxe2_adapter *adapter,
+				struct sxe2_fwc_func_caps *func_caps)
+{
+	s32 ret = 0;
+	u16 min_msix = sxe2_min_msix_num_calc(adapter);
+	u16 min_queue = sxe2_min_queue_num_calc(adapter);
+
+	if (le16_to_cpu(func_caps->msix_caps.cnt) < min_msix)
+		ret = -ENOSPC;
+	if (le16_to_cpu(func_caps->tx_caps.cnt) < min_queue)
+		ret = -ENOSPC;
+	if (le16_to_cpu(func_caps->rx_caps.cnt) < min_queue)
+		ret = -ENOSPC;
+	return ret;
+}
+
+STATIC s32 sxe2_sw_caps_set(struct sxe2_adapter *adapter,
+			    struct sxe2_fwc_func_caps *func_caps)
+{
+	if (sxe2_func_caps_check(adapter, func_caps))
+		return -ENOSPC;
+
+	adapter->irq_ctxt.max_cnt = le16_to_cpu(func_caps->msix_caps.cnt);
+	adapter->irq_ctxt.base_idx_in_dev =
+		le16_to_cpu(func_caps->msix_caps.base_idx);
+
+	adapter->q_ctxt.max_txq_cnt = le16_to_cpu(func_caps->tx_caps.cnt);
+	adapter->q_ctxt.txq_base_idx_in_dev =
+		le16_to_cpu(func_caps->tx_caps.base_idx);
+	adapter->q_ctxt.max_rxq_cnt = le16_to_cpu(func_caps->rx_caps.cnt);
+	adapter->q_ctxt.rxq_base_idx_in_dev =
+		le16_to_cpu(func_caps->rx_caps.base_idx);
+
+	adapter->vsi_ctxt.max_cnt = le16_to_cpu(func_caps->vsi_caps.cnt);
+	adapter->vsi_ctxt.base_idx_in_dev =
+		le16_to_cpu(func_caps->vsi_caps.base_idx);
+	adapter->pf_idx = func_caps->pf_idx;
+	adapter->port_idx = func_caps->port_idx;
+
+	adapter->caps_ctxt.max_rss_lut_size =
+		le16_to_cpu(func_caps->ppe_caps.rss_lut_size);
+	adapter->caps_ctxt.fnav_space_bsize =
+		le16_to_cpu(func_caps->ppe_caps.fnav_space_bsize);
+	adapter->caps_ctxt.fnav_space_gsize =
+		le16_to_cpu(func_caps->ppe_caps.fnav_space_gsize);
+	adapter->caps_ctxt.fnav_stat_base =
+		le16_to_cpu(func_caps->ppe_caps.fnav_counter_base);
+	adapter->caps_ctxt.fnav_stat_num =
+		le16_to_cpu(func_caps->ppe_caps.fnav_counter_num);
+	adapter->caps_ctxt.global_lut_base =
+		le16_to_cpu(func_caps->ppe_caps.rss_global_lut_base);
+	adapter->caps_ctxt.global_lut_num =
+		le16_to_cpu(func_caps->ppe_caps.rss_global_lut_num);
+
+	if (func_caps->vf_caps.sriov_cap) {
+		set_bit(SXE2_FLAG_SRIOV_CAPABLE, adapter->flags);
+		adapter->vf_ctxt.max_vfs = (u16)min_t(u16,
+				(le16_to_cpu(func_caps->vf_caps.cnt)),
+				SXE2_VF_NUM);
+		adapter->vf_ctxt.vfid_base =
+			le16_to_cpu(func_caps->vf_caps.base_idx);
+	}
+
+	clear_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags);
+	if (func_caps->common_caps.vmdq_support)
+		set_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags);
+
+	adapter->ptp_ctxt.ptp_owned =
+		(func_caps->pf_idx == func_caps->common_caps.ptp_owner);
+
+	if (sxe2_is_safe_mode(adapter))
+		sxe2_safe_mode_caps_set(adapter);
+
+	LOG_INFO_BDF("pf_idx:%u port_idx:%u irq max:%u base:%u txq max:%u\n"
+		     "txq_base_idx_in_dev:%u rxq max:%u rxq_base_idx_in_dev:%u\n"
+		     "sriov_cap:%u max_vfs:%u vfid_base:%u rss_lut_size:%u.\n"
+		     "fnav_bsize: %u, fnav_gsize: %u vsi_max_cnt:%u vsi_base_id:%u\n",
+		     adapter->pf_idx, adapter->port_idx, adapter->irq_ctxt.max_cnt,
+		     adapter->irq_ctxt.base_idx_in_dev, adapter->q_ctxt.max_txq_cnt,
+		     adapter->q_ctxt.txq_base_idx_in_dev,
+		     adapter->q_ctxt.max_rxq_cnt,
+		     adapter->q_ctxt.rxq_base_idx_in_dev,
+		     func_caps->vf_caps.sriov_cap, adapter->vf_ctxt.max_vfs,
+		     adapter->vf_ctxt.vfid_base, adapter->caps_ctxt.max_rss_lut_size,
+		     adapter->caps_ctxt.fnav_space_bsize,
+		     adapter->caps_ctxt.fnav_space_gsize, adapter->vsi_ctxt.max_cnt,
+		     adapter->vsi_ctxt.base_idx_in_dev);
+
+	return 0;
+}
+
+s32 sxe2_fwc_func_caps_get(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_fwc_func_caps func_caps = {};
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FUNC_CAPS, NULL, 0, &func_caps,
+				  sizeof(func_caps));
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_DEV_ERR("get func caps failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_end;
+	}
+
+	ret = sxe2_sw_caps_set(adapter, &func_caps);
+	if (ret)
+		LOG_DEV_ERR("set func caps failed, ret=%d\n", ret);
+
+l_end:
+	return ret;
+}
+
+s32 __sxe2_drv_mode_get(struct sxe2_adapter *adapter,
+			struct sxe2_fwc_drv_mode_resp *resp, u32 resp_len)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DRV_MODE_GET, NULL, 0, resp,
+				  resp_len);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_DEV_INFO("get drv mode failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_drv_mode_get(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2_fwc_drv_mode_resp resp = {};
+
+	ret = __sxe2_drv_mode_get(adapter, &resp, sizeof(resp));
+	if (!ret) {
+		if (resp.drv_mode != SXE2_COM_MODULE_UNDEFINED) {
+			adapter->drv_mode = resp.drv_mode;
+			goto end;
+		} else {
+			ret = -EINVAL;
+		}
+	}
+
+	if ((sxe2_g_com_mode_get() != SXE2_COM_MODULE_UNDEFINED) &&
+	    (sxe2_g_com_mode_get() != SXE2_COM_MODULE_DPDK)) {
+		adapter->drv_mode = sxe2_g_com_mode_get();
+		ret = 0;
+	}
+
+end:
+	return ret;
+}
+
+s32 sxe2_drv_mode_set(struct sxe2_adapter *adapter, enum sxe2_com_module type)
+{
+	s32 ret = 0;
+	struct sxe2_fwc_drv_mode_req req = {};
+	struct sxe2_cmd_params cmd = {};
+
+	req.drv_mode = type;
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DRV_MODE_SET, &req, sizeof(req),
+				  NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_DEV_ERR("set drv mode failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_hw_mtu_init(struct sxe2_adapter *adapter, u32 init_mtu, u8 is_set_hw)
+{
+	s32 ret;
+	struct sxe2_fw_mtu_info mtu = {};
+	struct sxe2_cmd_params cmd = {};
+
+	mtu.mtu = init_mtu;
+	mtu.is_set_hw = is_set_hw;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MAC_MTU_SET, &mtu, sizeof(mtu),
+				  NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret)
+		LOG_DEV_ERR("init mac mtu failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+s32 sxe2_caps_get(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+
+	ret = sxe2_fwc_dev_caps_get(adapter);
+	if (ret)
+		goto l_end;
+
+	ret = sxe2_fwc_func_caps_get(adapter);
+
+l_end:
+	return ret;
+}
+
+u32 sxe2_local_cpus_cnt_get(struct device *device)
+{
+	unsigned int cnt;
+	int node;
+
+	node = dev_to_node(device);
+
+	if (node == NUMA_NO_NODE)
+		cnt = cpumask_weight(cpu_online_mask);
+	else
+		cnt = cpumask_weight(cpumask_of_node(node));
+
+	return cnt;
+}
+
+u32 sxe2_standardize_cpu_cnt(u32 cpu_cnt)
+{
+	if (cpu_cnt > SXE2_DFLT_IRQS_MAX_CNT)
+		cpu_cnt = SXE2_DFLT_IRQS_MAX_CNT;
+	else if (cpu_cnt < SXE2_DFLT_IRQS_MIN_CNT)
+		cpu_cnt = SXE2_DFLT_IRQS_MIN_CNT;
+
+	return cpu_cnt;
+}
+
+bool sxe2_is_safe_mode(struct sxe2_adapter *adapter)
+{
+	return !test_bit(SXE2_FLAG_ADVANCE_MODE, adapter->flags);
+}
+
+void sxe2_safe_mode_caps_set(struct sxe2_adapter *adapter)
+{
+	adapter->irq_ctxt.max_cnt = SXE2_SAFE_MODE_IRQ_CNT;
+	adapter->q_ctxt.max_txq_cnt = SXE2_SAFE_MODE_TXQ_CNT;
+	adapter->q_ctxt.max_rxq_cnt = SXE2_SAFE_MODE_RXQ_CNT;
+	adapter->vsi_ctxt.max_cnt = SXE2_SAFE_MODE_VSI_CNT;
+
+	clear_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags);
+
+	clear_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags);
+
+	clear_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags);
+
+	clear_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags);
+}
+
+bool sxe2_is_vf_vlan_enabled(struct sxe2_vsi *vsi)
+{
+	return false;
+}
+
+s32 sxe2_err_code_trans_hw(s32 err)
+{
+	s32 ret;
+
+	switch (err) {
+	case SXE2_HW_ERR_SUCCESS:
+		ret = 0;
+		break;
+	case -SXE2_HW_ERR_FAULT:
+		ret = -EFAULT;
+		break;
+	case -SXE2_HW_ERR_TIMEDOUT:
+		ret = -ETIMEDOUT;
+		break;
+	case -SXE2_HW_ERR_INVAL:
+		ret = -EINVAL;
+		break;
+	case -SXE2_HW_ERR_IO:
+	default:
+		ret = -EIO;
+		break;
+	}
+
+	return ret;
+}
+
+s32 sxe2_fwc_pxe_disable(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_fwc_pxe_req req = {};
+	struct sxe2_cmd_params cmd = {};
+
+	req.ena = 0;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_PXE_CTRL, &req, sizeof(req), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("pxe disable failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+	return ret;
+}
+
+void sxe2_queue_work(struct sxe2_adapter *adapter, struct workqueue_struct *wq,
+		     struct work_struct *dwork)
+{
+	if (!queue_work(wq, dwork))
+		LOG_WARN_BDF("work was already on a queue.\n");
+}
+
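+/*
+ * The PF carves its absolute queue/vector space into per-consumer windows;
+ * only the DPDK window (offset plus count taken from the *_layout tables)
+ * is reported here, so user-space drivers never overlap queues owned by
+ * the kernel data path. Illustrative layout (actual sizes are decided at
+ * probe time):
+ *
+ *   [ kernel queues | ... | dpdk_offset .. dpdk_offset + dpdk - 1 ]
+ */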
+s32 sxe2_dpdk_pf_caps_get(struct sxe2_adapter *adapter,
+			  struct sxe2_fwc_func_caps *caps)
+{
+	s32 ret = 0;
+
+	if (!adapter || !caps) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("param invalid.\n");
+		goto l_end;
+	}
+
+	caps->tx_caps.base_idx = adapter->q_ctxt.txq_layout.dpdk_offset;
+	caps->tx_caps.cnt = adapter->q_ctxt.txq_layout.dpdk;
+
+	caps->rx_caps.base_idx = adapter->q_ctxt.rxq_layout.dpdk_offset;
+	caps->rx_caps.cnt = adapter->q_ctxt.rxq_layout.dpdk;
+
+	caps->msix_caps.base_idx = adapter->irq_ctxt.irq_layout.dpdk_offset;
+	caps->msix_caps.cnt = adapter->irq_ctxt.irq_layout.dpdk;
+
+	LOG_INFO_BDF("dpdk pf txq base:%d cnt:%d rxq base:%d cnt:%d irq base:%d cnt:%d.\n",
+		     caps->tx_caps.base_idx, caps->tx_caps.cnt,
+		     caps->rx_caps.base_idx, caps->rx_caps.cnt,
+		     caps->msix_caps.base_idx, caps->msix_caps.cnt);
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..a10232cadcee6a5eed443e63f32289af1a6bad5e
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_common.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_COMMON_H__
+#define __SXE2_COMMON_H__
+
+#include "sxe2.h"
+#include "sxe2_spec.h"
+
+#ifndef SXE2_VSI_DFLT_IRQS_MAX_CNT
+#define SXE2_VSI_DFLT_IRQS_MAX_CNT 64
+#endif
+#define SXE2_VSI_DFLT_IRQS_MIN_CNT 8
+
+#define SXE2_ADAPTER_TO_DEV(adapter) \
+	(&((adapter)->pdev->dev))
+
+#define SXE2_SAFE_MODE_TXQ_CNT (1)
+#define SXE2_SAFE_MODE_RXQ_CNT (1)
+#define SXE2_SAFE_MODE_IRQ_CNT (2)
+#define SXE2_SAFE_MODE_VSI_CNT (1)
+#define SXE2_CTRLQ_CNT (1)
+#define SXE2_LBQ_CNT (1)
+#define SXE2_NON_SAFEMODE_MIN_TXQ_CNT \
+	(SXE2_SAFE_MODE_TXQ_CNT + SXE2_CTRLQ_CNT + SXE2_LBQ_CNT)
+
+s32 sxe2_caps_get(struct sxe2_adapter *adapter);
+
+u32 sxe2_local_cpus_cnt_get(struct device *device);
+
+u32 sxe2_standardize_cpu_cnt(u32 cpu_cnt);
+
+bool sxe2_is_safe_mode(struct sxe2_adapter *adapter);
+
+void sxe2_safe_mode_caps_set(struct sxe2_adapter *adapter);
+
+bool sxe2_is_vf_vlan_enabled(struct sxe2_vsi *vsi);
+
+static inline void sxe2_itr_set(struct sxe2_irq_data *irq_data,
+				struct sxe2_q_container *qc, u16 itr)
+{
+	struct sxe2_hw *hw = &irq_data->vsi->adapter->hw;
+
+	sxe2_hw_irq_itr_set(hw, irq_data->idx_in_pf, qc->itr_idx, itr);
+}
+
+s32 sxe2_hw_mtu_init(struct sxe2_adapter *adapter, u32 init_mtu, u8 is_set_hw);
+
+s32 sxe2_err_code_trans_hw(s32 err);
+
+s32 sxe2_fwc_pxe_disable(struct sxe2_adapter *adapter);
+
+s32 sxe2_fwc_func_caps_get(struct sxe2_adapter *adapter);
+
+void sxe2_queue_work(struct sxe2_adapter *adapter, struct workqueue_struct *wq,
+		     struct work_struct *dwork);
+
+u16 sxe2_min_msix_num_calc(struct sxe2_adapter *adapter);
+
+u16 sxe2_min_queue_num_calc(struct sxe2_adapter *adapter);
+
+s32 sxe2_dpdk_pf_caps_get(struct sxe2_adapter *adapter, struct sxe2_fwc_func_caps *caps);
+
+s32 sxe2_drv_mode_set(struct sxe2_adapter *adapter, enum sxe2_com_module type);
+
+s32 __sxe2_drv_mode_get(struct sxe2_adapter *adapter,
+			struct sxe2_fwc_drv_mode_resp *resp, u32 resp_len);
+
+s32 sxe2_drv_mode_get(struct sxe2_adapter *adapter);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.c
new file mode 100644
index 0000000000000000000000000000000000000000..4743ec1bdd5faa04b85ae8dae22cd6428f8df2de
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_dcb.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2.h"
+#include "sxe2_hw.h"
+#include "sxe2_vsi.h"
+#include "sxe2_log.h"
+#include "sxe2_dcb.h"
+#include "sxe2_event.h"
+#include "sxe2_common.h"
+#include "sxe2_dcb_nl.h"
+#include "sxe2_lldp_tlv.h"
+#include "sxe2_txsched.h"
+#include "sxe2_aux_driver.h"
+#include "sxe2_switch.h"
+#include "sxe2_netdev.h"
+#include
+#include
+
+#define CEE_DCBX_MAX_PGS 8
+#define CEE_DCBX_MAX_PRIO 8
+
+#define SXE2_DCBX_WAIT_CNT 10
+#define SXE2_DCBX_RESET_WAIT_MS 5
+
+void sxe2_dcb_set_state(struct sxe2_adapter *adapter, enum sxe2_dcb_state state,
+			bool need_lock)
+{
+	if (need_lock) {
+		mutex_lock(&adapter->dcb_ctxt.tc_mutex);
+		adapter->dcb_ctxt.state = state;
+		mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+	} else {
+		adapter->dcb_ctxt.state = state;
+	}
+}
+
+STATIC s32 sxe2_dcb_vsis_enable_unlock(struct sxe2_adapter *adapter)
+{
+	u32 i;
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi)
+			continue;
+
+		switch (vsi->type) {
+		case SXE2_VSI_T_PF:
+			ret = sxe2_main_vsi_open(vsi);
+			break;
+
+		case SXE2_VSI_T_ESW:
+			ret = sxe2_vsi_open(vsi);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+
+STATIC void sxe2_dcb_vsis_disable_unlock(struct sxe2_adapter *adapter)
+{
+	u32 i;
+	struct sxe2_vsi *vsi;
+
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi)
+			continue;
+
+		switch (vsi->type) {
+		case SXE2_VSI_T_PF:
+		case SXE2_VSI_T_ESW:
+			(void)sxe2_vsi_close(vsi);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+static u8 sxe2_dcb_mode_get(struct sxe2_adapter *adapter, bool host)
+{
+	u8 mode;
+
+	if (host)
+		mode = DCB_CAP_DCBX_HOST;
+	else
+		mode = DCB_CAP_DCBX_LLD_MANAGED;
+
+	if (adapter->dcb_ctxt.local_dcbx_cfg.dcbx_mode & SXE2_DCBX_MODE_CEE)
+		mode |= DCB_CAP_DCBX_VER_CEE;
+	else
+		mode |= DCB_CAP_DCBX_VER_IEEE;
+
+	return mode;
+}
+
+s32 sxe2_dcb_lldp_mib_cfg(struct sxe2_adapter *adapter,
+			  struct sxe2_fwc_local_mib_set *mib)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_MIB_SET, mib,
+				  sizeof(struct sxe2_fwc_local_mib_set) + mib->mib_len,
+				  NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret)
+		LOG_ERROR_BDF("dcb mib set failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+STATIC s32 sxe2_dcb_cfg_download(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_dcbx_cfg *dcbcfg;
+	struct sxe2_fwc_local_mib_set *lldpmib;
+
+	dcbcfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+
+	lldpmib = devm_kzalloc(SXE2_ADAPTER_TO_DEV(adapter),
+			       (sizeof(struct sxe2_fwc_local_mib_set) + SXE2_LLDPDU_SIZE),
+			       GFP_KERNEL);
+	if (!lldpmib)
+		return -ENOMEM;
+
+	sxe2_dcb_cfg_to_lldp(lldpmib->mib_buffer, &lldpmib->mib_len, dcbcfg);
+
+	ret = sxe2_dcb_lldp_mib_cfg(adapter, lldpmib);
+
+	devm_kfree(SXE2_ADAPTER_TO_DEV(adapter), lldpmib);
+
+	return ret;
+}
+
+u8 sxe2_dcb_tc_cnt_get(struct sxe2_dcbx_cfg *dcbcfg)
+{
+	u32 i;
+	u8 tc_cnt = 0;
+	u8 tc_bitmap = 0;
+
+	sxe2_for_each_tc(i) tc_bitmap |= BIT(dcbcfg->ets.prio_tbl[i]);
+
+	sxe2_for_each_tc(i)
+	{
+		if (tc_bitmap & BIT(i))
+			tc_cnt++;
+		else
+			break;
+	}
+
+	if (!tc_cnt)
+		tc_cnt = SXE2_DFLT_TC_NUM;
+
+	return tc_cnt;
+}
+
+u8 sxe2_dcb_tc_bitmap_get(struct sxe2_dcbx_cfg *dcbcfg)
+{
+	u8 i, tc_cnt, tc_bitmap = 1;
+
+	tc_cnt = sxe2_dcb_tc_cnt_get(dcbcfg);
+
+	for (i = 0; i < tc_cnt; i++)
+		tc_bitmap |= BIT(i);
+
+	return tc_bitmap;
+}
+
+s32 sxe2_dcb_bw_chk(struct sxe2_adapter *adapter, struct sxe2_dcbx_cfg *dcbcfg)
+{
+	s32 i;
+	u8 tc_cnt, total_bw = 0;
+	u8 ets_tc_cnt = 0;
+	struct sxe2_dcb_ets_cfg *etscfg = &dcbcfg->ets;
+
+	tc_cnt = sxe2_dcb_tc_cnt_get(dcbcfg);
+
+	if (tc_cnt == SXE2_DFLT_TC_NUM) {
+		etscfg->tcbw_tbl[0] = SXE2_TC_MAX_BW;
+		for (i = 1; i < IEEE_8021QAZ_MAX_TCS; i++) {
+			if (etscfg->tsa_tbl[i] == IEEE_8021QAZ_TSA_ETS)
+				etscfg->tcbw_tbl[i] = 0;
+			else
+				etscfg->tcbw_tbl[i] = SXE2_TC_MAX_BW;
+		}
+		LOG_DEV_WARN("One tc enabled, change tc 0 bandwidth to 100.\n");
+		return 0;
+	}
+
+	for (i = 0; i < tc_cnt; i++) {
+		if (etscfg->tsa_tbl[i] == IEEE_8021QAZ_TSA_ETS) {
+			if (etscfg->tcbw_tbl[i] == 0) {
+				LOG_DEV_ERR("TC %d is used but bw is 0.\n", i);
+				return -EINVAL;
+			}
+			total_bw += etscfg->tcbw_tbl[i];
+			ets_tc_cnt++;
+		} else {
+			etscfg->tcbw_tbl[i] = SXE2_TC_MAX_BW;
+		}
+	}
+
+	if (total_bw != SXE2_TC_MAX_BW && ets_tc_cnt != 0) {
+		LOG_DEV_ERR("invalid config, total bandwidth must equal 100, total_bw %d\n",
+			    total_bw);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void sxe2_vsi_netdev_tc_cfg(struct sxe2_vsi *vsi, u8 tc_bitmap)
+{
+	u8 i;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct net_device *netdev = vsi->netdev;
+
+	if (!netdev)
+		return;
+
+	LOG_INFO_BDF("netdev tc cfg, tc_cnt=%d, tc_map=%x\n", vsi->tc.tc_cnt,
+		     vsi->tc.tc_map);
+
+	if (netdev_set_num_tc(netdev, vsi->tc.tc_cnt))
+		return;
+
+	sxe2_for_each_tc(i)
+	{
+		if (vsi->tc.tc_map & BIT(i)) {
+			(void)netdev_set_tc_queue(netdev, i, vsi->tc.info[i].txq_cnt,
+						  vsi->tc.info[i].txq_offset);
+			LOG_INFO_BDF("netdev tc[%d] q cfg, txq_cnt=%d, txq_offset=%d\n",
+				     i, vsi->tc.info[i].txq_cnt,
+				     vsi->tc.info[i].txq_offset);
+		}
+	}
+}
+
+STATIC void sxe2_dcb_vsi_cfg(struct sxe2_vsi *vsi, u8 tc_bitmap)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_dcbx_cfg *dcbcfg;
+	struct net_device *netdev = vsi->netdev;
+	int i;
+	u8 ets_tc;
+
+	sxe2_vsi_tc_cfg(vsi);
+
+	sxe2_vsi_queues_irqs_map(vsi);
+
+	ret = sxe2_vsi_recfg(vsi);
+
+	if (ret) {
+		LOG_ERROR_BDF("vsi recfg failed: %d\n", ret);
+		goto l_err;
+	}
+
+	sxe2_vsi_netdev_tc_cfg(vsi, tc_bitmap);
+
+	dcbcfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+	for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) {
+		ets_tc = dcbcfg->ets.prio_tbl[i];
+
+		(void)netdev_set_prio_tc_map(netdev, i, ets_tc);
+
+		LOG_INFO_BDF("netdev map prio[%d] to tc[%d]\n", i, ets_tc);
+	}
+l_err:
+	return;
+}
+
+static bool sxe2_dcb_tc_contig(u8 *prio_table)
+{
+	bool found_empty = false;
+	u8 used_tc = 0;
+	int i;
+
+	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+		used_tc |= BIT(prio_table[i]);
+
+	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+		if (used_tc & BIT(i)) {
+			if (found_empty)
+				return false;
+		} else {
+			found_empty = true;
+		}
+	}
+
+	return true;
+}
+
+void sxe2_setup_dcb_qos_info(struct sxe2_adapter *adapter,
+			     struct aux_qos_params *qos_info)
+{
+	struct sxe2_dcbx_cfg *dcbx_cfg;
+	unsigned int i;
+
+	dcbx_cfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+
+	qos_info->num_apps = dcbx_cfg->numapps;
+	qos_info->num_tc = sxe2_dcb_tc_cnt_get(dcbx_cfg);
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		qos_info->up2tc[i] = dcbx_cfg->ets.prio_tbl[i];
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		qos_info->tc_info[i].rel_bw = dcbx_cfg->ets.tcbw_tbl[i];
+
+	for (i = 0; i < qos_info->num_apps; i++) {
+		qos_info->apps[i].priority = dcbx_cfg->app[i].prio;
+		qos_info->apps[i].prot_id = dcbx_cfg->app[i].prot_id;
+		qos_info->apps[i].selector = dcbx_cfg->app[i].selector;
+	}
+
+	qos_info->pfc_mode = dcbx_cfg->qos_mode;
+	for (i = 0; i < SXE2_DSCP_MAX_NUM; i++)
+		qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
+STATIC bool sxe2_dcb_tc_qalloc_chk(struct sxe2_adapter *adapter,
+				   struct sxe2_dcbx_cfg *new_cfg)
+{
+	u8 tc_cnt;
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+
+	tc_cnt = sxe2_dcb_tc_cnt_get(new_cfg);
+	if (vsi->txqs.q_alloc < tc_cnt) {
+		LOG_DEV_ERR("Trying to use more Traffic Classes (%u) "
+			    "than tx queues allocated (%u)!\n",
+			    tc_cnt, vsi->txqs.q_alloc);
+		return false;
+	}
+
+	if (vsi->rxqs.q_alloc < tc_cnt) {
+		LOG_DEV_ERR("Trying to use more Traffic Classes (%u) "
+			    "than rx queues allocated (%u)!\n",
+			    tc_cnt, vsi->rxqs.q_alloc);
+		return false;
+	}
+
+	return true;
+}
+
+s32 sxe2_fc_get(struct sxe2_adapter *adapter, u16 vsi_id, u8 *fc)
+{
+	s32 ret = 0;
+	struct sxe2_vsi *vsi = NULL;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	vsi = sxe2_vsi_get_by_idx(adapter, vsi_id);
+	if (!vsi) {
+		ret = -EINVAL;
+		LOG_ERROR("find vf vsi by vsi_id %d failed.\n", vsi_id);
+		goto l_unlock;
+	}
+
+	if (test_bit(SXE2_VSI_FLAG_FC_ON, vsi->flags)) {
+		if (adapter->lfc_ctxt.rx_en || adapter->lfc_ctxt.tx_en)
+			*fc = SXE2_FC_T_LFC;
+		else
+			*fc = SXE2_FC_T_PFC;
+	} else {
+		*fc = SXE2_FC_T_DIS;
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	return ret;
+}
+
+void sxe2_set_fc_flag(struct sxe2_vsi *vsi, bool on)
+{
+	if (on)
+		set_bit(SXE2_VSI_FLAG_FC_ON, vsi->flags);
+	else
+		clear_bit(SXE2_VSI_FLAG_FC_ON, vsi->flags);
+}
+
+s32 sxe2_dcb_cfg(struct sxe2_adapter *adapter, struct sxe2_dcbx_cfg *new_cfg, bool locked)
+{
+	u8 tc_cnt;
+	struct sxe2_vsi *pf_vsi;
+	s32 ret = 0;
+	struct sxe2_dcbx_cfg *old_cfg, *curr_cfg;
+	struct sxe2_vsi *main_vsi;
+	u8 tc_map;
+	int i;
+
+	curr_cfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+
+	if (!sxe2_dcb_tc_contig(new_cfg->ets.prio_tbl)) {
+		LOG_DEV_ERR("tc not contig\n");
+		return -EINVAL;
+	}
+
+	if (!sxe2_dcb_tc_qalloc_chk(adapter, new_cfg))
+		return -EINVAL;
+
+	tc_cnt = sxe2_dcb_tc_cnt_get(new_cfg);
+	if (tc_cnt == SXE2_DFLT_TC_NUM) {
+		LOG_INFO_BDF("DCB tagging disabled (num TC = 1)\n");
+		clear_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags);
+	} else {
+		LOG_INFO_BDF("DCB tagging enabled (num TC > 1)\n");
+		set_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags);
+	}
+
+	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
+		LOG_INFO_BDF("No change in DCB config required\n");
+		return ret;
+	}
+
+	if (sxe2_dcb_bw_chk(adapter, new_cfg))
+		return -EINVAL;
+
+	old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
+	if (!old_cfg)
+		return -ENOMEM;
+
+	LOG_INFO_BDF("Commit DCB Configuration to the hardware\n");
+
+	pf_vsi = adapter->vsi_ctxt.main_vsi;
+	if (!pf_vsi) {
+		LOG_ERROR_BDF("PF VSI doesn't exist\n");
+		ret = -EINVAL;
+		goto free_old_cfg;
+	}
+
+	main_vsi = adapter->vsi_ctxt.main_vsi;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (tc_cnt > SXE2_DFLT_TC_NUM &&
+	    test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags)) {
+		LOG_DEV_ERR("macvlan is enabled, can not set dcb\n");
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	if (test_bit(SXE2_VSI_S_DISABLE, main_vsi->state))
+		goto l_unlock;
+
+	/* quiesce all PF/ESW VSIs and TC rate limits before pushing the new MIB */
+	sxe2_dcb_vsis_disable_unlock(adapter);
+	LOG_INFO_BDF("set dcb vsis disable\n");
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		(void)sxe2_txsched_tc_max_bw_lmt_cfg(adapter->vsi_ctxt.main_vsi,
+						     (u8)i, SXE2_TXSCHED_DFLT_BW);
+	LOG_INFO_BDF("disable tc maxrate.\n");
+
+	(void)memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
+
+	ret = sxe2_dcb_cfg_download(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("set dcb config failed, ret=%d\n", ret);
+		ret = -EBUSY;
+		(void)memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
+		goto out;
+	}
+
+	/* download accepted: refresh the ETS scheduler and VSI mapping below */
+	
LOG_INFO_BDF("download dcb cfg\n"); + + ret = sxe2_txsched_ets_update(adapter, tc_cnt); + if (ret) + goto out; + + tc_map = sxe2_dcb_tc_bitmap_get(curr_cfg); + sxe2_dcb_vsi_cfg(adapter->vsi_ctxt.main_vsi, tc_map); + + if (adapter->dcb_ctxt.state == SXE2_DCB_STATE_READY) { + mutex_unlock(&adapter->vsi_ctxt.lock); + + ret = sxe2_rdma_aux_send_tc_change_event(adapter); + if (ret) { + LOG_ERROR_BDF("notify rdma aux failed ret:%d.\n", ret); + } + + ret = sxe2_netdev_q_cnt_set(main_vsi->netdev, main_vsi->txqs.q_cnt, + main_vsi->rxqs.q_cnt, locked); + if (ret) { + LOG_INFO_BDF("pf netdev q cnt set failed ret:%d.\n", ret); + } + mutex_lock(&adapter->vsi_ctxt.lock); + } + + if (test_bit(SXE2_VSI_S_DISABLE, main_vsi->state)) + goto l_unlock; +out: + (void)sxe2_dcb_vsis_enable_unlock(adapter); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +free_old_cfg: + for (i = 0; i < tc_cnt; i++) + (void)sxe2_txsched_tc_max_bw_lmt_cfg(adapter->vsi_ctxt.main_vsi, + (u8)i, curr_cfg->hw_bw_value[i]); + LOG_INFO_BDF("rebuild tc maxrate.\n"); + + kfree(old_cfg); + return ret; +} + +void sxe2_dcb_sw_safe_mode_cfg(struct sxe2_adapter *adapter) +{ + struct sxe2_dcbx_cfg *dcbcfg; + int i; + + dcbcfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + (void)memset(&adapter->dcb_ctxt.local_dcbx_cfg, 0, sizeof(*dcbcfg)); + + dcbcfg->ets.willing = 1; + dcbcfg->ets.maxtcs = SXE2_TC_MAX_CNT; + dcbcfg->ets.tcbw_tbl[0] = 100; + dcbcfg->ets.tsa_tbl[0] = SXE2_IEEE_TSA_ETS; + + dcbcfg->pfc.willing = 1; + dcbcfg->pfc.cap = SXE2_TC_MAX_CNT; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + dcbcfg->usr_bw_value[i] = 0; + dcbcfg->hw_bw_value[i] = SXE2_TXSCHED_DFLT_BW; + } + (void)memcpy(&adapter->dcb_ctxt.desired_dcbx_cfg, dcbcfg, sizeof(*dcbcfg)); +} + +s32 sxe2_dcb_sw_dflt_cfg(struct sxe2_adapter *adapter, bool ets_willing, bool locked) +{ + int ret; + struct sxe2_dcbx_cfg *dcbcfg; + int i; + + dcbcfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + (void)memset(&adapter->dcb_ctxt.desired_dcbx_cfg, 0, sizeof(*dcbcfg)); + + dcbcfg->ets.willing = ets_willing ? 
1 : 0;
+	dcbcfg->ets.maxtcs = SXE2_TC_MAX_CNT;
+	dcbcfg->ets.tcbw_tbl[0] = 100;
+	dcbcfg->ets.tsa_tbl[0] = SXE2_IEEE_TSA_ETS;
+
+	dcbcfg->pfc.willing = 1;
+	dcbcfg->pfc.cap = SXE2_TC_MAX_CNT;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		dcbcfg->usr_bw_value[i] = 0;
+		dcbcfg->hw_bw_value[i] = SXE2_TXSCHED_DFLT_BW;
+	}
+
+	ret = sxe2_dcb_cfg(adapter, dcbcfg, locked);
+	if (ret)
+		LOG_ERROR_BDF("Failed to set default dcb config %d\n", ret);
+
+	return ret;
+}
+
+s32 sxe2_dcb_process_lldp_set_mib_change(struct sxe2_adapter *adapter, void *buf,
+					 u32 buf_len)
+{
+	s32 ret;
+	struct sxe2_dcbx_cfg *tmp_dcbx_cfg = NULL;
+	struct sxe2_dcbx_cfg *new_dcbx_cfg = NULL;
+	struct sxe2_dcbx_cfg *remote_dcbx_cfg = NULL;
+	s32 i = 0;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+
+	if (!(test_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags))) {
+		LOG_WARN_BDF("dcb disable, lldp mib change\n");
+		return 0;
+	}
+
+	tmp_dcbx_cfg = devm_kcalloc(dev, 1, sizeof(struct sxe2_dcbx_cfg),
+				    GFP_KERNEL);
+	new_dcbx_cfg = devm_kcalloc(dev, 1, sizeof(struct sxe2_dcbx_cfg),
+				    GFP_KERNEL);
+	remote_dcbx_cfg = devm_kcalloc(dev, 1, sizeof(struct sxe2_dcbx_cfg),
+				       GFP_KERNEL);
+	if (!tmp_dcbx_cfg ||
+	    !new_dcbx_cfg ||
+	    !remote_dcbx_cfg) {
+		LOG_ERROR_BDF("no mem for dcbx cfg buffers.\n");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	memcpy(tmp_dcbx_cfg, &adapter->dcb_ctxt.local_dcbx_cfg,
+	       sizeof(struct sxe2_dcbx_cfg));
+
+	ret = sxe2_lldp_to_dcb_cfg(buf, remote_dcbx_cfg);
+	if (ret) {
+		LOG_ERROR_BDF("parse dcbx tlv failed: %d\n", ret);
+		ret = -EIO;
+		goto l_out;
+	}
+
+	(void)memcpy(&adapter->dcb_ctxt.remote_dcbx_cfg,
+		     remote_dcbx_cfg, sizeof(struct sxe2_dcbx_cfg));
+
+	(void)memcpy(&new_dcbx_cfg->ets, &remote_dcbx_cfg->etsrec,
+		     sizeof(struct sxe2_dcb_ets_cfg));
+	(void)memcpy(&new_dcbx_cfg->pfc, &remote_dcbx_cfg->pfc,
+		     sizeof(struct sxe2_dcb_pfc_cfg));
+	new_dcbx_cfg->ets.willing = 1;
+	new_dcbx_cfg->pfc.willing = 1;
+
+	if (!memcmp(new_dcbx_cfg, &adapter->dcb_ctxt.local_dcbx_cfg,
+		    sizeof(struct sxe2_dcbx_cfg))) {
+		LOG_INFO_BDF("no change detected in dcbx configuration.\n");
+		goto l_out;
+	}
+
+	adapter->dcb_ctxt.dcbx_cap = sxe2_dcb_mode_get(adapter, false);
+	sxe2_dcbnl_flush_apps(adapter, tmp_dcbx_cfg,
+			      &adapter->dcb_ctxt.local_dcbx_cfg);
+
+	/* poll until the reset completes; on success the loop exits holding
+	 * rtnl and tc_mutex, which are released after the dcb cfg below
+	 */
+	for (i = 0; i < SXE2_DCBX_WAIT_CNT; i++) {
+		rtnl_lock();
+		mutex_lock(&adapter->dcb_ctxt.tc_mutex);
+		if (adapter->dcb_ctxt.state == SXE2_DCB_STATE_READY)
+			break;
+		mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+		rtnl_unlock();
+		msleep(SXE2_DCBX_RESET_WAIT_MS);
+	}
+
+	if (i == SXE2_DCBX_WAIT_CNT) {
+		LOG_ERROR_BDF("wait reset done timeout.\n");
+		ret = -ETIMEDOUT;
+		goto l_out;
+	}
+
+	ret = sxe2_dcb_cfg(adapter, new_dcbx_cfg, true);
+	if (ret)
+		LOG_ERROR_BDF("dcb cfg in dcbx notify failed: %d.\n", ret);
+	else
+		memcpy(&adapter->dcb_ctxt.desired_dcbx_cfg,
+		       &adapter->dcb_ctxt.local_dcbx_cfg,
+		       sizeof(struct sxe2_dcbx_cfg));
+
+	mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+	rtnl_unlock();
+
+	LOG_INFO_BDF("dcbx fw agent mib change process end.\n");
+
+l_out:
+	if (tmp_dcbx_cfg)
+		devm_kfree(dev, tmp_dcbx_cfg);
+	if (new_dcbx_cfg)
+		devm_kfree(dev, new_dcbx_cfg);
+	if (remote_dcbx_cfg)
+		devm_kfree(dev, remote_dcbx_cfg);
+
+	return ret;
+}
+
+void sxe2_lldp_sw_rule_change(struct sxe2_adapter *adapter, u8 stats)
+{
+	s32 ret;
+
+	mutex_lock(&adapter->switch_ctxt.lldp_rule_lock);
+	if (stats == SXE2_LLDP_FW_AGENT_ENABLE) {
+		ret = sxe2_rx_etype_rule_del(adapter,
+					     adapter->vsi_ctxt.main_vsi->idx_in_dev,
+					     ETH_P_LLDP);
+		if (ret)
+			LOG_ERROR_BDF("fw lldp agent fail to change enable:%d\n",
+				      ret);
+
+		ret = sxe2_tx_etype_rule_add(adapter->vsi_ctxt.main_vsi, ETH_P_LLDP);
+		if (ret)
+			LOG_ERROR_BDF("fw lldp agent fail to change enable:%d\n",
+				      ret);
+
+		LOG_INFO_BDF("fw lldp agent change to enable\n");
+	} else {
+		ret = sxe2_rx_etype_rule_add(adapter->vsi_ctxt.main_vsi, ETH_P_LLDP);
+		if (ret)
+			LOG_ERROR_BDF("fw lldp agent fail to change disable:%d\n",
+				      ret);
+
+		ret = sxe2_tx_etype_rule_del(adapter,
+					     adapter->vsi_ctxt.main_vsi->idx_in_dev,
+					     ETH_P_LLDP);
+		if (ret)
+			LOG_ERROR_BDF("fw lldp agent fail to change disable:%d\n",
+				      ret);
+
+		LOG_INFO_BDF("fw lldp agent change to disable\n");
+	}
+	mutex_unlock(&adapter->switch_ctxt.lldp_rule_lock);
+}
+
+s32 sxe2_lldp_fw_agent_change(struct sxe2_adapter *adapter,
+			      void *buf, u32 buf_len)
+{
+	struct sxe2_fwc_notify_lldp_fw_agent *lldp_st = buf;
+	s32 ret = 0;
+
+	if (buf_len < sizeof(struct sxe2_fwc_notify_lldp_fw_agent)) {
+		LOG_WARN_BDF("lldp agent changed, receive bad msg.\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	sxe2_lldp_sw_rule_change(adapter, lldp_st->stats);
+
+	if (lldp_st->stats == SXE2_LLDP_FW_AGENT_DISABLE &&
+	    test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) {
+		set_bit(SXE2_FLAG_FW_DCBX_DIS_PENDING, adapter->flags);
+		sxe2_monitor_work_schedule(adapter);
+		LOG_DEV_INFO("Recv Fw lldp agent disable action, disable Fw dcbx "
+			     "agent.\n");
+	}
+
+out:
+	return ret;
+}
+
+s32 sxe2_qos_mode_set(struct sxe2_adapter *adapter, enum sxe2QosMode mode)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_QOS_MODE_SET, &mode, sizeof(mode),
+				  NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("qos mode set failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_dcbx_agent_event_init(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2_fwc_event event = {0};
+
+	event.count = 1;
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_MIB_NOTIFY);
+	ret = sxe2_fwc_event_subscribe(adapter, &event);
+	if (ret) {
+		LOG_ERROR_BDF("dcbx agent event subscribe failed, ret=%d\n", ret);
+	} else {
+		(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_MIB_NOTIFY,
+					    SXE2_CMD_EVENT_STATUS_SUB);
+	}
+
+	return ret;
+}
+
+void sxe2_dcbx_agent_event_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_fwc_event event = {0};
+
+	event.count = 1;
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_MIB_NOTIFY);
+	(void)sxe2_fwc_event_unsubscribe(adapter, &event);
+	(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_MIB_NOTIFY,
+				    SXE2_CMD_EVENT_STATUS_UNSUB);
+}
+
+s32 sxe2_lldp_agent_event_init(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2_fwc_event event = {0};
+
+	event.count = 1;
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY);
+	ret = sxe2_fwc_event_subscribe(adapter, &event);
+	if (ret)
+		LOG_ERROR_BDF("lldp agent event subscribe failed, ret=%d\n", ret);
+	else
+		(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY,
+					    SXE2_CMD_EVENT_STATUS_SUB);
+
+	return ret;
+}
+
+void sxe2_lldp_agent_event_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_fwc_event event = {0};
+
+	event.count = 1;
+	/* unsubscribe the same event that sxe2_lldp_agent_event_init() subscribed */
+	event.code[0] = cpu_to_le16(SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY);
+	(void)sxe2_fwc_event_unsubscribe(adapter, &event);
+	(void)sxe2_set_event_status(adapter, SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY,
+				    SXE2_CMD_EVENT_STATUS_UNSUB);
+}
+
+s32 sxe2_lldp_fw_agent_status_get(struct sxe2_adapter *adapter,
+				  bool *enable, u8 *direction)
+{
+	s32 ret;
+	struct sxe2_fwc_lldp_fw_agent agent = {0};
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_FW_AGENT_GET,
+				  NULL, 0, &agent, sizeof(agent));
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("get lldp fw agent status failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	*enable = (agent.status != sxe2_lldp_disabled);
+	if (direction)
+		*direction = agent.status;
+
+	return ret;
+}
+
+s32 sxe2_dcbx_fw_agent_status_get(struct sxe2_adapter *adapter, bool *isEnable)
+{
+	s32 ret;
+	struct sxe2_fwc_fw_agent agent = {0};
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_DCBX_FW_AGENT_GET, NULL, 0,
+				  &agent, sizeof(agent));
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("get dcbx fw agent status failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	*isEnable = agent.enable;
+
+	return ret;
+}
+
+s32 sxe2_dcbx_fw_agent_status_set(struct sxe2_adapter *adapter, bool isEnable)
+{
+	s32 ret;
+	struct sxe2_fwc_fw_agent agent = {0};
+	struct sxe2_cmd_params cmd = {};
+
+	agent.enable = (u8)isEnable;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, &agent,
+				  sizeof(agent), NULL, 0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("set dcbx fw agent status failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+void sxe2_dcb_stats_update(struct sxe2_adapter *adapter)
+{
+	struct sxe2_dcb_stats *dcb_stats = &adapter->pf_stats.dcb_stats;
+
+	sxe2_hw_pause_stats_update(
+		&adapter->hw, adapter->port_idx, dcb_stats->prev_stat_loaded,
+		&dcb_stats->curr_pause_stats, &dcb_stats->perv_pause_stats);
+	dcb_stats->prev_stat_loaded = true;
+}
+
+s32 sxe2_dcb_rebuild(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	u8 tc_cnt;
+	struct sxe2_dcbx_cfg *cfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+
+	mutex_lock(&adapter->dcb_ctxt.tc_mutex);
+
+	if (test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) {
+		ret = sxe2_dcbx_agent_event_init(adapter);
+		if (ret) {
+			LOG_ERROR_BDF("Listening dcbx agent change event failed %d\n",
+				      ret);
+			ret = -EIO;
+			goto dcb_error;
+		}
+
+		ret = sxe2_fw_dcbx_agent_cfg_get(adapter, &adapter->dcb_ctxt.local_dcbx_cfg);
+		if (ret)
+			LOG_ERROR_BDF("get local dcb config %d use old cfg\n", ret);
+
+		(void)memcpy(&adapter->dcb_ctxt.desired_dcbx_cfg,
+			     &adapter->dcb_ctxt.local_dcbx_cfg,
+			     sizeof(struct sxe2_dcbx_cfg));
+	}
+
+	ret = sxe2_dcb_cfg_download(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("failed to set dcb config in rebuild\n");
+		goto dcb_error;
+	}
+
+	LOG_INFO_BDF("DCB info restored\n");
+	tc_cnt = sxe2_dcb_tc_cnt_get(cfg);
+	ret = sxe2_txsched_ets_update(adapter, tc_cnt);
+	if (ret) {
+		LOG_ERROR_BDF("Query Port ets failed\n");
+		goto dcb_error;
+	}
+
+dcb_error:
+	mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+
+	return ret;
+}
+
+s32 sxe2_dcb_maxrate_rebuild(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	u8 tc_cnt;
+	struct sxe2_dcbx_cfg *cfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+	u8 i;
+
+	mutex_lock(&adapter->dcb_ctxt.tc_mutex);
+	tc_cnt = sxe2_dcb_tc_cnt_get(cfg);
+	for (i = 0; i < tc_cnt; i++) {
+		ret = sxe2_txsched_tc_max_bw_lmt_cfg(adapter->vsi_ctxt.main_vsi, i,
+						     (u32)cfg->hw_bw_value[i]);
+		if (ret) {
+			LOG_ERROR_BDF("dcb set tc %d maxrate %u failed %d.\n",
+				      i, cfg->hw_bw_value[i], ret);
+			goto l_end;
+		}
+		LOG_INFO_BDF("dcb rebuild tc_%d max_bw %u kbps\n",
+			     i, cfg->hw_bw_value[i]);
+	}
+
+l_end:
+	mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+
+	return ret;
+}
+
+static s32 sxe2_dcbx_change2host(struct sxe2_adapter *adapter, bool locked)
+{
+	s32 ret = 0;
+
+	clear_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags);
+	ret = sxe2_qos_mode_set(adapter, SXE2_QOS_MODE_VLAN);
+	if (ret) {
+		LOG_ERROR_BDF("failed to set vlan pfc mode\n");
+		goto out;
+	}
+
+	ret = sxe2_dcb_sw_dflt_cfg(adapter, true, locked);
+	if (ret) {
+		LOG_ERROR_BDF("Failed to set local dcb config %d\n", ret);
+		ret = -EIO;
+		goto out;
+	}
+
+	adapter->dcb_ctxt.dcbx_cap = sxe2_dcb_mode_get(adapter, true);
+out:
+	return ret;
+}
+
+s32 sxe2_dcbx_agent_enable(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	bool lldp_enabled = false;
+	u8 dir;
+
+	if (adapter->dcb_ctxt.local_dcbx_cfg.qos_mode == SXE2_QOS_MODE_DSCP) {
+		LOG_DEV_ERR("QOS in L3 DSCP mode, "
+			    "FW Agent not allowed to start\n");
+		ret = -EOPNOTSUPP;
+		goto l_out;
+	}
+
+	ret = sxe2_lldp_fw_agent_status_get(adapter, &lldp_enabled, &dir);
+	if (ret) {
+		LOG_ERROR_BDF("dcbx lldp st get failed:%d\n", ret);
+		goto l_out;
+	} else if (!lldp_enabled || dir != sxe2_lldp_enabled_rx_tx) {
+		LOG_DEV_ERR("Lldp agent is not enabled or "
+			    "Fw Lldp Agent work mode is not TXRX.\n");
+		ret = -EOPNOTSUPP;
+		goto l_out;
+	}
+
+	ret = sxe2_dcbx_fw_agent_status_set(adapter, true);
+	if (ret) {
+		LOG_ERROR_BDF("start fw dcbx agent failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_out;
+	}
+
+	ret = sxe2_dcb_init(adapter, true);
+	if (ret) {
+		LOG_ERROR_BDF("dcb init after fw dcbx agent start failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+s32 sxe2_dcbx_agent_disable(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+
+	sxe2_dcbx_agent_event_deinit(adapter);
+
+	ret = sxe2_dcbx_fw_agent_status_set(adapter, false);
+	if (ret) {
+		LOG_ERROR_BDF("stop fw dcbx agent failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_out;
+	}
+
+	ret = sxe2_dcb_init(adapter, true);
+	if (ret) {
+		LOG_ERROR_BDF("dcb re-init after fw dcbx agent stop failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_out;
+	}
+
+l_out:
+	return ret;
+}
+
+static s32 sxe2_dcbx_change2fw(struct sxe2_adapter *adapter, bool locked)
+{
+	s32 ret = 0;
+
+	ret = sxe2_dcbx_agent_event_init(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("Listening dcbx agent "
+			      "change event failed %d\n", ret);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = sxe2_fw_dcbx_agent_cfg_get(adapter, &adapter->dcb_ctxt.desired_dcbx_cfg);
+	if (ret) {
+		LOG_ERROR_BDF("get local dcb config %d\n", ret);
+		ret = -EIO;
+		goto out;
+	}
+
+	(void)sxe2_dcb_cfg(adapter,
+			   &adapter->dcb_ctxt.desired_dcbx_cfg, locked);
+
+	adapter->dcb_ctxt.dcbx_cap = sxe2_dcb_mode_get(adapter, false);
+
+	set_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags);
+
+out:
+	return ret;
+}
+
+s32 sxe2_dcb_init(struct sxe2_adapter *adapter, bool locked)
+{
+	s32 ret = 0;
+	bool dcbx_enable;
+
+	mutex_lock(&adapter->dcb_ctxt.tc_mutex);
+	if (adapter->dcb_ctxt.state == SXE2_DCB_STATE_RESET) {
+		/* a reset is in flight: skip re-init, ret stays 0 */
+		LOG_ERROR_BDF("dcb is reset state.\n");
+		goto dcb_init_err;
+	}
+
+	ret = sxe2_dcbx_fw_agent_status_get(adapter, &dcbx_enable);
+	if (ret) {
+		LOG_ERROR_BDF("dcbx fw agent status get failed, ret=%d\n", ret);
+		goto dcb_init_err;
+	}
+
+	if (!dcbx_enable)
+		ret = sxe2_dcbx_change2host(adapter, locked);
+	else
+		ret = sxe2_dcbx_change2fw(adapter, locked);
+	if (ret) {
+		LOG_ERROR_BDF("dcbx init failed, ret=%d\n", ret);
+		goto dcb_init_err;
+	}
+
+	if (adapter->dcb_ctxt.state == SXE2_DCB_STATE_UNINIT)
+		adapter->dcb_ctxt.state = SXE2_DCB_STATE_READY;
+
+	mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+	LOG_INFO_BDF("DCB init success\n");
+
+	return 0;
+
+dcb_init_err:
+	mutex_unlock(&adapter->dcb_ctxt.tc_mutex);
+	LOG_ERROR_BDF("DCB init failed\n");
+
+	return ret;
+}
+
+void sxe2_dcb_deinit(struct sxe2_adapter *adapter, 
bool locked) +{ + + rtnl_lock(); + (void)sxe2_dcbx_agent_disable(adapter); + rtnl_unlock(); + + adapter->dcb_ctxt.state = SXE2_DCB_STATE_UNINIT; + + LOG_INFO_BDF("DCB deinit success\n"); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.h new file mode 100644 index 0000000000000000000000000000000000000000..4ea9c89da6b788bccb5c278ada0f532dd64f7ee4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_dcb.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DCB_H__ +#define __SXE2_DCB_H__ + +#include +#include +#include "sxe2_cmd.h" +#include "sxe2_drv_aux.h" + +#define SXE2_DFLT_TC_NUM 1 +#define SXE2_DFLT_TC_BITMAP BIT(0) +#define SXE2_BYTES_PER_DSCP_VAL 8 + +#define SXE2_TC_MAX_BW 100 +#define SXE2_DCB_HW_CHG_RST 0 +#define SXE2_DCB_NO_HW_CHG 1 +#define SXE2_DCB_HW_CHG 2 + +#define SXE2_IEEE_TSA_STRICT 0 +#define SXE2_IEEE_TSA_ETS 2 + +#define SXE2_DCBX_MODE_CEE 0x1 +#define SXE2_DCBX_MODE_IEEE 0x2 +#define SXE2_DSCP_NUM_VAL 64 +#define SXE2_DCBX_APPS_NON_WILLING 0x1 + +enum sxe2_dcb_state { + SXE2_DCB_STATE_UNINIT = 0, + SXE2_DCB_STATE_READY, + SXE2_DCB_STATE_RESET, + SXE2_DCB_STATE_MAX, +}; + +struct sxe2_dcb_ets_cfg { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prio_tbl[IEEE_8021Q_MAX_PRIORITIES]; + u8 tcbw_tbl[IEEE_8021QAZ_MAX_TCS]; + u8 tsa_tbl[IEEE_8021QAZ_MAX_TCS]; +}; + +struct sxe2_dcb_pfc_cfg { + u8 willing; + u8 mbc; + u8 cap; + u8 enable; +}; + +struct sxe2_lfc_context { + u8 tx_en; + u8 rx_en; + u8 recv[2]; +}; + +struct sxe2_dcb_app_prio_tbl { + u16 prot_id; + + u8 prio; + + u8 selector; +}; + +struct sxe2_dcbx_cfg { + u8 dcbx_mode; + + struct sxe2_dcb_ets_cfg ets; + struct sxe2_dcb_ets_cfg etsrec; + struct sxe2_dcb_pfc_cfg pfc; + + u8 qos_mode; + + u32 numapps; + u8 app_mode; + DECLARE_BITMAP(dscp_mapped, SXE2_DSCP_NUM_VAL); + u8 dscp_map[SXE2_DSCP_NUM_VAL]; + struct sxe2_dcb_app_prio_tbl app[SXE2_DCBX_MAX_APPS]; + u64 usr_bw_value[IEEE_8021QAZ_MAX_TCS]; + u32 hw_bw_value[IEEE_8021QAZ_MAX_TCS]; +}; + +struct sxe2_dcb_context { + struct sxe2_dcbx_cfg local_dcbx_cfg; + struct sxe2_dcbx_cfg desired_dcbx_cfg; + struct sxe2_dcbx_cfg remote_dcbx_cfg; + enum sxe2_dcb_state state; + u8 dcbx_cap; + struct mutex tc_mutex; +}; + +void sxe2_dcb_set_state(struct sxe2_adapter *adapter, + enum sxe2_dcb_state state, bool need_lock); + +u8 sxe2_dcb_tc_bitmap_get(struct sxe2_dcbx_cfg *dcbcfg); + +u8 sxe2_dcb_tc_cnt_get(struct sxe2_dcbx_cfg *dcbcfg); + +s32 sxe2_dcb_bw_chk(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *dcbcfg); + +void sxe2_dcb_sw_safe_mode_cfg(struct sxe2_adapter *adapter); + +s32 sxe2_dcb_sw_dflt_cfg(struct sxe2_adapter *adapter, + bool ets_willing, bool locked); + +s32 sxe2_dcb_cfg(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *new_cfg, bool locked); + +s32 sxe2_qos_mode_set(struct sxe2_adapter *adapter, enum sxe2QosMode mode); + +s32 sxe2_dcbx_fw_agent_status_set(struct sxe2_adapter *adapter, bool isEnable); + +void sxe2_dcb_stats_update(struct sxe2_adapter *adapter); + +s32 sxe2_dcb_rebuild(struct sxe2_adapter *adapter); + +s32 sxe2_dcb_maxrate_rebuild(struct sxe2_adapter *adapter); + +s32 sxe2_dcbx_agent_enable(struct sxe2_adapter *adapter); + +s32 sxe2_dcbx_agent_disable(struct sxe2_adapter *adapter); + +s32 sxe2_dcb_init(struct sxe2_adapter *adapter, bool locked); + +void 
sxe2_dcb_deinit(struct sxe2_adapter *adapter, bool locked); + +s32 sxe2_dcb_lldp_mib_cfg(struct sxe2_adapter *adapter, + struct sxe2_fwc_local_mib_set *mib); + +void sxe2_dcbx_agent_event_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_lldp_agent_event_init(struct sxe2_adapter *adapter); + +void sxe2_lldp_agent_event_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_lldp_fw_agent_status_get(struct sxe2_adapter *adapter, + bool *isEnable, u8 *direction); + +s32 sxe2_dcbx_fw_agent_status_get(struct sxe2_adapter *adapter, bool *isEnable); + +void sxe2_lldp_sw_rule_change(struct sxe2_adapter *adapter, u8 stats); + +s32 sxe2_dcb_process_lldp_set_mib_change(struct sxe2_adapter *adapter, + void *buf, u32 buf_len); + +s32 sxe2_lldp_fw_agent_change(struct sxe2_adapter *adapter, + void *buf, u32 buf_len); + +s32 sxe2_dcbx_agent_event_init(struct sxe2_adapter *adapter); + +void sxe2_setup_dcb_qos_info(struct sxe2_adapter *adapter, + struct aux_qos_params *qos_info); + +void sxe2_vsi_netdev_tc_cfg(struct sxe2_vsi *vsi, u8 tc_bitmap); + +s32 sxe2_fc_get(struct sxe2_adapter *adapter, u16 vsi_id, u8 *fc); + +void sxe2_set_fc_flag(struct sxe2_vsi *vsi, bool on); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.c new file mode 100644 index 0000000000000000000000000000000000000000..eee29baa13615e5c854148f3b70fd992ecb49816 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.c @@ -0,0 +1,1111 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_dcb_nl.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_netdev.h" +#include "sxe2_dcb_nl.h" +#include + +#define SXE2_1K (1000U) +#define SXE2_MINTC_RATE (62500ULL) + +#define SXE2_B2KBIT(rate) (rate * BITS_PER_BYTE / SXE2_1K) + +static int sxe2_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_dcbx_cfg *dcbxcfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + ets->willing = dcbxcfg->ets.willing; + ets->ets_cap = dcbxcfg->ets.maxtcs; + ets->cbs = dcbxcfg->ets.cbs; + (void)memcpy(ets->tc_tx_bw, dcbxcfg->ets.tcbw_tbl, sizeof(ets->tc_tx_bw)); + (void)memcpy(ets->tc_rx_bw, dcbxcfg->ets.tcbw_tbl, sizeof(ets->tc_rx_bw)); + (void)memcpy(ets->tc_tsa, dcbxcfg->ets.tsa_tbl, sizeof(ets->tc_tsa)); + (void)memcpy(ets->prio_tc, dcbxcfg->ets.prio_tbl, sizeof(ets->prio_tc)); + (void)memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbw_tbl, + sizeof(ets->tc_reco_bw)); + (void)memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsa_tbl, + sizeof(ets->tc_reco_tsa)); + (void)memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prio_tbl, + sizeof(ets->reco_prio_tc)); + + return 0; +} + +static int sxe2_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) +{ + int ret, i; + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto l_unlock; + } + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + ret = -EINVAL; + goto l_unlock; + } + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + new_cfg->ets.willing = ets->willing; + 
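/* stage the requested IEEE ETS attributes in the desired config before validating */
+ 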
new_cfg->ets.cbs = ets->cbs; + + sxe2_for_each_tc(i) + { + new_cfg->ets.tcbw_tbl[i] = ets->tc_tx_bw[i]; + new_cfg->ets.tsa_tbl[i] = ets->tc_tsa[i]; + new_cfg->ets.prio_tbl[i] = ets->prio_tc[i]; + } + + if (sxe2_dcb_bw_chk(adapter, new_cfg)) { + ret = -EINVAL; + goto l_cfg_err; + } + + new_cfg->ets.maxtcs = IEEE_8021QAZ_MAX_TCS; + + LOG_INFO_BDF("ets cfg, willing=%d, cbs=%d, maxtcs=%d\n" + "prio[0]=%d, tcbw[0]=%d, tsa_tbl[0]=%d\n" + "prio[1]=%d, tcbw[1]=%d, tsa_tbl[1]=%d\n" + "prio[2]=%d, tcbw[2]=%d, tsa_tbl[2]=%d\n" + "prio[3]=%d, tcbw[3]=%d, tsa_tbl[3]=%d\n" + "prio[4]=%d, tcbw[4]=%d, tsa_tbl[4]=%d\n" + "prio[5]=%d, tcbw[5]=%d, tsa_tbl[5]=%d\n" + "prio[6]=%d, tcbw[6]=%d, tsa_tbl[6]=%d\n" + "prio[7]=%d, tcbw[7]=%d, tsa_tbl[7]=%d\n", + new_cfg->ets.willing, new_cfg->ets.cbs, new_cfg->ets.maxtcs, + new_cfg->ets.prio_tbl[0], new_cfg->ets.tcbw_tbl[0], + new_cfg->ets.tsa_tbl[0], new_cfg->ets.prio_tbl[1], + new_cfg->ets.tcbw_tbl[1], new_cfg->ets.tsa_tbl[1], + new_cfg->ets.prio_tbl[2], new_cfg->ets.tcbw_tbl[2], + new_cfg->ets.tsa_tbl[2], new_cfg->ets.prio_tbl[3], + new_cfg->ets.tcbw_tbl[3], new_cfg->ets.tsa_tbl[3], + new_cfg->ets.prio_tbl[4], new_cfg->ets.tcbw_tbl[4], + new_cfg->ets.tsa_tbl[4], new_cfg->ets.prio_tbl[5], + new_cfg->ets.tcbw_tbl[5], new_cfg->ets.tsa_tbl[5], + new_cfg->ets.prio_tbl[6], new_cfg->ets.tcbw_tbl[6], + new_cfg->ets.tsa_tbl[6], new_cfg->ets.prio_tbl[7], + new_cfg->ets.tcbw_tbl[7], new_cfg->ets.tsa_tbl[7]); + + ret = sxe2_dcb_cfg(adapter, new_cfg, true); +l_cfg_err: + if (ret) + (void)memcpy(new_cfg, &adapter->dcb_ctxt.local_dcbx_cfg, + sizeof(*new_cfg)); +l_unlock: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + return ret; +} + +static int sxe2_dcbnl_getnumtcs(struct net_device *netdev, int __always_unused tcid, + u8 *num) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (!test_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags)) + return -EINVAL; + + *num = IEEE_8021QAZ_MAX_TCS; + return 0; +} + +static u8 sxe2_dcbnl_getdcbx(struct net_device *netdev) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + return adapter->dcb_ctxt.dcbx_cap; +} + +static u8 sxe2_dcbnl_setdcbx(struct net_device *netdev, u8 mode) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_dcb_context *dcb_ctxt; + + if (test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) + return SXE2_DCB_NO_HW_CHG; + + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return SXE2_DCB_NO_HW_CHG; + + if (mode == adapter->dcb_ctxt.dcbx_cap) + return SXE2_DCB_NO_HW_CHG; + + dcb_ctxt = &adapter->dcb_ctxt; + + if (dcb_ctxt->local_dcbx_cfg.qos_mode == SXE2_QOS_MODE_DSCP) { + LOG_INFO_BDF("dcbx cfg, " + "dscp configuration is not dcbx negotiated\n"); + return SXE2_DCB_NO_HW_CHG; + } + + dcb_ctxt->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) + dcb_ctxt->local_dcbx_cfg.dcbx_mode = SXE2_DCBX_MODE_CEE; + else + dcb_ctxt->local_dcbx_cfg.dcbx_mode = SXE2_DCBX_MODE_IEEE; + + LOG_INFO_BDF("DCBx mode = 0x%x\n", mode); + return SXE2_DCB_HW_CHG_RST; +} + +static void sxe2_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) +{ + (void)memset(perm_addr, 0xff, MAX_ADDR_LEN); + +} + +static int sxe2_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + s32 i; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter 
*adapter = np->vsi->adapter; + struct sxe2_dcbx_cfg *dcbxcfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + pfc->pfc_cap = dcbxcfg->pfc.cap; + pfc->pfc_en = dcbxcfg->pfc.enable; + pfc->mbc = dcbxcfg->pfc.mbc; + + sxe2_for_each_tc(i) + { + pfc->requests[i] = adapter->pf_stats.dcb_stats.curr_pause_stats + .prio_xoff_tx[i]; + pfc->indications[i] = adapter->pf_stats.dcb_stats.curr_pause_stats + .prio_xoff_rx[i]; + } + + return 0; +} + +static void sxe2_pfc_cfg_recover(struct sxe2_adapter *adapter) +{ + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct sxe2_vsi *dpdk_vsi = NULL; + bool fc = false; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + (void)memcpy(new_cfg, &adapter->dcb_ctxt.local_dcbx_cfg, + sizeof(*new_cfg)); + + mutex_lock(&adapter->vsi_ctxt.lock); + dpdk_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + + if (new_cfg->pfc.enable) + fc = true; + + if (dpdk_vsi) + sxe2_set_fc_flag(dpdk_vsi, fc); + sxe2_set_fc_flag(vsi, fc); + + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +static int sxe2_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + int ret; + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vsi *dpdk_vsi = NULL; + bool fc = false; + bool changed = false; + u8 old_fc = SXE2_FC_MODE_DISABLE; + u8 new_fc = SXE2_FC_MODE_DISABLE; + + (void)sxe2_fc_get(adapter, vsi->idx_in_dev, &old_fc); + + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto l_unlock; + } + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + ret = -EINVAL; + goto l_unlock; + } + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + if (pfc->pfc_cap) + new_cfg->pfc.cap = pfc->pfc_cap; + else + new_cfg->pfc.cap = IEEE_8021QAZ_MAX_TCS; + + new_cfg->pfc.enable = pfc->pfc_en; + if (new_cfg->pfc.enable) + fc = true; + + mutex_lock(&adapter->vsi_ctxt.lock); + dpdk_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (dpdk_vsi) + sxe2_set_fc_flag(dpdk_vsi, fc); + sxe2_set_fc_flag(vsi, fc); + mutex_unlock(&adapter->vsi_ctxt.lock); + + LOG_INFO_BDF("pfc cfg, willing=%d, cap=%d, enable=%d\n", + new_cfg->pfc.willing, new_cfg->pfc.cap, new_cfg->pfc.enable); + + ret = sxe2_dcb_cfg(adapter, new_cfg, true); + if (ret) { + sxe2_pfc_cfg_recover(adapter); + LOG_INFO_BDF("pfc cfg, failed ret=%d\n", ret); + } + +l_unlock: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + + (void)sxe2_fc_get(adapter, vsi->idx_in_dev, &new_fc); + if (new_fc != old_fc) + changed = true; + + if (changed && ret == 0) + (void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt, + SXE2_COM_FC_ST_CHANGE); + + return ret; +} + +static int sxe2_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct dcb_app app = {.selector = idtype, .protocol = id}; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + return dcb_getapp(netdev, &app); +} + +STATIC bool sxe2_dcbnl_find_app(struct sxe2_dcbx_cfg *cfg, + struct sxe2_dcb_app_prio_tbl *app) +{ + u32 i; + + for (i = 0; i < cfg->numapps; i++) { + if (app->selector == cfg->app[i].selector && + app->prot_id == cfg->app[i].prot_id && + app->prio == 
cfg->app[i].prio) + return true; + } + + return false; +} + +static int sxe2_dcbnl_setapp_recover(struct net_device *netdev, struct dcb_app *app) +{ + int ret; + unsigned int i, j; + struct sxe2_dcbx_cfg *old_cfg, *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + goto delapp_out; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + old_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + for (i = 0; i < new_cfg->numapps; i++) { + if (app->selector == new_cfg->app[i].selector && + app->protocol == new_cfg->app[i].prot_id && + app->priority == new_cfg->app[i].prio) { + new_cfg->app[i].selector = 0; + new_cfg->app[i].prot_id = 0; + new_cfg->app[i].prio = 0; + break; + } + } + + new_cfg->numapps--; + for (j = i; j < new_cfg->numapps; j++) { + new_cfg->app[j].selector = old_cfg->app[j + 1].selector; + new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[j].prio = old_cfg->app[j + 1].prio; + } + memset(&new_cfg->app[j], 0, sizeof(struct sxe2_dcb_app_prio_tbl)); + + clear_bit(app->protocol, new_cfg->dscp_mapped); + + if (bitmap_empty(new_cfg->dscp_mapped, SXE2_DSCP_NUM_VAL) && + new_cfg->qos_mode == SXE2_QOS_MODE_DSCP) { + ret = sxe2_qos_mode_set(adapter, SXE2_QOS_MODE_VLAN); + if (ret) { + LOG_NETDEV_ERR("failed to set VLAN PFC mode %d\n", ret); + goto delapp_out; + } + + LOG_DEV_INFO("switched QoS to L2 VLAN mode\n"); + + new_cfg->qos_mode = SXE2_QOS_MODE_VLAN; + } + +delapp_out: + return ret; +} + +static int sxe2_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) +{ + s32 ret, rtn; + u8 max_tc; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_dcbx_cfg *old_cfg, *new_cfg; + + LOG_INFO_BDF("addapp: app:%d up %d\n", app->protocol, app->priority); + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto l_unlock; + } + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) { + LOG_NETDEV_ERR("only support dscp mode set app\n"); + ret = -EINVAL; + goto l_unlock; + } + + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + LOG_NETDEV_ERR("can't do DSCP QoS when FW DCB agent active\n"); + ret = -EINVAL; + goto l_unlock; + } + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + LOG_NETDEV_ERR("only support set app in dscp mode\n"); + ret = -EINVAL; + goto l_unlock; + } + + if (app->protocol >= SXE2_DSCP_NUM_VAL) { + LOG_NETDEV_ERR("DSCP value 0x%04X out of range\n", app->protocol); + ret = -EINVAL; + goto l_unlock; + } + + max_tc = IEEE_8021QAZ_MAX_TCS; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + old_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + if (app->priority >= IEEE_8021Q_MAX_PRIORITIES || + old_cfg->ets.prio_tbl[app->priority] >= max_tc) { + LOG_NETDEV_ERR("tc %d out of range, max tc %d\n", app->priority, + max_tc); + ret = -EINVAL; + goto l_unlock; + } + + if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) { + ret = -EINVAL; + LOG_NETDEV_ERR("DSCP value 0x%04X already user mapped\n", + app->protocol); + goto l_unlock; + } + + if (old_cfg->qos_mode == SXE2_QOS_MODE_VLAN) { + ret = sxe2_qos_mode_set(adapter, SXE2_QOS_MODE_DSCP); + if (ret) { + clear_bit(app->protocol, new_cfg->dscp_mapped); + ret = dcb_ieee_delapp(netdev, app); + if (ret) + LOG_NETDEV_ERR("Failed to delete re-mapping TLV\n"); + + ret = -EIO; + LOG_NETDEV_ERR("Failed to set DSCP PFC mode %d\n", ret); 
+ goto l_unlock; + } + + LOG_DEV_INFO("switched QoS to L3 DSCP mode\n"); + + new_cfg->qos_mode = SXE2_QOS_MODE_DSCP; + + (void)memset(new_cfg->dscp_map, 0, sizeof(u8) * SXE2_DSCP_MAX_NUM); + } + + new_cfg->dscp_map[app->protocol] = app->priority; + new_cfg->app[new_cfg->numapps].selector = app->selector; + new_cfg->app[new_cfg->numapps].prot_id = app->protocol; + new_cfg->app[new_cfg->numapps].prio = app->priority; + new_cfg->numapps++; + + ret = dcb_ieee_setapp(netdev, app); + if (ret) { + memcpy(new_cfg, old_cfg, sizeof(struct sxe2_dcbx_cfg)); + LOG_NETDEV_ERR("set app failed, ret=%d\n", ret); + goto l_unlock; + } + + ret = sxe2_dcb_cfg(adapter, new_cfg, true); + if (ret) { + rtn = sxe2_dcbnl_setapp_recover(netdev, app); + if (rtn) { + LOG_NETDEV_ERR("Failed to delete re-mapping TLV %d\n", rtn); + goto l_unlock; + } + } + +l_unlock: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + return ret; +} + +static int sxe2_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) +{ + int ret; + unsigned int i, j; + struct sxe2_dcbx_cfg *old_cfg, *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + LOG_INFO_BDF("delapp: app:%d up %d\n", app->protocol, app->priority); + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto l_unlock; + } + + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + LOG_NETDEV_ERR("can't delete DSCP netlink app\t" + "when FW DCB agent is active\n"); + ret = -EINVAL; + goto l_unlock; + } + + old_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + ret = dcb_ieee_delapp(netdev, app); + if (ret) + goto l_unlock; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + for (i = 0; i < new_cfg->numapps; i++) { + if (app->selector == new_cfg->app[i].selector && + app->protocol == new_cfg->app[i].prot_id && + app->priority == new_cfg->app[i].prio) { + new_cfg->app[i].selector = 0; + new_cfg->app[i].prot_id = 0; + new_cfg->app[i].prio = 0; + break; + } + } + + if (i == new_cfg->numapps) { + LOG_NETDEV_ERR("can't delete DSCP netlink app: does not exist.\n"); + ret = -EINVAL; + goto l_unlock; + } + + new_cfg->numapps--; + for (j = i; j < new_cfg->numapps; j++) { + new_cfg->app[j].selector = old_cfg->app[j + 1].selector; + new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[j].prio = old_cfg->app[j + 1].prio; + } + memset(&new_cfg->app[j], 0, sizeof(struct sxe2_dcb_app_prio_tbl)); + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) { + ret = 0; + goto l_unlock; + } + + clear_bit(app->protocol, new_cfg->dscp_mapped); + new_cfg->dscp_map[app->protocol] = 0; + + if (bitmap_empty(new_cfg->dscp_mapped, SXE2_DSCP_NUM_VAL) && + new_cfg->qos_mode == SXE2_QOS_MODE_DSCP) { + ret = sxe2_qos_mode_set(adapter, SXE2_QOS_MODE_VLAN); + if (ret) { + LOG_NETDEV_ERR("failed to set VLAN PFC mode %d\n", ret); + goto l_unlock; + } + + LOG_DEV_INFO("switched QoS to L2 VLAN mode\n"); + + new_cfg->qos_mode = SXE2_QOS_MODE_VLAN; + } + + ret = sxe2_dcb_cfg(adapter, new_cfg, true); + if (ret) + LOG_INFO_BDF("del app cfg failed:%d\n", ret); + +l_unlock: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + return ret; +} + +static int sxe2_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_dcbx_cfg *cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + int i; + + for (i = 
0; i < IEEE_8021QAZ_MAX_TCS; i++) { + maxrate->tc_maxrate[i] = cfg->usr_bw_value[i]; + LOG_INFO_BDF("dcb get tc_%d max_bw %llu bps\n", i, + maxrate->tc_maxrate[i]); + } + + return 0; +} + +static int sxe2_maxrate_param_check(struct sxe2_adapter *adapter, + struct ieee_maxrate *maxrate) +{ + u64 max_tx_rate = 0; + int ret = 0; + int i; + + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto l_end; + } + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + ret = -EINVAL; + goto l_end; + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (maxrate->tc_maxrate[i] == 0) + continue; + + max_tx_rate = SXE2_B2KBIT(maxrate->tc_maxrate[i]); + if (max_tx_rate > (SXE2_TXSCHED_MAX_BW)) { + LOG_ERROR_BDF("invalid max rate %llu specified for the tc %d\n", + max_tx_rate, i); + ret = -EINVAL; + goto l_end; + } + } + +l_end: + return ret; +} + +static void sxe2_dcb_maxrate_cfg(u64 *usr_rate, u32 *hw_rate, u64 maxrate) +{ + if (!maxrate) { + *hw_rate = SXE2_TXSCHED_DFLT_BW; + *usr_rate = 0; + } else if (maxrate > SXE2_MINTC_RATE) { + *hw_rate = (u32)SXE2_B2KBIT(maxrate); + *usr_rate = maxrate; + } else { + *hw_rate = (u32)SXE2_B2KBIT(SXE2_MINTC_RATE); + *usr_rate = SXE2_MINTC_RATE; + } +} + +static int sxe2_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + int ret, rc; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_dcbx_cfg *new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + struct sxe2_dcbx_cfg *local_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + int i; + u8 tc_cnt; + + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + ret = sxe2_maxrate_param_check(adapter, maxrate); + if (ret) + goto l_unlock; + + memset(new_cfg->hw_bw_value, 0, sizeof(new_cfg->hw_bw_value)); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + sxe2_dcb_maxrate_cfg(&new_cfg->usr_bw_value[i], + &new_cfg->hw_bw_value[i], maxrate->tc_maxrate[i]); + + tc_cnt = sxe2_dcb_tc_cnt_get(new_cfg); + for (i = 0; i < tc_cnt; i++) { + if (new_cfg->hw_bw_value[i] != local_cfg->hw_bw_value[i]) { + ret = sxe2_txsched_tc_max_bw_lmt_cfg(vsi, (u8)i, + (u32)new_cfg->hw_bw_value[i]); + if (ret) { + LOG_ERROR_BDF("dcb set tc %d maxrate %u kbps failed " + "%d.\n", + i, new_cfg->hw_bw_value[i], ret); + goto l_recover; + } + } + LOG_INFO_BDF("dcb set tc_%d max_bw %u kbps\n", i, + new_cfg->hw_bw_value[i]); + } + + memcpy(local_cfg, new_cfg, sizeof(*local_cfg)); + goto l_unlock; + +l_recover: + for (; i >= 0; i--) { + rc = sxe2_txsched_tc_max_bw_lmt_cfg(vsi, (u8)i, + local_cfg->hw_bw_value[i]); + if (rc) { + ret = rc; + LOG_ERROR_BDF("dcb recover tc %d maxrate %u kbps failed %d.\n", + i, local_cfg->hw_bw_value[i], ret); + } + } + memcpy(new_cfg, local_cfg, sizeof(*local_cfg)); + +l_unlock: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + return ret; +} + +static void sxe2_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= IEEE_8021Q_MAX_PRIORITIES) + return; + + *setting = (adapter->dcb_ctxt.local_dcbx_cfg.pfc.enable >> prio) & 0x1; + LOG_INFO_BDF("get pfc config up=%d, setting=%d, pfc enable=0x%x\n", prio, + *setting, adapter->dcb_ctxt.local_dcbx_cfg.pfc.enable); +} + +static void 
sxe2_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) +{ + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= IEEE_8021Q_MAX_PRIORITIES) + return; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + new_cfg->pfc.cap = IEEE_8021QAZ_MAX_TCS; + if (set) + new_cfg->pfc.enable |= BIT(prio); + else + new_cfg->pfc.enable &= ~BIT(prio); + + LOG_INFO_BDF("set pfc config up:%d set:%d pfcena:0x%x\n", prio, set, + new_cfg->pfc.enable); +} + +static u8 sxe2_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (adapter->dcb_ctxt.local_dcbx_cfg.pfc.enable) + return 1; + + return 0; +} + +static u8 sxe2_dcbnl_getstate(struct net_device *netdev) +{ + u8 state = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + state = (u8)test_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags); + + LOG_INFO_BDF("dcb enabled state = %d\n", state); + + return state; +} + +static u8 sxe2_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return SXE2_DCB_NO_HW_CHG; + + if (!!state == test_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags)) + return SXE2_DCB_NO_HW_CHG; + + if (state) { + set_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags); + (void)memcpy(&adapter->dcb_ctxt.desired_dcbx_cfg, + &adapter->dcb_ctxt.local_dcbx_cfg, + sizeof(struct sxe2_dcbx_cfg)); + } else { + clear_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags); + } + + return SXE2_DCB_HW_CHG; +} + +static void sxe2_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio, + u8 __always_unused *prio_type, u8 *pgid, + u8 __always_unused *bw_pct, + u8 __always_unused *up_map) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= IEEE_8021Q_MAX_PRIORITIES || prio < 0) + return; + + *pgid = adapter->dcb_ctxt.local_dcbx_cfg.ets.prio_tbl[prio]; + LOG_INFO_BDF("get pg config prio=%d tc=%d\n", prio, *pgid); +} + +static void sxe2_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 __always_unused prio_type, + u8 __always_unused bwg_id, + u8 __always_unused bw_pct, u8 up_map) +{ + u32 i; + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (tc >= IEEE_8021QAZ_MAX_TCS || tc < 0) + return; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + sxe2_for_each_tc(i) + if (up_map & BIT(i)) + new_cfg->ets.prio_tbl[i] = (u8)tc; + + new_cfg->ets.tsa_tbl[tc] = SXE2_IEEE_TSA_ETS; +} + +static void sxe2_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, + u8 *bw_pct) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) 
|| + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (pgid >= IEEE_8021QAZ_MAX_TCS || pgid < 0) + return; + + *bw_pct = adapter->dcb_ctxt.local_dcbx_cfg.ets.tcbw_tbl[pgid]; + LOG_INFO_BDF("get pg bw config tc=%d bw_pct=%d\n", pgid, *bw_pct); +} + +static void sxe2_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, + u8 bw_pct) +{ + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (pgid >= IEEE_8021QAZ_MAX_TCS || pgid < 0) + return; + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + new_cfg->ets.tcbw_tbl[pgid] = bw_pct; + LOG_INFO_BDF("set pg bw config tc=%d bw_pct=%d\n", pgid, bw_pct); +} + +static void sxe2_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, + u8 __always_unused *prio_type, u8 *pgid, + u8 __always_unused *bw_pct, + u8 __always_unused *up_map) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= IEEE_8021Q_MAX_PRIORITIES || prio < 0) + return; + + *pgid = adapter->dcb_ctxt.local_dcbx_cfg.ets.prio_tbl[prio]; + LOG_INFO_BDF("get up=%d config tc=%d\n", prio, *pgid); +} + +static void +sxe2_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int __always_unused prio, + u8 __always_unused prio_type, u8 __always_unused pgid, + u8 __always_unused bw_pct, u8 __always_unused up_map) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + LOG_INFO_BDF("rx tc pg config not supported.\n"); +} + +static void sxe2_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, + int __always_unused pgid, u8 *bw_pct) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + *bw_pct = 0; +} + +static void sxe2_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, + int __always_unused pgid, + u8 __always_unused bw_pct) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + LOG_INFO_BDF("rx bwg pg config not supported.\n"); +} + +static u8 sxe2_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (!(test_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags))) + return SXE2_DCB_NO_HW_CHG; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcb_ctxt.dcbx_cap; + break; + default: + *cap = false; + break; + } + + LOG_INFO_BDF("dcbx get capability cap_mode=%d cap=0x%x\n", capid, *cap); + return 0; +} + +static u8 sxe2_dcbnl_cee_set_all(struct net_device *netdev) +{ + s32 ret = 0; + struct sxe2_dcbx_cfg *new_cfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi 
= np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->dcb_ctxt.tc_mutex); + if (adapter->dcb_ctxt.state != SXE2_DCB_STATE_READY) { + ret = -EBUSY; + goto no_change; + } + + if ((adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) { + goto no_change; + } + + new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + + ret = sxe2_dcb_cfg(adapter, new_cfg, true); + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + + return (ret != SXE2_DCB_HW_CHG_RST) ? SXE2_DCB_NO_HW_CHG : SXE2_DCB_HW_CHG_RST; + +no_change: + mutex_unlock(&adapter->dcb_ctxt.tc_mutex); + return SXE2_DCB_NO_HW_CHG; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = sxe2_dcbnl_getets, + .ieee_setets = sxe2_dcbnl_setets, + .ieee_getmaxrate = sxe2_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = sxe2_dcbnl_ieee_setmaxrate, + .ieee_getpfc = sxe2_dcbnl_getpfc, + .ieee_setpfc = sxe2_dcbnl_setpfc, + .ieee_setapp = sxe2_dcbnl_setapp, + .ieee_delapp = sxe2_dcbnl_delapp, + + .getstate = sxe2_dcbnl_getstate, + .setstate = sxe2_dcbnl_setstate, + .getpermhwaddr = sxe2_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = sxe2_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = sxe2_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = sxe2_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = sxe2_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = sxe2_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = sxe2_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = sxe2_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = sxe2_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = sxe2_dcbnl_set_pfc_cfg, + .getpfccfg = sxe2_dcbnl_get_pfc_cfg, + .getapp = sxe2_dcbnl_getapp, + .getcap = sxe2_dcbnl_get_cap, + .setall = sxe2_dcbnl_cee_set_all, + .getnumtcs = sxe2_dcbnl_getnumtcs, + .getpfcstate = sxe2_dcbnl_getpfcstate, + + .getdcbx = sxe2_dcbnl_getdcbx, + .setdcbx = sxe2_dcbnl_setdcbx, +}; + +void sxe2_dcbnl_set_all(struct sxe2_vsi *vsi) +{ + u32 i; + u8 prio, tc_map; + struct dcb_app sapp; + struct sxe2_dcbx_cfg *dcbxcfg; + struct net_device *netdev = vsi->netdev; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!netdev) + return; + + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_HOST) + return; + + if (!test_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags)) + return; + + dcbxcfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + for (i = 0; i < dcbxcfg->numapps; i++) { + prio = dcbxcfg->app[i].prio; + tc_map = BIT(dcbxcfg->ets.prio_tbl[prio]); + + if (tc_map & vsi->tc.tc_map) { + sapp.selector = dcbxcfg->app[i].selector; + sapp.protocol = dcbxcfg->app[i].prot_id; + sapp.priority = prio; + (void)dcb_ieee_setapp(netdev, &sapp); + } + } + + (void)dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +static void sxe2_dcbnl_vsi_del_app(struct sxe2_vsi *vsi, + struct sxe2_dcb_app_prio_tbl *app) +{ + s32 ret; + struct dcb_app sapp; + struct sxe2_adapter *adapter = vsi->adapter; + + sapp.priority = app->prio; + sapp.protocol = app->prot_id; + sapp.selector = app->selector; + + rtnl_lock(); + ret = sxe2_dcbnl_delapp(vsi->netdev, &sapp); + rtnl_unlock(); + + LOG_INFO_BDF("deleting app for vsi idx=%d ret=%d \t" + "sel=%d proto=0x%x, prio=%d\n", + vsi->idx_in_dev, ret, app->selector, + app->prot_id, app->prio); +} + +void sxe2_dcbnl_flush_apps(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *old_cfg, + struct sxe2_dcbx_cfg *new_cfg) +{ + u32 i; + struct sxe2_vsi *pf_vsi = adapter->vsi_ctxt.main_vsi; + + if (!pf_vsi) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + struct sxe2_dcb_app_prio_tbl app = old_cfg->app[i]; + + if 
(!sxe2_dcbnl_find_app(new_cfg, &app)) + sxe2_dcbnl_vsi_del_app(pf_vsi, &app); + } +} + +void sxe2_dcbnl_setup(struct sxe2_vsi *vsi) +{ + struct net_device *netdev = vsi->netdev; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!test_bit(SXE2_FLAG_DCB_CAPABLE, adapter->flags)) + return; + + netdev->dcbnl_ops = &dcbnl_ops; + + sxe2_dcbnl_set_all(vsi); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.h new file mode 100644 index 0000000000000000000000000000000000000000..499aabb56da493fb3ca435cf505239c56fc8735d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dcb_nl.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_dcb_nl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DCB_NL_H__ +#define __SXE2_DCB_NL_H__ + +#include "sxe2_dcb.h" + +void sxe2_dcbnl_set_all(struct sxe2_vsi *vsi); + +void sxe2_dcbnl_setup(struct sxe2_vsi *vsi); + +void sxe2_dcbnl_flush_apps(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *old_cfg, + struct sxe2_dcbx_cfg *new_cfg); + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.c new file mode 100644 index 0000000000000000000000000000000000000000..1222e5a38bbbd3e9b9c0683337d6d6b54c4ac8f2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ddp.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_common.h" +#include "sxe2_log.h" +#include "sxe2_cmd.h" +#include "sxe2_ddp.h" +#include "sxe2_sriov.h" +#include "sxe2_fnav.h" +#include "sxe2_rss.h" +#include "sxe2_netdev.h" +#include "sxe2_acl.h" + +#define SXE2_WAIT_BUSY_DONE_TIMEOUT 300 + +struct sxe2_ddp_data_parse g_ddp_parse_table[] = { + {SXE2_SECT_RSSPTG_TYPE, sxe2_rss_ptg_parse_from_ddp}, + {SXE2_SECT_FNAVPTG_TYPE, sxe2_fnav_ptg_parse_from_ddp}, + {SXE2_SECT_FNAVMASK_TYPE, sxe2_fnav_mask_parse_from_ddp}, + {SXE2_SECT_SWEXTRACTOR_TYPE, sxe2_sw_profile_parse_from_ddp}, + {SXE2_SECT_ACLPTG_TYPE, sxe2_acl_ptg_parse_from_ddp}, +}; + +STATIC s32 sxe2_verify_pkg(struct sxe2_adapter *adapter, struct sxe2_pkg_hdr *hdr) +{ + s32 err = SXE2_DDP_PKG_SUCCESS; + + if (hdr->pkg_drv_ver.major > SXE2_DDP_DRV_VER_MAJ) { + LOG_INFO_BDF("file version(%d) too high.\n", hdr->pkg_drv_ver.major); + err = -SXE2_DDP_PKG_FILE_VERSION_TOO_HIGH; + } + + if (hdr->seg_count == 0) { + err = -SXE2_DDP_PKG_INVALID_FILE; + LOG_INFO_BDF("file have no segment.\n"); + } + + return err; +} + +struct sxe2_buf_table *sxe2_get_buf_table(struct sxe2_seg *sxe2_seg) +{ + return &sxe2_seg->buf_table; +} + +struct sxe2_seg *sxe2_get_pkg_segment(struct sxe2_pkg_hdr *hdr, + enum sxe2_segment_type type) +{ + struct sxe2_seg *seg = NULL; + u32 seg_count = hdr->seg_count; + u32 i; + u32 offset; + u64 hdr_size = sizeof(struct sxe2_pkg_hdr) + seg_count * sizeof(__le32); + + for (i = 0; i < seg_count; i++) { + offset = hdr->seg_offset[i]; + if (offset < hdr_size) + return NULL; + + seg = (struct sxe2_seg *)((u8 *)hdr + offset); + if (seg->hdr.seg_type == type) + return seg; + } + + return NULL; +} + +s32 sxe2_ddp_pre_deal(struct sxe2_adapter *adapter, struct sxe2_pkg_hdr *hdr) +{ + s32 ret; + struct sxe2_fwc_ddp_state ddp_state = {}; + struct sxe2_cmd_params cmd = {}; 
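+	/*
+	 * DDP pre-download handshake, as implemented below: send
+	 * SXE2_CMD_DP_DLD_PRE with the package header and read back the
+	 * firmware's DDP state. UNINIT means the download may start,
+	 * PROC means another download is already in flight (reported as
+	 * -SXE2_DDP_PKG_BUSY so the caller can poll and retry), and any
+	 * other state is treated as a generic DDP error.
+	 */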
+ + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DP_DLD_PRE, (void *)hdr, + sizeof(*hdr), &ddp_state, sizeof(ddp_state)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("get ddp pre failed, ret=%d\n", ret); + if (ret != -SXE2_DDP_PKG_BUSY) + ret = -SXE2_DDP_PKG_ERR; + goto l_end; + } + + if (ddp_state.state == SXE2_DDP_STATE_UNINIT) { + ret = SXE2_DDP_PKG_SUCCESS; + } else if (ddp_state.state == SXE2_DDP_STATE_PROC) { + LOG_INFO_BDF("code(%d) error, ddp failed.\n", ddp_state.state); + ret = -SXE2_DDP_PKG_BUSY; + } else { + ret = -SXE2_DDP_PKG_ERR; + } + +l_end: + return ret; +} + +s32 sxe2_ddp_proc_deal(struct sxe2_adapter *adapter, struct sxe2_buf *buffer) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DP_DLD_PROC, buffer, + sizeof(*buffer), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("ddp process failed, ret=%d\n", ret); + if (ret != -SXE2_DDP_PKG_BUSY) + ret = -SXE2_DDP_PKG_ERR; + } + + return ret; +} + +s32 sxe2_ddp_deal_done(struct sxe2_adapter *adapter, s32 result) +{ + s32 ret; + struct sxe2_fwc_ddp_state ddp_state = {}; + struct sxe2_cmd_params cmd = {}; + __le32 res = cpu_to_le32((u32)result); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DP_DLD_DONE, &res, sizeof(res), + &ddp_state, sizeof(ddp_state)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("get ddp done failed, ret=%d\n", ret); + if (ret != -SXE2_DDP_PKG_BUSY) + ret = -SXE2_DDP_PKG_ERR; + goto l_end; + } + + if (ddp_state.state == SXE2_DDP_STATE_FINISH) { + ret = SXE2_DDP_PKG_SUCCESS; + } else { + LOG_WARN_BDF("code(%d) error, ddp failed.\n", ddp_state.state); + ret = -SXE2_DDP_PKG_ERR; + } + +l_end: + return ret; +} + +s32 sxe2_ddp_acquire_state(struct sxe2_adapter *adapter, struct sxe2_pkg_hdr *hdr) +{ + s32 ret; + struct sxe2_fwc_ddp_state ddp_state = {}; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_DP_DLD_STATE, NULL, 0, &ddp_state, + sizeof(ddp_state)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("get ddp state failed, ret=%d\n", ret); + ret = -SXE2_DDP_PKG_ERR; + goto l_end; + } + + if (ddp_state.state == SXE2_DDP_STATE_PROC) { + ret = -SXE2_DDP_PKG_BUSY; + } else if (ddp_state.state == SXE2_DDP_STATE_FINISH) { + if (hdr) { + if ((__le16)((hdr->pkg_fw_ver.major << 8) + + hdr->pkg_fw_ver.minor) == ddp_state.ver) { + ret = -SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + } else { + ret = -SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } + } else { + ret = -SXE2_DDP_PKG_ALREADY_LOADED; + } + + } else if (ddp_state.state == SXE2_DDP_STATE_ERROR) { + ret = -SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (ddp_state.state == SXE2_DDP_STATE_UNINIT) { + ret = SXE2_DDP_PKG_SUCCESS; + } else { + LOG_DEV_WARN("code(%d) error, unexpected running branch.\n", + ddp_state.state); + ret = -SXE2_DDP_PKG_ERR; + } + +l_end: + return ret; +} + +s32 sxe2_init_pkg(struct sxe2_adapter *adapter, u8 *buff, u32 len) +{ + s32 err; + s32 rc = SXE2_DDP_PKG_SUCCESS; + struct sxe2_pkg_hdr *hdr = (struct sxe2_pkg_hdr *)buff; + struct sxe2_seg *seg = NULL; + struct sxe2_buf_table *buf_table; + u32 i = 0; + u16 timeout = SXE2_WAIT_BUSY_DONE_TIMEOUT; + + if (len < SXE2_MIN_CFG_SZ) { + rc = -SXE2_DDP_PKG_INVALID_FILE; + goto end; + } + + err = sxe2_verify_pkg(adapter, hdr); + if (err) { + rc = err; + LOG_WARN_BDF("failed to verify pkg (err: %d)\n", err); + goto end; + } + + seg = sxe2_get_pkg_segment(hdr, SXE2_SGM_BLK_DP); + if (!seg) { + LOG_WARN_BDF("can not 
find segment ptr.\n"); + rc = -SXE2_DDP_PKG_MANIFEST_INVALID; + goto end; + } + + err = sxe2_ddp_pre_deal(adapter, hdr); + if (err == -SXE2_DDP_PKG_BUSY) { + do { + (void)msleep(SXE2_DDP_DELAY_100MS); + err = sxe2_ddp_acquire_state(adapter, hdr); + i++; + if (i > timeout) { + err = -SXE2_DDP_PKG_ERR; + break; + } + } while (err == -SXE2_DDP_PKG_BUSY); + + if (sxe2_is_init_pkg_successful(err)) { + rc = err; + goto end; + } + } + + if (!sxe2_is_init_pkg_successful(err)) { + LOG_WARN_BDF("failed to download pkg (err: %d)\n", err); + goto notify; + } + + buf_table = sxe2_get_buf_table(seg); + for (i = 0; i < buf_table->buf_count; i++) { + err = sxe2_ddp_proc_deal(adapter, &buf_table->buf_array[i]); + if (err) + break; + } + +notify: + if (!sxe2_is_init_pkg_successful(err)) + rc = err; + + err = sxe2_ddp_deal_done(adapter, err); + if (err) + rc = err; +end: + return rc; +} + +s32 sxe2_copy_and_init_pkg(struct sxe2_adapter *adapter, const u8 *buf, u32 len) +{ + s32 err; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u8 *buf_copy; + struct sxe2_hw *hw = &adapter->hw; + + if (!buf || !len) + return -SXE2_DDP_PKG_ERR; + + buf_copy = devm_kmemdup(dev, buf, len, GFP_KERNEL); + if (!buf_copy) { + LOG_ERROR_BDF("failed to alloc ddp cfg memory\n"); + return -SXE2_DDP_PKG_ERR; + } + + err = sxe2_init_pkg(adapter, buf_copy, len); + if (!sxe2_is_init_pkg_successful(err)) { + devm_kfree(dev, buf_copy); + } else { + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return err; +} + +bool sxe2_is_init_pkg_successful(s32 state) +{ + switch (state) { + case SXE2_DDP_PKG_SUCCESS: + case -SXE2_DDP_PKG_ALREADY_LOADED: + case -SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case -SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +void sxe2_free_seg(struct sxe2_adapter *adapter) +{ + struct sxe2_hw *hw = &adapter->hw; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (hw->pkg_copy) { + devm_kfree(dev, hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } +} + +s32 sxe2_ddp_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret = SXE2_DDP_PKG_SUCCESS; + + if (!sxe2_is_safe_mode(adapter)) { + do { + ret = sxe2_ddp_acquire_state(adapter, NULL); + if (ret == -SXE2_DDP_PKG_ALREADY_LOADED) { + LOG_DEBUG_BDF("pkg has already download in " + "device.\n"); + ret = SXE2_DDP_PKG_SUCCESS; + break; + } else if (ret == -SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED || + ret == SXE2_DDP_PKG_SUCCESS) { + sxe2_load_pkg(NULL, adapter); + if (sxe2_is_safe_mode(adapter)) { + LOG_DEV_ERR("failed to reload DDP " + "Package.\n"); + set_bit(SXE2_FLAG_ADVANCE_MODE, + adapter->flags); + ret = -EIO; + } + break; + } else if (ret == -SXE2_DDP_PKG_BUSY) { + msleep(SXE2_DDP_DELAY_100MS); + continue; + } else { + LOG_DEV_ERR("pkg process err in device.(%d)\n", ret); + break; + } + } while (1); + } + + return ret; +} + +STATIC s32 sxe2_ddp_store_by_type(struct sxe2_adapter *adapter, u16 type, u8 *data, + u16 size, u16 base_id) +{ + s32 rc = 0; + u32 index; + u32 table_size = (sizeof(g_ddp_parse_table) / + sizeof(struct sxe2_ddp_data_parse)); + sxe2_ddp_data_parse_cb cb = NULL; + + for (index = 0; index < table_size; index++) { + if (g_ddp_parse_table[index].sec_type == type) { + cb = g_ddp_parse_table[index].cb; + break; + } + } + + if (cb) { + rc = cb(data, size, base_id, adapter); + if (rc) { + LOG_INFO_BDF("ddp data store failed! 
size[%d] base_id[%d] " + "type[%d]\n", + size, base_id, type); + } + } else { + LOG_DEBUG_BDF("ddp data[%d] do not need store!\n", type); + } + + return rc; +} + +s32 sxe2_ddp_params_store(struct sxe2_adapter *adapter) +{ + s32 rc = 0; + u32 buf_cnt; + u16 sec_cnt; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_pkg_hdr *hdr = (struct sxe2_pkg_hdr *)hw->pkg_copy; + struct sxe2_seg *seg = NULL; + struct sxe2_buf_table *buf_table; + struct sxe2_section_entry *sec_entry; + u16 base_id = 0; + u16 unit_cnt = 0; + u16 cur_type = SXE2_INVAL_U16; + + if (sxe2_is_safe_mode(adapter)) { + LOG_WARN_BDF("running in safe mode.\n"); + goto end; + } + + if (!hdr) { + LOG_WARN_BDF("buffer is nullptr!\n"); + return -EINVAL; + } + + seg = sxe2_get_pkg_segment(hdr, SXE2_SGM_BLK_DP); + if (!seg) { + LOG_WARN_BDF("can not find segment ptr.\n"); + rc = -SXE2_DDP_PKG_MANIFEST_INVALID; + goto end; + } + + buf_table = sxe2_get_buf_table(seg); + + for (buf_cnt = 0; buf_cnt < buf_table->buf_count; buf_cnt++) { + struct sxe2_buf_hdr *buffer_hdr = + (struct sxe2_buf_hdr *)buf_table->buf_array[buf_cnt] + .buf; + + for (sec_cnt = 0; sec_cnt < buffer_hdr->section_count; sec_cnt++) { + sec_entry = &buffer_hdr->section_entry[sec_cnt]; + if (sec_entry->unit_size == 0) { + LOG_WARN_BDF("ddp file[%d] is invalid.\n", sec_cnt); + rc = -SXE2_DDP_PKG_BUFFER_INVALID; + break; + } + + if (cur_type != sec_entry->type) { + base_id = 0; + cur_type = sec_entry->type; + } + + unit_cnt = (sec_entry->size / sec_entry->unit_size); + rc = sxe2_ddp_store_by_type( + adapter, sec_entry->type, + (u8 *)buffer_hdr + sec_entry->offset, + unit_cnt, base_id); + if (rc) { + LOG_WARN_BDF("ddp data type[%d] base[%d] count[%d] " + "store failed.\n", + cur_type, base_id, unit_cnt); + break; + } + base_id += unit_cnt; + } + } +end: + return rc; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.h new file mode 100644 index 0000000000000000000000000000000000000000..4ab34388cb2458d35e771f02e5df34fefeaa9ab6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ddp.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_ddp.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DDP_H__ +#define __SXE2_DDP_H__ + +#include "sxe2.h" +#include "sxe2_ddp_common.h" +#include + +#define SXE2_DDP_PKG_PATH "sxe2/ddp/" +#define SXE2_DDP_PKG_FILE SXE2_DDP_PKG_PATH "sxe2.cfg" + +#define SXE2_DDP_DELAY_100MS (100) + +#define SXE2_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \ + ((SXE2_PKG_BUF_SIZE - \ + struct_size((struct sxe2_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \ + (ent_sz)) + +typedef s32 (*sxe2_ddp_data_parse_cb)(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter); + +struct sxe2_ddp_data_parse { + enum sxe2_section_type sec_type; + sxe2_ddp_data_parse_cb cb; +}; + +s32 sxe2_get_pkg_info(struct sxe2_adapter *adapter, struct sxe2_pkg_hdr *hdr); + +struct sxe2_buf_table *sxe2_get_buf_table(struct sxe2_seg *sxe2_seg); + +struct sxe2_seg *sxe2_get_pkg_segment(struct sxe2_pkg_hdr *hdr, + enum sxe2_segment_type type); + +s32 sxe2_ddp_pre_deal(struct sxe2_adapter *adapter, struct sxe2_pkg_hdr *hdr); + +s32 sxe2_ddp_proc_deal(struct sxe2_adapter *adapter, struct sxe2_buf *buffer); + +s32 sxe2_ddp_deal_done(struct sxe2_adapter *adapter, s32 result); + +s32 sxe2_ddp_acquire_state(struct sxe2_adapter *adapter, + struct sxe2_pkg_hdr *hdr); + +s32 sxe2_init_pkg(struct sxe2_adapter *adapter, u8 *buff, u32 len); + +s32 sxe2_copy_and_init_pkg(struct sxe2_adapter *adapter, const u8 *buf, + u32 len); + +bool sxe2_is_init_pkg_successful(s32 state); + +void sxe2_free_seg(struct sxe2_adapter *adapter); + +void sxe2_load_pkg(const struct firmware *firmware, + struct sxe2_adapter *adapter); + +s32 sxe2_ddp_rebuild(struct sxe2_adapter *adapter); + +void sxe2_pci_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_hw_cfg_info_get(struct sxe2_adapter *adapter); + +s32 sxe2_ddp_params_store(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..a45aea36ff39d5b8b6f9b345b339bcdec97963a8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.c @@ -0,0 +1,1065 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_debugfs.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include + +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_debugfs.h" +#include "sxe2_switch.h" +#include "sxe2_txsched.h" +#include "sxe2_lldp.h" +#include "sxe2_rss.h" +#include "sxe2_fnav.h" +#include "sxe2_dfx.h" +#include "sxe2_dev_ctrl.h" +#include "sxe2_host_cli.h" +#include "sxe2_lag.h" +#include "sxe2_acl.h" +#include "sxe2_com_dma.h" +#include "sxe2_ethtool.h" +#include "sxe2_com_cdev.h" + +static struct dentry *sxe2_debugfs_root; + +#ifdef SXE2_CFG_DEBUG +extern int g_pf_switch_stats; +#endif + +static char *g_sxe2_com_mode_to_str[] = { + [SXE2_COM_MODULE_KERNEL] = SXE2_COM_KERNEL_MODE_NAME, + [SXE2_COM_MODULE_DPDK] = SXE2_COM_DPDK_MODE_NAME, + [SXE2_COM_MODULE_RDMA] = SXE2_COM_RDMA_MODE_NAME, + [SXE2_COM_MODULE_MIXED] = SXE2_COM_MIXED_MODE_NAME, + [SXE2_COM_MODULE_UNDEFINED] = SXE2_COM_UNDEFINED_MODE_NAME, +}; + +STATIC void sxe2_info_dump(struct sxe2_adapter *adapter) +{ + LOG_DEV_INFO("\t adapter=%pK\n", adapter); + LOG_DEV_INFO("\t adapter.dev_name=%s\n", adapter->dev_name); + LOG_DEV_INFO("\t adapter.pf_idx=%d\n", adapter->pf_idx); + LOG_DEV_INFO("\t adapter.port_idx=%d\n", adapter->port_idx); + LOG_DEV_INFO("\t adapter mode:%d\n", sxe2_com_mode_get(adapter)); + LOG_DEV_INFO("\t adapter.irq_ctxt.max_cnt=%d\n", adapter->irq_ctxt.max_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.avail_cnt=%d\n", + adapter->irq_ctxt.avail_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.base_idx_in_dev=%d\n", + adapter->irq_ctxt.base_idx_in_dev); + LOG_DEV_INFO("\t adapter.irq_ctxt.rdma_base_idx=%d\n", + adapter->irq_ctxt.rdma_base_idx); + LOG_DEV_INFO("\t adapter.irq_ctxt.event_irq_cnt=%d\n", + adapter->irq_ctxt.event_irq_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.event=%d\n", + adapter->irq_ctxt.irq_layout.event); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.event_offset=%d\n", + adapter->irq_ctxt.irq_layout.event_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.lb=%d\n", + adapter->irq_ctxt.irq_layout.lb); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.lb_offset=%d\n", + adapter->irq_ctxt.irq_layout.lb_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.fnav=%d\n", + adapter->irq_ctxt.irq_layout.fnav); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.fnav_offset=%d\n", + adapter->irq_ctxt.irq_layout.fnav_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.eswitch=%d\n", + adapter->irq_ctxt.irq_layout.eswitch); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.eswitch_offset=%d\n", + adapter->irq_ctxt.irq_layout.eswitch_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.lan=%d\n", + adapter->irq_ctxt.irq_layout.lan); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.lan_offset=%d\n", + adapter->irq_ctxt.irq_layout.lan_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.rdma_offset=%d\n", + adapter->irq_ctxt.irq_layout.rdma_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.rdma=%d\n", + adapter->irq_ctxt.irq_layout.rdma); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.dpdk_offset=%d\n", + adapter->irq_ctxt.irq_layout.dpdk_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.dpdk=%d\n", + adapter->irq_ctxt.irq_layout.dpdk); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.dpdk_eswitch=%d\n", + adapter->irq_ctxt.irq_layout.dpdk_eswitch); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.dpdk_eswitch_offset=%d\n", + adapter->irq_ctxt.irq_layout.dpdk_eswitch_offset); + LOG_DEV_INFO("\t 
adapter.irq_ctxt.irq_layout.macvlan_offset=%d\n", + adapter->irq_ctxt.irq_layout.macvlan_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.macvlan=%d\n", + adapter->irq_ctxt.irq_layout.macvlan); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.sriov=%d\n", + adapter->irq_ctxt.irq_layout.sriov); + LOG_DEV_INFO("\t adapter.irq_ctxt.irq_layout.sriov_offset=%d\n", + adapter->irq_ctxt.irq_layout.sriov_offset); + + LOG_DEV_INFO("\t adapter.q_ctxt.max_txq_cnt=%d\n", + adapter->q_ctxt.max_txq_cnt); + LOG_DEV_INFO("\t adapter.q_ctxt.max_rxq_cnt=%d\n", + adapter->q_ctxt.max_rxq_cnt); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_base_idx_in_dev=%d\n", + adapter->q_ctxt.txq_base_idx_in_dev); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_base_idx_in_dev=%d\n", + adapter->q_ctxt.rxq_base_idx_in_dev); + + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.lb=%d\n", + adapter->q_ctxt.txq_layout.lb); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.lb_offset=%d\n", + adapter->q_ctxt.txq_layout.lb_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.ctrl_offset=%d\n", + adapter->q_ctxt.txq_layout.ctrl_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.ctrl=%d\n", + adapter->q_ctxt.txq_layout.ctrl); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.lan=%d\n", + adapter->q_ctxt.txq_layout.lan); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.lan_offset=%d\n", + adapter->q_ctxt.txq_layout.lan_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.dpdk=%d\n", + adapter->q_ctxt.txq_layout.dpdk); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.dpdk_offset=%d\n", + adapter->q_ctxt.txq_layout.dpdk_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.xdp=%d\n", + adapter->q_ctxt.txq_layout.xdp); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.xdp_offset=%d\n", + adapter->q_ctxt.txq_layout.xdp_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.macvlan=%d\n", + adapter->q_ctxt.txq_layout.macvlan); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.macvlan_offset=%d\n", + adapter->q_ctxt.txq_layout.macvlan_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.sriov=%d\n", + adapter->q_ctxt.txq_layout.sriov); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.sriov_offset=%d\n", + adapter->q_ctxt.txq_layout.sriov_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.esw=%d\n", + adapter->q_ctxt.txq_layout.esw); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.esw_offset=%d\n", + adapter->q_ctxt.txq_layout.esw_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.dpdk_esw=%d\n", + adapter->q_ctxt.txq_layout.dpdk_esw); + LOG_DEV_INFO("\t adapter.q_ctxt.txq_layout.dpdk_esw_offset=%d\n", + adapter->q_ctxt.txq_layout.dpdk_esw_offset); + + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.lb=%d\n", + adapter->q_ctxt.rxq_layout.lb); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.lb_offset=%d\n", + adapter->q_ctxt.rxq_layout.lb_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.ctrl_offset=%d\n", + adapter->q_ctxt.rxq_layout.ctrl_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.ctrl=%d\n", + adapter->q_ctxt.rxq_layout.ctrl); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.lan=%d\n", + adapter->q_ctxt.rxq_layout.lan); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.lan_offset=%d\n", + adapter->q_ctxt.rxq_layout.lan_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.dpdk=%d\n", + adapter->q_ctxt.rxq_layout.dpdk); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.dpdk_offset=%d\n", + adapter->q_ctxt.rxq_layout.dpdk_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.xdp=%d\n", + adapter->q_ctxt.rxq_layout.xdp); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.xdp_offset=%d\n", + 
adapter->q_ctxt.rxq_layout.xdp_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.macvlan=%d\n", + adapter->q_ctxt.rxq_layout.macvlan); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.macvlan_offset=%d\n", + adapter->q_ctxt.rxq_layout.macvlan_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.sriov=%d\n", + adapter->q_ctxt.rxq_layout.sriov); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.sriov_offset=%d\n", + adapter->q_ctxt.rxq_layout.sriov_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.esw=%d\n", + adapter->q_ctxt.rxq_layout.esw); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.esw_offset=%d\n", + adapter->q_ctxt.rxq_layout.esw_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.dpdk_esw=%d\n", + adapter->q_ctxt.rxq_layout.dpdk_esw); + LOG_DEV_INFO("\t adapter.q_ctxt.rxq_layout.dpdk_esw_offset=%d\n", + adapter->q_ctxt.rxq_layout.dpdk_esw_offset); + + LOG_DEV_INFO("\t adapter.vsi_ctxt.cnt=%d\n", adapter->vsi_ctxt.cnt); + LOG_DEV_INFO("\t adapter.vsi_ctxt.max_cnt=%d\n", adapter->vsi_ctxt.max_cnt); + LOG_DEV_INFO("\t adapter.vsi_ctxt.base_idx_in_dev=%d\n", + adapter->vsi_ctxt.base_idx_in_dev); + LOG_DEV_INFO("\t adapter.dev_ctrl_ctxt.dev_state=%d\n", + adapter->dev_ctrl_ctxt.dev_state); + LOG_DEV_INFO("\t adapter.dev_ctrl_ctxt.work_state=%lu\n", + adapter->dev_ctrl_ctxt.work_state); + LOG_DEV_INFO("\t adapter.macvlan_ctxt.num_macvlan=%d\n", + adapter->macvlan_ctxt.num_macvlan); + LOG_DEV_INFO("\t adapter.macvlan_ctxt.max_num_macvlan=%d\n", + adapter->macvlan_ctxt.max_num_macvlan); +} + +STATIC void sxe2_vsi_dump(struct sxe2_vsi *vsi) +{ + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + struct netdev_queue *ntxq = NULL; + + LOG_DEV_INFO("\t ----vsi[%d]----\n", vsi->idx_in_dev); + LOG_DEV_INFO("\t vsi->is_from_pool=%d\n", vsi->is_from_pool); + LOG_DEV_INFO("\t vsi->type=%d\n", vsi->type); + LOG_DEV_INFO("\t vsi->id_in_pf=%d\n", vsi->id_in_pf); + LOG_DEV_INFO("\t vsi->idx_in_dev=%d\n", vsi->idx_in_dev); + LOG_DEV_INFO("\t vsi base id in dev=%d\n", + adapter->vsi_ctxt.base_idx_in_dev); + LOG_DEV_INFO("\t vsi cnt=%d\n", adapter->vsi_ctxt.cnt); + + LOG_DEV_INFO("\t vsi->tc.tc_cnt=%d\n", vsi->tc.tc_cnt); + LOG_DEV_INFO("\t vsi->tc.tc_map=%d\n", vsi->tc.tc_map); + + LOG_DEV_INFO("\t vsi->irqs.cnt=%d\n", vsi->irqs.cnt); + LOG_DEV_INFO("\t vsi->irqs.base_idx_in_pf=%d\n", vsi->irqs.base_idx_in_pf); + sxe2_for_each_vsi_irq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi irq_data[%d]----\n", + vsi->irqs.irq_data[i]->idx_in_vsi); + LOG_DEV_INFO("\t\t irq_data[%d]->name=%s\n", i, + vsi->irqs.irq_data[i]->name); + LOG_DEV_INFO("\t\t irq_data[%d]->idx_in_pf=%d\n", i, + vsi->irqs.irq_data[i]->idx_in_pf); + LOG_DEV_INFO("\t\t irq_data[%d]->rate_limit=%d\n", i, + vsi->irqs.irq_data[i]->rate_limit); + LOG_DEV_INFO("\t\t irq_data[%d]->multiple_polling=%d\n", i, + vsi->irqs.irq_data[i]->multiple_polling); + LOG_DEV_INFO("\t\t irq_data[%d]->event_ctr=%d\n", i, + vsi->irqs.irq_data[i]->event_ctr); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_mode=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_mode); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_idx=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_idx); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_setting=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_setting); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_mode=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_mode); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_idx=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_idx); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_setting=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_setting); + } + + LOG_DEV_INFO("\t 
vsi->txqs.q_cnt=%d\n", vsi->txqs.q_cnt); + LOG_DEV_INFO("\t vsi->txqs.depth=%d\n", vsi->txqs.depth); + LOG_DEV_INFO("\t vsi->txqs.rx_buf_len=%d\n", vsi->txqs.rx_buf_len); + LOG_DEV_INFO("\t vsi->txqs.max_frame=%d\n", vsi->txqs.max_frame); + sxe2_for_each_vsi_txq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi txq[%d]----\n", vsi->txqs.q[i]->idx_in_vsi); + LOG_DEV_INFO("\t\t txq[%d]->idx_in_pf=%d\n", i, + vsi->txqs.q[i]->idx_in_pf); + LOG_DEV_INFO("\t\t txq[%d]->depth=%d\n", i, vsi->txqs.q[i]->depth); + LOG_DEV_INFO("\t\t txq[%d]->next_to_use=%d\n", i, + vsi->txqs.q[i]->next_to_use); + LOG_DEV_INFO("\t\t txq[%d]->next_to_clean=%d\n", i, + vsi->txqs.q[i]->next_to_clean); + if (vsi->txqs.q[i]->netdev) { + ntxq = netdev_get_tx_queue(vsi->txqs.q[i]->netdev, + vsi->txqs.q[i]->idx_in_vsi); + if (ntxq) { + LOG_DEV_INFO("\t\t txq[%d] netdev st=%lu\t" + "(BIT: 0 - DRV_XOFF; 1 - STACK_XOFF;\t" + "2- FROZEN)\n", + i, ntxq->state); +#ifdef CONFIG_BQL + LOG_DEV_INFO("\t\t txq[%d] dql adj_limit %u queued\t" + "%u\t" + "completed %u max_limit %u min_limit\t" + "%u.\n", + i, ntxq->dql.adj_limit, + ntxq->dql.num_queued, + ntxq->dql.num_completed, + ntxq->dql.max_limit, + ntxq->dql.min_limit); +#endif + } + } + } + + LOG_DEV_INFO("\t vsi->rxqs.q_cnt=%d\n", vsi->rxqs.q_cnt); + LOG_DEV_INFO("\t vsi->rxqs.depth=%d\n", vsi->rxqs.depth); + LOG_DEV_INFO("\t vsi->rxqs.rx_buf_len=%d\n", vsi->rxqs.rx_buf_len); + LOG_DEV_INFO("\t vsi->rxqs.max_frame=%d\n", vsi->rxqs.max_frame); + sxe2_for_each_vsi_rxq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi rxq[%d]----\n", vsi->rxqs.q[i]->idx_in_vsi); + LOG_DEV_INFO("\t\t rxq[%d]->idx_in_pf=%d\n", i, + vsi->rxqs.q[i]->idx_in_pf); + LOG_DEV_INFO("\t\t rxq[%d]->depth=%d\n", i, vsi->rxqs.q[i]->depth); + LOG_DEV_INFO("\t\t rxq[%d]->next_to_use=%d\n", i, + vsi->rxqs.q[i]->next_to_use); + LOG_DEV_INFO("\t\t rxq[%d]->next_to_clean=%d\n", i, + vsi->rxqs.q[i]->next_to_clean); + } +} + +STATIC void sxe2_vsis_dump(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + u16 i; + + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_for_each_vsi(&adapter->vsi_ctxt, i) + { + vsi = adapter->vsi_ctxt.vsi[i]; + if (!vsi) + continue; + + sxe2_vsi_dump(vsi); + } + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +STATIC void sxe2_debugfs_cdev_show(struct sxe2_adapter *adapter) +{ + struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get(); + + LOG_DEV_INFO("bitmap:%ld\n", cli_dev_mgr->map[0]); + + LOG_DEV_INFO("cdev mgr[0] id %d\n", cli_dev_mgr->cdev_mgr[0].id); + LOG_DEV_INFO("cdev mgr[0] ref_count %d\n", + atomic_read(&cli_dev_mgr->cdev_mgr[0].ref_count)); + LOG_DEV_INFO("cdev mgr[0] adapter %p\n", + (void *)cli_dev_mgr->cdev_mgr[0].adapter); + LOG_DEV_INFO("cdev mgr[0] status %d\n", cli_dev_mgr->cdev_mgr[0].status); + LOG_DEV_INFO("cdev mgr[1] id %d\n", cli_dev_mgr->cdev_mgr[1].id); + LOG_DEV_INFO("cdev mgr[1] ref_count %d\n", + atomic_read(&cli_dev_mgr->cdev_mgr[1].ref_count)); + LOG_DEV_INFO("cdev mgr[1] adapter %p\n", + (void *)cli_dev_mgr->cdev_mgr[1].adapter); + LOG_DEV_INFO("cdev mgr[1] status %d\n", cli_dev_mgr->cdev_mgr[1].status); + LOG_DEV_INFO("cdev mgr[2] id %d\n", cli_dev_mgr->cdev_mgr[2].id); + LOG_DEV_INFO("cdev mgr[2] ref_count %d\n", + atomic_read(&cli_dev_mgr->cdev_mgr[2].ref_count)); + LOG_DEV_INFO("cdev mgr[2] adapter %p\n", + (void *)cli_dev_mgr->cdev_mgr[2].adapter); + LOG_DEV_INFO("cdev mgr[2] status %d\n", cli_dev_mgr->cdev_mgr[2].status); + LOG_DEV_INFO("cdev mgr[3] id %d\n", cli_dev_mgr->cdev_mgr[3].id); + LOG_DEV_INFO("cdev mgr[3] ref_count %d\n", + 
atomic_read(&cli_dev_mgr->cdev_mgr[3].ref_count));
+	LOG_DEV_INFO("cdev mgr[3] adapter %p\n",
+		     (void *)cli_dev_mgr->cdev_mgr[3].adapter);
+	LOG_DEV_INFO("cdev mgr[3] status %d\n", cli_dev_mgr->cdev_mgr[3].status);
+}
+
+STATIC void sxe2_pf_state_get(struct sxe2_adapter *adapter)
+{
+	enum sxe2_dev_state dev_state;
+	enum sxe2_reset_type reset_type;
+
+	sxe2_dev_state_get(adapter, &dev_state, &reset_type);
+	if (dev_state == SXE2_DEVSTATE_RUNNING)
+		LOG_DEV_INFO("pf ready\n");
+	else
+		LOG_DEV_INFO("pf unready\n");
+}
+
+STATIC void sxe2_pf_lag_dump(struct sxe2_adapter *adapter)
+{
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+
+	if (!lag) {
+		LOG_DEV_INFO("No lag\n");
+		return;
+	}
+
+	mutex_lock(&lag->lock);
+	/* second state slot assumed to be SXE2_LAG_PF1, matching adapters[1] */
+	LOG_DEV_INFO("lag bonded:%d mode:%d pf 0 \t"
+		     "state: %d pf 1 state: %d ref_num: %d\n",
+		     lag->bonded, lag->bond_mode, lag->state[SXE2_LAG_PF0],
+		     lag->state[SXE2_LAG_PF1], lag->ref_num);
+	LOG_DEV_INFO("lag serial num:%s\n", lag->serial_num);
+	LOG_DEV_INFO("lag adapters[0] 0x%p pfid %d\n", lag->adapters[0],
+		     lag->adapters[0] ? lag->adapters[0]->pf_idx : -1);
+	LOG_DEV_INFO("lag adapters[1] 0x%p pfid %d\n", lag->adapters[1],
+		     lag->adapters[1] ? lag->adapters[1]->pf_idx : -1);
+	LOG_DEV_INFO("lag wk state %d mode %d event %d is_bonded %d\n",
+		     lag->lag_wk.state, lag->lag_wk.bond_mode, lag->lag_wk.event,
+		     lag->lag_wk.is_bonded);
+	LOG_DEV_INFO("lag wk slave[0] st %d link %d\n",
+		     lag->lag_wk.info[0].slave_state,
+		     lag->lag_wk.info[0].slave_link);
+	LOG_DEV_INFO("lag wk slave[1] st %d link %d\n",
+		     lag->lag_wk.info[1].slave_state,
+		     lag->lag_wk.info[1].slave_link);
+	mutex_unlock(&lag->lock);
+}
+
+STATIC void sxe2_vf_nodes_show(struct sxe2_adapter *adapter)
+{
+	u16 vf_idx;
+	struct sxe2_vf_node_e *vf_node_e;
+
+	LOG_DEV_INFO("\t vf node show start.\n");
+
+	LOG_DEV_INFO("\t adapter.vf_ctxt.irq_cnt=%d\n", adapter->vf_ctxt.irq_cnt);
+	LOG_DEV_INFO("\t adapter.vf_ctxt.max_vfs=%d\n", adapter->vf_ctxt.max_vfs);
+	LOG_DEV_INFO("\t adapter.vf_ctxt.num_vfs=%d\n", adapter->vf_ctxt.num_vfs);
+	LOG_DEV_INFO("\t adapter.vf_ctxt.q_cnt=%d\n", adapter->vf_ctxt.q_cnt);
+	LOG_DEV_INFO("\t adapter.vf_ctxt.vfid_base=%d\n",
+		     adapter->vf_ctxt.vfid_base);
+
+	mutex_lock(&adapter->vf_ctxt.vfs_lock);
+
+	for (vf_idx = 0; vf_idx < adapter->vf_ctxt.num_vfs; vf_idx++) {
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		vf_node_e = SXE2_VF_NODE_E(adapter, vf_idx);
+
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d]\n", vf_idx);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].irq_base_idx=%d\n",
+			     vf_idx, vf_node_e->vf_node->irq_base_idx);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].vf_idx=%d\n", vf_idx,
+			     vf_node_e->vf_node->vf_idx);
+
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].mac_from_pf=%d\n",
+			     vf_idx, vf_node_e->vf_node->prop.mac_from_pf);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].trusted=%d\n", vf_idx,
+			     vf_node_e->vf_node->prop.trusted);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].spoofchk=%d\n",
+			     vf_idx, vf_node_e->vf_node->prop.spoofchk);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].link_forced=%d\n",
+			     vf_idx, vf_node_e->vf_node->prop.link_forced);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].link_up=%d\n", vf_idx,
+			     vf_node_e->vf_node->prop.link_up);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].min_tx_rate=%d\n",
+			     vf_idx, vf_node_e->vf_node->prop.min_tx_rate);
+		LOG_DEV_INFO("\t adapter.vf_ctxt.vf_node_e[%d].max_tx_rate=%d\n",
+			     vf_idx, vf_node_e->vf_node->prop.max_tx_rate);
+
+		if (vf_node_e->vf_node->vsi)
+			sxe2_vsi_dump(vf_node_e->vf_node->vsi);
+		
else if (vf_node_e->vf_node->dpdk_vf_vsi) + sxe2_vsi_dump(vf_node_e->vf_node->dpdk_vf_vsi); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + mutex_unlock(&adapter->vf_ctxt.vfs_lock); + + LOG_DEV_INFO("\t vf node show end.\n"); +} + +#ifdef SXE2_CFG_DEBUG +STATIC void sxe2_switch_dfx_irq_enable(struct sxe2_adapter *adapter) +{ + (void)sxe2_switch_dfx_irq_setup(adapter, true); +} + +STATIC void sxe2_switch_dfx_irq_disable(struct sxe2_adapter *adapter) +{ + (void)sxe2_switch_dfx_irq_setup(adapter, false); +} + +STATIC void sxe2_fnav_hw_clear(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + ret = sxe2_fwc_fnav_hw_clear(adapter); + if (ret) + return; + + sxe2_fnav_flow_ctxt_clean(adapter); +} + +STATIC void sxe2_fnav_hw_replay(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + ret = sxe2_fnav_rule_reply(adapter); + if (ret) + LOG_DEV_ERR("fnav rule failed, ret:%d\n", ret); +} + +STATIC void sxe2_heart_close(struct sxe2_adapter *adapter) +{ + adapter->dev_ctrl_ctxt.heart_beat_ena = false; + LOG_DEV_INFO("heart beat disable\n"); +} + +STATIC void sxe2_heart_open(struct sxe2_adapter *adapter) +{ + adapter->dev_ctrl_ctxt.heart_beat_ena = true; + LOG_DEV_INFO("heart beat enable\n"); +} + +STATIC void sxe2_corer_trigger(struct sxe2_adapter *adapter) +{ + enum sxe2_dev_state curr_dev_state; + enum sxe2_reset_type curr_reset_type; + + LOG_DEV_INFO("\t CORER trigger\n"); + sxe2_dev_state_get(adapter, &curr_dev_state, &curr_reset_type); + if (curr_dev_state == SXE2_DEVSTATE_FAULT) { + LOG_DEV_DEBUG("sxe2 nic fault.\n"); + return; + } else if (curr_dev_state != SXE2_DEVSTATE_RUNNING) { + LOG_DEV_DEBUG("reset already in progress.\n"); + return; + } + (void)sxe2_reset_async(adapter, SXE2_RESET_CORER); +} + +STATIC void sxe2_pfr_trigger(struct sxe2_adapter *adapter) +{ + enum sxe2_dev_state curr_dev_state; + enum sxe2_reset_type curr_reset_type; + + LOG_DEV_INFO("\t PFR trigger\n"); + + sxe2_dev_state_get(adapter, &curr_dev_state, &curr_reset_type); + if (curr_dev_state == SXE2_DEVSTATE_FAULT) { + LOG_DEV_DEBUG("sxe2 nic fault.\n"); + return; + } else if (curr_dev_state != SXE2_DEVSTATE_RUNNING) { + LOG_DEV_DEBUG("reset already in progress.\n"); + return; + } + (void)sxe2_reset_async(adapter, SXE2_RESET_PFR); +} + +void sxe2_etype_rx_rule_add(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + LOG_DEV_INFO("\t rx etype rule add\n"); + vsi = adapter->vsi_ctxt.main_vsi; + (void)sxe2_rx_etype_rule_add(vsi, ETH_P_LLDP); +} + +void sxe2_etype_rx_rule_del(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + LOG_DEV_INFO("\t rx etype rule del\n"); + vsi = adapter->vsi_ctxt.main_vsi; + (void)sxe2_rx_etype_rule_del(adapter, vsi->idx_in_dev, ETH_P_LLDP); +} + +STATIC void sxe2_dcb_info_show(struct sxe2_adapter *adapter) +{ + struct sxe2_dcbx_cfg *new_cfg = &adapter->dcb_ctxt.desired_dcbx_cfg; + struct sxe2_dcbx_cfg *local_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + u32 i; + + LOG_INFO_BDF("local pfc cfg, cap=%u, enable=%u,\n" + "mbc=%u, willing=%u\n", + local_cfg->pfc.cap, local_cfg->pfc.enable, local_cfg->pfc.mbc, + local_cfg->pfc.willing); + + LOG_INFO_BDF("local ets cfg, willing=%d, cbs=%d, maxtcs=%d\n" + "prio[0]=%d, tcbw[0]=%d, tsa_tbl[0]=%d\n" + "prio[1]=%d, tcbw[1]=%d, tsa_tbl[1]=%d\n" + "prio[2]=%d, tcbw[2]=%d, tsa_tbl[2]=%d\n" + "prio[3]=%d, tcbw[3]=%d, tsa_tbl[3]=%d\n" + "prio[4]=%d, tcbw[4]=%d, tsa_tbl[4]=%d\n" + "prio[5]=%d, tcbw[5]=%d, tsa_tbl[5]=%d\n" + "prio[6]=%d, tcbw[6]=%d, tsa_tbl[6]=%d\n" + "prio[7]=%d, tcbw[7]=%d, tsa_tbl[7]=%d\n", + local_cfg->ets.willing, 
local_cfg->ets.cbs, + local_cfg->ets.maxtcs, local_cfg->ets.prio_tbl[0], + local_cfg->ets.tcbw_tbl[0], local_cfg->ets.tsa_tbl[0], + local_cfg->ets.prio_tbl[1], local_cfg->ets.tcbw_tbl[1], + local_cfg->ets.tsa_tbl[1], local_cfg->ets.prio_tbl[2], + local_cfg->ets.tcbw_tbl[2], local_cfg->ets.tsa_tbl[2], + local_cfg->ets.prio_tbl[3], local_cfg->ets.tcbw_tbl[3], + local_cfg->ets.tsa_tbl[3], local_cfg->ets.prio_tbl[4], + local_cfg->ets.tcbw_tbl[4], local_cfg->ets.tsa_tbl[4], + local_cfg->ets.prio_tbl[5], local_cfg->ets.tcbw_tbl[5], + local_cfg->ets.tsa_tbl[5], local_cfg->ets.prio_tbl[6], + local_cfg->ets.tcbw_tbl[6], local_cfg->ets.tsa_tbl[6], + local_cfg->ets.prio_tbl[7], local_cfg->ets.tcbw_tbl[7], + local_cfg->ets.tsa_tbl[7]); + + for (i = 0; i < SXE2_DSCP_MAX_NUM; i++) { + LOG_INFO_BDF("local app cfg, qos_mode %d dscp=%d, en=%d\n", i, + local_cfg->qos_mode, + test_bit((int)i, local_cfg->dscp_mapped)); + } + for (i = 0; i < local_cfg->numapps; i++) + LOG_INFO_BDF("local app cfg, dscp=%d, up=%d\n", + local_cfg->app[i].prot_id, local_cfg->app[i].prio); + + LOG_INFO_BDF("desired pfc cfg, cap=%u, enable=%u,\n" + "mbc=%u, willing=%u\n", + new_cfg->pfc.cap, new_cfg->pfc.enable, new_cfg->pfc.mbc, + new_cfg->pfc.willing); + + LOG_INFO_BDF("desired ets cfg, willing=%d, cbs=%d, maxtcs=%d\n" + "prio[0]=%d, tcbw[0]=%d, tsa_tbl[0]=%d\n" + "prio[1]=%d, tcbw[1]=%d, tsa_tbl[1]=%d\n" + "prio[2]=%d, tcbw[2]=%d, tsa_tbl[2]=%d\n" + "prio[3]=%d, tcbw[3]=%d, tsa_tbl[3]=%d\n" + "prio[4]=%d, tcbw[4]=%d, tsa_tbl[4]=%d\n" + "prio[5]=%d, tcbw[5]=%d, tsa_tbl[5]=%d\n" + "prio[6]=%d, tcbw[6]=%d, tsa_tbl[6]=%d\n" + "prio[7]=%d, tcbw[7]=%d, tsa_tbl[7]=%d\n", + new_cfg->ets.willing, new_cfg->ets.cbs, new_cfg->ets.maxtcs, + new_cfg->ets.prio_tbl[0], new_cfg->ets.tcbw_tbl[0], + new_cfg->ets.tsa_tbl[0], new_cfg->ets.prio_tbl[1], + new_cfg->ets.tcbw_tbl[1], new_cfg->ets.tsa_tbl[1], + new_cfg->ets.prio_tbl[2], new_cfg->ets.tcbw_tbl[2], + new_cfg->ets.tsa_tbl[2], new_cfg->ets.prio_tbl[3], + new_cfg->ets.tcbw_tbl[3], new_cfg->ets.tsa_tbl[3], + new_cfg->ets.prio_tbl[4], new_cfg->ets.tcbw_tbl[4], + new_cfg->ets.tsa_tbl[4], new_cfg->ets.prio_tbl[5], + new_cfg->ets.tcbw_tbl[5], new_cfg->ets.tsa_tbl[5], + new_cfg->ets.prio_tbl[6], new_cfg->ets.tcbw_tbl[6], + new_cfg->ets.tsa_tbl[6], new_cfg->ets.prio_tbl[7], + new_cfg->ets.tcbw_tbl[7], new_cfg->ets.tsa_tbl[7]); + + for (i = 0; i < SXE2_DSCP_MAX_NUM; i++) { + LOG_INFO_BDF("desired app cfg, qos_mode %d dscp=%d, en=%d\n", i, + new_cfg->qos_mode, + test_bit((int)i, new_cfg->dscp_mapped)); + } + for (i = 0; i < new_cfg->numapps; i++) + LOG_INFO_BDF("desired app cfg, dscp=%d, up=%d\n", + new_cfg->app[i].prot_id, new_cfg->app[i].prio); +} + +STATIC void sxe2_datapath_log_close(struct sxe2_adapter *adapter) +{ + clear_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags); + LOG_DEV_INFO("datapath log disable\n"); +} + +STATIC void sxe2_datapath_log_open(struct sxe2_adapter *adapter) +{ + set_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags); + LOG_DEV_INFO("datapath log enable\n"); +} +#endif + +STATIC void sxe2_spoof_stats_debugfs_dump(struct sxe2_adapter *adapter) +{ + struct sxe2_pf_hw_stats *stats = &adapter->pf_stats.pf_hw_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_stats_update(adapter); + mutex_unlock(&adapter->vsi_ctxt.lock); + + sxe2_repr_vf_vsis_stats_acculate_update(adapter); + + LOG_DEV_INFO("%llu mac spoof packets detected.\n", stats->spoof_mac_packets); + LOG_DEV_INFO("%llu vlan spoof packets detected.\n", + stats->spoof_vlan_packets); +} + +STATIC void sxe2_nic_type_dump(struct 
sxe2_adapter *adapter)
+{
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (hw->is_pop_type)
+		LOG_DEV_INFO("nic is pop\n");
+	else
+		LOG_DEV_INFO("nic is sig\n");
+}
+
+STATIC void sxe2_fw_optical_waring_info_show(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {0};
+	struct optical_warning_info opt_warning_info = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_GET_VENDOR_INFO_CHECK_WARNING, NULL,
+				  0, &opt_warning_info,
+				  sizeof(struct optical_warning_info));
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_DEV_ERR("get fw optical vendor info cmd fail, ret=%d\n", ret);
+		return;
+	}
+
+	LOG_DEV_INFO("optical vendor pn: %s\n", opt_warning_info.vendor_pn);
+	LOG_DEV_INFO("optical vendor name: %s\n", opt_warning_info.vendor);
+	if (opt_warning_info.is_warning) {
+		LOG_DEV_WARN("an unsupported optical module type was detected\n");
+		LOG_DEV_WARN("refer to the sxe2 ethernet adapters and devices user \t"
+			     "guide for a list of supported modules\n");
+	} else {
+		LOG_DEV_INFO("a supported optical module type was detected\n");
+	}
+}
+
+STATIC void sxe2_acl_trace_trigger(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+
+	ret = sxe2_fwc_acl_trace_trigger(adapter);
+	if (ret)
+		LOG_DEV_ERR("sxe2_fwc_acl_trace_trigger failed, ret=%d\n", ret);
+}
+
+STATIC void sxe2_acl_trace_recorder(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+
+	ret = sxe2_fwc_acl_trace_recorder(adapter);
+	if (ret)
+		LOG_DEV_ERR("sxe2_fwc_acl_trace_recorder failed, ret=%d\n", ret);
+}
+
+STATIC void sxe2_acl_dfx_dump(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+
+	ret = sxe2_fwc_acl_dfx_get(adapter);
+	if (ret)
+		LOG_DEV_ERR("sxe2_fwc_acl_dfx_get failed, ret=%d\n", ret);
+}
+
+STATIC void sxe2_com_info(struct sxe2_adapter *adapter)
+{
+	sxe2_com_info_print(&adapter->com_ctxt);
+}
+
+#ifdef SXE2_CFG_DEBUG
+STATIC void sxe2_monitor_stats_close(struct sxe2_adapter *adapter)
+{
+	g_pf_switch_stats = 0;
+}
+
+STATIC void sxe2_monitor_stats_open(struct sxe2_adapter *adapter)
+{
+	g_pf_switch_stats = 1;
+}
+#endif
+
+static struct sxe2_debugfs_command command[] = {
+	{"info dump", sxe2_info_dump},
+	{"vsi dump", sxe2_vsis_dump},
+	{"switch trigger rx", sxe2_fwc_switch_trace_rx_trigger},
+	{"switch trigger tx", sxe2_fwc_switch_trace_tx_trigger},
+	{"switch recorder", sxe2_fwc_switch_trace_recorder},
+	{"switch rule dump", sxe2_switch_rule_hw_dump},
+	{"switch recipe get", sxe2_switch_recipe_dump},
+	{"switch profile recipe map", sxe2_switch_profile_recipemap_dump},
+	{"switch share id", sxe2_switch_share_id_dump},
+	{"txsched tree dump", sxe2_txsched_tree_dump},
+	{"rss trigger", sxe2_fwc_rss_trace_trigger},
+	{"rss recorder", sxe2_fwc_rss_trace_recorder},
+	{"rss xlt2 dump", sxe2_rss_xlt2_dump},
+	{"rss vsig dump", sxe2_rss_vsig_dump},
+	{"rss prof dump", sxe2_rss_prof_dump},
+	{"rss mask dump", sxe2_rss_mask_dump},
+	{"fnav hw sts", sxe2_fwc_fnav_hw_sts},
+	{"fnav xlt2 dump", sxe2_fnav_xlt2_dump},
+	{"fnav vsig dump", sxe2_fnav_vsig_dump},
+	{"fnav prof dump", sxe2_fnav_prof_dump},
+	{"fnav mask dump", sxe2_fnav_mask_dump},
+	{"fnav stats dump", sxe2_fnav_stats_dump},
+#ifdef CONFIG_RFS_ACCEL
+	{"arfs stats dump", sxe2_arfs_stats_dump},
+#endif
+	{"rxft ppe info", sxe2_fwc_rxft_ppe_info},
+	{"ppe dfx", sxe2_fwc_ppe_dfx_show},
+	{"lldp fw stats", sxe2_lldp_fw_stats},
+	{"lldp remote mibs dump", sxe2_lldp_remote_mibs_dump},
+	{"cdev show", sxe2_debugfs_cdev_show},
+	{"pf state", sxe2_pf_state_get},
+	{"lag dump", sxe2_pf_lag_dump},
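+	/*
+	 * Each entry maps a literal command string to a dump/trigger callback.
+	 * Commands are matched verbatim (trailing newline stripped) against
+	 * writes to the per-PF debugfs "command" file, e.g.:
+	 *   echo "lag dump" > /sys/kernel/debug/<SXE2_DRV_NAME>/<pci-addr>/command
+	 * where the root directory name comes from SXE2_DRV_NAME in
+	 * sxe2_debugfs_init() and the per-PF directory from pci_name().
+	 */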
{"vf node show", sxe2_vf_nodes_show}, + {"nic type", sxe2_nic_type_dump}, + + {"optical_info", sxe2_fw_optical_waring_info_show}, + {"com info", sxe2_com_info}, + + {"acl trigger", sxe2_acl_trace_trigger}, + {"acl recorder", sxe2_acl_trace_recorder}, + {"acl dfx", sxe2_acl_dfx_dump}, + +#ifdef SXE2_CFG_DEBUG + {"switch dfx irq enable", sxe2_switch_dfx_irq_enable}, + {"switch dfx irq disable", sxe2_switch_dfx_irq_disable}, + {"fnav trigger", sxe2_fwc_fnav_trace_trigger}, + {"fnav recorder", sxe2_fwc_fnav_trace_recorder}, + {"fnav hw clear", sxe2_fnav_hw_clear}, + {"fnav hw replay", sxe2_fnav_hw_replay}, + {"heart close", sxe2_heart_close}, + {"heart open", sxe2_heart_open}, + {"corer trigger", sxe2_corer_trigger}, + {"pfr trigger", sxe2_pfr_trigger}, + {"rx etype rule add", sxe2_etype_rx_rule_add}, + {"rx etype rule del", sxe2_etype_rx_rule_del}, + {"dcbx agent on", sxe2_lldp_dcbx_agent_on}, + {"dcbx agent off", sxe2_lldp_dcbx_agent_off}, + {"dcbx agent status", sxe2_lldp_dcbx_agent_is_on}, + {"dcb show", sxe2_dcb_info_show}, + {"datapath log close", sxe2_datapath_log_close}, + {"datapath log open", sxe2_datapath_log_open}, + {"stats close", sxe2_monitor_stats_close}, + {"stats open", sxe2_monitor_stats_open}, +#endif + {"spoof stats", sxe2_spoof_stats_debugfs_dump}, + {"", NULL}, +}; + +static s32 sxe2_debugfs_command_match(struct sxe2_adapter *adapter, s8 *cmd_buf, + size_t size) +{ + u32 i; + + for (i = 0; strlen(command[i].string) != 0; i++) { + if (!strcmp(cmd_buf, command[i].string)) { + command[i].debugfs_cb(adapter); + goto l_end; + } + } + + return -EINVAL; + +l_end: + return 0; +} + +static void sxe2_debugfs_command_help_info(struct sxe2_adapter *adapter) +{ + u32 i; + + LOG_DEV_INFO("available commands:\n"); + + for (i = 0; strlen(command[i].string) != 0; i++) + LOG_DEV_INFO("\t %s\n", command[i].string); +} + +STATIC ssize_t sxe2_debugfs_command_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + s8 *cmd_buf, *cmd_buf_tmp; + struct sxe2_adapter *adapter = file->private_data; + + if (*ppos != 0) { + LOG_DEV_ERR(" don't allow partial writes\n, *ppos!=NULL"); + return -EINVAL; + } + + cmd_buf = memdup_user(buf, count + 1); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + cmd_buf[count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = (size_t)cmd_buf_tmp - (size_t)cmd_buf + 1; + } + ret = (ssize_t)count; + + if (sxe2_debugfs_command_match(adapter, cmd_buf, count)) { + LOG_DEV_INFO("unknown or invalid command '%s'\n", cmd_buf); + sxe2_debugfs_command_help_info(adapter); + ret = -EINVAL; + } + + kfree(cmd_buf); + return ret; +} + +static const struct file_operations sxe2_debugfs_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = sxe2_debugfs_command_write, +}; + +void sxe2_debugfs_create_common_file(struct sxe2_adapter *adapter) +{ +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + if (IS_ERR(debugfs_create_file("command", 0600, adapter->sxe2_debugfs_pf, + adapter, &sxe2_debugfs_command_fops))) { + LOG_DEV_ERR("debugfs file create failed\n"); + } + + return; +#endif +} + +static char *sxe2_com_mode_to_str(enum sxe2_com_module com_mode) +{ + if (com_mode >= ARRAY_SIZE(g_sxe2_com_mode_to_str)) + return "unknown"; + + return g_sxe2_com_mode_to_str[com_mode]; +} + +static s32 sxe2_com_str_to_mode(char *cmd_buf, enum sxe2_com_module *new_mode) +{ + s32 ret; + u32 i; + + for (i = 0; i < SXE2_COM_MODULE_INVAL; i++) { + if (!strcmp(cmd_buf, 
+			*new_mode = (enum sxe2_com_module)i;
+			ret = 0;
+			goto end;
+		}
+	}
+
+	ret = -EINVAL;
+
+end:
+	return ret;
+}
+
+STATIC ssize_t sxe2_debugfs_drv_mode_read(struct file *file, char __user *buf,
+					  size_t count, loff_t *ppos)
+{
+	struct sxe2_adapter *adapter = file->private_data;
+	struct sxe2_fwc_drv_mode_resp resp = {};
+	char tmp_buf[SXE2_COM_MODE_NAME_SIZE];
+	ssize_t len = 0;
+	s32 ret = 0;
+
+	len = snprintf(tmp_buf, SXE2_COM_MODE_NAME_SIZE, "current mode:%s\n",
+		       sxe2_com_mode_to_str(adapter->drv_mode));
+	ret = __sxe2_drv_mode_get(adapter, &resp, sizeof(resp));
+	len += snprintf(tmp_buf + len, SXE2_COM_MODE_NAME_SIZE - len,
+			"configured mode:%s\n",
+			ret ? "get failed" : sxe2_com_mode_to_str(resp.drv_mode));
+
+	return simple_read_from_buffer(buf, count, ppos, tmp_buf, len);
+}
+
+STATIC bool sxe2_drv_mode_check(char *cmd_buf)
+{
+	if ((!strcmp(cmd_buf, SXE2_COM_KERNEL_MODE_NAME)) ||
+	    (!strcmp(cmd_buf, SXE2_COM_MIXED_MODE_NAME)) ||
+	    (!strcmp(cmd_buf, SXE2_COM_UNDEFINED_MODE_NAME)))
+		return true;
+
+	return false;
+}
+
+STATIC s32 sxe2_debugfs_drv_mode_set(struct sxe2_adapter *adapter, char *cmd_buf)
+{
+	s32 ret = 0;
+	enum sxe2_com_module new_mode = SXE2_COM_MODULE_INVAL;
+
+	if (sxe2_drv_mode_check(cmd_buf)) {
+		ret = sxe2_com_str_to_mode(cmd_buf, &new_mode);
+		if (ret) {
+			LOG_ERROR_BDF("drv mode buf error.\n");
+			goto end;
+		}
+
+		ret = sxe2_drv_mode_set(adapter, new_mode);
+		if (ret) {
+			LOG_ERROR_BDF("drv mode configuration failed.\n");
+			goto end;
+		}
+
+		LOG_DEV_INFO("current mode:%s configured mode: %s\n",
+			     g_sxe2_com_mode_to_str[adapter->drv_mode],
+			     g_sxe2_com_mode_to_str[new_mode]);
+	} else {
+		LOG_DEV_INFO("unknown or invalid command '%s'\n", cmd_buf);
+		LOG_DEV_INFO("supported commands: %s, %s.\n",
+			     SXE2_COM_KERNEL_MODE_NAME, SXE2_COM_MIXED_MODE_NAME);
+		ret = -EINVAL;
+	}
+
+end:
+	return ret;
+}
+
+STATIC ssize_t sxe2_debugfs_drv_mode_write(struct file *file, const char __user *buf,
+					   size_t count, loff_t *ppos)
+{
+	ssize_t ret, tmp_ret;
+	s8 *cmd_buf, *cmd_buf_tmp;
+	struct sxe2_adapter *adapter = file->private_data;
+
+	if (*ppos != 0) {
+		LOG_DEV_ERR("don't allow partial writes, *ppos != 0\n");
+		return -EINVAL;
+	}
+
+	/* bound the copy to count bytes and NUL-terminate in one step */
+	cmd_buf = memdup_user_nul(buf, count);
+	if (IS_ERR(cmd_buf))
+		return PTR_ERR(cmd_buf);
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = (size_t)cmd_buf_tmp - (size_t)cmd_buf + 1;
+	}
+
+	ret = (ssize_t)count;
+
+	tmp_ret = (ssize_t)sxe2_debugfs_drv_mode_set(adapter, cmd_buf);
+	if (tmp_ret)
+		ret = tmp_ret;
+
+	kfree(cmd_buf);
+	return ret;
+}
+
+static const struct file_operations sxe2_debugfs_drv_mode_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = sxe2_debugfs_drv_mode_read,
+	.write = sxe2_debugfs_drv_mode_write,
+};
+
+void sxe2_debugfs_create_drv_mode_file(struct sxe2_adapter *adapter)
+{
+#if defined(CONFIG_DEBUG_FS) || defined(PCLINT)
+	if (IS_ERR(debugfs_create_file("drv_mode", 0600, adapter->sxe2_debugfs_pf,
+				       adapter, &sxe2_debugfs_drv_mode_fops))) {
+		LOG_DEV_ERR("debugfs file create failed\n");
+	}
+
+	return;
+#endif
+}
+
+void sxe2_debugfs_pf_init(struct sxe2_adapter *adapter)
+{
+#if defined(CONFIG_DEBUG_FS) || defined(PCLINT)
+	const char *name = pci_name(adapter->pdev);
+
+	adapter->sxe2_debugfs_pf = debugfs_create_dir(name, sxe2_debugfs_root);
+	if (IS_ERR(adapter->sxe2_debugfs_pf)) {
+		LOG_ERROR("init of pf debugfs failed\n");
+		goto l_end;
+	}
+
+	sxe2_debugfs_create_common_file(adapter);
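+	/*
+	 * Two per-PF files are created here: "command" (write-only command
+	 * dispatcher above) and "drv_mode" (read/write driver mode). A usage
+	 * sketch, assuming the mode name literals match the
+	 * SXE2_COM_*_MODE_NAME definitions:
+	 *   cat  .../drv_mode               # "current mode:... configured mode:..."
+	 *   echo <kernel-mode-name> > .../drv_mode
+	 */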
+	sxe2_debugfs_create_drv_mode_file(adapter);
+
+l_end:
+	return;
+#endif
+}
+
+void sxe2_debugfs_pf_exit(struct sxe2_adapter *adapter)
+{
+#if defined(CONFIG_DEBUG_FS) || defined(PCLINT)
+	debugfs_remove_recursive(adapter->sxe2_debugfs_pf);
+	adapter->sxe2_debugfs_pf = NULL;
+#endif
+}
+
+void sxe2_debugfs_init(void)
+{
+	sxe2_debugfs_root = debugfs_create_dir(SXE2_DRV_NAME, NULL);
+	if (IS_ERR(sxe2_debugfs_root))
+		LOG_ERROR_D("init of debugfs failed.\n");
+	else
+		LOG_INFO_D("init of debugfs succeeded.\n");
+}
+
+void sxe2_debugfs_exit(void)
+{
+	debugfs_remove_recursive(sxe2_debugfs_root);
+	sxe2_debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..047a8fe65765b3814c747867b9ab7cd5a0974be8
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_debugfs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_debugfs.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2_DEBUGFS_H__
+#define __SXE2_DEBUGFS_H__
+
+struct sxe2_debugfs_command {
+	s8 string[50];
+	void (*debugfs_cb)(struct sxe2_adapter *adapter);
+};
+
+void sxe2_debugfs_init(void);
+
+void sxe2_debugfs_exit(void);
+
+void sxe2_debugfs_create_common_file(struct sxe2_adapter *adapter);
+
+void sxe2_debugfs_create_drv_mode_file(struct sxe2_adapter *adapter);
+
+void sxe2_debugfs_pf_init(struct sxe2_adapter *adapter);
+
+void sxe2_debugfs_pf_exit(struct sxe2_adapter *adapter);
+
+void sxe2_etype_rx_rule_add(struct sxe2_adapter *adapter);
+
+void sxe2_etype_rx_rule_del(struct sxe2_adapter *adapter);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.c
new file mode 100644
index 0000000000000000000000000000000000000000..42b8f86b02045c255376b867381e26b0612edbeb
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_dev_ctrl.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_dev_ctrl.h" +#include "sxe2_log.h" +#include "sxe2_hw.h" +#include "sxe2_common.h" +#include "sxe2_mbx_msg.h" +#include "sxe2_mbx_channel.h" +#include "sxe2_netdev.h" +#include "sxe2_macsec.h" +#include "sxe2_ipsec.h" +#include "sxe2_ddp.h" +#include "sxe2_eswitch.h" +#include "sxe2_ethtool.h" +#include "sxe2_lag.h" +#include "sxe2_com_cdev.h" +#include "sxe2_acl.h" + +#ifdef SXE2_CFG_DEBUG +extern int switch_heart_check; +#endif + +#ifndef secs_to_jiffies +#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) +#endif + +#define SXE2_HEARTBEAT_TIMEOUT_SECS (30) + +#define SXE2_DEV_CTRL_TIMEOUT_SECS (2) +#define SXE2_DEV_CTRL_TIMER_PERIOD (secs_to_jiffies(SXE2_DEV_CTRL_TIMEOUT_SECS)) + +#ifndef SXE2_TEST +#define SXE2_WAIT_RESET_DONE_TIMEOUT (100000) +#else +#define SXE2_WAIT_RESET_DONE_TIMEOUT (200) +#endif + +#ifndef SXE2_TEST +#define SXE2_WAIT_FW_INIT_TIMEOUT_MS (30000) +#else +#define SXE2_WAIT_FW_INIT_TIMEOUT_MS (200) +#endif + +#define SXE2_STOP_DROP_TIMEOUT 1000 +#define SXE2_STOP_DROP_DONE_INTERVAL 1 +#define SXE2_RESET_WAIT_INTERVAL (5) +#define SXE2_RESET_WAIT_TIMES (200) + +struct workqueue_struct *sxe2_dev_ctrl_workqueue; + +void sxe2_vf_stop(struct sxe2_vf_node *vf_node) +{ + set_bit(SXE2_VF_STATE_DIS, vf_node->states); + clear_bit(SXE2_VF_STATE_ACTIVE, vf_node->states); + + sxe2_vf_queues_stop(vf_node); +} + +void sxe2_vfs_stop(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_context *vf_ctxt = &adapter->vf_ctxt; + struct sxe2_vf_node *vf_node; + u16 idx; + + mutex_lock(&vf_ctxt->vfs_lock); + + if (!test_and_set_bit(SXE2_FLAG_SRIOV_VFS_DISABLED, adapter->flags)) { + sxe2_eswitch_stop(adapter); + + sxe2_for_each_vf(adapter, idx) + { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + + vf_node = sxe2_vf_node_get(adapter, idx); + + sxe2_vf_stop(vf_node); + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } + } + + mutex_unlock(&vf_ctxt->vfs_lock); + + LOG_DEV_INFO("vf cnt:%u stops done.\n", adapter->vf_ctxt.num_vfs); +} + +void sxe2_pf_stop(struct sxe2_adapter *adapter, u16 stop_flag) +{ + LOG_DEBUG_BDF("pf sw process stop start.\n"); + + mutex_lock(&adapter->dev_ctrl_ctxt.pf_lock); + if (test_and_set_bit(SXE2_PF_STOPPED, &adapter->dev_ctrl_ctxt.flag)) + goto l_end; + + if (stop_flag & SXE2_PF_STOP_CANCEL_CMD_QUEUE) + sxe2_wait_task_cancel_all(adapter); + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_KERNEL) + sxe2_com_disable(&adapter->com_ctxt); + + sxe2_dcb_set_state(adapter, SXE2_DCB_STATE_RESET, true); + if (test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) + sxe2_dcbx_agent_event_deinit(adapter); + + sxe2_lldp_agent_event_deinit(adapter); + sxe2_log_export_deinit(adapter); + sxe2_monitor_stop(adapter); + + sxe2_lag_stop(adapter); + + sxe2_ptp_stop(adapter); + + (void)sxe2_vsi_disable_all(adapter); + + adapter->pf_stats.stat_prev_loaded = false; + adapter->pf_stats.dcb_stats.prev_stat_loaded = false; + + sxe2_vfs_stop(adapter); + + sxe2_ipsec_stop(adapter); + + if (stop_flag & SXE2_PF_STOP_RESET_NOTICE_RDMA) + sxe2_rdma_aux_send_reset_event(adapter); + + sxe2_cmd_channels_disable(adapter); + + sxe2_event_irq_disable(adapter); + +l_end: + LOG_DEBUG_BDF("pf sw process stopped.\n"); + mutex_unlock(&adapter->dev_ctrl_ctxt.pf_lock); +} + +STATIC void sxe2_pf_sw_cfg_clear(struct sxe2_adapter *adapter) +{ + sxe2_fnav_enter_reset(adapter, true); + sxe2_fnav_flow_ctxt_clean(adapter); + 
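+	/*
+	 * Teardown order as coded here: quiesce the flow director (fnav) and
+	 * drop its cached flows first, then clear the aRFS and RSS software
+	 * contexts so the post-reset rebuild starts from empty tables.
+	 */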
sxe2_arfs_clean(adapter); + sxe2_rss_ppp_ctxt_clean(&adapter->rss_flow_ctxt); + + sxe2_txsched_tree_clean(adapter); +#ifdef HAVE_MACSEC_SUPPORT + sxe2_macsec_enter_reset(adapter, true); +#endif + sxe2_ipsec_sa_clean(adapter); +} + +STATIC s32 sxe2_pf_hw_cfg_clear(struct sxe2_adapter *adapter) +{ + return sxe2_fwc_clear_pf_cfg(adapter); +} + +STATIC s32 sxe2_pf_cfg_clear(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + sxe2_pf_sw_cfg_clear(adapter); + + ret = sxe2_pf_hw_cfg_clear(adapter); + + return ret; +} + +static s32 sxe2_vfs_rebuild(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_context *vf_ctxt = &adapter->vf_ctxt; + struct sxe2_vf_node *vf_node; + u16 idx; + s32 ret = 0; + + mutex_lock(&vf_ctxt->vfs_lock); + + if (test_bit(SXE2_FLAG_SRIOV_VFS_DISABLED, adapter->flags)) { + if (!adapter->vf_ctxt.num_vfs) { + LOG_INFO_BDF("no enable vf do nothing.\n"); + goto l_clear_bit; + } + sxe2_vfs_repr_decfg(adapter); + + sxe2_for_each_vf(adapter, idx) + { + ret = sxe2_wait_vfr_done(adapter, idx); + if (ret) { + if (!pci_num_vf(adapter->pdev)) { + LOG_DEV_ERR("vf:%u wait vfr done fail due\t" + "to vf pci dev removed.\n", + idx); + ret = 0; + } + goto l_unlock; + } + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + ret = sxe2_vf_clean_and_rebuild(vf_node, false); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + if (ret) { + LOG_DEV_ERR("vf:%u rebuild failed during function\t" + "level reset.\n", + idx); + goto l_unlock; + } + } + + ret = sxe2_eswitch_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("eswitch rebuild failed\n"); + goto l_unlock; + } + + if (test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags) && + adapter->vsi_ctxt.main_vsi) { + ret = sxe2_fnav_filter_replay(adapter->vsi_ctxt.main_vsi, + true); + if (ret) { + LOG_DEV_ERR("fnav filter action to vf rebuild\t" + "failed\n"); + goto l_unlock; + } + } + + sxe2_vfs_active(adapter); + } + +l_clear_bit: + clear_bit(SXE2_FLAG_SRIOV_VFS_DISABLED, adapter->flags); + LOG_INFO_BDF("vf cnt:%u rebuild done.\n", pci_num_vf(adapter->pdev)); + +l_unlock: + mutex_unlock(&vf_ctxt->vfs_lock); + return ret; +} + +s32 sxe2_lfc_rebuild_set(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_fwc_lfc_info lfc = {0}; + struct sxe2_cmd_params cmd = {0}; + + if (adapter->lfc_ctxt.rx_en || adapter->lfc_ctxt.tx_en) { + lfc.fc_mode = SXE2_FC_MODE_LFC; + lfc.rx_en = adapter->lfc_ctxt.rx_en; + lfc.tx_en = adapter->lfc_ctxt.tx_en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LINK_FLOW_CONTROL_SET, &lfc, + sizeof(lfc), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("fc set cmd fail, ret=%d\n", ret); + goto end; + } + } + +end: + return ret; +} + +STATIC s32 sxe2_pf_cmd_chnl_reactive(struct sxe2_adapter *adapter) +{ + s32 ret; + + pci_restore_msi_state(adapter->pdev); + + sxe2_event_irq_enable(adapter); + + ret = sxe2_cmd_channels_enable(adapter); + if (ret) + goto l_cmd_channels_enable_err; + + return 0; + +l_cmd_channels_enable_err: + sxe2_event_irq_disable(adapter); + (void)sxe2_corer_check(adapter); + return ret; +} + +STATIC void sxe2_pf_cmd_chnl_deactive(struct sxe2_adapter *adapter) +{ + sxe2_cmd_channels_disable(adapter); + sxe2_event_irq_disable(adapter); +} + +STATIC s32 sxe2_pf_recfg(struct sxe2_adapter *adapter) +{ + s32 ret; + u32 mtu; + struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev; + struct sxe2_dcbx_cfg *curr_cfg; + + ret = sxe2_pf_cfg_clear(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_pf_cfg_clear rq failed, ret=%d\n", ret); + goto l_end; + } + + 
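+	/*
+	 * Reconfiguration sequence after a reset, as coded below: refresh the
+	 * firmware version and PF stats, reload the DDP package, re-query
+	 * device/function caps, rebuild the Tx scheduler topology, then
+	 * restore DCB (or LFC when PFC is off) and finally the MTU.
+	 */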
sxe2_fw_version_get(adapter); + + sxe2_hw_pf_stats_update(adapter); + + ret = sxe2_ddp_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("ddp pkg process failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_caps_get(adapter); + if (ret) { + LOG_DEV_ERR("get device and function caps failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_txsched_dflt_topo_init(adapter); + if (ret) { + LOG_DEV_ERR("txsched init failed, ret=%d\n", ret); + goto l_end; + } + + curr_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + if (sxe2_is_safe_mode(adapter)) + sxe2_dcb_sw_safe_mode_cfg(adapter); + + ret = sxe2_dcb_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("dcb rebuild failed, ret=%d\n", ret); + goto l_end; + } + + if (!(curr_cfg->pfc.enable)) { + ret = sxe2_lfc_rebuild_set(adapter); + if (ret) + LOG_DEV_ERR("lfc rebuild failed, ret=%d\n", ret); + } + + mtu = netdev->mtu; + ret = sxe2_set_mtu_cfg(adapter, mtu); + if (ret) + LOG_DEV_ERR("rebuild mtu:%d, ret: %d\n", mtu, ret); + +l_end: + return ret; +} + +STATIC s32 sxe2_pf_vsi_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_vsi *main_vsi; + + rtnl_lock(); + ret = sxe2_vsi_rebuild_by_type(adapter, SXE2_VSI_T_PF, true); + if (ret) { + rtnl_unlock(); + goto l_end; + } + rtnl_unlock(); + + main_vsi = adapter->vsi_ctxt.main_vsi; + ret = sxe2_netdev_q_cnt_set(main_vsi->netdev, main_vsi->txqs.q_cnt, + main_vsi->rxqs.q_cnt, false); + if (ret) { + LOG_DEV_ERR("pf netdev q cnt set failed ret:%d.\n", ret); + goto l_end; + } + + ret = sxe2_macvlan_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_macvlan_rebuild failed ret:%d.\n", ret); + goto l_end; + } + + ret = sxe2_vsi_enable_by_type(adapter, SXE2_VSI_T_PF); + if (ret) { + LOG_DEV_ERR("pf enable failed ret:%d.\n", ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_pf_feature_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret; + + ret = sxe2_dcb_maxrate_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("dcb max tc rate rebuild failed, ret:%d\n", ret); + goto l_end; + } + + ret = sxe2_pf_eth_fnav_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("ctrl vsi rebuild failed, ret:%d\n", ret); + goto l_end; + } + sxe2_fnav_enter_reset(adapter, false); + + (void)sxe2_ptp_rebuild(adapter); + + ret = sxe2_rdma_aux_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_rdma_aux_rebuild failed. ret:%d\n", ret); + goto l_end; + } + + sxe2_lag_rebuild(adapter); + +#ifdef HAVE_MACSEC_SUPPORT + (void)sxe2_macsec_rebuild(adapter); +#endif + + (void)sxe2_ipsec_rebuild(adapter); + + ret = sxe2_log_export_init(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_log_export_init failed. ret:%d\n", ret); + goto l_log_export_init_failed; + } + + ret = sxe2_vfs_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("vfs rebuild failed. 
ret:%d\n", ret); + goto l_vfs_rebuild_failed; + } + + sxe2_dcb_set_state(adapter, SXE2_DCB_STATE_READY, true); + +#ifdef HAVE_MACSEC_SUPPORT + sxe2_macsec_enter_reset(adapter, false); +#endif + + return ret; + +l_vfs_rebuild_failed: + sxe2_log_export_deinit(adapter); +l_log_export_init_failed: + sxe2_rdma_aux_delete(&adapter->aux_ctxt.cdev_info); + +l_end: + return ret; +} + +s32 sxe2_pf_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret; + + mutex_lock(&adapter->dev_ctrl_ctxt.pf_lock); + + LOG_DEBUG_BDF("pf rebuild start.\n"); + + if (adapter->dev_ctrl_ctxt.dev_state == SXE2_DEVSTATE_FAULT) { + LOG_WARN_BDF("sxe2 nic fault.\n"); + ret = -EIO; + goto l_end; + } + ret = sxe2_pf_cmd_chnl_reactive(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_pf_cmd_chnl_reactive failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_pf_recfg(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_pf_recfg failed, ret=%d\n", ret); + goto l_pf_recfg_failed; + } + + ret = sxe2_pf_vsi_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_pf_vsi_rebuild failed, ret=%d\n", ret); + goto l_vsi_comm_roll_back; + } + + if (sxe2_is_safe_mode(adapter)) + goto l_rebuild_safe_mode; + + ret = sxe2_pf_feature_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("sxe2_pf_feature_rebuild failed, ret=%d\n", ret); + goto l_vsi_comm_roll_back; + } + +l_rebuild_safe_mode: + if (sxe2_is_safe_mode(adapter)) + (void)sxe2_ipsec_rebuild(adapter); + + sxe2_monitor_start(adapter); + + sxe2_com_enable(&adapter->com_ctxt); + + clear_bit(SXE2_PF_STOPPED, &adapter->dev_ctrl_ctxt.flag); + + goto l_end; + +l_vsi_comm_roll_back: + (void)sxe2_vsi_disable_all(adapter); + sxe2_txsched_tree_clean(adapter); +l_pf_recfg_failed: + sxe2_pf_cmd_chnl_deactive(adapter); + (void)sxe2_corer_check(adapter); +l_end: + mutex_unlock(&adapter->dev_ctrl_ctxt.pf_lock); + LOG_DEBUG_BDF("pf rebuild end.\n"); + + return ret; +} + +void sxe2_dev_state_set(struct sxe2_adapter *adapter, + enum sxe2_dev_state new_dev_state, + enum sxe2_reset_type new_reset_type) +{ + enum sxe2_dev_state old_state; + enum sxe2_reset_type old_reset_type; + unsigned long flags; + + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.state_lock, flags); + + old_state = adapter->dev_ctrl_ctxt.dev_state; + old_reset_type = adapter->dev_ctrl_ctxt.reset_type; + + if (old_state == SXE2_DEVSTATE_FAULT) + goto l_unlock; + + switch (new_dev_state) { + case SXE2_DEVSTATE_INITIAL: + if (old_state == SXE2_DEVSTATE_RESETTING && + old_reset_type == SXE2_RESET_CORER) { + adapter->dev_ctrl_ctxt.dev_state = SXE2_DEVSTATE_INITIAL; + adapter->dev_ctrl_ctxt.reset_type = SXE2_RESET_INVAL; + } + break; + case SXE2_DEVSTATE_ACCESSIBLE: + if (old_state == SXE2_DEVSTATE_INITIAL || + old_state == SXE2_DEVSTATE_RUNNING || + (old_state == SXE2_DEVSTATE_RESETTING && + old_reset_type == SXE2_RESET_PFR)) { + adapter->dev_ctrl_ctxt.dev_state = SXE2_DEVSTATE_ACCESSIBLE; + adapter->dev_ctrl_ctxt.reset_type = SXE2_RESET_INVAL; + } + break; + case SXE2_DEVSTATE_RUNNING: + if (old_state == SXE2_DEVSTATE_ACCESSIBLE) { + adapter->dev_ctrl_ctxt.dev_state = SXE2_DEVSTATE_RUNNING; + adapter->dev_ctrl_ctxt.reset_type = SXE2_RESET_INVAL; + } + break; + case SXE2_DEVSTATE_ABNORMAL: + if (new_reset_type == SXE2_RESET_CORER) { + if (old_state == SXE2_DEVSTATE_ACCESSIBLE || + old_state == SXE2_DEVSTATE_RUNNING || + (old_state == SXE2_DEVSTATE_ABNORMAL && + old_reset_type == SXE2_RESET_PFR) || + (old_state == SXE2_DEVSTATE_RESETTING && + old_reset_type == SXE2_RESET_PFR)) { + adapter->dev_ctrl_ctxt.dev_state = + SXE2_DEVSTATE_ABNORMAL; + adapter->dev_ctrl_ctxt.reset_type 
= new_reset_type; + } + } + + if (new_reset_type == SXE2_RESET_PFR) { + if (old_state == SXE2_DEVSTATE_ACCESSIBLE || + old_state == SXE2_DEVSTATE_RUNNING) { + adapter->dev_ctrl_ctxt.dev_state = + SXE2_DEVSTATE_ABNORMAL; + adapter->dev_ctrl_ctxt.reset_type = new_reset_type; + } + } + break; + case SXE2_DEVSTATE_RESETTING: + if (new_reset_type == SXE2_RESET_CORER) { + if (old_state == SXE2_DEVSTATE_ACCESSIBLE || + old_state == SXE2_DEVSTATE_RUNNING || + old_state == SXE2_DEVSTATE_ABNORMAL || + (old_state == SXE2_DEVSTATE_RESETTING && + old_reset_type == SXE2_RESET_PFR)) + adapter->dev_ctrl_ctxt.dev_state = + SXE2_DEVSTATE_RESETTING; + adapter->dev_ctrl_ctxt.reset_type = new_reset_type; + } + + if (new_reset_type == SXE2_RESET_PFR) { + if (old_state == SXE2_DEVSTATE_ABNORMAL && + old_reset_type == SXE2_RESET_PFR) { + adapter->dev_ctrl_ctxt.dev_state = + SXE2_DEVSTATE_RESETTING; + adapter->dev_ctrl_ctxt.reset_type = new_reset_type; + } + } + break; + case SXE2_DEVSTATE_FAULT: + adapter->dev_ctrl_ctxt.dev_state = SXE2_DEVSTATE_FAULT; + break; + default: + SXE2_BUG(); + break; + } + +l_unlock: + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.state_lock, flags); + LOG_DEBUG_BDF("device state changes from %d(state)-%d(reset_type) to\t" + "%d(state)-%d(reset_type), final state is\t" + "%d(state)-%d(reset_type)\n", + old_state, old_reset_type, new_dev_state, new_reset_type, + adapter->dev_ctrl_ctxt.dev_state, + adapter->dev_ctrl_ctxt.reset_type); +} + +void sxe2_dev_state_get(struct sxe2_adapter *adapter, enum sxe2_dev_state *state, + enum sxe2_reset_type *reset_type) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.state_lock, flags); + if (state) + *state = adapter->dev_ctrl_ctxt.dev_state; + if (reset_type) + *reset_type = adapter->dev_ctrl_ctxt.reset_type; + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.state_lock, flags); +} + +STATIC void sxe2_heartbeat_update(struct sxe2_adapter *adapter) +{ + u32 heartbeat_value; + + heartbeat_value = sxe2_hw_heartbeat_get(&adapter->hw); + + adapter->dev_ctrl_ctxt.last_heartbeat_value = heartbeat_value; + adapter->dev_ctrl_ctxt.last_heartbeat_time = jiffies; +} + +s32 sxe2_wait_fw_init(struct sxe2_adapter *adapter) +{ + unsigned long end = jiffies + msecs_to_jiffies(SXE2_WAIT_FW_INIT_TIMEOUT_MS); + struct sxe2_hw *hw = &adapter->hw; + u32 fw_state; + s32 ret = 0; + + do { + cond_resched(); + + fw_state = sxe2_fw_state_get(hw) & SXE2_FW_STATE_MASK; + if (fw_state == SXE2_FW_STATE_FINISH) { + LOG_DEBUG_BDF("fw state is finished.\n"); + ret = 0; + goto out; + } else if (unlikely(fw_state == SXE2_FW_STATE_ABNORMAL)) { + LOG_DEV_INFO("fw state is abnormal.\n"); + ret = -EBUSY; + goto out; + } else if (unlikely(fw_state == SXE2_FW_STATE_MASK)) { + LOG_DEV_INFO("device offline.\n"); + ret = -ENODEV; + goto out; + } + } while (!time_after(jiffies, end)); + + ret = -ETIMEDOUT; + LOG_DEV_INFO("wait Fw init timeout.\n"); + +out: + LOG_DEBUG_BDF("fw state is 0x%x, ret is:%d.\n", fw_state, ret); + return ret; +} + +s32 sxe2_stop_drop(struct sxe2_adapter *adapter) +{ + u16 timeout = SXE2_STOP_DROP_TIMEOUT; + struct sxe2_hw *hw = &adapter->hw; + u16 cnt; + + if (sxe2_hw_stop_drop_done(hw)) + return 0; + + sxe2_hw_stop_drop(hw); + + for (cnt = 0; cnt < timeout; cnt++) { + msleep(SXE2_STOP_DROP_DONE_INTERVAL); + if (sxe2_hw_stop_drop_done(hw)) + break; + } + if (cnt == timeout) { + LOG_DEV_ERR("stop PCIe drop timeout.\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void sxe2_trigger_reset(struct sxe2_adapter *adapter, + enum sxe2_reset_type 
reset_type) +{ + struct sxe2_hw *hw = &adapter->hw; + + switch (reset_type) { + case SXE2_RESET_PFR: + adapter->dev_ctrl_ctxt.pfr_cnt++; + sxe2_hw_trigger_pfr(hw); + break; + case SXE2_RESET_CORER: + { + if (hw->is_pop_type) { + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_FAULT, + SXE2_RESET_INVAL); + LOG_INFO_BDF("device is pop mode, can not do core\t" + "reset.\n"); + } else { + sxe2_hw_trigger_corer(hw); + } + } + break; + default: + LOG_DEV_ERR("invalid reset type %d\n", reset_type); + break; + } +} + +static s32 sxe2_reset_done(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type) +{ + s32 ret = 0; + struct sxe2_hw *hw = &adapter->hw; + + switch (reset_type) { + case SXE2_RESET_PFR: + ret = sxe2_hw_pfr_done(hw); + break; + case SXE2_RESET_CORER: + ret = sxe2_hw_corer_done(hw); + break; + default: + LOG_DEV_ERR("invalid reset type %d\n", reset_type); + break; + } + + return ret; +} + +s32 sxe2_wait_reset_done(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type) +{ + s32 ret; + unsigned long end; + u32 delay_ms = SXE2_WAIT_RESET_DONE_TIMEOUT; + + end = jiffies + msecs_to_jiffies(delay_ms); + do { + cond_resched(); + + ret = sxe2_reset_done(adapter, reset_type); + if (ret) { + if (unlikely(ret == -EBUSY)) { + LOG_WARN_BDF("wait pfr done. but core reset\t" + "occur.\n"); + goto out; + } else { + ret = 0; + LOG_DEBUG_BDF("reset success. reset type:%u.\n", + reset_type); + goto out; + } + } + } while (!time_after(jiffies, end)); + + ret = -ETIMEDOUT; + LOG_DEV_ERR("wait reset done timeout. reset type:%u\n", reset_type); + +out: + return ret; +} + +STATIC void sxe2_initial_proc(struct sxe2_adapter *adapter) +{ + if (sxe2_stop_drop(adapter)) { + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_FAULT, SXE2_RESET_INVAL); + } else { + sxe2_heartbeat_update(adapter); + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_ACCESSIBLE, + SXE2_RESET_INVAL); + } +} + +STATIC s32 sxe2_accessible_proc(struct sxe2_adapter *adapter) +{ + s32 ret; + + if (adapter->dev_ctrl_ctxt.rebuild_failed) { + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_FAULT, SXE2_RESET_INVAL); + return 0; + } + + ret = sxe2_pf_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("pf rebuild failed:%d.\n", ret); + adapter->dev_ctrl_ctxt.rebuild_failed = true; + return ret; + } + + adapter->dev_ctrl_ctxt.rebuild_failed = false; + + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_RUNNING, SXE2_RESET_INVAL); + + return 0; +} + +STATIC void sxe2_abnormal_proc(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type) +{ + adapter->dev_ctrl_ctxt.rebuild_failed = false; + + sxe2_trigger_reset(adapter, reset_type); + + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_RESETTING, reset_type); +} + +STATIC s32 sxe2_resetting_proc(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type) +{ + s32 ret; + + adapter->dev_ctrl_ctxt.rebuild_failed = false; + + (void)sxe2_pf_stop(adapter, SXE2_PF_STOP_CANCEL_CMD_QUEUE | + SXE2_PF_STOP_RESET_NOTICE_RDMA); + + ret = sxe2_wait_reset_done(adapter, reset_type); + if (ret == -ETIMEDOUT) { + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_FAULT, SXE2_RESET_INVAL); + return ret; + } + + sxe2_rdma_aux_delete(&adapter->aux_ctxt.cdev_info); + + if (reset_type == SXE2_RESET_CORER) + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_INITIAL, SXE2_RESET_INVAL); + else if (reset_type == SXE2_RESET_PFR) + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_ACCESSIBLE, + SXE2_RESET_INVAL); + else + SXE2_BUG(); + + return ret; +} + +STATIC void sxe2_fault_proc(struct sxe2_adapter *adapter) +{ + u32 val; + + sxe2_pf_stop(adapter, 
SXE2_PF_STOP_RESET_NOTICE_RDMA); + del_timer_sync(&adapter->dev_ctrl_ctxt.timer); + + (void)pci_read_config_dword(adapter->pdev, SXE2_PCIE_SYS_READY, &val); + if (val == SXE2_REG_INVALID_VALUE) { + LOG_DEV_ERR("configuration space inaccessible. please check the\t" + "device.\n"); + } else { + val = sxe2_read_reg(&adapter->hw, SXE2_PFGEN_CTRL); + + if (val == SXE2_REG_INVALID_VALUE) + LOG_DEV_ERR("bar0 inaccessible.\n"); + } + + LOG_DEV_ERR("sxe2 nic fault\n"); +} + +STATIC s32 sxe2_dev_state_proc(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + enum sxe2_dev_state dev_state; + enum sxe2_reset_type reset_type; + + sxe2_dev_state_get(adapter, &dev_state, &reset_type); + + switch (dev_state) { + case SXE2_DEVSTATE_INITIAL: + sxe2_initial_proc(adapter); + break; + case SXE2_DEVSTATE_ACCESSIBLE: + ret = sxe2_accessible_proc(adapter); + break; + case SXE2_DEVSTATE_ABNORMAL: + sxe2_abnormal_proc(adapter, reset_type); + break; + case SXE2_DEVSTATE_RESETTING: + ret = sxe2_resetting_proc(adapter, reset_type); + break; + case SXE2_DEVSTATE_RUNNING: + case SXE2_DEVSTATE_FAULT: + default: + LOG_DEV_ERR("Invalid device state %d\n", dev_state); + ret = -EINVAL; + SXE2_BUG(); + break; + } + + return ret; +} + +STATIC void sxe2_reset_work(struct sxe2_adapter *adapter) +{ + while (adapter->dev_ctrl_ctxt.dev_state != SXE2_DEVSTATE_RUNNING && + adapter->dev_ctrl_ctxt.dev_state != SXE2_DEVSTATE_FAULT) { + (void)sxe2_dev_state_proc(adapter); + cond_resched(); + } +} + +STATIC s32 sxe2_fault_work(struct sxe2_adapter *adapter) +{ + enum sxe2_dev_state dev_state; + enum sxe2_reset_type reset_type; + + sxe2_dev_state_get(adapter, &dev_state, &reset_type); + (void)reset_type; + + if (dev_state != SXE2_DEVSTATE_FAULT) + return 0; + + sxe2_fault_proc(adapter); + + return -EFAULT; +} + +STATIC s32 sxe2_heartbeat_check(struct sxe2_adapter *adapter) +{ + u32 heartbeat_value; + unsigned long expired_time = adapter->dev_ctrl_ctxt.last_heartbeat_time + + secs_to_jiffies(SXE2_HEARTBEAT_TIMEOUT_SECS); + + if (time_before(jiffies, expired_time)) + return 0; + + heartbeat_value = sxe2_hw_heartbeat_get(&adapter->hw); + + if (heartbeat_value == adapter->dev_ctrl_ctxt.last_heartbeat_value) { + LOG_DEV_ERR("heartbeat check failed, lasttime: %u ms, nowtime: %u\t" + "ms.\n", + jiffies_to_msecs(adapter->dev_ctrl_ctxt + .last_heartbeat_time), + jiffies_to_msecs(jiffies)); + adapter->dev_ctrl_ctxt.last_heartbeat_time = jiffies; + return -EFAULT; + } + + sxe2_heartbeat_update(adapter); + + return 0; +} + +STATIC void sxe2_heartbeat_work(struct sxe2_adapter *adapter) +{ + enum sxe2_dev_state dev_state; + enum sxe2_reset_type reset_type; + +#ifdef SXE2_CFG_DEBUG + if (!adapter->dev_ctrl_ctxt.heart_beat_ena) + return; +#endif + + sxe2_dev_state_get(adapter, &dev_state, &reset_type); + + if (dev_state != SXE2_DEVSTATE_ACCESSIBLE && + dev_state != SXE2_DEVSTATE_RUNNING) + return; + + if (sxe2_heartbeat_check(adapter)) + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_ABNORMAL, + SXE2_RESET_CORER); +} + +void sxe2_dev_ctrl_work_schedule(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.wq_lock, flags); + if (!test_bit(SXE2_DEV_CTRL_WORK_DISABLED, + &adapter->dev_ctrl_ctxt.work_state) && + !test_and_set_bit(SXE2_DEV_CTRL_WORK_SCHED, + &adapter->dev_ctrl_ctxt.work_state)) + sxe2_queue_work(adapter, sxe2_dev_ctrl_workqueue, + &adapter->dev_ctrl_ctxt.work); + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.wq_lock, flags); +} + +STATIC void sxe2_dev_ctrl_work_complete(struct sxe2_adapter *adapter) +{ + 
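+	/* Descriptive comment (added for clarity): this is only ever called
+	 * from the work callback itself, so the SCHED bit must still be set
+	 * here; clear it, then reschedule immediately if a VFLR was latched
+	 * while this pass was running.
+	 */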
BUG_ON(!test_bit(SXE2_DEV_CTRL_WORK_SCHED, + &adapter->dev_ctrl_ctxt.work_state)); + + smp_mb__before_atomic(); + clear_bit(SXE2_DEV_CTRL_WORK_SCHED, &adapter->dev_ctrl_ctxt.work_state); + + if (test_bit(SXE2_FLAG_VFLR_PENDING, adapter->flags)) + sxe2_dev_ctrl_work_schedule(adapter); +} + +STATIC void sxe2_vflr_work(struct sxe2_adapter *adapter) +{ + u16 vf_id_in_dev = 0; + u16 idx; + struct sxe2_vf_node *vf_node; + + if (!test_and_clear_bit(SXE2_FLAG_VFLR_PENDING, adapter->flags)) + return; + + sxe2_for_each_vf(adapter, idx) + { + vf_id_in_dev = idx + adapter->vf_ctxt.vfid_base; + if (sxe2_hw_vflr_cause_get(&adapter->hw, vf_id_in_dev)) { + sxe2_hw_vflr_cause_clear(&adapter->hw, vf_id_in_dev); + + LOG_WARN_BDF("vf:%u vf_id_in_dev:%u vflr checked.\n", idx, + vf_id_in_dev); + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + if (!vf_node) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + continue; + } + + if (sxe2_check_vf_ready_for_cfg(vf_node)) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + continue; + } + (void)sxe2_reset_vf(adapter, idx, SXE2_VF_RESET_FLAG_NOTIFY); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } + } +} + +STATIC void sxe2_interrupt_err_print(struct sxe2_adapter *adapter) +{ + u32 print_flag = adapter->dev_ctrl_ctxt.print_flag; + + if (likely(print_flag == 0)) + goto l_out; + + if (print_flag & SXE2_PRINT_CORE_RESET) { + LOG_DEV_INFO("recv core reset irq cause.\n"); + print_flag &= ~SXE2_PRINT_CORE_RESET; + } + + if (print_flag & SXE2_PRINT_ECC_ERROR) { + LOG_ERROR_BDF("multi-bit ecc err occur!\n"); + print_flag &= ~SXE2_PRINT_ECC_ERROR; + } + + if (print_flag & SXE2_PRINT_REG_CFG_ERR) { + LOG_WARN_BDF("register config error!\n"); + print_flag &= ~SXE2_PRINT_REG_CFG_ERR; + } + + if (print_flag & SXE2_PRINT_RAM_CONFLICT) { + LOG_DEV_WARN("ram read write conflict!\n"); + print_flag &= ~SXE2_PRINT_RAM_CONFLICT; + } + + adapter->dev_ctrl_ctxt.print_flag = print_flag; +l_out: + return; +} + +STATIC void sxe2_dev_ctrl_work_cb(struct work_struct *work) +{ + struct sxe2_dev_ctrl_context *dev_ctrl_ctxt = + container_of(work, struct sxe2_dev_ctrl_context, work); + struct sxe2_adapter *adapter = + container_of(dev_ctrl_ctxt, struct sxe2_adapter, dev_ctrl_ctxt); + + sxe2_interrupt_err_print(adapter); + + if (sxe2_fault_work(adapter)) + goto l_end; + + sxe2_heartbeat_work(adapter); + + sxe2_reset_work(adapter); + + sxe2_vflr_work(adapter); + +l_end: + sxe2_dev_ctrl_work_complete(adapter); +} + +STATIC void sxe2_dev_ctrl_timer_cb(struct timer_list *timer) +{ + struct sxe2_dev_ctrl_context *dev_ctrl = + container_of(timer, struct sxe2_dev_ctrl_context, timer); + struct sxe2_adapter *adapter = + container_of(dev_ctrl, struct sxe2_adapter, dev_ctrl_ctxt); + + (void)dev_ctrl; + mod_timer(&adapter->dev_ctrl_ctxt.timer, + adapter->dev_ctrl_ctxt.period + jiffies); + + sxe2_dev_ctrl_work_schedule(adapter); +} + +STATIC void sxe2_dev_ctrl_work_init(struct sxe2_adapter *adapter) +{ + adapter->dev_ctrl_ctxt.period = SXE2_DEV_CTRL_TIMER_PERIOD; + + INIT_WORK(&adapter->dev_ctrl_ctxt.work, sxe2_dev_ctrl_work_cb); + + timer_setup(&adapter->dev_ctrl_ctxt.timer, sxe2_dev_ctrl_timer_cb, 0); +} + +void sxe2_dev_ctrl_work_stop(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.wq_lock, flags); + set_bit(SXE2_DEV_CTRL_WORK_DISABLED, &adapter->dev_ctrl_ctxt.work_state); + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.wq_lock, flags); + + if (adapter->dev_ctrl_ctxt.timer.function) + 
del_timer_sync(&adapter->dev_ctrl_ctxt.timer); + + if (adapter->dev_ctrl_ctxt.work.func) + cancel_work_sync(&adapter->dev_ctrl_ctxt.work); + + clear_bit(SXE2_DEV_CTRL_WORK_SCHED, &adapter->dev_ctrl_ctxt.work_state); +} + +void sxe2_dev_ctrl_work_start(struct sxe2_adapter *adapter) +{ + sxe2_heartbeat_update(adapter); + clear_bit(SXE2_DEV_CTRL_WORK_DISABLED, &adapter->dev_ctrl_ctxt.work_state); + sxe2_dev_ctrl_work_schedule(adapter); + mod_timer(&adapter->dev_ctrl_ctxt.timer, + adapter->dev_ctrl_ctxt.period + jiffies); +} + +s32 sxe2_dev_ctrl_work_create(void) +{ + sxe2_dev_ctrl_workqueue = + alloc_workqueue("%s-DEV-CTRL", 0, 0, SXE2_DRV_NAME); + if (!sxe2_dev_ctrl_workqueue) { + LOG_PR_ERR("failed to create dev ctrl workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +void sxe2_dev_ctrl_work_destroy(void) +{ + destroy_workqueue(sxe2_dev_ctrl_workqueue); + sxe2_dev_ctrl_workqueue = NULL; +} + +void sxe2_dev_ctrl_init_once(struct sxe2_adapter *adapter) +{ + mutex_init(&adapter->dev_ctrl_ctxt.pf_lock); + + spin_lock_init(&adapter->dev_ctrl_ctxt.state_lock); + spin_lock_init(&adapter->dev_ctrl_ctxt.wq_lock); + spin_lock_init(&adapter->dev_ctrl_ctxt.cmd_list_lock); +} + +void sxe2_dev_ctrl_deinit_once(struct sxe2_adapter *adapter) +{ + mutex_destroy(&adapter->dev_ctrl_ctxt.pf_lock); +} + +void sxe2_dev_ctrl_init(struct sxe2_adapter *adapter) +{ + set_bit(SXE2_DEV_CTRL_WORK_DISABLED, &adapter->dev_ctrl_ctxt.work_state); + sxe2_dev_ctrl_work_init(adapter); +#ifdef SXE2_CFG_DEBUG + if (switch_heart_check) + adapter->dev_ctrl_ctxt.heart_beat_ena = true; + else + adapter->dev_ctrl_ctxt.heart_beat_ena = false; +#endif +} + +void sxe2_dev_ctrl_deinit(struct sxe2_adapter *adapter) +{ + sxe2_dev_ctrl_work_stop(adapter); +} + +s32 sxe2_reset_async(struct sxe2_adapter *adapter, enum sxe2_reset_type reset_type) +{ + if (reset_type == SXE2_RESET_INVAL || reset_type >= SXE2_RESET_MAX) + return -EINVAL; + + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_ABNORMAL, reset_type); + + sxe2_dev_ctrl_work_schedule(adapter); + + return 0; +} + +s32 sxe2_reset_sync(struct sxe2_adapter *adapter, enum sxe2_reset_type reset_type) +{ + if (reset_type == SXE2_RESET_INVAL) + return -EINVAL; + + sxe2_trigger_reset(adapter, reset_type); + + return sxe2_wait_reset_done(adapter, reset_type); +} + +STATIC void sxe2_core_reset_and_check(struct sxe2_adapter *adapter) +{ +#ifndef SXE2_TEST + u32 val; + u16 wait_times = 0; + struct sxe2_hw *hw = &adapter->hw; + + if (hw->is_pop_type) { + LOG_INFO_BDF("device is pop mode, do not need reset proc.\n"); + return; + } + + sxe2_trigger_reset(adapter, SXE2_RESET_CORER); + + do { + val = sxe2_read_reg(hw, SXE2_PFGEN_CTRL); + wait_times++; + if (val == SXE2_REG_INVALID_VALUE) + break; + + msleep(SXE2_RESET_WAIT_INTERVAL); + if (wait_times > SXE2_RESET_WAIT_TIMES) { + LOG_DEV_WARN("core reset trigger when err occur.\n"); + break; + } + } while (1); +#endif +} + +s32 sxe2_fwc_clear_pf_cfg(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_PF_CFG_CLEAR, NULL, 0, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("clear pf cfg failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +bool sxe2_corer_check(struct sxe2_adapter *adapter) +{ + struct sxe2_hw *hw = &adapter->hw; + unsigned long flags; + + if (sxe2_hw_corer_irq_cause_get(hw)) { + if (in_interrupt()) + adapter->dev_ctrl_ctxt.print_flag |= SXE2_PRINT_CORE_RESET; + else + LOG_DEV_INFO("recv core reset irq cause.\n"); + 
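+		/* Descriptive comment (added for clarity): account the core
+		 * reset and, under cmd_list_lock, move the device to
+		 * RESETTING and fail every firmware command still waiting
+		 * for a completion.
+		 */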
adapter->dev_ctrl_ctxt.corer_cnt++; + spin_lock_irqsave(&adapter->dev_ctrl_ctxt.cmd_list_lock, flags); + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_RESETTING, + SXE2_RESET_CORER); + sxe2_wait_task_cancel_all(adapter); + spin_unlock_irqrestore(&adapter->dev_ctrl_ctxt.cmd_list_lock, flags); + return true; + } + + return false; +} + +s32 sxe2_wait_vfr_done(struct sxe2_adapter *adapter, u16 vf_id) +{ + s32 ret; + unsigned long end; + u32 reset_status; + unsigned int delay_ms = SXE2_WAIT_RESET_DONE_TIMEOUT; + struct sxe2_hw *hw = &adapter->hw; + + end = jiffies + msecs_to_jiffies(delay_ms); + do { + cond_resched(); + + reset_status = sxe2_hw_vfr_done(hw, vf_id); + if (reset_status == SXE2_REG_UNACCESS) { + ret = -EBUSY; + LOG_DEBUG_BDF("core/pfr reset detected.\n"); + goto out; + } else if (reset_status) { + ret = 0; + LOG_DEBUG_BDF("vf:%u reset success.\n", vf_id); + goto out; + } + } while (!time_after(jiffies, end)); + + ret = -ETIMEDOUT; + + LOG_DEV_ERR("vf:%u wait vfr done timeout\n", vf_id); + +out: + return ret; +} + +s32 sxe2_reset_vf(struct sxe2_adapter *adapter, u16 vf_id, u32 flag) +{ + s32 ret = 0; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_vf_node *vf_node; + + vf_node = sxe2_vf_node_get(adapter, vf_id); + if (!vf_node) { + LOG_ERROR_BDF("invalid vf_id:%u reset failed.\n", vf_id); + ret = -EINVAL; + return ret; + } + + if (flag & SXE2_VF_RESET_FLAG_NOTIFY) + (void)sxe2_vf_reset_notify(adapter, vf_node); + + sxe2_vf_stop(vf_node); + + sxe2_hw_trigger_vfr(hw, vf_id); + + ret = sxe2_wait_vfr_done(adapter, vf_id); + if (ret) + goto l_end; + + (void)sxe2_rdma_aux_send_vf_reset_event(adapter, vf_node->vf_idx); + + ret = sxe2_vf_clean_and_rebuild(vf_node, true); + if (ret) { + LOG_ERROR_BDF("vf:%u vsi recreate failed during reset.\n", + vf_node->vf_idx); + goto l_end; + } + + clear_bit(SXE2_VF_STATE_DIS, vf_node->states); + + sxe2_hw_vf_active(hw, vf_node->vf_idx); + + LOG_DEV_INFO("vf:%u reset done.\n", vf_id); + +l_end: + return ret; +} + +s32 sxe2_reset_all_vfs(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + u16 idx; + struct sxe2_vf_node *vf_node; + + for (idx = 0; idx < pci_num_vf(adapter->pdev); idx++) { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + if (!vf_node) { + LOG_ERROR_BDF("idx:%u invalid.\n", idx); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + continue; + } + ret = sxe2_reset_vf(adapter, idx, 0); + if (ret) + LOG_ERROR_BDF("vf:%u reset failed %d.\n", idx, ret); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } + + LOG_DEV_INFO("reset all vfs:%u done.\n", idx); + + return ret; +} + +STATIC bool sxe2_is_in_resetting(struct sxe2_adapter *adapter) +{ + bool ret; + struct sxe2_hw *hw = &adapter->hw; + + ret = sxe2_corer_check(adapter); + if (ret) + goto l_end; + + if (!sxe2_hw_pfr_done(hw)) { + ret = true; + goto l_end; + } + + if (!sxe2_hw_corer_done(hw)) { + ret = true; + goto l_end; + } + + if (adapter->dev_ctrl_ctxt.dev_state == SXE2_DEVSTATE_RESETTING) { + ret = true; + goto l_end; + } + ret = false; + +l_end: + return ret; +} + +void sxe2_trigger_and_wait_resetting(struct sxe2_adapter *adapter) +{ + if (sxe2_is_in_resetting(adapter)) + goto l_end; + + sxe2_core_reset_and_check(adapter); + +l_end: + LOG_INFO_BDF("device is in resetting.\n"); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..0a6d2b02f9be8a125f93f48b6f248f202508df52 --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dev_ctrl.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_dev_ctrl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DEV_CTRL_H__ +#define __SXE2_DEV_CTRL_H__ + +#include +#include +#include + +#define SXE2_PF_STOPPED 0 + +struct sxe2_adapter; + +enum sxe2_dev_state { + SXE2_DEVSTATE_INITIAL = 0, + SXE2_DEVSTATE_ACCESSIBLE, + SXE2_DEVSTATE_RUNNING, + SXE2_DEVSTATE_ABNORMAL, + SXE2_DEVSTATE_RESETTING, + SXE2_DEVSTATE_FAULT, +}; + +enum sxe2_reset_type { + SXE2_RESET_INVAL = 0, + SXE2_RESET_CORER, + SXE2_RESET_PFR, + SXE2_RESET_VFR, + SXE2_RESET_MAX, +}; + +enum sxe2_dev_ctrl_task_state { + SXE2_DEV_CTRL_WORK_SCHED, + SXE2_DEV_CTRL_WORK_DISABLED, +}; + +enum sxe2_pf_stop_flags { + SXE2_PF_STOP_NORMAL = 0, + SXE2_PF_STOP_RESET_NOTICE_RDMA = BIT(0), + SXE2_PF_STOP_CANCEL_CMD_QUEUE = BIT(1), +}; + +enum sxe2_vf_reset_flags { + SXE2_VF_RESET_FLAG_NOTIFY = BIT(0), +}; + +enum sxe2_err_print_flags { + SXE2_PRINT_CORE_RESET = BIT(0), + SXE2_PRINT_ECC_ERROR = BIT(1), + SXE2_PRINT_REG_CFG_ERR = BIT(2), + SXE2_PRINT_RAM_CONFLICT = BIT(3), +}; + +struct sxe2_dev_ctrl_context { + enum sxe2_dev_state dev_state; + enum sxe2_reset_type reset_type; + unsigned long flag; + u16 corer_cnt; + u16 pfr_cnt; + u8 rebuild_failed; + u32 last_heartbeat_value; + unsigned long last_heartbeat_time; + struct work_struct work; + unsigned long work_state; + struct timer_list timer; + unsigned long period; + /* in order to protect the data */ + struct mutex pf_lock; + /* in order to protect the data */ + spinlock_t state_lock; + /* in order to protect the data */ + spinlock_t wq_lock; + /* in order to protect the data */ + spinlock_t cmd_list_lock; + u32 print_flag; +#ifdef SXE2_CFG_DEBUG + u8 heart_beat_ena; + u8 pad[3]; +#endif +}; + +void sxe2_dev_state_get(struct sxe2_adapter *adapter, + enum sxe2_dev_state *state, + enum sxe2_reset_type *reset_type); + +void sxe2_dev_state_set(struct sxe2_adapter *adapter, + enum sxe2_dev_state dev_state, + enum sxe2_reset_type reset_type); + +s32 sxe2_stop_drop(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_work_schedule(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_init(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_deinit(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_init_once(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_deinit_once(struct sxe2_adapter *adapter); + +s32 sxe2_dev_ctrl_work_create(void); + +void sxe2_dev_ctrl_work_destroy(void); + +s32 sxe2_wait_reset_done(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type); + +void sxe2_pf_stop(struct sxe2_adapter *adapter, u16 stop_flag); + +s32 sxe2_lfc_rebuild_set(struct sxe2_adapter *adapter); + +s32 sxe2_pf_rebuild(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_clear_pf_cfg(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_clear_vf_cfg(struct sxe2_adapter *adapter, u16 vf_id); + +s32 sxe2_reset_async(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type); + +s32 sxe2_reset_sync(struct sxe2_adapter *adapter, + enum sxe2_reset_type reset_type); + +void sxe2_dev_ctrl_work_stop(struct sxe2_adapter *adapter); + +void sxe2_dev_ctrl_work_start(struct sxe2_adapter *adapter); + +bool sxe2_corer_check(struct sxe2_adapter *adapter); + +s32 sxe2_reset_vf(struct sxe2_adapter *adapter, u16 vf_id, u32 flag); + +s32 sxe2_reset_all_vfs(struct sxe2_adapter *adapter); + +void sxe2_vfs_stop(struct sxe2_adapter 
*adapter); + +void sxe2_vf_stop(struct sxe2_vf_node *vf_node); + +s32 sxe2_wait_vfr_done(struct sxe2_adapter *adapter, u16 vf_id); + +void sxe2_trigger_and_wait_resetting(struct sxe2_adapter *adapter); + +s32 sxe2_wait_fw_init(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.c new file mode 100644 index 0000000000000000000000000000000000000000..51a5d2292ff20814ecc5b34dbdfc688301443fe5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_devlink.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_devlink.h" + +#ifdef ESWITCH_MODE_SET_NEED_TWO_PRAMS +int sxe2_eswitch_mode_set(struct devlink *devlink, u16 mode) +#else +int sxe2_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +#endif +{ +#ifdef HAVE_METADATA_PORT_INFO + s32 ret = 0; + struct sxe2_adapter *adapter = devlink_priv(devlink); + + if (!test_bit(SXE2_FLAG_SWITCHDEV_CAPABLE, adapter->flags) && + mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { + ret = -EOPNOTSUPP; + return ret; + } + + if (sxe2_eswitch_mode_write_try_lock(adapter)) { + LOG_DEV_INFO("PF %d get eswitchlock fail\n", adapter->pf_idx); + ret = -EBUSY; + goto l_end; + } + + if (adapter->eswitch_ctxt.mode == mode) + goto l_end; + + if (sxe2_vf_is_exist(adapter)) { + LOG_DEV_INFO("changing eswitch mode is allowed only if there is no\t" + "VFs created.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + LOG_DEV_INFO("PF %d changed eswitch mode to legacy.\n", + adapter->pf_idx); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (sxe2_macvlan_is_enabled(adapter)) { + LOG_DEV_ERR("PF %d switchdev cannot be configured L2\t" + "Forwarding Offload is currently enabled.\n", + adapter->pf_idx); + ret = -EOPNOTSUPP; + goto l_end; + } + LOG_DEV_INFO("PF %d changed eswitch mode to switchdev.\n", + adapter->pf_idx); + break; + default: +#ifndef ESWITCH_MODE_SET_NEED_TWO_PRAMS + NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode"); +#endif + ret = -EINVAL; + goto l_end; + } + + adapter->eswitch_ctxt.mode = mode; + +l_end: + sxe2_eswitch_mode_write_unlock(adapter); + return ret; +#else + return -EOPNOTSUPP; +#endif +} + +int sxe2_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct sxe2_adapter *adapter = devlink_priv(devlink); + + *mode = adapter->eswitch_ctxt.mode; + return 0; +} + +static const struct devlink_ops sxe2_devlink_ops = { +#ifdef SUPPORTED_FLASH_UPDATE_PARAMS + .supported_flash_update_params = + DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, +#endif + .eswitch_mode_get = sxe2_eswitch_mode_get, + .eswitch_mode_set = sxe2_eswitch_mode_set, +}; + +void sxe2_devlink_register(struct sxe2_adapter *adapter) +{ + struct devlink *devlink = priv_to_devlink(adapter); +#ifdef DEVLINK_REGISTER_NEED_2_PARAMS + (void)devlink_register(devlink, &adapter->pdev->dev); +#else + (void)devlink_register(devlink); +#endif +} + +void sxe2_devlink_unregister(struct sxe2_adapter *adapter) +{ + devlink_unregister(priv_to_devlink(adapter)); +} + +void sxe2_adapter_free(void *devlink_ptr) +{ + devlink_free((struct devlink *)devlink_ptr); +} + +struct sxe2_adapter *sxe2_adapter_create(struct pci_dev *pdev) +{ + struct sxe2_adapter 
*adapter; + struct device *dev = &pdev->dev; + const char *device_name = dev_name(dev); + struct devlink *devlink; + u32 device_len; + size_t copy_result; + + devlink = devlink_alloc(&sxe2_devlink_ops, sizeof(struct sxe2_adapter), dev); + if (!devlink) + return NULL; + + if (devm_add_action_or_reset(dev, sxe2_adapter_free, devlink)) + return NULL; + + adapter = devlink_priv(devlink); + adapter->pdev = pdev; + + device_len = (u32)(strlen(device_name) + 1); + copy_result = SXE2_STRCPY(adapter->dev_name, device_name, + min_t(u32, device_len, DEV_NAME_LEN)); + if (copy_result >= DEV_NAME_LEN) { + LOG_INFO_BDF("adapter:%pK, pdev:%pK, device_len:%u\n", adapter, pdev, + device_len); + } + sxe2_eswitch_mode_rwlock_init(adapter); + LOG_INFO_BDF("adapter:%pK, pdev:%pK\n", adapter, pdev); + + return adapter; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.h new file mode 100644 index 0000000000000000000000000000000000000000..27cfcc47ef252afd0672d2daf2a4bf3e32464b91 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_devlink.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_devlink.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DEVLINK_H__ +#define __SXE2_DEVLINK_H__ + +struct sxe2_adapter *sxe2_adapter_create(struct pci_dev *pdev); +void sxe2_adapter_free(void *devlink_ptr); +void sxe2_devlink_register(struct sxe2_adapter *adapter); +void sxe2_devlink_unregister(struct sxe2_adapter *adapter); + +#ifdef ESWITCH_MODE_SET_NEED_TWO_PRAMS +int sxe2_eswitch_mode_set(struct devlink *devlink, u16 mode); +#else +int sxe2_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); +#endif + +int sxe2_eswitch_mode_get(struct devlink *devlink, u16 *mode); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.c new file mode 100644 index 0000000000000000000000000000000000000000..3ffab96ed4b0ca09bb5bfac7dd423ca2a6b881a4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.c @@ -0,0 +1,675 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_dfx.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_common.h" +#include "sxe2_cmd.h" +#include "sxe2_log.h" +#include "sxe2_dfx.h" + +static void rxft_action_type_analysis(struct sxe2_adapter *adapter, u32 acType) +{ + switch (acType) { + case 0: + LOG_DEV_INFO( + "action_type 0 : To Tx MAC(contain LAN TX / RDMA TX / MNG TX)\r\n"); + break; + case 1: + LOG_DEV_INFO( + "action_type 1 : Tx or Rx pkt with drop flag set by switch or acl\r\n"); + break; + case 2: + LOG_DEV_INFO( + "action_type 2 : Tx first action is to MNG\r\n"); + break; + case 3: + LOG_DEV_INFO( + "action_type 3 : Tx first action is to RDMA Rx\r\n"); + break; + case 4: + LOG_DEV_INFO( + "action_type 4 : Tx first action is to LAN Rx\r\n"); + break; + case 5: + LOG_DEV_INFO( + "action_type 5 : Tx first action is to FD program\r\n"); + break; + case 6: + LOG_DEV_INFO( + "action_type 6 : Tx first action is to mirror(contain ingress/egress/event)\r\n"); + break; + case 7: + LOG_DEV_INFO( + "action_type 7 : Tx first action is RDMA TX multicast loopback(RDMA TX multicast table hit)\r\n"); + break; + case 8: + LOG_DEV_INFO("action_type 8 : To vsi\r\n"); + break; + case 9: + LOG_DEV_INFO( + "action_type 9 : To vsi queue or queue region\r\n"); + break; + case 10: + LOG_DEV_INFO("action_type 10 : Execute FD program\r\n"); + break; + case 11: + LOG_DEV_INFO("action_type 11 : LAN RX multicast list\r\n"); + break; + case 12: + LOG_DEV_INFO("action_type 12 : LAN RX mirror list\r\n"); + break; + case 13: + LOG_DEV_INFO("action_type 13 : RDMA multicast list\r\n"); + break; + case 14: + LOG_DEV_INFO("action_type 14 : To RDMA Rx dst qpn\r\n"); + break; + case 15: + LOG_DEV_INFO( + "action_type 15 : Original received network packet lookup table\r\n"); + break; + case 16: + LOG_DEV_INFO("action_type 16 : PFR/VFR/VMR\r\n"); + break; + default: + LOG_DEV_INFO("action_type %u error!!!\r\n", acType); + break; + } +} + +static void ppe_info_protocol_id_convert( + struct sxe2_rxft_dbg_ppe_info_action_type_1 *ppe_info, + union sxe2_rxft_ppe_protocol_info *prot_info) +{ + prot_info[0].reg.protocol_id = ppe_info->protocol_id0; + prot_info[0].reg.protocol_offset = + (u32)(ppe_info->protocol_offset0_6_0 | + (ppe_info->protocol_offset0_7 << (u32)7)); + prot_info[1].reg.protocol_id = ppe_info->protocol_id1; + prot_info[1].reg.protocol_offset = ppe_info->protocol_offset1; + prot_info[2].reg.protocol_id = ppe_info->protocol_id2; + prot_info[2].reg.protocol_offset = + (u32)(ppe_info->protocol_offset2_6_0 | + (ppe_info->protocol_offset2_7 << (u32)7)); + prot_info[3].reg.protocol_id = ppe_info->protocol_id3; + prot_info[3].reg.protocol_offset = ppe_info->protocol_offset3; + prot_info[4].reg.protocol_id = ppe_info->protocol_id4; + prot_info[4].reg.protocol_offset = + (u32)(ppe_info->protocol_offset4_6_0 | + (ppe_info->protocol_offset4_7 << (u32)7)); + prot_info[5].reg.protocol_id = ppe_info->protocol_id5; + prot_info[5].reg.protocol_offset = ppe_info->protocol_offset5; + prot_info[6].reg.protocol_id = ppe_info->protocol_id6; + prot_info[6].reg.protocol_offset = + (u32)(ppe_info->protocol_offset6_6_0 | + (ppe_info->protocol_offset6_7 << (u32)7)); + prot_info[7].reg.protocol_id = ppe_info->protocol_id7; + prot_info[7].reg.protocol_offset = ppe_info->protocol_offset7; + prot_info[8].reg.protocol_id = ppe_info->protocol_id8; + prot_info[8].reg.protocol_offset = + (u32)(ppe_info->protocol_offset8_6_0 | + (ppe_info->protocol_offset8_7 << (u32)7)); + prot_info[9].reg.protocol_id = ppe_info->protocol_id9; + 
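+	/* Descriptive comment (added for clarity): even-indexed offsets are
+	 * split across a 32-bit word boundary in the raw layout (bits 6:0
+	 * plus bit 7) and are stitched back together in this function;
+	 * odd-indexed offsets fit in a single field.
+	 */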
prot_info[9].reg.protocol_offset = ppe_info->protocol_offset9; + prot_info[10].reg.protocol_id = ppe_info->protocol_id10; + prot_info[10].reg.protocol_offset = + (u32)(ppe_info->protocol_offset10_6_0 | + (ppe_info->protocol_offset10_7 << (u32)7)); + prot_info[11].reg.protocol_id = ppe_info->protocol_id11; + prot_info[11].reg.protocol_offset = ppe_info->protocol_offset11; + prot_info[12].reg.protocol_id = ppe_info->protocol_id12; + prot_info[12].reg.protocol_offset = + (u32)(ppe_info->protocol_offset12_6_0 | + (ppe_info->protocol_offset12_7 << (u32)7)); + prot_info[13].reg.protocol_id = ppe_info->protocol_id13; + prot_info[13].reg.protocol_offset = ppe_info->protocol_offset13; + prot_info[14].reg.protocol_id = ppe_info->protocol_id14; + prot_info[14].reg.protocol_offset = + (u32)(ppe_info->protocol_offset14_6_0 | + (ppe_info->protocol_offset14_7 << (u32)7)); + prot_info[15].reg.protocol_id = ppe_info->protocol_id15; + prot_info[15].reg.protocol_offset = ppe_info->protocol_offset15; +} + +static void rxft_ppe_info_common_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + int i; + struct sxe2_rxft_dbg_ppe_info_action_type_1 ppe_info; + union sxe2_rxft_ppe_protocol_info prot_info[16]; + + (void)memset(&prot_info, 0, + sizeof(union sxe2_rxft_ppe_protocol_info) * 16); + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + + LOG_DEV_INFO("action_prio = 0x%x\r\n", ppe_info.action_pro); + LOG_DEV_INFO("pkt_tc = 0x%x\r\n", ppe_info.pkt_tc); + LOG_DEV_INFO("port = 0x%x\r\n", ppe_info.port); + LOG_DEV_INFO("pkt_len = 0x%x\r\n", ppe_info.pkt_len); + LOG_DEV_INFO("src_pf = 0x%x\r\n", ppe_info.src_pf); + LOG_DEV_INFO("src_vf = 0x%x\r\n", + ppe_info.src_vf_1_0 | (ppe_info.srx_vf_7_2 << 2)); + LOG_DEV_INFO("src_txvsi = 0x%x\r\n", ppe_info.src_txvsi); + LOG_DEV_INFO("src_pf_vf_vm_flg = 0x%x\r\n", ppe_info.src_pf_vf_vm_flg); + LOG_DEV_INFO("pkt_type = 0x%x\r\n", ppe_info.pkt_type); + LOG_DEV_INFO("dst_pf = 0x%x\r\n", ppe_info.dst_pf); + LOG_DEV_INFO("dst_vf = 0x%x\r\n", + ppe_info.dst_vf_5_0 | (ppe_info.dst_vf_7_6 << 6)); + LOG_DEV_INFO("dst_vsi = 0x%x\r\n", ppe_info.dst_vsi); + LOG_DEV_INFO("dst_pf_vf_vm_flg = 0x%x\r\n", ppe_info.dst_pf_vf_vm_flg); + LOG_DEV_INFO("up = 0x%x\r\n", ppe_info.up); + LOG_DEV_INFO("tcp_syn = 0x%x\r\n", ppe_info.tcp_syn); + LOG_DEV_INFO("tcp_ack = 0x%x\r\n", ppe_info.tcp_ack); + LOG_DEV_INFO("tcp_rst = 0x%x\r\n", ppe_info.tcp_rst); + LOG_DEV_INFO("tcp_fin = 0x%x\r\n", ppe_info.tcp_fin); + LOG_DEV_INFO("l2_mac_err = 0x%x\r\n", ppe_info.l2_mac_err); + LOG_DEV_INFO("bypass_sw = 0x%x\r\n", ppe_info.bypass_switch); + LOG_DEV_INFO("bypass_acl = 0x%x\r\n", ppe_info.bypass_acl); + LOG_DEV_INFO("bypass_rxft = 0x%x\r\n", ppe_info.bypass_rxft); + LOG_DEV_INFO("drop = 0x%x\r\n", ppe_info.drop); + LOG_DEV_INFO("parser_abort = 0x%x\r\n", ppe_info.parser_abort); + LOG_DEV_INFO("malicious_abort = 0x%x\r\n", ppe_info.malicious_abort); + + if (ppe_info.packet_source_type == 0) { + LOG_DEV_INFO("pkt_src_type = 0x0, receive - from lan\r\n"); + } else if (ppe_info.packet_source_type == 1) { + LOG_DEV_INFO( + "pkt_src_type = 0x1, loopback - from host\r\n"); + } else if (ppe_info.packet_source_type == 2) { + LOG_DEV_INFO( + "pkt_src_type = 0x2, loopback - from mng\r\n"); + } else if (ppe_info.packet_source_type == 3) { + LOG_DEV_INFO("pkt_src_type = 0x3, transmit\r\n"); + if (ppe_info.pkt_source == 0) { + LOG_DEV_INFO("pkt_src = 0x0, lan tx\r\n"); + } else if 
(ppe_info.pkt_source == 1) { + LOG_DEV_INFO("pkt_src = 0x1, rdma tx\r\n"); + } else if (ppe_info.pkt_source == 2) { + LOG_DEV_INFO("pkt_src = 0x2, bmc-mng tx\r\n"); + } else if (ppe_info.pkt_source == 3) { + LOG_DEV_INFO( + "pkt_src = 0x3, miniSOC-mng tx\r\n"); + } + } + + LOG_DEV_INFO("fd_program = 0x%x\r\n", ppe_info.fd_program); + LOG_DEV_INFO("fd_program_dummy = 0x%x\r\n", ppe_info.fd_program_dummy); + LOG_DEV_INFO("lan_tx_sw = 0x%x\r\n", ppe_info.lan_tx_sw); + LOG_DEV_INFO("rdma_tx_swap = 0x%x\r\n", ppe_info.rdma_tx_swap); + + if (ppe_info.pkt_dest == 0) + LOG_DEV_INFO("pkt_dest = 0x0, lan tx/rx\r\n"); + else if (ppe_info.pkt_dest == 1) + LOG_DEV_INFO("pkt_dest = 0x1, rdma\r\n"); + else if (ppe_info.pkt_dest == 2) + LOG_DEV_INFO("pkt_dest = 0x2, mng\r\n"); + else if (ppe_info.pkt_dest == 3) + LOG_DEV_INFO("pkt_dest = 0x3, bfde\r\n"); + + LOG_DEV_INFO("icrc_err = 0x%x\r\n", ppe_info.icrc_err); + LOG_DEV_INFO("ipeh = 0x%x\r\n", ppe_info.ipeh); + LOG_DEV_INFO("esp = 0x%x\r\n", ppe_info.esp); + LOG_DEV_INFO("mpls = 0x%x\r\n", ppe_info.mpls); + LOG_DEV_INFO("fragment = 0x%x\r\n", ppe_info.fragment); + LOG_DEV_INFO("checksum_offload = 0x%x\r\n", ppe_info.checksum_offload); + LOG_DEV_INFO("ipe = 0x%x\r\n", ppe_info.ipe); + LOG_DEV_INFO("l4e = 0x%x\r\n", ppe_info.l4e); + LOG_DEV_INFO("eipe = 0x%x\r\n", ppe_info.eipe); + LOG_DEV_INFO("eudpe = 0x%x\r\n", ppe_info.eudpe); + LOG_DEV_INFO("mac_in_mac = 0x%x\r\n", ppe_info.mac_in_mac); + LOG_DEV_INFO("pkt_err = 0x%x\r\n", ppe_info.pkt_err); + LOG_DEV_INFO("mirr_id = 0x%x\r\n", ppe_info.mirr_id); + + if (ppe_info.mirr_type == 0) + LOG_DEV_INFO("mirr_type = 0x0, ingress mirror\r\n"); + else if (ppe_info.mirr_type == 1) + LOG_DEV_INFO("mirr_type = 0x1, egress mirror\r\n"); + else if (ppe_info.mirr_type == 2) + LOG_DEV_INFO("mirr_type = 0x2, event mirror\r\n"); + else if (ppe_info.mirr_type == 3) + LOG_DEV_INFO("mirr_type = 0x3, rsv\r\n"); + + LOG_DEV_INFO("multicast_copy = 0x%x\r\n", ppe_info.multicast_copy); + + if (ppe_info.umbcast == 0) + LOG_DEV_INFO("umbcast = 0x0, unicast\r\n"); + else if (ppe_info.umbcast == 1) + LOG_DEV_INFO("umbcast = 0x1, multicast\r\n"); + else if (ppe_info.umbcast == 2) + LOG_DEV_INFO("umbcast = 0x2, broadcast\r\n"); + else if (ppe_info.umbcast == 3) + LOG_DEV_INFO("umbcast = 0x3, mirror\r\n"); + + switch (ppe_info.tunnel_type) { + case 0: + LOG_DEV_INFO("tunnel_type = 0, non tunnel pkt\r\n"); + break; + case 1: + LOG_DEV_INFO("tunnel_type = 1, Rsv\r\n"); + break; + case 2: + LOG_DEV_INFO("tunnel_type = 2, IPv4-in-IPv4\r\n"); + break; + case 3: + LOG_DEV_INFO("tunnel_type = 3, IPv4-in-IPv6\r\n"); + break; + case 4: + LOG_DEV_INFO("tunnel_type = 4, IPv6-in-IPv4\r\n"); + break; + case 5: + LOG_DEV_INFO("tunnel_type = 5, IPv6-in-IPv6\r\n"); + break; + case 6: + LOG_DEV_INFO("tunnel_type = 6, NSH\r\n"); + break; + case 7: + LOG_DEV_INFO("tunnel_type = 7, VXLAN(MAC-in-UDP)\r\n"); + break; + case 8: + LOG_DEV_INFO("tunnel_type = 8, VXLAN(GPE)\r\n"); + break; + case 9: + LOG_DEV_INFO("tunnel_type = 9, GRE\r\n"); + break; + case 10: + LOG_DEV_INFO("tunnel_type = 10, Geneve\r\n"); + break; + case 11: + LOG_DEV_INFO("tunnel_type = 11, MPLSoGRE\r\n"); + break; + case 12: + LOG_DEV_INFO("tunnel_type = 12, MPLSoUDP\r\n"); + break; + case 13: + LOG_DEV_INFO("tunnel_type = 13, IPSEC NAT-T\r\n"); + break; + case 14: + LOG_DEV_INFO("tunnel_type = 14, GTP\r\n"); + break; + case 15: + LOG_DEV_INFO("tunnel_type = 15, ETH-in-IPv6(SRv6)\r\n"); + break; + case 16: + LOG_DEV_INFO("tunnel_type = 16, Teredo(IPv6-in-UDP)\r\n"); + break; + case 17: + 
LOG_DEV_INFO("tunnel_type = 17, L2TP-in-UDP\r\n"); + break; + case 18: + LOG_DEV_INFO("tunnel_type = 18, SDN\r\n"); + break; + default: + LOG_DEV_INFO("tunnel_type = %d, not defined\r\n", + ppe_info.tunnel_type); + break; + } + + LOG_DEV_INFO("oam = 0x%x\r\n", ppe_info.oam); + LOG_DEV_INFO("flow_id_vld = 0x%x\r\n", ppe_info.flow_id_vld); + LOG_DEV_INFO("acl_hit = 0x%x\r\n", ppe_info.acl_hit); + LOG_DEV_INFO("macsec_err = 0x%x\r\n", ppe_info.macsec_err); + LOG_DEV_INFO("first_action = 0x%x\r\n", ppe_info.first_action); + LOG_DEV_INFO("last_action = 0x%x\r\n", ppe_info.last_action); + LOG_DEV_INFO("last_mc_pkt = 0x%x\r\n", ppe_info.last_mc_pkt); + + ppe_info_protocol_id_convert(&ppe_info, prot_info); + + LOG_DEV_INFO("protocol_id_num = 0x%x\r\n", ppe_info.protocol_id_num); + for (i = 0; i < ppe_info.protocol_id_num; i++) { + LOG_DEV_INFO("protocol_id[%d] = %3d, offset : %d\r\n", i, + prot_info[i].reg.protocol_id, + prot_info[i].reg.protocol_offset); + } +} + +static void rxft_ppe_info_common_tail_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + struct sxe2_rxft_dbg_ppe_info_action_type_1 ppe_info; + + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + + LOG_DEV_INFO("sdf_hash = 0x%x\r\n", ppe_info.sdf_hash); + LOG_DEV_INFO("sdf_pri = 0x%x\r\n", ppe_info.sdf_pri); + LOG_DEV_INFO("fd_prog_drop = 0x%x\r\n", ppe_info.fd_prog_drop); + LOG_DEV_INFO("trace_level = 0x%x\r\n", ppe_info.trace_level); + LOG_DEV_INFO("pkt_src_bfd = 0x%x\r\n", ppe_info.pkt_src_bfd); + LOG_DEV_INFO("to_host = 0x%x\r\n", ppe_info.to_host); + LOG_DEV_INFO("to_mng = 0x%x\r\n", ppe_info.to_mng); + LOG_DEV_INFO("to_lan = 0x%x\r\n", ppe_info.to_lan); + LOG_DEV_INFO("to_roce = 0x%x\r\n", ppe_info.to_roce); +} + +static void rxft_ppe_info_acType9_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + u32 tmp[10] = { 0 }; + struct sxe2_rxft_dbg_ppe_info_action_type_9 ppe_info; + + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_9)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_9)); + + rxft_ppe_info_common_analysis(adapter, pData); + + LOG_DEV_INFO("sw_profile_id = 0x%x\r\n", ppe_info.sw_profile_id); + tmp[2] = (u32)(ppe_info.acl_profile_id_1_0 | + (ppe_info.acl_profile_id_6_2 << (u32)2)); + LOG_DEV_INFO("acl_profile_id = 0x%x\r\n", tmp[2]); + LOG_DEV_INFO("fd_profile_id = 0x%x\r\n", ppe_info.fd_profile_id); + LOG_DEV_INFO("flow_id = 0x%x\r\n", ppe_info.flow_id); + LOG_DEV_INFO("flow_id_pri = 0x%x\r\n", ppe_info.flow_id_pri); + tmp[3] = (u32)(ppe_info.queue_buf_num_0 | + (ppe_info.queue_buf_num_10_1 << (u32)1)); + LOG_DEV_INFO("queue_buf_num = 0x%x\r\n", tmp[3]); + LOG_DEV_INFO("toqueue = 0x%x\r\n", ppe_info.toqueue); + + if (ppe_info.queue_hit_flag & 0x1) { + LOG_DEV_INFO( + "queue_hit_flag[0] vld , fd default action hit\r\n"); + } else if (ppe_info.queue_hit_flag & 0x2) { + LOG_DEV_INFO("queue_hit_flag[1] vld , fd kt/fkot hit\r\n"); + } else if (ppe_info.queue_hit_flag & 0x4) { + LOG_DEV_INFO("queue_hit_flag[2] vld , acl hit\r\n"); + } else if (ppe_info.queue_hit_flag & 0x8) { + LOG_DEV_INFO("queue_hit_flag[3] vld , sw hit\r\n"); + } + + if (ppe_info.queue_sel_result == 0) { + LOG_DEV_INFO( + "queue_sel_result = 0x0, queue/queue region non select\r\n"); + } else if (ppe_info.queue_sel_result == 1) { + LOG_DEV_INFO( + "queue_sel_result = 0x1, queue/queue region from fd\r\n"); + } else if (ppe_info.queue_sel_result == 2) { + 
LOG_DEV_INFO( + "queue_sel_result = 0x2, queue/queue region from acl\r\n"); + } else if (ppe_info.queue_sel_result == 3) { + LOG_DEV_INFO( + "queue_sel_result = 0x3, queue/queue region from sw\r\n"); + } +} + +static void rxft_ppe_info_acType1_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + u32 tmp[10] = { 0 }; + struct sxe2_rxft_dbg_ppe_info_action_type_1 ppe_info; + + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_1)); + + rxft_ppe_info_common_analysis(adapter, pData); + + LOG_DEV_INFO("sw_profile_id = 0x%x\r\n", ppe_info.sw_profile_id); + tmp[2] = (u32)(ppe_info.acl_profile_id_1_0 | + (ppe_info.acl_profile_id_6_2 << (u32)2)); + LOG_DEV_INFO("acl_profile_id = 0x%x\r\n", tmp[2]); + LOG_DEV_INFO("fd_profile_id = 0x%x\r\n", ppe_info.fd_profile_id); + LOG_DEV_INFO("flow_id = 0x%x\r\n", ppe_info.flow_id); + LOG_DEV_INFO("flow_id_pri = 0x%x\r\n", ppe_info.flow_id_pri); +} + +static void rxft_ppe_info_acType10_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + u32 tmp[10] = { 0 }; + struct sxe2_rxft_dbg_ppe_info_action_type_10 ppe_info; + + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_10)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_10)); + + rxft_ppe_info_common_analysis(adapter, pData); + + LOG_DEV_INFO("sw_profile_id = 0x%x\r\n", ppe_info.sw_profile_id); + tmp[2] = (u32)(ppe_info.acl_profile_id_1_0 | + (ppe_info.acl_profile_id_6_2 << (u32)2)); + LOG_DEV_INFO("acl_profile_id = 0x%x\r\n", tmp[2]); + LOG_DEV_INFO("fd_profile_id = 0x%x\r\n", ppe_info.fd_profile_id); + LOG_DEV_INFO("flow_id = 0x%x\r\n", ppe_info.flow_id); + LOG_DEV_INFO("flow_id_pri = 0x%x\r\n", ppe_info.flow_id_pri); + tmp[3] = (u32)(ppe_info.qindex_0 | (ppe_info.qindex_10_1 << (u32)1)); + LOG_DEV_INFO("fd_desc_q_index = 0x%x\r\n", tmp[3]); + LOG_DEV_INFO("fd_desc_comp_queue = 0x%x\r\n", ppe_info.comp_queue); + LOG_DEV_INFO("fd_desc_comp_report = 0x%x\r\n", ppe_info.comp_report); + LOG_DEV_INFO("fd_desc_fd_space = 0x%x\r\n", ppe_info.fd_space); + LOG_DEV_INFO("fd_desc_stat_cnt = 0x%x\r\n", ppe_info.stat_cnt); + LOG_DEV_INFO("fd_desc_stat_ena = 0x%x\r\n", ppe_info.stat_ena); + LOG_DEV_INFO("fd_desc_evict_ena = 0x%x\r\n", ppe_info.evict_ena); + LOG_DEV_INFO("fd_desc_to_queue = 0x%x\r\n", ppe_info.to_queue); + LOG_DEV_INFO("fd_desc_to_queue_prio = 0x%x\r\n", + ppe_info.to_queue_prio); + LOG_DEV_INFO("fd_desc_fd_flow_id = 0x%x\r\n", ppe_info.fd_flow_id); + LOG_DEV_INFO("fd_desc_pcmd = 0x%x\r\n", ppe_info.pcmd); + LOG_DEV_INFO("fd_desc_fd_vsi = 0x%x\r\n", ppe_info.fd_vsi); + LOG_DEV_INFO("fd_desc_swap = 0x%x\r\n", ppe_info.swap); + LOG_DEV_INFO("fd_desc_fdid_prio = 0x%x\r\n", ppe_info.fdid_prio); + LOG_DEV_INFO("fd_desc_fdid_did = 0x%x\r\n", ppe_info.fdid_did); + tmp[4] = (u32)(ppe_info.fdid_0 | (ppe_info.fdid_31_1 << (u32)1)); + LOG_DEV_INFO("fd_desc_fdid = 0x%x\r\n", tmp[4]); +} + +static void rxft_ppe_info_acType14_analysis(struct sxe2_adapter *adapter, + u32 *pData) +{ + struct sxe2_rxft_dbg_ppe_info_action_type_14 ppe_info; + + (void)memset(&ppe_info, 0, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_14)); + (void)memcpy(&ppe_info, pData, + sizeof(struct sxe2_rxft_dbg_ppe_info_action_type_14)); + + rxft_ppe_info_common_analysis(adapter, pData); + + LOG_DEV_INFO("rh ip offset = 0x%x\r\n", ppe_info.rh_ip_offset); + LOG_DEV_INFO("rh vlan offset = 0x%x\r\n", + ppe_info.rh_vlan_offset1_0 | + (ppe_info.rh_vlan_offset7_2 << 
2)); + LOG_DEV_INFO("rh vlan vld = 0x%x\r\n", ppe_info.rh_vlan_vld); + LOG_DEV_INFO("rh dqpn = 0x%x\r\n", ppe_info.rh_dqpn); + LOG_DEV_INFO("dst_rdma_mc_num = 0x%x\r\n", + ppe_info.dst_rdma_mc_num_0 | + (ppe_info.dst_rdma_mc_num_12_1 << 1)); + LOG_DEV_INFO("rdma_mc_cnt = 0x%x\r\n", ppe_info.rdma_mc_cnt); +} + +static void rxft_ppe_info_analysis(struct sxe2_adapter *adapter, u32 *data) +{ + u32 action_type; + + action_type = data[0] & 0x1f; + rxft_action_type_analysis(adapter, action_type); + switch (action_type) { + case 0: + case 1: + case 2: + case 4: + case 5: + case 8: + case 15: + rxft_ppe_info_acType1_analysis(adapter, data); + break; + case 9: + rxft_ppe_info_acType9_analysis(adapter, data); + break; + case 10: + rxft_ppe_info_acType10_analysis(adapter, data); + break; + case 3: + case 7: + case 13: + case 14: + rxft_ppe_info_acType14_analysis(adapter, data); + break; + default: + LOG_DEV_INFO( + "action type = %d, analysis not supported yet \r\n", + action_type); + break; + } + + rxft_ppe_info_common_tail_analysis(adapter, data); +} + +STATIC void sxe2_rxft_ppe_info_analysis(struct sxe2_adapter *adapter, + struct sxe2_rxft_ppe_info *ppe_info) +{ + int i; + + for (i = 0; i < SXE2_RXFT_PPE_INFO_TYPE_MAX; i++) { + switch (i) { + case SXE2_RXFT_PPE_INFO_TX_IN: + LOG_DEV_INFO("RXFT TX INGRESS PPE INFO : \r\n"); + break; + case SXE2_RXFT_PPE_INFO_TX_EX: + LOG_DEV_INFO("RXFT TX EGRESS PPE INFO : \r\n"); + break; + case SXE2_RXFT_PPE_INFO_RX_IN: + LOG_DEV_INFO("RXFT RX INGRESS PPE INFO : \r\n"); + break; + case SXE2_RXFT_PPE_INFO_RX_EX: + LOG_DEV_INFO("RXFT RX EGRESS PPE INFO : \r\n"); + break; + case SXE2_RXFT_PPE_INFO_LP_IN: + LOG_DEV_INFO("RXFT LP INGRESS PPE INFO : \r\n"); + break; + case SXE2_RXFT_PPE_INFO_LP_EX: + LOG_DEV_INFO("RXFT LP EGRESS PPE INFO : \r\n"); + break; + default: + LOG_DEV_INFO("ERROR TYPE : %u\r\n", i); + break; + } + rxft_ppe_info_analysis(adapter, ppe_info->info[i].data); + } +} + +void sxe2_fwc_rxft_ppe_info(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_rxft_ppe_info ppe_info; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXFT_PPE_INFO, NULL, 0, + &ppe_info, + sizeof(struct sxe2_rxft_ppe_info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("rxft ppe info cmd fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("****rxft ppe info start****"); + + sxe2_rxft_ppe_info_analysis(adapter, &ppe_info); + + LOG_DEV_INFO("****rxft ppe info end****"); +} + +STATIC void sxe2_ppe_dfx_dump(struct sxe2_adapter *adapter, + struct sxe2_fwc_ppe_dfx *ppe_dfx) +{ + u8 i; + struct sxe2_fwc_txpa_dfx *txpa; + struct sxe2_fwc_rxpa_dfx *rxpa; + + LOG_DEV_INFO("TXPA DFX\n"); + for (i = 0; i < 4; i++) { + txpa = &ppe_dfx->txpa[i]; + LOG_DEV_INFO(" txpa[%u].in_all = %u (include drop/err)\n", i, txpa->txpa_in_all); + LOG_DEV_INFO(" txpa[%u].out_all = %u\n", i, txpa->txpa_out_all); + LOG_DEV_INFO(" txpa[%u].in_drop = %u\n", i, txpa->txpa_in_drop); + LOG_DEV_INFO(" txpa[%u].out_drop = %u\n", i, txpa->txpa_out_drop); + LOG_DEV_INFO(" txpa[%u].in_err = %u\n", i, txpa->txpa_in_err); + LOG_DEV_INFO(" txpa[%u].out_err = %u\n", i, txpa->txpa_out_err); + } + + LOG_DEV_INFO("TXFB DFX\n"); + LOG_DEV_INFO(" txfb.in_all = %u (include in_drop)\n", ppe_dfx->txfb.txfb_in_all); + LOG_DEV_INFO(" txfb.in_drop = %u\n", ppe_dfx->txfb.txfb_in_drop); + LOG_DEV_INFO(" txfb.out_all = %u (include out_drop)\n", ppe_dfx->txfb.txfb_out_all); + LOG_DEV_INFO(" txfb.out_drop = %u\n", ppe_dfx->txfb.txfb_out_drop); + LOG_DEV_INFO(" txfb.internal_drop 
= %u\n", ppe_dfx->txfb.txfb_internal_drop); + + LOG_DEV_INFO("RXPA DFX\n"); + for (i = 0; i < 4; i++) { + rxpa = &ppe_dfx->rxpa[i]; + LOG_DEV_INFO(" rxpa[%u].in_all = %u (include drop/err)\n", i, rxpa->rxpa_in_all); + LOG_DEV_INFO(" rxpa[%u].out_all = %u\n", i, rxpa->rxpa_out_all); + LOG_DEV_INFO(" rxpa[%u].in_drop = %u\n", i, rxpa->rxpa_in_drop); + LOG_DEV_INFO(" rxpa[%u].out_drop = %u\n", i, rxpa->rxpa_out_drop); + LOG_DEV_INFO(" rxpa[%u].in_err = %u\n", i, rxpa->rxpa_in_err); + LOG_DEV_INFO(" rxpa[%u].out_err = %u\n", i, rxpa->rxpa_out_err); + } + + LOG_DEV_INFO("RXFB DFX\n"); + LOG_DEV_INFO(" rxfb.tx_in_all = %u (include tx_in_drop)\n", ppe_dfx->rxfb.rxfb_tx_in_all); + LOG_DEV_INFO(" rxfb.tx_in_drop = %u\n", ppe_dfx->rxfb.rxfb_tx_in_drop); + LOG_DEV_INFO(" rxfb.rx_in_all = %u (include rx_in_drop)\n", ppe_dfx->rxfb.rxfb_rx_in_all); + LOG_DEV_INFO(" rxfb.rx_in_drop = %u\n", ppe_dfx->rxfb.rxfb_rx_in_drop); + LOG_DEV_INFO(" rxfb.out_all = %u (include out_drop)\n", ppe_dfx->rxfb.rxfb_out_all); + LOG_DEV_INFO(" rxfb.out_drop = %u\n", ppe_dfx->rxfb.rxfb_out_drop); + LOG_DEV_INFO(" rxfb.internal_drop = %u\n", ppe_dfx->rxfb.rxfb_internal_drop); + + LOG_DEV_INFO("SWITCH DFX\n"); + LOG_DEV_INFO(" switch.tx_all = %u (include tx_drop)\n", ppe_dfx->sw.tx_all); + LOG_DEV_INFO(" switch.tx_drop = %u\n", ppe_dfx->sw.tx_drop); + LOG_DEV_INFO(" switch.rx_all = %u (include rx_drop)\n", ppe_dfx->sw.rx_all); + LOG_DEV_INFO(" switch.rx_drop = %u\n", ppe_dfx->sw.rx_drop); + + LOG_DEV_INFO("RXFT DFX\n"); + LOG_DEV_INFO(" rxft.tx_in_all = %u (include tx_in_drop)\n", ppe_dfx->rxft.tx_in_all); + LOG_DEV_INFO(" rxft.tx_in_drop = %u\n", ppe_dfx->rxft.tx_in_drop); + LOG_DEV_INFO(" rxft.tx_out_all = %u (include tx_out_drop)\n", ppe_dfx->rxft.tx_out_all); + LOG_DEV_INFO(" rxft.tx_out_drop = %u\n", ppe_dfx->rxft.tx_out_drop); + LOG_DEV_INFO(" rxft.rx_in_all = %u (include rx_in_drop)\n", ppe_dfx->rxft.rx_in_all); + LOG_DEV_INFO(" rxft.rx_in_drop = %u\n", ppe_dfx->rxft.rx_in_drop); + LOG_DEV_INFO(" rxft.rx_out_all = %u (include rx_out_drop)\n", ppe_dfx->rxft.rx_out_all); + LOG_DEV_INFO(" rxft.rx_out_drop = %u\n", ppe_dfx->rxft.rx_out_drop); + LOG_DEV_INFO(" rxft.lp_in_all = %u (include lp_in_drop)\n", ppe_dfx->rxft.lp_in_all); + LOG_DEV_INFO(" rxft.lp_in_drop = %u\n", ppe_dfx->rxft.lp_in_drop); + LOG_DEV_INFO(" rxft.lp_out_all = %u (include lp_out_drop)\n", ppe_dfx->rxft.lp_out_all); + LOG_DEV_INFO(" rxft.lp_out_drop = %u\n", ppe_dfx->rxft.lp_out_drop); +} + +void sxe2_fwc_ppe_dfx_show(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_ppe_dfx ppe_dfx; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_GET_PPE_DFX, NULL, 0, + &ppe_dfx, + sizeof(struct sxe2_fwc_ppe_dfx)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("ppe dfx cmd fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("****ppe dfx start****"); + + sxe2_ppe_dfx_dump(adapter, &ppe_dfx); + + LOG_DEV_INFO("****ppe dfx end****"); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.h new file mode 100644 index 0000000000000000000000000000000000000000..01e6becf628729d7eaec1fd3e778d01b966d0d0b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_dfx.h @@ -0,0 +1,606 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_dfx.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DFX_H__ +#define __SXE2_DFX_H__ + +#include +#include + +#include "sxe2.h" + +union sxe2_rxft_ppe_protocol_info { + u32 val; + struct { + u32 protocol_id : 8; + u32 protocol_offset : 8; + u32 rsv : 16; + } reg; +}; + +struct sxe2_rxft_dbg_ppe_info_action_type_1 { + u32 action_type : 5; + u32 action_pro : 3; + u32 pkt_tc : 3; + u32 port : 2; + u32 pkt_len : 14; + u32 src_pf : 3; + u32 src_vf_1_0 : 2; + + u32 srx_vf_7_2 : 6; + u32 src_txvsi : 10; + u32 src_pf_vf_vm_flg : 2; + u32 pkt_type : 10; + u32 head0_addr_63_60 : 4; + + u32 head0_addr_72_64 : 9; + u32 head1_addr : 13; + u32 head_addr_valid : 1; + u32 dst_pf : 3; + u32 dst_vf_5_0 : 6; + + u32 dst_vf_7_6 : 2; + u32 dst_vsi : 10; + u32 dst_pf_vf_vm_flg : 2; + u32 up : 3; + u32 tcp_syn : 1; + u32 tcp_ack : 1; + u32 tcp_rst : 1; + u32 tcp_fin : 1; + u32 l2_mac_err : 1; + u32 bypass_switch : 1; + u32 bypass_acl : 1; + u32 bypass_rxft : 1; + u32 drop : 1; + u32 parser_abort : 1; + u32 malicious_abort : 1; + u32 packet_source_type : 2; + u32 pkt_source : 2; + + u32 fd_program : 1; + u32 fd_program_dummy : 1; + u32 lan_tx_sw : 2; + u32 rdma_tx_swap : 1; + u32 pkt_dest : 2; + u32 icrc_err : 1; + u32 ipeh : 1; + u32 esp : 1; + u32 mpls : 1; + u32 fragment : 1; + u32 checksum_offload : 1; + u32 ipe : 1; + u32 l4e : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 mac_in_mac : 1; + u32 pkt_err : 6; + u32 mirr_id : 6; + u32 mirr_type : 2; + + u32 multicast_copy : 1; + u32 umbcast : 2; + u32 tunnel_type : 5; + u32 oam : 1; + u32 flow_id_vld : 1; + u32 rsv : 1; + u32 acl_hit : 1; + u32 macsec_err : 2; + u32 last_action : 1; + u32 last_mc_pkt : 1; + u32 first_action : 1; + u32 protocol_id0 : 8; + u32 protocol_offset0_6_0 : 7; + + u32 protocol_offset0_7 : 1; + u32 protocol_id1 : 8; + u32 protocol_offset1 : 8; + u32 protocol_id2 : 8; + u32 protocol_offset2_6_0 : 7; + + u32 protocol_offset2_7 : 1; + u32 protocol_id3 : 8; + u32 protocol_offset3 : 8; + u32 protocol_id4 : 8; + u32 protocol_offset4_6_0 : 7; + + u32 protocol_offset4_7 : 1; + u32 protocol_id5 : 8; + u32 protocol_offset5 : 8; + u32 protocol_id6 : 8; + u32 protocol_offset6_6_0 : 7; + + u32 protocol_offset6_7 : 1; + u32 protocol_id7 : 8; + u32 protocol_offset7 : 8; + u32 protocol_id8 : 8; + u32 protocol_offset8_6_0 : 7; + + u32 protocol_offset8_7 : 1; + u32 protocol_id9 : 8; + u32 protocol_offset9 : 8; + u32 protocol_id10 : 8; + u32 protocol_offset10_6_0 : 7; + + u32 protocol_offset10_7 : 1; + u32 protocol_id11 : 8; + u32 protocol_offset11 : 8; + u32 protocol_id12 : 8; + u32 protocol_offset12_6_0 : 7; + + u32 protocol_offset12_7 : 1; + u32 protocol_id13 : 8; + u32 protocol_offset13 : 8; + u32 protocol_id14 : 8; + u32 protocol_offset14_6_0 : 7; + + u32 protocol_offset14_7 : 1; + u32 protocol_id15 : 8; + u32 protocol_offset15 : 8; + u32 protocol_id_num : 5; + u32 sw_profile_id : 8; + u32 acl_profile_id_1_0 : 2; + + u32 acl_profile_id_6_2 : 5; + u32 fd_profile_id : 7; + u32 flow_id : 16; + u32 flow_id_pri : 3; + u32 rsv1 : 1; + + u32 rsv2; + + u32 rsv3; + + u32 rsv4; + + u32 rsv5; + + u32 rsv6 : 6; + u32 sdf_hash : 12; + u32 sdf_pri : 6; + u32 fd_prog_drop : 1; + u32 trace_level : 2; + u32 pkt_src_bfd : 1; + u32 to_host : 1; + u32 to_mng : 1; + u32 to_lan : 1; + u32 to_roce : 1; +}; + +struct sxe2_rxft_dbg_ppe_info_action_type_9 { + u32 action_type : 5; + u32 action_pro : 3; + u32 pkt_tc : 3; + u32 port : 2; + u32 pkt_len : 14; + u32 src_pf : 3; + u32 src_vf_31_30 : 2; + + u32 srx_vf_37_32 : 6; 
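+ /* Fields that span a 32-bit word boundary are carried as per-word slices (src_vf bits [31:30] and [37:32] here) so each u32 member maps onto one dword of the hardware PPE info layout. */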
+ u32 src_txvsi : 10; + u32 src_pf_vf_vm_flg : 2; + u32 pkt_type : 10; + u32 head0_addr_63_60 : 4; + + u32 head0_addr_72_64 : 9; + u32 head1_addr : 13; + u32 head_addr_valid : 1; + u32 dst_pf : 3; + u32 dst_vf_95_90 : 6; + + u32 dst_vf_97_96 : 2; + u32 dst_vsi : 10; + u32 dst_pf_vf_vm_flg : 2; + u32 up : 3; + u32 tcp_syn : 1; + u32 tcp_ack : 1; + u32 tcp_rst : 1; + u32 tcp_fin : 1; + u32 l2_mac_err : 1; + u32 bypass_switch : 1; + u32 bypass_acl : 1; + u32 bypass_rxft : 1; + u32 drop : 1; + u32 parser_abort : 1; + u32 malicious_abort : 1; + u32 packet_source_type : 2; + u32 pkt_source : 2; + + u32 fd_program : 1; + u32 fd_program_dummy : 1; + u32 lan_tx_sw : 2; + u32 rdma_tx_swap : 1; + u32 pkt_dest : 2; + u32 icrc_err : 1; + u32 ipeh : 1; + u32 esp : 1; + u32 mpls : 1; + u32 fragment : 1; + u32 checksum_offload : 1; + u32 ipe : 1; + u32 l4e : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 rsv0 : 1; + u32 pkt_err : 6; + u32 mirr_id : 6; + u32 mirr_type : 2; + + u32 multicast_copy : 1; + u32 umbcast : 2; + u32 tunnel_type : 5; + u32 oam : 1; + u32 flow_id_vld : 1; + u32 switch_hit : 1; + u32 acl_hit : 1; + u32 macsec_err : 2; + u32 last_action : 1; + u32 last_mc_pkt : 1; + u32 first_action : 1; + u32 protocol_id0 : 8; + u32 protocol_offset0_6_0 : 7; + + u32 protocol_offset0_7 : 1; + u32 protocol_id1 : 8; + u32 protocol_offset1 : 8; + u32 protocol_id2 : 8; + u32 protocol_offset2_6_0 : 7; + + u32 protocol_offset2_7 : 1; + u32 protocol_id3 : 8; + u32 protocol_offset3 : 8; + u32 protocol_id4 : 8; + u32 protocol_offset4_6_0 : 7; + + u32 protocol_offset4_7 : 1; + u32 protocol_id5 : 8; + u32 protocol_offset5 : 8; + u32 protocol_id6 : 8; + u32 protocol_offset6_6_0 : 7; + + u32 protocol_offset6_7 : 1; + u32 protocol_id7 : 8; + u32 protocol_offset7 : 8; + u32 protocol_id8 : 8; + u32 protocol_offset8_6_0 : 7; + + u32 protocol_offset8_7 : 1; + u32 protocol_id9 : 8; + u32 protocol_offset9 : 8; + u32 protocol_id10 : 8; + u32 protocol_offset10_6_0 : 7; + + u32 protocol_offset10_7 : 1; + u32 protocol_id11 : 8; + u32 protocol_offset11 : 8; + u32 protocol_id12 : 8; + u32 protocol_offset12_6_0 : 7; + + u32 protocol_offset12_7 : 1; + u32 protocol_id13 : 8; + u32 protocol_offset13 : 8; + u32 protocol_id14 : 8; + u32 protocol_offset14_6_0 : 7; + + u32 protocol_offset14_7 : 1; + u32 protocol_id15 : 8; + u32 protocol_offset15 : 8; + u32 protocol_id_num : 5; + u32 sw_profile_id : 8; + u32 acl_profile_id_1_0 : 2; + + u32 acl_profile_id_6_2 : 5; + u32 fd_profile_id : 7; + u32 flow_id : 16; + u32 flow_id_pri : 3; + u32 queue_buf_num_0 : 1; + + u32 queue_buf_num_10_1 : 10; + u32 toqueue : 3; + u32 queue_hit_flag : 4; + u32 queue_sel_result : 2; + u32 rsv1 : 13; +}; + +struct sxe2_rxft_dbg_ppe_info_action_type_10 { + u32 action_type : 5; + u32 action_pro : 3; + u32 pkt_tc : 3; + u32 port : 2; + u32 pkt_len : 14; + u32 src_pf : 3; + u32 src_vf_31_30 : 2; + + u32 srx_vf_37_32 : 6; + u32 src_txvsi : 10; + u32 src_pf_vf_vm_flg : 2; + u32 pkt_type : 10; + u32 head0_addr_63_60 : 4; + + u32 head0_addr_72_64 : 9; + u32 head1_addr : 13; + u32 head_addr_valid : 1; + u32 dst_pf : 3; + u32 dst_vf_95_90 : 6; + + u32 dst_vf_97_96 : 2; + u32 dst_vsi : 10; + u32 dst_pf_vf_vm_flg : 2; + u32 up : 3; + u32 tcp_syn : 1; + u32 tcp_ack : 1; + u32 tcp_rst : 1; + u32 tcp_fin : 1; + u32 l2_mac_err : 1; + u32 bypass_switch : 1; + u32 bypass_acl : 1; + u32 bypass_rxft : 1; + u32 drop : 1; + u32 parser_abort : 1; + u32 malicious_abort : 1; + u32 packet_source_type : 2; + u32 pkt_source : 2; + + u32 fd_program : 1; + u32 fd_program_dummy : 1; + u32 
lan_tx_sw : 2; + u32 rdma_tx_swap : 1; + u32 pkt_dest : 2; + u32 icrc_err : 1; + u32 ipeh : 1; + u32 esp : 1; + u32 mpls : 1; + u32 fragment : 1; + u32 checksum_offload : 1; + u32 ipe : 1; + u32 l4e : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 rsv0 : 1; + u32 pkt_err : 6; + u32 mirr_id : 6; + u32 mirr_type : 2; + + u32 multicast_copy : 1; + u32 umbcast : 2; + u32 tunnel_type : 5; + u32 oam : 1; + u32 flow_id_vld : 1; + u32 switch_hit : 1; + u32 acl_hit : 1; + u32 macsec_err : 2; + u32 last_action : 1; + u32 last_mc_pkt : 1; + u32 first_action : 1; + u32 protocol_id0 : 8; + u32 protocol_offset0_6_0 : 7; + + u32 protocol_offset0_7 : 1; + u32 protocol_id1 : 8; + u32 protocol_offset1 : 8; + u32 protocol_id2 : 8; + u32 protocol_offset2_6_0 : 7; + + u32 protocol_offset2_7 : 1; + u32 protocol_id3 : 8; + u32 protocol_offset3 : 8; + u32 protocol_id4 : 8; + u32 protocol_offset4_6_0 : 7; + + u32 protocol_offset4_7 : 1; + u32 protocol_id5 : 8; + u32 protocol_offset5 : 8; + u32 protocol_id6 : 8; + u32 protocol_offset6_6_0 : 7; + + u32 protocol_offset6_7 : 1; + u32 protocol_id7 : 8; + u32 protocol_offset7 : 8; + u32 protocol_id8 : 8; + u32 protocol_offset8_6_0 : 7; + + u32 protocol_offset8_7 : 1; + u32 protocol_id9 : 8; + u32 protocol_offset9 : 8; + u32 protocol_id10 : 8; + u32 protocol_offset10_6_0 : 7; + + u32 protocol_offset10_7 : 1; + u32 protocol_id11 : 8; + u32 protocol_offset11 : 8; + u32 protocol_id12 : 8; + u32 protocol_offset12_6_0 : 7; + + u32 protocol_offset12_7 : 1; + u32 protocol_id13 : 8; + u32 protocol_offset13 : 8; + u32 protocol_id14 : 8; + u32 protocol_offset14_6_0 : 7; + + u32 protocol_offset14_7 : 1; + u32 protocol_id15 : 8; + u32 protocol_offset15 : 8; + u32 protocol_id_num : 5; + u32 sw_profile_id : 8; + u32 acl_profile_id_1_0 : 2; + + u32 acl_profile_id_6_2 : 5; + u32 fd_profile_id : 7; + u32 flow_id : 16; + u32 flow_id_pri : 3; + u32 qindex_0 : 1; + + u32 qindex_10_1 : 10; + u32 comp_queue : 1; + u32 comp_report : 2; + u32 fd_space : 2; + u32 stat_cnt : 14; + u32 stat_ena : 2; + u32 evict_ena : 1; + + u32 to_queue : 3; + u32 to_queue_prio : 3; + u32 dpu_repie : 1; + u32 fd_drop : 1; + u32 flex : 7; + u32 fd_flow_id : 16; + u32 d_type_0 : 1; + + u32 d_type_3_1 : 3; + u32 pcmd : 2; + u32 desc_pro_prio : 2; + u32 desc_prof : 6; + u32 fd_vsi : 10; + u32 swap : 1; + u32 fdid_prio : 3; + u32 fdid_did : 4; + u32 fdid_0 : 1; + + u32 fdid_31_1 : 31; + u32 rsv2 : 1; +}; + +struct sxe2_rxft_dbg_ppe_info_action_type_14 { + u32 action_type : 5; + u32 action_pro : 3; + u32 pkt_tc : 3; + u32 port : 2; + u32 pkt_len : 14; + u32 src_pf : 3; + u32 src_vf_1_0 : 2; + + u32 srx_vf_7_2 : 6; + u32 src_txvsi : 10; + u32 src_pf_vf_vm_flg : 2; + u32 pkt_type : 10; + u32 head0_addr_63_60 : 4; + + u32 head0_addr_72_64 : 9; + u32 head1_addr : 13; + u32 head_addr_valid : 1; + u32 dst_pf : 3; + u32 dst_vf_5_0 : 6; + + u32 dst_vf_7_6 : 2; + u32 dst_vsi : 10; + u32 dst_pf_vf_vm_flg : 2; + u32 up : 3; + u32 tcp_syn : 1; + u32 tcp_ack : 1; + u32 tcp_rst : 1; + u32 tcp_fin : 1; + u32 l2_mac_err : 1; + u32 bypass_switch : 1; + u32 bypass_acl : 1; + u32 bypass_rxft : 1; + u32 drop : 1; + u32 parser_abort : 1; + u32 malicious_abort : 1; + u32 packet_source_type : 2; + u32 pkt_source : 2; + + u32 fd_program : 1; + u32 fd_program_dummy : 1; + u32 lan_tx_sw : 2; + u32 rdma_tx_swap : 1; + u32 pkt_dest : 2; + u32 icrc_err : 1; + u32 ipeh : 1; + u32 esp : 1; + u32 mpls : 1; + u32 fragment : 1; + u32 checksum_offload : 1; + u32 ipe : 1; + u32 l4e : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 rsv0 : 1; + u32 pkt_err : 6; + u32 
mirr_id : 6; + u32 mirr_type : 2; + + u32 multicast_copy : 1; + u32 umbcast : 2; + u32 tunnel_type : 5; + u32 oam : 1; + u32 flow_id_vld : 1; + u32 switch_hit : 1; + u32 acl_hit : 1; + u32 macsec_err : 2; + u32 last_action : 1; + u32 last_mc_pkt : 1; + u32 first_action : 1; + u32 protocol_id0 : 8; + u32 protocol_offset0_6_0 : 7; + + u32 protocol_offset0_7 : 1; + u32 protocol_id1 : 8; + u32 protocol_offset1 : 8; + u32 protocol_id2 : 8; + u32 protocol_offset2_6_0 : 7; + + u32 protocol_offset2_7 : 1; + u32 protocol_id3 : 8; + u32 protocol_offset3 : 8; + u32 protocol_id4 : 8; + u32 protocol_offset4_6_0 : 7; + + u32 protocol_offset4_7 : 1; + u32 protocol_id5 : 8; + u32 protocol_offset5 : 8; + u32 protocol_id6 : 8; + u32 protocol_offset6_6_0 : 7; + + u32 protocol_offset6_7 : 1; + u32 protocol_id7 : 8; + u32 protocol_offset7 : 8; + u32 protocol_id8 : 8; + u32 protocol_offset8_6_0 : 7; + + u32 protocol_offset8_7 : 1; + u32 protocol_id9 : 8; + u32 protocol_offset9 : 8; + u32 protocol_id10 : 8; + u32 protocol_offset10_6_0 : 7; + + u32 protocol_offset10_7 : 1; + u32 protocol_id11 : 8; + u32 protocol_offset11 : 8; + u32 protocol_id12 : 8; + u32 protocol_offset12_6_0 : 7; + + u32 protocol_offset12_7 : 1; + u32 protocol_id13 : 8; + u32 protocol_offset13 : 8; + u32 protocol_id14 : 8; + u32 protocol_offset14_6_0 : 7; + + u32 protocol_offset14_7 : 1; + u32 protocol_id15 : 8; + u32 protocol_offset15 : 8; + u32 protocol_id_num : 5; + u32 rh_ip_offset : 8; + u32 rh_vlan_offset1_0 : 2; + + u32 rh_vlan_offset7_2 : 6; + u32 rh_vlan_vld : 1; + u32 rh_dqpn : 18; + u32 rsv1 : 6; + u32 dst_rdma_mc_num_0 : 1; + + u32 dst_rdma_mc_num_12_1 : 12; + u32 rdma_mc_cnt : 10; + u32 rsv2 : 10; +}; + +void sxe2_fwc_rxft_ppe_info(struct sxe2_adapter *adapter); + +void sxe2_fwc_ppe_dfx_show(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.c new file mode 100644 index 0000000000000000000000000000000000000000..3eb4becdac4d7d49bcb9250c200950b39caf872d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.c @@ -0,0 +1,1405 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_eswitch.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_irq.h" +#include "sxe2_vsi.h" +#include "sxe2_log.h" +#include "sxe2_netdev.h" +#include "sxe2_mbx_msg.h" +#include "sxe2_tx.h" +#include "sxe2_eswitch.h" +#include "sxe2_ethtool.h" +#include "sxe2_tc.h" +#include "sxe2_rx.h" + +#ifdef NEED_DEFINE_METADATA_DST_FREE +void metadata_dst_free(struct metadata_dst *md_dst) +{ + kfree((void *)md_dst); +} +#endif + +STATIC void sxe2_eswitch_vsi_destroy(struct sxe2_adapter *adapter) +{ + sxe2_vsi_destroy(adapter->eswitch_ctxt.esw_vsi); + adapter->eswitch_ctxt.esw_vsi = NULL; + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_KERNEL) { + sxe2_vsi_destroy(adapter->eswitch_ctxt.user_esw_vsi); + adapter->eswitch_ctxt.user_esw_vsi = NULL; + } +} + +static s32 sxe2_uplink_vsi_cfg_to_switchdev(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *uplink_vsi; + struct net_device *uplink_netdev; + s32 ret = 0; + + uplink_vsi = adapter->vsi_ctxt.main_vsi; + + adapter->eswitch_ctxt.uplink_vsi = uplink_vsi; + + sxe2_vsi_l2_fltr_remove(adapter, uplink_vsi->idx_in_dev); + + if (uplink_vsi->type == SXE2_VSI_T_PF) { + uplink_netdev = uplink_vsi->netdev; + netif_addr_lock_bh(uplink_netdev); + __dev_uc_unsync(uplink_netdev, NULL); + __dev_mc_unsync(uplink_netdev, NULL); + netif_addr_unlock_bh(uplink_netdev); + } + + ret = sxe2_promisc_rule_add(uplink_vsi); + if (ret && ret != -EEXIST) + return ret; + + ret = sxe2_vlan_filter_control(adapter, uplink_vsi->idx_in_dev, false); + + return ret; +} + +static s32 sxe2_mac_rule_restore(struct sxe2_vsi *vsi, const u8 *mac) +{ + s32 ret; + u8 broadcast[ETH_ALEN]; + + eth_broadcast_addr(broadcast); + ret = sxe2_mac_rule_add(vsi, broadcast); + if (ret) { + LOG_ERROR("vsi[%u][%u] broadcast mac addr add failed.(err:%d)\n", + vsi->id_in_pf, vsi->idx_in_dev, ret); + return ret; + } + + ret = sxe2_mac_rule_add(vsi, mac); + if (ret) { + LOG_ERROR("vsi[%u][%u] dev mac addr add failed.(err:%d)\n", + vsi->id_in_pf, vsi->idx_in_dev, ret); + return ret; + } + + return ret; +} + +static void sxe2_uplink_vsi_cfg_to_legacy(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct net_device *netdev = vsi->netdev; + + (void)sxe2_promisc_rule_del(adapter, vsi->idx_in_dev); + + (void)sxe2_mac_rule_restore(vsi, vsi->netdev->dev_addr); + + (void)sxe2_vsi_vlan_zero_add(vsi); + + rtnl_lock(); + if (netdev->features & + (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) { + (void)sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, true); + } + rtnl_unlock(); + + sxe2_set_rx_mode(vsi->netdev); + + (void)sxe2_etype_fltr_init(vsi); + (void)sxe2_src_vsi_prune_control(adapter, vsi->idx_in_dev, true); + (void)sxe2_srcvsi_rule_add(vsi); +} + +static struct net_device *sxe2_repr_netdev_alloc(struct sxe2_vf_node *vf) +{ + struct sxe2_netdev_priv *priv; + struct net_device *netdev; + struct sxe2_adapter *adapter = vf->adapter; + + netdev = alloc_etherdev_mq(sizeof(*priv), SXE2_ESWITCH_QUEUE_CNT); + if (!netdev) { + LOG_DEV_ERR("vf:%u repr netdev alloc failed. 
priv size %zu\n", + vf->vf_idx, sizeof(*priv)); + return NULL; + } + + vf->repr->netdev = netdev; + vf->repr->vf_node = vf; + vf->repr->src_vsi = NULL; + priv = netdev_priv(netdev); + priv->repr = vf->repr; + priv->vsi = adapter->eswitch_ctxt.esw_vsi; + + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + return netdev; +} + +static s32 sxe2_repr_open(struct net_device *netdev) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vf_repr *repr = np->repr; + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_vf_node *vf_node = repr->vf_node; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) { + ret = -EBUSY; + goto unlock; + } + + vf_node->prop.link_forced = true; + vf_node->prop.link_up = true; + sxe2_notify_vf_link_state(repr->vf_node); + + netif_carrier_on(netdev); + + netif_tx_start_all_queues(netdev); + + LOG_INFO_BDF("vf:%u src_vsi:%u repr:%pK esw_vsi:%u start.\n", + repr->vf_node->vf_idx, repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH], + repr, np->vsi->idx_in_dev); + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2_repr_stop(struct net_device *netdev) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vf_repr *repr = np->repr; + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_vf_node *vf_node = repr->vf_node; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) + goto unlock; + + vf_node->prop.link_forced = true; + vf_node->prop.link_up = false; + sxe2_notify_vf_link_state(repr->vf_node); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + LOG_INFO_BDF("vf:%u src_vsi:%u repr:%pK esw_vsi:%u stopped.\n", + repr->vf_node->vf_idx, repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH], + repr, np->vsi->idx_in_dev); + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return 0; +} + +static int sxe2_repr_get_phys_port_name(struct net_device *netdev, char *buf, + size_t len) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vf_repr *repr = np->repr; + struct sxe2_vf_node *vf = repr->vf_node; + int res; + + res = snprintf(buf, len, "r%d", vf->vf_idx); + if (res <= 0) + return -EOPNOTSUPP; + return 0; +} + +STATIC netdev_tx_t sxe2_repr_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vf_repr *repr = np->repr; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)repr->dst); + skb_dst_set(skb, (struct dst_entry *)repr->dst); + skb->queue_mapping = repr->vf_node->vf_idx; + + return sxe2_xmit(skb, netdev); +} + +#ifdef HAVE_VOID_NDO_GET_STATS64 +static struct rtnl_link_stats64 * +sxe2_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static void sxe2_repr_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct rtnl_link_stats64 *repr_vf_stats; + u16 vf_idx; + + vf_idx = np->repr->vf_idx; + repr_vf_stats = &adapter->repr_vf_stats.repr_link_stats64[vf_idx]; + memcpy(stats, repr_vf_stats, sizeof(*stats)); + +#ifdef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +#ifdef HAVE_NDO_OFFLOAD_STATS +static bool sxe2_repr_ndo_has_offload_stats(const struct net_device *dev, + int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int sxe2_repr_slow_path_stats64(const struct 
net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + int ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + u64 pkts, bytes; + struct sxe2_vsi *esw_vsi = np->vsi; + struct sxe2_adapter *adapter = np->vsi->adapter; + int vf_id = np->repr->vf_node->vf_idx; + struct sxe2_queue *txq = NULL; + struct sxe2_queue *rxq = NULL; + struct sxe2_vsi_qs_stats *qs_stats_last = &esw_vsi->vsi_qs_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, esw_vsi->state)) { + stats->rx_packets = qs_stats_last->txqs_stats[vf_id].packets; + stats->rx_bytes = qs_stats_last->txqs_stats[vf_id].bytes; + stats->tx_packets = qs_stats_last->rxqs_stats[vf_id].packets; + stats->tx_bytes = qs_stats_last->rxqs_stats[vf_id].bytes; + stats->tx_dropped = qs_stats_last->rxqs_stats[vf_id] + .rx_stats.rx_pg_alloc_fail + + qs_stats_last->rxqs_stats[vf_id] + .rx_stats.rx_buff_alloc_err; + LOG_DEBUG_BDF("esw vsi[%u][%u] vf_id[%u] dev is busy now(err:%d).\n", + esw_vsi->id_in_pf, esw_vsi->idx_in_dev, vf_id, ret); + goto l_unlock; + } + + txq = esw_vsi->txqs.q[vf_id]; + rxq = esw_vsi->rxqs.q[vf_id]; + + sxe2_fetch_u64_data_per_ring(&txq->syncp, txq->stats, &pkts, &bytes); + stats->rx_packets = pkts; + stats->rx_bytes = bytes; + + sxe2_fetch_u64_data_per_ring(&rxq->syncp, rxq->stats, &pkts, &bytes); + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_dropped = rxq->stats->rx_stats.rx_pg_alloc_fail + + rxq->stats->rx_stats.rx_buff_alloc_err; + LOG_DEBUG_BDF("esw stats vf_id:[%u] rx_pack:[%llu] tx_pack:[%llu]\n", vf_id, + stats->rx_packets, stats->tx_packets); +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return sxe2_repr_slow_path_stats64(dev, + (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} +#endif + +static LIST_HEAD(sxe2_repr_block_cb_list); + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static s32 sxe2_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + + if (type == TC_SETUP_BLOCK) + return flow_block_cb_setup_simple( + type_data, &sxe2_repr_block_cb_list, + sxe2_repr_setup_tc_block_cb, np, np, true); + return -EOPNOTSUPP; +} +#else +static s32 sxe2_repr_setup_tc(struct net_device *netdev, u32 __always_unused handle, + __be16 __always_unused proto, struct tc_to_netdev *tc) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + + if (tc->type == TC_SETUP_CLSFLOWER) + return sxe2_repr_setup_tc_cls_flower(np->repr, tc->cls_flower); + return -EOPNOTSUPP; +} +#endif + +STATIC const struct net_device_ops sxe2_repr_netdev_ops = { + .ndo_get_stats64 = sxe2_repr_get_stats64, + + .ndo_get_phys_port_name = sxe2_repr_get_phys_port_name, + .ndo_start_xmit = sxe2_repr_start_xmit, + .ndo_open = sxe2_repr_open, + .ndo_stop = sxe2_repr_stop, + +#ifdef HAVE_NDO_OFFLOAD_STATS + .ndo_has_offload_stats = sxe2_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = sxe2_repr_ndo_get_offload_stats, +#endif + .ndo_setup_tc = sxe2_repr_setup_tc, +}; + +static void sxe2_repr_netdev_ops_init(struct net_device *netdev) +{ + netdev->netdev_ops = &sxe2_repr_netdev_ops; +} + +#ifdef HAVE_NETDEV_MIN_MAX_MTU +static void sxe2_repr_netdev_mtu_init(struct net_device *netdev) +{ + sxe2_netdev_mtu_init(netdev); +} +#endif + +STATIC void sxe2_repr_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + + 
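/* Delegate to the shared drvinfo fill so the representor reports the parent adapter's driver and firmware versions. */ +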
__sxe2_get_drvinfo(netdev, drvinfo, priv->repr->vf_node->adapter); +} + +STATIC int sxe2_repr_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SXE2_VSI_HW_STATS_LEN; + + default: + return -EOPNOTSUPP; + } +} + +STATIC void sxe2_repr_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + + if (priv->repr->src_vsi) { + struct sxe2_vsi *vsi = priv->repr->src_vsi; + + __sxe2_repr_get_ethtool_stats(netdev, stats, data, vsi); + } +} + +STATIC void sxe2_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + __sxe2_repr_get_strings(netdev, stringset, data); +} + +static bool sxe2_repr_is_coalesce_param_invalid(struct ethtool_coalesce *ec) +{ + if (ec->rx_coalesce_usecs || ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq || + ec->tx_coalesce_usecs || ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return true; + + return false; +} + +#ifdef SET_COALESCE_NEED_2_PARAMS +STATIC int sxe2_repr_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +#else +STATIC int sxe2_repr_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#endif +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + u16 vf_idx; + struct sxe2_vf_node *vf; + struct sxe2_vsi *vsi = adapter->eswitch_ctxt.esw_vsi; + struct sxe2_irq_data *irq_data; + s32 ret = 0; + + if (sxe2_repr_is_coalesce_param_invalid(ec)) + return -EOPNOTSUPP; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) { + ret = -EBUSY; + goto unlock; + } + + irq_data = vsi->irqs.irq_data[0]; + if (!irq_data) { + ret = -EINVAL; + goto unlock; + } + + if (ec->rx_coalesce_usecs_high > + SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX * + hw->hw_cfg.credit_interval_gran || + (ec->rx_coalesce_usecs_high && + ec->rx_coalesce_usecs_high < hw->hw_cfg.credit_interval_gran)) { + LOG_NETDEV_INFO("invalid value, rx_coalesce_usecs_high valid values\t" + "are 0 (disabled), value:%d, valid range:[%d-%d]\n", + ec->rx_coalesce_usecs_high, + hw->hw_cfg.credit_interval_gran, + SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX * + hw->hw_cfg.credit_interval_gran); + ret = -EINVAL; + goto unlock; + } + + sxe2_for_each_vf(adapter, vf_idx) + { + vf = SXE2_VF_NODE(adapter, vf_idx); + if (ec->rx_coalesce_usecs_high != vf->repr->irq_data->rate_limit) + vf->repr->irq_data->rate_limit = + (u16)ec->rx_coalesce_usecs_high; + } + + irq_data->rate_limit = (u16)ec->rx_coalesce_usecs_high; + + sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf, irq_data->rate_limit); + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef GET_COALESCE_NEED_2_PARAMS +STATIC int sxe2_repr_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +#else +STATIC int 
sxe2_repr_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#endif +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_vsi *vsi = adapter->eswitch_ctxt.esw_vsi; + struct sxe2_irq_data *irq_data; + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) { + ret = -EBUSY; + goto unlock; + } + + irq_data = vsi->irqs.irq_data[0]; + if (!irq_data) { + ret = -EINVAL; + goto unlock; + } + + ec->rx_coalesce_usecs_high = irq_data->rate_limit; + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static const struct ethtool_ops sxe2_repr_ethtool_ops = { +#ifdef SUPPORTED_COALESCE_PARAMS + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_HIGH, +#endif + .get_drvinfo = sxe2_repr_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = sxe2_repr_get_strings, + .get_ethtool_stats = sxe2_repr_get_ethtool_stats, + .get_sset_count = sxe2_repr_get_sset_count, + .set_coalesce = sxe2_repr_set_coalesce, + .get_coalesce = sxe2_repr_get_coalesce, + +}; + +static void sxe2_repr_ethtool_ops_set(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxe2_repr_ethtool_ops; +} + +STATIC s32 sxe2_repr_netdev_init(struct sxe2_vf_node *vf) +{ + struct net_device *netdev; + + netdev = sxe2_repr_netdev_alloc(vf); + if (!netdev) + return -ENOMEM; + + sxe2_repr_netdev_ops_init(netdev); + +#ifdef HAVE_NETDEV_MIN_MAX_MTU + sxe2_repr_netdev_mtu_init(netdev); +#endif + + sxe2_repr_ethtool_ops_set(netdev); + + return 0; +} + +STATIC s32 sxe2_repr_netdev_register(struct sxe2_vf_node *vf) +{ + struct net_device *netdev = vf->repr->netdev; + + eth_hw_addr_random(netdev); + + netdev->hw_features |= NETIF_F_HW_TC; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return register_netdev(netdev); +} + +STATIC s32 __sxe2_vf_repr_create(struct sxe2_vf_node *vf) +{ + s32 ret; + struct sxe2_vf_repr *repr; + struct sxe2_adapter *adapter = vf->adapter; + + repr = kzalloc(sizeof(*repr), GFP_KERNEL); + if (!repr) { + LOG_ERROR_BDF("vf:%u repr malloc failed.\n", vf->vf_idx); + return -ENOMEM; + } + vf->repr = repr; + + repr->irq_data = kzalloc(sizeof(*repr->irq_data), GFP_KERNEL); + if (!repr->irq_data) { + ret = -ENOMEM; + LOG_ERROR_BDF("vf:%u repr irq data alloc failed.\n", vf->vf_idx); + goto l_repr_free; + } + + ret = sxe2_repr_netdev_init(vf); + if (ret) + goto l_irq_free; + + ret = sxe2_repr_netdev_register(vf); + if (ret) { + LOG_ERROR_BDF("vf:%u representor netdev register failed.(err:%d)\n", + vf->vf_idx, ret); + goto l_netdev_free; + } + + vf->msg_table = sxe2_esw_mbx_msg_table_get(); + + LOG_INFO_BDF("vf:%u repr:%pK netdev:%pK name:%s.\n", vf->vf_idx, repr, + repr->netdev, repr->netdev->name); + + return ret; + +l_netdev_free: + free_netdev(repr->netdev); + repr->netdev = NULL; + +l_irq_free: + kfree(repr->irq_data); + repr->irq_data = NULL; + +l_repr_free: + kfree(repr); + vf->repr = NULL; + return ret; +} + +STATIC void __sxe2_vf_repr_destroy(struct sxe2_vf_node *vf) +{ + struct sxe2_vf_repr *repr = vf->repr; + struct net_device *netdev = repr->netdev; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + unregister_netdev(netdev); + + free_netdev(netdev); + kfree(repr->irq_data); + repr->irq_data = NULL; + kfree(repr); + vf->repr = NULL; +} + +STATIC void sxe2_vf_repr_destroy(struct sxe2_adapter *adapter) +{ + struct 
sxe2_vf_node *vf_node; + u16 idx; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + sxe2_for_each_vf(adapter, idx) + { + vf_node = SXE2_VF_NODE(adapter, idx); + if (vf_node->repr) + __sxe2_vf_repr_destroy(vf_node); + } +} + +static s32 sxe2_vf_repr_create(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf_node; + u16 idx; + s32 ret; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + sxe2_for_each_vf(adapter, idx) + { + vf_node = SXE2_VF_NODE(adapter, idx); + ret = __sxe2_vf_repr_create(vf_node); + if (ret) { + LOG_ERROR_BDF("vf:%u repr create failed.\n", idx); + goto l_err; + } + } + + return 0; + +l_err: + sxe2_vf_repr_destroy(adapter); + return ret; +} + +s32 sxe2_vf_sp_rule_add(struct sxe2_vf_node *vf_node, bool is_user) +{ + return sxe2_eswitch_vf_slow_path_rule_setup(vf_node, is_user, true); +} + +s32 sxe2_vf_sp_rule_del(struct sxe2_vf_node *vf_node, bool is_user) +{ + return sxe2_eswitch_vf_slow_path_rule_setup(vf_node, is_user, false); +} + +void sxe2_vf_repr_decfg(struct sxe2_vf_node *vf_node) +{ + struct sxe2_vf_repr *repr; + + repr = vf_node->repr; + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, vf_node->adapter->flags)) + return; + + if (!repr->dst) + return; + + (void)sxe2_vf_sp_rule_del(vf_node, false); + netif_napi_del(&repr->irq_data->napi); + metadata_dst_free(repr->dst); + repr->dst = NULL; +} + +void sxe2_vfs_repr_decfg(struct sxe2_adapter *adapter) +{ + u16 idx; + struct sxe2_vf_node *vf_node; + + sxe2_for_each_vf(adapter, idx) + { + (void)mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + sxe2_vf_repr_decfg(vf_node); + (void)mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } +} + +#ifdef HAVE_METADATA_PORT_INFO +static s32 sxe2_vf_repr_cfg(struct sxe2_vf_node *vf_node) +{ + s32 ret; + struct sxe2_vf_repr *repr; + struct sxe2_adapter *adapter; + + adapter = vf_node->adapter; + + repr = vf_node->repr; + repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); + if (!repr->dst) { + ret = -ENOMEM; + LOG_DEV_ERR("metadata dst alloc failed.\n"); + goto l_err; + } + + ret = sxe2_vf_sp_rule_add(vf_node, false); + if (ret && (ret != -EEXIST)) { + metadata_dst_free(repr->dst); + repr->dst = NULL; + LOG_DEV_ERR("slow path rule add failed.(err:%d)\n", ret); + goto l_err; + } + + vf_node->prop.spoofchk = false; + + netif_napi_add(repr->netdev, &repr->irq_data->napi, sxe2_esw_napi_poll, + NAPI_POLL_WEIGHT); + netif_keep_dst(repr->netdev); + + repr->dst->u.port_info.port_id = repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + repr->dst->u.port_info.lower_dev = repr->netdev; + + return 0; + +l_err: + sxe2_vf_repr_decfg(vf_node); + return ret; +} + +static s32 sxe2_vfs_repr_cfg(struct sxe2_adapter *adapter) +{ + u16 idx; + struct sxe2_vf_node *vf_node; + s32 ret = 0; + + sxe2_for_each_vf(adapter, idx) + { + (void)mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + ret = sxe2_vf_repr_cfg(vf_node); + if (ret) { + (void)mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + goto l_end; + } + (void)mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } + + return ret; + +l_end: + sxe2_vfs_repr_decfg(adapter); + + return ret; +} +#else +static s32 sxe2_vfs_repr_cfg(struct sxe2_adapter *adapter) +{ + return -ENODEV; +} +#endif + +static void sxe2_eswitch_irq_queues_map(struct sxe2_adapter *adapter) +{ + u16 i; + struct sxe2_irq_data *irq_data; + struct sxe2_queue *txq; + struct sxe2_queue *rxq; + struct sxe2_vf_node *vf; + struct sxe2_vsi *esw_vsi = adapter->eswitch_ctxt.esw_vsi; + + 
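/* Eswitch VSI queue i is dedicated to VF i: point each representor's irq_data at its queue pair while sharing the eswitch VSI's single interrupt vector. */ +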
sxe2_for_each_vsi_txq(esw_vsi, i) + { + vf = SXE2_VF_NODE(adapter, i); + if (!vf) { + LOG_WARN_BDF("vf:%u has been freed.\n", i); + continue; + } + + irq_data = vf->repr->irq_data; + irq_data->vsi = esw_vsi; + irq_data->idx_in_pf = esw_vsi->irqs.irq_data[0]->idx_in_pf; + irq_data->tx.list.next = esw_vsi->txqs.q[i]; + irq_data->tx.list.next->next = NULL; + irq_data->tx.itr_idx = esw_vsi->irqs.irq_data[0]->tx.itr_idx; + + txq = esw_vsi->txqs.q[i]; + txq->idx_in_vsi = 0; + txq->irq_data = irq_data; + txq->netdev = vf->repr->netdev; + + LOG_DEBUG_BDF("eswitch irq map, vf %d, irq in pf %d, txq in pf %d\n", + i, irq_data->idx_in_pf, txq->idx_in_pf); + } + + sxe2_for_each_vsi_rxq(esw_vsi, i) + { + vf = SXE2_VF_NODE(adapter, i); + if (!vf) { + LOG_WARN_BDF("vf:%u has been freed.\n", i); + continue; + } + + irq_data = vf->repr->irq_data; + irq_data->vsi = esw_vsi; + irq_data->idx_in_pf = esw_vsi->irqs.irq_data[0]->idx_in_pf; + irq_data->rx.list.next = esw_vsi->rxqs.q[i]; + irq_data->rx.list.next->next = NULL; + irq_data->rx.itr_idx = esw_vsi->irqs.irq_data[0]->rx.itr_idx; + + rxq = esw_vsi->rxqs.q[i]; + rxq->idx_in_vsi = 0; + rxq->irq_data = irq_data; + rxq->netdev = vf->repr->netdev; + + LOG_DEBUG_BDF("eswitch irq map, vf %d, irq in pf %d, rxq in pf %d\n", + i, irq_data->idx_in_pf, rxq->idx_in_pf); + } +} + +static s32 sxe2_eswitch_offload_enable(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_eswitch_context *eswitch = &adapter->eswitch_ctxt; + + eswitch->adapter = adapter; + + set_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags); + + ret = sxe2_eswitch_vsi_create(adapter); + if (ret) { + LOG_ERROR_BDF("eswitch vsi create failed.(err:%d)\n", ret); + clear_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags); + return ret; + } + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + ret = sxe2_uplink_vsi_cfg_to_switchdev(adapter); + if (ret) + goto l_uplink_setup; + } + + ret = sxe2_vf_repr_create(adapter); + if (ret) + goto l_uplink_setup; + + ret = sxe2_vfs_repr_cfg(adapter); + if (ret) + goto l_repr_destroy; + + sxe2_eswitch_irq_queues_map(adapter); + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_vsi_open(eswitch->esw_vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("eswitch vsi[%u][%u] open failed.(err:%d)\n", + eswitch->esw_vsi->id_in_pf, + eswitch->esw_vsi->idx_in_dev, ret); + goto l_vsi_open_fail; + } + + return ret; + +l_vsi_open_fail: + sxe2_vfs_repr_decfg(adapter); + +l_repr_destroy: + sxe2_vf_repr_destroy(adapter); + +l_uplink_setup: + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) + sxe2_uplink_vsi_cfg_to_legacy(adapter); + + sxe2_eswitch_vsi_destroy(adapter); + + clear_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags); + + return ret; +} + +static s32 sxe2_eswitch_offload_disable(struct sxe2_adapter *adapter) +{ + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + return 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (!test_bit(SXE2_VSI_S_CLOSE, adapter->eswitch_ctxt.esw_vsi->state)) + (void)sxe2_vsi_close(adapter->eswitch_ctxt.esw_vsi); + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_KERNEL && + !test_bit(SXE2_VSI_S_CLOSE, adapter->eswitch_ctxt.user_esw_vsi->state)) { + (void)sxe2_vsi_close(adapter->eswitch_ctxt.user_esw_vsi); + } + + mutex_unlock(&adapter->vsi_ctxt.lock); + + sxe2_vfs_repr_decfg(adapter); + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + sxe2_vsi_complex_fltr_remove(adapter, + adapter->vsi_ctxt.main_vsi->idx_in_dev, + false); + } + + sxe2_vf_repr_destroy(adapter); + +
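/* Representors and eswitch filters are torn down; drop the switchdev flag before restoring the legacy uplink configuration. */ +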
clear_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags); + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) + sxe2_uplink_vsi_cfg_to_legacy(adapter); + + sxe2_eswitch_vsi_destroy(adapter); + + return 0; +} + +s32 sxe2_eswitch_configure(struct sxe2_adapter *adapter, bool enable) +{ + s32 ret = 0; + + if (enable) + ret = sxe2_eswitch_offload_enable(adapter); + else + ret = sxe2_eswitch_offload_disable(adapter); + + if (ret) + LOG_ERROR_BDF("eswitch offload %s failed.(err:%d)\n", + enable ? "enable" : "disable", ret); + return ret; +} + +irqreturn_t sxe2_eswitch_msix_ring_irq_handler(int __always_unused irq, void *data) +{ + struct sxe2_irq_data *irq_data = (struct sxe2_irq_data *)data; + struct sxe2_adapter *adapter = irq_data->vsi->adapter; + struct sxe2_vf_node *vf; + u16 vf_idx; + + if (!SXE2_IRQ_HAS_TXQ(irq_data) && !SXE2_IRQ_HAS_RXQ(irq_data)) + return IRQ_HANDLED; + + sxe2_for_each_vf(adapter, vf_idx) + { + vf = SXE2_VF_NODE(adapter, vf_idx); + napi_schedule(&vf->repr->irq_data->napi); + } + + return IRQ_HANDLED; +} + +bool sxe2_is_repr_netdev(struct net_device *netdev) +{ + return netdev && (netdev->netdev_ops == &sxe2_repr_netdev_ops); +} + +void sxe2_eswitch_txqs_stop(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf; + u16 idx; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + sxe2_for_each_vf(adapter, idx) + { + vf = SXE2_VF_NODE(adapter, idx); + if (vf->repr) { + netif_carrier_off(vf->repr->netdev); + netif_tx_disable(vf->repr->netdev); + } + } +} + +bool sxe2_eswitch_is_offload(struct sxe2_adapter *adapter) +{ + return adapter->eswitch_ctxt.mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; +} + +void sxe2_vf_repr_rebuild(struct sxe2_vsi *vsi, bool is_vfr_vflr) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vf_repr *repr; + struct sxe2_vf_node *vf_node; + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + return; + + if (!is_vfr_vflr) { + LOG_INFO_BDF("vsi[%u][%u] pfr/pflr skip.\n", vsi->id_in_pf, + vsi->idx_in_dev); + return; + } + + vf_node = vsi->vf_node; + repr = vf_node->repr; + repr->src_vsi = vsi; +#ifdef HAVE_METADATA_PORT_INFO + repr->dst->u.port_info.port_id = vsi->idx_in_dev; +#endif +} + +static void sxe2_eswitch_txqs_start(struct sxe2_adapter *adapter) +{ + u16 vf_idx; + struct sxe2_vf_node *vf; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + sxe2_for_each_vf(adapter, vf_idx) + { + vf = SXE2_VF_NODE(adapter, vf_idx); + if (vf->repr && netif_running(vf->repr->netdev)) { + netif_carrier_on(vf->repr->netdev); + netif_tx_start_all_queues(vf->repr->netdev); + } + } +} + +void sxe2_eswitch_stop(struct sxe2_adapter *adapter) +{ + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + return; + + if (sxe2_vsi_close(adapter->eswitch_ctxt.esw_vsi)) + LOG_ERROR_BDF("eswitch vsi close failed.\n"); + +} + +s32 sxe2_eswitch_rebuild(struct sxe2_adapter *adapter) +{ + struct sxe2_eswitch_context *esw_ctxt = &adapter->eswitch_ctxt; + s32 ret; + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + return 0; + + ret = sxe2_sriov_vsi_rebuild(esw_ctxt->esw_vsi, false); + if (ret) { + LOG_ERROR_BDF("eswitch vsi rebuild failed.(err:%d)\n", ret); + goto l_end; + } + + ret = sxe2_uplink_vsi_cfg_to_switchdev(adapter); + if (ret) { + LOG_ERROR_BDF("uplink vsi promisc enable failed.\n"); + goto l_end; + } + + ret = sxe2_vfs_repr_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("vf repr cfg failed.\n"); + goto l_end; + } + + ret = sxe2_vfs_complex_fltr_restore(adapter); + if (ret) { + LOG_ERROR_BDF("vfs complex fltr restore 
failed.\n"); + goto l_end; + } + + ret = sxe2_pf_complex_fltr_restore(adapter); + if (ret) { + LOG_ERROR_BDF("pf complex fltr restore failed.\n"); + goto l_end; + } + + sxe2_eswitch_irq_queues_map(adapter); + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_vsi_open(esw_ctxt->esw_vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) { + LOG_ERROR_BDF("eswitch vsi[%u][%u] open failed.(err:%d)\n", + esw_ctxt->esw_vsi->id_in_pf, + esw_ctxt->esw_vsi->idx_in_dev, ret); + goto l_end; + } + + sxe2_eswitch_txqs_start(adapter); + +l_end: + return ret; +} + +s32 sxe2_eswitch_mode_write_try_lock(struct sxe2_adapter *adapter) +{ + set_bit(SXE2_ESWITCH_MODE_CHANGING, &adapter->eswitch_ctxt.flag); + if (atomic64_read(&adapter->eswitch_ctxt.mode_ref_cnt) > 0) + return -EBUSY; + + return 0; +} + +void sxe2_eswitch_mode_rwlock_init(struct sxe2_adapter *adapter) +{ + atomic64_set(&adapter->eswitch_ctxt.mode_ref_cnt, 0); +} + +void sxe2_eswitch_mode_write_unlock(struct sxe2_adapter *adapter) +{ + clear_bit(SXE2_ESWITCH_MODE_CHANGING, &adapter->eswitch_ctxt.flag); +} + +s32 sxe2_eswitch_mode_read_lock(struct sxe2_adapter *adapter) +{ + s32 retry = SXE2_ESWITCH_MODE_TIMEOUT; + + atomic64_inc(&adapter->eswitch_ctxt.mode_ref_cnt); + + while (retry--) { + if (!test_bit(SXE2_ESWITCH_MODE_CHANGING, + &adapter->eswitch_ctxt.flag)) + break; + usleep_range(1000, 1200); + } + if (retry < 0) + return -EBUSY; + + return 0; +} + +void sxe2_eswitch_mode_read_unlock(struct sxe2_adapter *adapter) +{ + atomic64_dec(&adapter->eswitch_ctxt.mode_ref_cnt); +} + +s32 sxe2_eswitch_ucmd_uplink_set(struct sxe2_adapter *adapter, bool to_user) +{ + s32 ret = 0; + struct sxe2_eswitch_context *eswitch = &adapter->eswitch_ctxt; + struct sxe2_vsi *user_pf_vsi; + struct sxe2_vsi *ker_pf_vsi; + + if (sxe2_eswitch_mode_read_lock(adapter)) { + ret = -EBUSY; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + + ker_pf_vsi = adapter->vsi_ctxt.main_vsi; + user_pf_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (!user_pf_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, ker_pf_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, user_pf_vsi->state)) { + ret = -EBUSY; + goto l_vsi_unlock; + } + + if (to_user && eswitch->uplink_vsi->type == SXE2_VSI_T_DPDK_PF) { + LOG_WARN_BDF("current uplink vsi is user, no need to set.\n"); + goto l_vsi_unlock; + } else if (!to_user && eswitch->uplink_vsi->type == SXE2_VSI_T_PF) { + LOG_WARN_BDF("current uplink vsi is kernel, no need to set.\n"); + goto l_vsi_unlock; + } else if (!sxe2_eswitch_is_offload(adapter)) { + LOG_ERROR_BDF("eswitch mode is not switchdev, not supported.\n"); + ret = -EOPNOTSUPP; + goto l_vsi_unlock; + } else if (!sxe2_vf_is_exist(adapter)) { + LOG_ERROR_BDF("VF count is 0, not supported.\n"); + ret = -EOPNOTSUPP; + goto l_vsi_unlock; + } + + if (to_user) { + ret = sxe2_promisc_rule_del(adapter, ker_pf_vsi->idx_in_dev); + if (ret && ret != -ENOENT) + goto l_vsi_unlock; + + ret = sxe2_promisc_rule_add(user_pf_vsi); + if (ret && ret != -EEXIST) + goto l_to_user_failed; + + adapter->eswitch_ctxt.uplink_vsi = user_pf_vsi; + goto l_vsi_unlock; + } else { + ret = sxe2_promisc_rule_del(adapter, user_pf_vsi->idx_in_dev); + if (ret && ret != -ENOENT) + goto l_vsi_unlock; + + ret = sxe2_promisc_rule_add(ker_pf_vsi); + if (ret && ret != -EEXIST) + goto l_to_kernel_failed; + + adapter->eswitch_ctxt.uplink_vsi = ker_pf_vsi; + goto l_vsi_unlock; + } + +l_to_user_failed: +
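/* Roll back the switch-to-user attempt: restore the promisc rule that was removed from the kernel PF VSI above. */ +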
(void)sxe2_promisc_rule_add(ker_pf_vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + sxe2_eswitch_mode_read_unlock(adapter); + return ret; + +l_to_kernel_failed: + (void)sxe2_promisc_rule_add(user_pf_vsi); + +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +l_end: + sxe2_eswitch_mode_read_unlock(adapter); + return ret; +} + +s32 sxe2_eswitch_ucmd_uplink_resetto_ker(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_eswitch_context *eswitch = &adapter->eswitch_ctxt; + struct sxe2_vsi *user_pf_vsi; + struct sxe2_vsi *ker_pf_vsi; + + ker_pf_vsi = adapter->vsi_ctxt.main_vsi; + user_pf_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (!user_pf_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, ker_pf_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, user_pf_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (eswitch->uplink_vsi->type == SXE2_VSI_T_PF) { + LOG_WARN_BDF("current uplink vsi is kernel, no need to set.\n"); + goto l_end; + } + + ret = sxe2_promisc_rule_del(adapter, user_pf_vsi->idx_in_dev); + if (ret && ret != -ENOENT) + goto l_end; + + ret = sxe2_promisc_rule_add(ker_pf_vsi); + if (ret && ret != -EEXIST) + goto l_to_kernel_failed; + + adapter->eswitch_ctxt.uplink_vsi = ker_pf_vsi; + return ret; + +l_to_kernel_failed: + (void)sxe2_promisc_rule_add(user_pf_vsi); + +l_end: + return ret; +} + +s32 sxe2_eswitch_ucmd_mode_get(struct sxe2_adapter *adapter, bool *is_switchdev) +{ + s32 ret = 0; + + if (sxe2_eswitch_mode_read_lock(adapter)) { + ret = -EBUSY; + goto l_end; + } + + if (sxe2_eswitch_is_offload(adapter) && sxe2_vf_is_exist(adapter)) + *is_switchdev = true; + else + *is_switchdev = false; + +l_end: + sxe2_eswitch_mode_read_unlock(adapter); + return ret; +} + +s32 sxe2_eswitch_ucmd_eswvsi_get(struct sxe2_adapter *adapter, u16 *user_esw_vsi_id) +{ + s32 ret = 0; + + if (sxe2_eswitch_mode_read_lock(adapter)) { + ret = -EBUSY; + goto l_end; + } + + if (adapter->eswitch_ctxt.user_esw_vsi) + *user_esw_vsi_id = adapter->eswitch_ctxt.user_esw_vsi->idx_in_dev; + +l_end: + sxe2_eswitch_mode_read_unlock(adapter); + return ret; +} + +s32 sxe2_eswitch_ucmd_repr_cfg(struct sxe2_vf_node *vf_node, bool is_to_user) +{ + s32 ret = 0; + u16 vf_vsi_id; + u16 vf_vsi_id_u; + struct sxe2_vsi *esw_vsi; + struct sxe2_vsi *esw_vsi_u; + u16 qid_in_vsi; + struct sxe2_vf_repr_cfg repr_cfg = {0}; + struct sxe2_adapter *adapter = vf_node->adapter; + + vf_vsi_id = vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + vf_vsi_id_u = vf_node->vsi_id[SXE2_VF_TYPE_DPDK]; + + mutex_lock(&vf_node->repr_cfg_lock); + + if (is_to_user && vf_node->user_repr_valid) { + LOG_WARN_BDF("current repr is user, no need to set,\t" + "ker_vf_vsi_id[%u], user_vf_vsi_id[%u]\n", + vf_vsi_id, vf_vsi_id_u); + goto l_end; + } else if (!is_to_user && !vf_node->user_repr_valid) { + LOG_WARN_BDF("current repr is kernel, no need to set,\t" + "ker_vf_vsi_id[%u], user_vf_vsi_id[%u]\n", + vf_vsi_id, vf_vsi_id_u); + goto l_end; + } + + esw_vsi_u = vf_node->adapter->eswitch_ctxt.user_esw_vsi; + esw_vsi = vf_node->adapter->eswitch_ctxt.esw_vsi; + + qid_in_vsi = vf_node->vf_idx; + repr_cfg.queue_in_dev = esw_vsi->rxqs.q[qid_in_vsi]->idx_in_pf + + vf_node->adapter->q_ctxt.rxq_base_idx_in_dev; + repr_cfg.queue_in_dev_u = esw_vsi_u->rxqs.q[qid_in_vsi]->idx_in_pf + + vf_node->adapter->q_ctxt.rxq_base_idx_in_dev; + repr_cfg.cfg_to_user = is_to_user; + + LOG_DEBUG_BDF("vf_idx:%u, queue_in_dev:%u, queue_in_dev_u:%u,\t" + "cfg_to_user:%u\n", + vf_node->vf_idx, 
repr_cfg.queue_in_dev, + repr_cfg.queue_in_dev_u, repr_cfg.cfg_to_user); + + ret = sxe2_eswitch_vf_slow_path_rule_update(vf_node->adapter, vf_vsi_id, + &repr_cfg); + if (ret) { + LOG_ERROR_BDF("kernel vf slow path update failed,\t" + "ker_vf_vsi_id[%u]\n", + vf_vsi_id); + goto l_end; + } + + ret = sxe2_eswitch_vf_slow_path_rule_update(vf_node->adapter, vf_vsi_id_u, + &repr_cfg); + if (ret) { + LOG_ERROR_BDF("user vf slow path update failed,\t" + "user_vf_vsi_id[%u]\n", + vf_vsi_id_u); + goto l_reback; + } + vf_node->user_repr_valid = is_to_user; + goto l_end; + +l_reback: + repr_cfg.cfg_to_user = !is_to_user; + (void)sxe2_eswitch_vf_slow_path_rule_update(vf_node->adapter, vf_vsi_id, + &repr_cfg); +l_end: + mutex_unlock(&vf_node->repr_cfg_lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.h new file mode 100644 index 0000000000000000000000000000000000000000..0a29de52031f1eab908402ad728caeef8553fca4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_eswitch.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_eswitch.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_ESWITCH_H__ +#define __SXE2_ESWITCH_H__ + +#include + +#include "sxe2_vsi.h" + +#define SXE2_ESWITCH_QUEUE_CNT 1 +#define SXE2_ESWITCH_MODE_CHANGING 1 +#define SXE2_ESWITCH_MODE_TIMEOUT 2000 + +enum sxe2_eswitch_mode { + SXE2_ESWITCH_MODE_LEGACY, + SXE2_ESWITCH_MODE_OFFLOAD, +}; + +struct sxe2_eswitch_context { + struct sxe2_adapter *adapter; + struct sxe2_vsi *uplink_vsi; + struct sxe2_vsi *esw_vsi; + struct sxe2_vsi *user_esw_vsi; + enum devlink_eswitch_mode mode; + atomic64_t mode_ref_cnt; + unsigned long flag; +}; + +struct sxe2_vf_repr { + struct sxe2_vsi *src_vsi; + struct sxe2_vsi *dpdk_vf_vsi; + struct sxe2_vf_node *vf_node; + struct sxe2_irq_data *irq_data; + struct net_device *netdev; + struct metadata_dst *dst; + u16 vf_idx; + u8 rule_added; +}; + +struct sxe2_vf_repr_cfg { + u16 queue_in_dev; + u16 queue_in_dev_u; + bool cfg_to_user; +}; + +s32 sxe2_eswitch_configure(struct sxe2_adapter *adapter, bool enable); + +irqreturn_t sxe2_eswitch_msix_ring_irq_handler(int __always_unused irq, + void *data); + +bool sxe2_is_repr_netdev(struct net_device *netdev); + +void sxe2_eswitch_txqs_stop(struct sxe2_adapter *adapter); + +bool sxe2_eswitch_is_offload(struct sxe2_adapter *adapter); + +void sxe2_vf_repr_rebuild(struct sxe2_vsi *vsi, bool is_vfr_vflr); + +void sxe2_eswitch_stop(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_rebuild(struct sxe2_adapter *adapter); + +s32 sxe2_vf_sp_rule_add(struct sxe2_vf_node *vf_node, bool is_user); + +s32 sxe2_vf_sp_rule_del(struct sxe2_vf_node *vf_node, bool is_user); + +void sxe2_vf_repr_decfg(struct sxe2_vf_node *vf_node); + +void sxe2_vfs_repr_decfg(struct sxe2_adapter *adapter); + +void sxe2_eswitch_mode_rwlock_init(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_mode_write_try_lock(struct sxe2_adapter *adapter); + +void sxe2_eswitch_mode_write_unlock(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_mode_read_lock(struct sxe2_adapter *adapter); + +void sxe2_eswitch_mode_read_unlock(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_ucmd_uplink_set(struct sxe2_adapter *adapter, bool to_user); + +s32 sxe2_eswitch_ucmd_uplink_resetto_ker(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_ucmd_mode_get(struct sxe2_adapter *adapter, bool 
*is_switchdev); + +s32 sxe2_eswitch_ucmd_eswvsi_get(struct sxe2_adapter *adapter, u16 *user_esw_vsi_id); +s32 sxe2_eswitch_ucmd_repr_cfg(struct sxe2_vf_node *vf_node, bool is_to_user); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..8d10fbd50531f635a267ba5d2c9e58209ad37a87 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.c @@ -0,0 +1,6533 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ethtool.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2_ethtool.h" +#include "sxe2_tx.h" +#include "sxe2_rx.h" +#include "sxe2_queue.h" +#include "sxe2_netdev.h" +#include "sxe2_log.h" +#include "sxe2_rss.h" +#include "sxe2_flow.h" +#include "sxe2_msg.h" +#include "sxe2_fnav.h" +#include "sxe2_ipsec.h" +#include "sxe2_debugfs.h" +#include "sxe2_upgrade.h" +#include "sxe2_xsk.h" +#include "sxe2_mbx_public.h" +#include "sxe2_cmd.h" +#include "sxe2_monitor.h" +#include "sxe2_dcb.h" +#include "sxe2_acl.h" + +#define SXE2_VSI_TX_QC(vsi, q_idx) (&(vsi)->txqs.q[(q_idx)]->irq_data->tx) +#define SXE2_VSI_RX_QC(vsi, q_idx) (&(vsi)->rxqs.q[(q_idx)]->irq_data->rx) +#define SXE2_Q_TYPE_STR_RX "rx" +#define SXE2_Q_TYPE_STR_TX "tx" +#define SXE2_MODULE_SFF_ADDR_MODE (0x04) +#define SXE2_MODULE_SFF_DIAG_CAPAB (0x40) +#define SXE2_MODULE_REVISION_ADDR (0x01) +#define SXE2_MODULE_SFF_PHY_DEV_IDENTIFIER (0x00) +#define SXE2_MODULE_SFF_8472_COMP (0x5E) +#define SXE2_MODULE_SFF_8472_SWAP (0x5C) +#define SXE2_MODULE_QSFP_MAX_LEN (640) + +#define SXE2_SELFTEST_RTN_LINKDOWN (0x1) +#define SXE2_SELFTEST_RTN_FAIL (0x2) + +#define SXE2_MODULE_REVISION_SFF_8436 (0x2) + +#define SXE2_MODULE_REPEAT_TIMES (4) + +#define EEPROM_DATALEN (1) +#define SFF_READ_BLOCK_SIZE_8 (8) +#define SXE2_REG_VERSION (1) +#define SXE2_COALESCE_QNUM_INVAL (0xFFFFFFFF) + +#define SXE2_SFF_STATUS_INDICATER (0x2) + +#define SXE2_SFF_FLAT_MEM (BIT(2)) + +#define L2_FWD_TX_PKTS1 "l2-fwd-%s-tx_pkts" +#define L2_FWD_TX_PKTS2 "l2-fwd-%i-tx_pkts" +#define L2_FWD_RX_PKTS1 "l2-fwd-%s-rx_pkts" +#define L2_FWD_RX_PKTS2 "l2-fwd-%i-rx_pkts" +#define L2_FWD_TX_BYTES1 "l2-fwd-%s-tx_bytes" +#define L2_FWD_TX_BYTES2 "l2-fwd-%i-tx_bytes" +#define L2_FWD_RX_BYTES1 "l2-fwd-%s-rx_bytes" +#define L2_FWD_RX_BYTES2 "l2-fwd-%i-rx_bytes" + +#define ETHTOOL_SELFTEST_SLEEP_MIN (10000) +#define ETHTOOL_SELFTEST_SLEEP_MAX (20000) +#define ETHTOOL_SELFTEST_FRAME_COUNT (32) + +u8 lbtest_unicast[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x01, 0x02, 0x03 }; + +void __sxe2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo, struct sxe2_adapter *adapter) +{ + struct sxe2_fw_ver_msg *fw_ver = &adapter->hw.fw_ver; + + strscpy(drvinfo->driver, SXE2_DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, SXE2_VERSION, sizeof(drvinfo->version)); + + (void)snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u.%u", fw_ver->main_version_id, + fw_ver->sub_version_id, fw_ver->fix_version_id, + fw_ver->build_id); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +STATIC void sxe2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + + __sxe2_get_drvinfo(netdev, drvinfo, priv->vsi->adapter); + drvinfo->n_priv_flags = 
SXE2_PRIV_FLAG_ARRAY_SIZE; +} + +STATIC int sxe2_get_regs_len(struct net_device __always_unused *netdev) +{ + return sizeof(sxe2_regs_dump_list); +} + +STATIC void sxe2_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + u32 i; + u32 *regs_buf = (u32 *)p; + + regs->version = SXE2_REG_VERSION; + for (i = 0; i < ARRAY_SIZE(sxe2_regs_dump_list); ++i) + regs_buf[i] = sxe2_read_reg(hw, sxe2_regs_dump_list[i]); +} + +STATIC u32 sxe2_get_msglevel(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + +#ifndef CONFIG_DYNAMIC_DEBUG + if (adapter->msglvl_ctxt.debug_mask) { + LOG_NETDEV_INFO("debug_mask: 0x%11llx\n", + adapter->msglvl_ctxt.debug_mask); + } +#endif + + return adapter->msglvl_ctxt.msg_enable; +} + +STATIC void sxe2_set_msglevel(struct net_device *netdev, u32 data) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + +#ifndef CONFIG_DYNAMIC_DEBUG + if (SXE2_DBG_USER & data) + adapter->msglvl_ctxt.debug_mask = data; + else + adapter->msglvl_ctxt.msg_enable = data; +#else + adapter->msglvl_ctxt.msg_enable = data; +#endif +} + +#ifdef SXE2_MACVLAN_STATS + +static void sxe2_get_macvlan_tx_strings(struct sxe2_adapter *adapter, u8 **data) +{ + u8 *p; + int i; + + if (!data) + return; + p = *data; + + for (i = 0; i < SXE2_MAX_MACVLANS; i++) { + struct sxe2_macvlan *mv = sxe2_get_macvlan(i, adapter); + + if (mv) { + ethtool_sprintf(&p, L2_FWD_TX_PKTS1, mv->vdev->name); + ethtool_sprintf(&p, L2_FWD_TX_BYTES1, mv->vdev->name); + } else { + ethtool_sprintf(&p, L2_FWD_TX_PKTS2, i); + ethtool_sprintf(&p, L2_FWD_TX_BYTES2, i); + } + } + *data = p; +} + +static void sxe2_get_macvlan_tx_stats(struct sxe2_adapter *adapter, u64 *data, + int *idx) +{ + int i, j; + + if (!idx) + return; + + j = *idx; + + for (i = 0; i < SXE2_MAX_MACVLANS; i++) { + struct sxe2_macvlan *mv = sxe2_get_macvlan(i, adapter); + + if (mv) { + data[j++] = mv->vsi->vsi_stats.txqs_stats.packets; + data[j++] = mv->vsi->vsi_stats.txqs_stats.bytes; + } else { + data[j++] = 0; + data[j++] = 0; + } + } + + *idx = j; +} + +static void sxe2_get_macvlan_rx_strings(struct sxe2_adapter *adapter, u8 **data) +{ + u8 *p; + int i; + + if (!data) + return; + p = *data; + + for (i = 0; i < SXE2_MAX_MACVLANS; i++) { + struct sxe2_macvlan *mv = sxe2_get_macvlan(i, adapter); + + if (mv) { + ethtool_sprintf(&p, L2_FWD_RX_PKTS1, mv->vdev->name); + ethtool_sprintf(&p, L2_FWD_RX_BYTES1, mv->vdev->name); + } else { + ethtool_sprintf(&p, L2_FWD_RX_PKTS2, i); + ethtool_sprintf(&p, L2_FWD_RX_BYTES2, i); + } + } + *data = p; +} + +static void sxe2_get_macvlan_rx_stats(struct sxe2_adapter *adapter, u64 *data, + int *idx) +{ + int i, j; + + if (!idx) + return; + + j = *idx; + + for (i = 0; i < SXE2_MAX_MACVLANS; i++) { + struct sxe2_macvlan *mv = sxe2_get_macvlan(i, adapter); + + if (mv) { + data[j++] = mv->vsi->vsi_stats.rxqs_stats.packets; + data[j++] = mv->vsi->vsi_stats.rxqs_stats.bytes; + } else { + data[j++] = 0; + data[j++] = 0; + } + } + + *idx = j; +} +#endif + +static void sxe2_get_tx_stats(struct sxe2_vsi *vsi, u64 *data, u32 *idx) +{ + struct sxe2_queue *tx_q; + u64 pkts, bytes; + u32 i, j; + + if (!idx) + return; + + i = *idx; + + sxe2_for_each_vsi_txq(vsi, j) { + tx_q = READ_ONCE(vsi->txqs.q[j]); + 
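/* ring pointers may be replaced at runtime (hence READ_ONCE); packet/byte counters are read under the per-ring u64_stats syncp */
+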
sxe2_fetch_u64_data_per_ring(&tx_q->syncp, tx_q->stats, &pkts, + &bytes); + data[i++] = pkts; + data[i++] = bytes; + data[i++] = tx_q->stats->tx_stats.tx_tso_packets; + data[i++] = tx_q->stats->tx_stats.tx_tso_bytes; + data[i++] = tx_q->stats->tx_stats.tx_tso_linearize_chk; + data[i++] = tx_q->stats->tx_stats.tx_vlan_insert; + data[i++] = tx_q->stats->tx_stats.tx_csum_none; + data[i++] = tx_q->stats->tx_stats.tx_csum_partial; + data[i++] = tx_q->stats->tx_stats.tx_csum_partial_inner; + data[i++] = tx_q->stats->tx_stats.tx_busy; + data[i++] = tx_q->stats->tx_stats.tx_queue_dropped; + data[i++] = tx_q->stats->tx_stats.tx_xmit_more; + data[i++] = tx_q->stats->tx_stats.tx_restart; + data[i++] = tx_q->stats->tx_stats.tx_linearize; + } + + *idx = i; +} + +static void sxe2_get_rx_stats(struct sxe2_vsi *vsi, u64 *data, u32 *idx) +{ + struct sxe2_queue *rx_q; + u64 pkts, bytes; + u32 i, j; + + if (!idx) + return; + + i = *idx; + + sxe2_for_each_vsi_rxq(vsi, j) { + rx_q = READ_ONCE(vsi->rxqs.q[j]); + sxe2_fetch_u64_data_per_ring(&rx_q->syncp, rx_q->stats, &pkts, + &bytes); + data[i++] = pkts; + data[i++] = bytes; + data[i++] = rx_q->stats->rx_stats.rx_csum_unnecessary; + data[i++] = rx_q->stats->rx_stats.rx_csum_none; + data[i++] = rx_q->stats->rx_stats.rx_csum_complete; + data[i++] = rx_q->stats->rx_stats.rx_csum_unnecessary_inner; + data[i++] = rx_q->stats->rx_stats.rx_csum_err; + data[i++] = rx_q->stats->rx_stats.rx_lro_packets; + data[i++] = rx_q->stats->rx_stats.rx_lro_bytes; + data[i++] = rx_q->stats->rx_stats.rx_lro_count; + data[i++] = rx_q->stats->rx_stats.rx_vlan_strip; + data[i++] = rx_q->stats->rx_stats.rx_pkts_sw_drop; + data[i++] = rx_q->stats->rx_stats.rx_buff_alloc_err; + data[i++] = rx_q->stats->rx_stats.rx_pg_alloc_fail; + data[i++] = rx_q->stats->rx_stats.rx_page_alloc; + data[i++] = rx_q->stats->rx_stats.rx_non_eop_descs; + + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_drop; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_redirect; + data[i++] = + rx_q->stats->rx_stats.xdp_stats.rx_xdp_redirect_fail; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_pkts; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_bytes; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_pass; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_unknown; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_tx_xmit; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xdp_tx_xmit_fail; + + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_drop; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_redirect; + data[i++] = + rx_q->stats->rx_stats.xdp_stats.rx_xsk_redirect_fail; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_packets; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_bytes; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_pass; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_unknown; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_tx_xmit; + data[i++] = rx_q->stats->rx_stats.xdp_stats.rx_xsk_tx_xmit_fail; + data[i++] = rx_q->stats->rx_stats.rx_pa_err; + } + + *idx = i; +} + +static int sxe2_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct sxe2_netdev_priv *netpriv = netdev_priv(dev); + struct sxe2_adapter *adapter = netpriv->vsi->adapter; + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + 
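/* hardware receive timestamping is limited to PTPv2 event packets (plus HWTSTAMP_FILTER_NONE) */
+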
info->rx_filters = + BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + info->phc_index = sxe2_ptp_clock_idx_get(adapter); + + return 0; +} + +static void sxe2_get_ipsec_strings(struct sxe2_adapter *adapter, u8 **data) +{ + u8 *p; + + if (!data) + return; + + p = *data; + + ethtool_sprintf(&p, "ipsec_rx_offload_ok"); + ethtool_sprintf(&p, "ipsec_rx_decrypt_fail"); + ethtool_sprintf(&p, "ipsec_rx_invalid_state"); + ethtool_sprintf(&p, "ipsec_rx_invalid_sp"); + + ethtool_sprintf(&p, "ipsec_tx_offload_ok"); + ethtool_sprintf(&p, "ipsec_tx_invalid_state"); + ethtool_sprintf(&p, "ipsec_tx_invalid_sp"); + + *data = p; +} + +void __sxe2_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + u32 i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SXE2_VSI_SW_STATS_LEN; i++) + ethtool_sprintf( + &p, sxe2_gstrings_vsi_sw_stats[i].stats_string); + for (i = 0; i < SXE2_VSI_HW_STATS_LEN; i++) + ethtool_sprintf( + &p, sxe2_gstrings_vsi_hw_stats[i].stats_string); + + for (i = 0; i < SXE2_PF_HW_STATS_LEN; i++) + ethtool_sprintf( + &p, sxe2_gstrings_pf_hw_stats[i].stats_string); + for (i = 0; i < SXE2_PF_SW_STATS_LEN; i++) + ethtool_sprintf( + &p, sxe2_gstrings_pf_sw_stats[i].stats_string); + for (i = 0; i < SXE2_TC_MAX_CNT; i++) + ethtool_sprintf(&p, "rx_prio%u_buf_discard", i); + + sxe2_for_each_vsi_txq(vsi, i) { + ethtool_sprintf(&p, "tx%u_packets", i); + ethtool_sprintf(&p, "tx%u_bytes", i); + ethtool_sprintf(&p, "tx%u_tso_packets", i); + ethtool_sprintf(&p, "tx%u_tso_bytes", i); + ethtool_sprintf(&p, "tx%u_tso_linearize_chk", i); + ethtool_sprintf(&p, "tx%u_added_vlan_packets", i); + ethtool_sprintf(&p, "tx%u_csum_none", i); + ethtool_sprintf(&p, "tx%u_csum_partial", i); + ethtool_sprintf(&p, "tx%u_csum_partial_inner", i); + ethtool_sprintf(&p, "tx%u_stopped", i); + ethtool_sprintf(&p, "tx%u_dropped", i); + ethtool_sprintf(&p, "tx%u_xmit_more", i); + ethtool_sprintf(&p, "tx%u_wake", i); + ethtool_sprintf(&p, "tx%u_linearize", i); + } + +#ifdef SXE2_MACVLAN_STATS + mutex_lock(&vsi->adapter->vsi_ctxt.lock); + sxe2_get_macvlan_tx_strings(vsi->adapter, &p); + mutex_unlock(&vsi->adapter->vsi_ctxt.lock); +#endif + + sxe2_for_each_vsi_rxq(vsi, i) { + ethtool_sprintf(&p, "rx%u_packets", i); + ethtool_sprintf(&p, "rx%u_bytes", i); + ethtool_sprintf(&p, "rx%u_csum_unnecessary", i); + ethtool_sprintf(&p, "rx%u_csum_none", i); + ethtool_sprintf(&p, "rx%u_csum_complete", i); + ethtool_sprintf(&p, "rx%u_csum_unnecessary_inner", i); + ethtool_sprintf(&p, "rx%u_csum_err", i); + ethtool_sprintf(&p, "rx%u_lro_packets", i); + ethtool_sprintf(&p, "rx%u_lro_bytes", i); + ethtool_sprintf(&p, "rx%u_lro_count", i); + ethtool_sprintf(&p, "rx%u_removed_vlan_packets", i); + ethtool_sprintf(&p, "rx%u_pkts_sw_drop", i); + ethtool_sprintf(&p, "rx%u_buff_alloc_err", i); + ethtool_sprintf(&p, "rx%u_pg_alloc_fail", i); + ethtool_sprintf(&p, "rx%u_page_alloc", i); + ethtool_sprintf(&p, "rx%u_non_eop_descs", i); + + ethtool_sprintf(&p, "rx%u_xdp_drop", i); + ethtool_sprintf(&p, "rx%u_xdp_redirect", i); + ethtool_sprintf(&p, "rx%u_xdp_redirect_fail", i); + ethtool_sprintf(&p, "rx%u_xdp_pkts", i); + ethtool_sprintf(&p, "rx%u_xdp_bytes", i); + ethtool_sprintf(&p, "rx%u_xdp_pass", i); + ethtool_sprintf(&p, "rx%u_xdp_unknown", i); + ethtool_sprintf(&p, "rx%u_xdp_xmit", i); + ethtool_sprintf(&p, "rx%u_xdp_xmit_fail", i); + + ethtool_sprintf(&p, "rx%u_xsk_drop", i); + ethtool_sprintf(&p, 
"rx%u_xsk_redirect", i); + ethtool_sprintf(&p, "rx%u_xsk_redirect_fail", i); + ethtool_sprintf(&p, "rx%u_xsk_packets", i); + ethtool_sprintf(&p, "rx%u_xsk_bytes", i); + ethtool_sprintf(&p, "rx%u_xsk_pass", i); + ethtool_sprintf(&p, "rx%u_xsk_unknown", i); + ethtool_sprintf(&p, "rx%u_xsk_xmit", i); + ethtool_sprintf(&p, "rx%u_xsk_xmit_fail", i); + ethtool_sprintf(&p, "rx%u_pa_err", i); + } + +#ifdef SXE2_MACVLAN_STATS + mutex_lock(&vsi->adapter->vsi_ctxt.lock); + sxe2_get_macvlan_rx_strings(vsi->adapter, &p); + mutex_unlock(&vsi->adapter->vsi_ctxt.lock); +#endif + + sxe2_get_ipsec_strings(vsi->adapter, &p); + + sxe2_for_each_prioirty(i) { + ethtool_sprintf(&p, "tx_prio%u_xon_phy", i); + ethtool_sprintf(&p, "tx_prio%u_xoff_phy", i); + ethtool_sprintf(&p, "rx_prio%u_xon_phy", i); + ethtool_sprintf(&p, "rx_prio%u_xoff_phy", i); + } + + break; + case ETH_SS_TEST: + sxe2_ethtool_selftest_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < SXE2_PRIV_FLAG_ARRAY_SIZE; i++) + ethtool_sprintf(&p, sxe2_gstrings_priv_flags[i].name); + break; + default: + break; + } +} + +void __sxe2_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u32 i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SXE2_VSI_HW_STATS_LEN; i++) + ethtool_sprintf( + &p, sxe2_gstrings_vsi_hw_stats[i].stats_string); + break; + default: + break; + } +} + +STATIC void sxe2_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + __sxe2_get_strings(netdev, stringset, data); +} + +STATIC s32 sxe2_identify_led_ctrl(struct sxe2_adapter *adapter, bool is_blink) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_led_ctrl ctrl; + + ctrl.mode = (true == is_blink) ? SXE2_IDENTIFY_LED_BLINK_ON : + SXE2_IDENTIFY_LED_BLINK_OFF; + ctrl.duration = 0; + + sxe2_cmd_params_no_interruptible_fill(&cmd, SXE2_CMD_LED_CTRL, &ctrl, + sizeof(ctrl), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("led ctrl failed, is_blink=%d, ret=%d\n", + is_blink, ret); + ret = -EIO; + goto l_out; + } + +l_out: + return ret; +} + +STATIC int sxe2_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + bool led_active; + int ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_active = true; + break; + case ETHTOOL_ID_INACTIVE: + led_active = false; + break; + default: + ret = -EINVAL; + LOG_ERROR_BDF("identify led dont support ON/OFF, state=%d\n", + state); + goto l_out; + } + ret = sxe2_identify_led_ctrl(adapter, led_active); + if (ret) { + LOG_ERROR_BDF("led ctrl failed, ret=%d\n", ret); + goto l_out; + } + LOG_INFO_BDF("led ctrl succeed!\n"); +l_out: + return ret; +} + +STATIC void sxe2_hw_vsi_stats_set(struct sxe2_adapter *adapter, + struct sxe2_fwc_vsi_stats_resp *resp) +{ + u16 i, j; + struct sxe2_vsi *vsi; + u16 vsi_cnt = le16_to_cpu(resp->vsi_cnt); + + for (i = 0; i < vsi_cnt; i++) { + sxe2_for_each_vsi(&adapter->vsi_ctxt, j) { + vsi = adapter->vsi_ctxt.vsi[j]; + if (!vsi) + continue; + + if (vsi->idx_in_dev == + le16_to_cpu(resp->vsi_stats[i].vsi_id)) { + vsi->vsi_stats.vsi_hw_stats.rx_vsi_unicast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.rx_vsi_unicast_packets); + + vsi->vsi_stats.vsi_hw_stats.rx_vsi_bytes += + le64_to_cpu(resp->vsi_stats[i].stats.rx_vsi_bytes); + + vsi->vsi_stats.vsi_hw_stats.tx_vsi_unicast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.tx_vsi_unicast_packets); 
+ + vsi->vsi_stats.vsi_hw_stats.tx_vsi_bytes += + le64_to_cpu(resp->vsi_stats[i].stats.tx_vsi_bytes); + + vsi->vsi_stats.vsi_hw_stats.rx_vsi_multicast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.rx_vsi_multicast_packets); + + vsi->vsi_stats.vsi_hw_stats.tx_vsi_multicast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.tx_vsi_multicast_packets); + + vsi->vsi_stats.vsi_hw_stats.rx_vsi_broadcast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.rx_vsi_broadcast_packets); + + vsi->vsi_stats.vsi_hw_stats.tx_vsi_broadcast_packets += + le64_to_cpu(resp->vsi_stats[i].stats.tx_vsi_broadcast_packets); + } + } + } +} + +STATIC void sxe2_pf_nonclear_hw_stats_set(struct sxe2_adapter *adapter, + u64 *last_value, u64 current_value, + u64 *new_value) +{ + if (adapter->pf_stats.stat_prev_loaded) { + if (current_value >= *last_value) + *new_value += current_value - *last_value; + else + *new_value += current_value + BIT_ULL(32) - *last_value; + } + + *last_value = current_value; +} + +STATIC void sxe2_pf_nonclear_hw_stats64_set(struct sxe2_adapter *adapter, + u64 *last_value, u64 current_value, + u64 *new_value) +{ + if (adapter->pf_stats.stat_prev_loaded) { + if (current_value >= *last_value) + *new_value += current_value - *last_value; + else + *new_value = current_value; + } + + *last_value = current_value; +} + +STATIC void sxe2_pf_hw_stats_set(struct sxe2_adapter *adapter, + struct sxe2_pf_hw_stats *stats, + struct sxe2_fwc_pf_stats_resp *resp) +{ + u8 i; + struct sxe2_pf_hw_stats *last_pf_hw_stats = + &adapter->pf_stats.last_pf_hw_stats; + + if (adapter->pf_stats.stat_prev_loaded) { + stats->rx_out_of_buffer += le64_to_cpu(resp->stats.rx_out_of_buffer); + stats->rx_pcs_symbol_err_phy += le64_to_cpu(resp->stats.rx_pcs_symbol_err_phy); + stats->rx_corrected_bits_phy += le64_to_cpu(resp->stats.rx_corrected_bits_phy); + stats->rx_err_lane_0_phy += le64_to_cpu(resp->stats.rx_err_lane_0_phy); + stats->rx_err_lane_1_phy += le64_to_cpu(resp->stats.rx_err_lane_1_phy); + stats->rx_err_lane_2_phy += le64_to_cpu(resp->stats.rx_err_lane_2_phy); + stats->rx_err_lane_3_phy += le64_to_cpu(resp->stats.rx_err_lane_3_phy); + stats->rx_discards_ips_phy += le64_to_cpu(resp->stats.rx_discards_ips_phy); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + stats->rx_prio_buf_discard[i] += le64_to_cpu(resp->stats.rx_prio_buf_discard[i]); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->prio_xoff_rx[i], + le64_to_cpu(resp->stats.prio_xoff_rx[i]), + &stats->prio_xoff_rx[i]); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->prio_xon_rx[i], + le64_to_cpu(resp->stats.prio_xon_rx[i]), + &stats->prio_xon_rx[i]); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->prio_xoff_tx[i], + le64_to_cpu(resp->stats.prio_xoff_tx[i]), + &stats->prio_xoff_tx[i]); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->prio_xon_tx[i], + le64_to_cpu(resp->stats.prio_xon_tx[i]), + &stats->prio_xon_tx[i]); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->prio_xon_2_xoff[i], + le64_to_cpu(resp->stats.prio_xon_2_xoff[i]), + &stats->prio_xon_2_xoff[i]); + } + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_pause, + le64_to_cpu(resp->stats.rx_pause), + &stats->rx_pause); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_pause, + le64_to_cpu(resp->stats.tx_pause), + &stats->tx_pause); + + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_frame_good, + le64_to_cpu(resp->stats.tx_frame_good), + 
&stats->tx_frame_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_frame_good, + le64_to_cpu(resp->stats.rx_frame_good), + &stats->rx_frame_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_crc_errors, + le64_to_cpu(resp->stats.rx_crc_errors), + &stats->rx_crc_errors); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_bytes_good, + le64_to_cpu(resp->stats.tx_bytes_good), + &stats->tx_bytes_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_bytes_good, + le64_to_cpu(resp->stats.rx_bytes_good), + &stats->rx_bytes_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_multicast_good, + le64_to_cpu(resp->stats.tx_multicast_good), + &stats->tx_multicast_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_broadcast_good, + le64_to_cpu(resp->stats.tx_broadcast_good), + &stats->tx_broadcast_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_multicast_good, + le64_to_cpu(resp->stats.rx_multicast_good), + &stats->rx_multicast_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_broadcast_good, + le64_to_cpu(resp->stats.rx_broadcast_good), + &stats->rx_broadcast_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_len_errors, + le64_to_cpu(resp->stats.rx_len_errors), + &stats->rx_len_errors); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_out_of_range_errors, + le64_to_cpu(resp->stats.rx_out_of_range_errors), + &stats->rx_out_of_range_errors); + sxe2_pf_nonclear_hw_stats_set(adapter, &last_pf_hw_stats->rx_oversize_pkts_phy, + le64_to_cpu(resp->stats.rx_oversize_pkts_phy), + &stats->rx_oversize_pkts_phy); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_symbol_err, + le64_to_cpu(resp->stats.rx_symbol_err), + &stats->rx_symbol_err); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_pause_frame, + le64_to_cpu(resp->stats.rx_pause_frame), + &stats->rx_pause_frame); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_pause_frame, + le64_to_cpu(resp->stats.tx_pause_frame), + &stats->tx_pause_frame); + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_discards_phy, + le64_to_cpu(resp->stats.rx_discards_phy), + &stats->rx_discards_phy); + + stats->rx_discards_phy = + stats->rx_discards_phy + stats->rx_discards_ips_phy; + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_dropped_link_down, + le64_to_cpu(resp->stats.tx_dropped_link_down), + &stats->tx_dropped_link_down); + + sxe2_pf_nonclear_hw_stats_set(adapter, &last_pf_hw_stats->rx_undersize_good, + le64_to_cpu(resp->stats.rx_undersize_good), + &stats->rx_undersize_good); + + sxe2_pf_nonclear_hw_stats_set(adapter, &last_pf_hw_stats->rx_runt_error, + le64_to_cpu(resp->stats.rx_runt_error), + &stats->rx_runt_error); + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_bytes_good_bad, + le64_to_cpu(resp->stats.tx_bytes_good_bad), + &stats->tx_bytes_good_bad); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_frame_good_bad, + le64_to_cpu(resp->stats.tx_frame_good_bad), + &stats->tx_frame_good_bad); + + sxe2_pf_nonclear_hw_stats_set(adapter, &last_pf_hw_stats->rx_jabbers, + le64_to_cpu(resp->stats.rx_jabbers), + &stats->rx_jabbers); + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_64, + le64_to_cpu(resp->stats.rx_size_64), + &stats->rx_size_64); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->rx_size_65_127, + 
le64_to_cpu(resp->stats.rx_size_65_127), + &stats->rx_size_65_127); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_128_255, + le64_to_cpu(resp->stats.rx_size_128_255), + &stats->rx_size_128_255); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_256_511, + le64_to_cpu(resp->stats.rx_size_256_511), + &stats->rx_size_256_511); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_512_1023, + le64_to_cpu(resp->stats.rx_size_512_1023), + &stats->rx_size_512_1023); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_1024_1522, + le64_to_cpu(resp->stats.rx_size_1024_1522), + &stats->rx_size_1024_1522); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_size_1523_max, + le64_to_cpu(resp->stats.rx_size_1523_max), + &stats->rx_size_1523_max); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_illegal_bytes, + le64_to_cpu(resp->stats.rx_illegal_bytes), + &stats->rx_illegal_bytes); + + sxe2_pf_nonclear_hw_stats_set(adapter, + &last_pf_hw_stats->rx_oversize_good, + le64_to_cpu(resp->stats.rx_oversize_good), + &stats->rx_oversize_good); + + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_unicast, + le64_to_cpu(resp->stats.tx_unicast), + &stats->tx_unicast); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_broadcast, + le64_to_cpu(resp->stats.tx_broadcast), + &stats->tx_broadcast); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_multicast, + le64_to_cpu(resp->stats.tx_multicast), + &stats->tx_multicast); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_vlan_packet_good, + le64_to_cpu(resp->stats.tx_vlan_packet_good), + &stats->tx_vlan_packet_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_64, + le64_to_cpu(resp->stats.tx_size_64), + &stats->tx_size_64); + sxe2_pf_nonclear_hw_stats64_set(adapter, + &last_pf_hw_stats->tx_size_65_127, + le64_to_cpu(resp->stats.tx_size_65_127), + &stats->tx_size_65_127); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_128_255, + le64_to_cpu(resp->stats.tx_size_128_255), + &stats->tx_size_128_255); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_256_511, + le64_to_cpu(resp->stats.tx_size_256_511), + &stats->tx_size_256_511); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_512_1023, + le64_to_cpu(resp->stats.tx_size_512_1023), + &stats->tx_size_512_1023); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_1024_1522, + le64_to_cpu(resp->stats.tx_size_1024_1522), + &stats->tx_size_1024_1522); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_size_1523_max, + le64_to_cpu(resp->stats.tx_size_1523_max), + &stats->tx_size_1523_max); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->tx_underflow_error, + le64_to_cpu(resp->stats.tx_underflow_error), + &stats->tx_underflow_error); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_byte_good_bad, + le64_to_cpu(resp->stats.rx_byte_good_bad), + &stats->rx_byte_good_bad); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_frame_good_bad, + le64_to_cpu(resp->stats.rx_frame_good_bad), + &stats->rx_frame_good_bad); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_unicast_good, + le64_to_cpu(resp->stats.rx_unicast_good), + &stats->rx_unicast_good); + sxe2_pf_nonclear_hw_stats64_set(adapter, &last_pf_hw_stats->rx_vlan_packets, + le64_to_cpu(resp->stats.rx_vlan_packets), + 
&stats->rx_vlan_packets); + + stats->fnav_match += le64_to_cpu(resp->stats.fnav_match); + + stats->spoof_mac_packets = le64_to_cpu(resp->stats.spoof_mac_packets); + stats->spoof_vlan_packets = le64_to_cpu(resp->stats.spoof_vlan_packets); + adapter->pf_stats.stat_prev_loaded = true; +} + +s32 sxe2_fwc_get_pf_stats(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_pf_stats_req req = {}; + struct sxe2_fwc_pf_stats_resp *resp = NULL; + struct sxe2_pf_hw_stats *stats = &adapter->pf_stats.pf_hw_stats; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (resp == NULL) { + LOG_DEV_ERR("alloc memory fail.\n"); + ret = -ENOMEM; + goto l_end; + } + + req.fnav_stats_idx = + adapter->fnav_ctxt.fnav_stat_ctxt.stat_rsv_idx[SXE2_FNAV_STAT_PF]; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_GET_PF_STATS, &req, + sizeof(req), resp, + sizeof(struct sxe2_fwc_pf_stats_resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("get stats failed, ret=%d\n", ret); + goto l_end; + } + sxe2_pf_hw_stats_set(adapter, stats, resp); +l_end: + kfree(resp); + + return ret; +} + +void sxe2_hw_pf_stats_update(struct sxe2_adapter *adapter) +{ + (void)sxe2_fwc_get_pf_stats(adapter); +} + +static void sxe2_sw_pf_stats_update(struct sxe2_adapter *adapter) +{ + adapter->pf_stats.pf_sw_stats.fnav_prgm_err = + adapter->fnav_ctxt.pkt_err_cnt; +} + +void sxe2_sw_vsi_stats_update(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_sw_stats cur_stats; + struct sxe2_vsi_qs_stats *vsi_qs_stats = &vsi->vsi_qs_stats; + struct sxe2_queue_stats *txq_stats, *rxq_stats; + u64 pkts, bytes; + u8 j; + + (void)memset(&cur_stats, 0, sizeof(cur_stats)); + + if (!test_bit(SXE2_VSI_S_DOWN, vsi->state)) { + sxe2_for_each_vsi_txq(vsi, j) { + txq_stats = &vsi_qs_stats->txqs_stats[j]; + sxe2_fetch_u64_data_per_ring(&txq_stats->syncp, + txq_stats, &pkts, &bytes); + cur_stats.tx_packets += pkts; + cur_stats.tx_bytes += bytes; + cur_stats.tx_restart += txq_stats->tx_stats.tx_restart; + cur_stats.tx_busy += txq_stats->tx_stats.tx_busy; + cur_stats.tx_linearize += + txq_stats->tx_stats.tx_linearize; + cur_stats.tx_vlan_insert += + txq_stats->tx_stats.tx_vlan_insert; + cur_stats.tx_tso_packets += + txq_stats->tx_stats.tx_tso_packets; + cur_stats.tx_tso_bytes += + txq_stats->tx_stats.tx_tso_bytes; + cur_stats.tx_csum_none += + txq_stats->tx_stats.tx_csum_none; + cur_stats.tx_csum_partial += + txq_stats->tx_stats.tx_csum_partial; + cur_stats.tx_csum_partial_inner += + txq_stats->tx_stats.tx_csum_partial_inner; + cur_stats.tx_queue_dropped += + txq_stats->tx_stats.tx_queue_dropped; + cur_stats.tx_xmit_more += + txq_stats->tx_stats.tx_xmit_more; + cur_stats.tx_tso_linearize_chk += + txq_stats->tx_stats.tx_tso_linearize_chk; + } + + sxe2_for_each_vsi_rxq(vsi, j) { + rxq_stats = &vsi_qs_stats->rxqs_stats[j]; + sxe2_fetch_u64_data_per_ring(&rxq_stats->syncp, + rxq_stats, &pkts, &bytes); + cur_stats.rx_packets += pkts; + cur_stats.rx_bytes += bytes; + cur_stats.rx_buff_alloc_err += + rxq_stats->rx_stats.rx_buff_alloc_err; + cur_stats.rx_pg_alloc_fail += + rxq_stats->rx_stats.rx_pg_alloc_fail; + cur_stats.rx_lro_count += + rxq_stats->rx_stats.rx_lro_count; + cur_stats.rx_lro_packets += + rxq_stats->rx_stats.rx_lro_packets; + cur_stats.rx_vlan_strip += + rxq_stats->rx_stats.rx_vlan_strip; + cur_stats.rx_csum_err += + rxq_stats->rx_stats.rx_csum_err; + cur_stats.rx_csum_unnecessary += + rxq_stats->rx_stats.rx_csum_unnecessary; + cur_stats.rx_csum_none += + rxq_stats->rx_stats.rx_csum_none; + 
cur_stats.rx_csum_complete +=
+				rxq_stats->rx_stats.rx_csum_complete;
+			cur_stats.rx_csum_unnecessary_inner +=
+				rxq_stats->rx_stats.rx_csum_unnecessary_inner;
+			cur_stats.rx_lro_bytes +=
+				rxq_stats->rx_stats.rx_lro_bytes;
+			cur_stats.rx_pkts_sw_drop +=
+				rxq_stats->rx_stats.rx_pkts_sw_drop;
+			cur_stats.rx_page_alloc +=
+				rxq_stats->rx_stats.rx_page_alloc;
+			cur_stats.rx_non_eop_descs +=
+				rxq_stats->rx_stats.rx_non_eop_descs;
+
+			cur_stats.rx_xdp_pkts +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_pkts;
+			cur_stats.rx_xdp_bytes +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_bytes;
+			cur_stats.rx_xdp_pass +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_pass;
+			cur_stats.rx_xdp_drop +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_drop;
+			cur_stats.rx_xdp_unknown +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_unknown;
+			cur_stats.rx_xdp_redirect +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_redirect;
+			cur_stats.rx_xdp_redirect_fail +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_redirect_fail;
+			cur_stats.rx_xdp_tx_xmit +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_tx_xmit;
+			cur_stats.rx_xdp_tx_xmit_fail +=
+				rxq_stats->rx_stats.xdp_stats.rx_xdp_tx_xmit_fail;
+
+			cur_stats.rx_xsk_redirect_fail +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_redirect_fail;
+			cur_stats.rx_xsk_redirect +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_redirect;
+			cur_stats.rx_xsk_unknown +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_unknown;
+			cur_stats.rx_xsk_pass +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_pass;
+			cur_stats.rx_xsk_packets +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_packets;
+			cur_stats.rx_xsk_bytes +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_bytes;
+			cur_stats.rx_xsk_drop +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_drop;
+			cur_stats.rx_xsk_tx_xmit +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_tx_xmit;
+			cur_stats.rx_xsk_tx_xmit_fail +=
+				rxq_stats->rx_stats.xdp_stats.rx_xsk_tx_xmit_fail;
+
+			cur_stats.rx_pa_err += rxq_stats->rx_stats.rx_pa_err;
+		}
+		(void)memcpy(&vsi->vsi_stats.vsi_sw_stats, &cur_stats,
+			     sizeof(cur_stats));
+	}
+}
+
+STATIC s32 sxe2_fwc_get_vsi_stats(struct sxe2_adapter *adapter,
+				  struct sxe2_fwc_vsi_stats_req *req,
+				  struct sxe2_fwc_vsi_stats_resp *resp,
+				  u32 in_len, u32 out_len)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_GET_VSI_STATS, req, in_len,
+				  resp, out_len);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("get %d vsi stats failed, req size=%d, resp size=%d, ret=%d\n",
+			      le16_to_cpu(req->vsi_cnt), in_len, out_len, ret);
+		goto l_end;
+	}
+	sxe2_hw_vsi_stats_set(adapter, resp);
+l_end:
+	return ret;
+}
+
+void sxe2_hw_vsi_stats_update(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_fwc_vsi_stats_req req = {};
+	struct sxe2_fwc_vsi_stats_resp *resp = NULL;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (resp == NULL) {
+		LOG_DEV_ERR("alloc memory fail.\n");
+		return;
+	}
+
+	req.vsi_cnt = cpu_to_le16(1);
+	req.vsi_ids[0] = cpu_to_le16(vsi->idx_in_dev);
+
+	(void)sxe2_fwc_get_vsi_stats(adapter, &req, resp,
+				     sizeof(struct sxe2_fwc_vsi_stats_req),
+				     sizeof(struct sxe2_fwc_vsi_stats_resp));
+	kfree(resp);
+}
+
+STATIC void sxe2_hw_vsi_stats_update_all(struct sxe2_adapter *adapter)
+{
+	u16 i;
+	u16 idx = 0;
+	struct sxe2_fwc_vsi_stats_req req = { 0 };
+	struct sxe2_fwc_vsi_stats_resp *resp = NULL;
+	struct sxe2_vsi *vsi = NULL;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (resp == NULL) {
+		LOG_DEV_ERR("alloc memory fail.\n");
+		return;
+	}
+
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i) {
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi)
+			continue;
+		req.vsi_ids[idx] = cpu_to_le16(vsi->idx_in_dev);
+		idx++;
+		if (idx == SXE2_CMD_VSI_STATS_MAX_CNT) {
+			/* vsi_cnt is little-endian on the wire */
+			req.vsi_cnt = cpu_to_le16(idx);
+			(void)sxe2_fwc_get_vsi_stats(adapter, &req, resp,
+						     sizeof(struct sxe2_fwc_vsi_stats_req),
+						     sizeof(struct sxe2_fwc_vsi_stats_resp));
+			idx = 0;
+		}
+	}
+
+	if (idx > 0) {
+		req.vsi_cnt = cpu_to_le16(idx);
+		(void)sxe2_fwc_get_vsi_stats(adapter, &req, resp,
+					     sizeof(struct sxe2_fwc_vsi_stats_req),
+					     sizeof(struct sxe2_fwc_vsi_stats_resp));
+	}
+	kfree(resp);
+}
+
+STATIC void sxe2_repr_accumulate_sw_stats(struct rtnl_link_stats64 *stats,
+					  struct sxe2_vsi *vsi)
+{
+	struct sxe2_vsi_hw_stats *vsi_hw_stats;
+
+	if (!vsi)
+		return;
+
+	vsi_hw_stats = &vsi->vsi_stats.vsi_hw_stats;
+
+	stats->rx_packets += vsi->vsi_stats.vsi_sw_stats.rx_packets;
+	stats->rx_bytes += vsi->vsi_stats.vsi_sw_stats.rx_bytes;
+	stats->tx_packets += vsi->vsi_stats.vsi_sw_stats.tx_packets;
+	stats->tx_bytes += vsi->vsi_stats.vsi_sw_stats.tx_bytes;
+	stats->multicast += vsi_hw_stats->rx_vsi_multicast_packets;
+}
+
+STATIC void sxe2_repr_accumulate_hw_stats(struct rtnl_link_stats64 *stats,
+					  struct sxe2_vsi *vsi)
+{
+	struct sxe2_vsi_hw_stats *vsi_hw_stats;
+
+	if (!vsi)
+		return;
+
+	vsi_hw_stats = &vsi->vsi_stats.vsi_hw_stats;
+
+	stats->rx_packets += vsi_hw_stats->rx_vsi_unicast_packets +
+			     vsi_hw_stats->rx_vsi_multicast_packets +
+			     vsi_hw_stats->rx_vsi_broadcast_packets;
+	stats->rx_bytes += vsi_hw_stats->rx_vsi_bytes;
+	stats->multicast += vsi_hw_stats->rx_vsi_multicast_packets;
+
+	stats->tx_packets += vsi_hw_stats->tx_vsi_unicast_packets +
+			     vsi_hw_stats->tx_vsi_multicast_packets +
+			     vsi_hw_stats->tx_vsi_broadcast_packets;
+	stats->tx_bytes += vsi_hw_stats->tx_vsi_bytes;
+}
+
+void sxe2_repr_vf_vsis_stats_acculate_update(struct sxe2_adapter *adapter)
+{
+	u16 vf_idx;
+	struct sxe2_vf_node *vf = NULL;
+	struct rtnl_link_stats64 stats;
+
+	sxe2_for_each_vf(adapter, vf_idx) {
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		vf = SXE2_VF_NODE(adapter, vf_idx);
+		if (!vf) {
+			mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+			continue;
+		}
+
+		memset(&stats, 0, sizeof(stats));
+
+		mutex_lock(&adapter->vsi_ctxt.lock);
+
+		if (test_bit(SXE2_FLAG_VFSWSTATS_ENABLE, adapter->flags))
+			sxe2_repr_accumulate_sw_stats(&stats, vf->vsi);
+		else
+			sxe2_repr_accumulate_hw_stats(&stats, vf->vsi);
+
+		sxe2_repr_accumulate_hw_stats(&stats, vf->dpdk_vf_vsi);
+
+		memcpy(&adapter->repr_vf_stats.repr_link_stats64[vf_idx], &stats, sizeof(stats));
+
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+	}
+}
+
+void sxe2_stats_update(struct sxe2_adapter *adapter)
+{
+	sxe2_hw_pf_stats_update(adapter);
+
+	sxe2_hw_vsi_stats_update_all(adapter);
+
+	sxe2_fnav_match_stats_update_batch(adapter);
+}
+
+static void sxe2_get_ipsec_stats(struct sxe2_vsi *vsi, u64 *data, u32 *idx)
+{
+	u32 i, j;
+	u64 rx_offload_success = 0;
+	u64 rx_error_decrypt_fail = 0;
+	u64 rx_error_invalid_state = 0;
+	u64 rx_error_invalid_sp = 0;
+	u64 tx_offload_success = 0;
+	u64 tx_error_invalid_state = 0;
+	u64 tx_error_invalid_sp = 0;
+	struct sxe2_queue *q;
+
+	if (!idx)
+		return;
+
+	i = *idx;
+
+	sxe2_for_each_vsi_rxq(vsi, j) {
+		q = READ_ONCE(vsi->rxqs.q[j]);
+		rx_offload_success += q->stats->ipsec_stats.rx_offload_success;
+		rx_error_decrypt_fail +=
+			q->stats->ipsec_stats.rx_error_decrypt_fail;
+		rx_error_invalid_state +=
+			q->stats->ipsec_stats.rx_error_invalid_state;
+		rx_error_invalid_sp +=
+			q->stats->ipsec_stats.rx_error_invalid_sp;
+	}
+	data[i++] = rx_offload_success;
+	data[i++] = rx_error_decrypt_fail;
+	data[i++] = rx_error_invalid_state;
+	data[i++] = rx_error_invalid_sp;
+
+	sxe2_for_each_vsi_txq(vsi, j) {
+		q = READ_ONCE(vsi->txqs.q[j]);
+		tx_offload_success += q->stats->ipsec_stats.tx_offload_success;
+		tx_error_invalid_state +=
+			q->stats->ipsec_stats.tx_error_invalid_state;
+		tx_error_invalid_sp +=
+			q->stats->ipsec_stats.tx_error_invalid_sp;
+	}
+	data[i++] = tx_offload_success;
+	data[i++] = tx_error_invalid_state;
+	data[i++] = tx_error_invalid_sp;
+
+	*idx = i;
+}
+
+void __sxe2_get_ethtool_stats(struct net_device *netdev,
+			      struct ethtool_stats __always_unused *stats,
+			      u64 *data, struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_pf_hw_stats *pf_hw_stats = &adapter->pf_stats.pf_hw_stats;
+	struct sxe2_pf_sw_stats *pf_sw_stats = &adapter->pf_stats.pf_sw_stats;
+	struct sxe2_vsi_sw_stats *vsi_sw_stats = &vsi->vsi_stats.vsi_sw_stats;
+	struct sxe2_vsi_hw_stats *vsi_hw_stats = &vsi->vsi_stats.vsi_hw_stats;
+
+	u8 j;
+	u32 i = 0;
+	char *p;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (test_bit(SXE2_VSI_S_DISABLE, vsi->state))
+		goto l_unlock;
+
+	sxe2_hw_pf_stats_update(adapter);
+
+	sxe2_sw_pf_stats_update(adapter);
+
+	sxe2_hw_vsi_stats_update(vsi);
+
+	sxe2_sw_vsi_stats_update(vsi);
+
+	for (j = 0; j < SXE2_VSI_SW_STATS_LEN; j++) {
+		p = (char *)vsi_sw_stats +
+		    sxe2_gstrings_vsi_sw_stats[j].stats_offset;
+		data[i++] = *(u64 *)p;
+	}
+
+	for (j = 0; j < SXE2_VSI_HW_STATS_LEN; j++) {
+		p = (char *)vsi_hw_stats +
+		    sxe2_gstrings_vsi_hw_stats[j].stats_offset;
+		data[i++] = *(u64 *)p;
+	}
+
+	for (j = 0; j < SXE2_PF_HW_STATS_LEN; j++) {
+		p = (char *)pf_hw_stats +
+		    sxe2_gstrings_pf_hw_stats[j].stats_offset;
+		data[i++] = *(u64 *)p;
+	}
+
+	for (j = 0; j < SXE2_PF_SW_STATS_LEN; j++) {
+		p = (char *)pf_sw_stats +
+		    sxe2_gstrings_pf_sw_stats[j].stats_offset;
+		data[i++] = *(u64 *)p;
+	}
+	for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++)
+		data[i++] = pf_hw_stats->rx_prio_buf_discard[j];
+
+	sxe2_get_tx_stats(vsi, data, &i);
+
+#ifdef SXE2_MACVLAN_STATS
+	sxe2_get_macvlan_tx_stats(vsi->adapter, data, &i);
+#endif
+
+	sxe2_get_rx_stats(vsi, data, &i);
+
+#ifdef SXE2_MACVLAN_STATS
+	sxe2_get_macvlan_rx_stats(vsi->adapter, data, &i);
+#endif
+
+	sxe2_get_ipsec_stats(vsi, data, &i);
+
+	sxe2_for_each_prioirty(j) {
+		data[i++] = pf_hw_stats->prio_xon_tx[j];
+		data[i++] = pf_hw_stats->prio_xoff_tx[j];
+		data[i++] = pf_hw_stats->prio_xon_rx[j];
+		data[i++] = pf_hw_stats->prio_xoff_rx[j];
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+void __sxe2_repr_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats __always_unused *stats,
+				   u64 *data, struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_vsi_hw_stats *vsi_hw_stats;
+	u8 j;
+	u32 i = 0;
+	char *p;
+	struct sxe2_vf_node *vf_node = vsi->vf_node;
+	struct sxe2_vsi *user_vsi;
+	s32 ret = 0;
+
+	LOG_INFO_BDF("vsi %d, get ethtool stats.\n", vsi->idx_in_dev);
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_node->vf_idx));
+
+	ret = sxe2_check_vf_ready_for_cfg(vf_node);
+	if (ret) {
+		LOG_ERROR_BDF(
+			"vf:%u pf flags:0x%lx vf states:0x%lx not ready.\n",
+			vf_node->vf_idx, *adapter->flags, *vf_node->states);
+		goto l_unlock;
+	}
+
+	sxe2_hw_vsi_stats_update(vsi);
+
+	vsi_hw_stats = &vsi->vsi_stats.vsi_hw_stats;
+	for (j = 0; j < SXE2_VSI_HW_STATS_LEN; j++) {
+		p = (char *)vsi_hw_stats +
+		    sxe2_gstrings_vsi_hw_stats[j].stats_offset;
+		data[i++] = *(u64 *)p;
+	}
+
+	user_vsi = vf_node->dpdk_vf_vsi;
+
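/* the VF may also own a user (DPDK) VSI; fold its hw counters into the same stat slots */
+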
if (user_vsi) { + sxe2_hw_vsi_stats_update(user_vsi); + i = 0; + vsi_hw_stats = &user_vsi->vsi_stats.vsi_hw_stats; + for (j = 0; j < SXE2_VSI_HW_STATS_LEN; j++) { + p = (char *)vsi_hw_stats + + sxe2_gstrings_vsi_hw_stats[j].stats_offset; + data[i++] += *(u64 *)p; + } + } + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_node->vf_idx)); +} + +STATIC void sxe2_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + + __sxe2_get_ethtool_stats(netdev, stats, data, vsi); +} + +STATIC int sxe2_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return (int)SXE2_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return sxe2_ethtool_selftest_count(netdev); + case ETH_SS_PRIV_FLAGS: + return SXE2_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +#ifdef GET_RINGPARAM_NEED_2_PARAMS +STATIC void sxe2_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#else +STATIC void sxe2_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +#endif +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + goto l_unlock; + + ring->rx_max_pending = SXE2_MAX_NUM_DESC; + ring->tx_max_pending = SXE2_MAX_NUM_DESC; + ring->rx_pending = vsi->rxqs.q[0]->depth; + ring->tx_pending = vsi->txqs.q[0]->depth; + + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +static s32 sxe2_ethtool_checkparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + u32 *tx_desc_num, u32 *rx_desc_num) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + LOG_NETDEV_ERR("do not support set rx_mini_pending=%u or rx_jumbo_pending=%u\n", + ring->rx_mini_pending, ring->rx_jumbo_pending); + return -EINVAL; + } + if (ring->tx_pending > SXE2_MAX_NUM_DESC || + ring->tx_pending < SXE2_MIN_NUM_DESC) { + LOG_NETDEV_ERR("tx queue depth(%d) is out of range [%d-%d] (increment %d)\n", + ring->tx_pending, SXE2_MIN_NUM_DESC, SXE2_MAX_NUM_DESC, + SXE2_DESC_ALIGN_32); + return -EINVAL; + } + if (ring->rx_pending > SXE2_MAX_NUM_DESC || + ring->rx_pending < SXE2_MIN_NUM_DESC) { + LOG_NETDEV_ERR("rx queue depth(%d) is out of range [%d-%d] (increment %d)\n", + ring->rx_pending, SXE2_MIN_NUM_DESC, SXE2_MAX_NUM_DESC, + SXE2_DESC_ALIGN_32); + return -EINVAL; + } + *tx_desc_num = ALIGN(ring->tx_pending, SXE2_DESC_ALIGN_32); + if (*tx_desc_num != ring->tx_pending) + LOG_NETDEV_INFO("requested tx descriptor count changed to %d\n", + *tx_desc_num); + + *rx_desc_num = ALIGN(ring->rx_pending, SXE2_DESC_ALIGN_32); + if (*rx_desc_num != ring->rx_pending) + LOG_NETDEV_INFO("requested rx descriptor count changed to %d\n", + *rx_desc_num); + return 0; +} + +static void sxe2_ringparam_set_offline(struct sxe2_vsi *vsi, u32 tx_size, + u32 rx_size) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct net_device *netdev = vsi->netdev; + + u32 i; + + if (vsi->txqs.depth == tx_size) { + 
LOG_NETDEV_DEBUG("tx desc depth[%d] not changed.\n", tx_size);
+	} else {
+		LOG_NETDEV_DEBUG("link is down, tx desc depth change from [%d] to [%d] happens \t"
+				 "when link is brought up.\n",
+				 vsi->txqs.depth, tx_size);
+		sxe2_for_each_vsi_txq(vsi, i) {
+			vsi->txqs.q[i]->depth = (u16)tx_size;
+		}
+
+		if (sxe2_xdp_is_enable(vsi)) {
+			for (i = 0; i < vsi->num_xdp_txq; i++)
+				vsi->xdp_rings.q[i]->depth = (u16)tx_size;
+
+			vsi->xdp_rings.depth = (u16)tx_size;
+		}
+
+		vsi->txqs.depth = (u16)tx_size;
+	}
+
+	if (vsi->rxqs.depth == rx_size) {
+		LOG_NETDEV_DEBUG("rx desc depth[%d] not changed.\n", rx_size);
+	} else {
+		LOG_NETDEV_DEBUG("link is down, rx desc depth change from [%d] to [%d] happens \t"
+				 "when link is brought up.\n",
+				 vsi->rxqs.depth, rx_size);
+		sxe2_for_each_vsi_rxq(vsi, i) {
+			vsi->rxqs.q[i]->depth = (u16)rx_size;
+		}
+		vsi->rxqs.depth = (u16)rx_size;
+	}
+}
+
+#ifdef SET_RINGPARAM_NEED_2_PARAMS
+STATIC int sxe2_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+#else
+STATIC int sxe2_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring,
+			      struct kernel_ethtool_ringparam *kernel_ring,
+			      struct netlink_ext_ack *extack)
+#endif
+{
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	s32 ret = 0;
+	u32 new_rx_size, new_tx_size;
+	u32 old_rx_size, old_tx_size;
+
+	ret = sxe2_ethtool_checkparam(netdev, ring, &new_tx_size, &new_rx_size);
+	if (ret)
+		goto out;
+
+	old_rx_size = vsi->rxqs.depth;
+	old_tx_size = vsi->txqs.depth;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) {
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (netif_running(vsi->netdev)) {
+		ret = sxe2_vsi_close(vsi);
+		if (ret) {
+			LOG_DEBUG_BDF("vsi close failed, vsi %d error %d\n",
+				      vsi->idx_in_dev, ret);
+			goto l_unlock;
+		}
+	}
+
+	sxe2_ringparam_set_offline(vsi, new_tx_size, new_rx_size);
+
+	if (netif_running(vsi->netdev)) {
+		ret = sxe2_vsi_open(vsi);
+		if (ret) {
+			LOG_DEBUG_BDF("vsi open failed, vsi %d error %d\n",
+				      vsi->idx_in_dev, ret);
+			sxe2_ringparam_set_offline(vsi, old_tx_size,
+						   old_rx_size);
+			ret = sxe2_vsi_open(vsi);
+			if (ret) {
+				LOG_DEBUG_BDF("vsi open failed, vsi %d error %d\n",
+					      vsi->idx_in_dev, ret);
+			}
+		}
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+out:
+	return ret;
+}
+
+STATIC void sxe2_analysis_hdrs(struct ethtool_rxnfc *nfc, unsigned long *hdrs)
+{
+	bitmap_zero(hdrs, SXE2_FLOW_HDR_MAX);
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV4, hdrs);
+		set_bit(SXE2_FLOW_HDR_TCP, hdrs);
+		break;
+	case UDP_V4_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV4, hdrs);
+		set_bit(SXE2_FLOW_HDR_UDP, hdrs);
+		break;
+	case SCTP_V4_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV4, hdrs);
+		set_bit(SXE2_FLOW_HDR_SCTP, hdrs);
+		break;
+	case TCP_V6_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV6, hdrs);
+		set_bit(SXE2_FLOW_HDR_TCP, hdrs);
+		break;
+	case UDP_V6_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV6, hdrs);
+		set_bit(SXE2_FLOW_HDR_UDP, hdrs);
+		break;
+	case SCTP_V6_FLOW:
+		set_bit(SXE2_FLOW_HDR_IPV6, hdrs);
+		set_bit(SXE2_FLOW_HDR_SCTP, hdrs);
+		break;
+	default:
+		break;
+	}
+}
+
+STATIC void sxe2_get_rss_flow(struct sxe2_vsi *vsi, struct ethtool_rxnfc *nfc)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	DECLARE_BITMAP(hash_flds, SXE2_FLOW_FLD_ID_MAX);
+	DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX);
+
+	nfc->data = 0;
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR("ethtool get rx flow hash in safe mode is not supported.\n");
+
return; + } + + sxe2_analysis_hdrs(nfc, hdrs); + if (bitmap_empty(hdrs, SXE2_FLOW_HDR_MAX)) { + LOG_INFO_BDF("header type is not valid, vsi:%d\n", + vsi->id_in_pf); + return; + } + + sxe2_rss_get_hash_cfg_with_hdrs(&adapter->rss_flow_ctxt, vsi->id_in_pf, + hdrs, hash_flds); + if (bitmap_empty(hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_INFO_BDF("no hash fields found for the given header type, vsi:%d\n", + vsi->id_in_pf); + return; + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_IPV6_SA, hash_flds)) { + nfc->data |= (u64)RXH_IP_SRC; + } + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_IPV6_DA, hash_flds)) { + nfc->data |= (u64)RXH_IP_DST; + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, hash_flds)) { + nfc->data |= (u64)RXH_L4_B_0_1; + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, hash_flds)) { + nfc->data |= (u64)RXH_L4_B_2_3; + } +} + +STATIC u32 sxe2_ntuple_max_filter_cnt_get_by_vsi(struct sxe2_vsi *vsi) +{ + u32 acl_filter_cnt = 0; + u32 fnav_filter_cnt = 0; +#ifdef SXE2_SUPPORT_ACL + if (test_bit(SXE2_FLAG_ACL_CAPABLE, vsi->adapter->flags)) + acl_filter_cnt = vsi->adapter->acl_ctxt.acl_tbl_info->max_slot_cnt; +#endif + fnav_filter_cnt = sxe2_fnav_max_filter_cnt_get_by_vsi(vsi); + + return acl_filter_cnt + fnav_filter_cnt; +} + +STATIC enum sxe2_fnav_flow_type sxe2_ethtool_flow_to_type(u32 flow) +{ + enum sxe2_fnav_flow_type flow_type; + + switch (flow) { + case ETHER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_ETH; + break; + case TCP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + break; + case UDP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + break; + case SCTP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_SCTP; + break; + case IPV4_USER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_OTHER; + break; + case TCP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + break; + case UDP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + break; + case SCTP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_SCTP; + break; + case IPV6_USER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_OTHER; + break; + default: + flow_type = SXE2_FNAV_FLOW_TYPE_NONE; + break; + } + + return flow_type; +} + +STATIC u32 sxe2_flow_type_to_ethtool_flow(enum sxe2_fnav_flow_type flow_type) +{ + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + return ETHER_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + return TCP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + return UDP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + return SCTP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + return IPV4_USER_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + return TCP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + return UDP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + return SCTP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + return IPV6_USER_FLOW; + default: + return 0; + } +} + +STATIC int sxe2_ethtool_fnav_filter_get_by_loc(struct sxe2_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + struct sxe2_adapter *adapter = vsi->adapter; + int ret = 0; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct sxe2_fnav_filter *filter; + u64 vf_id = 0; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + filter = sxe2_fnav_find_filter_by_loc_unlock(&vsi->fnav, + fsp->location); + + if 
(!filter) { + LOG_ERROR_BDF("filter in loc[%u] is not found.\n", + fsp->location); + ret = -EINVAL; + goto l_unlock; + } + + fsp->flow_type = sxe2_flow_type_to_ethtool_flow(filter->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case ETHER_FLOW: + fsp->h_u.ether_spec = filter->full_key.eth; + fsp->m_u.ether_spec = filter->full_key.eth_mask; + break; + case IPV4_USER_FLOW: + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = filter->full_key.ip.v4.proto; + fsp->h_u.usr_ip4_spec.l4_4_bytes = + filter->full_key.ip.v4.l4_header; + fsp->h_u.usr_ip4_spec.tos = filter->full_key.ip.v4.tos; + fsp->h_u.usr_ip4_spec.ip4src = filter->full_key.ip.v4.src_ip; + fsp->h_u.usr_ip4_spec.ip4dst = filter->full_key.ip.v4.dst_ip; + fsp->m_u.usr_ip4_spec.ip4src = filter->full_key.mask.v4.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = filter->full_key.mask.v4.dst_ip; + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; + fsp->m_u.usr_ip4_spec.proto = filter->full_key.mask.v4.proto; + fsp->m_u.usr_ip4_spec.l4_4_bytes = + filter->full_key.mask.v4.l4_header; + fsp->m_u.usr_ip4_spec.tos = filter->full_key.mask.v4.tos; + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.psrc = filter->full_key.l4.src_port; + fsp->h_u.tcp_ip4_spec.pdst = filter->full_key.l4.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = filter->full_key.ip.v4.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = filter->full_key.ip.v4.dst_ip; + fsp->h_u.tcp_ip4_spec.tos = filter->full_key.ip.v4.tos; + fsp->m_u.tcp_ip4_spec.psrc = filter->full_key.l4_mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = filter->full_key.l4_mask.dst_port; + fsp->m_u.tcp_ip4_spec.ip4src = filter->full_key.mask.v4.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = filter->full_key.mask.v4.dst_ip; + fsp->m_u.tcp_ip4_spec.tos = filter->full_key.mask.v4.tos; + break; + case IPV6_USER_FLOW: + fsp->h_u.usr_ip6_spec.l4_4_bytes = + filter->full_key.ip.v6.l4_header; + fsp->h_u.usr_ip6_spec.tclass = filter->full_key.ip.v6.tc; + fsp->h_u.usr_ip6_spec.l4_proto = filter->full_key.ip.v6.proto; + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, + filter->full_key.ip.v6.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, + filter->full_key.ip.v6.dst_ip, sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, + filter->full_key.mask.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, + filter->full_key.mask.v6.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = + filter->full_key.mask.v6.l4_header; + fsp->m_u.usr_ip6_spec.tclass = filter->full_key.mask.v6.tc; + fsp->m_u.usr_ip6_spec.l4_proto = filter->full_key.mask.v6.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, + filter->full_key.ip.v6.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, + filter->full_key.ip.v6.dst_ip, sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = filter->full_key.l4.src_port; + fsp->h_u.tcp_ip6_spec.pdst = filter->full_key.l4.dst_port; + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, + filter->full_key.mask.v6.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, + filter->full_key.mask.v6.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = filter->full_key.l4_mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = filter->full_key.l4_mask.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = filter->full_key.ip.v6.tc; + fsp->m_u.tcp_ip6_spec.tclass = 
filter->full_key.mask.v6.tc;
+		break;
+	default:
+		break;
+	}
+
+	if (filter->act_type == SXE2_FNAV_ACT_DROP) {
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	} else {
+		fsp->ring_cookie = filter->origin_q_index;
+		if (filter->ori_vsi_hw != filter->dst_vsi_hw) {
+			vf_id = filter->vf_idx + 1;
+			vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+			fsp->ring_cookie |= vf_id;
+		}
+	}
+
+	if (filter->full_key.flow_ext) {
+		fsp->flow_type |= FLOW_EXT;
+		memcpy(fsp->h_ext.data, filter->full_key.ext_data.usr_def,
+		       sizeof(fsp->h_ext.data));
+		memcpy(fsp->m_ext.data, filter->full_key.ext_mask.usr_def,
+		       sizeof(fsp->m_ext.data));
+		fsp->h_ext.vlan_etype = filter->full_key.ext_data.vlan_type;
+		fsp->m_ext.vlan_etype = filter->full_key.ext_mask.vlan_type;
+		fsp->h_ext.vlan_tci = filter->full_key.ext_data.s_vlan_tci;
+		fsp->m_ext.vlan_tci = filter->full_key.ext_mask.s_vlan_tci;
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+	return ret;
+}
+
+STATIC int sxe2_ethtool_ntuple_filter_locs_get(struct sxe2_vsi *vsi,
+					       struct ethtool_rxnfc *cmd,
+					       u32 *filter_locs)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	int ret = 0;
+	unsigned int cnt = 0;
+	struct sxe2_fnav_filter *filter;
+
+	cmd->data = sxe2_ntuple_max_filter_cnt_get_by_vsi(vsi);
+	if (cmd->data == 0)
+		return -EOPNOTSUPP;
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+
+	list_for_each_entry(filter, &vsi->fnav.filter_list,
+			    l_node) {
+		if (cnt == cmd->rule_cnt) {
+			ret = -EMSGSIZE;
+			break;
+		}
+		filter_locs[cnt] = filter->filter_loc;
+		cnt++;
+	}
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+
+	if (!ret)
+		cmd->rule_cnt = cnt;
+
+	return ret;
+}
+
+STATIC int sxe2_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+			  u32 *rule_locs)
+{
+	int ret = -EOPNOTSUPP;
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR(
+			"ethtool get rx flow in safe mode is not supported.\n");
+		return -EINVAL;
+	}
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = vsi->rss_ctxt.queue_size;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = vsi->fnav.filter_cnt;
+		cmd->data = sxe2_ntuple_max_filter_cnt_get_by_vsi(vsi);
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = sxe2_ethtool_fnav_filter_get_by_loc(vsi, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = sxe2_ethtool_ntuple_filter_locs_get(vsi, cmd,
+							  rule_locs);
+		break;
+	case ETHTOOL_GRXFH:
+		sxe2_get_rss_flow(vsi, cmd);
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+STATIC void sxe2_analysis_hash_flds(struct ethtool_rxnfc *nfc,
+				    unsigned long *hash_flds)
+{
+	bitmap_zero(hash_flds, SXE2_FLOW_FLD_ID_MAX);
+	if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
+		switch (nfc->flow_type) {
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+		case SCTP_V4_FLOW:
+			if (nfc->data & RXH_IP_SRC)
+				set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, hash_flds);
+
+			if (nfc->data & RXH_IP_DST)
+				set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, hash_flds);
+
+			break;
+		case TCP_V6_FLOW:
+		case UDP_V6_FLOW:
+		case SCTP_V6_FLOW:
+			if (nfc->data & RXH_IP_SRC)
+				set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, hash_flds);
+
+			if (nfc->data & RXH_IP_DST)
+				set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, hash_flds);
+
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
+		switch (nfc->flow_type) {
+		case TCP_V4_FLOW:
+		case TCP_V6_FLOW:
+			if (nfc->data & RXH_L4_B_0_1)
+				set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT,
hash_flds); + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, hash_flds); + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (nfc->data & RXH_L4_B_0_1) + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, hash_flds); + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, hash_flds); + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (nfc->data & RXH_L4_B_0_1) + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, hash_flds); + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, hash_flds); + break; + default: + break; + } + } +} + +STATIC int sxe2_set_rss_flow(struct sxe2_vsi *vsi, struct ethtool_rxnfc *nfc) +{ + int ret = 0; + DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hash_flds, SXE2_FLOW_FLD_ID_MAX); + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_rss_hash_cfg cfg; + + sxe2_analysis_hdrs(nfc, hdrs); + if (bitmap_empty(hdrs, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + return -EINVAL; + } + + sxe2_analysis_hash_flds(nfc, hash_flds); + if (bitmap_empty(hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR_BDF("invalid field type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + return -EINVAL; + } + + cfg.hdr_type = SXE2_RSS_ANY_HEADERS; + bitmap_copy(cfg.headers, hdrs, SXE2_FLOW_HDR_MAX); + bitmap_copy(cfg.hash_flds, hash_flds, SXE2_FLOW_FLD_ID_MAX); + cfg.symm = false; + + ret = sxe2_add_rss_flow(&adapter->rss_flow_ctxt, vsi->id_in_pf, &cfg); + if (ret != 0) { + LOG_ERROR_BDF("invalid field type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + return -EINVAL; + } + + return 0; +} + +#define SXE2_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0) +#define SXE2_USERDEF_FLEX_OFF_S 16 +#define SXE2_USERDEF_FLEX_OFF_M GENMASK_ULL(31, SXE2_USERDEF_FLEX_OFF_S) +#define SXE2_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0) +#define SXE2_USERDEF_FLEX_MAX_OFF 0x1fe + +STATIC int sxe2_ethtool_parse_ntuple_userdef(struct sxe2_fnav_filter *filter, + struct ethtool_rx_flow_spec *fsp) +{ + u64 value, mask; + + if (!(fsp->flow_type & FLOW_EXT)) + return 0; + + value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data)); + mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data)); + if (!mask) + return 0; + + LOG_DEBUG("user-def param:0x%llx.\n", value); + + if (!((mask & SXE2_USERDEF_FLEX_FLTR_M) == SXE2_USERDEF_FLEX_FLTR_M) || + value > SXE2_USERDEF_FLEX_FLTR_M) { + return -EINVAL; + } + + filter->full_key.flex_word = + cpu_to_be16((u16)(value & SXE2_USERDEF_FLEX_WORD_M)); + filter->full_key.flex_offset = + (u16)FIELD_GET(SXE2_USERDEF_FLEX_OFF_M, value); + if (filter->full_key.flex_offset > SXE2_USERDEF_FLEX_MAX_OFF) + return -EINVAL; + + filter->full_key.has_flex_filed = true; + + return 0; +} + +STATIC int sxe2_ethtool_fnav_seg_eth_fill(struct ethhdr *eth_spec, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + + if (eth_spec->h_proto == htons(0xFFFF)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, seg->fields); + } else if (eth_spec->h_proto) { + LOG_WARN("proto mask must be 0x0000 or 0xffff.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (is_broadcast_ether_addr(eth_spec->h_source)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, seg->fields); + } else if (!is_zero_ether_addr(eth_spec->h_source)) { + LOG_WARN("src mask must be 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (is_broadcast_ether_addr(eth_spec->h_dest)) { + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, seg->fields); + } else if 
(!is_zero_ether_addr(eth_spec->h_dest)) { + LOG_WARN("dst mask must be 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + +l_end: + return ret; +} + +STATIC bool sxe2_ethtool_vlan_seg_valid(struct ethtool_rx_flow_spec *fsp) +{ + bool ret = fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci; + + if (fsp->m_ext.vlan_etype && + !(fsp->h_ext.vlan_etype == cpu_to_be16(ETH_P_8021Q) || + fsp->h_ext.vlan_etype == cpu_to_be16(ETH_P_8021AD))) { + ret = false; + goto l_end; + } + + if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID) { + ret = false; + goto l_end; + } + + if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci && + !fsp->m_ext.vlan_etype) { + LOG_WARN( + "Filter with proto and vlan require also vlan-etype.\n"); + ret = false; + goto l_end; + } + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_fnav_seg_vlan_fill(struct ethtool_flow_ext *ext_mask, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + + set_bit(SXE2_FLOW_HDR_VLAN, seg->headers); + + if (ext_mask->vlan_etype) { + if (ext_mask->vlan_etype != htons(0xFFFF)) { + ret = -EOPNOTSUPP; + goto l_end; + } + set_bit(SXE2_FLOW_FLD_ID_S_TPID, seg->fields); + } + + if (ext_mask->vlan_tci) { + if (ext_mask->vlan_tci != htons(0xFFFF)) { + ret = -EOPNOTSUPP; + goto l_end; + } + set_bit(SXE2_FLOW_FLD_ID_S_TCI, seg->fields); + } + +l_end: + return ret; +} + +static int +sxe2_ethtool_fnav_l4_ip4_seg_valid(struct ethtool_tcpip4_spec *l4_ip4_spec) +{ + int ret = 0; + + if (!l4_ip4_spec->psrc && !l4_ip4_spec->ip4src && !l4_ip4_spec->pdst && + !l4_ip4_spec->ip4dst && !l4_ip4_spec->tos) { + ret = -EINVAL; + goto l_end; + } + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_fnav_seg_l4_ip4_fill(struct ethtool_rx_flow_spec *fsp, + enum sxe2_flow_hdr l4_proto, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + enum sxe2_flow_fld_id sport, dport; + struct ethtool_tcpip4_spec *l4_ip4_spec = &fsp->m_u.tcp_ip4_spec; + + ret = sxe2_ethtool_fnav_l4_ip4_seg_valid(l4_ip4_spec); + if (ret) { + LOG_ERROR("l4 ipv4 seg is invalid, ret:%d.", ret); + goto l_end; + } + + if (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci || + !(is_zero_ether_addr(fsp->m_ext.h_dest))) { + ret = -EOPNOTSUPP; + goto l_end; + } + + switch (l4_proto) { + case SXE2_FLOW_HDR_TCP: + sport = SXE2_FLOW_FLD_ID_TCP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_TCP_DST_PORT; + break; + case SXE2_FLOW_HDR_UDP: + sport = SXE2_FLOW_FLD_ID_UDP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_UDP_DST_PORT; + break; + case SXE2_FLOW_HDR_SCTP: + sport = SXE2_FLOW_FLD_ID_SCTP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_SCTP_DST_PORT; + break; + default: + ret = -EOPNOTSUPP; + break; + } + if (ret) { + LOG_ERROR("l4 protocol type is invalid, ret:%d.\n", ret); + goto l_end; + } + + *full_match = true; + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + set_bit((int)l4_proto, seg->headers); + + if (l4_ip4_spec->ip4src == htonl(0xFFFFFFFF)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + } else if (!l4_ip4_spec->ip4src) { + *full_match = false; + } else { + LOG_ERROR("src-ip is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (l4_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + } else if (!l4_ip4_spec->ip4dst) { + *full_match = false; + } else { + LOG_ERROR("dst-ip is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (l4_ip4_spec->psrc == htons(0xFFFF)) { + set_bit((int)sport, seg->fields); + } else if (!l4_ip4_spec->psrc) { + *full_match = false; + } else { + LOG_ERROR("src-port is invalid.\n"); 
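+		/* fnav supports only exact-match (0xffff) or wildcard (0)
+		 * L4 port masks, so a partial mask is rejected here.
+		 */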
+ ret = -EOPNOTSUPP; + goto l_end; + } + + if (l4_ip4_spec->pdst == htons(0xFFFF)) { + set_bit((int)dport, seg->fields); + } else if (!l4_ip4_spec->pdst) { + *full_match = false; + } else { + LOG_ERROR("dst-port is invalid.\n"); + ret = -EOPNOTSUPP; + } + + if (l4_ip4_spec->tos == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, seg->fields); + *full_match = false; + } else if (l4_ip4_spec->tos) { + LOG_ERROR("tos is invalid.\n"); + ret = -EOPNOTSUPP; + } + +l_end: + return ret; +} + +static int +sxe2_ethtool_fnav_usr_ip4_seg_valid(struct ethtool_usrip4_spec *usr_ip4_spec) +{ + if (usr_ip4_spec->l4_4_bytes) + return -EOPNOTSUPP; + if (usr_ip4_spec->ip_ver) + return -EOPNOTSUPP; + if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst && + !usr_ip4_spec->tos && !usr_ip4_spec->proto) + return -EINVAL; + + return 0; +} + +STATIC int sxe2_ethtool_fnav_seg_usr_ip4_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + struct ethtool_usrip4_spec *usr_ip4_spec = &fsp->m_u.usr_ip4_spec; + + ret = sxe2_ethtool_fnav_usr_ip4_seg_valid(usr_ip4_spec); + if (ret) { + LOG_ERROR("usr ipv4 seg is invalid, ret:%d.", ret); + goto l_end; + } + + if (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci || + !(is_zero_ether_addr(fsp->m_ext.h_dest))) { + ret = -EOPNOTSUPP; + goto l_end; + } + + if (usr_ip4_spec->proto == 0xFF && + (fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_TCP || + fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_UDP || + fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_SCTP)) { + ret = -EOPNOTSUPP; + goto l_end; + } + + *full_match = true; + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + + if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + } else if (!usr_ip4_spec->ip4src) { + *full_match = false; + } else { + LOG_ERROR("src-ip is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + } else if (!usr_ip4_spec->ip4dst) { + *full_match = false; + } else { + LOG_ERROR("dst-ip is invalid.\n"); + ret = -EOPNOTSUPP; + } + + if (usr_ip4_spec->tos == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, seg->fields); + *full_match = false; + } else if (usr_ip4_spec->tos) { + LOG_ERROR("tos is invalid.\n"); + ret = -EOPNOTSUPP; + } + + if (usr_ip4_spec->proto == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, seg->fields); + *full_match = false; + } else if (usr_ip4_spec->proto) { + LOG_ERROR("proto is invalid.\n"); + ret = -EOPNOTSUPP; + } + +l_end: + return ret; +} + +static int +sxe2_ethtool_fnav_l4_ip6_seg_valid(struct ethtool_tcpip6_spec *l4_ip6_spec) +{ + int ret = 0; + + if (ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6src) && + ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6dst) && + !l4_ip6_spec->psrc && !l4_ip6_spec->pdst && !l4_ip6_spec->tclass) { + ret = -EINVAL; + goto l_end; + } + +l_end: + return ret; +} + +static inline bool sxe2_ethtool_ntuple_ipv6_mask_full(const __be32 *a) +{ + return (a[0] & a[1] & a[2] & a[3]) == cpu_to_be32(0xffffffff); +} + +STATIC int sxe2_ethtool_fnav_seg_l4_ip6_fill(struct ethtool_rx_flow_spec *fsp, + enum sxe2_flow_hdr l4_proto, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + enum sxe2_flow_fld_id sport, dport; + struct ethtool_tcpip6_spec *l4_ip6_spec = &fsp->m_u.tcp_ip6_spec; + + ret = sxe2_ethtool_fnav_l4_ip6_seg_valid(l4_ip6_spec); + if (ret) { + LOG_ERROR("l4 ipv6 seg is invalid, ret:%d.", ret); + goto l_end; + } + + if 
(fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci || + !(is_zero_ether_addr(fsp->m_ext.h_dest))) { + ret = -EOPNOTSUPP; + goto l_end; + } + + switch (l4_proto) { + case SXE2_FLOW_HDR_TCP: + sport = SXE2_FLOW_FLD_ID_TCP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_TCP_DST_PORT; + break; + case SXE2_FLOW_HDR_UDP: + sport = SXE2_FLOW_FLD_ID_UDP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_UDP_DST_PORT; + break; + case SXE2_FLOW_HDR_SCTP: + sport = SXE2_FLOW_FLD_ID_SCTP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_SCTP_DST_PORT; + break; + default: + ret = -EOPNOTSUPP; + break; + } + if (ret) { + LOG_ERROR("l4 protocol type is invalid, ret:%d.\n", ret); + goto l_end; + } + + *full_match = true; + set_bit(SXE2_FLOW_HDR_IPV6, seg->headers); + set_bit((int)l4_proto, seg->headers); + + if (sxe2_ethtool_ntuple_ipv6_mask_full(l4_ip6_spec->ip6src)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, seg->fields); + } else if (ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6src)) { + *full_match = false; + } else { + LOG_ERROR("src-ip is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (sxe2_ethtool_ntuple_ipv6_mask_full(l4_ip6_spec->ip6dst)) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, seg->fields); + } else if (ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6dst)) { + *full_match = false; + } else { + LOG_ERROR("dst-ip is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (l4_ip6_spec->psrc == htons(0xFFFF)) { + set_bit((int)sport, seg->fields); + } else if (!l4_ip6_spec->psrc) { + *full_match = false; + } else { + LOG_ERROR("src-port is invalid.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + if (l4_ip6_spec->pdst == htons(0xFFFF)) { + set_bit((int)dport, seg->fields); + } else if (!l4_ip6_spec->pdst) { + *full_match = false; + } else { + LOG_ERROR("dst-port is invalid.\n"); + ret = -EOPNOTSUPP; + } + + if (l4_ip6_spec->tclass == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_DSCP, seg->fields); + *full_match = false; + } else if (l4_ip6_spec->tclass) { + LOG_ERROR("tclass is invalid.\n"); + ret = -EOPNOTSUPP; + } + +l_end: + return ret; +} + +static int +sxe2_ethtool_fnav_usr_ip6_seg_valid(struct ethtool_usrip6_spec *usr_ip6_spec) +{ + if (usr_ip6_spec->l4_4_bytes) + return -EOPNOTSUPP; + if (ipv6_addr_any((struct in6_addr *)usr_ip6_spec->ip6src) && + ipv6_addr_any((struct in6_addr *)usr_ip6_spec->ip6dst) && + !usr_ip6_spec->l4_proto && !usr_ip6_spec->tclass) + return -EINVAL; + + return 0; +} + +STATIC int sxe2_fnav_seg_usr_ip6_addr_fill(const __be32 *addr, + enum sxe2_flow_fld_id fld_id, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + + if (sxe2_ethtool_ntuple_ipv6_mask_full(addr)) + set_bit((int)fld_id, seg->fields); + else if (ipv6_addr_any((struct in6_addr *)addr)) + *full_match = false; + else + ret = -EOPNOTSUPP; + + return ret; +} + +STATIC int sxe2_ethtool_fnav_seg_usr_ip6_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + struct ethtool_usrip6_spec *usr_ip6_spec = &fsp->m_u.usr_ip6_spec; + + ret = sxe2_ethtool_fnav_usr_ip6_seg_valid(usr_ip6_spec); + if (ret) { + LOG_ERROR("usr ipv6 seg is invalid, ret:%d.", ret); + goto l_end; + } + + if (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci || + !(is_zero_ether_addr(fsp->m_ext.h_dest))) { + ret = -EOPNOTSUPP; + goto l_end; + } + + if (usr_ip6_spec->l4_proto == 0xFF && + (fsp->h_u.usr_ip6_spec.l4_proto == SXE2_FNAV_L4_PROT_TCP || + fsp->h_u.usr_ip6_spec.l4_proto == SXE2_FNAV_L4_PROT_UDP || + fsp->h_u.usr_ip6_spec.l4_proto == SXE2_FNAV_L4_PROT_SCTP)) { + ret = -EOPNOTSUPP; + goto l_end; + } + + 
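+	/* Assume a fully specified match; any wildcarded field below
+	 * clears full_match again.
+	 */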
*full_match = true; + set_bit(SXE2_FLOW_HDR_IPV6, seg->headers); + + ret = sxe2_fnav_seg_usr_ip6_addr_fill(usr_ip6_spec->ip6src, + SXE2_FLOW_FLD_ID_IPV6_SA, seg, + full_match); + if (ret) { + LOG_ERROR("src-ip is invalid.\n"); + goto l_end; + } + + ret = sxe2_fnav_seg_usr_ip6_addr_fill(usr_ip6_spec->ip6dst, + SXE2_FLOW_FLD_ID_IPV6_DA, seg, + full_match); + if (ret) + LOG_ERROR("dst-ip is invalid.\n"); + + if (usr_ip6_spec->tclass == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_DSCP, seg->fields); + *full_match = false; + } else if (usr_ip6_spec->tclass) { + LOG_ERROR("tclass is invalid.\n"); + ret = -EOPNOTSUPP; + } + + if (usr_ip6_spec->l4_proto == 0xFF) { + set_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, seg->fields); + *full_match = false; + } else if (usr_ip6_spec->l4_proto) { + LOG_ERROR("proto is invalid.\n"); + ret = -EOPNOTSUPP; + } + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_fnav_seg_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_flow_seg *seg, + bool *full_match) +{ + int ret = 0; + enum sxe2_fnav_flow_type flow_type = + sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT); + struct ethhdr *eth_spec = &fsp->m_u.ether_spec; + + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + if (is_zero_ether_addr(eth_spec->h_source) + && is_zero_ether_addr(eth_spec->h_dest) + && !eth_spec->h_proto + && !fsp->m_ext.vlan_etype + && !fsp->m_ext.vlan_tci) { + ret = -EINVAL; + break; + } + ret = sxe2_ethtool_fnav_seg_eth_fill(&fsp->m_u.ether_spec, seg); + if (ret) + break; + + if (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci) { + if (!sxe2_ethtool_vlan_seg_valid(fsp)) { + ret = -EINVAL; + break; + } + ret = sxe2_ethtool_fnav_seg_vlan_fill(&fsp->m_ext, seg); + } + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + ret = sxe2_ethtool_fnav_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_TCP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + ret = sxe2_ethtool_fnav_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_UDP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + ret = sxe2_ethtool_fnav_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_SCTP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + ret = sxe2_ethtool_fnav_seg_usr_ip4_fill(fsp, seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + ret = sxe2_ethtool_fnav_seg_l4_ip6_fill(fsp, SXE2_FLOW_HDR_TCP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + ret = sxe2_ethtool_fnav_seg_l4_ip6_fill(fsp, SXE2_FLOW_HDR_UDP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + ret = sxe2_ethtool_fnav_seg_l4_ip6_fill(fsp, SXE2_FLOW_HDR_SCTP, + seg, full_match); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + ret = sxe2_ethtool_fnav_seg_usr_ip6_fill(fsp, seg, full_match); + break; + default: + ret = -EINVAL; + break; + } + + seg->is_tunnel = false; + + return ret; +} + +STATIC s32 sxe2_ethtool_fnav_flow_cfg_parse(struct sxe2_vsi *vsi, + struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_filter *filter, struct sxe2_fnav_flow_seg *segs) +{ + struct sxe2_adapter *adapter = vsi->adapter; + int ret = 0; + struct sxe2_fnav_flow_seg *seg = NULL; + struct sxe2_fnav_flow_seg *seg_tun = NULL; + bool full_match = false; + + memset(segs, 0, sizeof(struct sxe2_fnav_flow_seg) * SXE2_FNAV_SEG_MAX); + seg = &segs[0]; + ret = sxe2_ethtool_fnav_seg_fill(fsp, seg, &full_match); + if (ret) { + LOG_ERROR_BDF("ethtool fill fnav seg failed, ret:%d.\n", ret); + goto l_end; + } + + if (filter->full_key.has_flex_filed) { + full_match = false; + seg->raw[0].offset = filter->full_key.flex_offset; + seg->raw[0].len = 
SXE2_FNAV_FLEX_WROD_SIZE; + seg->raw_cnt = 1; + } + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + seg_tun = &segs[1]; + memcpy(seg_tun, seg, sizeof(*seg)); + } + + sxe2_eth_fnav_outer_hdr_set_eth(filter->flow_type, seg); + +l_end: + return ret; +} + +STATIC int +sxe2_ethtool_fnav_full_key_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_filter_full_key *full_key) +{ + int ret = 0; + enum sxe2_fnav_flow_type flow_type = + sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT); + + if (fsp->flow_type & FLOW_EXT) { + memcpy(full_key->ext_data.usr_def, fsp->h_ext.data, + sizeof(full_key->ext_data.usr_def)); + full_key->ext_data.vlan_type = fsp->h_ext.vlan_etype; + full_key->ext_data.s_vlan_tci = fsp->h_ext.vlan_tci; + memcpy(full_key->ext_mask.usr_def, fsp->m_ext.data, + sizeof(full_key->ext_mask.usr_def)); + full_key->ext_mask.vlan_type = fsp->m_ext.vlan_etype; + full_key->ext_mask.s_vlan_tci = fsp->m_ext.vlan_tci; + full_key->flow_ext = true; + } + + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + full_key->eth = fsp->h_u.ether_spec; + full_key->eth_mask = fsp->m_u.ether_spec; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + full_key->l4.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + full_key->l4.src_port = fsp->h_u.tcp_ip4_spec.psrc; + full_key->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + full_key->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + full_key->ip.v4.tos = fsp->h_u.tcp_ip4_spec.tos; + full_key->l4_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + full_key->l4_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + full_key->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + full_key->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + full_key->mask.v4.tos = fsp->m_u.tcp_ip4_spec.tos; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + full_key->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + full_key->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + full_key->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; + full_key->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto; + full_key->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos; + full_key->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + full_key->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + full_key->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + full_key->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto; + full_key->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + memcpy(full_key->ip.v6.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(full_key->ip.v6.src_ip, fsp->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + full_key->l4.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + full_key->l4.src_port = fsp->h_u.tcp_ip6_spec.psrc; + full_key->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(full_key->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(full_key->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + full_key->l4_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + full_key->l4_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + full_key->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + memcpy(full_key->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(full_key->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + full_key->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + 
full_key->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass; + + if (!fsp->m_u.usr_ip6_spec.l4_proto) + full_key->ip.v6.proto = IPPROTO_NONE; + else + full_key->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto; + + memcpy(full_key->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + memcpy(full_key->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + full_key->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; + full_key->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; + full_key->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +STATIC int sxe2_ethtool_fnav_filter_fill(struct sxe2_vsi *vsi, + struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_filter *filter) +{ + int ret = 0; + u64 ring; + u16 vf; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vf_node *vf_node = NULL; + + filter->flow_type = + sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT); + if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_NONE) { + LOG_ERROR_BDF("unsupport flow type, fsp->flow_type:%d\n", + fsp->flow_type & ~FLOW_EXT); + ret = -EINVAL; + goto l_end; + } + + filter->filter_loc = fsp->location; + filter->fdid_prio = SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_THREE; + + filter->ori_vsi_sw = vsi->id_in_pf; + filter->ori_vsi_hw = vsi->idx_in_dev; + filter->dst_vsi_hw = vsi->idx_in_dev; + filter->rule_vsi_sw = vsi->id_in_pf; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + filter->act_type = SXE2_FNAV_ACT_DROP; + filter->origin_q_index = 0; + filter->q_index = 0; + } else if ((~(ETHTOOL_RX_FLOW_SPEC_RING | + ETHTOOL_RX_FLOW_SPEC_RING_VF)) & + fsp->ring_cookie) { + LOG_DEV_ERR("failed to add filter. unsupported action %lld.\n", + fsp->ring_cookie); + ret = -EOPNOTSUPP; + goto l_end; + } else { + ring = (u32)ethtool_get_flow_spec_ring(fsp->ring_cookie); + vf = (u16)ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!vf) { + if (ring >= vsi->rxqs.q_cnt) + return -EINVAL; + filter->origin_q_index = (u16)ring; + } else { + vf--; + vf_node = sxe2_vf_node_get(adapter, vf); + if (vf_node == NULL || vf_node->vsi == NULL || + vf_node->vsi->idx_in_dev >= SXE2_VSI_NUM) { + ret = -EINVAL; + LOG_ERROR_BDF("add fnav filter failed, \t" + "vf node get failed, vf:%u\n", vf); + goto l_end; + } + if (ring >= vf_node->vsi->rxqs.q_cnt) { + ret = -EINVAL; + LOG_ERROR_BDF("add fnav filter failed, queue index is invalid, \t" + "vf:%u, qidx:%llu, vf_qcnt:%u\n", + vf, ring, vf_node->vsi->rxqs.q_cnt); + goto l_end; + } + filter->vf_idx = vf; + filter->dst_vsi_hw = vf_node->vsi->idx_in_dev; + filter->origin_q_index = (u16)ring; + LOG_DEBUG_BDF("add vf queues fnav filter, vf:%u, \t" + "queue:%llu, vf_vsi:%u, vf_qcnt:%u", + vf, ring, vf_node->vsi->id_in_pf, + vf_node->vsi->rxqs.q_cnt); + } + filter->act_type = SXE2_FNAV_ACT_QINDEX; + filter->q_index = (u16)ring; + } + + filter->q_region = 0; + filter->act_prio = SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_THREE; + filter->complete_report = SXE2_FNAV_TX_DESC_QW0_COMP_RPT_FAIL; + filter->stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_PKTS; + filter->stat_index = adapter->fnav_ctxt.fnav_stat_ctxt.stat_rsv_idx[SXE2_FNAV_STAT_PF]; + + INIT_HLIST_NODE(&filter->hl_node); + + ret = sxe2_ethtool_fnav_full_key_fill(fsp, &filter->full_key); + + if (test_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags)) + filter->tunn_flag = SXE2_FNAV_TUN_FLAG_ANY; + +l_end: + return ret; +} + +STATIC struct sxe2_fnav_filter * +sxe2_ethtool_fnav_filter_search_for_dup(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter) +{ + bool ret; 
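+	/* A rule at the same location whose queue or destination VSI
+	 * changed is treated as a replacement rather than a duplicate.
+	 */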
+ struct sxe2_fnav_filter *filter_tmp = NULL; + struct sxe2_fnav_filter *filter_find = NULL; + + list_for_each_entry(filter_tmp, &vsi->fnav.filter_list, l_node) { + ret = sxe2_fnav_filter_cmp_with_flow_type(filter, filter_tmp); + if (ret) { + if (!(filter->filter_loc == filter_tmp->filter_loc && + (filter->q_index != filter_tmp->q_index || + filter->dst_vsi_hw != filter_tmp->dst_vsi_hw))) { + filter_find = filter_tmp; + } + break; + } + } + return filter_find; +} + +#ifdef SXE2_SUPPORT_ACL +STATIC bool sxe2_is_acl_filter(struct ethtool_rx_flow_spec *fsp) +{ + struct ethtool_tcpip4_spec *tcp_ip4_spec; + struct ethtool_usrip4_spec *usr_ip4_spec; + struct ethhdr *eth_spec; + + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; + + if (tcp_ip4_spec->ip4src && + tcp_ip4_spec->ip4src != htonl(0xFFFFFFFF)) + return true; + + if (tcp_ip4_spec->ip4dst && + tcp_ip4_spec->ip4dst != htonl(0xFFFFFFFF)) + return true; + + if (!tcp_ip4_spec->ip4src && !tcp_ip4_spec->ip4dst && + !tcp_ip4_spec->psrc && !tcp_ip4_spec->pdst && + !tcp_ip4_spec->tos) + return true; + + if (tcp_ip4_spec->psrc && tcp_ip4_spec->psrc != htons(0xFFFF)) + return true; + + if (tcp_ip4_spec->pdst && tcp_ip4_spec->pdst != htons(0xFFFF)) + return true; + + break; + case IPV4_USER_FLOW: + usr_ip4_spec = &fsp->m_u.usr_ip4_spec; + + if (usr_ip4_spec->ip4src && + usr_ip4_spec->ip4src != htonl(0xFFFFFFFF)) + return true; + + if (usr_ip4_spec->ip4dst && + usr_ip4_spec->ip4dst != htonl(0xFFFFFFFF)) + return true; + + if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst) + return true; + + break; + case ETHER_FLOW: + eth_spec = &fsp->m_u.ether_spec; + + if (fsp->m_ext.vlan_tci || fsp->m_ext.vlan_etype) + return false; + + if (!is_broadcast_ether_addr(eth_spec->h_dest) && + !is_zero_ether_addr(eth_spec->h_dest)) + return true; + + if (!is_broadcast_ether_addr(eth_spec->h_source) && + !is_zero_ether_addr(eth_spec->h_source)) + return true; + + if (eth_spec->h_proto && eth_spec->h_proto != htons(0xFFFF)) + return true; + + if (!eth_spec->h_proto && + is_zero_ether_addr(eth_spec->h_source) && + is_zero_ether_addr(eth_spec->h_dest)) + return true; + + break; + } + + return false; +} + +STATIC s32 sxe2_acl_ethtool_input_parse(struct sxe2_vsi *vsi, struct ethtool_rx_flow_spec *fsp, + struct sxe2_acl_filter *filter) +{ + int ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + filter->flow_type = + sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT); + if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_NONE) { + LOG_ERROR_BDF("unsupport flow type, fsp->flow_type:%d\n", + fsp->flow_type & ~FLOW_EXT); + ret = -EINVAL; + goto l_end; + } + + filter->filter_id = SXE2_GEN_FILTER_ID(vsi->idx_in_dev, fsp->location); + ret = sxe2_ethtool_fnav_full_key_fill(fsp, &filter->full_key); + +l_end: + return ret; +} + +STATIC s32 sxe2_acl_ethtool_action_parse(struct sxe2_acl_flow_action *acts, + struct ethtool_rx_flow_spec *fsp, + struct sxe2_acl_filter *filter) +{ + int ret = 0; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + acts[0].type = SXE2_ACL_ACT_DROP; + acts[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_PKT_DROP; + acts[0].data.acl_act.prio = 3; + acts[0].data.acl_act.value = 0; + } else { + acts[0].type = SXE2_ACL_ACT_QINDEX; + acts[0].data.acl_act.mdid = SXE2_ACL_ACTION_MDID_RX_DST_Q; + acts[0].data.acl_act.prio = 3; + acts[0].data.acl_act.value = ethtool_get_flow_spec_ring(fsp->ring_cookie); + } + + return ret; +} + +STATIC int sxe2_ethtool_acl_seg_eth_fill(struct 
ethhdr *eth_spec, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + + if (eth_spec->h_proto) { + LOG_WARN("Ether proto type is not supported.\n"); + ret = -EOPNOTSUPP; + goto l_end; + } + + set_bit(SXE2_FLOW_FLD_ID_ETH_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_ETH_DA, seg->fields); + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_acl_seg_l4_ip4_fill(struct ethtool_rx_flow_spec *fsp, + enum sxe2_flow_hdr l4_proto, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + enum sxe2_flow_fld_id sport, dport; + struct ethtool_tcpip4_spec *l4_ip4_spec = &fsp->m_u.tcp_ip4_spec; + + ret = sxe2_ethtool_fnav_l4_ip4_seg_valid(l4_ip4_spec); + if (ret) { + LOG_ERROR("l4 ipv4 seg is invalid, ret:%d.", ret); + goto l_end; + } + + switch (l4_proto) { + case SXE2_FLOW_HDR_TCP: + sport = SXE2_FLOW_FLD_ID_TCP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_TCP_DST_PORT; + break; + case SXE2_FLOW_HDR_UDP: + sport = SXE2_FLOW_FLD_ID_UDP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_UDP_DST_PORT; + break; + case SXE2_FLOW_HDR_SCTP: + sport = SXE2_FLOW_FLD_ID_SCTP_SRC_PORT; + dport = SXE2_FLOW_FLD_ID_SCTP_DST_PORT; + break; + default: + ret = -EOPNOTSUPP; + break; + } + if (ret) { + LOG_ERROR("l4 protocol type is invalid, ret:%d.\n", ret); + goto l_end; + } + + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + set_bit((int)l4_proto, seg->headers); + + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + set_bit((int)sport, seg->fields); + set_bit((int)dport, seg->fields); + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_acl_seg_usr_ip4_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + struct ethtool_usrip4_spec *usr_ip4_spec = &fsp->m_u.usr_ip4_spec; + + ret = sxe2_ethtool_fnav_usr_ip4_seg_valid(usr_ip4_spec); + if (ret) { + LOG_ERROR("usr ipv4 seg is invalid, ret:%d.", ret); + goto l_end; + } + + if (usr_ip4_spec->proto == 0xFF && + (fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_TCP || + fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_UDP || + fsp->h_u.usr_ip4_spec.proto == SXE2_FNAV_L4_PROT_SCTP)) { + ret = -EOPNOTSUPP; + goto l_end; + } + + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + +l_end: + return ret; +} + +STATIC int sxe2_ethtool_acl_seg_fill(struct ethtool_rx_flow_spec *fsp, + struct sxe2_fnav_flow_seg *seg) +{ + int ret = 0; + enum sxe2_fnav_flow_type flow_type = + sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT); + + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + ret = sxe2_ethtool_acl_seg_eth_fill(&fsp->h_u.ether_spec, seg); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + ret = sxe2_ethtool_acl_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_TCP, + seg); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + ret = sxe2_ethtool_acl_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_UDP, + seg); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + ret = sxe2_ethtool_acl_seg_l4_ip4_fill(fsp, SXE2_FLOW_HDR_SCTP, + seg); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + ret = sxe2_ethtool_acl_seg_usr_ip4_fill(fsp, seg); + break; + default: + ret = -EINVAL; + break; + } + + seg->is_tunnel = false; + + return ret; +} + +STATIC s32 sxe2_acl_ethtool_flow_cfg_add(struct sxe2_vsi *vsi, + struct ethtool_rx_flow_spec *fsp) +{ + s32 ret = 0; + struct sxe2_fnav_flow_seg *seg = NULL; + enum sxe2_fnav_flow_type flow_type; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = 
SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_acl_flow_cfg *flow_cfg = NULL;
+	bool new_alloc_flow = false;
+
+	flow_type = sxe2_ethtool_flow_to_type(fsp->flow_type & ~FLOW_EXT);
+	if (flow_type == SXE2_FNAV_FLOW_TYPE_NONE) {
+		LOG_ERROR_BDF("unsupported flow type, fsp->flow_type:%d\n",
+			fsp->flow_type & ~FLOW_EXT);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+	if (!seg) {
+		LOG_ERROR_BDF("no memory for seg.\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	flow_cfg = sxe2_acl_find_flow_cfg_by_flow_type(vsi, flow_type);
+	if (!flow_cfg) {
+		flow_cfg = devm_kzalloc(dev, sizeof(*flow_cfg), GFP_KERNEL);
+		if (!flow_cfg) {
+			LOG_ERROR_BDF("no memory for flow cfg.\n");
+			ret = -ENOMEM;
+			goto l_free;
+		}
+		new_alloc_flow = true;
+		flow_cfg->flow_type = flow_type;
+	}
+
+	ret = sxe2_ethtool_acl_seg_fill(fsp, seg);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fill fnav seg failed, ret:%d.\n", ret);
+		goto l_free_flow_cfg;
+	}
+
+	ret = sxe2_acl_flow_cfg_add(vsi, flow_cfg, seg);
+	if (ret == -EEXIST) {
+		LOG_DEBUG_BDF("acl flow config exists, skip creation.\n");
+		devm_kfree(dev, seg);
+		if (new_alloc_flow)
+			devm_kfree(dev, flow_cfg);
+
+		/* flow_cfg may have been freed just above, so do not fall
+		 * through to the list add below.
+		 */
+		ret = 0;
+		goto l_end;
+	} else if (ret) {
+		LOG_ERROR_BDF("outer rule add failed, ret:%d\n", ret);
+		goto l_free_flow_cfg;
+	}
+
+	if (new_alloc_flow)
+		sxe2_acl_flow_cfg_add_list(vsi, flow_cfg);
+
+	goto l_end;
+
+l_free_flow_cfg:
+	if (new_alloc_flow)
+		devm_kfree(dev, flow_cfg);
+
+l_free:
+	devm_kfree(dev, seg);
+l_end:
+	return ret;
+}
+
+s32 sxe2_acl_add_rule_ethtool(struct sxe2_vsi *vsi, struct ethtool_rx_flow_spec *fsp)
+{
+	s32 ret = 0;
+	struct sxe2_acl_flow_action acts[1];
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_acl_filter *filter = NULL;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+
+	if (!test_bit(SXE2_FLAG_ACL_CAPABLE, adapter->flags))
+		return -EOPNOTSUPP;
+
+	filter = devm_kzalloc(dev, sizeof(*filter), GFP_KERNEL);
+	if (!filter) {
+		LOG_ERROR_BDF("no memory for input.\n");
+		ret = -ENOMEM;
+		goto l_free;
+	}
+
+	ret = sxe2_acl_ethtool_input_parse(vsi, fsp, filter);
+	if (ret) {
+		LOG_ERROR_BDF("acl check input set failed.\n");
+		goto l_free;
+	}
+
+	ret = sxe2_acl_ethtool_action_parse(acts, fsp, filter);
+	if (ret) {
+		LOG_ERROR_BDF("acl check action set failed.\n");
+		goto l_free;
+	}
+
+	ret = sxe2_acl_ethtool_flow_cfg_add(vsi, fsp);
+	if (ret) {
+		LOG_ERROR_BDF("acl flow cfg add failed, ret:%d\n", ret);
+		goto l_free;
+	}
+
+	ret = sxe2_acl_lut_entry_add(vsi, filter, acts);
+	if (ret) {
+		LOG_ERROR_BDF("acl lut entry add failed, ret:%d\n", ret);
+		goto l_free;
+	}
+
+	goto l_end;
+
+l_free:
+	devm_kfree(dev, filter);
+l_end:
+	return ret;
+}
+#endif
+STATIC int sxe2_ethtool_ntuple_filter_add(struct sxe2_vsi *vsi,
+					  struct ethtool_rxnfc *cmd)
+{
+	int ret;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct ethtool_rx_flow_spec *fsp;
+	struct sxe2_fnav_filter *filter = NULL;
+	struct sxe2_fnav_filter *filter_dup = NULL;
+	struct sxe2_fnav_filter *filter_loc_old = NULL;
+	u32 avail_filter_num = 0;
+	u32 max_filter_cnt;
+	u8 filter_need = 0;
+	struct sxe2_fnav_flow_seg segs[SXE2_FNAV_SEG_MAX];
+
+	if (!test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) {
+		LOG_DEV_ERR(
+			"ntuple feature is not enabled, enable it first with \"ethtool -K {dev} ntuple on\".\n");
+		ret = -EOPNOTSUPP;
+		goto l_end;
+	}
+
+	mutex_lock(&adapter->fnav_ctxt.fnav_state_lock);
+	if (adapter->fnav_ctxt.state != SXE2_FNAV_STATE_READY) {
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	if (fsp->flow_type & FLOW_MAC_EXT) {
+		LOG_ERROR_BDF("unsupported flow type \"FLOW_MAC_EXT\".\n");
+		ret = -EOPNOTSUPP;
+		goto l_unlock;
+	}
+
+	max_filter_cnt = sxe2_ntuple_max_filter_cnt_get_by_vsi(vsi);
+
+	if (fsp->location >= max_filter_cnt) {
+		LOG_ERROR_BDF("location overflow, loc:%u, max_cnt:%u.\n",
+			fsp->location, max_filter_cnt);
+		ret = -ENOSPC;
+		goto l_unlock;
+	}
+#ifdef SXE2_SUPPORT_ACL
+	if (sxe2_is_acl_filter(fsp)) {
+		mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock);
+		ret = sxe2_acl_add_rule_ethtool(vsi, fsp);
+		if (ret)
+			LOG_ERROR_BDF("acl add rule failed, ret:%d\n", ret);
+
+		goto l_end;
+	}
+#endif
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+
+	filter_need = test_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags) ? 2 : 1;
+	filter_loc_old = sxe2_fnav_find_filter_by_loc_unlock(&vsi->fnav, fsp->location);
+	if (filter_loc_old)
+		avail_filter_num = filter_loc_old->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY ? 2 : 1;
+
+	avail_filter_num += sxe2_fnav_num_avail_filter(vsi);
+	if (avail_filter_num < filter_need) {
+		LOG_ERROR_BDF("filter cnt overflow, filter_need:%u.\n",
+			filter_need);
+		ret = -ENOSPC;
+		goto l_filter_unlock;
+	}
+
+	filter = devm_kzalloc(dev, sizeof(*filter), GFP_KERNEL);
+	if (!filter) {
+		LOG_ERROR_BDF("no memory.\n");
+		ret = -ENOMEM;
+		goto l_filter_unlock;
+	}
+
+	ret = sxe2_ethtool_parse_ntuple_userdef(filter, fsp);
+	if (ret) {
+		LOG_ERROR_BDF(
+			"invalid user-def param:0x%llx.\n",
+			be64_to_cpu(*((__force __be64 *)fsp->h_ext.data)));
+		ret = -EINVAL;
+		goto l_filter_unlock;
+	}
+
+	ret = sxe2_ethtool_fnav_filter_fill(vsi, fsp, filter);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fnav filter fill failed, ret:%d\n", ret);
+		goto l_filter_unlock;
+	}
+
+	filter_dup = sxe2_ethtool_fnav_filter_search_for_dup(vsi, filter);
+	if (filter_dup) {
+		LOG_DEV_ERR("duplicate rule is detected, id:%u.\n",
+			filter_dup->filter_loc);
+		ret = -EINVAL;
+		goto l_filter_unlock;
+	}
+
+	if (filter_loc_old) {
+		ret = sxe2_fnav_filter_del(vsi, filter_loc_old);
+		if (ret)
+			goto l_filter_unlock;
+	}
+
+	ret = sxe2_ethtool_fnav_flow_cfg_parse(vsi, fsp, filter, segs);
+	if (ret) {
+		LOG_ERROR_BDF(
+			"fnav flow config parse failed, ret: %d! vsi type: %u, idx: %u\n", ret, vsi->type, vsi->id_in_pf);
+		goto l_filter_unlock;
+	}
+	ret = sxe2_fnav_filter_add_hw(vsi, filter, segs);
+	if (ret) {
+		LOG_ERROR_BDF(
+			"fnav filter add to hw failed, ret: %d! 
vsi type: %u, idx: %u\n", ret, vsi->type, vsi->id_in_pf); + goto l_filter_unlock; + } + + sxe2_fnav_filter_add_list_by_loc(vsi, filter); + + LOG_INFO_BDF("add filter success, flow_type: %d, total cnt: %u\n", + fsp->flow_type, vsi->fnav.filter_cnt); + +l_filter_unlock: + mutex_unlock(&adapter->fnav_ctxt.filter_lock); +l_unlock: + mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock); +l_end: + if (filter && ret) + devm_kfree(dev, filter); + + return ret; +} + +STATIC int sxe2_ethtool_ntuple_filter_del(struct sxe2_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + int ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + + mutex_lock(&adapter->fnav_ctxt.fnav_state_lock); + if (adapter->fnav_ctxt.state != SXE2_FNAV_STATE_READY) { + mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock); + ret = -EBUSY; + goto l_end; + } + ret = sxe2_fnav_del_filter_by_loc(vsi, fsp->location); + mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock); +#ifdef SXE2_SUPPORT_ACL + if (ret == -ENOENT) { + ret = + sxe2_acl_del_filter_by_id(vsi, + SXE2_GEN_FILTER_ID(vsi->idx_in_dev, fsp->location)); + } +#endif +l_end: + return ret; +} + +STATIC bool sxe2_ethtool_fnav_is_action_to_vf(struct sxe2_adapter *adapter, + struct ethtool_rxnfc *cmd, u16 *vf_idx) +{ + struct ethtool_rx_flow_spec *fsp; + u64 vf; + struct sxe2_fnav_filter *filter = NULL; + bool is_to_vf = false; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + ((~(ETHTOOL_RX_FLOW_SPEC_RING | + ETHTOOL_RX_FLOW_SPEC_RING_VF)) & + fsp->ring_cookie)) { + break; + } + vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (vf) { + is_to_vf = true; + *vf_idx = (u16)(vf - 1); + } + break; + case ETHTOOL_SRXCLSRLDEL: + filter = sxe2_fnav_find_filter_by_loc_lock(vsi, + fsp->location); + if (filter && filter->ori_vsi_hw != filter->dst_vsi_hw) + is_to_vf = true; + break; + default: + break; + } + return is_to_vf; +} + +STATIC int sxe2_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + u16 vf_idx = 0; + bool is_action_to_vf = false; + + LOG_DEBUG_BDF("set rxnfc, cmd: %u\n", cmd->cmd); + + if (sxe2_is_safe_mode(adapter)) { + LOG_DEV_ERR( + "ethtool set rx flow in safe mode is not supported.\n"); + return -EINVAL; + } + + is_action_to_vf = + sxe2_ethtool_fnav_is_action_to_vf(adapter, cmd, &vf_idx); + if (is_action_to_vf) { + if (sxe2_vf_id_check(adapter, vf_idx)) { + LOG_ERROR_BDF("vf id is invalid, vf:%u\n", vf_idx); + return -EINVAL; + } + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + mutex_lock(&adapter->vsi_ctxt.lock); + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = sxe2_ethtool_ntuple_filter_add(vsi, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = sxe2_ethtool_ntuple_filter_del(vsi, cmd); + break; + case ETHTOOL_SRXFH: + ret = sxe2_set_rss_flow(vsi, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + if (is_action_to_vf) + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + + return ret; +} + +STATIC u32 sxe2_get_rxft_key_size(struct net_device __always_unused *netdev) +{ + return 
SXE2_RSS_HASH_KEY_SIZE; +} + +STATIC u32 sxe2_get_rxft_indir_size(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + LOG_DEBUG_BDF("rss lut size: %u\n", vsi->rss_ctxt.lut_size); + + return (u32)vsi->rss_ctxt.lut_size; +} + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +STATIC int sxe2_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) +#else +STATIC int sxe2_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#endif +{ + int ret = 0; + u32 i = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + if (sxe2_is_safe_mode(adapter)) { + LOG_DEV_ERR( + "ethtool get rx flow hash in safe mode is not supported.\n"); + return -EINVAL; + } + +#ifdef HAVE_ETHTOOL_RXFH_PARAM + u32 *indir = rxfh->indir; + u8 *key = rxfh->key; + + rxfh->hfunc = ETH_RSS_HASH_TOP; +#else + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; +#endif + + if (indir) { + if (vsi->rss_ctxt.lut) { + for (i = 0; i < vsi->rss_ctxt.lut_size; i++) + indir[i] = (u32)(vsi->rss_ctxt.lut[i]); + } + } + + if (key) { + if (vsi->rss_ctxt.hkey) + memcpy((key), vsi->rss_ctxt.hkey, SXE2_RSS_HASH_KEY_SIZE); + } + + return ret; +} + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +STATIC int sxe2_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +#else +STATIC int sxe2_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#endif +{ + int ret = 0; + u32 i = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u8 *user_key = NULL; + u8 *user_lut = NULL; +#ifdef HAVE_ETHTOOL_RXFH_PARAM + const u32 *indir = rxfh->indir; + const u8 *key = rxfh->key; + const u8 hfunc = rxfh->hfunc; +#endif + + if (sxe2_is_safe_mode(adapter)) { + LOG_DEV_ERR( + "ethtool set rx flow hash in safe mode is not supported.\n"); + return -EINVAL; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + ret = -EOPNOTSUPP; + goto l_unlock; + } + + if (key) { + user_key = + devm_kzalloc(dev, SXE2_RSS_HASH_KEY_SIZE, GFP_KERNEL); + if (!user_key) { + LOG_ERROR_BDF("no memory for user hash key.\n"); + ret = -ENOMEM; + goto l_unlock; + } + memcpy(user_key, key, SXE2_RSS_HASH_KEY_SIZE); + } + + if (indir) { + user_lut = + devm_kzalloc(dev, vsi->rss_ctxt.lut_size, GFP_KERNEL); + if (!user_lut) { + LOG_ERROR_BDF("no memory for user lut.\n"); + ret = -ENOMEM; + goto l_unlock; + } + for (i = 0; i < vsi->rss_ctxt.lut_size; i++) + user_lut[i] = (u8)(indir[i]); + } + + if (key) { + ret = sxe2_fwc_rss_hkey_set(vsi, user_key); + if (ret) { + LOG_ERROR_BDF("set hash key failed, ret: %d.\n", ret); + goto l_unlock; + } + if (vsi->rss_ctxt.hkey) { + memcpy(vsi->rss_ctxt.hkey, user_key, + SXE2_RSS_HASH_KEY_SIZE); + } + } + + if (indir) { + ret = sxe2_fwc_rss_lut_set(vsi, user_lut, + vsi->rss_ctxt.lut_size); + if (ret) { + LOG_ERROR_BDF("set rss lut failed, ret: %d.\n", ret); + goto l_unlock; + } + if (vsi->rss_ctxt.lut) + memcpy(vsi->rss_ctxt.lut, user_lut, vsi->rss_ctxt.lut_size); + } + +l_unlock: + if (user_key) + devm_kfree(dev, user_key); + + if (user_lut) + devm_kfree(dev, user_lut); + + 
mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+static s32 sxe2_get_tx_qc_coalesce(struct ethtool_coalesce *ec,
+				   struct sxe2_q_container *qc)
+{
+	if (!qc->list.cnt)
+		return -EINVAL;
+
+	ec->use_adaptive_tx_coalesce = SXE2_IS_ITR_DYNAMIC(qc);
+	ec->tx_coalesce_usecs = qc->itr_setting;
+
+	return 0;
+}
+
+static s32 sxe2_get_rx_qc_coalesce(struct ethtool_coalesce *ec,
+				   struct sxe2_q_container *qc)
+{
+	if (!qc->list.cnt)
+		return -EINVAL;
+
+	ec->use_adaptive_rx_coalesce = SXE2_IS_ITR_DYNAMIC(qc);
+	ec->rx_coalesce_usecs = qc->itr_setting;
+	ec->rx_coalesce_usecs_high = qc->list.next->irq_data->rate_limit;
+
+	return 0;
+}
+
+static s32 sxe2_get_queue_coalesce(struct sxe2_vsi *vsi,
+				   struct ethtool_coalesce *ec, u32 q_idx)
+{
+	if (q_idx < vsi->txqs.q_cnt && q_idx < vsi->rxqs.q_cnt) {
+		if (sxe2_get_tx_qc_coalesce(ec, SXE2_VSI_TX_QC(vsi, q_idx)))
+			return -EINVAL;
+		if (sxe2_get_rx_qc_coalesce(ec, SXE2_VSI_RX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else if (q_idx < vsi->txqs.q_cnt) {
+		if (sxe2_get_tx_qc_coalesce(ec, SXE2_VSI_TX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else if (q_idx < vsi->rxqs.q_cnt) {
+		if (sxe2_get_rx_qc_coalesce(ec, SXE2_VSI_RX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static s32 sxe2_get_coalesce_interval(struct net_device *netdev,
+				      struct ethtool_coalesce *ec, u32 q_idx)
+{
+	s32 ret = 0;
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) {
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (q_idx == SXE2_COALESCE_QNUM_INVAL)
+		q_idx = 0;
+
+	if (sxe2_get_queue_coalesce(vsi, ec, q_idx))
+		ret = -EINVAL;
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+#ifdef GET_COALESCE_NEED_2_PARAMS
+STATIC int sxe2_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec)
+#else
+STATIC int sxe2_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *ec,
+			     struct kernel_ethtool_coalesce *kernel_coal,
+			     struct netlink_ext_ack *extack)
+#endif
+{
+	return sxe2_get_coalesce_interval(netdev, ec, SXE2_COALESCE_QNUM_INVAL);
+}
+
+STATIC int sxe2_get_per_queue_coalesce(struct net_device *netdev, u32 q_idx,
+				       struct ethtool_coalesce *ec)
+{
+	return sxe2_get_coalesce_interval(netdev, ec, q_idx);
+}
+
+STATIC void sxe2_invalid_itr_print(struct net_device *netdev,
+				   u32 use_adaptive_coalesce,
+				   u32 coalesce_usecs, const s8 *q_type_str)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_adapter *adapter = np->vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (use_adaptive_coalesce)
+		return;
+
+	if (coalesce_usecs % hw->hw_cfg.itr_gran)
+		LOG_NETDEV_INFO("user set %s-usecs to invalid value %d, \t"
+				"the device only supports values that are a multiple of %d, \t"
+				"rounding down and attempting to set %s-usecs to %d\n",
+				q_type_str, coalesce_usecs, hw->hw_cfg.itr_gran, q_type_str,
+				rounddown(coalesce_usecs, hw->hw_cfg.itr_gran));
+}
+
+static void sxe2_invalid_rate_limit_print(struct net_device *netdev,
+					  struct ethtool_coalesce *ec)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_adapter *adapter = np->vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (ec->use_adaptive_tx_coalesce || ec->use_adaptive_rx_coalesce)
+		return;
+
+	if (ec->rx_coalesce_usecs_high % hw->hw_cfg.credit_interval_gran)
+		LOG_NETDEV_INFO("user set rx_coalesce_usecs_high to invalid value %d, \t"
+				"the device only supports values that are a multiple of %d, rounding down \t"
+				"and attempting to set rx_coalesce_usecs_high to %d\n",
+				ec->rx_coalesce_usecs_high,
+				hw->hw_cfg.credit_interval_gran,
+				rounddown(ec->rx_coalesce_usecs_high,
+					  hw->hw_cfg.credit_interval_gran));
+}
+
+STATIC void sxe2_invalid_coalesce_print(struct net_device *netdev,
+					struct ethtool_coalesce *ec)
+{
+	sxe2_invalid_itr_print(netdev, ec->use_adaptive_tx_coalesce,
+			       ec->tx_coalesce_usecs, SXE2_Q_TYPE_STR_TX);
+	sxe2_invalid_itr_print(netdev, ec->use_adaptive_rx_coalesce,
+			       ec->rx_coalesce_usecs, SXE2_Q_TYPE_STR_RX);
+	sxe2_invalid_rate_limit_print(netdev, ec);
+}
+
+STATIC s32 sxe2_set_qc_itr(struct sxe2_q_container *qc, const s8 *q_type_str,
+			   u32 use_adaptive_coalesce, u32 coalesce_usecs)
+{
+	struct sxe2_irq_data *irq_data = qc->list.next->irq_data;
+	struct sxe2_vsi *vsi = irq_data->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+	u32 itr_setting = qc->itr_setting;
+	struct net_device *netdev = vsi->netdev;
+
+	if (!use_adaptive_coalesce) {
+		if (coalesce_usecs >
+		    SXE2_VF_INT_ITR_INTERVAL * hw->hw_cfg.itr_gran) {
+			LOG_NETDEV_INFO("invalid value, %s-usecs range is 0-%d\n",
+					q_type_str,
+					SXE2_VF_INT_ITR_INTERVAL * hw->hw_cfg.itr_gran);
+			return -EINVAL;
+		}
+		qc->itr_mode = SXE2_ITR_STATIC;
+		qc->itr_setting =
+			rounddown(coalesce_usecs, hw->hw_cfg.itr_gran);
+		sxe2_hw_irq_itr_set(hw, irq_data->idx_in_pf, qc->itr_idx,
+				    (u16)coalesce_usecs);
+		(void)sxe2_flush(hw);
+	} else {
+		if (coalesce_usecs != itr_setting) {
+			LOG_NETDEV_INFO("%s interrupt throttling cannot be changed \t"
+					"while adaptive-%s is enabled; \t"
+					"requested coalesce from %d to %d, adaptive is %d\n",
+					q_type_str, q_type_str, itr_setting,
+					coalesce_usecs, use_adaptive_coalesce);
+			return -EINVAL;
+		}
+		qc->itr_mode = SXE2_ITR_DYNAMIC;
+	}
+	return 0;
+}
+
+static s32 sxe2_set_qc_rate_limit(struct ethtool_coalesce *ec,
+				  struct sxe2_q_container *qc,
+				  struct sxe2_vsi *vsi)
+{
+	struct sxe2_irq_data *irq_data = qc->list.next->irq_data;
+	struct sxe2_adapter *adapter = irq_data->vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+	struct net_device *netdev = vsi->netdev;
+
+	if (ec->rx_coalesce_usecs_high >
+	    SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX *
+	    hw->hw_cfg.credit_interval_gran ||
+	    (ec->rx_coalesce_usecs_high &&
+	     ec->rx_coalesce_usecs_high < hw->hw_cfg.credit_interval_gran)) {
+		LOG_NETDEV_INFO("invalid value %d, rx_coalesce_usecs_high valid \t"
+				"values are 0 (disabled) or within the range [%d-%d]\n",
+				ec->rx_coalesce_usecs_high,
+				hw->hw_cfg.credit_interval_gran,
+				SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX *
+				hw->hw_cfg.credit_interval_gran);
+		return -EINVAL;
+	}
+
+	if (ec->rx_coalesce_usecs_high != irq_data->rate_limit &&
+	    (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
+		LOG_NETDEV_INFO("invalid value, rx_coalesce_usecs_high can be modified \t"
+				"only when adaptive-tx and adaptive-rx are disabled\n");
+		return -EINVAL;
+	}
+
+	if ((u16)ec->rx_coalesce_usecs_high != irq_data->rate_limit)
+		irq_data->rate_limit = (u16)ec->rx_coalesce_usecs_high;
+
+	sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf,
+				   irq_data->rate_limit);
+
+	return 0;
+}
+
+static s32 sxe2_set_all_queue_coalesce(struct net_device *netdev,
+				       struct ethtool_coalesce *ec)
+{
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	u16 irq_idx;
+	s32 ret;
+	struct sxe2_irq_data *irq_data;
+
+	sxe2_for_each_vsi_irq(vsi, irq_idx) {
+		irq_data = vsi->irqs.irq_data[irq_idx];
+		if (SXE2_IRQ_HAS_TXQ(irq_data)) {
+			ret = sxe2_set_qc_itr(&irq_data->tx, SXE2_Q_TYPE_STR_TX,
+					      ec->use_adaptive_tx_coalesce,
+					      ec->tx_coalesce_usecs);
+			if (ret)
+				return ret;
+		}
+		if (SXE2_IRQ_HAS_RXQ(irq_data)) {
+			ret = sxe2_set_qc_itr(&irq_data->rx, SXE2_Q_TYPE_STR_RX,
+					      ec->use_adaptive_rx_coalesce,
+					      ec->rx_coalesce_usecs);
+			if (ret)
+				return ret;
+			ret = sxe2_set_qc_rate_limit(ec, &irq_data->rx, vsi);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static s32 sxe2_set_queue_coalesce(struct net_device *netdev,
+				   struct ethtool_coalesce *ec, u32 q_idx)
+{
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	s32 ret;
+
+	if (q_idx < vsi->txqs.q_cnt && q_idx < vsi->rxqs.q_cnt) {
+		ret = sxe2_set_qc_itr(SXE2_VSI_TX_QC(vsi, q_idx),
+				      SXE2_Q_TYPE_STR_TX,
+				      ec->use_adaptive_tx_coalesce,
+				      ec->tx_coalesce_usecs);
+		if (ret)
+			return ret;
+
+		ret = sxe2_set_qc_itr(SXE2_VSI_RX_QC(vsi, q_idx),
+				      SXE2_Q_TYPE_STR_RX,
+				      ec->use_adaptive_rx_coalesce,
+				      ec->rx_coalesce_usecs);
+		if (ret)
+			return ret;
+		ret = sxe2_set_qc_rate_limit(ec, SXE2_VSI_RX_QC(vsi, q_idx),
+					     vsi);
+		if (ret)
+			return ret;
+	} else if (q_idx < vsi->txqs.q_cnt) {
+		ret = sxe2_set_qc_itr(SXE2_VSI_TX_QC(vsi, q_idx),
+				      SXE2_Q_TYPE_STR_TX,
+				      ec->use_adaptive_tx_coalesce,
+				      ec->tx_coalesce_usecs);
+		if (ret)
+			return ret;
+	} else if (q_idx < vsi->rxqs.q_cnt) {
+		ret = sxe2_set_qc_itr(SXE2_VSI_RX_QC(vsi, q_idx),
+				      SXE2_Q_TYPE_STR_RX,
+				      ec->use_adaptive_rx_coalesce,
+				      ec->rx_coalesce_usecs);
+		if (ret)
+			return ret;
+		ret = sxe2_set_qc_rate_limit(ec, SXE2_VSI_RX_QC(vsi, q_idx),
+					     vsi);
+		if (ret)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
+ + return 0; +} + +static s32 sxe2_set_coalesce_interval(struct net_device *netdev, + struct ethtool_coalesce *ec, u32 q_idx) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + if (q_idx == SXE2_COALESCE_QNUM_INVAL) + ret = sxe2_set_all_queue_coalesce(netdev, ec); + else + ret = sxe2_set_queue_coalesce(netdev, ec, q_idx); + + if (ret) + goto l_unlock; + + sxe2_invalid_coalesce_print(netdev, ec); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef SET_COALESCE_NEED_2_PARAMS +STATIC int sxe2_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +#else +STATIC int sxe2_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#endif +{ + return sxe2_set_coalesce_interval(netdev, ec, SXE2_COALESCE_QNUM_INVAL); +} + +STATIC int sxe2_set_per_queue_coalesce(struct net_device *netdev, u32 q_idx, + struct ethtool_coalesce *ec) +{ + return sxe2_set_coalesce_interval(netdev, ec, q_idx); +} + +STATIC u16 sxe2_max_rxq_get(struct sxe2_adapter *adapter) +{ + u16 max_rxq; + + max_rxq = + (u16)min3(adapter->irq_ctxt.irq_layout.lan, (u16)num_online_cpus(), + (u16)adapter->q_ctxt.max_rxq_cnt); + return (max_rxq > SXE2_VSI_TXRX_Q_MAX_CNT) ? SXE2_VSI_TXRX_Q_MAX_CNT : + max_rxq; +} + +STATIC u16 sxe2_max_txq_get(struct sxe2_adapter *adapter) +{ + u16 max_txq; + + max_txq = + (u16)min3(adapter->irq_ctxt.irq_layout.lan, (u16)num_online_cpus(), + (u16)adapter->q_ctxt.max_txq_cnt); + return (max_txq > SXE2_VSI_TXRX_Q_MAX_CNT) ? 
SXE2_VSI_TXRX_Q_MAX_CNT : + max_txq; +} + +STATIC u32 sxe2_combined_cnt_get(struct sxe2_vsi *vsi) +{ + u32 combined = 0; + s32 i; + + sxe2_for_each_vsi_irq(vsi, i) { + struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[i]; + + if (irq_data->rx.list.cnt && irq_data->tx.list.cnt) + combined++; + } + + return combined; +} + +STATIC void sxe2_channels_get(struct net_device *netdev, + struct ethtool_channels *channel) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + goto l_unlock; + + channel->max_rx = sxe2_max_rxq_get(adapter); + channel->max_tx = sxe2_max_txq_get(adapter); + channel->max_combined = + (u32)min_t(int, channel->max_rx, channel->max_tx); + + channel->combined_count = sxe2_combined_cnt_get(vsi); + channel->rx_count = vsi->rxqs.q_cnt - channel->combined_count; + channel->tx_count = vsi->txqs.q_cnt - channel->combined_count; + + if (test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags)) { + channel->max_combined += adapter->macvlan_ctxt.max_num_macvlan; + channel->combined_count += adapter->macvlan_ctxt.num_macvlan; + } + + channel->other_count = 1; + channel->max_other = channel->other_count; + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +STATIC u32 sxe2_ethtool_priv_flags_get(struct net_device *netdev) +{ + u32 i, flags = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + const struct sxe2_priv_flag *priv_flag; + + for (i = 0; i < SXE2_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &sxe2_gstrings_priv_flags[i]; + if (test_bit((int)priv_flag->adapter_flag_bitno, + adapter->flags)) + flags |= (u32)BIT(i); + } + return flags; +} + +STATIC void sxe2_fnav_tunnel_flag_set(struct sxe2_adapter *adapter, u32 flags) +{ + if ((flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_FNAV_TUNNEL)) && + !test_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags)) { + set_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags); + } else if (!(flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_FNAV_TUNNEL)) && + test_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags)) { + clear_bit(SXE2_FLAG_FNAV_TUNNEL_ENABLE, adapter->flags); + } +} + +static s32 sxe2_legacy_rx_flag_set(struct sxe2_adapter *adapter, u32 flags) +{ + s32 ret = 0; + bool need_downup = false; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct net_device *netdev = vsi->netdev; + bool old_legacy_rx = + (test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, adapter->flags) != 0); + + if ((flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_LEGACY_RX)) && + !test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, adapter->flags)) { + need_downup = true; + set_bit(SXE2_FLAG_LEGACY_RX_ENABLE, adapter->flags); + } else if (!(flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_LEGACY_RX)) && + test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, adapter->flags)) { + need_downup = true; + clear_bit(SXE2_FLAG_LEGACY_RX_ENABLE, adapter->flags); + } + + if (need_downup) { + ret = sxe2_vsi_down_up(vsi); + if (ret) { + if (old_legacy_rx) { + set_bit(SXE2_FLAG_LEGACY_RX_ENABLE, + adapter->flags); + } else { + clear_bit(SXE2_FLAG_LEGACY_RX_ENABLE, + adapter->flags); + } + LOG_NETDEV_ERR("set legacy rx priv flag err %d\n", ret); + } + } + + return ret; +} + +STATIC int sxe2_ethtool_priv_flags_set(struct net_device *netdev, u32 flags) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + 
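+	/* legacy-rx toggling can fail on its own; record the failure,
+	 * still apply the remaining flags, then report -EINVAL.
+	 */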
bool part_failed = false; + + if (flags >= BIT(SXE2_PRIV_FLAG_ARRAY_SIZE)) { + ret = -EINVAL; + goto l_out; + } + + ret = sxe2_legacy_rx_flag_set(adapter, flags); + if (ret) + part_failed = true; + + if ((flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_MDD_AUTO_RESET_VF)) && + !test_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, adapter->flags)) { + set_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, adapter->flags); + } else if (!(flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_MDD_AUTO_RESET_VF)) && + test_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, adapter->flags)) { + clear_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, adapter->flags); + } + + if ((flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_DCBX_AGENT)) && + !test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) { + ret = sxe2_dcbx_agent_enable(adapter); + } else if (!(flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_DCBX_AGENT)) && + test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) { + ret = sxe2_dcbx_agent_disable(adapter); + } + + sxe2_fnav_tunnel_flag_set(adapter, flags); + + if ((flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) && + !test_bit(SXE2_FLAG_LINK_DOWN_ON_CLOSE, adapter->flags)) { + LOG_NETDEV_WARN("Setting link-down-on-close enabled on this port\n"); + set_bit(SXE2_FLAG_LINK_DOWN_ON_CLOSE, adapter->flags); + } else if (!(flags & BIT(SXE2_ETHTOOL_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) && + test_bit(SXE2_FLAG_LINK_DOWN_ON_CLOSE, adapter->flags)) { + clear_bit(SXE2_FLAG_LINK_DOWN_ON_CLOSE, adapter->flags); + } + + if (part_failed) + ret = -EINVAL; + +l_out: + return ret; +} + +s32 sxe2_fwc_sff_eeprom_get(struct sxe2_adapter *adapter, bool is_qsfp, + u16 bus_addr, u16 page, u16 offset, u16 data_len, + struct sxe2_sfp_resp *sff_value) +{ + s32 ret = 0; + struct sxe2_sfp_req sfp_req = {}; + struct sxe2_cmd_params cmd = {}; + + sfp_req.is_wr = 0; + sfp_req.is_qsfp = (u8)is_qsfp; + sfp_req.bus_addr = cpu_to_le16(bus_addr); + sfp_req.page_cnt = cpu_to_le16(page); + sfp_req.offset = cpu_to_le16(offset); + sfp_req.data_len = cpu_to_le16(data_len); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_GET_OPT_DATA_INFO, &sfp_req, + sizeof(sfp_req), sff_value, + sizeof(*sff_value) + data_len); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("get sff eeprom failed, ret=%d\n", ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC int sxe2_module_info_get(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + int ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_sfp_resp *sff_value = + kzalloc(sizeof(struct sxe2_sfp_resp) + sizeof(u8) * EEPROM_DATALEN, + GFP_KERNEL); + struct sxe2_sfp_resp *sff8472_comp = + kzalloc(sizeof(struct sxe2_sfp_resp) + sizeof(u8) * EEPROM_DATALEN, + GFP_KERNEL); + struct sxe2_sfp_resp *sff8472_swap = + kzalloc(sizeof(struct sxe2_sfp_resp) + sizeof(u8) * EEPROM_DATALEN, + GFP_KERNEL); + struct sxe2_sfp_resp *sff8636_rev = + kzalloc(sizeof(struct sxe2_sfp_resp) + sizeof(u8) * EEPROM_DATALEN, + GFP_KERNEL); + + if (!sff_value || !sff8472_comp || !sff8472_swap || !sff8636_rev) { + ret = -ENOSPC; + LOG_NETDEV_ERR("sff module info get, there is no space to alloc.\n"); + goto l_out; + } + + ret = sxe2_fwc_sff_eeprom_get(adapter, 0, SXE2_SFP_E2P_I2C_7BIT_ADDR0, + 0, SXE2_MODULE_SFF_PHY_DEV_IDENTIFIER, + EEPROM_DATALEN, sff_value); + if (ret) + goto l_out; + + switch (sff_value->data[0]) { + case SXE2_SFP_TYPE_SFP: + ret = sxe2_fwc_sff_eeprom_get(adapter, 0, + SXE2_SFP_E2P_I2C_7BIT_ADDR0, 0, + SXE2_MODULE_SFF_8472_COMP, + EEPROM_DATALEN, sff8472_comp); + if (ret) + goto l_out; 
+ + ret = sxe2_fwc_sff_eeprom_get(adapter, 0, + SXE2_SFP_E2P_I2C_7BIT_ADDR0, 0, + SXE2_MODULE_SFF_8472_SWAP, + EEPROM_DATALEN, sff8472_swap); + if (ret) + goto l_out; + + if (sff8472_swap->data[0] & SXE2_MODULE_SFF_ADDR_MODE) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp->data[0] && + (sff8472_swap->data[0] & + SXE2_MODULE_SFF_DIAG_CAPAB)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + break; + case SXE2_SFP_TYPE_QSFP_PLUS: + case SXE2_SFP_TYPE_QSFP28: + ret = sxe2_fwc_sff_eeprom_get(adapter, 1, + SXE2_SFP_E2P_I2C_7BIT_ADDR0, 0, + SXE2_MODULE_REVISION_ADDR, + EEPROM_DATALEN, sff8636_rev); + if (ret) + goto l_out; + + if (sff8636_rev->data[0] > SXE2_MODULE_REVISION_SFF_8436) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = SXE2_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = SXE2_MODULE_QSFP_MAX_LEN; + } + break; + default: + LOG_NETDEV_WARN("sff module type not recognized.\n"); + ret = -EINVAL; + goto l_out; + } + LOG_INFO_BDF("get module info ok.\n"); +l_out: + kfree(sff_value); + + kfree(sff8472_comp); + + kfree(sff8472_swap); + + kfree(sff8636_rev); + + return ret; +} + +STATIC int sxe2_module_eeprom_get(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + bool is_sfp = false; + u32 i, j, k; + u16 offset = 0; + u8 page = 0; + u8 addr = SXE2_SFP_E2P_I2C_7BIT_ADDR0; + struct sxe2_sfp_resp *sff_value = + kzalloc(sizeof(struct sxe2_sfp_resp) + + SFF_READ_BLOCK_SIZE_8 * sizeof(u8), + GFP_KERNEL); + u8 upper_page_enable = 0; + + if (!sff_value) { + ret = -ENOSPC; + LOG_NETDEV_ERR("sff module eeprom get, there is no space to alloc.\n"); + goto l_out; + } + + if (!eeprom || !eeprom->len || !data) { + ret = -EINVAL; + goto l_out; + } + + ret = sxe2_fwc_sff_eeprom_get(adapter, 0, SXE2_SFP_E2P_I2C_7BIT_ADDR0, + 0, SXE2_MODULE_SFF_PHY_DEV_IDENTIFIER, + SFF_READ_BLOCK_SIZE_8, sff_value); + if (ret) + goto l_out; + + if (sff_value->data[0] == SXE2_SFP_TYPE_SFP) + is_sfp = true; + + upper_page_enable = sff_value->data[SXE2_SFF_STATUS_INDICATER]; + memset(data, 0, eeprom->len); + for (i = 0; i < SFF_READ_BLOCK_SIZE_8; i++) + sff_value->data[i] = 0; + + for (i = 0; i < eeprom->len; i += SFF_READ_BLOCK_SIZE_8) { + offset = (u16)(eeprom->offset + i); + page = 0; + + if (is_sfp) { + if (offset >= ETH_MODULE_SFF_8079_LEN) { + offset -= ETH_MODULE_SFF_8079_LEN; + addr = SXE2_SFP_E2P_I2C_7BIT_ADDR1; + } + } else { + while (offset >= ETH_MODULE_SFF_8436_LEN) { + offset -= ETH_MODULE_SFF_8436_LEN / 2; + page++; + } + } + + if (page != 0 && (upper_page_enable & SXE2_SFF_FLAT_MEM)) + page = 0; + + for (j = 0; j < SXE2_MODULE_REPEAT_TIMES; j++) { + ret = sxe2_fwc_sff_eeprom_get( + adapter, !is_sfp, addr, page, + (u8)offset, SFF_READ_BLOCK_SIZE_8, + sff_value); + LOG_NETDEV_DEBUG( + "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n", + addr, offset, page, is_sfp, + sff_value->data[0], sff_value->data[1], + sff_value->data[2], sff_value->data[3], + sff_value->data[4], sff_value->data[5], + sff_value->data[6], sff_value->data[7], + ret); + if (ret) { + usleep_range(1500, 2500); + for (k = 0; k < SFF_READ_BLOCK_SIZE_8; + k++) { + sff_value->data[k] = 0; + 
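+ /* zeroed so a failed block read cannot leak the + * previous block into the copy-out below */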
} + continue; + } + break; + } + + if ((i + SFF_READ_BLOCK_SIZE_8) < eeprom->len) { + for (k = 0; k < SFF_READ_BLOCK_SIZE_8; k++) + *(data + i + k) = sff_value->data[k]; + } else { + for (k = 0; i + k < eeprom->len; k++) + *(data + i + k) = sff_value->data[k]; + } + } + +l_out: + kfree(sff_value); + + return ret; +} + +#ifdef SUPPORTED_ETHTOOL_EEPROM_BY_PAGE +STATIC int sxe2_module_eeprom_get_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + s32 ret; + struct ethtool_eeprom eeprom = {0}; + u32 offset = page_data->offset; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_sfp_resp *sff_value = + kzalloc(sizeof(struct sxe2_sfp_resp) + sizeof(u8) * SFF_READ_BLOCK_SIZE_8, + GFP_KERNEL); + + if (!sff_value) { + ret = -ENOSPC; + LOG_NETDEV_ERR("sff module eeprom get, there is no space to alloc.\n"); + return ret; + } + + ret = sxe2_fwc_sff_eeprom_get(adapter, 0, SXE2_SFP_E2P_I2C_7BIT_ADDR0, + 0, SXE2_MODULE_SFF_PHY_DEV_IDENTIFIER, + SFF_READ_BLOCK_SIZE_8, sff_value); + if (ret) + goto end; + + if (page_data->page) + offset += page_data->page * (ETH_MODULE_SFF_8079_LEN / 2); + + if (sff_value->data[0] == SXE2_SFP_TYPE_SFP && + page_data->i2c_address == SXE2_SFP_E2P_I2C_7BIT_ADDR1) { + offset += ETH_MODULE_SFF_8079_LEN; + } + + eeprom.offset = offset; + eeprom.len = page_data->length; + + ret = sxe2_module_eeprom_get(netdev, &eeprom, page_data->data); + if (ret < 0) + goto end; + + ret = (int)page_data->length; +end: + kfree(sff_value); + return ret; +} +#endif + +STATIC s32 sxe2_lbtest_txrx_cfg(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + ret = sxe2_tx_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("open: tx config err, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_rx_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("open: rx config err, ret=%d\n", ret); + goto l_rx_fail; + } + return 0; +l_rx_fail: + sxe2_tx_rings_res_free(vsi); +l_end: + return ret; +} + +STATIC s32 sxe2_lbtest_txrx_free(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + bool need_reset = false; + + ret = sxe2_vsi_down(vsi); + if (ret) { + LOG_ERROR_BDF("vsi down failed, vsi %d error %d\n", + vsi->idx_in_dev, ret); + } + + sxe2_vsi_irqs_clear_free(vsi); + + if (need_reset) + sxe2_trigger_and_wait_resetting(adapter); + + sxe2_tx_rings_res_free(vsi); + + sxe2_rx_rings_res_free(vsi); + + return ret; +} + +STATIC s32 sxe2_lbtest_create_frame(struct device *dev, u8 *mac, u8 **ret_data, + u16 size) +{ + u8 *data; + + if (!dev) + return -EINVAL; + + data = devm_kzalloc(dev, size, GFP_KERNEL); + if (!data) + return -ENOMEM; + memset(data, 0xFF, size); + + memcpy(data, mac, ETH_ALEN); + + data[32] = 0xDE; + data[42] = 0xAD; + data[44] = 0xBE; + data[46] = 0xEF; + + *ret_data = data; + + return 0; +} + +STATIC s32 sxe2_lbtest_frames_xmit(struct sxe2_queue *txq, u8 *data, u16 size) +{ + union sxe2_tx_data_desc *tx_desc; + struct sxe2_tx_buf *tx_buf; + dma_addr_t dma; + u64 td_cmd; + s32 ret = 0; + + tx_desc = SXE2_TX_DESC(txq, txq->next_to_use); + tx_buf = &txq->tx_buf[txq->next_to_use]; + + dma = dma_map_single(txq->dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(txq->dev, dma)) { + ret = -EINVAL; + goto l_out; + } + tx_desc->read.buf_addr = cpu_to_le64(dma); + + td_cmd = (u64)(SXE2_TXDD_CMD_EOP | SXE2_TXDD_CMD_RS); + tx_desc->read.cmd_type_offset_bsz = + cpu_to_le64(SXE2_TX_DESC_DTYPE_DATA | 
(td_cmd << SXE2_TXDD_CMD_S) | + ((u64)0 << SXE2_TXDD_OFFSET_S) | + ((u64)size << SXE2_TXDD_BUF_SZ_S) | + ((u64)0 << SXE2_TXDD_L2TAG1_S)); + + tx_buf->next_to_watch = tx_desc; + + /* in order to force CPU ordering */ + wmb(); + + txq->next_to_use++; + if (txq->next_to_use >= txq->depth) + txq->next_to_use = 0; + + writel(txq->next_to_use, txq->desc.tail); + + usleep_range(ETHTOOL_SELFTEST_SLEEP_MIN, ETHTOOL_SELFTEST_SLEEP_MAX); + dma_unmap_single(txq->dev, dma, size, DMA_TO_DEVICE); + +l_out: + return ret; +} + +STATIC bool sxe2_lbtest_frame_check(u8 *frame) +{ + bool ret = false; + + if (frame[32] == 0xDE && frame[42] == 0xAD && frame[44] == 0xBE && + frame[46] == 0xEF && frame[48] == 0xFF) { + ret = true; + } + + return ret; +} + +STATIC s32 sxe2_lbtest_frames_receive(struct sxe2_queue *rxq) +{ + struct sxe2_rx_buf *rx_buf; + s32 frames_num, i; + u8 *received_buf; + struct sxe2_vsi *vsi = rxq->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + union sxe2_rx_desc *rx_desc; + + frames_num = 0; + for (i = 0; i < rxq->depth; i++) { + rx_desc = SXE2_RX_DESC(rxq, i); + if (!(sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_STATUS0_EOP) | + BIT(SXE2_RX_DESC_STATUS0_DD)))) { + LOG_DEBUG_BDF("Check lb packet status0:0x%x\n", + rx_desc->wb.status0_err); + continue; + } + rx_buf = &rxq->rx_buf[i]; + received_buf = page_address(rx_buf->page) + rx_buf->pg_offset; + if (sxe2_lbtest_frame_check(received_buf)) { + LOG_DEBUG_BDF("check lbtest frame OK.\n"); + frames_num++; + } else { + LOG_ERROR_BDF("failed to checkout lbtest frame. status0_err=0x%x\n", + rx_desc->wb.status0_err); + } + } + + return frames_num; +} + +STATIC s32 sxe2_lbtest_loopback_set(struct sxe2_adapter *adapter, bool enable) +{ + struct sxe2_cmd_params cmd = { 0 }; + s32 ret; + struct sxe2_fw_loop_back_config req; + + req.enable = (u8)enable; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ETHTOOL_LOOPBACK_SET, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to set loopback,enable=%d ret=%d\n", + enable, ret); + } + + return ret; +} + +static bool sxe2_vfs_is_active(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf_node; + u16 vf_idx; + + sxe2_for_each_vf(adapter, vf_idx) { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + vf_node = sxe2_vf_node_get(adapter, vf_idx); + if (vf_node && + test_bit(SXE2_VF_STATE_ACTIVE, vf_node->states)) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + return true; + } + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + return false; +} + +static s32 sxe2_frames_loopback(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + s32 frames_num, frames_valid; + s32 i; + u8 *tx_frame; + struct sxe2_queue *txq, *rxq; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct net_device *netdev = vsi->netdev; + + txq = vsi->txqs.q[0]; + rxq = vsi->rxqs.q[0]; + + ret = sxe2_lbtest_create_frame(dev, lbtest_unicast, &tx_frame, + SXE2_LB_FRAME_SIZE); + if (ret) { + LOG_NETDEV_ERR("failed to create loopback frame.\n"); + goto l_end; + } + + frames_num = min_t(s32, txq->depth, ETHTOOL_SELFTEST_FRAME_COUNT); + for (i = 0; i < frames_num; i++) { + ret = sxe2_lbtest_frames_xmit(txq, tx_frame, + SXE2_LB_FRAME_SIZE); + if (ret) { + LOG_NETDEV_ERR("failed to send test frame.\n"); + goto frame_free; + } + } + + frames_valid = sxe2_lbtest_frames_receive(rxq); + if (!frames_valid) { + LOG_NETDEV_ERR("failed to receive loopback packets.\n"); + ret = -EFAULT; + } else if (frames_valid != 
frames_num) { + LOG_NETDEV_ERR("failed to receive all loopback packets.\n"); + ret = -EFAULT; + } + +frame_free: + devm_kfree(dev, tx_frame); + +l_end: + return ret; +} + +static s32 sxe2_lbtest_vsi_open(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_queue *rxq; + struct sxe2_adapter *adapter = vsi->adapter; + struct net_device *netdev = vsi->netdev; + + ret = sxe2_lbtest_txrx_cfg(vsi); + if (ret) { + LOG_NETDEV_ERR("failed to config txrx for the loopback test.\n"); + goto l_err; + } + + rxq = vsi->rxqs.q[0]; + + if (sxe2_rx_buffers_alloc(rxq, SXE2_DESC_UNUSED(rxq))) { + ret = -ENOMEM; + LOG_NETDEV_ERR("failed to alloc rx buffer for the loopback test.\n"); + goto free_txrx; + } + + ret = sxe2_vsi_irqs_configure(vsi); + if (ret) { + ret = -ENOMEM; + LOG_NETDEV_ERR("failed to config vsi for the loopback test.\n"); + goto config_err; + } + + return 0; + +config_err: + (void)sxe2_txqs_stop(vsi); + (void)sxe2_rxqs_stop(vsi); + +free_txrx: + if (sxe2_lbtest_txrx_free(vsi)) + LOG_NETDEV_ERR("could not disable test rings\n"); + +l_err: + return ret; +} + +STATIC s32 sxe2_loopback_test(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + bool if_running = false; + struct sxe2_vsi *test_vsi; + s32 status, ret = 0; + bool link_status; + + mutex_lock(&adapter->link_ctxt.link_status_lock); + link_status = sxe2_get_pf_link_status(adapter); + mutex_unlock(&adapter->link_ctxt.link_status_lock); + if (!link_status) { + LOG_NETDEV_ERR("nic link is down, loopback test failed.\n"); + return SXE2_SELFTEST_RTN_FAIL; + } + + if (sxe2_vfs_is_active(adapter)) + return -EFAULT; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto vsi_unlock; + } + + if_running = netif_running(netdev); + if (if_running) { + LOG_DEBUG_BDF("close vsi before online test.\n"); + ret = sxe2_vsi_close(vsi); + if (ret) { + LOG_NETDEV_ERR("main vsi close failed.\n"); + goto vsi_unlock; + } + } + + test_vsi = sxe2_loopback_vsi_create(adapter); + if (!test_vsi) { + LOG_NETDEV_ERR("failed to create a VSI for the loopback test\n"); + ret = -ENOMEM; + goto vsi_unlock; + } + + ret = sxe2_lbtest_vsi_open(test_vsi); + if (ret) + goto vsi_destroy; + + ret = sxe2_lbtest_loopback_set(adapter, true); + if (ret) { + LOG_NETDEV_ERR("failed to enable mac loopback.\n"); + goto mac_loopback_set_failed; + } + + ret = sxe2_mac_rule_add(test_vsi, lbtest_unicast); + if (ret) { + LOG_NETDEV_ERR("failed to add mac rule.\n"); + goto mac_rule_add_failed; + } + + ret = sxe2_frames_loopback(test_vsi); + + if (sxe2_mac_rule_del(adapter, test_vsi->idx_in_dev, lbtest_unicast)) + LOG_NETDEV_ERR("failed to remove mac rule from test vsi.\n"); + +mac_rule_add_failed: + if (sxe2_lbtest_loopback_set(adapter, false)) + LOG_NETDEV_ERR("failed to disable mac loopback.\n"); + +mac_loopback_set_failed: + if (sxe2_lbtest_txrx_free(test_vsi)) + LOG_NETDEV_ERR("could not disable test rings\n"); + +vsi_destroy: + test_vsi->netdev = NULL; + sxe2_vsi_destroy_unlock(test_vsi); + +vsi_unlock: + if (if_running) { + LOG_DEBUG_BDF("Re open vsi after online test.\n"); + status = sxe2_vsi_open(vsi); + if (status) { + LOG_NETDEV_ERR("failed to open device %s, error %d\n", + adapter->dev_name, status); + } + } + + mutex_unlock(&adapter->vsi_ctxt.lock); + + return ret; +} + +STATIC s32 sxe2_link_test(struct net_device *netdev) +{ + bool port_state; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct 
sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!netif_carrier_ok(netdev)) + return SXE2_SELFTEST_RTN_LINKDOWN; + + mutex_lock(&adapter->link_ctxt.link_status_lock); + port_state = sxe2_get_pf_link_status(adapter); + mutex_unlock(&adapter->link_ctxt.link_status_lock); + + return port_state ? 0 : SXE2_SELFTEST_RTN_LINKDOWN; +} + +STATIC s32 sxe2_intr_test(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + u64 sw_irq_cnt_old = adapter->irq_ctxt.event_irq_cnt; + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; + } + + LOG_NETDEV_INFO("interrupt test\n"); + + sxe2_hw_irq_trigger(hw, SXE2_EVENT_IRQ_IDX); + + usleep_range(1000, 2000); + mutex_unlock(&adapter->vsi_ctxt.lock); + return (sw_irq_cnt_old == adapter->irq_ctxt.event_irq_cnt); +} + +typedef s32 (*sxe2_selftest_func)(struct net_device *); + +struct sxe2_selftest { + char name[ETH_GSTRING_LEN]; + sxe2_selftest_func func; + bool offline; +}; + +static struct sxe2_selftest sxe2_selftest_suite[] = { + { "Link Test", sxe2_link_test, false }, + { "Intr Test", sxe2_intr_test, true }, + { "Loopback Test", sxe2_loopback_test, true }, +}; + +void sxe2_ethtool_selftest(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + + struct sxe2_selftest st; + u32 i, count = 0; + + for (i = 0; i < ARRAY_SIZE(sxe2_selftest_suite); i++) { + st = sxe2_selftest_suite[i]; + if (!(eth_test->flags & ETH_TEST_FL_OFFLINE) && st.offline) { + LOG_NETDEV_INFO("[%d] %s can only perform in offline mode..\n", + i, st.name); + continue; + } + LOG_NETDEV_INFO("\t[%d] %s start..\n", i, st.name); + data[count] = (u64)st.func(netdev); + LOG_NETDEV_INFO("\t[%d] %s end: result(%lld)\n", i, st.name, + data[count]); + count++; + } + for (i = 0; i < count; i++) { + if (data[i]) { + eth_test->flags |= ETH_TEST_FL_FAILED; + break; + } + } + LOG_NETDEV_INFO("self test out: status flags(0x%x)\n", eth_test->flags); +} + +int sxe2_ethtool_selftest_count(struct net_device *netdev) +{ + return ARRAY_SIZE(sxe2_selftest_suite); +} + +void sxe2_ethtool_selftest_strings(struct net_device *netdev, u8 *data) +{ + struct sxe2_selftest st; + u32 i; + + if (!data) + return; + for (i = 0; i < ARRAY_SIZE(sxe2_selftest_suite); i++) { + st = sxe2_selftest_suite[i]; + memcpy(data + ((size_t)i * (size_t)ETH_GSTRING_LEN), st.name, + ETH_GSTRING_LEN); + } +} + +static void sxe2_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_fwc_lfc_info lfc = { 0 }; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_dcbx_cfg *curr_cfg; + s32 ret; + + struct flm_ethtool_get_link_resp link_cfg; + + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) { + LOG_ERROR_BDF("link get cmd fail, ret=%d\n", ret); + return; + } + + pause->autoneg = link_cfg.local_an_en.advertis_an; + + curr_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + pause->rx_pause = 0; + pause->tx_pause = 0; + + if (curr_cfg->pfc.enable) + goto l_out; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LINK_FLOW_CONTROL_GET, NULL, 0, + &lfc, sizeof(lfc)); + + ret = 
sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fc get cmd fail, ret=%d\n", ret); + return; + } + + pause->rx_pause = adapter->lfc_ctxt.rx_en; + pause->tx_pause = adapter->lfc_ctxt.tx_en; + + LOG_INFO_BDF("link get cmd fw tx %d rx %d driver tx %d rx %d, ret=%d\n", + lfc.tx_en, lfc.rx_en, + adapter->lfc_ctxt.tx_en, adapter->lfc_ctxt.rx_en, ret); + +l_out: + return; +} + +static int sxe2_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_vsi *dpdk_vsi = NULL; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_fwc_lfc_info lfc = { 0 }; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_dcbx_cfg *curr_cfg; + s32 ret = 0; + struct flm_ethtool_get_link_resp link_cfg; + u32 is_an; + bool changed = false; + u8 old_fc = SXE2_FC_MODE_DISABLE; + u8 new_fc = SXE2_FC_MODE_DISABLE; + + (void)sxe2_fc_get(adapter, vsi->idx_in_dev, &old_fc); + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + LOG_ERROR_BDF("vsi disabled, try later.\n"); + goto vsi_unlock; + } + memset(&adapter->lfc_ctxt, 0, sizeof(struct sxe2_lfc_context)); + curr_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + if (vsi->type != SXE2_VSI_T_PF) { + LOG_DEV_ERR("changing flow control parameters is only supported on the PF VSI\n"); + ret = -EOPNOTSUPP; + goto vsi_unlock; + } + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) { + LOG_ERROR_BDF("link get cmd fail, ret=%d\n", ret); + ret = -EIO; + goto vsi_unlock; + } + + is_an = link_cfg.local_an_en.advertis_an; + if (pause->autoneg != is_an) { + LOG_DEV_INFO("To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + ret = -EOPNOTSUPP; + goto vsi_unlock; + } + if (curr_cfg->pfc.enable) { + LOG_DEV_INFO("priority flow control enabled. 
Cannot set link flow control.\n"); + ret = -EOPNOTSUPP; + goto vsi_unlock; + } + + if (is_an) { + ret = sxe2_link_set_fc_configure(adapter, + (u8)pause->tx_pause, + (u8)pause->rx_pause); + if (ret) { + LOG_ERROR_BDF("link set fc cmd fail, ret=%d\n", ret); + ret = -EIO; + goto vsi_unlock; + } + } else { + memset(&lfc, 0, sizeof(lfc)); + if (pause->rx_pause) { + lfc.rx_en = true; + adapter->lfc_ctxt.rx_en = true; + } + + if (pause->tx_pause) { + lfc.tx_en = true; + adapter->lfc_ctxt.tx_en = true; + } + + lfc.fc_mode = SXE2_FC_MODE_LFC; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LINK_FLOW_CONTROL_SET, + &lfc, sizeof(lfc), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fc set cmd fail, ret=%d\n", ret); + ret = -EIO; + goto vsi_unlock; + } + } + + dpdk_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (dpdk_vsi) + sxe2_set_fc_flag(dpdk_vsi, pause->tx_pause || pause->rx_pause); + sxe2_set_fc_flag(vsi, pause->tx_pause || pause->rx_pause); + + ret = sxe2_vsi_down_up_unlock(adapter->vsi_ctxt.main_vsi); + if (ret) { + LOG_ERROR_BDF("vsi down up failed, ret=%d\n", ret); + goto vsi_unlock; + } + + LOG_INFO_BDF("lfc set tx %d rx %d.\n", pause->tx_pause, pause->rx_pause); + +vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + (void)sxe2_fc_get(adapter, vsi->idx_in_dev, &new_fc); + if (new_fc != old_fc) + changed = true; + + if (changed && ret == 0) + (void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt, + SXE2_COM_FC_ST_CHANGE); + + return ret; +} + +u32 sxe2_speed_switch_set_configure(u32 speed) +{ + u32 switch_speed; + + if (speed == SXE2_ETHTOOL_SPEED_10GB) + switch_speed = SXE2_SET_LINK_SPEED_CFG_10G; + else if (speed == SXE2_ETHTOOL_SPEED_25GB) + switch_speed = SXE2_SET_LINK_SPEED_CFG_25G; + else if (speed == SXE2_ETHTOOL_SPEED_50GB) + switch_speed = SXE2_SET_LINK_SPEED_CFG_50G; + else if (speed == SXE2_ETHTOOL_SPEED_100GB) + switch_speed = SXE2_SET_LINK_SPEED_CFG_100G; + else if (speed == SXE2_ETHTOOL_SPEED_AUTO) + switch_speed = SXE2_SET_LINK_SPEED_CFG_AUTO; + else + switch_speed = SXE2_SET_LINK_SPEED_CFG_MAX; + + return switch_speed; +} + +u32 sxe2_speed_dut_switch_cfg(s32 speed) +{ + u32 switch_speed; + + if (speed == SXE2_SET_LINK_SPEED_CFG_10G) + switch_speed = SXE2_ETHTOOL_SPEED_10GB; + else if (speed == SXE2_SET_LINK_SPEED_CFG_25G) + switch_speed = SXE2_ETHTOOL_SPEED_25GB; + else if (speed == SXE2_SET_LINK_SPEED_CFG_50G) + switch_speed = SXE2_ETHTOOL_SPEED_50GB; + else if (speed == SXE2_SET_LINK_SPEED_CFG_100G) + switch_speed = SXE2_ETHTOOL_SPEED_100GB; + else if (speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + switch_speed = SXE2_ETHTOOL_SPEED_AUTO; + else + switch_speed = SXE2_LINK_SPEED_UNKNOWN; + + return switch_speed; +} + +s32 sxe2_link_get_pasist_info(struct sxe2_adapter *adapter, struct flm_link_info_pasist *cfg) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_PERSIST_GET_LINK_CFG, NULL, 0, + cfg, sizeof(*cfg)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("set_speed pasist, ret=%d\n", ret); + ret = -EIO; + goto out; + } + + LOG_INFO_BDF("get persist cfg speed=[%d], fec=[%d],\t" + "linkup=[%d],fc_tx=[%d],fc_rx=[%d] .\n", + cfg->speed, cfg->fec_mode, cfg->link_status, cfg->fc_mode.tx_en, + cfg->fc_mode.rx_en); + +out: + return ret; +} + +STATIC void sxe2_get_active_fec(struct sxe2_adapter *adapter, struct ethtool_fecparam *fecparam) +{ + int ret = 0; + struct ethtool_flm_link_info currect_info; + struct sxe2_cmd_params cmd = { 0 }; + enum 
flm_fec_mode fec; + + ret = sxe2_get_cur_link_state(adapter, &currect_info); + if (ret) { + LOG_ERROR_BDF("failed to link currect info, ret=%d\n", ret); + fecparam->active_fec = ETHTOOL_FEC_NONE; + return; + } + + if (currect_info.link_status == SXE2_LINK_DOWN) { + LOG_ERROR_BDF("link state is down"); + fecparam->active_fec = ETHTOOL_FEC_NONE; + return; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SFP_GET_FEC_CFG, NULL, 0, &fec, + sizeof(fec)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fec get cmd fail, ret=%d\n", ret); + fecparam->active_fec = ETHTOOL_FEC_NONE; + return; + } + + switch (fec) { + case FLM_FEC_AUTO: + fecparam->active_fec = ETHTOOL_FEC_AUTO; + break; + case FLM_FEC_528: + case FLM_FEC_544: + fecparam->active_fec = ETHTOOL_FEC_RS; + break; + case FLM_FEC_BSFEC: + fecparam->active_fec = ETHTOOL_FEC_BASER; + break; + case FLM_FEC_NONE: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + default: + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } +} + +STATIC void sxe2_get_fw_cur_fec(struct sxe2_adapter *adapter, struct ethtool_fecparam *fecparam) +{ + int ret = 0; + struct flm_link_info_pasist currect_info; + + ret = sxe2_link_get_pasist_info(adapter, &currect_info); + if (ret) { + LOG_ERROR_BDF("failed to link currect info, ret=%d\n", ret); + fecparam->fec = ETHTOOL_FEC_AUTO; + return; + } + + if (currect_info.fec_mode == FLM_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + else if (currect_info.fec_mode == FLM_FEC_NONE) + fecparam->fec |= ETHTOOL_FEC_OFF; + else if (currect_info.fec_mode == FLM_FEC_BSFEC) + fecparam->fec |= ETHTOOL_FEC_BASER; + else if (currect_info.fec_mode == FLM_FEC_528) + fecparam->fec |= ETHTOOL_FEC_RS; + else if (currect_info.fec_mode == FLM_FEC_544) + fecparam->fec |= ETHTOOL_FEC_RS; + else + fecparam->fec |= ETHTOOL_FEC_AUTO; +} + +STATIC int sxe2_get_fec(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + + sxe2_get_active_fec(adapter, fecparam); + + sxe2_get_fw_cur_fec(adapter, fecparam); + + return 0; +} + +STATIC int sxe2_set_fec(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + enum sxe2_fec_mode fec; + struct sxe2_cmd_params cmd = { 0 }; + struct ethtool_flm_link_info currect_info; + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + fec = SXE2_ETHTOOL_FEC_AUTO; + break; + case ETHTOOL_FEC_RS: + fec = SXE2_ETHTOOL_FEC_RS; + break; + case ETHTOOL_FEC_BASER: + fec = SXE2_ETHTOOL_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + fec = SXE2_ETHTOOL_FEC_OFF; + break; + case ETHTOOL_FEC_NONE: + fec = SXE2_ETHTOOL_FEC_NONE; + break; + default: + LOG_INFO_BDF("fec mode Unsupported! 
fecparam->fec=%d\n", + fecparam->fec); + fec = SXE2_ETHTOOL_FEC_NONE; + break; + } + + ret = sxe2_get_cur_link_state(adapter, &currect_info); + if (ret) { + LOG_ERROR_BDF("failed to get current link info, ret=%d\n", ret); + return ret; + } + + if (adapter->link_ctxt.fec == fec && currect_info.link_status) { + LOG_INFO_BDF("user-set fec[%d] is unchanged, current fec[%d]", + fec, adapter->link_ctxt.fec); + return ret; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SFP_SET_FEC_CFG, &fec, + sizeof(fec), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("fec set cmd fail, ret=%d\n", ret); + + adapter->link_ctxt.fec = fec; + + return ret; +} + +void sxe2_stop_lfc(struct sxe2_adapter *adapter) +{ + struct sxe2_fwc_lfc_info lfc = { 0 }; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_dcbx_cfg *curr_cfg; + s32 ret; + + curr_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + if (curr_cfg->pfc.enable) { + LOG_DEV_INFO("priority flow control enabled. No need to stop link flow control.\n"); + return; + } + + memset(&lfc, 0, sizeof(lfc)); + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LINK_FLOW_CONTROL_SET, &lfc, + sizeof(lfc), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("lfc set cmd fail, ret=%d\n", ret); + return; + } +} + +STATIC s32 sxe2_link_set_configure(struct sxe2_adapter *adapter, u32 speed) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = { 0 }; + struct flm_link_config req; + + req.speed = speed; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ETHTOOL_SET_LINKINFO_CFG, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to set link cfg, ret=%d\n", ret); + goto out; + } + +out: + return ret; +} + +s32 sxe2_link_set_fc_configure(struct sxe2_adapter *adapter, u8 tx_fc, u8 rx_fc) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = { 0 }; + struct flm_link_info req = { 0 }; + + req.fec = adapter->link_ctxt.fec; + req.speed = adapter->link_ctxt.current_link_speed; + req.fc_mode.rx_en = rx_fc; + req.fc_mode.tx_en = tx_fc; + req.port_num = adapter->port_idx; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SFP_SET_LINK_CFG, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to set link fc cfg, ret=%d\n", ret); + ret = -EIO; + goto out; + } + +out: + return ret; +} + +s32 sxe2_set_link_autoneg_en(struct sxe2_adapter *adapter, u32 an_en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct configure_an req; + + memset(&req, 0, sizeof(req)); + req.an_en = an_en; + req.port = adapter->port_idx; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FLM_AN_SET, &req, sizeof(req), + NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("an set cmd fail, ret=%d\n", ret); + goto out; + } + +out: + return ret; +} + +s32 sxe2_get_link_configure(struct sxe2_adapter *adapter, + struct flm_ethtool_get_link_resp *link_cfg) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct flm_ethtool_get_link_req req; + + req.port_num = adapter->port_idx; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_ETHTOOL_GET_LINKINFO_CFG, &req, + sizeof(req), link_cfg, + sizeof(struct flm_ethtool_get_link_resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to get link cfg, ret=%d\n", ret); + ret = -EIO; + goto out; + } + +out: + return ret; +} + +s32 sxe2_get_support_speed_ability(struct sxe2_adapter *adapter, + struct support_speed_ability_mode *speed_ability) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + + 
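+ /* Same request/response shape as the other ethtool queries in this + * file: sxe2_cmd_params_dflt_fill() stages the opcode plus request + * and response buffers, sxe2_cmd_fw_exec() runs the synchronous + * firmware round trip, and the response buffer is only valid on a + * zero return. */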
sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SUPPORT_SPEED_GET_CFG, NULL, 0, + speed_ability, + sizeof(struct support_speed_ability_mode)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + ret = -EIO; + goto out; + } + +out: + return ret; +} + +static void sxe2_ethtool_support_fec_get(struct support_speed_ability_mode *speed_ability, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + if (speed_ability->ability_speed_100Gcr4 || speed_ability->ability_speed_100Gkr4) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + + if (speed_ability->ability_speed_50Gcr2 || speed_ability->ability_speed_50Gkr2) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + + if (speed_ability->ability_speed_25Gcr || speed_ability->ability_speed_25Gkr + || speed_ability->ability_speed_25Gkrcr || speed_ability->ability_speed_25Gkrcr_s) { + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + } + + if (speed_ability->ability_speed_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + } + +} + +s32 sxe2_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct flm_ethtool_get_link_resp link_cfg; + struct support_speed_ability_mode speed_ability; + struct flm_link_info_pasist pasist_info; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + (void)memset(&speed_ability, 0, sizeof(struct support_speed_ability_mode)); + (void)memset(&pasist_info, 0, sizeof(struct flm_link_info_pasist)); + (void)memset(&link_cfg, 0, sizeof(struct flm_ethtool_get_link_resp)); + + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + + ret = sxe2_get_support_speed_ability(adapter, &speed_ability); + if (ret) + LOG_ERROR_BDF("failed to get speed_ability, ret=%d\n", ret); + + ret = sxe2_link_get_pasist_info(adapter, &pasist_info); + if (ret) + LOG_ERROR_BDF("failed to get speed_ability, ret=%d\n", ret); + + if (speed_ability.ability_speed_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_10G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + } + if ((speed_ability.ability_speed_25Gcr) || + (speed_ability.ability_speed_25Gkrcr_s)) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_25G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + } + if (speed_ability.ability_speed_25Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_25G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + } + if 
(speed_ability.ability_speed_50Gcr2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_50G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + } + if (speed_ability.ability_speed_50Gkr2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseKR2_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_50G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + } + if (speed_ability.ability_speed_100Gkr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR4_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_100G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + } + } + if (speed_ability.ability_speed_100Gcr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_100G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + } + if (speed_ability.ability_speed_100Gsr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + if (link_cfg.current_an_en.current_an + && (pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_100G + || pasist_info.speed == SXE2_SET_LINK_SPEED_CFG_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseSR4_Full); + } + } + + if (link_cfg.an_publicity.an_mode.speed_ability_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + if (link_cfg.an_publicity.an_mode.speed_ability_25Gkrcr || + link_cfg.an_publicity.an_mode.speed_ability_25Gkrcr_s) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (link_cfg.an_publicity.an_np_mode.speed_ability_25Gkr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (link_cfg.an_publicity.an_np_mode.speed_ability_25Gcr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (link_cfg.an_publicity.an_np_mode.speed_ability_50Gcr2) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + if (link_cfg.an_publicity.an_np_mode.speed_ability_50Gkr2) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + if (link_cfg.an_publicity.an_mode.speed_ability_100Gcr4) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + if (link_cfg.an_publicity.an_mode.speed_ability_100Gkr4) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + } + + sxe2_ethtool_support_fec_get(&speed_ability, ks); + + return ret; +} + +s32 sxe2_get_cur_link_state(struct sxe2_adapter *adapter, + struct ethtool_flm_link_info *currect_info) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_CURRENT_SPEED_GET_CFG, + NULL, 0, currect_info, + sizeof(struct ethtool_flm_link_info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to link currect info, ret=%d\n", ret); + ret = -EIO; + goto out; + } + +out: + return ret; +} 
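+ +/* Mapping note for the advertise helpers below, assuming the usual IEEE + * naming behind the firmware enums: FLM_FEC_528 and FLM_FEC_544 denote the + * RS(528,514) and RS(544,514) Reed-Solomon codes and are both surfaced to + * ethtool as FEC_RS, while the Clause 74 BaseR/Firecode variant + * (FLM_FEC_BSFEC) is surfaced as FEC_BASER. + */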
+ +static void sxe2_advertise_fec_get(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + struct ethtool_flm_link_info *currect_info) +{ + if (currect_info->link_status == SXE2_LINK_UP) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + if (link_cfg->advertis_fec.fec_br) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + + if ((link_cfg->advertis_fec.fec_528) || (link_cfg->advertis_fec.fec_544)) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + } + +} + +static void sxe2_advertise_link_mode_get(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, struct ethtool_flm_link_info *currect_info) +{ + if (!link_cfg->current_an_en.current_an) { + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_10G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 10000baseKR_Full + )) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_25G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 25000baseCR_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_25G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 25000baseKR_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_50G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 50000baseCR2_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_50G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 50000baseKR2_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_100G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 100000baseKR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_100G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 100000baseCR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + + if ((currect_info->speed == SXE2_SET_LINK_SPEED_CFG_100G + || currect_info->speed == SXE2_SET_LINK_SPEED_CFG_AUTO) + && ethtool_link_ksettings_test_link_mode(ks, supported, 100000baseSR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseSR4_Full); + } + } +} + +STATIC int sxe2_get_link_requir_cfg(struct net_device *netdev, + struct ethtool_link_ksettings *ks, u32 *speed) +{ + s32 ret = 0; + u32 supported; + u32 advertising; + u32 lp_advertising; + struct flm_ethtool_get_link_resp link_cfg; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct ethtool_flm_link_info currect_info; + + (void)ethtool_convert_link_mode_to_legacy_u32(&supported, + ks->link_modes.supported); + 
(void)ethtool_convert_link_mode_to_legacy_u32(&advertising, + ks->link_modes.advertising); + (void)ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + ks->link_modes.lp_advertising); + + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) { + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + link_cfg.optical_module.current_connection = SXE2_FW_CONNECT_MDDE_UNKNOW; + } + + ret = sxe2_get_cur_link_state(adapter, &currect_info); + if (ret) { + LOG_ERROR_BDF("failed to link currect info, ret=%d\n", ret); + link_cfg.optical_module.current_connection = SXE2_FW_CONNECT_MDDE_UNKNOW; + } + + sxe2_advertise_fec_get(ks, &link_cfg, &currect_info); + + sxe2_advertise_link_mode_get(ks, &link_cfg, &currect_info); + *speed = sxe2_speed_dut_switch_cfg(currect_info.speed); + if (currect_info.link_status) { + switch (*speed) { + case SXE2_ETHTOOL_SPEED_10GB: + ks->base.speed = SPEED_10000; + ks->base.duplex = DUPLEX_FULL; + break; + case SXE2_ETHTOOL_SPEED_25GB: + ks->base.speed = SPEED_25000; + ks->base.duplex = DUPLEX_FULL; + break; + case SXE2_ETHTOOL_SPEED_50GB: + ks->base.speed = SPEED_50000; + ks->base.duplex = DUPLEX_FULL; + break; + case SXE2_ETHTOOL_SPEED_100GB: + ks->base.speed = SPEED_100000; + ks->base.duplex = DUPLEX_FULL; + break; + default: + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + break; + } + } else { + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + } + + switch (link_cfg.optical_module.current_connection) { + case SXE2_FW_CONNECT_MODE_TRANSCEIVER: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case SXE2_FW_CONNECT_MODE_BACKPLANE: + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ks->base.port = PORT_NONE; + break; + case SXE2_FW_CONNECT_MODE_DAC: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + ks->base.port = PORT_DA; + break; + case SXE2_FW_CONNECT_MODE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + break; + default: + ks->base.port = PORT_OTHER; + break; + } + + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + if (link_cfg.sxe2_ana_fsm == SXE2_AN_GOOD) { + if ((link_cfg.partner_pause_result.tx_en) && + (link_cfg.partner_pause_result.rx_en)) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Pause); + } else if (link_cfg.partner_pause_result.tx_en) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + } else if (link_cfg.partner_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + } else { + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + } + } + + return ret; +} + +static int sxe2_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + u32 supported; + u32 advertising; + u32 lp_advertising; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + u32 speed = 0; + s32 ret = 0; + struct flm_ethtool_get_link_resp link_cfg; + + (void)ethtool_convert_link_mode_to_legacy_u32(&supported, + ks->link_modes.supported); + 
(void)ethtool_convert_link_mode_to_legacy_u32(&advertising, + ks->link_modes.advertising); + (void)ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + ks->link_modes.lp_advertising); + + ret = sxe2_phy_type_to_ethtool(netdev, ks); + if (ret) + LOG_ERROR_BDF("failed to get phy type, ret=%d\n", ret); + + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + + ks->base.autoneg = link_cfg.current_an_en.current_an; + ret = sxe2_get_link_requir_cfg(netdev, ks, &speed); + if (ret) + LOG_ERROR_BDF("get link requir cfg failed, ret=%d\n", ret); + + adapter->link_ctxt.current_link_speed = speed; + if ((link_cfg.configed_pause_result.tx_en) && + (link_cfg.configed_pause_result.rx_en)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (link_cfg.configed_pause_result.tx_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else if (link_cfg.configed_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + } + + if (link_cfg.local_an_en.suppert_an) + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + + if (link_cfg.local_an_en.advertis_an) + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + + return ret; +} + +u32 sxe2_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) +{ + u32 adv_link_speed = 0; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full)) + adv_link_speed |= LINK_SPEED_10G; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full)) + adv_link_speed |= LINK_SPEED_10G; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseLR4_ER4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR4_Full)) + adv_link_speed |= LINK_SPEED_100G; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseKR_Full)) + adv_link_speed |= LINK_SPEED_25G; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseCR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseKR2_Full)) + adv_link_speed |= LINK_SPEED_50G; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseSR2_Full)) + adv_link_speed |= LINK_SPEED_50G; + + return adv_link_speed; +} + +static u32 sxe2_get_usr_req_link_speed(u32 adv_link_speed) +{ + u8 speed_cnt = 0; + u32 user_req_link_speed = 0; + + if (adv_link_speed & LINK_SPEED_10G) { + speed_cnt++; + user_req_link_speed = SXE2_ETHTOOL_SPEED_10GB; + } + + if (adv_link_speed & LINK_SPEED_25G) { + speed_cnt++; + user_req_link_speed = SXE2_ETHTOOL_SPEED_25GB; + } + + if (adv_link_speed & LINK_SPEED_50G) { + speed_cnt++; + user_req_link_speed = SXE2_ETHTOOL_SPEED_50GB; + } + + if (adv_link_speed & LINK_SPEED_100G) { + speed_cnt++; + user_req_link_speed = SXE2_ETHTOOL_SPEED_100GB; + } + + if (speed_cnt > 1) + user_req_link_speed = 
SXE2_ETHTOOL_SPEED_AUTO; + + return user_req_link_speed; +} + +static bool sxe2_check_is_advertise_set(const struct ethtool_link_ksettings *ks, u32 current_link_speed) +{ + if (ks->base.speed == SXE2_PF_DOWN_ETHTOOL_BASE_SPEED) + return true; + + if (ks->base.speed == current_link_speed) + return true; + + return false; +} + +static bool sxe2_check_usr_link_speed_change(struct ethtool_flm_link_info *currect_info, + const struct ethtool_link_ksettings *ks, + u32 *user_link_speed, + u32 current_link_speed, + struct flm_link_info_pasist *pasist_info) +{ + u32 last_pasist_speed = sxe2_speed_dut_switch_cfg(pasist_info->speed); + + if (sxe2_check_is_advertise_set(ks, current_link_speed)) { + if (currect_info->link_status == SXE2_LINK_UP) { + if (*user_link_speed != SXE2_ETHTOOL_SPEED_AUTO && + *user_link_speed != current_link_speed) + return true; + + if (last_pasist_speed != *user_link_speed) + return true; + + } else { + if (ks->base.speed != SXE2_PF_DOWN_ETHTOOL_BASE_SPEED) + *user_link_speed = ks->base.speed; + + return true; + } + } else { + if (ks->base.speed != current_link_speed) { + *user_link_speed = ks->base.speed; + return true; + } + } + + return false; +} + +static bool sxe2_check_speed_param_valid(u32 user_link_speed) +{ + bool ret = false; + + switch (user_link_speed) { + case SXE2_ETHTOOL_SPEED_10GB: + case SXE2_ETHTOOL_SPEED_25GB: + case SXE2_ETHTOOL_SPEED_50GB: + case SXE2_ETHTOOL_SPEED_100GB: + case SXE2_ETHTOOL_SPEED_AUTO: + ret = true; + break; + default: + ret = false; + break; + } + + return ret; +} + +static int sxe2_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct ethtool_link_ksettings copy_ks = *ks; + struct ethtool_link_ksettings safe_ks = {}; + u8 autoneg; + bool autoneg_change = 0; + u32 adv_link_speed = 0; + u32 current_link_speed = 0; + u32 usr_link_speed = 0; + struct ethtool_flm_link_info currect_info; + struct flm_ethtool_get_link_resp link_cfg; + struct flm_link_info_pasist pasist_info; + + autoneg = copy_ks.base.autoneg; + memset(&safe_ks, 0, sizeof(safe_ks)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + ret = sxe2_get_link_ksettings(netdev, &safe_ks); + if (ret) { + LOG_ERROR_BDF("get link ksettings failed, ret=%d\n", + ret); + goto l_end; + } + + ret = sxe2_get_cur_link_state(adapter, &currect_info); + if (ret) { + LOG_ERROR_BDF("failed to get current link info, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_get_link_configure(adapter, &link_cfg); + if (ret) { + LOG_ERROR_BDF("failed to get link cfg, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_link_get_pasist_info(adapter, &pasist_info); + if (ret) + LOG_ERROR_BDF("failed to get persist link info, ret=%d\n", ret); + + if (link_cfg.optical_module.current_connection == SXE2_FW_CONNECT_MDDE_UNKNOW) { + ret = -EIO; + LOG_ERROR_BDF("media type unknown, ret=%d\n", ret); + goto l_end; + } + + if ((link_cfg.current_an_en.current_an == AUTONEG_DISABLE) && + (autoneg == AUTONEG_ENABLE)) { + ret = sxe2_set_link_autoneg_en(adapter, autoneg); + if (ret) { + LOG_ERROR_BDF("failed to autoneg, ret=%d\n", ret); + goto l_end; + } + autoneg_change = 1; + } else if ((link_cfg.current_an_en.current_an == AUTONEG_ENABLE) && + (autoneg == AUTONEG_DISABLE)) { + ret = sxe2_set_link_autoneg_en(adapter, autoneg); + if (ret) { + 
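+ /* fw rejected the autoneg change; log and bail out before + * any speed reconfiguration is attempted */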
LOG_ERROR_BDF("failed to autoneg, ret=%d\n", ret); + goto l_end; + } + autoneg_change = 1; + } + + adv_link_speed = sxe2_ksettings_find_adv_link_speed(ks); + usr_link_speed = sxe2_get_usr_req_link_speed(adv_link_speed); + current_link_speed = adapter->link_ctxt.current_link_speed; + if (!autoneg_change) { + if (!sxe2_check_usr_link_speed_change(&currect_info, + ks, &usr_link_speed, current_link_speed, &pasist_info)) { + LOG_INFO_BDF("linkup and config same not set!!\n"); + goto l_end; + } + } else { + if (ks->base.speed != current_link_speed && + ks->base.speed != SXE2_PF_DOWN_ETHTOOL_BASE_SPEED) + usr_link_speed = ks->base.speed; + + } + + if (!sxe2_check_speed_param_valid(usr_link_speed)) + return -EOPNOTSUPP; + + if (currect_info.link_status == SXE2_LINK_UP) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + LOG_INFO_BDF("set ksetting carrier off link down.\n"); + } + + ret = sxe2_link_set_configure(adapter, usr_link_speed); + if (ret) + LOG_ERROR_BDF("Set phy link config failed\n"); + + if (ret != 0) { + if (autoneg_change) + ret = 0; + + if (currect_info.link_status == SXE2_LINK_UP && + (usr_link_speed == SXE2_ETHTOOL_SPEED_AUTO || + usr_link_speed == current_link_speed)) { + ret = 0; + } + + if (currect_info.link_status == SXE2_LINK_UP) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } + } + + adapter->link_ctxt.current_link_speed = adv_link_speed; +l_end: + + return ret; +} + +STATIC int sxe2_set_channels_fnav_check(struct sxe2_adapter *adapter, + u32 new_cnt) +{ + int ret = 0; + struct sxe2_fnav_filter *filter = NULL; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + list_for_each_entry(filter, &vsi->fnav.filter_list, + l_node) { + if (filter->act_type == SXE2_FNAV_ACT_QINDEX && + filter->q_index >= new_cnt) { + ret = -EINVAL; + LOG_ERROR_BDF("change channel fnav check failed, loc=%u, q_id=%u.\n", + filter->filter_loc, filter->q_index); + break; + } + } + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + return ret; +} + +static s32 sxe2_channels_check(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + s32 ret = 0; + u32 new_rx = 0, new_tx = 0; + + new_rx = ch->combined_count + ch->rx_count; + new_tx = ch->combined_count + ch->tx_count; + + if (new_rx < vsi->tc.tc_cnt) { + LOG_NETDEV_ERR("cannot set less Rx channels,\t" + "than Traffic Classes you have (%u)\n", + vsi->tc.tc_cnt); + ret = -EINVAL; + goto l_end; + } + + if (new_tx < vsi->tc.tc_cnt) { + LOG_NETDEV_ERR("cannot set less Tx channels, \t" + "than Traffic Classes you have (%u)\n", + vsi->tc.tc_cnt); + ret = -EINVAL; + goto l_end; + } + + if (new_rx > sxe2_max_rxq_get(adapter)) { + LOG_NETDEV_ERR("ethtool set channels failed, maximum allowed rx channels is %u\n", + sxe2_max_rxq_get(adapter)); + ret = -EINVAL; + goto l_end; + } + + if (new_tx > sxe2_max_txq_get(adapter)) { + LOG_NETDEV_ERR("ethtool set channels failed, maximum allowed tx channels is %u\n", + sxe2_max_txq_get(adapter)); + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_set_channels_fnav_check(adapter, new_rx); + if (ret) + goto l_end; + +l_end: + return ret; +} + +s32 sxe2_vsi_qs_reassign(struct net_device *netdev, struct ethtool_channels *ch) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + u32 cur_combined; + u32 new_rx = 0, 
new_tx = 0; + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + LOG_ERROR_BDF("vsi disabled, try later.\n"); + goto l_unlock; + } + + if (sxe2_is_safe_mode(adapter)) { + LOG_NETDEV_ERR("ethtool set channels in safe mode is not supported\n"); + ret = -EOPNOTSUPP; + goto l_unlock; + } + + if (test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags)) { + LOG_NETDEV_ERR("cannot set channels when L2 forwarding enabled\n"); + ret = -EOPNOTSUPP; + goto l_unlock; + } + + cur_combined = sxe2_combined_cnt_get(vsi); + if (ch->rx_count == vsi->rxqs.q_cnt - cur_combined) + ch->rx_count = 0; + if (ch->tx_count == vsi->txqs.q_cnt - cur_combined) + ch->tx_count = 0; + if (ch->combined_count == cur_combined) + ch->combined_count = 0; + + if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) { + LOG_NETDEV_ERR("ethtool set channels failed, \t" + "please specify at least 1 rx and 1 tx channel\n"); + ret = -EINVAL; + goto l_unlock; + } + new_rx = ch->combined_count + ch->rx_count; + new_tx = ch->combined_count + ch->tx_count; + + if (new_rx > SXE2_VSI_TXRX_Q_MAX_CNT || + new_tx > SXE2_VSI_TXRX_Q_MAX_CNT) { + ret = -EINVAL; + LOG_NETDEV_ERR("ethtool set channels failed, \t" + "a maximum of 256 queues can be allocated\n"); + goto l_unlock; + } + + ret = sxe2_channels_check(netdev, ch); + if (ret) + goto l_unlock; + + if (netdev->features & NETIF_F_NTUPLE) + sxe2_arfs_disable(adapter); + + if (!new_rx && !new_tx) + goto l_unlock; + + if (new_tx) + vsi->txqs.req_q_cnt = (u16)new_tx; + if (new_rx) + vsi->rxqs.req_q_cnt = (u16)new_rx; + + if (!netif_running(vsi->netdev)) { + ret = sxe2_vsi_rebuild(vsi, false); + if (ret) + goto rebuild_err; + LOG_DEV_DEBUG("link is down, \t" + "queue count change happens when link is brought up\n"); + goto update_filter; + } + + (void)sxe2_vsi_close(vsi); + ret = sxe2_vsi_rebuild(vsi, false); + if (ret) + goto rebuild_err; + +update_filter: + if (netdev->features & NETIF_F_NTUPLE) { + if (sxe2_arfs_enable(adapter)) + LOG_NETDEV_WARN("arfs enable failed when set channel!"); + + } + + if (!netif_is_rxfh_configured(netdev)) { + ret = sxe2_rss_lut_reset(vsi, vsi->rxqs.req_q_cnt); + if (ret) + goto rebuild_err; + } + goto l_unlock; + +rebuild_err: + LOG_DEV_ERR("error during VSI rebuild: %d. 
Unload and reload the driver.\n", + ret); +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2_channels_set(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + + ret = sxe2_vsi_qs_reassign(netdev, ch); + if (ret) + return ret; + + ret = sxe2_netdev_q_cnt_set(netdev, vsi->txqs.q_cnt, vsi->rxqs.q_cnt, + true); + if (ret) { + LOG_ERROR_BDF("new_tx:%u new_rx:%u set netdev queue cnt failed.\n", + vsi->txqs.q_cnt, vsi->rxqs.q_cnt); + goto l_rollback; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + LOG_ERROR_BDF("vsi disabled, try later.\n"); + goto l_out; + } + + if (netif_running(vsi->netdev) && sxe2_vsi_open(vsi)) { + ret = -EIO; + goto l_out; + } + +l_out: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; + +l_rollback: + mutex_lock(&adapter->vsi_ctxt.lock); + (void)sxe2_vsi_disable_unlock(vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || + flash->region > SXE2_INVAL_U16) + return sxe2_flash_package_from_file(dev, flash->data, + flash->region); + else + return -EPERM; +} + +#ifdef SUPPORT_ETHTOOL_GET_RMON_STATS +static void sxe2_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_pf_hw_stats *pf_hw_stats = &adapter->pf_stats.pf_hw_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + goto l_unlock; + + sxe2_hw_pf_stats_update(adapter); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + rmon_stats->undersize_pkts = pf_hw_stats->rx_undersize_good; + rmon_stats->fragments = pf_hw_stats->rx_runt_error; + rmon_stats->jabbers = pf_hw_stats->rx_jabbers; + rmon_stats->oversize_pkts = pf_hw_stats->rx_oversize_good; + + rmon_stats->hist[0] = pf_hw_stats->rx_size_64; + rmon_stats->hist[1] = pf_hw_stats->rx_size_65_127; + rmon_stats->hist[2] = pf_hw_stats->rx_size_128_255; + rmon_stats->hist[3] = pf_hw_stats->rx_size_256_511; + rmon_stats->hist[4] = pf_hw_stats->rx_size_512_1023; + rmon_stats->hist[5] = pf_hw_stats->rx_size_1024_1522; + rmon_stats->hist[6] = pf_hw_stats->rx_size_1523_max; + + *ranges = sxe2_rmon_ranges; +} +#endif + +static const struct ethtool_ops sxe2_ethtool_ops = { +#ifdef SUPPORTED_COALESCE_PARAMS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE | + ETHTOOL_COALESCE_RX_USECS_HIGH, +#endif + .get_drvinfo = sxe2_get_drvinfo, + .get_regs_len = sxe2_get_regs_len, + .get_regs = sxe2_get_regs, + .get_strings = sxe2_get_strings, + .get_ethtool_stats = sxe2_get_ethtool_stats, + .get_priv_flags = sxe2_ethtool_priv_flags_get, + .set_priv_flags = sxe2_ethtool_priv_flags_set, + .get_sset_count = sxe2_get_sset_count, + .get_ringparam = sxe2_get_ringparam, + .set_ringparam = sxe2_set_ringparam, + .get_coalesce = sxe2_get_coalesce, + .set_coalesce = sxe2_set_coalesce, + .get_per_queue_coalesce = sxe2_get_per_queue_coalesce, + .set_per_queue_coalesce = sxe2_set_per_queue_coalesce, + .get_channels = sxe2_channels_get, + .set_channels = sxe2_channels_set, + + 
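+ /* SFF module EEPROM access; the by-page op is only wired up when + * the running kernel provides it (compat-detected via + * SUPPORTED_ETHTOOL_EEPROM_BY_PAGE). */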
.get_module_info = sxe2_module_info_get, + .get_module_eeprom = sxe2_module_eeprom_get, +#ifdef SUPPORTED_ETHTOOL_EEPROM_BY_PAGE + .get_module_eeprom_by_page = sxe2_module_eeprom_get_by_page, +#endif + +#ifndef SXE2_HARDWARE_SIM + .self_test = sxe2_ethtool_selftest, +#endif + .get_pauseparam = sxe2_get_pauseparam, + .set_pauseparam = sxe2_set_pauseparam, + .get_rxnfc = sxe2_get_rxnfc, + .set_rxnfc = sxe2_set_rxnfc, + .get_rxfh_key_size = sxe2_get_rxft_key_size, + .get_rxfh_indir_size = sxe2_get_rxft_indir_size, + .get_rxfh = sxe2_get_rxfh, + .set_rxfh = sxe2_set_rxfh, + .set_phys_id = sxe2_set_phys_id, + .get_ts_info = sxe2_get_ts_info, + .flash_device = sxe2_flash_device, +#ifdef SUPPORT_ETHTOOL_GET_RMON_STATS + .get_rmon_stats = sxe2_get_rmon_stats, +#endif + + .get_msglevel = sxe2_get_msglevel, + .set_msglevel = sxe2_set_msglevel, + .get_link = ethtool_op_get_link, + + .get_fecparam = sxe2_get_fec, + .set_fecparam = sxe2_set_fec, + .get_link_ksettings = sxe2_get_link_ksettings, + .set_link_ksettings = sxe2_set_link_ksettings, +}; + +static const struct ethtool_ops sxe2_ethtool_ops_for_safe_mode = { +#ifdef SUPPORTED_COALESCE_PARAMS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE | + ETHTOOL_COALESCE_RX_USECS_HIGH, +#endif + .get_drvinfo = sxe2_get_drvinfo, + .get_regs_len = sxe2_get_regs_len, + .get_regs = sxe2_get_regs, + .get_msglevel = sxe2_get_msglevel, + .set_msglevel = sxe2_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sxe2_get_strings, + .get_ethtool_stats = sxe2_get_ethtool_stats, + .get_sset_count = sxe2_get_sset_count, + .get_ringparam = sxe2_get_ringparam, + .set_ringparam = sxe2_set_ringparam, + .get_channels = sxe2_channels_get, +}; + +void sxe2_ethtool_ops_set(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxe2_ethtool_ops; +} + +void sxe2_ethtool_ops_set_for_safe_mode(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxe2_ethtool_ops_for_safe_mode; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..a8c1650c50bf8e947ab9e3fc42a2ed3b0448abff --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ethtool.h @@ -0,0 +1,398 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_ethtool.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_ETHTOOL_H__ +#define __SXE2_ETHTOOL_H__ +#include + +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_netdev.h" +#include "sxe2_common.h" +#include "sxe2_vsi.h" +#include "sxe2_log.h" +#include "sxe2_msg.h" + +#define SXE2_PF_DOWN_ETHTOOL_BASE_SPEED (0xffffffff) +#define SXE2_LB_FRAME_SIZE (64) +#define SXE2_Q_STATS_LEN (2) +#define SXE2_STATS(_type, _name, _stats) \ + { \ + .stats_string = _name, \ + .sizeof_stats = sizeof_field(_type, _stats), \ + .stats_offset = offsetof(_type, _stats) \ + } +#define SXE2_VSI_SW_STATS(_name, _stats) \ + SXE2_STATS(struct sxe2_vsi_sw_stats, _name, _stats) +#define SXE2_PF_HW_STATS(_name, _stats) \ + SXE2_STATS(struct sxe2_pf_hw_stats, _name, _stats) +#define SXE2_VSI_HW_STATS(_name, _stats) \ + SXE2_STATS(struct sxe2_vsi_hw_stats, _name, _stats) +#define SXE2_PF_SW_STATS(_name, _stats) \ + SXE2_STATS(struct sxe2_pf_sw_stats, _name, _stats) + +struct sxe2_stats { + char stats_string[ETH_GSTRING_LEN]; + u32 sizeof_stats; + u32 stats_offset; +}; + +struct sxe2_eth_link { + u32 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 link_status; +}; +enum sxe2_flm_autoneg { FLM_DISAN = 0, FLM_ENAN = 1 }; + +enum sxe2_media_get_type { + SXE2_MEDIA_GET_NONE = 0, + SXE2_MEDIA_GET_UNKNOWN, + SXE2_MEDIA_GET_FIBER, + SXE2_MEDIA_GET_BASET, + SXE2_MEDIA_GET_BACKPLANE, + SXE2_MEDIA_GET_DA, + SXE2_MEDIA_GET_AUI, +}; + +enum sxe2_fw_link_speed_type { + SXE2_SET_LINK_SPEED_CFG_10G = 0, + SXE2_SET_LINK_SPEED_CFG_25G, + SXE2_SET_LINK_SPEED_CFG_50G, + SXE2_SET_LINK_SPEED_CFG_100G, + SXE2_SET_LINK_SPEED_CFG_AUTO = 15, + SXE2_SET_LINK_SPEED_CFG_MAX, +}; + +enum sxe2_priv_flag_index { + SXE2_ETHTOOL_PRIV_FLAG_LEGACY_RX, + SXE2_ETHTOOL_PRIV_FLAG_MDD_AUTO_RESET_VF, + SXE2_ETHTOOL_PRIV_FLAG_DCBX_AGENT, + SXE2_ETHTOOL_PRIV_FLAG_FNAV_TUNNEL, + SXE2_ETHTOOL_PRIV_FLAG_LINK_DOWN_ON_CLOSE, +}; + +struct sxe2_priv_flag { + char name[ETH_GSTRING_LEN]; + enum sxe2_adapter_flags adapter_flag_bitno; + enum sxe2_priv_flag_index priv_flag_bitno; +}; + +static const struct sxe2_priv_flag sxe2_gstrings_priv_flags[] = { + { "legacy-rx", SXE2_FLAG_LEGACY_RX_ENABLE, + SXE2_ETHTOOL_PRIV_FLAG_LEGACY_RX }, + { "mdd-auto-reset-vf", SXE2_FLAG_MDD_AUTO_RESET_VF, + SXE2_ETHTOOL_PRIV_FLAG_MDD_AUTO_RESET_VF }, + { "dcbx-agent", SXE2_FLAG_FW_DCBX_AGENT, + SXE2_ETHTOOL_PRIV_FLAG_DCBX_AGENT }, + { "fnav-tunnel", SXE2_FLAG_FNAV_TUNNEL_ENABLE, + SXE2_ETHTOOL_PRIV_FLAG_FNAV_TUNNEL }, + { "link-down-on-close", SXE2_FLAG_LINK_DOWN_ON_CLOSE, + SXE2_ETHTOOL_PRIV_FLAG_LINK_DOWN_ON_CLOSE }, +}; + +#define SXE2_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(sxe2_gstrings_priv_flags) +static const struct sxe2_stats sxe2_gstrings_vsi_sw_stats[] = { + SXE2_VSI_SW_STATS("rx_packets", rx_packets), + SXE2_VSI_SW_STATS("rx_bytes", rx_bytes), + SXE2_VSI_SW_STATS("tx_packets", tx_packets), + SXE2_VSI_SW_STATS("tx_bytes", tx_bytes), + + SXE2_VSI_SW_STATS("rx_csum_unnecessary", rx_csum_unnecessary), + SXE2_VSI_SW_STATS("rx_csum_none", rx_csum_none), + SXE2_VSI_SW_STATS("rx_csum_complete", rx_csum_complete), + SXE2_VSI_SW_STATS("rx_csum_unnecessary_inner", + rx_csum_unnecessary_inner), + SXE2_VSI_SW_STATS("rx_csum_err", rx_csum_err), + SXE2_VSI_SW_STATS("rx_lro_packets", rx_lro_packets), + SXE2_VSI_SW_STATS("rx_lro_bytes", rx_lro_bytes), + SXE2_VSI_SW_STATS("rx_lro_count", rx_lro_count), + SXE2_VSI_SW_STATS("rx_removed_vlan_packets", rx_vlan_strip), + SXE2_VSI_SW_STATS("rx_pkts_sw_drop", rx_pkts_sw_drop), + 
SXE2_VSI_SW_STATS("rx_buff_alloc_err", rx_buff_alloc_err), + SXE2_VSI_SW_STATS("rx_pg_alloc_fail", rx_pg_alloc_fail), + SXE2_VSI_SW_STATS("rx_page_alloc", rx_page_alloc), + SXE2_VSI_SW_STATS("rx_non_eop_descs", rx_non_eop_descs), + + SXE2_VSI_SW_STATS("rx_xdp_drop", rx_xdp_drop), + SXE2_VSI_SW_STATS("rx_xdp_redirect", rx_xdp_redirect), + SXE2_VSI_SW_STATS("rx_xdp_redirect_fail", rx_xdp_redirect_fail), + SXE2_VSI_SW_STATS("rx_xdp_pkts", rx_xdp_pkts), + SXE2_VSI_SW_STATS("rx_xdp_bytes", rx_xdp_bytes), + SXE2_VSI_SW_STATS("rx_xdp_pass", rx_xdp_pass), + SXE2_VSI_SW_STATS("rx_xdp_unknown", rx_xdp_unknown), + SXE2_VSI_SW_STATS("rx_xdp_tx_xmit", rx_xdp_tx_xmit), + SXE2_VSI_SW_STATS("rx_xdp_tx_xmit_fail", rx_xdp_tx_xmit_fail), + + SXE2_VSI_SW_STATS("rx_xsk_drop", rx_xsk_drop), + SXE2_VSI_SW_STATS("rx_xsk_redirect", rx_xsk_redirect), + SXE2_VSI_SW_STATS("rx_xsk_redirect_fail", rx_xsk_redirect_fail), + SXE2_VSI_SW_STATS("rx_xsk_packets", rx_xsk_packets), + SXE2_VSI_SW_STATS("rx_xsk_bytes", rx_xsk_bytes), + SXE2_VSI_SW_STATS("rx_xsk_pass", rx_xsk_pass), + SXE2_VSI_SW_STATS("rx_xsk_unknown", rx_xsk_unknown), + SXE2_VSI_SW_STATS("rx_xsk_tx_xmit", rx_xsk_tx_xmit), + SXE2_VSI_SW_STATS("rx_xsk_tx_xmit_fail", rx_xsk_tx_xmit_fail), + SXE2_VSI_SW_STATS("rx_pa_err", rx_pa_err), + + SXE2_VSI_SW_STATS("tx_tso_packets", tx_tso_packets), + SXE2_VSI_SW_STATS("tx_tso_bytes", tx_tso_bytes), + SXE2_VSI_SW_STATS("tx_tso_linearize_chk", tx_tso_linearize_chk), + SXE2_VSI_SW_STATS("tx_added_vlan_packets", tx_vlan_insert), + SXE2_VSI_SW_STATS("tx_csum_none", tx_csum_none), + SXE2_VSI_SW_STATS("tx_csum_partial", tx_csum_partial), + SXE2_VSI_SW_STATS("tx_csum_partial_inner", tx_csum_partial_inner), + SXE2_VSI_SW_STATS("tx_stopped", tx_busy), + SXE2_VSI_SW_STATS("tx_dropped", tx_queue_dropped), + SXE2_VSI_SW_STATS("tx_xmit_more", tx_xmit_more), + SXE2_VSI_SW_STATS("tx_wake", tx_restart), + SXE2_VSI_SW_STATS("tx_linearize", tx_linearize), +}; + +static const struct sxe2_stats sxe2_gstrings_pf_hw_stats[] = { + SXE2_PF_HW_STATS("tx_packets_phy", tx_frame_good), + SXE2_PF_HW_STATS("rx_packets_phy", rx_frame_good), + SXE2_PF_HW_STATS("tx_bytes_phy", tx_bytes_good), + SXE2_PF_HW_STATS("rx_bytes_phy", rx_bytes_good), + + SXE2_PF_HW_STATS("tx_multicast_phy", tx_multicast_good), + SXE2_PF_HW_STATS("rx_multicast_phy", rx_multicast_good), + SXE2_PF_HW_STATS("tx_broadcast_phy", tx_broadcast_good), + SXE2_PF_HW_STATS("rx_broadcast_phy", rx_broadcast_good), + SXE2_PF_HW_STATS("rx_unicast_phy", rx_unicast_good), + + SXE2_PF_HW_STATS("tx_multicast_all_phy", tx_multicast), + SXE2_PF_HW_STATS("tx_broadcast_all_phy", tx_broadcast), + SXE2_PF_HW_STATS("tx_unicast_all_phy", tx_unicast), + + SXE2_PF_HW_STATS("tx_packets_all_phy", tx_frame_good_bad), + SXE2_PF_HW_STATS("rx_packets_all_phy", rx_frame_good_bad), + SXE2_PF_HW_STATS("tx_bytes_all_phy", tx_bytes_good_bad), + SXE2_PF_HW_STATS("rx_bytes_all_phy", rx_byte_good_bad), + + SXE2_PF_HW_STATS("tx_64_bytes_phy", tx_size_64), + SXE2_PF_HW_STATS("tx_65_to_127_bytes_phy", tx_size_65_127), + SXE2_PF_HW_STATS("tx_128_to_255_bytes_phy", tx_size_128_255), + SXE2_PF_HW_STATS("tx_256_to_511_bytes_phy", tx_size_256_511), + SXE2_PF_HW_STATS("tx_512_to_1023_bytes_phy", tx_size_512_1023), + SXE2_PF_HW_STATS("tx_1024_to_1522_bytes_phy", tx_size_1024_1522), + SXE2_PF_HW_STATS("tx_1523_to_max_bytes_phy", tx_size_1523_max), + SXE2_PF_HW_STATS("rx_64_bytes_phy", rx_size_64), + + SXE2_PF_HW_STATS("rx_65_to_127_bytes_phy", rx_size_65_127), + SXE2_PF_HW_STATS("rx_128_to_255_bytes_phy", rx_size_128_255), + 
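+ /* Remaining Rx frame-size histogram buckets. */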
SXE2_PF_HW_STATS("rx_256_to_511_bytes_phy", rx_size_256_511), + SXE2_PF_HW_STATS("rx_512_to_1023_bytes_phy", rx_size_512_1023), + SXE2_PF_HW_STATS("rx_1024_to_1522_bytes_phy", rx_size_1024_1522), + SXE2_PF_HW_STATS("rx_1523_to_max_bytes_phy", rx_size_1523_max), + + SXE2_PF_HW_STATS("tx_vlan_packets_good_phy", tx_vlan_packet_good), + SXE2_PF_HW_STATS("rx_vlan_packets_phy", rx_vlan_packets), + + SXE2_PF_HW_STATS("rx_pcs_symbol_err_phy", rx_pcs_symbol_err_phy), + SXE2_PF_HW_STATS("rx_corrected_bits_phy", rx_corrected_bits_phy), + SXE2_PF_HW_STATS("rx_undersize_pkts_phy", rx_undersize_good), + SXE2_PF_HW_STATS("rx_fragments_phy", rx_runt_error), + SXE2_PF_HW_STATS("rx_crc_errors_phy", rx_crc_errors), + SXE2_PF_HW_STATS("rx_jabbers_phy", rx_jabbers), + SXE2_PF_HW_STATS("rx_oversize_good_phy", rx_oversize_good), + SXE2_PF_HW_STATS("rx_illegal_bytes_phy", rx_illegal_bytes), + SXE2_PF_HW_STATS("rx_in_range_len_errs_phy", rx_len_errors), + SXE2_PF_HW_STATS("rx_out_of_range_len_phy", rx_out_of_range_errors), + SXE2_PF_HW_STATS("rx_oversize_pkts_phy", rx_oversize_pkts_phy), + SXE2_PF_HW_STATS("rx_symbol_err_phy", rx_symbol_err), + SXE2_PF_HW_STATS("rx_out_of_buffer", rx_out_of_buffer), + SXE2_PF_HW_STATS("rx_discards_phy", rx_discards_phy), + SXE2_PF_HW_STATS("tx_dropped_link_down_phy", tx_dropped_link_down), + SXE2_PF_HW_STATS("tx_underflow_error_phy", tx_underflow_error), + SXE2_PF_HW_STATS("tx_pause_ctrl_phy", tx_pause_frame), + SXE2_PF_HW_STATS("rx_pause_ctrl_phy", rx_pause_frame), + SXE2_PF_HW_STATS("rx_err_lane_0_phy", rx_err_lane_0_phy), + SXE2_PF_HW_STATS("rx_err_lane_1_phy", rx_err_lane_1_phy), + SXE2_PF_HW_STATS("rx_err_lane_2_phy", rx_err_lane_2_phy), + SXE2_PF_HW_STATS("rx_err_lane_3_phy", rx_err_lane_3_phy), + SXE2_PF_HW_STATS("fnav_match", fnav_match), +}; + +static const struct sxe2_stats sxe2_gstrings_pf_sw_stats[] = { + SXE2_PF_SW_STATS("fnav_prgm_err", fnav_prgm_err), +}; + +static const struct sxe2_stats sxe2_gstrings_vsi_hw_stats[] = { + SXE2_VSI_HW_STATS("rx_vport_unicast_packets", rx_vsi_unicast_packets), + SXE2_VSI_HW_STATS("rx_vport_bytes", rx_vsi_bytes), + SXE2_VSI_HW_STATS("tx_vport_unicast_packets", tx_vsi_unicast_packets), + SXE2_VSI_HW_STATS("tx_vport_bytes", tx_vsi_bytes), + SXE2_VSI_HW_STATS("rx_vport_multicast_packets", + rx_vsi_multicast_packets), + SXE2_VSI_HW_STATS("tx_vport_multicast_packets", + tx_vsi_multicast_packets), + SXE2_VSI_HW_STATS("rx_vport_broadcast_packets", + rx_vsi_broadcast_packets), + SXE2_VSI_HW_STATS("tx_vport_broadcast_packets", + tx_vsi_broadcast_packets), +}; + +#ifdef SUPPORT_ETHTOOL_GET_RMON_STATS +static const struct ethtool_rmon_hist_range sxe2_rmon_ranges[] = { + { 0, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 }, + { 512, 1023 }, { 1024, 1522 }, { 1523, 9728 }, {} +}; +#endif + +static inline u32 sxe2_q_stats_len(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + + return (vsi->txqs.q_cnt + vsi->rxqs.q_cnt) * SXE2_Q_STATS_LEN; +} + +static inline u32 sxe2_txq_stats_len(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + + return ((vsi->txqs.q_cnt) * + (sizeof(struct sxe2_txq_stats) / sizeof(u64))); +} + +static inline u32 sxe2_rxq_stats_len(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + + return ((vsi->rxqs.q_cnt) * + (sizeof(struct sxe2_rxq_stats) / sizeof(u64))); +} + +#define SXE2_PFC_STAT_FIELD(stat, dir) \ + 
(sizeof_field(struct sxe2_dcb_stats, \ + curr_pause_stats.prio_x##stat##_##dir)) +#define SXE2_PFC_STATS_LEN \ + ((SXE2_PFC_STAT_FIELD(off, rx) + SXE2_PFC_STAT_FIELD(on, rx) + \ + SXE2_PFC_STAT_FIELD(off, tx) + SXE2_PFC_STAT_FIELD(on, tx)) / \ + sizeof(__le64)) + +#define SXE2_VSI_SW_STATS_LEN ARRAY_SIZE(sxe2_gstrings_vsi_sw_stats) +#define SXE2_PF_HW_STATS_LEN ARRAY_SIZE(sxe2_gstrings_pf_hw_stats) +#define SXE2_VSI_HW_STATS_LEN ARRAY_SIZE(sxe2_gstrings_vsi_hw_stats) +#define SXE2_PF_SW_STATS_LEN ARRAY_SIZE(sxe2_gstrings_pf_sw_stats) + +#define SXE2_IPSEC_STATS_LEN \ + ((sizeof_field(struct sxe2_queue_ipsec_stats, tx_error_invalid_sp) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, \ + tx_error_invalid_state) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, tx_offload_success) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, rx_error_invalid_sp) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, \ + rx_error_invalid_state) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, rx_error_decrypt_fail) + \ + sizeof_field(struct sxe2_queue_ipsec_stats, rx_offload_success)) / \ + sizeof(__le64)) + +#define SXE2_ALL_STATS_LEN(n) \ + ((SXE2_PF_HW_STATS_LEN + SXE2_VSI_HW_STATS_LEN + IEEE_8021QAZ_MAX_TCS + \ + SXE2_VSI_SW_STATS_LEN + SXE2_PFC_STATS_LEN + SXE2_IPSEC_STATS_LEN + \ + SXE2_PF_SW_STATS_LEN + sxe2_q_stats_len(n)) + \ + sxe2_rxq_stats_len(n) + sxe2_txq_stats_len(n)) + +#define sxe2_for_each_prioirty(i) \ + for ((i) = 0; (i) < IEEE_8021Q_MAX_PRIORITIES; (i)++) + +enum sxe2_ethtool_test_id { + SXE2_ETH_TEST_REG = 0, + SXE2_ETH_TEST_INTR, + SXE2_ETH_TEST_LOOP, + SXE2_ETH_TEST_LINK, +}; + +static const u32 sxe2_regs_dump_list[] = { SXE2_PF_INT_OICR_ENABLE, + SXE2_PF_INT_TQCTL(0), + SXE2_PF_INT_RQCTL(0) }; + +void sxe2_hw_pf_stats_update(struct sxe2_adapter *adapter); + +void sxe2_hw_vsi_stats_update(struct sxe2_vsi *vsi); + +void sxe2_stats_update(struct sxe2_adapter *adapter); +void sxe2_repr_vf_vsis_stats_acculate_update(struct sxe2_adapter *adapter); + +void sxe2_ethtool_ops_set(struct net_device *netdev); + +void sxe2_ethtool_ops_set_for_safe_mode(struct net_device *netdev); + +void sxe2_ethtool_selftest(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data); +int sxe2_ethtool_selftest_count(struct net_device *netdev); +void sxe2_ethtool_selftest_strings(struct net_device *netdev, u8 *data); + +s32 sxe2_vsi_qs_reassign(struct net_device *netdev, + struct ethtool_channels *ch); + +s32 sxe2_fwc_sff_eeprom_get(struct sxe2_adapter *adapter, bool is_qsfp, + u16 bus_addr, u16 page, u16 offset, u16 data_len, + struct sxe2_sfp_resp *sff_value); + +void __sxe2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo, struct sxe2_adapter *adapter); + +void __sxe2_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void __sxe2_repr_get_strings(struct net_device *netdev, u32 stringset, + u8 *data); + +void __sxe2_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data, struct sxe2_vsi *vsi); + +void __sxe2_repr_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data, struct sxe2_vsi *vsi); + +void sxe2_stop_lfc(struct sxe2_adapter *adapter); + +void sxe2_sw_vsi_stats_update(struct sxe2_vsi *vsi); + +s32 sxe2_fwc_get_pf_stats(struct sxe2_adapter *adapter); + +s32 sxe2_link_set_fc_pasist(struct sxe2_adapter *adapter, u8 rx_en, u8 tx_en); + +s32 sxe2_link_set_fec_pasist(struct sxe2_adapter *adapter, u8 fec); + +u32 sxe2_speed_switch_set_configure(u32 speed); + +s32 
sxe2_get_support_speed_ability(struct sxe2_adapter *adapter, + struct support_speed_ability_mode *speed_ability); + +s32 sxe2_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks); + +s32 sxe2_get_link_configure(struct sxe2_adapter *adapter, + struct flm_ethtool_get_link_resp *link_cfg); + +s32 sxe2_set_link_autoneg_en(struct sxe2_adapter *adapter, u32 an_en); + +s32 sxe2_link_set_fc_configure(struct sxe2_adapter *adapter, u8 tx_fc, + u8 rx_fc); + +u32 sxe2_speed_dut_switch_cfg(s32 speed); + +s32 sxe2_get_cur_link_state(struct sxe2_adapter *adapter, + struct ethtool_flm_link_info *currect_info); + +s32 sxe2_link_get_pasist_info(struct sxe2_adapter *adapter, struct flm_link_info_pasist *cfg); + +u32 sxe2_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks); + +#ifdef SXE2_SUPPORT_ACL +s32 sxe2_acl_add_rule_ethtool(struct sxe2_vsi *vsi, struct ethtool_rx_flow_spec *fsp); +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.c new file mode 100644 index 0000000000000000000000000000000000000000..f4711b9afc8a171d6242a12cb04b4a0e2043efb0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.c @@ -0,0 +1,5655 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_flow.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2_common.h" +#include "sxe2_log.h" +#include "sxe2_cmd.h" +#include "sxe2_mbx_public.h" +#include "sxe2_flow.h" +#include "sxe2_acl.h" + +#define SXE2_FWC_OG_PKG_BUF_MAX (4096) + +#define SXE2_FLOW_FIND_FLOW_COND_VSI (0x01) +#define SXE2_FLOW_FIND_FLOW_COND_FLD (0x02) +#define SXE2_FLOW_MAX_RECORD_ATTR_NUM (1024) +#define SXE2_FLOW_FV_SIZE (2) + +#define SXE2_SET_USED(x) ((void)(x)) + +static s32 sxe2_flow_vsi_move_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 vsig_idx); + +static s32 sxe2_flow_reless_tcam(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 tcam_idx); + +static s32 sxe2_flow_op_vsig_add_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsig_idx, + bool tail, struct list_head *op_list); +static s32 +sxe2_flow_op_adjust_vsig_tcams_priority(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx, struct list_head *op_list); + +static s32 sxe2_flow_op_creat_vsig_with_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsi_sw_idx, + struct list_head *op_list); + +static void sxe2_rss_delete_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + struct sxe2_flow_info_node *flow); + +STATIC u8 sxe2_blk_fv_cnt_get(enum sxe2_block_id blk) +{ + switch (blk) { + case SXE2_HW_BLOCK_ID_FNAV: + return SXE2_FNAV_FV_CNT; + case SXE2_HW_BLOCK_ID_RSS: + return SXE2_RSS_FV_CNT; + case SXE2_HW_BLOCK_ID_ACL: + return SXE2_ACL_FV_CNT; + default: + return 0; + }; +} + +STATIC u8 sxe2_blk_def_mask_fv_cnt_get(enum sxe2_block_id blk) +{ + switch (blk) { + case SXE2_HW_BLOCK_ID_FNAV: + return SXE2_FNAV_DEFAULT_MASK_CNT; + case SXE2_HW_BLOCK_ID_RSS: + return 0; + default: + return 0; + }; +} + +STATIC u8 sxe2_blk_prof_cnt_get(enum sxe2_block_id blk) +{ + switch (blk) { + case SXE2_HW_BLOCK_ID_FNAV: + return SXE2_FNAV_PROF_CNT; + case SXE2_HW_BLOCK_ID_RSS: + return SXE2_RSS_PROF_CNT; + case SXE2_HW_BLOCK_ID_ACL: + return SXE2_ACL_PROF_CNT; + default: + return 0; + }; +} + +STATIC void *sxe2_section_buf_alloc(struct sxe2_fwc_prof_buf 
*buf, + enum sxe2_class_id type, u16 size) +{ + u16 data_end; + u16 entry_cnt; + void *p; + + data_end = le16_to_cpu(buf->data_end); + entry_cnt = le16_to_cpu(buf->entry_cnt); + + if (data_end + size > SXE2_OG_BUF_SIZE) + return NULL; + + p = (u8 *)buf + data_end; + + buf->sect[entry_cnt].type = type; + buf->sect[entry_cnt].offset = cpu_to_le16(data_end); + buf->sect[entry_cnt].size = cpu_to_le16(size); + + entry_cnt++; + data_end += size; + buf->data_end = cpu_to_le16(data_end); + buf->entry_cnt = cpu_to_le16(entry_cnt); + + return p; +} + +STATIC s32 sxe2_add_xlt2_entry(struct sxe2_fwc_prof_buf *buf, enum sxe2_block_id blk, + struct list_head *chgs) +{ + s32 ret = 0; + struct sxe2_og_chg *tmp; + struct sxe2_fwc_xlt2_entry *entry; + + SXE2_SET_USED(blk); + + list_for_each_entry(tmp, chgs, l_entry) { + if (tmp->type == SXE2_OG_CHG_TYPE_XLT2) { + entry = (struct sxe2_fwc_xlt2_entry *) + sxe2_section_buf_alloc(buf, + SXE2_XLT2_CLASS_ID, + sizeof(struct sxe2_fwc_xlt2_entry)); + if (!entry) + return -ENOSPC; + entry->vsi_hw_idx = cpu_to_le16(tmp->info.xlt2.vsi_hw_idx); + entry->vsig = cpu_to_le16(tmp->info.xlt2.vsig); + LOG_DEBUG("add vsi[hw:%u] to vsig[%u]\n", + tmp->info.xlt2.vsi_hw_idx, tmp->info.xlt2.vsig); + } + } + + return ret; +} + +STATIC s32 sxe2_add_tcam_entry(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_fwc_prof_buf *buf, enum sxe2_block_id blk, + struct list_head *chgs) +{ + s32 ret = 0; + s32 i; + struct sxe2_og_chg *tmp; + struct sxe2_fwc_tcam_entry *entry; + struct sxe2_prof_tcam_entry *tcam_entry; + + SXE2_SET_USED(blk); + + list_for_each_entry(tmp, chgs, l_entry) { + if (tmp->type == SXE2_OG_CHG_TYPE_TCAM) { + entry = (struct sxe2_fwc_tcam_entry *)sxe2_section_buf_alloc( + buf, SXE2_TCAM_CLASS_ID, + sizeof(struct sxe2_fwc_tcam_entry)); + if (!entry) + return -ENOSPC; + entry->addr = cpu_to_le16(tmp->info.tcam.tcam_idx); + entry->prof_id = tmp->info.tcam.prof_id; + tcam_entry = &ppp_ctxt->tcam_entry[tmp->info.tcam.tcam_idx]; + memcpy(entry->key, tcam_entry->key, SXE2_TCAM_KEY_LEN); + LOG_DEBUG("add tcam[%u]\n", tmp->info.tcam.tcam_idx); + for (i = 0; i < SXE2_TCAM_KEY_LEN; i++) + LOG_DEBUG("\tkey[%d]: %u\n", i, tcam_entry->key[i]); + LOG_DEBUG("\tprofile:%u\n", tmp->info.tcam.prof_id); + } + } + + return ret; +} + +STATIC s32 sxe2_add_es_entry(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_fwc_prof_buf *buf, enum sxe2_block_id blk, + struct list_head *chgs) +{ + s32 ret = 0; + u8 i = 0; + struct sxe2_og_chg *tmp; + struct sxe2_fwc_es_entry *entry; + struct sxe2_flow_hw_prof *hw_prof; + + list_for_each_entry(tmp, chgs, l_entry) { + if (tmp->type == SXE2_OG_CHG_TYPE_ES) { + entry = (struct sxe2_fwc_es_entry *)sxe2_section_buf_alloc( + buf, SXE2_EXTRACTOR_CLASS_ID, + sizeof(struct sxe2_fwc_es_entry)); + if (!entry) + return -ENOSPC; + entry->prof_id = tmp->info.es.prof_id; + entry->cnt = sxe2_blk_fv_cnt_get(blk); + if (!entry->cnt) + return -EINVAL; + LOG_DEBUG("update profile[%u]\n", tmp->info.es.prof_id); + hw_prof = &ppp_ctxt->hw_prof[tmp->info.es.prof_id]; + for (i = 0; i < entry->cnt; i++) { + entry->fv[i].prot_id = hw_prof->fv[i].prot_id; + entry->fv[i].off = cpu_to_le16(hw_prof->fv[i].off); + LOG_DEBUG("\tfv[%u], protocol: %u, off: %d\n", i, + hw_prof->fv[i].prot_id, + hw_prof->fv[i].off); + } + } + } + + return ret; +} + +s32 sxe2_fwc_update_profile(struct sxe2_ppp_common_ctxt *ppp_ctxt, + enum sxe2_block_id blk, struct list_head *chgs) +{ + s32 ret = 0; + u16 data_end; + u16 xlt2_cnt = 0; + u16 tcam_cnt = 0; + u16 es_cnt = 0; + u16 entry_cnt = 0; + struct 
sxe2_og_chg *tmp; + struct sxe2_fwc_prof_pkg *pkg = NULL; + u16 buf_size = 0; + struct sxe2_fwc_prof_buf *buf; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + buf_size += sizeof(struct sxe2_fwc_prof_buf); + list_for_each_entry(tmp, chgs, l_entry) { + switch (tmp->type) { + case SXE2_OG_CHG_TYPE_XLT2: + buf_size += sizeof(struct sxe2_fwc_xlt2_entry); + xlt2_cnt++; + entry_cnt++; + break; + case SXE2_OG_CHG_TYPE_TCAM: + buf_size += sizeof(struct sxe2_fwc_tcam_entry); + tcam_cnt++; + entry_cnt++; + break; + case SXE2_OG_CHG_TYPE_ES: + buf_size += sizeof(struct sxe2_fwc_es_entry); + es_cnt++; + entry_cnt++; + break; + default: + break; + } + } + buf_size += (u16)flex_array_size(buf, sect, entry_cnt); + + if (entry_cnt == 0 || buf_size > SXE2_OG_BUF_SIZE) { + LOG_DEBUG_BDF("have no valid entry cnt(%u) or buf_size(%u)!\n", + entry_cnt, buf_size); + goto l_out; + } + + pkg = devm_kzalloc(dev, sizeof(struct sxe2_fwc_prof_pkg) + buf_size, + GFP_KERNEL); + if (!pkg) { + LOG_ERROR_BDF("no memory!\n"); + ret = -ENOMEM; + goto l_out; + } + + pkg->blk = blk; + memset(pkg->buf, 0, buf_size); + buf = (struct sxe2_fwc_prof_buf *)pkg->buf; + + data_end = offsetof(struct sxe2_fwc_prof_buf, sect); + data_end += (u16)flex_array_size(buf, sect, entry_cnt); + buf->data_end = cpu_to_le16(data_end); + + if (xlt2_cnt) { + ret = sxe2_add_xlt2_entry(buf, blk, chgs); + if (ret != 0) { + LOG_ERROR_BDF("add xlt2 entry failed, ret: %d\n", ret); + goto l_out; + } + } + + if (tcam_cnt) { + ret = sxe2_add_tcam_entry(ppp_ctxt, buf, blk, chgs); + if (ret != 0) { + LOG_ERROR_BDF("add tcam entry failed, ret: %d\n", ret); + goto l_out; + } + } + + if (es_cnt) { + ret = sxe2_add_es_entry(ppp_ctxt, buf, blk, chgs); + if (ret != 0) { + LOG_ERROR_BDF("add es entry failed, ret: %d\n", ret); + goto l_out; + } + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_OG_CFG_UPDATE, pkg, + sizeof(struct sxe2_fwc_prof_pkg) + buf_size, NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("og config cmd fail, ret=%d\n", ret); + ret = -EIO; + } + +l_out: + if (pkg) + devm_kfree(dev, pkg); + return ret; +} + +STATIC s32 sxe2_fwc_process_tcam_batch(struct sxe2_adapter *adapter, + struct sxe2_fwc_tcam_idx_batch *tcam_batch, + u32 size) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_OG_TCAM_ENTRY_BATCH, tcam_batch, + size, tcam_batch, size); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("tcam entry batch cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_fwc_alloc_prof_id(struct sxe2_adapter *adapter, + enum sxe2_block_id blk, u16 *prof_id) +{ + s32 ret = 0; + struct sxe2_fwc_prof_id entry; + struct sxe2_cmd_params cmd = {0}; + + entry.blk = blk; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_OG_PROF_ID_ALLOC, &entry, + sizeof(struct sxe2_fwc_prof_id), &entry, + sizeof(struct sxe2_fwc_prof_id)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("profile id alloc cmd fail, ret=%d\n", ret); + ret = -EIO; + goto l_out; + } + + *prof_id = le16_to_cpu(entry.prof_id); + +l_out: + return ret; +} + +STATIC s32 sxe2_fwc_free_prof_id(struct sxe2_adapter *adapter, + enum sxe2_block_id blk, u16 prof_id) +{ + s32 ret = 0; + struct sxe2_fwc_prof_id entry; + struct sxe2_cmd_params cmd = {0}; + + entry.blk = blk; + entry.prof_id = cpu_to_le16(prof_id); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_OG_PROF_ID_FREE, 
&entry, + sizeof(struct sxe2_fwc_prof_id), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("profile id free cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_fwc_update_mask_sel(struct sxe2_adapter *adapter, + enum sxe2_block_id blk, u16 prof_id, + u32 mask_sel) +{ + s32 ret = 0; + struct sxe2_fwc_mask_sel entry; + struct sxe2_cmd_params cmd = {0}; + + entry.blk = blk; + entry.prof_id = cpu_to_le16(prof_id); + entry.mask_sel = cpu_to_le32(mask_sel); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_OG_MASK_SEL_UPDATE, &entry, + sizeof(struct sxe2_fwc_mask_sel), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("mask sel update cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static inline bool +sxe2_flow_compare_dissector_fld(struct sxe2_flow_dissector_info *dis0, + struct sxe2_flow_dissector_info *dis1) +{ + return (bitmap_equal(dis0->headers, dis1->headers, SXE2_FLOW_HDR_MAX) && + bitmap_equal(dis0->fields, dis1->fields, SXE2_FLOW_FLD_ID_MAX)); +} + +static inline bool +sxe2_flow_compare_dissector_raw(struct sxe2_flow_dissector_info *dis0, + struct sxe2_flow_dissector_info *dis1) +{ + if (dis0->raw_cnt != dis1->raw_cnt) + return false; + + if (memcmp(dis0->raw, dis1->raw, sizeof(dis0->raw))) + return false; + + return true; +} + +static inline bool sxe2_flow_compare_flow(struct sxe2_flow_info_node *flow0, + struct sxe2_flow_info_node *flow1) +{ + u16 i; + bool ret = false; + + if (flow0->dissector_cnt != flow1->dissector_cnt) + goto l_end; + + for (i = 0; i < flow0->dissector_cnt; i++) { + if (!sxe2_flow_compare_dissector_fld(&flow0->dissectors[i], + &flow1->dissectors[i])) + goto l_end; + + if (!sxe2_flow_compare_dissector_raw(&flow0->dissectors[i], + &flow1->dissectors[i])) + goto l_end; + } + + ret = true; +l_end: + return ret; +} + +STATIC u16 sxe2_flow_alloc_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + u16 i; + + for (i = 1; i < SXE2_MAX_VSIG_NUM; i++) { + if (!ppp_ctxt->vsig[i].used) { + INIT_LIST_HEAD(&ppp_ctxt->vsig[i].associated_flow_list); + ppp_ctxt->vsig[i].used = true; + goto l_end; + } + } + + i = SXE2_PPP_DEFAULT_VSIG_IDX; +l_end: + return i; +} + +STATIC s32 sxe2_flow_alloc_hw_prof_id(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 *prof_id) +{ + s32 ret; + u16 prof_id_new; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + ret = sxe2_fwc_alloc_prof_id(ppp_ctxt->adapter, ppp_ctxt->block_id, + &prof_id_new); + if (ret != 0) { + LOG_DEBUG_BDF("failed to alloc block[%u] new hw prof, ret:%d\n", + ppp_ctxt->block_id, ret); + goto l_end; + } + + *prof_id = (u8)prof_id_new; + LOG_DEBUG_BDF("alloc block[%u] hw prof, id = %u.\n", ppp_ctxt->block_id, + prof_id_new); +l_end: + return ret; +} + +static s32 sxe2_flow_free_hw_prof_id(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + ret = sxe2_fwc_free_prof_id(ppp_ctxt->adapter, ppp_ctxt->block_id, prof_id); + LOG_DEBUG_BDF("free block[%u] hw prof id[%u], ret:%d.\n", ppp_ctxt->block_id, + prof_id, ret); + + return ret; +} + +STATIC s32 sxe2_flow_hw_prof_inc_ref(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (prof_id >= ppp_ctxt->hw_prof_num) { + ret = -EINVAL; + LOG_ERROR_BDF("failed to inc hw prof ref.id:%u >= %u.\n", prof_id, + ppp_ctxt->hw_prof_num); + goto l_end; + } + + ppp_ctxt->hw_prof[prof_id].ref_cnt++; +l_end: + return ret; +} + +static void 
sxe2_flow_update_hw_prof_fv(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id, struct sxe2_fv_word *fv) +{ + if (!fv) { + memset(ppp_ctxt->hw_prof[prof_id].fv, 0, + ppp_ctxt->hw_fv_num * sizeof(*fv)); + ppp_ctxt->hw_prof[prof_id].avail = false; + } else { + memcpy(ppp_ctxt->hw_prof[prof_id].fv, fv, + ppp_ctxt->hw_fv_num * sizeof(*fv)); + } +} + +s32 sxe2_flow_update_fv_mask_sel(struct sxe2_ppp_common_ctxt *ppp_ctxt, u8 prof_id, + u32 mask_sel) +{ + s32 ret; + + ret = sxe2_fwc_update_mask_sel(ppp_ctxt->adapter, ppp_ctxt->block_id, + prof_id, mask_sel); + if (ret == 0) + ppp_ctxt->hw_prof[prof_id].fv_masks_sel = mask_sel; + + return ret; +} + +STATIC s32 sxe2_flow_acl_disassoc_prof_scen(struct sxe2_adapter *adapter, + u16 prof_id) +{ + s32 ret; + struct sxe2_fwc_acl_prof_sel_base_req act_scen_dealloc_req; + + memset(&act_scen_dealloc_req, 0, sizeof(act_scen_dealloc_req)); + + act_scen_dealloc_req.prof_id = cpu_to_le16(prof_id); + act_scen_dealloc_req.pf_scenario_num[adapter->pf_idx] = + SXE2_ACL_INVALID_PF_SCEN_NUM; + ret = sxe2_fwc_acl_set_scen_prof(adapter, &act_scen_dealloc_req); + if (ret) + goto l_end; + +l_end: + return ret; +} + +STATIC s32 sxe2_flow_hw_prof_dec_ref(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (prof_id >= ppp_ctxt->hw_prof_num) { + ret = -EINVAL; + LOG_ERROR_BDF("failed to dec hw prof ref.id:%u >= %u.\n", prof_id, + ppp_ctxt->hw_prof_num); + goto l_end; + } + + if (ppp_ctxt->hw_prof[prof_id].ref_cnt > 0) { + ppp_ctxt->hw_prof[prof_id].ref_cnt--; + if (ppp_ctxt->hw_prof[prof_id].ref_cnt == 0) { + if (ppp_ctxt->block_id == SXE2_HW_BLOCK_ID_ACL) { + ret = sxe2_flow_acl_disassoc_prof_scen(ppp_ctxt->adapter, + prof_id); + if (ret) { + LOG_ERROR("Failed to disassoc prof:%u scen.", + prof_id); + goto l_end; + } + } + sxe2_flow_update_hw_prof_fv(ppp_ctxt, prof_id, NULL); + ret = sxe2_flow_free_hw_prof_id(ppp_ctxt, prof_id); + } + } else { + LOG_WARN_BDF("dec hw prof ref is %u.\n", + ppp_ctxt->hw_prof[prof_id].ref_cnt); + } + +l_end: + return ret; +} + +s32 sxe2_flow_find_vsig_with_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 *vsig_idx) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (!vsig_idx || vsi_sw_idx >= SXE2_MAX_VSI_NUM) { + LOG_ERROR_BDF("invalid param, vsig: %p, vsi: %u\n", vsig_idx, + vsi_sw_idx); + ret = -EINVAL; + goto l_end; + } + *vsig_idx = ppp_ctxt->vsi_to_grp[vsi_sw_idx].idx; +l_end: + return ret; +} + +STATIC bool +sxe2_flow_compare_associated_flow_list(struct list_head *associated_flow_list0, + struct list_head *associated_flow_list1) +{ + struct sxe2_associated_flow_node *flow0; + struct sxe2_associated_flow_node *flow1; + u16 count0 = 0; + u16 count1 = 0; + bool ret = false; + + list_for_each_entry(flow0, associated_flow_list0, l_node) { + count0++; + } + list_for_each_entry(flow1, associated_flow_list1, l_node) { + count1++; + } + if (!count0 || count0 != count1) + goto l_end; + + flow0 = list_first_entry(associated_flow_list0, + struct sxe2_associated_flow_node, l_node); + flow1 = list_first_entry(associated_flow_list1, + struct sxe2_associated_flow_node, l_node); + while (count0--) { + if (!sxe2_flow_compare_flow(flow0->flow_ptr, flow1->flow_ptr)) + goto l_end; + flow0 = list_next_entry(flow0, l_node); + flow1 = list_next_entry(flow1, l_node); + } + ret = true; + +l_end: + return ret; +} + +static s32 +sxe2_flow_find_vsig_with_associated_flow_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct list_head 
*associated_flow_list, + u16 *vsig_idx) +{ + s32 ret = -ENOENT; + u16 i; + + for (i = 0; i < SXE2_MAX_VSIG_NUM; i++) { + if (ppp_ctxt->vsig[i].used && + sxe2_flow_compare_associated_flow_list(&ppp_ctxt->vsig[i]. + associated_flow_list, + associated_flow_list)) { + *vsig_idx = i; + ret = 0; + break; + } + } + + return ret; +} + +STATIC bool sxe2_flow_check_flow_in_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsig_idx) +{ + struct sxe2_associated_flow_node *entry; + bool find = false; + + list_for_each_entry(entry, &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) { + if (sxe2_flow_compare_flow(entry->flow_ptr, flow)) { + find = true; + break; + } + } + + return find; +} + +STATIC s32 sxe2_flow_op_hw_prof(struct sxe2_ppp_common_ctxt *ppp_ctxt, u8 hw_prof_id, + struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_og_chg *chg; + s32 ret = 0; + + if (ppp_ctxt->hw_prof[hw_prof_id].avail) { + ret = 0; + goto l_end; + } + + chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL); + if (!chg) { + LOG_ERROR_BDF("failed to alloc chg op memory.\n"); + ret = -ENOMEM; + goto l_end; + } + + chg->type = SXE2_OG_CHG_TYPE_ES; + chg->info.es.prof_id = hw_prof_id; + list_add(&chg->l_entry, op_list); + + ppp_ctxt->hw_prof[hw_prof_id].avail = true; + +l_end: + return ret; +} + +STATIC s32 sxe2_flow_get_associated_flow_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx, + struct list_head *associated_flow_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *tmp; + struct sxe2_associated_flow_node *flow; + s32 ret = 0; + + list_for_each_entry(flow, &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) { + tmp = (struct sxe2_associated_flow_node *)devm_kmemdup(dev, + (void *)flow, + sizeof(*flow), + GFP_KERNEL); + if (!tmp) { + LOG_ERROR_BDF("failed to alloc vsig flow memory.\n"); + ret = -ENOMEM; + goto l_end; + } + list_add_tail(&tmp->l_node, associated_flow_list); + } + +l_end: + if (ret != 0) { + list_for_each_entry_safe(flow, tmp, associated_flow_list, l_node) { + list_del(&flow->l_node); + devm_kfree(dev, flow); + } + } + return ret; +} + +STATIC void +sxe2_flow_list_add_with_priority(struct list_head *head, + struct sxe2_associated_flow_node *new_node) +{ + struct sxe2_associated_flow_node *pos; + + list_for_each_entry(pos, head, l_node) + { + if (pos->flow_ptr->priority <= new_node->flow_ptr->priority) { + list_add_tail(&new_node->l_node, &pos->l_node); + return; + } + } + + list_add_tail(&new_node->l_node, head); +} + +STATIC s32 sxe2_flow_add_flow_to_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct list_head *associated_flow_list, + struct sxe2_flow_info_node *flow) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *associated_flow; + s32 ret = 0; + + associated_flow = devm_kzalloc(dev, sizeof(*associated_flow), GFP_KERNEL); + if (!associated_flow) { + LOG_ERROR_BDF("failed to alloc vsig flow memory.\n"); + ret = -ENOMEM; + goto l_end; + } + + associated_flow->flow_ptr = flow; + + sxe2_flow_list_add_with_priority(associated_flow_list, associated_flow); + +l_end: + return ret; +} + +s32 sxe2_flow_op_move_vsi_to_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 vsig_idx, + struct list_head *op_list) +{ + struct sxe2_adapter *adapter = 
ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_og_chg *chg; + s32 ret = 0; + u16 or_vsig_idx; + + chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL); + if (!chg) { + LOG_ERROR_BDF("failed to alloc chg op memory.\n"); + ret = -ENOMEM; + goto l_end; + } + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &or_vsig_idx); + if (ret != 0) { + devm_kfree(dev, chg); + goto l_end; + } + + ret = sxe2_flow_vsi_move_vsig(ppp_ctxt, vsi_sw_idx, vsig_idx); + if (ret != 0) { + devm_kfree(dev, chg); + goto l_end; + } + + chg->type = SXE2_OG_CHG_TYPE_XLT2; + chg->info.xlt2.vsi_hw_idx = adapter->vsi_ctxt.vsi[vsi_sw_idx]->idx_in_dev; + chg->info.xlt2.vsig = vsig_idx; + list_add(&chg->l_entry, op_list); + +l_end: + LOG_INFO_BDF("move vsi[%u](hw_idx:%u) to vsig[%u] ret:%d.\n", vsi_sw_idx, + adapter->vsi_ctxt.vsi[vsi_sw_idx]->idx_in_dev, vsig_idx, ret); + return ret; +} + +static s32 sxe2_flow_free_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, u16 vsig_idx) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *flow; + struct sxe2_associated_flow_node *tmp; + u16 vsi_sw_idx = 0; + u16 i; + + if (vsig_idx >= SXE2_MAX_VSIG_NUM) { + LOG_ERROR_BDF("invalid param, vsig: %u\n", vsig_idx); + ret = -EINVAL; + goto l_end; + } + + if (!ppp_ctxt->vsig[vsig_idx].used) { + LOG_ERROR_BDF("vsig: %d unused\n", vsig_idx); + ret = -ENOENT; + goto l_end; + } + ppp_ctxt->vsig[vsig_idx].used = false; + + list_for_each_entry_safe(flow, tmp, + &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) { + list_del(&flow->l_node); + devm_kfree(dev, flow); + } + INIT_LIST_HEAD(&ppp_ctxt->vsig[vsig_idx].associated_flow_list); + + for (i = 0; i < ppp_ctxt->vsig[vsig_idx].vsi_cnt; i++) { + vsi_sw_idx = (u16)find_next_bit(ppp_ctxt->vsig[vsig_idx].vsis, + SXE2_MAX_VSI_NUM, vsi_sw_idx); + if (vsi_sw_idx >= SXE2_MAX_VSI_NUM) { + LOG_ERROR_BDF("vsig %d vsi info[%d %d] error.\n", vsig_idx, + ppp_ctxt->vsig[vsig_idx].vsi_cnt, i); + ret = -EINVAL; + } + + clear_bit(vsi_sw_idx, ppp_ctxt->vsig[vsig_idx].vsis); + ppp_ctxt->vsi_to_grp[vsi_sw_idx].idx = SXE2_PPP_DEFAULT_VSIG_IDX; + } + ppp_ctxt->vsig[vsig_idx].vsi_cnt = 0; + +l_end: + return ret; +} + +static s32 +sxe2_flow_remove_associated_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_associated_flow_node *associated_flow) +{ + s32 ret = 0; + u16 i; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct sxe2_fwc_tcam_idx_batch *tcam_batch = NULL; + u16 batch_cnt = 0; + u32 size; + + for (i = 0; i < associated_flow->tcam_cnt; i++) { + if (associated_flow->tcams[i].used) + batch_cnt++; + } + if (batch_cnt == 0) { + ret = 0; + goto l_end; + } + size = sizeof(struct sxe2_fwc_tcam_idx_batch) + + batch_cnt * sizeof(struct sxe2_fwc_tcam_info); + tcam_batch = kzalloc(size, GFP_KERNEL); + if (!tcam_batch) { + ret = -ENOMEM; + goto l_end; + } + tcam_batch->blk = ppp_ctxt->block_id; + tcam_batch->tcam_cnt = cpu_to_le16(batch_cnt); + for (i = 0; i < associated_flow->tcam_cnt; i++) { + if (associated_flow->tcams[i].used) { + batch_cnt--; + tcam_batch->tcam_info[batch_cnt].action = + SXE2_FWC_TCAM_ACTION_DEL; + tcam_batch->tcam_info[batch_cnt].tcam_idx = + associated_flow->tcams[i].idx; + } + } + + ret = sxe2_fwc_process_tcam_batch(adapter, tcam_batch, size); + if (ret) { + LOG_ERROR_BDF("batch application for TCAM failed, ret=%d\n", ret); + goto l_end; + } + + for (i = 0; i < associated_flow->tcam_cnt; i++) { + if (associated_flow->tcams[i].used) { + 
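+ /* The firmware accepted the batched SXE2_FWC_TCAM_ACTION_DEL above; release the matching software TCAM bookkeeping. */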
(void)sxe2_flow_reless_tcam(ppp_ctxt, + associated_flow->tcams[i].idx); + associated_flow->tcams[i].used = false; + } + } + +l_end: + kfree(tcam_batch); + + return ret; +} + +STATIC s32 sxe2_flow_op_remove_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx, struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *flow; + struct sxe2_associated_flow_node *tmp; + struct sxe2_og_chg *chg; + s32 ret = 0; + u16 vsi_sw_idx = 0; + u16 i; + + list_for_each_entry_safe(flow, tmp, + &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) { + ret = sxe2_flow_remove_associated_flow(ppp_ctxt, flow); + if (ret != 0) { + LOG_ERROR_BDF("tcam entry free cmd done, ret=%d.\n", ret); + goto l_end; + } + list_del(&flow->l_node); + devm_kfree(dev, flow); + } + + for (i = 0; i < ppp_ctxt->vsig[vsig_idx].vsi_cnt; i++) { + vsi_sw_idx = (u16)find_next_bit(ppp_ctxt->vsig[vsig_idx].vsis, + SXE2_MAX_VSI_NUM, vsi_sw_idx); + if (vsi_sw_idx == SXE2_MAX_VSI_NUM) { + LOG_ERROR_BDF("vsig %d vsi info[%d %d] error.\n", vsig_idx, + ppp_ctxt->vsig[vsig_idx].vsi_cnt, i); + ret = -ENOENT; + goto l_end; + } + chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL); + if (!chg) { + ret = -ENOMEM; + goto l_end; + } + + chg->type = SXE2_OG_CHG_TYPE_XLT2; + chg->info.xlt2.vsi_hw_idx = + adapter->vsi_ctxt.vsi[vsi_sw_idx]->idx_in_dev; + chg->info.xlt2.vsig = SXE2_PPP_DEFAULT_VSIG_IDX; + + list_add(&chg->l_entry, op_list); + } + + ret = sxe2_flow_free_vsig(ppp_ctxt, vsig_idx); +l_end: + LOG_DEBUG_BDF("remove vsig[%u] ret:%d\n", vsig_idx, ret); + return ret; +} + +static s32 sxe2_flow_op_crt_vsig_assoc_flow_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, + struct list_head *associated_flow_list, + u16 *vsig_idx, + struct list_head *op_list) +{ + struct sxe2_associated_flow_node *associated_flow; + s32 ret = 0; + u16 vsig_idx_tmp; + u16 or_vsig_idx; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &or_vsig_idx); + if (ret != 0) + goto l_end; + + vsig_idx_tmp = sxe2_flow_alloc_vsig(ppp_ctxt); + if (vsig_idx_tmp == SXE2_PPP_DEFAULT_VSIG_IDX) { + ret = -EIO; + goto l_end; + } + + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, vsig_idx_tmp, + op_list); + if (ret != 0) + goto l_free; + + list_for_each_entry(associated_flow, associated_flow_list, l_node) + { + ret = sxe2_flow_op_vsig_add_flow(ppp_ctxt, associated_flow->flow_ptr, + vsig_idx_tmp, true, op_list); + if (ret != 0) { + LOG_ERROR_BDF("flow op vsig add flow failed, dst_vsig=%u, " + "ret=%d.\n", + vsig_idx_tmp, ret); + goto l_move; + } + } + + *vsig_idx = vsig_idx_tmp; +l_end: + LOG_DEBUG_BDF("create new vsig[%u](vsi:%u) with flow list, ret:%d.\n", + *vsig_idx, vsi_sw_idx, ret); + return ret; +l_move: + (void)sxe2_flow_vsi_move_vsig(ppp_ctxt, vsi_sw_idx, or_vsig_idx); +l_free: + (void)sxe2_flow_free_vsig(ppp_ctxt, vsig_idx_tmp); + goto l_end; +} + +STATIC s32 sxe2_flow_find_vsig_with_only_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 *vsig_idx) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *tmp; + struct list_head associated_flow_list; + s32 ret = 0; + + INIT_LIST_HEAD(&associated_flow_list); + + tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + ret = -ENOMEM; + goto l_end; + } + tmp->flow_ptr = flow; + list_add(&tmp->l_node, 
&associated_flow_list); + + ret = sxe2_flow_find_vsig_with_associated_flow_list(ppp_ctxt, + &associated_flow_list, vsig_idx); + list_del(&tmp->l_node); + devm_kfree(dev, tmp); + +l_end: + return ret; +} + +static u16 sxe2_flow_associated_flow_cnt(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx) +{ + struct sxe2_associated_flow_node *flow; + u16 cnt = 0; + + list_for_each_entry(flow, &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) { + cnt++; + } + + return cnt; +} + +static s32 sxe2_flow_remove_flow_in_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + struct list_head *associated_flow_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *tmp; + struct sxe2_associated_flow_node *associated_flow; + s32 ret = -ENOENT; + + SXE2_SET_USED(ppp_ctxt); + list_for_each_entry_safe(associated_flow, tmp, associated_flow_list, l_node) + { + if (sxe2_flow_compare_flow(associated_flow->flow_ptr, flow)) { + list_del(&associated_flow->l_node); + devm_kfree(dev, associated_flow); + ret = 0; + break; + } + } + return ret; +} + +static s32 sxe2_flow_op_vsig_remove_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsig_idx, struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *tmp; + struct sxe2_associated_flow_node *associated_flow; + s32 ret = -ENOENT; + + list_for_each_entry_safe(associated_flow, tmp, + &ppp_ctxt->vsig[vsig_idx].associated_flow_list, + l_node) + { + if (sxe2_flow_compare_flow(associated_flow->flow_ptr, flow)) { + if (sxe2_flow_associated_flow_cnt(ppp_ctxt, vsig_idx) == 1) { + ret = sxe2_flow_op_remove_vsig(ppp_ctxt, vsig_idx, + op_list); + if (ret != 0) + goto l_end; + + } else { + ret = sxe2_flow_remove_associated_flow(ppp_ctxt, + associated_flow); + if (ret != 0) + goto l_end; + + list_del(&associated_flow->l_node); + devm_kfree(dev, associated_flow); + } + break; + } + } + +l_end: + return ret; +} + +STATIC s32 sxe2_flow_op_flow_assoc_vsi_insert_tmp_list(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsi_sw_idx, + struct list_head *op_list, + struct list_head *associated_flow_list, + u16 or_vsig_idx) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + s32 ret = 0; + bool only_vsi; + u16 vsig_idx; + if (sxe2_flow_check_flow_in_vsig(ppp_ctxt, flow, or_vsig_idx)) { + LOG_INFO_BDF("flow already accessed vsi[%d].\n", vsi_sw_idx); + ret = -EEXIST; + goto l_end; + } + + only_vsi = (ppp_ctxt->vsig[or_vsig_idx].vsi_cnt == 1); + + ret = sxe2_flow_get_associated_flow_list(ppp_ctxt, or_vsig_idx, + associated_flow_list); + if (ret != 0) + goto l_end; + + ret = sxe2_flow_add_flow_to_list(ppp_ctxt, associated_flow_list, flow); + if (ret != 0) + goto l_end; + + ret = sxe2_flow_find_vsig_with_associated_flow_list(ppp_ctxt, + associated_flow_list, &vsig_idx); + if (ret == 0) { + LOG_DEBUG_BDF("flow accessed vsi, find same flows vsig[%u].\n", + vsig_idx); + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, vsig_idx, + op_list); + if (ret != 0) + goto l_end; + + if (only_vsi) { + ret = sxe2_flow_op_remove_vsig(ppp_ctxt, or_vsig_idx, + op_list); + if (ret != 0) + goto l_end; + } + } else if (only_vsi) { + LOG_DEBUG_BDF("flow accessed vsi, find only vsi vsig[%u].\n", + or_vsig_idx); + ret = sxe2_flow_op_vsig_add_flow(ppp_ctxt, flow, or_vsig_idx, false, + op_list); + if (ret 
!= 0) + goto l_end; + + ret = sxe2_flow_op_adjust_vsig_tcams_priority(ppp_ctxt, or_vsig_idx, + op_list); + if (ret != 0) + goto l_end; + + } else { + ret = sxe2_flow_op_crt_vsig_assoc_flow_list(ppp_ctxt, + vsi_sw_idx, + associated_flow_list, + &vsig_idx, op_list); + LOG_DEBUG_BDF("flow accessed vsi, or_vsig:%u, create new vsig[%u], " + "ret:%d.\n", + or_vsig_idx, vsig_idx, ret); + if (ret != 0) + goto l_end; + + ret = sxe2_flow_op_adjust_vsig_tcams_priority(ppp_ctxt, vsig_idx, + op_list); + if (ret != 0) + goto l_end; + } +l_end: + return ret; +} + +static s32 sxe2_flow_op_flow_assoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsi_sw_idx) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct list_head op_list; + struct list_head associated_flow_list; + struct sxe2_associated_flow_node *tmp; + struct sxe2_associated_flow_node *del; + struct sxe2_og_chg *tmp_chg; + struct sxe2_og_chg *del_chg; + s32 ret; + u16 vsig_idx; + u16 or_vsig_idx; + + INIT_LIST_HEAD(&op_list); + INIT_LIST_HEAD(&associated_flow_list); + + ret = sxe2_flow_op_hw_prof(ppp_ctxt, flow->prof_id, &op_list); + if (ret != 0) + goto l_end; + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &or_vsig_idx); + if (ret == 0 && or_vsig_idx != SXE2_PPP_DEFAULT_VSIG_IDX) { + ret = sxe2_flow_op_flow_assoc_vsi_insert_tmp_list(ppp_ctxt, + flow, vsi_sw_idx, &op_list, + &associated_flow_list, + or_vsig_idx); + if (ret) + goto l_end; + + } else { + ret = sxe2_flow_find_vsig_with_only_flow(ppp_ctxt, flow, &vsig_idx); + if (ret == 0) { + LOG_DEBUG_BDF("flow assoc vsi, find only flow vsig[%d], " + "move vsi[%u].\n", + vsig_idx, vsi_sw_idx); + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, + vsig_idx, &op_list); + if (ret != 0) + goto l_end; + + } else { + ret = sxe2_flow_op_creat_vsig_with_flow(ppp_ctxt, flow, + vsi_sw_idx, &op_list); + LOG_DEBUG_BDF("flow accessed vsi, create new vsig, " + "ret:%d.\n", + ret); + if (ret != 0) + goto l_end; + } + } + + if (ret == 0) { + ret = sxe2_fwc_update_profile(ppp_ctxt, ppp_ctxt->block_id, + &op_list); + } + +l_end: + list_for_each_entry_safe(del, tmp, &associated_flow_list, l_node) + { + list_del(&del->l_node); + devm_kfree(dev, del); + } + + list_for_each_entry_safe(del_chg, tmp_chg, &op_list, l_entry) + { + list_del(&del_chg->l_entry); + devm_kfree(dev, del_chg); + } + return ret; +} + +static s32 sxe2_flow_op_flow_dissoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsi_sw_idx) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct list_head op_list; + struct list_head associated_flow_list; + struct sxe2_associated_flow_node *tmp; + struct sxe2_associated_flow_node *del; + struct sxe2_og_chg *tmp_chg; + struct sxe2_og_chg *del_chg; + s32 ret; + u16 vsig_idx; + bool only_vsi; + bool only_flow; + + INIT_LIST_HEAD(&op_list); + INIT_LIST_HEAD(&associated_flow_list); + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &vsig_idx); + if (ret != 0) + goto l_end; + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) + goto l_end; + + only_flow = (sxe2_flow_associated_flow_cnt(ppp_ctxt, vsig_idx) == 1); + only_vsi = (ppp_ctxt->vsig[vsig_idx].vsi_cnt == 1); + + LOG_DEBUG_BDF("flow dissoc vsi, vsig only_flow:%d only_vsi:%d.\n", only_flow, + only_vsi); + + if (only_vsi) { + if (only_flow) { + ret = sxe2_flow_op_remove_vsig(ppp_ctxt, vsig_idx, &op_list); + if (ret != 0) + goto l_end; + } 
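+ /* Other flows still reference this VSIG: detach only this flow, then re-rank the remaining TCAM priorities. */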
else { + ret = sxe2_flow_op_vsig_remove_flow(ppp_ctxt, flow, vsig_idx, + &op_list); + if (ret != 0) + goto l_end; + ret = sxe2_flow_op_adjust_vsig_tcams_priority(ppp_ctxt, + vsig_idx, &op_list); + if (ret != 0) + goto l_end; + } + } else { + ret = sxe2_flow_get_associated_flow_list(ppp_ctxt, vsig_idx, + &associated_flow_list); + if (ret != 0) + goto l_end; + + ret = sxe2_flow_remove_flow_in_list(ppp_ctxt, flow, + &associated_flow_list); + if (ret != 0) + goto l_end; + + if (list_empty(&associated_flow_list)) { + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, + SXE2_PPP_DEFAULT_VSIG_IDX, + &op_list); + if (ret != 0) + goto l_end; + } else { + ret = sxe2_flow_find_vsig_with_associated_flow_list(ppp_ctxt, + &associated_flow_list, + &vsig_idx); + if (ret == 0) { + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, + vsi_sw_idx, vsig_idx, + &op_list); + if (ret != 0) + goto l_end; + } else { + ret = sxe2_flow_op_crt_vsig_assoc_flow_list(ppp_ctxt, + vsi_sw_idx, + &associated_flow_list, + &vsig_idx, + &op_list); + if (ret != 0) + goto l_end; + ret = sxe2_flow_op_adjust_vsig_tcams_priority(ppp_ctxt, + vsig_idx, + &op_list); + if (ret != 0) + goto l_end; + } + } + } + + if (ret == 0) { + ret = sxe2_fwc_update_profile(ppp_ctxt, ppp_ctxt->block_id, + &op_list); + } + +l_end: + list_for_each_entry_safe(del, tmp, &associated_flow_list, l_node) + { + list_del(&del->l_node); + devm_kfree(dev, del); + } + list_for_each_entry_safe(del_chg, tmp_chg, &op_list, l_entry) + { + list_del(&del_chg->l_entry); + devm_kfree(dev, del_chg); + } + return ret; +} + +STATIC void +sxe2_flow_acl_prof_set_fld(struct sxe2_flow_fld *flow_fld, + struct sxe2_fwc_acl_prof_sel_base_req *prof_sel_req) +{ + u16 dst, i; + u8 src; + + src = flow_fld->xtrct.idx * SXE2_FLOW_FV_SIZE + + flow_fld->xtrct.disp / BITS_PER_BYTE; + dst = flow_fld->last_val.val; + for (i = 0; i < flow_fld->last_val.len; i++) + prof_sel_req->byte_selection[dst++] = src++; +} + +STATIC s32 sxe2_flow_acl_assoc_prof_scen(struct sxe2_adapter *adapter, + struct sxe2_flow_info_node *flow_info) +{ + s32 ret; + struct sxe2_fwc_acl_prof_sel_base_req acl_prof_set; + struct sxe2_fwc_acl_prof_querey_resp acl_prof_query; + struct sxe2_flow_dissector_info *dis_info; + struct sxe2_flow_fld *flow_fld; + u16 i; + + memset(&acl_prof_set, 0x1f, sizeof(acl_prof_set)); + memset(&acl_prof_query, 0, sizeof(acl_prof_query)); + + dis_info = &flow_info->dissectors[0]; + for_each_set_bit(i, dis_info->fields, SXE2_FLOW_FLD_ID_MAX) + { + flow_fld = &dis_info->fld[i]; + sxe2_flow_acl_prof_set_fld(flow_fld, &acl_prof_set); + } + + for (i = 0; i < dis_info->raw_cnt; i++) { + flow_fld = &dis_info->raw[i].fld; + sxe2_flow_acl_prof_set_fld(flow_fld, &acl_prof_set); + } + acl_prof_set.prof_id = flow_info->prof_id; + acl_prof_set.pf_scenario_num[adapter->pf_idx] = flow_info->cfg.scen->scen_id; + ret = sxe2_fwc_acl_set_scen_prof(adapter, &acl_prof_set); + if (ret) { + LOG_ERROR_BDF("Acl prof scen set cmd failed, ret:%d", ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC bool +sxe2_flow_check_flow_conflict_in_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow_check, + u16 vsi_sw_idx) +{ + + s32 ret = 0; + u8 i, j; + u8 find; + bool conflict = false; + u16 or_vsig_idx = SXE2_PPP_DEFAULT_VSIG_IDX; + struct sxe2_associated_flow_node *assoc_flow; + struct sxe2_flow_info_node *flow_tmp = NULL; + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &or_vsig_idx); + if ((ret == 0) && (or_vsig_idx != SXE2_PPP_DEFAULT_VSIG_IDX)) { + 
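+ /* Compare the candidate flow's PTG set with each flow already attached to this VSI's VSIG; an identical set means the new flow would conflict. */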
list_for_each_entry(assoc_flow, + &ppp_ctxt->vsig[or_vsig_idx].associated_flow_list, + l_node) + { + flow_tmp = assoc_flow->flow_ptr; + if (flow_check->ptg_info.ptg_cnt != + flow_tmp->ptg_info.ptg_cnt) + continue; + find = 0; + for (i = 0; i < flow_check->ptg_info.ptg_cnt; i++) { + find = 0; + for (j = 0; j < flow_tmp->ptg_info.ptg_cnt; j++) { + if (flow_check->ptg_info.ptg[i] == + flow_tmp->ptg_info.ptg[j]) { + find = 1; + break; + } + } + if (!find) + break; + } + + if (find) { + conflict = true; + break; + } + } + } + + return conflict; +} + +s32 sxe2_flow_assoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (test_bit(vsi_sw_idx, flow->used_vsi)) { + LOG_DEBUG_BDF("vsi %d is already assoc with flow.\n", vsi_sw_idx); + ret = 0; + goto l_end; + } + + if (ppp_ctxt->block_id == SXE2_HW_BLOCK_ID_ACL) { + ret = sxe2_flow_acl_assoc_prof_scen(ppp_ctxt->adapter, flow); + if (ret) + goto l_end; + } + + ret = sxe2_flow_op_flow_assoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) { + LOG_ERROR_BDF("failed to assoc vsi to flow, ret = %d.\n", ret); + goto l_end; + } + + set_bit(vsi_sw_idx, flow->used_vsi); +l_end: + return ret; +} + +s32 sxe2_flow_disassoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (!test_bit(vsi_sw_idx, flow->used_vsi)) { + LOG_DEBUG_BDF("vsi %d is not assoc with flow.\n", vsi_sw_idx); + ret = 0; + goto l_end; + } + + ret = sxe2_flow_op_flow_dissoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) { + LOG_ERROR_BDF("failed to disassoc vsi from flow, ret = %d.\n", ret); + goto l_end; + } + + clear_bit(vsi_sw_idx, flow->used_vsi); + LOG_DEBUG_BDF("flow[%p] disassoc vsi[%u].\n", flow, vsi_sw_idx); +l_end: + return ret; +} + +s32 sxe2_flow_assoc_vsi_fnav(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx, + enum sxe2_fnav_flow_type flow_type) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (test_bit(vsi_sw_idx, flow->used_vsi)) { + LOG_DEBUG_BDF("vsi %d is already assoc with flow.\n", vsi_sw_idx); + ret = 0; + goto l_end; + } + + if (!sxe2_fnav_flow_sup_arfs(flow_type) && + sxe2_flow_check_flow_conflict_in_vsig(ppp_ctxt, flow, vsi_sw_idx)) { + LOG_INFO_BDF("vsi %d: new flow conflicts with an existing flow with the same ptgs.\n", + vsi_sw_idx); + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_flow_op_flow_assoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) { + LOG_ERROR_BDF("failed to assoc vsi to flow, ret = %d.\n", ret); + goto l_end; + } + + set_bit(vsi_sw_idx, flow->used_vsi); +l_end: + return ret; +} + +static s32 sxe2_flow_remove_vsi_from_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 vsig_idx) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (vsig_idx >= SXE2_MAX_VSIG_NUM || vsi_sw_idx >= SXE2_MAX_VSI_NUM) { + ret = -EINVAL; + goto l_end; + } + + if (!ppp_ctxt->vsig[vsig_idx].used) { + ret = -ENOENT; + goto l_end; + } + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) { + ret = 0; + goto l_end; + } + + if (!test_bit(vsi_sw_idx, ppp_ctxt->vsig[vsig_idx].vsis)) { + LOG_INFO_BDF("remove vsi error, vsi %d not in vsig %d.\n", + vsi_sw_idx, vsig_idx); + ret = -ENOENT; + goto l_end; + } + + ppp_ctxt->vsi_to_grp[vsi_sw_idx].idx = SXE2_PPP_DEFAULT_VSIG_IDX; + clear_bit(vsi_sw_idx, ppp_ctxt->vsig[vsig_idx].vsis); + ppp_ctxt->vsig[vsig_idx].vsi_cnt--; + +l_end: + return 
ret; +} + +static s32 sxe2_flow_vsi_move_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 vsig_idx) +{ + s32 ret = 0; + u16 or_vsig_idx; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (vsig_idx >= SXE2_MAX_VSIG_NUM || vsi_sw_idx >= SXE2_MAX_VSI_NUM) { + ret = -EINVAL; + goto l_end; + } + + if (vsig_idx != SXE2_PPP_DEFAULT_VSIG_IDX && + !ppp_ctxt->vsig[vsig_idx].used) { + ret = -ENOENT; + goto l_end; + } + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &or_vsig_idx); + if (ret != 0) + goto l_end; + + if (or_vsig_idx == vsig_idx) { + LOG_DEBUG_BDF("vsi %d already in vsig %d.\n", vsi_sw_idx, vsig_idx); + ret = 0; + goto l_end; + } + + if (or_vsig_idx != SXE2_PPP_DEFAULT_VSIG_IDX) { + ret = sxe2_flow_remove_vsi_from_vsig(ppp_ctxt, vsi_sw_idx, + or_vsig_idx); + if (ret != 0) + goto l_end; + } + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) + goto l_end; + + ppp_ctxt->vsi_to_grp[vsi_sw_idx].idx = vsig_idx; + set_bit(vsi_sw_idx, ppp_ctxt->vsig[vsig_idx].vsis); + ppp_ctxt->vsig[vsig_idx].vsi_cnt++; +l_end: + return ret; +} + +static s32 sxe2_flow_op_creat_vsig_with_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, + u16 vsi_sw_idx, + struct list_head *op_list) +{ + s32 ret = 0; + u16 new_vsig = 0; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + new_vsig = sxe2_flow_alloc_vsig(ppp_ctxt); + if (new_vsig == SXE2_PPP_DEFAULT_VSIG_IDX) { + ret = -EIO; + goto l_end; + } + + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, new_vsig, op_list); + if (ret != 0) + goto l_free; + + ret = sxe2_flow_op_vsig_add_flow(ppp_ctxt, flow, new_vsig, false, op_list); + if (ret != 0) + goto l_free; + +l_end: + LOG_DEBUG_BDF("create new vsig[%u](vsi:%u) with flow, ret:%d.\n", new_vsig, + vsi_sw_idx, ret); + return ret; +l_free: + (void)sxe2_flow_free_vsig(ppp_ctxt, new_vsig); + goto l_end; +} + +static s32 sxe2_flow_op_vsig_add_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsig_idx, + bool tail, struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_associated_flow_node *associated_flow = NULL; + struct sxe2_og_chg *chg; + + u8 vl_mask[SXE2_TCAM_KEY_VAL_SZ] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + u8 dc_mask[SXE2_TCAM_KEY_VAL_SZ] = {0x00, 0xF0, 0xFF, 0x0F, 0x00}; + u8 nm_mask[SXE2_TCAM_KEY_VAL_SZ] = {0x00, 0x00, 0x00, 0x00, 0x00}; + s32 ret = 0; + u16 tcam_idx; + u16 i; + u32 size; + struct sxe2_fwc_tcam_idx_batch *tcam_batch = NULL; + + if (sxe2_flow_check_flow_in_vsig(ppp_ctxt, flow, vsig_idx)) { + ret = -EEXIST; + goto l_end; + } + + associated_flow = devm_kzalloc(dev, sizeof(*associated_flow), GFP_KERNEL); + if (!associated_flow) { + ret = -ENOMEM; + goto l_end; + } + + size = sizeof(struct sxe2_fwc_tcam_idx_batch) + + flow->ptg_info.ptg_cnt * sizeof(struct sxe2_fwc_tcam_info); + + tcam_batch = kzalloc(size, GFP_KERNEL); + if (!tcam_batch) { + ret = -ENOMEM; + goto l_free; + } + tcam_batch->blk = ppp_ctxt->block_id; + tcam_batch->tcam_cnt = cpu_to_le16((u16)flow->ptg_info.ptg_cnt); + for (i = 0; i < flow->ptg_info.ptg_cnt; i++) + tcam_batch->tcam_info[i].action = SXE2_FWC_TCAM_ACTION_ADD; + + ret = sxe2_fwc_process_tcam_batch(adapter, tcam_batch, size); + if (ret) { + LOG_ERROR_BDF("batch application for TCAM failed, ret=%d\n", ret); + goto l_free; + } + + associated_flow->flow_ptr = flow; + associated_flow->tcam_cnt = flow->ptg_info.ptg_cnt; + + for (i = 0; i < associated_flow->tcam_cnt; i++) { + tcam_idx = 
le16_to_cpu(tcam_batch->tcam_info[i].tcam_idx);
+		associated_flow->tcams[i].idx = tcam_idx;
+		associated_flow->tcams[i].ptg = flow->ptg_info.ptg[i];
+		associated_flow->tcams[i].prof_id = flow->prof_id;
+		associated_flow->tcams[i].used = true;
+		LOG_DEBUG_BDF("alloc tcam[%u]\n", tcam_idx);
+	}
+
+	for (i = 0; i < associated_flow->tcam_cnt; i++) {
+		tcam_idx = le16_to_cpu(tcam_batch->tcam_info[i].tcam_idx);
+		chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL);
+		if (!chg) {
+			ret = -ENOMEM;
+			goto l_free;
+		}
+
+		chg->type = SXE2_OG_CHG_TYPE_TCAM;
+		chg->info.tcam.prof_id = flow->prof_id;
+		chg->info.tcam.tcam_idx = tcam_idx;
+
+		LOG_DEBUG_BDF("config tcam, tcam_idx:%u, prof_id:%u, ptg:%u, "
+			      "vsig:%d, cdid:%u\n",
+			      tcam_idx, flow->prof_id, associated_flow->tcams[i].ptg,
+			      vsig_idx, ppp_ctxt->adapter->port_idx);
+		ret = sxe2_flow_cfg_tcam_entry(ppp_ctxt, tcam_idx, flow->prof_id,
+					       associated_flow->tcams[i].ptg,
+					       vsig_idx, SXE2_TCAM_DEFAULT_CD_ID,
+					       SXE2_TCAM_DEFAULT_FLAGS, vl_mask,
+					       dc_mask, nm_mask);
+		if (ret != 0) {
+			devm_kfree(dev, chg);
+			goto l_free;
+		}
+		list_add(&chg->l_entry, op_list);
+	}
+
+	if (tail) {
+		list_add_tail(&associated_flow->l_node,
+			      &ppp_ctxt->vsig[vsig_idx].associated_flow_list);
+	} else {
+		sxe2_flow_list_add_with_priority(&ppp_ctxt->vsig[vsig_idx].associated_flow_list,
+						 associated_flow);
+	}
+
+l_end:
+	kfree(tcam_batch);
+	return ret;
+l_free:
+	(void)sxe2_flow_remove_associated_flow(ppp_ctxt, associated_flow);
+	devm_kfree(dev, associated_flow);
+	goto l_end;
+}
+
+#define SXE2_MATCH_DC_KEY 0x0
+#define SXE2_MATCH_DC_KEY_MASK 0x0
+#define SXE2_MATCH_NM_KEY 0x1
+#define SXE2_MATCH_NM_KEY_MASK 0x1
+#define SXE2_MATCH_0_KEY 0x0
+#define SXE2_MATCH_0_KEY_MASK 0x1
+#define SXE2_MATCH_1_KEY 0x1
+#define SXE2_MATCH_1_KEY_MASK 0x0
+
+static s32 sxe2_flow_gen_key_word(u8 val, u8 vl_mask, u8 dc_mask, u8 nm_mask,
+				  u8 *key, u8 *key_mask)
+{
+	s32 ret = 0;
+	u8 key_temp = *key;
+	u8 key_mask_temp = *key_mask;
+	u8 i;
+
+	if ((dc_mask ^ nm_mask) != (dc_mask | nm_mask)) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	*key = 0;
+	*key_mask = 0;
+
+	for (i = 0; i < 8; i++) {
+		*key >>= 1;
+		*key_mask >>= 1;
+
+		if (!(vl_mask & 0x1)) {
+			*key |= (key_temp & 0x1) << 7;
+			*key_mask |= (key_mask_temp & 0x1) << 7;
+		} else if (dc_mask & 0x1) {
+			*key |= SXE2_MATCH_DC_KEY << 7;
+			*key_mask |= SXE2_MATCH_DC_KEY_MASK << 7;
+		} else if (nm_mask & 0x1) {
+			*key |= SXE2_MATCH_NM_KEY << 7;
+			*key_mask |= SXE2_MATCH_NM_KEY_MASK << 7;
+		} else if (val & 0x01) {
+			*key |= SXE2_MATCH_1_KEY << 7;
+			*key_mask |= SXE2_MATCH_1_KEY_MASK << 7;
+		} else {
+			*key |= SXE2_MATCH_0_KEY << 7;
+			*key_mask |= SXE2_MATCH_0_KEY_MASK << 7;
+		}
+
+		dc_mask >>= 1;
+		nm_mask >>= 1;
+		vl_mask >>= 1;
+		val >>= 1;
+		key_temp >>= 1;
+		key_mask_temp >>= 1;
+	}
+
+l_end:
+	return ret;
+}
+
+static inline u8 sxe2_setbit_cnt_u8(u8 num)
+{
+	u8 bits = 0;
+	u32 i;
+
+	for (i = 0; i < 8; i++) {
+		bits += (num & 0x1);
+		num >>= 1;
+	}
+
+	return bits;
+}
+
+static inline bool max_set_bit_check(const u8 *mask, u16 size, u16 max)
+{
+	u16 count = 0;
+	u16 i;
+	bool ret = false;
+
+	for (i = 0; i < size; i++) {
+		if (!mask[i])
+			continue;
+
+		if (count == max)
+			goto l_end;
+
+		count += sxe2_setbit_cnt_u8(mask[i]);
+		if (count > max)
+			goto l_end;
+	}
+
+	ret = true;
+l_end:
+	return ret;
+}
+
+static s32 sxe2_flow_set_key(u8 *key, u16 size, u8 *val, u8 *vl_mask, u8 *dc_mask,
+			     u8 *nm_mask, u16 off, u16 len)
+{
+	s32 ret = 0;
+	u16 half_size;
+	u16 i;
+
+	if (size % 2) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+	half_size = size / 2;
+
+	if (off 
+ len > half_size) { + ret = -EINVAL; + goto l_end; + } + + if (nm_mask && + !max_set_bit_check(nm_mask, len, SXE2_KEY_MATCH_MAX_NM_SET_NUM)) { + ret = -EINVAL; + goto l_end; + } + + for (i = 0; i < len; i++) { + ret = sxe2_flow_gen_key_word((u8)(val[i]), + (u8)(vl_mask ? vl_mask[i] : (u8)0xff), + (u8)(dc_mask ? dc_mask[i] : (u8)0), + (u8)(nm_mask ? nm_mask[i] : (u8)0), key + off + i, + key + half_size + off + i); + if (ret != 0) + goto l_end; + } + +l_end: + return ret; +} + +static s32 sxe2_flow_reless_tcam(struct sxe2_ppp_common_ctxt *ppp_ctxt, u16 tcam_idx) +{ + u8 vl_mask[SXE2_TCAM_KEY_VAL_SZ] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + u8 dc_mask[SXE2_TCAM_KEY_VAL_SZ] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF}; + u8 nm_mask[SXE2_TCAM_KEY_VAL_SZ] = {0x01, 0x00, 0x00, 0x00, 0x00}; + s32 ret = 0; + + ret = sxe2_flow_cfg_tcam_entry(ppp_ctxt, tcam_idx, 0, 0, 0, 0, 0, vl_mask, + dc_mask, nm_mask); + return ret; +} + +#define SXE2_FLOW_TCAM_FULL_KEY_MASK_VSIG 0x0FFF +#define SXE2_FLOW_TCAM_FULL_KEY_MASK_CDID 0x0F +s32 sxe2_flow_cfg_tcam_entry(struct sxe2_ppp_common_ctxt *ppp_ctxt, u16 tcam_idx, + u8 prof_id, u8 ptg_idx, u16 vsig_idx, u8 cdid, + u16 flags, u8 vl_mask[SXE2_TCAM_KEY_VAL_SZ], + u8 dc_mask[SXE2_TCAM_KEY_VAL_SZ], + u8 nm_mask[SXE2_TCAM_KEY_VAL_SZ]) +{ + struct sxe2_prof_tcam_full_key full_key; + struct sxe2_prof_tcam_entry *entry; + s32 ret = 0; + + full_key.vsig = cpu_to_le16(vsig_idx & SXE2_FLOW_TCAM_FULL_KEY_MASK_VSIG); + full_key.flg = cpu_to_le16(flags); + full_key.ptg = ptg_idx; + + cdid |= ppp_ctxt->adapter->port_idx; + full_key.cdid = cdid & SXE2_FLOW_TCAM_FULL_KEY_MASK_CDID; + + entry = &ppp_ctxt->tcam_entry[tcam_idx]; + + ret = sxe2_flow_set_key(entry->key, SXE2_TCAM_KEY_SZ, (u8 *)&full_key, + vl_mask, dc_mask, nm_mask, 0, SXE2_TCAM_KEY_SZ / 2); + if (ret != 0) + goto l_end; + + entry->addr = cpu_to_le16(tcam_idx); + entry->prof_id = prof_id; + +l_end: + return ret; +} + +static void sxe2_flow_op_remove_tcam_add(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 tcam_idx, struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_og_chg *chg; + struct sxe2_og_chg *tmp; + + SXE2_SET_USED(ppp_ctxt); + list_for_each_entry_safe(chg, tmp, op_list, l_entry) + { + if ((chg->type == SXE2_OG_CHG_TYPE_TCAM) && + (chg->info.tcam.tcam_idx == tcam_idx)) { + list_del(&chg->l_entry); + devm_kfree(dev, chg); + } + } +} + +STATIC s32 sxe2_flow_op_tcam_avail_cfg(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx, + struct sxe2_prof_tcam_info *tcam, bool avail, + struct list_head *op_list) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u8 vl_mask[SXE2_TCAM_KEY_VAL_SZ] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + u8 dc_mask[SXE2_TCAM_KEY_VAL_SZ] = {0x00, 0xF0, 0xFF, 0x0F, 0x00}; + u8 nm_mask[SXE2_TCAM_KEY_VAL_SZ] = {0x00, 0x00, 0x00, 0x00, 0x00}; + struct sxe2_og_chg *chg; + s32 ret; + + if (!avail) { + ret = sxe2_flow_reless_tcam(ppp_ctxt, tcam->idx); + sxe2_flow_op_remove_tcam_add(ppp_ctxt, tcam->idx, op_list); + tcam->idx = 0; + tcam->used = false; + } else { + chg = devm_kzalloc(dev, sizeof(*chg), GFP_KERNEL); + if (!chg) { + LOG_ERROR_BDF("failed to alloc chg op memory.\n"); + ret = -ENOMEM; + goto l_end; + } + + ret = sxe2_flow_cfg_tcam_entry(ppp_ctxt, tcam->idx, + tcam->prof_id, tcam->ptg, + vsig_idx, SXE2_TCAM_DEFAULT_CD_ID, + SXE2_TCAM_DEFAULT_FLAGS, + vl_mask, dc_mask, nm_mask); + if (ret != 0) { + LOG_ERROR_BDF("tcam entry config failed, ret=%d\n", ret); + 
devm_kfree(dev, chg); + goto l_end; + } + tcam->used = true; + + chg->type = SXE2_OG_CHG_TYPE_TCAM; + chg->info.tcam.prof_id = tcam->prof_id; + chg->info.tcam.tcam_idx = tcam->idx; + list_add(&chg->l_entry, op_list); + } + +l_end: + return ret; +} + +static bool sxe2_flow_record_ptg_is_used(struct sxe2_prof_tcam_info *tcam, + unsigned long *ptgs_used) +{ + return (bool)test_bit(tcam->ptg, ptgs_used); +} + +static s32 +sxe2_flow_op_adjust_vsig_tcams_priority(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsig_idx, struct list_head *op_list) +{ + DECLARE_BITMAP(ptgs_used, SXE2_MAX_PTG_NUM); + struct sxe2_associated_flow_node *associated_flow; + s32 ret = 0; + u16 i, j = 0; + bool used; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct sxe2_fwc_tcam_idx_batch *tcam_batch = NULL; + struct sxe2_prof_tcam_info **tcam_info = NULL; + u16 batch_cnt = 0; + u32 size; + + bitmap_zero(ptgs_used, SXE2_MAX_PTG_NUM); + list_for_each_entry(associated_flow, + &ppp_ctxt->vsig[vsig_idx].associated_flow_list, l_node) + { + for (i = 0; i < associated_flow->tcam_cnt; i++) { + used = sxe2_flow_record_ptg_is_used(&associated_flow->tcams[i], + (unsigned long *)ptgs_used); + if (used && associated_flow->tcams[i].used) + batch_cnt++; + else if (!used && !associated_flow->tcams[i].used) + batch_cnt++; + set_bit(associated_flow->tcams[i].ptg, ptgs_used); + } + } + + if (batch_cnt == 0) { + ret = 0; + goto l_end; + } + + size = sizeof(struct sxe2_fwc_tcam_idx_batch) + + batch_cnt * sizeof(struct sxe2_fwc_tcam_info); + tcam_batch = kzalloc(size, GFP_KERNEL); + tcam_info = kcalloc(batch_cnt, sizeof(struct sxe2_prof_tcam_info *), + GFP_KERNEL); + if (!tcam_batch || !tcam_info) { + ret = -ENOMEM; + goto l_end; + } + tcam_batch->blk = ppp_ctxt->block_id; + tcam_batch->tcam_cnt = cpu_to_le16(batch_cnt); + bitmap_zero(ptgs_used, SXE2_MAX_PTG_NUM); + list_for_each_entry(associated_flow, + &ppp_ctxt->vsig[vsig_idx].associated_flow_list, l_node) + { + for (i = 0; i < associated_flow->tcam_cnt; i++) { + used = sxe2_flow_record_ptg_is_used(&associated_flow->tcams[i], + (unsigned long *)ptgs_used); + if (used && associated_flow->tcams[i].used) { + tcam_batch->tcam_info[j].action = + SXE2_FWC_TCAM_ACTION_DEL; + tcam_batch->tcam_info[j].tcam_idx = + cpu_to_le16(associated_flow->tcams[i].idx); + tcam_info[j] = &associated_flow->tcams[i]; + j++; + } else if (!used && !associated_flow->tcams[i].used) { + tcam_batch->tcam_info[j].action = + SXE2_FWC_TCAM_ACTION_ADD; + tcam_info[j] = &associated_flow->tcams[i]; + j++; + } + set_bit(associated_flow->tcams[i].ptg, ptgs_used); + } + } + ret = sxe2_fwc_process_tcam_batch(adapter, tcam_batch, size); + if (ret) { + LOG_ERROR_BDF("process tcam batch failed, ret=%d\n", ret); + goto l_end; + } + + for (i = 0; i < batch_cnt; i++) { + if (tcam_batch->tcam_info[i].action == SXE2_FWC_TCAM_ACTION_DEL) { + LOG_DEBUG_BDF("free used tcam[%u], ptg=%u, prof_id=%u\n", + tcam_info[i]->idx, tcam_info[i]->ptg, + tcam_info[i]->prof_id); + (void)sxe2_flow_op_tcam_avail_cfg(ppp_ctxt, vsig_idx, + tcam_info[i], false, + op_list); + } + } + for (i = 0; i < batch_cnt; i++) { + if (tcam_batch->tcam_info[i].action == SXE2_FWC_TCAM_ACTION_ADD) { + tcam_info[i]->idx = + le16_to_cpu(tcam_batch->tcam_info[i].tcam_idx); + ret = sxe2_flow_op_tcam_avail_cfg(ppp_ctxt, vsig_idx, + tcam_info[i], true, + op_list); + LOG_DEBUG_BDF("alloc unused tcam[%u], ptg=%u, prof_id=%u\n", + tcam_info[i]->idx, tcam_info[i]->ptg, + tcam_info[i]->prof_id); + if (ret) { + LOG_ERROR_BDF("add tcam op list failed, ret=%d\n", + ret); + break; 
+ } + } + } + +l_end: + kfree(tcam_batch); + kfree(tcam_info); + return ret; +} + +static struct sxe2_rss_symm_fv_pair sxe2_rss_symm_fv_list[] = { + {SXE2_FLOW_FLD_ID_IPV4_SA, SXE2_FLOW_FLD_ID_IPV4_DA, + SXE2_FLOW_FLD_SZ_IPV4_ADDR}, + {SXE2_FLOW_FLD_ID_IPV6_SA, SXE2_FLOW_FLD_ID_IPV6_DA, + SXE2_FLOW_FLD_SZ_IPV6_ADDR}, + {SXE2_FLOW_FLD_ID_TCP_SRC_PORT, SXE2_FLOW_FLD_ID_TCP_DST_PORT, + SXE2_FLOW_FLD_SZ_PORT}, + {SXE2_FLOW_FLD_ID_UDP_SRC_PORT, SXE2_FLOW_FLD_ID_UDP_DST_PORT, + SXE2_FLOW_FLD_SZ_PORT}, + {SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, SXE2_FLOW_FLD_ID_SCTP_DST_PORT, + SXE2_FLOW_FLD_SZ_PORT}}; + +static const struct sxe2_flow_fld_info sxe2_flds_info[SXE2_FLOW_FLD_ID_MAX] = { + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_ETH, 0, ETH_ALEN), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_ETH, ETH_ALEN, ETH_ALEN), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_VLAN, 2, SXE2_FLOW_FLD_SZ_VLAN), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_VLAN, 2, SXE2_FLOW_FLD_SZ_VLAN), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_VLAN, 0, SXE2_FLOW_FLD_SZ_VLAN), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_VLAN, 0, SXE2_FLOW_FLD_SZ_VLAN), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_VLAN, 2, SXE2_FLOW_FLD_SZ_VLAN, + 0x0fff), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_VLAN, 2, SXE2_FLOW_FLD_SZ_VLAN, + 0x0fff), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_ETH, 0, SXE2_FLOW_FLD_SZ_ETH_TYPE), + + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV4, 0, + SXE2_FLOW_FLD_SZ_IP_DSCP, 0x00ff), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV6, 0, + SXE2_FLOW_FLD_SZ_IP_DSCP, 0x0ff0), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV4, 8, + SXE2_FLOW_FLD_SZ_IP_TTL, 0xff00), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV4, 8, + SXE2_FLOW_FLD_SZ_IP_PROT, 0x00ff), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV6, 6, + SXE2_FLOW_FLD_SZ_IP_TTL, 0x00ff), + SXE2_FLOW_FLD_INFO_MASK(SXE2_FLOW_HDR_IPV6, 6, + SXE2_FLOW_FLD_SZ_IP_PROT, 0xff00), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV4, 12, + SXE2_FLOW_FLD_SZ_IPV4_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV4, 16, + SXE2_FLOW_FLD_SZ_IPV4_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 8, + SXE2_FLOW_FLD_SZ_IPV6_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 24, + SXE2_FLOW_FLD_SZ_IPV6_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV4, 10, + SXE2_FLOW_FLD_SZ_IP_CHKSUM), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV4, 4, SXE2_FLOW_FLD_SZ_IPV4_ID), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV_FRAG, 4, + SXE2_FLOW_FLD_SZ_IPV6_ID), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 8, + SXE2_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 24, + SXE2_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 8, + SXE2_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 24, + SXE2_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 8, + SXE2_FLOW_FLD_SZ_IPV6_PRE64_ADDR), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_IPV6, 24, + SXE2_FLOW_FLD_SZ_IPV6_PRE64_ADDR), + + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_TCP, 0, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_TCP, 2, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_UDP, 0, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_UDP, 2, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_SCTP, 0, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_SCTP, 2, SXE2_FLOW_FLD_SZ_PORT), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_TCP, 13, + SXE2_FLOW_FLD_SZ_TCP_FLAGS), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_TCP, 16, + SXE2_FLOW_FLD_SZ_TCP_CHKSUM), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_UDP, 6, + SXE2_FLOW_FLD_SZ_UDP_CHKSUM), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_SCTP, 8, + SXE2_FLOW_FLD_SZ_SCTP_CHKSUM), + + 
SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_VXLAN, 12, + SXE2_FLOW_FLD_SZ_VXLAN_VNI), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_GENEVE, 12, + SXE2_FLOW_FLD_SZ_GENEVE_VNI), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_GTPU, 12, + SXE2_FLOW_FLD_SZ_GTPU_TEID), + SXE2_FLOW_FLD_INFO(SXE2_FLOW_HDR_GRE, 4, SXE2_FLOW_FLD_SZ_GRE_TNI), +}; + +static const struct sxe2_ptype_map g_ptype_map = { + .sxe2_ptypes_mac_ofos_all = + { + SXE2_PTYPE_BITMAP(BF, BF, 7F, 7E, FD, + C0, 0C, C6), + SXE2_PTYPE_BITMAP(FE, FD, FD, FB, F7, + EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, FF, 03, + BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 5F, 00, 00), + SXE2_PTYPE_BITMAP(20, 08, 03, 07, FF, + FF, FF, 80), + SXE2_PTYPE_BITMAP(00, 00, 0F, FF, FF, + FF, F0, 80), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, F3, FF, 03, FF, + 3F, 3F, 3E), + SXE2_PTYPE_BITMAP(FF, FF, FF, F0, 00, + 00, FF, F9), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + FF, FF, FF), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + FF, FF, FF), + }, + .sxe2_ptypes_mac_il_all = + { + SXE2_PTYPE_BITMAP(BC, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(F0, 00, 00, 00, 00, + EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 03, + BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 0F, FF, FF, + FF, F0, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, F0, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, FF, F9), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + FF, FF, FF), + }, + .sxe2_ptypes_mac_ofos_with_l3 = + { + SXE2_PTYPE_BITMAP(BF, BF, 7F, 7E, FD, + C0, 00, 00), + SXE2_PTYPE_BITMAP(FE, FD, FD, FB, F7, + EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, FF, 03, + BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 0F, 00, 00), + SXE2_PTYPE_BITMAP(20, 00, 03, 07, FF, + FF, FF, 80), + SXE2_PTYPE_BITMAP(00, 00, 0F, FF, FF, + FF, F0, 80), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, F3, FF, 03, FF, + 3F, 3F, 00), + SXE2_PTYPE_BITMAP(FF, FF, FF, F0, 00, + 00, FF, F9), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + FF, FF, FF), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, FF, + FF, FF, FF), + }, + .sxe2_ptypes_mac_il_with_l3 = + { + SXE2_PTYPE_BITMAP(B8, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(E0, 00, 00, 00, 00, + EF, DD, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 03, + BF, 77, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 0F, FF, FF, + FF, F0, 00), + 
SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, E0, 00, 00, 00, + 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, FF, F1), + SXE2_PTYPE_BITMAP(EF, FF, 7F, FB, + FF, 00, 00, 00), + SXE2_PTYPE_BITMAP(DF, FE, FF, F7, + FF, BF, FD, FF), + }, + .sxe2_ptypes_mac_ofos_no_l3 = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 0C, C6), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 50, 00, 00), + SXE2_PTYPE_BITMAP(00, 08, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 3E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_mac_il_no_l3 = + { + SXE2_PTYPE_BITMAP(04, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(10, 00, 00, 00, + 00, 00, 02, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 08, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 10, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 08), + SXE2_PTYPE_BITMAP(10, 00, 80, 04, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 01, 00, 08, + 00, 40, 02, 00), + }, + .sxe2_ptypes_ipv4_ofos_with_l4 = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 0D, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 50, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 00, 00, 01, + 83, E0, FA, 80), + SXE2_PTYPE_BITMAP(00, 00, 05, 55, + 55, 55, 50, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 03, + FF, 00, 38, 00), + SXE2_PTYPE_BITMAP(3F, FE, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(80, 03, FF, E0, + 00, FF, F8, 00), + SXE2_PTYPE_BITMAP(FF, F8, 00, 3F, + FE, 00, 0F, FF), + }, + .sxe2_ptypes_ipv4_il_with_l4 = + { + SXE2_PTYPE_BITMAP(A0, 03, 40, 06, + 80, 00, 00, 00), + SXE2_PTYPE_BITMAP(80, 0D, 00, 1A, + 00, 00, D0, 01), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 03, 40, 06), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 
+ 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 1C, E0, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(03, 80, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(31, 01, 88, 00, + 00, 00, 01, C0), + SXE2_PTYPE_BITMAP(62, 03, 10, 18, + 80, C4, 06, 20), + SXE2_PTYPE_BITMAP(C4, 06, 20, 31, + 01, 88, 0C, 40), + }, + .sxe2_ptypes_ipv6_ofos_with_l4 = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 34, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, A0, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 06, + 7C, 1F, 05, 00), + SXE2_PTYPE_BITMAP(00, 00, 0A, AA, + AA, AA, A0, 80), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 03, FF, 00, + 00, 38, 00, 00), + SXE2_PTYPE_BITMAP(C0, 01, FF, F0, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(7F, FC, 00, 1F, + FF, 00, 07, FF), + SXE2_PTYPE_BITMAP(00, 07, FF, C0, + 01, FF, F0, 00), + }, + .sxe2_ptypes_ipv6_il_with_l4 = + { + SXE2_PTYPE_BITMAP(01, A0, 03, 40, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(06, 80, 0D, 00, + 00, 68, 00, D0), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 01, A0, 03, 40), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 73, 80, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(E0, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(80, 64, 03, 20, + 00, 00, 70, 00), + SXE2_PTYPE_BITMAP(00, C8, 06, 40, + 32, 01, 90, 0C), + SXE2_PTYPE_BITMAP(01, 90, 0C, 80, + 64, 03, 20, 19), + }, + .sxe2_ptypes_ipv4_ofos_no_l4 = + { + SXE2_PTYPE_BITMAP(BF, BF, 7F, 7E, + F0, C0, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, 05, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 03, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 01, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 07, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_ipv4_il_no_l4 = + { + SXE2_PTYPE_BITMAP(18, 04, 30, 08, + 60, 00, 00, 00), + SXE2_PTYPE_BITMAP(60, 10, C0, 21, + 80, 01, 0C, 02), + 
SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 04, 30, 08), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 03, 18, 00), + SXE2_PTYPE_BITMAP(00, 00, 03, 33, + 33, 03, F0, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(04, 60, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(06, 80, 34, 00, + 00, 00, 02, 30), + SXE2_PTYPE_BITMAP(0D, 00, 68, 03, + 40, 1A, 00, D0), + SXE2_PTYPE_BITMAP(1A, 00, D0, 06, + 80, 34, 01, A0), + }, + .sxe2_ptypes_ipv6_ofos_no_l4 = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FE, FD, FD, E1, + C3, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 0A, + 03, BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 0C, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 02, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, F0, 00, 00, + 00, 07, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, FF, F9), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_ipv6_il_no_l4 = + { + SXE2_PTYPE_BITMAP(02, 18, 04, 30, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(08, 60, 10, C0, + 00, 86, 01, 0C), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 02, 18, 04, 30), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 0C, 60, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 0C, CC, + CC, FC, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(18, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(40, 1A, 00, D0, + 00, 00, 8C, 01), + SXE2_PTYPE_BITMAP(80, 34, 01, A0, + 0D, 00, 68, 03), + SXE2_PTYPE_BITMAP(00, 68, 03, 40, + 1A, 00, D0, 06), + }, + .sxe2_ptypes_udp_ofos = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 01, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 04, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, F0, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 00, 00, 07, + FF, FF, FF, 80), + SXE2_PTYPE_BITMAP(00, 00, 0F, FF, + FF, FF, F0, 80), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 03, FC, 03, + FC, 38, 38, 00), + SXE2_PTYPE_BITMAP(FF, FF, FF, F0, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, + FF, FF, FF, FF), + SXE2_PTYPE_BITMAP(FF, FF, FF, FF, + FF, FF, FF, FF), + }, + .sxe2_ptypes_udp_il = 
+ { + SXE2_PTYPE_BITMAP(20, 20, 40, 40, + 80, 00, 00, 00), + SXE2_PTYPE_BITMAP(80, 81, 01, 02, + 00, 08, 10, 10), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 20, 40, 40), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 10, 84, 20, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 80, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(10, 20, 81, 00, + 00, 00, 10, 40), + SXE2_PTYPE_BITMAP(20, 41, 02, 08, + 10, 40, 82, 04), + SXE2_PTYPE_BITMAP(40, 82, 04, 10, + 20, 81, 04, 08), + }, + .sxe2_ptypes_tcp_ofos = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 04, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 10, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 03, 00, + 03, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_tcp_il = + { + SXE2_PTYPE_BITMAP(80, 81, 01, 02, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(02, 04, 04, 08, + 00, 20, 40, 40), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 81, 01, 02), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 21, 08, 40, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(41, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 41, 02, 00, + 00, 00, 20, 80), + SXE2_PTYPE_BITMAP(40, 82, 04, 10, + 20, 81, 04, 08), + SXE2_PTYPE_BITMAP(81, 04, 08, 20, + 41, 02, 08, 10), + }, + .sxe2_ptypes_sctp_ofos = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 08, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 20, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 
+ 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_sctp_il = + { + SXE2_PTYPE_BITMAP(01, 02, 02, 04, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(04, 08, 08, 10, + 00, 40, 80, 81), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 01, 02, 02, 04), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 42, 10, 80, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(82, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(81, 04, 08, 20, + 00, 00, 41, 00), + SXE2_PTYPE_BITMAP(02, 08, 10, 40, + 82, 04, 10, 20), + SXE2_PTYPE_BITMAP(04, 10, 20, 81, + 04, 08, 20, 41), + }, + .sxe2_ptypes_vxlan_vni = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(3F, FF, FF, F0, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 03, FF, FF, + FF, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 3F, + FF, FF, F0, 00), + }, + .sxe2_ptypes_gre_of = + { + SXE2_PTYPE_BITMAP(BF, BF, 78, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FE, FD, E0, 00, + 00, EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 03, BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(C0, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, FC, 00, 00, + 00, FF, FF, FF), + SXE2_PTYPE_BITMAP(FF, FF, FF, C0, + 00, 00, 0F, FF), + }, + .sxe2_ptypes_ipv4_ofos_frag = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 40, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, 
+ 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_ipv4_il_frag = + { + SXE2_PTYPE_BITMAP(08, 00, 10, 00, + 20, 00, 00, 00), + SXE2_PTYPE_BITMAP(20, 00, 40, 00, + 80, 00, 04, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 10, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 01, 08, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 20, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(04, 00, 20, 00, + 00, 00, 00, 10), + SXE2_PTYPE_BITMAP(08, 00, 40, 02, + 00, 10, 00, 80), + SXE2_PTYPE_BITMAP(10, 00, 80, 04, + 00, 20, 01, 00), + }, + .sxe2_ptypes_ipv6_ofos_frag = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 01, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, + .sxe2_ptypes_ipv6_il_frag = + { + SXE2_PTYPE_BITMAP(00, 08, 00, 10, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 20, 00, 40, + 00, 02, 00, 04), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 08, 00, 10), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 04, 20, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(08, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 10, 00, 80, + 00, 00, 04, 00), + SXE2_PTYPE_BITMAP(00, 20, 01, 00, + 08, 00, 40, 02), + SXE2_PTYPE_BITMAP(00, 40, 02, 00, + 10, 00, 80, 04), + }, + .sxe2_ptypes_ipv4_ofos_all = + { + SXE2_PTYPE_BITMAP(BF, BF, 7F, 7E, + FD, C0, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, EF, DF, DF), + SXE2_PTYPE_BITMAP(00, 00, 00, 55, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 03, 00, 00), + SXE2_PTYPE_BITMAP(20, 00, 01, 01, + 83, E0, FA, 80), + SXE2_PTYPE_BITMAP(00, 00, 05, 55, + 55, 55, 50, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + 
SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 03, + FF, 00, 3F, 00), + SXE2_PTYPE_BITMAP(3F, FE, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(80, 03, FF, E0, + 00, FF, F8, 00), + SXE2_PTYPE_BITMAP(FF, F8, 00, 3F, + FE, 00, 0F, FF), + }, + .sxe2_ptypes_ipv4_il_all = + { + SXE2_PTYPE_BITMAP(B8, 07, 70, 0E, + E0, 00, 00, 00), + SXE2_PTYPE_BITMAP(E0, 1D, C0, 3B, + 80, 01, DC, 03), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 07, 70, 0E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 1F, F8, 00), + SXE2_PTYPE_BITMAP(00, 00, 03, 33, + 33, 03, F0, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(07, E0, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(37, 81, BC, 00, + 00, 00, 03, F0), + SXE2_PTYPE_BITMAP(6F, 03, 78, 1B, + C0, DE, 06, F0), + SXE2_PTYPE_BITMAP(DE, 06, F0, 37, + 81, BC, 0D, E0), + }, + .sxe2_ptypes_ipv6_ofos_all = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FE, FD, FD, FB, + F7, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, AA, + 03, BF, 7F, 7E), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 0C, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 02, 06, + 7C, 1F, 05, 00), + SXE2_PTYPE_BITMAP(00, 00, 0A, AA, + AA, AA, A0, 80), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(FF, F3, FF, 00, + 00, 3F, 00, 00), + SXE2_PTYPE_BITMAP(C0, 01, FF, F0, + 00, 00, FF, F9), + SXE2_PTYPE_BITMAP(7F, FC, 00, 1F, + FF, 00, 07, FF), + SXE2_PTYPE_BITMAP(00, 07, FF, C0, + 01, FF, F0, 00), + }, + .sxe2_ptypes_ipv6_il_all = + { + SXE2_PTYPE_BITMAP(03, B8, 07, 70, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(0E, E0, 1D, C0, + 00, EE, 01, DC), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 03, B8, 07, 70), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 7F, E0, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 0C, CC, + CC, FC, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(F8, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(C0, 7E, 03, F0, + 00, 00, FC, 01), + SXE2_PTYPE_BITMAP(80, FC, 07, E0, + 3F, 01, F8, 0F), + SXE2_PTYPE_BITMAP(01, F8, 0F, C0, + 7E, 03, F0, 1F), + }, + .sxe2_ptypes_gtpu = + { + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 7F, FF, FE, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), 
+ SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + SXE2_PTYPE_BITMAP(00, 00, 00, 00, + 00, 00, 00, 00), + }, +}; + +STATIC void +sxe2_flow_parse_dissectors_hdrs_l2(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + const struct sxe2_ptype_map *ptype_map_ptr, + unsigned long *headers, u8 i) +{ + const unsigned long *ptypes_src; + + if (test_bit(SXE2_FLOW_HDR_ETH, headers) && + test_bit(SXE2_FLOW_HDR_ETH_NON_IP, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_il_no_l3) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_ofos_no_l3); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = + i ? SXE2_FLOW_PRIO_LEVEL_INNER_ETH + : SXE2_FLOW_PRIO_LEVEL_OUTER_ETH; + + } else if (test_bit(SXE2_FLOW_HDR_ETH, headers) && + (test_bit(SXE2_FLOW_HDR_IPV4, headers) || + test_bit(SXE2_FLOW_HDR_IPV6, headers))) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_il_with_l3) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_ofos_with_l3); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = + i ? SXE2_FLOW_PRIO_LEVEL_INNER_ETH + : SXE2_FLOW_PRIO_LEVEL_OUTER_ETH; + + } else if (test_bit(SXE2_FLOW_HDR_ETH, headers)) { + if (ppp_ctxt->block_id == SXE2_HW_BLOCK_ID_FNAV) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_il_no_l3) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_ofos_no_l3); + } else { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_il_all) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_mac_ofos_all); + } + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = + i ? SXE2_FLOW_PRIO_LEVEL_INNER_ETH + : SXE2_FLOW_PRIO_LEVEL_OUTER_ETH; + } +} + +STATIC void +sxe2_flow_parse_dissectors_hdrs_l3(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + const struct sxe2_ptype_map *ptype_map_ptr, + unsigned long *headers, u8 i) +{ + const unsigned long *ptypes_src; + + if (test_bit(SXE2_FLOW_HDR_IPV4, headers) && + (test_bit(SXE2_FLOW_HDR_TCP, headers) || + test_bit(SXE2_FLOW_HDR_UDP, headers) || + test_bit(SXE2_FLOW_HDR_SCTP, headers))) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_il_with_l4) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_ofos_with_l4); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + + } else if (test_bit(SXE2_FLOW_HDR_IPV4, headers) && + test_bit(SXE2_FLOW_HDR_IPV_OTHER, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_il_no_l4) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_ofos_no_l4); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? 
SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + + } else if (test_bit(SXE2_FLOW_HDR_IPV4, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_il_all) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_ofos_all); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + + } else if (test_bit(SXE2_FLOW_HDR_IPV6, headers) && + (test_bit(SXE2_FLOW_HDR_TCP, headers) || + test_bit(SXE2_FLOW_HDR_UDP, headers) || + test_bit(SXE2_FLOW_HDR_SCTP, headers))) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_il_with_l4) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_ofos_with_l4); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + + } else if (test_bit(SXE2_FLOW_HDR_IPV6, headers) && + test_bit(SXE2_FLOW_HDR_IPV_OTHER, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_il_no_l4) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_ofos_no_l4); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + + } else if (test_bit(SXE2_FLOW_HDR_IPV6, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_il_all) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_ofos_all); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L3 + : SXE2_FLOW_PRIO_LEVLE_OUTER_L3; + } +} + +STATIC void +sxe2_flow_parse_dissectors_hdrs_l3_frag(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + const struct sxe2_ptype_map *ptype_map_ptr, + unsigned long *headers, u8 i) +{ + const unsigned long *ptypes_src; + + if (test_bit(SXE2_FLOW_HDR_IPV4, headers) && + test_bit(SXE2_FLOW_HDR_IPV_FRAG, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_il_frag) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv4_ofos_frag); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = + i ? SXE2_FLOW_PRIO_LEVLE_INNER_L3_FRAG + : SXE2_FLOW_PRIO_LEVEL_OUTER_L3_FRAG; + } else if (test_bit(SXE2_FLOW_HDR_IPV6, headers) && + test_bit(SXE2_FLOW_HDR_IPV_FRAG, headers)) { + ptypes_src = i ? (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_il_frag) + : (const unsigned long + *)(ptype_map_ptr->sxe2_ptypes_ipv6_ofos_frag); + bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src, + SXE2_MAX_PTYPE_NUM); + flow_params->flow_info->priority = + i ? SXE2_FLOW_PRIO_LEVLE_INNER_L3_FRAG + : SXE2_FLOW_PRIO_LEVEL_OUTER_L3_FRAG; + } +} + +STATIC void +sxe2_flow_parse_dissectors_hdrs_l4(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + const struct sxe2_ptype_map *ptype_map_ptr, + unsigned long *headers, u8 i) +{ + const unsigned long *ptypes_src; + + if (test_bit(SXE2_FLOW_HDR_UDP, headers)) { + ptypes_src = i ? 
(const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_udp_il)
+			       : (const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_udp_ofos);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+		flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L4
+						     : SXE2_FLOW_PRIO_LEVEL_OUTER_L4;
+
+	} else if (test_bit(SXE2_FLOW_HDR_TCP, headers)) {
+		ptypes_src = i ? (const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_tcp_il)
+			       : (const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_tcp_ofos);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+		flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L4
+						     : SXE2_FLOW_PRIO_LEVEL_OUTER_L4;
+
+	} else if (test_bit(SXE2_FLOW_HDR_SCTP, headers)) {
+		ptypes_src = i ? (const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_sctp_il)
+			       : (const unsigned long
+					  *)(ptype_map_ptr->sxe2_ptypes_sctp_ofos);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+		flow_params->flow_info->priority = i ? SXE2_FLOW_PRIO_LEVEL_INNER_L4
+						     : SXE2_FLOW_PRIO_LEVEL_OUTER_L4;
+	}
+
+	if (test_bit(SXE2_FLOW_HDR_GRE, headers)) {
+		ptypes_src = (const unsigned long
+				      *)(ptype_map_ptr->sxe2_ptypes_gre_of);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+	} else if (test_bit(SXE2_FLOW_HDR_VXLAN, headers) ||
+		   test_bit(SXE2_FLOW_HDR_GENEVE, headers)) {
+		ptypes_src = (const unsigned long
+				      *)(ptype_map_ptr->sxe2_ptypes_vxlan_vni);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+	} else if (test_bit(SXE2_FLOW_HDR_GTPU, headers)) {
+		ptypes_src = (const unsigned long
+				      *)(ptype_map_ptr->sxe2_ptypes_gtpu);
+		bitmap_and(flow_params->ptypes, flow_params->ptypes, ptypes_src,
+			   SXE2_MAX_PTYPE_NUM);
+	}
+}
+
+static s32 sxe2_flow_parse_dissectors_hdrs(struct sxe2_ppp_common_ctxt *ppp_ctxt,
+					   struct sxe2_flow_info_params *flow_params)
+{
+	DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX);
+	s32 ret = 0;
+	u8 i;
+	const struct sxe2_ptype_map *ptype_map_ptr = &g_ptype_map;
+
+	memset(flow_params->ptypes, 0xff, sizeof(flow_params->ptypes));
+
+	for (i = 0; i < flow_params->flow_info->dissector_cnt; i++) {
+		bitmap_copy(headers, flow_params->flow_info->dissectors[i].headers,
+			    SXE2_FLOW_HDR_MAX);
+
+		sxe2_flow_parse_dissectors_hdrs_l2(ppp_ctxt, flow_params,
+						   ptype_map_ptr, headers, i);
+
+		sxe2_flow_parse_dissectors_hdrs_l3(ppp_ctxt, flow_params,
+						   ptype_map_ptr, headers, i);
+
+		sxe2_flow_parse_dissectors_hdrs_l3_frag(ppp_ctxt, flow_params,
+							ptype_map_ptr, headers, i);
+
+		sxe2_flow_parse_dissectors_hdrs_l4(ppp_ctxt, flow_params,
+						   ptype_map_ptr, headers, i);
+	}
+
+	LOG_INFO("generate ptypes: 0x%lX, 0x%lX, 0x%lX, 0x%lX, priority: %u\n",
+		 flow_params->ptypes[0], flow_params->ptypes[1],
+		 flow_params->ptypes[2], flow_params->ptypes[5],
+		 flow_params->flow_info->priority);
+
+	return ret;
+}
+
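+/*
+ * Map a parsed flow field id to the hardware protocol id used for field
+ * extraction. Dissector index 0 selects the outermost (or single) header
+ * variant; a non-zero index selects the inner (tunneled) header variant.
+ */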
+static s32 sxe2_flow_parse_fld_to_fv_set_prot_id(struct sxe2_adapter *adapter,
+						 enum sxe2_flow_fld_id fld_id,
+						 u8 dissector,
+						 enum sxe2_prot_id *prot_id)
+{
+	s32 ret = 0;
+
+	switch (fld_id) {
+	case SXE2_FLOW_FLD_ID_ETH_DA:
+	case SXE2_FLOW_FLD_ID_ETH_SA:
+		*prot_id = (dissector == 0) ? SXE2_PROT_MAC_OF_OR_S
+					    : SXE2_PROT_MAC_IL;
+		break;
+
+	case SXE2_FLOW_FLD_ID_S_TPID:
+	case SXE2_FLOW_FLD_ID_S_TCI:
+	case SXE2_FLOW_FLD_ID_S_VID:
+		*prot_id = (dissector == 0) ? SXE2_PROT_EVLAN_O : SXE2_PROT_VLAN_IF;
+		break;
+
+	case SXE2_FLOW_FLD_ID_C_TPID:
+	case SXE2_FLOW_FLD_ID_C_TCI:
+	case SXE2_FLOW_FLD_ID_C_VID:
+		*prot_id = SXE2_PROT_VLAN_O;
+		break;
+	case SXE2_FLOW_FLD_ID_ETH_TYPE:
+		*prot_id = (dissector == 0) ? SXE2_PROT_ETYPE_OL
+					    : SXE2_PROT_ETYPE_IL;
+		break;
+
+	case SXE2_FLOW_FLD_ID_IPV4_TOS:
+	case SXE2_FLOW_FLD_ID_IPV4_SA:
+	case SXE2_FLOW_FLD_ID_IPV4_DA:
+	case SXE2_FLOW_FLD_ID_IPV4_CHKSUM:
+	case SXE2_FLOW_FLD_ID_IPV4_PROT:
+	case SXE2_FLOW_FLD_ID_IPV4_TTL:
+	case SXE2_FLOW_FLD_ID_IPV4_ID:
+		*prot_id = (dissector == 0) ? SXE2_PROT_IPV4_OF_OR_S
+					    : SXE2_PROT_IPV4_IL;
+		break;
+
+	case SXE2_FLOW_FLD_ID_TCP_SRC_PORT:
+	case SXE2_FLOW_FLD_ID_TCP_DST_PORT:
+	case SXE2_FLOW_FLD_ID_TCP_FLAGS:
+	case SXE2_FLOW_FLD_ID_TCP_CHKSUM:
+		*prot_id = SXE2_PROT_TCP_IL;
+		break;
+	case SXE2_FLOW_FLD_ID_UDP_SRC_PORT:
+	case SXE2_FLOW_FLD_ID_UDP_DST_PORT:
+	case SXE2_FLOW_FLD_ID_UDP_CHKSUM:
+		*prot_id = (dissector == 0) ? SXE2_PROT_UDP_OF
+					    : SXE2_PROT_UDP_IL_OR_S;
+		break;
+	case SXE2_FLOW_FLD_ID_SCTP_SRC_PORT:
+	case SXE2_FLOW_FLD_ID_SCTP_DST_PORT:
+	case SXE2_FLOW_FLD_ID_SCTP_CHKSUM:
+		*prot_id = SXE2_PROT_SCTP_IL;
+		break;
+
+	case SXE2_FLOW_FLD_ID_IPV6_PROT:
+	case SXE2_FLOW_FLD_ID_IPV6_DSCP:
+	case SXE2_FLOW_FLD_ID_IPV6_SA:
+	case SXE2_FLOW_FLD_ID_IPV6_DA:
+	case SXE2_FLOW_FLD_ID_IPV6_TTL:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE32_SA:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE32_DA:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE48_SA:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE48_DA:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE64_SA:
+	case SXE2_FLOW_FLD_ID_IPV6_PRE64_DA:
+		*prot_id = (dissector == 0) ? SXE2_PROT_IPV6_OF_OR_S
+					    : SXE2_PROT_IPV6_IL;
+		break;
+	case SXE2_FLOW_FLD_ID_IPV6_ID:
+		*prot_id = SXE2_PROT_IPV6_FRAG;
+		break;
+	case SXE2_FLOW_FLD_ID_VXLAN_VNI:
+	case SXE2_FLOW_FLD_ID_GENEVE_VNI:
+	case SXE2_FLOW_FLD_ID_GTPU_TEID:
+		*prot_id = SXE2_PROT_UDP_OF;
+		break;
+	case SXE2_FLOW_FLD_ID_NVGRE_TNI:
+		*prot_id = SXE2_PROT_GRE_OF;
+		break;
+	default:
+		LOG_ERROR_BDF("failed to parse unsupported fld_id[%u].\n", fld_id);
+		ret = -EINVAL;
+		goto l_end;
+	}
+l_end:
+	return ret;
+}
+
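+/*
+ * Translate one matched field into field-vector (fv) words: resolve the
+ * protocol id, split the field into SXE2_FLOW_FV_SIZE-byte words, and
+ * record the protocol id, offset, and mask for each word. Fields with a
+ * non-zero default mask (outside the ACL block) are placed at the fv slot
+ * reserved by sxe2_flow_default_mask_get() instead of the running fv_cnt
+ * slot.
+ */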
LOG_ERROR_BDF("Failed to fv[%d] mask cfg, fv[%d] prot_id:%d " + "off:%d mask:0x%x\n", + fld_id, def_fv_id, + flow_params->fv[def_fv_id].prot_id, + flow_params->fv[def_fv_id].off, + flow_params->fv_mask[def_fv_id]); + ret = -EINVAL; + goto l_end; + } + + flow_params->fv[def_fv_id].prot_id = (u8)prot_id; + flow_params->fv[def_fv_id].off = fld[fld_id].xtrct.off; + flow_params->fv_mask[def_fv_id] = fld[fld_id].xtrct.mask; + + goto l_end; + } + + fld[fld_id].xtrct.prot_id = (u8)prot_id; + fld[fld_id].xtrct.off = (u16)((sxe2_flds_info[fld_id].off / fv_word_size) * + (u16)SXE2_FLOW_FV_SIZE); + fld[fld_id].xtrct.disp = (u8)(sxe2_flds_info[fld_id].off % fv_word_size); + fld[fld_id].xtrct.mask = sxe2_flds_info[fld_id].mask; + fld[fld_id].xtrct.idx = flow_params->fv_cnt; + + LOG_DEBUG_BDF("parse fld[%u] prot_id:%u off:%u disp:%u mask:%u idx:%u.\n", + fld_id, prot_id, fld[fld_id].xtrct.off, fld[fld_id].xtrct.disp, + fld[fld_id].xtrct.mask, fld[fld_id].xtrct.idx); + + fv_cnt = (u8)DIV_ROUND_UP((s16)(fld[fld_id].xtrct.disp + + sxe2_flds_info[fld_id].size), + (s16)fv_word_size); + + off = fld[fld_id].xtrct.off; + mask = fld[fld_id].xtrct.mask; + for (i = 0; i < fv_cnt; i++) { + if (flow_params->fv_cnt >= (fv_num - ppp_ctxt->hw_fv_mask_num)) { + ret = -ENOSPC; + LOG_ERROR_BDF("parse flow flds, used fv max limit, " + "fv_cnt=%u, fv_num=%u, fv_mask_num=%u.\n", + flow_params->fv_cnt, fv_num, + ppp_ctxt->hw_fv_mask_num); + goto l_end; + } + + fv_idx = flow_params->fv_cnt; + + flow_params->fv[fv_idx].prot_id = (u8)prot_id; + flow_params->fv[fv_idx].off = off; + flow_params->fv_mask[fv_idx] = mask; + + LOG_DEBUG_BDF("parse fld[%u] fv[%u] prot_id:%u off:%u mask:%u.\n", + fld_id, fv_idx, prot_id, off, mask); + flow_params->fv_cnt++; + off += SXE2_FLOW_FV_SIZE; + } + +l_end: + return ret; +} + +static s32 sxe2_flow_parse_raw_to_fv(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + u8 dissector) +{ + s32 ret = 0; + u8 i, j; + u16 off; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct sxe2_flow_raw *raw; + u8 raw_cnt = flow_params->flow_info->dissectors[dissector].raw_cnt; + u8 fv_num = ppp_ctxt->hw_fv_num; + u8 fv_cnt; + + if (!raw_cnt) + return 0; + + if (raw_cnt > SXE2_MAX_RAW_CNT) + return -ENOSPC; + + for (i = 0; i < raw_cnt; i++) { + raw = &flow_params->flow_info->dissectors[dissector].raw[i]; + raw->fld.xtrct.prot_id = SXE2_PROT_MAC_OF_OR_S; + raw->fld.xtrct.off = raw->offset; + raw->fld.xtrct.disp = 0; + raw->fld.xtrct.idx = flow_params->fv_cnt; + fv_cnt = (u8)DIV_ROUND_UP(raw->fld.xtrct.disp + + raw->fld.fld_val.len * + SXE2_FLOW_FV_SIZE, + SXE2_FLOW_FV_SIZE * BITS_PER_BYTE); + off = raw->fld.xtrct.off; + for (j = 0; j < fv_cnt; j++) { + if (flow_params->fv_cnt >= + (fv_num - ppp_ctxt->hw_fv_mask_num)) { + ret = -ENOSPC; + LOG_INFO_BDF("parse flow raws, used fv max " + "limit.\n"); + goto l_end; + } + + flow_params->fv[flow_params->fv_cnt].prot_id = + raw->fld.xtrct.prot_id; + flow_params->fv[flow_params->fv_cnt].off = off; + flow_params->fv_mask[flow_params->fv_cnt] = 0; + flow_params->fv_cnt++; + off += SXE2_FLOW_FV_SIZE; + } + } + +l_end: + return ret; +} + +static s32 sxe2_flow_parse_dissectors_fld(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params) +{ + DECLARE_BITMAP(fields, SXE2_FLOW_FLD_ID_MAX); + s32 ret = 0; + enum sxe2_flow_fld_id j = 0; + unsigned long tmp; + u8 i; + + bitmap_zero(fields, SXE2_FLOW_FLD_ID_MAX); + + for (i = 0; i < flow_params->flow_info->dissector_cnt; i++) { + bitmap_copy(fields, 
flow_params->flow_info->dissectors[i].fields, + SXE2_FLOW_FLD_ID_MAX); + + for_each_set_bit(tmp, fields, SXE2_FLOW_FLD_ID_MAX) + { + j = (enum sxe2_flow_fld_id)tmp; + ret = sxe2_flow_parse_fld_to_fv(ppp_ctxt, flow_params, i, j, + fields); + if (ret != 0) + goto l_end; + + clear_bit((int)j, fields); + } + + ret = sxe2_flow_parse_raw_to_fv(ppp_ctxt, flow_params, i); + if (ret) + goto l_end; + } + +l_end: + return ret; +} + +#define SXE2_FLOW_FLD_OFF_INVAL 0xffff +STATIC void sxe2_flow_acl_frmt_entry_field(u16 fld, struct sxe2_flow_fld *info, + u8 *buf, u8 *dontcare, u8 *data) +{ + u16 dst, src, mask, k, end_disp, tmp_s = 0, tmp_m = 0; + bool use_mask = false; + u8 disp; + + src = info->fld_val.val; + mask = info->fld_val.mask; + dst = info->last_val.val - SXE2_ACL_PROF_BYTE_SEL_START_IDX; + disp = info->xtrct.disp % BITS_PER_BYTE; + + if (mask != SXE2_FLOW_FLD_OFF_INVAL) + use_mask = true; + + for (k = 0; k < info->last_val.len; k++, dst++) { + buf[dst] = (tmp_s & 0xff00) >> 8; + + dontcare[dst] = (tmp_m & 0xff00) >> 8; + + if (!disp || k < info->last_val.len - 1) { + tmp_s = data[src++] << disp; + + buf[dst] |= tmp_s & 0xff; + + if (use_mask) { + tmp_m = (~data[mask++] & 0xff) << disp; + dontcare[dst] |= tmp_m & 0xff; + } + } + } + + if (disp) { + dst = info->last_val.val - SXE2_ACL_PROF_BYTE_SEL_START_IDX; + for (k = 0; k < disp; k++) + dontcare[dst] |= BIT(k); + } + + end_disp = (disp + sxe2_flds_info[fld].size) % BITS_PER_BYTE; + + if (end_disp) { + dst = info->last_val.val - SXE2_ACL_PROF_BYTE_SEL_START_IDX + + info->last_val.len - 1; + for (k = end_disp; k < BITS_PER_BYTE; k++) + dontcare[dst] |= BIT(k); + } +} + +s32 sxe2_flow_acl_format_lut_act_entry(struct sxe2_adapter *adapter, + struct sxe2_acl_flow_entry *flow_entry, + struct sxe2_flow_info_node *flow, + struct sxe2_acl_flow_action *acts, u8 *data) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_dissector_info *dis_info; + struct sxe2_flow_fld *field; + s32 ret = 0; + u8 *dontcare = NULL; + u8 *buf = NULL; + u8 *key = NULL; + u16 buf_size; + u16 i, j; + + flow_entry->action = devm_kzalloc(dev, sizeof(struct sxe2_acl_flow_action), + GFP_KERNEL); + if (!flow_entry->action) { + ret = -ENOMEM; + goto l_end; + } + memcpy(flow_entry->action, acts, sizeof(*acts)); + + buf_size = flow->cfg.scen->width; + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto l_end; + } + + dontcare = kzalloc(buf_size, GFP_KERNEL); + if (!dontcare) { + ret = -ENOMEM; + goto l_end; + } + memset(dontcare, 0xff, buf_size); + + key = devm_kzalloc(dev, buf_size * 2, GFP_KERNEL); + if (!key) { + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < flow->dissector_cnt; i++) { + dis_info = &flow->dissectors[i]; + for_each_set_bit(j, dis_info->fields, SXE2_FLOW_FLD_ID_MAX) + { + field = &dis_info->fld[j]; + sxe2_flow_acl_frmt_entry_field(j, field, buf, dontcare, + data); + } + + for (j = 0; j < dis_info->raw_cnt; j++) { + struct sxe2_flow_raw *raw = &dis_info->raw[j]; + u16 dst, src, mask, k; + bool use_mask = false; + + src = raw->fld.fld_val.val; + dst = raw->fld.last_val.val - + SXE2_ACL_PROF_BYTE_SEL_START_IDX; + mask = raw->fld.fld_val.mask; + + if (mask != SXE2_FLOW_FLD_OFF_INVAL) + use_mask = true; + + for (k = 0; k < raw->fld.last_val.len; k++, dst++) { + buf[dst] = data[src++]; + if (use_mask) + dontcare[dst] = ~data[mask++]; + else + dontcare[dst] = 0; + } + } + } + + ret = sxe2_flow_set_key(key, buf_size * 2, buf, NULL, dontcare, NULL, 0, + buf_size); + if (ret) + goto l_end; + + flow_entry->entry = key; + 
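/*
+	 * A sketch of the key layout, assuming sxe2_flow_set_key() packs the
+	 * match values and the care mask back to back: bytes [0, buf_size)
+	 * come from buf and bytes [buf_size, 2 * buf_size) are derived from
+	 * dontcare, which is why the entry size recorded below is twice the
+	 * scenario width (e.g. a hypothetical scen->width of 40 bytes gives
+	 * an 80-byte entry).
+	 */
+	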
flow_entry->entry_size = buf_size * 2; + + kfree(buf); + kfree(dontcare); + return 0; + +l_end: + kfree(buf); + kfree(dontcare); + + if (ret && key) + devm_kfree(dev, key); + + if (ret && flow_entry->action) { + devm_kfree(dev, flow_entry->action); + flow_entry->action = NULL; + } + return ret; +} + +STATIC s32 sxe2_flow_acl_fill_last_fld(struct sxe2_flow_info_params *flow_params) +{ + struct sxe2_flow_dissector_info *dissectors; + DECLARE_BITMAP(fields, SXE2_FLOW_FLD_ID_MAX); + struct sxe2_flow_fld *flow_fld; + struct sxe2_flow_raw *flow_raw; + s32 ret = 0; + u32 fld = 0; + u16 index; + u16 i, j; + u8 dis_cnt; + + index = SXE2_ACL_PROF_BYTE_SEL_START_IDX; + + dis_cnt = flow_params->flow_info->dissector_cnt; + for (i = 0; i < dis_cnt; i++) { + dissectors = &flow_params->flow_info->dissectors[i]; + + bitmap_zero(fields, SXE2_FLOW_FLD_ID_MAX); + bitmap_copy(fields, dissectors->fields, SXE2_FLOW_FLD_ID_MAX); + for_each_set_bit(fld, fields, SXE2_FLOW_FLD_ID_MAX) + { + flow_fld = &dissectors->fld[fld]; + flow_fld->last_val.mask = 0xffff; + + if (flow_fld->type == SXE2_FLOW_FLD_TYPE_VAL) { + flow_fld->last_val.len = DIV_ROUND_UP(sxe2_flds_info[fld].size + + (flow_fld->xtrct.disp % + BITS_PER_BYTE), + BITS_PER_BYTE); + flow_fld->last_val.val = index; + + index += flow_fld->last_val.len; + } + } + + for (j = 0; j < dissectors->raw_cnt; j++) { + flow_raw = &dissectors->raw[j]; + + flow_raw->fld.last_val.mask = 0xffff; + flow_raw->fld.last_val.val = index; + flow_raw->fld.last_val.len = flow_raw->fld.fld_val.len; + + index += flow_raw->fld.last_val.len; + } + } + + if (index > SXE2_ACL_PROF_BYTE_SEL_ELEMS) { + ret = -EINVAL; + goto l_end; + } + + flow_params->match_size = index; + +l_end: + return ret; +} + +STATIC s32 sxe2_flow_acl_select_scen(struct sxe2_ppp_common_ctxt *ppp_ctx, + struct sxe2_flow_info_params *flow_params) +{ + struct sxe2_acl_scen_info *scen; + struct sxe2_acl_scen_info *tmp_scen; + struct sxe2_acl_scen_info *find_scen = NULL; + struct sxe2_acl_tbl_info *acl_tbl_info = + ppp_ctx->adapter->acl_ctxt.acl_tbl_info; + struct sxe2_adapter *adapter = ppp_ctx->adapter; + s32 ret = 0; + + if (list_empty(&acl_tbl_info->scens)) { + LOG_ERROR_BDF("no acl scen.\n"); + ret = -ENODATA; + goto l_end; + } + LOG_DEBUG_BDF("match_size:%d\n", flow_params->match_size); + list_for_each_entry_safe(scen, tmp_scen, &acl_tbl_info->scens, l_entry) + { + LOG_DEBUG_BDF("scen:%d, avail_width:%d\n", scen->scen_id, + scen->avail_width); + if (scen->avail_width >= flow_params->match_size && + (!find_scen || find_scen->avail_width > scen->avail_width)) { + find_scen = scen; + } + } + + if (!find_scen) { + LOG_ERROR_BDF("no acl scen.\n"); + ret = -ENODATA; + goto l_end; + } + + flow_params->flow_info->cfg.scen = find_scen; + +l_end: + return ret; +} + +static s32 sxe2_flow_parse_dissectors(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + ret = sxe2_flow_parse_dissectors_hdrs(ppp_ctxt, flow_params); + if (ret != 0) { + LOG_DEBUG_BDF("failed to parse dissectors hdrs.\n"); + goto l_end; + } + + ret = sxe2_flow_parse_dissectors_fld(ppp_ctxt, flow_params); + if (ret != 0) + LOG_DEBUG_BDF("failed to parse dissectors flds.\n"); + + if (ppp_ctxt->block_id == SXE2_HW_BLOCK_ID_ACL) { + ret = sxe2_flow_acl_fill_last_fld(flow_params); + if (ret) { + LOG_ERROR_BDF("failed to fill last fld.\n"); + goto l_end; + } + ret = sxe2_flow_acl_select_scen(ppp_ctxt, flow_params); + if (ret) { + LOG_ERROR_BDF("failed to select 
scen.\n"); + goto l_end; + } + } + +l_end: + return ret; +} + +static s32 sxe2_flow_add_profile(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_params *flow_params, + bool fnav_swap) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + DECLARE_BITMAP(ptgs_used, SXE2_MAX_PTG_NUM); + struct sxe2_flow_info_node *flow = flow_params->flow_info; + s32 ret = 0; + u64 ptype_64; + u16 ptype; + u8 prof_id; + u8 ptg; + + SXE2_SET_USED(fnav_swap); + + bitmap_zero((unsigned long *)ptgs_used, SXE2_MAX_PTG_NUM); + + ret = sxe2_flow_alloc_hw_prof_id(ppp_ctxt, &prof_id); + if (ret != 0) + goto l_end; + + if (ppp_ctxt->block_id != SXE2_HW_BLOCK_ID_ACL) { + ret = sxe2_flow_fnav_update_hw_prof_fv_mask(ppp_ctxt, prof_id, + flow_params->fv_mask); + if (ret != 0) + goto l_end; + } + + sxe2_flow_update_hw_prof_fv(ppp_ctxt, prof_id, flow_params->fv); + + LOG_DEBUG_BDF("alloc profile[%u]\n", prof_id); + + (void)sxe2_flow_hw_prof_inc_ref(ppp_ctxt, prof_id); + flow->prof_id = prof_id; + + for_each_set_bit(ptype_64, flow_params->ptypes, SXE2_MAX_PTYPE_NUM) + { + ptype = (u16)ptype_64; + ptg = ppp_ctxt->pt_to_grp[ptype].idx; + + if (test_bit(ptg, ptgs_used)) + continue; + + LOG_INFO_BDF("process ptype[%u] ptg[%u]\n", ptype, ptg); + + set_bit(ptg, ptgs_used); + + flow->ptg_info.ptg[flow->ptg_info.ptg_cnt] = ptg; + + flow->ptg_info.ptg_cnt++; + if (flow->ptg_info.ptg_cnt >= SXE2_MAX_PTG_PER_PROF_NUM) { + LOG_DEBUG_BDF("profile add [%u] used max ptg num %u.\n", + prof_id, flow->ptg_info.ptg_cnt); + break; + } + } + LOG_DEBUG_BDF("profile[%u] used ptg num %u.\n", prof_id, + flow->ptg_info.ptg_cnt); + +l_end: + return ret; +} + +static s32 sxe2_flow_creat_sync(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, struct sxe2_flow_info_node **flow) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_info_params *flow_params; + s32 ret; + u32 i; + + if (!flow) { + LOG_ERROR_BDF("flow is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + flow_params = devm_kzalloc(dev, sizeof(*flow_params), GFP_KERNEL); + if (!flow_params) { + LOG_ERROR_BDF("failed to alloc flow_params.\n"); + ret = -ENOMEM; + goto l_end; + } + + flow_params->flow_info = devm_kzalloc(dev, sizeof(*flow_params->flow_info), + GFP_KERNEL); + if (!flow_params->flow_info) { + LOG_ERROR_BDF("failed to alloc flow_info.\n"); + ret = -ENOMEM; + goto l_end_free; + } + + for (i = 0; i < SXE2_MAX_FV_WORDS; i++) { + flow_params->fv[i].prot_id = SXE2_FV_PORT_ID_INVAL; + flow_params->fv[i].off = SXE2_FV_OFFSET_INVAL; + } + + flow_params->flow_info->dissector_cnt = dissectors_cnt; + for (i = 0; i < dissectors_cnt; i++) + flow_params->flow_info->dissectors[i] = dissectors[i]; + + ret = sxe2_flow_parse_dissectors(ppp_ctxt, flow_params); + if (ret != 0) { + LOG_ERROR_BDF("failed to parse dissectors.\n"); + goto l_end_free; + } + + ret = sxe2_flow_add_profile(ppp_ctxt, flow_params, true); + if (ret != 0) { + LOG_ERROR_BDF("failed to add profile.\n"); + goto l_end_free; + } + + *flow = flow_params->flow_info; + +l_end_free: + if (ret != 0) + devm_kfree(dev, flow_params->flow_info); + + devm_kfree(dev, flow_params); +l_end: + return ret; +} + +enum protocol_stack_layer { + protocol_stack_layer_L2, + protocol_stack_layer_L3, + protocol_stack_layer_L4, +}; + +static void sxe2_layer_hdrs_bitmap_get(enum protocol_stack_layer layer, + unsigned long *headers) +{ + bitmap_zero(headers, SXE2_FLOW_HDR_MAX); + switch (layer) { + case 
protocol_stack_layer_L2: + set_bit(SXE2_FLOW_HDR_ETH, headers); + set_bit(SXE2_FLOW_HDR_VLAN, headers); + break; + case protocol_stack_layer_L3: + set_bit(SXE2_FLOW_HDR_IPV4, headers); + set_bit(SXE2_FLOW_HDR_IPV6, headers); + break; + case protocol_stack_layer_L4: + set_bit(SXE2_FLOW_HDR_TCP, headers); + set_bit(SXE2_FLOW_HDR_UDP, headers); + set_bit(SXE2_FLOW_HDR_SCTP, headers); + break; + default: + break; + } +} + +static bool sxe2_flow_check_hdrs_correct(struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt) +{ + u8 i; + bool ret = false; + DECLARE_BITMAP(headers_l3, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(headers_l4, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(headers_tmp, SXE2_FLOW_HDR_MAX); + + sxe2_layer_hdrs_bitmap_get(protocol_stack_layer_L3, headers_l3); + sxe2_layer_hdrs_bitmap_get(protocol_stack_layer_L4, headers_l4); + + for (i = 0; i < dissectors_cnt; i++) { + bitmap_and(headers_tmp, dissectors[i].headers, headers_l3, + SXE2_FLOW_HDR_MAX); + if (!bitmap_empty(headers_tmp, SXE2_FLOW_HDR_MAX) && + (bitmap_weight(headers_tmp, SXE2_FLOW_HDR_MAX) != 1)) { + goto l_end; + } + + bitmap_and(headers_tmp, dissectors[i].headers, headers_l4, + SXE2_FLOW_HDR_MAX); + if (!bitmap_empty(headers_tmp, SXE2_FLOW_HDR_MAX) && + (bitmap_weight(headers_tmp, SXE2_FLOW_HDR_MAX) != 1)) { + goto l_end; + } + } + + ret = true; +l_end: + return ret; +} + +s32 sxe2_flow_creat(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, u8 dissectors_cnt, + struct sxe2_flow_info_node **flow) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + + if (!dissectors) { + LOG_ERROR_BDF("dissectors is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + if ((dissectors_cnt == 0) || (dissectors_cnt > SXE2_MAX_DISSECTOR_NUM)) { + LOG_ERROR_BDF("dissectors_cnt is %u.\n", dissectors_cnt); + ret = -EINVAL; + goto l_end; + } + + if (!sxe2_flow_check_hdrs_correct(dissectors, dissectors_cnt)) { + LOG_ERROR_BDF("failed to check hdrs not correct.\n"); + ret = -EINVAL; + goto l_end; + } + + mutex_lock(&ppp_ctxt->flow_list_lock); + ret = sxe2_flow_creat_sync(ppp_ctxt, dissectors, dissectors_cnt, flow); + if (ret == 0) + list_add(&(*flow)->l_node, &ppp_ctxt->flow_list); + else + LOG_ERROR_BDF("failed to create flow.\n"); + + mutex_unlock(&ppp_ctxt->flow_list_lock); + +l_end: + return ret; +} + +static s32 sxe2_flow_op_remove_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_og_chg *tmp_chg; + struct sxe2_og_chg *del_chg; + struct list_head op_list; + s32 ret; + u16 i; + + INIT_LIST_HEAD(&op_list); + + for (i = 0; i < SXE2_MAX_VSIG_NUM; i++) { + if (ppp_ctxt->vsig[i].used) { + if (sxe2_flow_check_flow_in_vsig(ppp_ctxt, flow, i)) { + ret = sxe2_flow_op_vsig_remove_flow(ppp_ctxt, flow, + i, &op_list); + if (ret != 0) + goto l_end; + } + } + } + + (void)sxe2_flow_hw_prof_dec_ref(ppp_ctxt, flow->prof_id); + + ret = sxe2_fwc_update_profile(ppp_ctxt, ppp_ctxt->block_id, &op_list); +l_end: + list_for_each_entry_safe(del_chg, tmp_chg, &op_list, l_entry) + { + list_del(&del_chg->l_entry); + devm_kfree(dev, del_chg); + } + return ret; +} + +s32 sxe2_flow_delete(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow) +{ + s32 ret; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + mutex_lock(&ppp_ctxt->flow_list_lock); + ret = sxe2_flow_op_remove_flow(ppp_ctxt, flow); + if 
(ret == 0) { + list_del(&flow->l_node); + devm_kfree(dev, flow); + } + mutex_unlock(&ppp_ctxt->flow_list_lock); + + return ret; +} + +s32 sxe2_flow_cfg_clear_muti_vsi_in_vsig(struct sxe2_adapter *adapter, + struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_info_node *flow; + struct sxe2_flow_info_node *flow_tmp; + struct sxe2_og_chg *tmp_chg; + struct sxe2_og_chg *del_chg; + struct list_head op_list; + s32 ret = 0; + + INIT_LIST_HEAD(&op_list); + + list_for_each_entry_safe(flow, flow_tmp, &ppp_ctxt->flow_list, l_node) + { + if (test_bit(vsi_sw_idx, flow->used_vsi)) + clear_bit(vsi_sw_idx, flow->used_vsi); + } + + ret = sxe2_flow_op_move_vsi_to_vsig(ppp_ctxt, vsi_sw_idx, + SXE2_PPP_DEFAULT_VSIG_IDX, &op_list); + if (ret == 0) { + ret = sxe2_fwc_update_profile(ppp_ctxt, ppp_ctxt->block_id, + &op_list); + } + + list_for_each_entry_safe(del_chg, tmp_chg, &op_list, l_entry) + { + list_del(&del_chg->l_entry); + devm_kfree(dev, del_chg); + } + + return ret; +} + +s32 sxe2_rss_delete_vsi_flows_for_vfr(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx) +{ + struct sxe2_ppp_common_ctxt *ppp_ctxt = &rss_ctxt->ppp; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct sxe2_flow_info_node *flow; + struct sxe2_flow_info_node *tmp; + s32 ret = 0; + u16 vsig_idx; + bool only_vsi; + + if (list_empty(&ppp_ctxt->flow_list)) + goto l_end; + + mutex_lock(&rss_ctxt->rss_cfgs_lock); + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &vsig_idx); + if (ret != 0) + goto l_unlock; + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) + goto l_unlock; + + only_vsi = (ppp_ctxt->vsig[vsig_idx].vsi_cnt == 1); + if (!only_vsi) { + ret = sxe2_flow_cfg_clear_muti_vsi_in_vsig(adapter, ppp_ctxt, + vsi_sw_idx); + if (ret) { + LOG_WARN_BDF("move vsi[%u] to default vsig failed, ret=%d\n", + vsi_sw_idx, ret); + } + goto l_unlock; + } + + list_for_each_entry_safe(flow, tmp, &ppp_ctxt->flow_list, l_node) { + if (test_bit(vsi_sw_idx, flow->used_vsi)) { + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) + goto l_unlock; + + if (bitmap_empty((unsigned long *)flow->used_vsi, + SXE2_MAX_VSI_NUM)) { + ret = sxe2_flow_delete(ppp_ctxt, flow); + if (ret != 0) + goto l_unlock; + } + } + } + +l_unlock: + mutex_unlock(&rss_ctxt->rss_cfgs_lock); +l_end: + LOG_DEBUG_BDF("delete vsi flows end, ret:%d.\n", ret); + return ret; +} + +s32 sxe2_rss_delete_vsi_flows(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx) +{ + struct sxe2_ppp_common_ctxt *ppp_ctxt = &rss_ctxt->ppp; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct sxe2_flow_info_node *flow; + struct sxe2_flow_info_node *tmp; + s32 ret = 0; + u16 vsig_idx; + bool only_vsi; + + mutex_lock(&rss_ctxt->rss_cfgs_lock); + + if (list_empty(&ppp_ctxt->flow_list)) + goto l_delete; + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &vsig_idx); + if (ret != 0) + goto l_unlock; + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) + goto l_delete; + + only_vsi = (ppp_ctxt->vsig[vsig_idx].vsi_cnt == 1); + if (!only_vsi) { + ret = sxe2_flow_cfg_clear_muti_vsi_in_vsig(adapter, ppp_ctxt, + vsi_sw_idx); + if (ret) + goto l_unlock; + else + goto l_delete; + } + + list_for_each_entry_safe(flow, tmp, &ppp_ctxt->flow_list, l_node) { + if (test_bit(vsi_sw_idx, flow->used_vsi)) { + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) + goto l_unlock; + + if (bitmap_empty((unsigned long *)flow->used_vsi, + SXE2_MAX_VSI_NUM)) { + ret = sxe2_flow_delete(ppp_ctxt, flow); + 
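/*
+				 * sxe2_flow_delete() runs only once the
+				 * used_vsi bitmap is empty: flows are in
+				 * effect reference-counted per VSI, and
+				 * the disassoc above dropped this VSI's
+				 * bit before the emptiness check.
+				 */
+				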
if (ret != 0) + goto l_unlock; + } + } + } + +l_delete: + sxe2_rss_delete_vsi_cfg_list(rss_ctxt, vsi_sw_idx); + +l_unlock: + mutex_unlock(&rss_ctxt->rss_cfgs_lock); + + LOG_DEBUG_BDF("delete vsi flows end, ret:%d.\n", ret); + return ret; +} + +struct sxe2_flow_info_node * +sxe2_find_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, u8 dissectors_cnt) +{ + struct sxe2_flow_info_node *flow_tmp; + struct sxe2_flow_info_node *flow_find = NULL; + u8 i, j; + + mutex_lock(&ppp_ctxt->flow_list_lock); + + list_for_each_entry(flow_tmp, &ppp_ctxt->flow_list, l_node) + { + if (dissectors_cnt && (dissectors_cnt == flow_tmp->dissector_cnt)) { + for (i = 0; i < dissectors_cnt; i++) { + if (!bitmap_equal(dissectors[i].headers, + flow_tmp->dissectors[i].headers, + SXE2_FLOW_HDR_MAX)) { + break; + } + if (!bitmap_equal(dissectors[i].fields, + flow_tmp->dissectors[i].fields, + SXE2_FLOW_FLD_ID_MAX)) { + break; + } + + if (dissectors[i].raw_cnt != + flow_tmp->dissectors[i].raw_cnt) { + break; + } + for (j = 0; j < dissectors[i].raw_cnt; j++) { + if (dissectors[i].raw[j].offset != + flow_tmp->dissectors[i].raw[j].offset) { + break; + } + if (dissectors[i].raw[j].fld.type != + flow_tmp->dissectors[i] + .raw[j] + .fld.type) { + break; + } + if (memcmp(&dissectors[i].raw[j].fld.fld_val, + &flow_tmp->dissectors[i] + .raw[j] + .fld.fld_val, + sizeof(struct + sxe2_flow_fld_val))) { + break; + } + } + if (j != dissectors[i].raw_cnt) + break; + } + + if (i == dissectors_cnt) { + flow_find = flow_tmp; + break; + } + } + } + mutex_unlock(&ppp_ctxt->flow_list_lock); + + return flow_find; +} + +STATIC struct sxe2_flow_info_node * +sxe2_find_flow_with_cond(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, u16 vsi_sw_idx, u32 conds) +{ + struct sxe2_flow_info_node *flow_tmp; + struct sxe2_flow_info_node *flow_find = NULL; + u8 i; + + mutex_lock(&ppp_ctxt->flow_list_lock); + + list_for_each_entry(flow_tmp, &ppp_ctxt->flow_list, l_node) + { + if (dissectors_cnt && (dissectors_cnt == flow_tmp->dissector_cnt)) { + if ((conds & SXE2_FLOW_FIND_FLOW_COND_VSI) && + (!test_bit(vsi_sw_idx, flow_tmp->used_vsi))) { + continue; + } + + for (i = 0; i < dissectors_cnt; i++) { + if (!bitmap_equal(dissectors[i].headers, + flow_tmp->dissectors[i].headers, + SXE2_FLOW_HDR_MAX) || + ((conds & SXE2_FLOW_FIND_FLOW_COND_FLD) && + !bitmap_equal(dissectors[i].fields, + flow_tmp->dissectors[i].fields, + SXE2_FLOW_FLD_ID_MAX))) { + break; + } + } + + if (i == dissectors_cnt) { + flow_find = flow_tmp; + break; + } + } + } + mutex_unlock(&ppp_ctxt->flow_list_lock); + return flow_find; +} + +static void sxe2_rss_xor_symm_fv(struct sxe2_adapter *adapter, + struct sxe2_rss_symm_fv *symm_fv, u8 src, u8 dst, + u8 len) +{ + u8 i; + + len = len / SXE2_FLOW_FV_SIZE; + for (i = 0; i < len; i++) { + symm_fv[src + i].valid = 1; + symm_fv[dst + i].valid = 1; + symm_fv[src + i].fv_idx = dst + i; + symm_fv[dst + i].fv_idx = src + i; + LOG_DEBUG_BDF("Rss symm fv xor[%d:%d].", src + i, dst + i); + } +} + +static s32 sxe2_fwc_rss_symm_fv_set(struct sxe2_adapter *adapter, u16 prof_id, + struct sxe2_rss_symm_fv *symm_fv) +{ + struct sxe2_rss_symm_fv_cfg symm_cfg = {0}; + struct sxe2_cmd_params cmd = {0}; + s32 ret = 0; + + if (!symm_fv) { + ret = -EINVAL; + LOG_ERROR_BDF("Failed to set rss hash symm fv, param error."); + goto l_end; + } + + symm_cfg.prof_id = prof_id; + memcpy(symm_cfg.fv, symm_fv, SXE2_RSS_FV_CNT); + + sxe2_cmd_params_dflt_fill(&cmd, 
SXE2_CMD_RSS_SYMM_FV_SET, &symm_cfg, + sizeof(symm_cfg), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("Rss hash symm fv set cmd fail, ret=%d", ret); + +l_end: + return ret; +} + +static s32 sxe2_rss_update_symm(struct sxe2_rss_ctxt *rss_ctxt, + struct sxe2_flow_info_node *flow) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct sxe2_flow_dissector_info *dissector; + struct sxe2_rss_symm_fv_pair *symm_pair; + struct sxe2_rss_symm_fv symm_fv[SXE2_RSS_FV_CNT] = {{0}}; + struct sxe2_flow_fld_xtrct *src_fld; + struct sxe2_flow_fld_xtrct *dst_fld; + u8 i = 0; + u8 valid_fld_cnt = 0; + + valid_fld_cnt = sizeof(sxe2_rss_symm_fv_list) / + sizeof(sxe2_rss_symm_fv_list[0]); + + if (flow->cfg.symm) { + dissector = &flow->dissectors[flow->dissector_cnt - 1]; + for (i = 0; i < valid_fld_cnt; i++) { + symm_pair = &sxe2_rss_symm_fv_list[i]; + + src_fld = &dissector->fld[symm_pair->src_fld].xtrct; + dst_fld = &dissector->fld[symm_pair->dst_fld].xtrct; + + if ((src_fld->prot_id != 0) && (dst_fld->prot_id != 0)) { + sxe2_rss_xor_symm_fv(adapter, symm_fv, src_fld->idx, + dst_fld->idx, + symm_pair->fld_len); + } + } + } + + ret = sxe2_fwc_rss_symm_fv_set(adapter, flow->prof_id, symm_fv); + if (ret) + LOG_ERROR_BDF("Failed to update rss symm, ret:%d.", ret); + return ret; +} + +void sxe2_flow_set_diss_fld(struct sxe2_flow_dissector_info *dissector, + enum sxe2_flow_fld_id fld, u16 val, u16 mask, u16 len) +{ + set_bit((int)fld, dissector->fields); + dissector->fld[fld].type = SXE2_FLOW_FLD_TYPE_VAL; + dissector->fld[fld].fld_val.val = val; + dissector->fld[fld].fld_val.mask = mask; + dissector->fld[fld].fld_val.len = len; + + set_bit((int)sxe2_flds_info[fld].hdr, dissector->headers); +} + +void sxe2_flow_add_diss_raw(struct sxe2_flow_dissector_info *dissector, u16 off, + u16 val, u16 mask, u8 len) +{ + if (dissector->raw_cnt < SXE2_MAX_RAW_CNT) { + dissector->raw[dissector->raw_cnt].offset = off; + dissector->raw[dissector->raw_cnt].fld.type = SXE2_FLOW_FLD_TYPE_VAL; + dissector->raw[dissector->raw_cnt].fld.fld_val.val = val; + dissector->raw[dissector->raw_cnt].fld.fld_val.mask = mask; + dissector->raw[dissector->raw_cnt].fld.fld_val.len = len; + } + + dissector->raw_cnt++; +} + +static void sxe2_rss_flow_support_hdrs_get(unsigned long *hdrs) +{ + bitmap_zero(hdrs, SXE2_FLOW_HDR_MAX); + set_bit(SXE2_FLOW_HDR_ETH, hdrs); + set_bit(SXE2_FLOW_HDR_VLAN, hdrs); + set_bit(SXE2_FLOW_HDR_IPV4, hdrs); + set_bit(SXE2_FLOW_HDR_IPV6, hdrs); + set_bit(SXE2_FLOW_HDR_TCP, hdrs); + set_bit(SXE2_FLOW_HDR_UDP, hdrs); + set_bit(SXE2_FLOW_HDR_SCTP, hdrs); + set_bit(SXE2_FLOW_HDR_GENEVE, hdrs); + set_bit(SXE2_FLOW_HDR_GTPU, hdrs); + set_bit(SXE2_FLOW_HDR_VXLAN, hdrs); + set_bit(SXE2_FLOW_HDR_GRE, hdrs); + set_bit(SXE2_FLOW_HDR_IPV_OTHER, hdrs); + set_bit(SXE2_FLOW_HDR_IPV_FRAG, hdrs); + set_bit(SXE2_FLOW_HDR_ETH_NON_IP, hdrs); +} + +static s32 sxe2_rss_gen_dissector_info(struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, + const struct sxe2_rss_hash_cfg *cfg) +{ + s32 ret = 0; + u64 i; + struct sxe2_flow_dissector_info *dissector; + DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(rss_support_hdrs, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(headers_l3, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(headers_l4, SXE2_FLOW_HDR_MAX); + + sxe2_rss_flow_support_hdrs_get(rss_support_hdrs); + sxe2_layer_hdrs_bitmap_get(protocol_stack_layer_L3, headers_l3); + sxe2_layer_hdrs_bitmap_get(protocol_stack_layer_L4, headers_l4); + + dissector = &dissectors[dissectors_cnt - 
1]; + + for_each_set_bit(i, cfg->hash_flds, SXE2_FLOW_FLD_ID_MAX) + { + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)i, 0xffff, + 0xffff, 0xffff); + } + + bitmap_or(dissector->headers, dissector->headers, cfg->headers, + SXE2_FLOW_HDR_MAX); + + switch (cfg->hdr_type) { + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_IPV_OTHER, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_IPV_OTHER, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_GRE: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_IPV_OTHER, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GRE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_GRE: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_IPV_OTHER, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GRE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GRE: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GRE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GRE: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GRE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_VXLAN: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_VXLAN, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_VXLAN: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_VXLAN, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GENEVE: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GENEVE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GENEVE: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GENEVE, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GTPU: + set_bit(SXE2_FLOW_HDR_IPV4, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GTPU, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + case SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GTPU: + set_bit(SXE2_FLOW_HDR_IPV6, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_UDP, + 
dissectors[SXE2_RSS_OUTER_HEADERS].headers); + set_bit(SXE2_FLOW_HDR_GTPU, + dissectors[SXE2_RSS_OUTER_HEADERS].headers); + break; + default: + break; + } + + bitmap_andnot(hdrs, dissector->headers, rss_support_hdrs, SXE2_FLOW_HDR_MAX); + if (bitmap_weight(hdrs, SXE2_FLOW_HDR_MAX)) { + ret = -EINVAL; + LOG_ERROR("flow param dissector headers error.\n"); + goto l_end; + } + + bitmap_and(hdrs, dissector->headers, headers_l3, SXE2_FLOW_HDR_MAX); + if (!bitmap_empty(hdrs, SXE2_FLOW_HDR_MAX) && + (bitmap_weight(hdrs, SXE2_FLOW_HDR_MAX) != 1)) { + LOG_ERROR("flow cfg dissector headers l3 error.\n"); + ret = -EIO; + goto l_end; + } + + bitmap_and(hdrs, dissector->headers, headers_l4, SXE2_FLOW_HDR_MAX); + if (!bitmap_empty(hdrs, SXE2_FLOW_HDR_MAX) && + (bitmap_weight(hdrs, SXE2_FLOW_HDR_MAX) != 1)) { + LOG_ERROR("flow cfg dissector headers l4 error.\n"); + ret = -EIO; + goto l_end; + } + +l_end: + return ret; +} + +s32 sxe2_flow_default_mask_get(enum sxe2_block_id block_id, + struct sxe2_adapter *adapter, + enum sxe2_flow_fld_id fld_id, u16 *mask_idx, + u16 *fv_idx) +{ + u32 i = 0; + + switch (block_id) { + case SXE2_HW_BLOCK_ID_FNAV: + for (i = 0; i < SXE2_MAX_FV_MASK; i++) { + if (test_bit((int)fld_id, + adapter->fnav_ctxt.fnav_flow_ctxt.ppp.fv_mask[i] + .filds)) { + *mask_idx = adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .fv_mask[i] + .mask_idx; + *fv_idx = adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .fv_mask[i] + .mask_idx; + LOG_INFO_BDF("sxe2 fnav mask filds=0x%lX, " + "mask_idx=%u.\n", + *adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .fv_mask[i] + .filds, + *mask_idx); + return 0; + } + } + return -EINVAL; + case SXE2_HW_BLOCK_ID_RSS: + for (i = 0; i < SXE2_MAX_FV_MASK; i++) { + if (test_bit((int)fld_id, + adapter->rss_flow_ctxt.ppp.fv_mask[i].filds)) { + *mask_idx = adapter->rss_flow_ctxt.ppp.fv_mask[i] + .mask_idx; + *fv_idx = adapter->rss_flow_ctxt.ppp.fv_mask[i] + .mask_idx; + LOG_INFO_BDF("sxe2 fnav mask filds=0x%lX, " + "mask_idx=%u.\n", + *adapter->rss_flow_ctxt.ppp.fv_mask[i] + .filds, + *mask_idx); + return 0; + } + } + return -EINVAL; + default: + return 0; + }; +} + +static void sxe2_flow_free_all_flows(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_info_node *flow; + struct sxe2_flow_info_node *tmp; + + mutex_lock(&ppp_ctxt->flow_list_lock); + list_for_each_entry_safe(flow, tmp, &ppp_ctxt->flow_list, l_node) { + list_del(&flow->l_node); + devm_kfree(dev, flow); + } + mutex_unlock(&ppp_ctxt->flow_list_lock); + + INIT_LIST_HEAD(&ppp_ctxt->flow_list); +} + +static void sxe2_flow_free_all_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + u16 i; + + for (i = 0; i < SXE2_MAX_VSIG_NUM; i++) { + if (ppp_ctxt->vsig[i].used) + (void)sxe2_flow_free_vsig(ppp_ctxt, i); + } +} + +void sxe2_flow_ppp_comm_ctxt_init(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_adapter *adapter, + enum sxe2_block_id block_id) +{ + u16 i; + + memset(ppp_ctxt, 0, sizeof(*ppp_ctxt)); + + ppp_ctxt->block_id = block_id; + ppp_ctxt->adapter = adapter; + + ppp_ctxt->hw_prof_num = sxe2_blk_prof_cnt_get(block_id); + ppp_ctxt->hw_fv_num = sxe2_blk_fv_cnt_get(block_id); + ppp_ctxt->hw_fv_mask_num = sxe2_blk_def_mask_fv_cnt_get(block_id); + + INIT_LIST_HEAD(&ppp_ctxt->flow_list); + mutex_init(&ppp_ctxt->flow_list_lock); + + for (i = 0; i < SXE2_MAX_VSIG_NUM; i++) + INIT_LIST_HEAD(&ppp_ctxt->vsig[i].associated_flow_list); +} + +void sxe2_flow_ppp_comm_ctxt_deinit(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + 
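/*
+	 * Teardown mirrors sxe2_flow_ppp_comm_ctxt_init() in reverse:
+	 * drain the flow list while its mutex is still alive, destroy
+	 * the mutex, then release every VSIG still marked used.
+	 */
+	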
sxe2_flow_free_all_flows(ppp_ctxt); + mutex_destroy(&ppp_ctxt->flow_list_lock); + + sxe2_flow_free_all_vsig(ppp_ctxt); +} + +void sxe2_flow_ppp_comm_ctxt_clean(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + sxe2_flow_free_all_flows(ppp_ctxt); + + sxe2_flow_free_all_vsig(ppp_ctxt); + + memset(ppp_ctxt->hw_prof, 0, + sizeof(struct sxe2_flow_hw_prof) * SXE2_MAX_PROF_NUM); + memset(ppp_ctxt->tcam_entry, 0, + sizeof(struct sxe2_prof_tcam_entry) * SXE2_MAX_TCAM_NUM); +} + +void sxe2_rss_ppp_ctxt_clean(struct sxe2_rss_ctxt *rss_ctxt) +{ + sxe2_flow_ppp_comm_ctxt_clean(&(rss_ctxt->ppp)); +} + +s32 sxe2_rss_add_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg) +{ + struct sxe2_ppp_common_ctxt *ppp_ctxt = &rss_ctxt->ppp; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_dissector_info *dissectors = NULL; + struct sxe2_flow_info_node *flow; + s32 ret; + u8 dissectors_cnt; + + if (cfg->hdr_type == SXE2_RSS_OUTER_HEADERS) + dissectors_cnt = SXE2_FLOW_DISSECTOR_SINGLE; + else + dissectors_cnt = SXE2_FLOW_DISSECTOR_DOUBLE; + + dissectors = devm_kcalloc(dev, dissectors_cnt, sizeof(*dissectors), + GFP_KERNEL); + if (!dissectors) { + LOG_ERROR_BDF("failed to alloc dissectors.\n"); + ret = -ENOMEM; + goto l_end; + } + + LOG_DEBUG_BDF("add rss cfg, header: %lu, filed: %lu, type: %d\n", + cfg->headers[0], cfg->hash_flds[0], cfg->hdr_type); + + ret = sxe2_rss_gen_dissector_info(dissectors, dissectors_cnt, cfg); + if (ret != 0) + goto l_end; + + flow = sxe2_find_flow_with_cond(ppp_ctxt, dissectors, dissectors_cnt, + vsi_sw_idx, + SXE2_FLOW_FIND_FLOW_COND_VSI | + SXE2_FLOW_FIND_FLOW_COND_FLD); + if (flow) { + LOG_DEBUG_BDF("add rss cfg, find vsi and (fld、hdr) same flow.\n"); + if (flow->cfg.symm == cfg->symm) { + goto l_end; + } else { + flow->cfg.symm = cfg->symm; + goto l_update_symm; + } + } + + flow = sxe2_find_flow_with_cond(ppp_ctxt, dissectors, dissectors_cnt, + vsi_sw_idx, SXE2_FLOW_FIND_FLOW_COND_VSI); + if (flow) { + LOG_DEBUG_BDF("add rss cfg, find vsi and hdr same flow.\n"); + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) + goto l_end; + + sxe2_rss_delete_cfg_list(rss_ctxt, vsi_sw_idx, flow); + + if (bitmap_empty(flow->used_vsi, SXE2_MAX_VSI_NUM)) { + ret = sxe2_flow_delete(ppp_ctxt, flow); + if (ret != 0) + goto l_end; + } + } + + flow = sxe2_find_flow_with_cond(ppp_ctxt, dissectors, dissectors_cnt, + vsi_sw_idx, SXE2_FLOW_FIND_FLOW_COND_FLD); + if (flow) { + LOG_DEBUG_BDF("add rss cfg, find (hdr、fld) same flow.\n"); + if (flow->cfg.symm == cfg->symm) { + ret = sxe2_flow_assoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret == 0) { + ret = sxe2_rss_save_cfg_list(rss_ctxt, vsi_sw_idx, + flow); + } + } else { + ret = -EOPNOTSUPP; + } + goto l_end; + } + + ret = sxe2_flow_creat(ppp_ctxt, dissectors, dissectors_cnt, &flow); + if (ret != 0) { + LOG_INFO_BDF("failed to Create new flow disassoc cnt:%u flow[%p].\n", + dissectors_cnt, flow); + goto l_end; + } + LOG_DEBUG_BDF("create new flow disassoc cnt:%u flow[%p].\n", dissectors_cnt, + flow); + + ret = sxe2_flow_assoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) { + (void)sxe2_flow_delete(ppp_ctxt, flow); + goto l_end; + } + + ret = sxe2_rss_save_cfg_list(rss_ctxt, vsi_sw_idx, flow); + + flow->cfg.symm = cfg->symm; +l_update_symm: + (void)sxe2_rss_update_symm(rss_ctxt, flow); +l_end: + if (dissectors) + devm_kfree(dev, dissectors); + + return ret; +} + +s32 sxe2_rss_rem_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 
vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg) +{ + struct sxe2_ppp_common_ctxt *ppp_ctxt = &rss_ctxt->ppp; + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_flow_dissector_info *dissectors; + struct sxe2_flow_info_node *flow; + s32 ret; + u8 dissectors_cnt; + + if (cfg->hdr_type == SXE2_RSS_OUTER_HEADERS) + dissectors_cnt = SXE2_FLOW_DISSECTOR_SINGLE; + else + dissectors_cnt = SXE2_FLOW_DISSECTOR_DOUBLE; + + dissectors = devm_kcalloc(dev, dissectors_cnt, sizeof(*dissectors), + GFP_KERNEL); + if (!dissectors) { + ret = -ENOMEM; + goto l_end; + } + + ret = sxe2_rss_gen_dissector_info(dissectors, dissectors_cnt, cfg); + if (ret != 0) + goto l_free; + + flow = sxe2_find_flow_with_cond(ppp_ctxt, dissectors, dissectors_cnt, + vsi_sw_idx, SXE2_FLOW_FIND_FLOW_COND_FLD); + if (!flow) { + ret = -ENOENT; + LOG_INFO_BDF("remove rss cfg, failed find porf.\n"); + goto l_free; + } + + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, vsi_sw_idx); + if (ret != 0) + goto l_free; + + sxe2_rss_delete_cfg_list(rss_ctxt, vsi_sw_idx, flow); + + if (bitmap_empty(flow->used_vsi, SXE2_MAX_VSI_NUM)) { + ret = sxe2_flow_delete(ppp_ctxt, flow); + LOG_DEBUG_BDF("remove rss cfg. delete flow ret:%d.\n", ret); + } +l_free: + devm_kfree(dev, dissectors); +l_end: + return ret; +} + +static enum sxe2_rss_cfg_hdr_type +sxe2_rss_get_hdr_type(struct sxe2_flow_info_node *flow) +{ + enum sxe2_rss_cfg_hdr_type hdr_type = SXE2_RSS_ANY_HEADERS; + + if (flow->dissector_cnt == SXE2_FLOW_DISSECTOR_SINGLE) { + hdr_type = SXE2_RSS_OUTER_HEADERS; + } else if (flow->dissector_cnt == SXE2_FLOW_DISSECTOR_DOUBLE) { + if (bitmap_empty(flow->dissectors[SXE2_RSS_OUTER_HEADERS].headers, + SXE2_FLOW_HDR_MAX)) { + hdr_type = SXE2_RSS_INNER_HEADERS; + } else if (test_bit(SXE2_FLOW_HDR_IPV4, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + + if (test_bit(SXE2_FLOW_HDR_GRE, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + if (test_bit(SXE2_FLOW_HDR_UDP, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GRE; + else + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_GRE; + } else if (test_bit(SXE2_FLOW_HDR_VXLAN, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_VXLAN; + } else if (test_bit(SXE2_FLOW_HDR_GENEVE, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GENEVE; + } else if (test_bit(SXE2_FLOW_HDR_GTPU, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GTPU; + } else { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4; + } + } else if (test_bit(SXE2_FLOW_HDR_IPV6, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + + if (test_bit(SXE2_FLOW_HDR_GRE, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + if (test_bit(SXE2_FLOW_HDR_UDP, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GRE; + else + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_GRE; + } else if (test_bit(SXE2_FLOW_HDR_VXLAN, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_VXLAN; + } else if (test_bit(SXE2_FLOW_HDR_GENEVE, + flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GENEVE; + } else if (test_bit(SXE2_FLOW_HDR_GTPU, + 
flow->dissectors[SXE2_RSS_OUTER_HEADERS] + .headers)) { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GTPU; + } else { + hdr_type = SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6; + } + } + } + + return hdr_type; +} + +s32 sxe2_rss_save_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + struct sxe2_flow_info_node *flow) +{ + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_rss_cfg *rss_cfg; + enum sxe2_rss_cfg_hdr_type hdr_type; + s32 ret = 0; + u8 dissector_idx; + + hdr_type = sxe2_rss_get_hdr_type(flow); + dissector_idx = (u8)(flow->dissector_cnt - (u8)1); + + list_for_each_entry(rss_cfg, &rss_ctxt->rss_cfgs, l_node) + { + if (bitmap_equal(rss_cfg->hash_cfg.hash_flds, + flow->dissectors[dissector_idx].fields, + SXE2_FLOW_FLD_ID_MAX) && + bitmap_equal(rss_cfg->hash_cfg.headers, + flow->dissectors[dissector_idx].headers, + SXE2_FLOW_HDR_MAX) && + rss_cfg->hash_cfg.hdr_type == hdr_type) { + set_bit(vsi_sw_idx, rss_cfg->vsis); + goto l_end; + } + } + + rss_cfg = devm_kzalloc(dev, sizeof(*rss_cfg), GFP_KERNEL); + if (!rss_cfg) { + LOG_ERROR_BDF("failed to alloc rss_cfg memory.\n"); + ret = -ENOMEM; + goto l_end; + } + + bitmap_copy(rss_cfg->hash_cfg.hash_flds, + flow->dissectors[dissector_idx].fields, SXE2_FLOW_FLD_ID_MAX); + bitmap_copy(rss_cfg->hash_cfg.headers, + flow->dissectors[dissector_idx].headers, SXE2_FLOW_HDR_MAX); + rss_cfg->hash_cfg.hdr_type = hdr_type; + rss_cfg->hash_cfg.symm = flow->cfg.symm; + set_bit(vsi_sw_idx, rss_cfg->vsis); + + list_add_tail(&rss_cfg->l_node, &rss_ctxt->rss_cfgs); +l_end: + return ret; +} + +static void sxe2_rss_delete_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + struct sxe2_flow_info_node *flow) +{ + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_rss_cfg *rss_cfg; + struct sxe2_rss_cfg *tmp; + enum sxe2_rss_cfg_hdr_type hdr_type; + u8 dissector_idx; + + hdr_type = sxe2_rss_get_hdr_type(flow); + dissector_idx = (u8)(flow->dissector_cnt - (u8)1); + + list_for_each_entry_safe(rss_cfg, tmp, &rss_ctxt->rss_cfgs, l_node) { + if (bitmap_equal(rss_cfg->hash_cfg.hash_flds, + flow->dissectors[dissector_idx].fields, + SXE2_FLOW_FLD_ID_MAX) && + bitmap_equal(rss_cfg->hash_cfg.headers, + flow->dissectors[dissector_idx].headers, + SXE2_FLOW_HDR_MAX) && + rss_cfg->hash_cfg.hdr_type == hdr_type) { + clear_bit(vsi_sw_idx, rss_cfg->vsis); + if (bitmap_empty(rss_cfg->vsis, SXE2_MAX_VSI_NUM)) { + list_del(&rss_cfg->l_node); + devm_kfree(dev, rss_cfg); + } + goto l_end; + } + } + + LOG_WARN_BDF("delete cfg list error, not find rss cfg.\n"); +l_end: + return; +} + +void sxe2_rss_delete_vsi_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx) +{ + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_rss_cfg *rss_cfg; + struct sxe2_rss_cfg *tmp; + + if (list_empty(&rss_ctxt->rss_cfgs)) + goto l_end; + + list_for_each_entry_safe(rss_cfg, tmp, &rss_ctxt->rss_cfgs, l_node) { + if (test_bit(vsi_sw_idx, rss_cfg->vsis)) { + clear_bit(vsi_sw_idx, rss_cfg->vsis); + if (bitmap_empty(rss_cfg->vsis, SXE2_MAX_VSI_NUM)) { + list_del(&rss_cfg->l_node); + devm_kfree(dev, rss_cfg); + } + } + } +l_end: + return; +} + +void sxe2_rss_get_hash_cfg_with_hdrs(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + unsigned long *headers, + unsigned long *hash_flds) +{ + struct sxe2_rss_cfg *rss_cfg; + + if (bitmap_empty(headers, SXE2_FLOW_HDR_MAX) || + vsi_sw_idx >= 
SXE2_MAX_VSI_NUM) { + return; + } + + bitmap_zero(hash_flds, SXE2_FLOW_FLD_ID_MAX); + + mutex_lock(&rss_ctxt->rss_cfgs_lock); + list_for_each_entry(rss_cfg, &rss_ctxt->rss_cfgs, l_node) + { + if (test_bit(vsi_sw_idx, rss_cfg->vsis) && + bitmap_equal(rss_cfg->hash_cfg.headers, headers, + SXE2_FLOW_HDR_MAX)) { + bitmap_copy(hash_flds, rss_cfg->hash_cfg.hash_flds, + SXE2_FLOW_FLD_ID_MAX); + break; + } + } + mutex_unlock(&rss_ctxt->rss_cfgs_lock); +} + +s32 sxe2_rss_replay_hash_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx) +{ + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct sxe2_rss_cfg *rss_cfg; + s32 ret = 0; + + if (vsi_sw_idx >= SXE2_VSI_MAX_CNT) { + ret = -EINVAL; + goto l_end; + } + + mutex_lock(&rss_ctxt->rss_cfgs_lock); + list_for_each_entry(rss_cfg, &rss_ctxt->rss_cfgs, l_node) + { + if (test_bit(vsi_sw_idx, rss_cfg->vsis)) { + ret = sxe2_rss_add_cfg(rss_ctxt, vsi_sw_idx, + &rss_cfg->hash_cfg); + if (ret != 0) { + LOG_ERROR_BDF("replay vsi[%d] rss cfg error, " + "ret:%d.\n", + vsi_sw_idx, ret); + break; + } + } + } + mutex_unlock(&rss_ctxt->rss_cfgs_lock); +l_end: + return ret; +} + +void sxe2_rss_comm_init(struct sxe2_rss_ctxt *rss_ctxt) +{ + mutex_init(&rss_ctxt->rss_cfgs_lock); + INIT_LIST_HEAD(&rss_ctxt->rss_cfgs); +} + +void sxe2_rss_comm_deinit(struct sxe2_rss_ctxt *rss_ctxt) +{ + struct sxe2_adapter *adapter = rss_ctxt->ppp.adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_rss_cfg *rss_cfg, *tmp; + + mutex_lock(&rss_ctxt->rss_cfgs_lock); + list_for_each_entry_safe(rss_cfg, tmp, &rss_ctxt->rss_cfgs, l_node) { + list_del(&rss_cfg->l_node); + devm_kfree(dev, rss_cfg); + } + mutex_unlock(&rss_ctxt->rss_cfgs_lock); + mutex_destroy(&rss_ctxt->rss_cfgs_lock); +} + +s32 sxe2_add_rss_flow(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg) +{ + s32 ret; + struct sxe2_rss_hash_cfg cfg_tmp; + + if (vsi_sw_idx >= SXE2_VSI_MAX_CNT || !cfg || !rss_ctxt || + cfg->hdr_type > SXE2_RSS_ANY_HEADERS || + bitmap_empty(cfg->hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + return -EINVAL; + } + + cfg_tmp = *cfg; + mutex_lock(&rss_ctxt->rss_cfgs_lock); + if (cfg->hdr_type < SXE2_RSS_ANY_HEADERS) { + ret = sxe2_rss_add_cfg(rss_ctxt, vsi_sw_idx, &cfg_tmp); + } else { + cfg_tmp.hdr_type = SXE2_RSS_OUTER_HEADERS; + ret = sxe2_rss_add_cfg(rss_ctxt, vsi_sw_idx, &cfg_tmp); + if (!ret) { + cfg_tmp.hdr_type = SXE2_RSS_INNER_HEADERS; + ret = sxe2_rss_add_cfg(rss_ctxt, vsi_sw_idx, &cfg_tmp); + } + } + mutex_unlock(&rss_ctxt->rss_cfgs_lock); + + return ret; +} + +void sxe2_flow_xlt2_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + u32 i = 0; + + LOG_DEV_INFO("xlt2 info dump start, block id %u\n", ppp_ctxt->block_id); + for (i = 0; i < SXE2_MAX_VSI_NUM; i++) { + if (ppp_ctxt->vsi_to_grp[i].idx && adapter->vsi_ctxt.vsi[i]) { + LOG_DEV_INFO("vsi[%u](hw_id:%u)-->vsig[%u]\n", + adapter->vsi_ctxt.vsi[i]->id_in_pf, + adapter->vsi_ctxt.vsi[i]->idx_in_dev, + ppp_ctxt->vsi_to_grp[i].idx); + } + } + LOG_DEV_INFO("xlt2 info dump end, block id %u\n", ppp_ctxt->block_id); +} + +void sxe2_flow_vsig_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + u64 i = 0, j = 0; + struct sxe2_associated_flow_node *associated_flow; + DECLARE_BITMAP(vsis, SXE2_MAX_VSI_NUM); + + bitmap_zero(vsis, SXE2_MAX_VSI_NUM); + + LOG_DEV_INFO("vsig info dump start, block id %u\n", ppp_ctxt->block_id); + for (i = 0; i < SXE2_MAX_VSIG_NUM; i++) { + if (ppp_ctxt->vsig[i].used) { + 
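/*
+			 * For each in-use VSIG: list the member VSIs from a
+			 * local copy of its vsis bitmap, then every
+			 * associated profile with the TCAM slots backing it.
+			 * Purely informational; no VSIG state is modified.
+			 */
+			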
bitmap_zero(vsis, SXE2_MAX_VSI_NUM); + bitmap_copy(vsis, ppp_ctxt->vsig[i].vsis, SXE2_MAX_VSI_NUM); + LOG_DEV_INFO("vsig[%llu], vsi cnt: %u\n", i, + ppp_ctxt->vsig[i].vsi_cnt); + LOG_DEV_INFO("vsi list:\n"); + for_each_set_bit(j, vsis, SXE2_MAX_VSI_NUM) + { + if (adapter->vsi_ctxt.vsi[j]) { + LOG_DEV_INFO("\tvsi sw_id: %u, hw_id: %u\n", + adapter->vsi_ctxt.vsi[j] + ->id_in_pf, + adapter->vsi_ctxt.vsi[j] + ->idx_in_dev); + } + clear_bit((int)j, vsis); + } + LOG_DEV_INFO("profile list:\n"); + list_for_each_entry(associated_flow, + &ppp_ctxt->vsig[i].associated_flow_list, + l_node) + { + LOG_DEV_INFO("\tprof id: %u, priority: %d, " + "tcam_cnt: %u\n", + associated_flow->flow_ptr->prof_id, + associated_flow->flow_ptr->priority, + associated_flow->tcam_cnt); + for (j = 0; j < associated_flow->tcam_cnt; j++) { + LOG_DEV_INFO("\t\ttcam[%u], used: %d, " + "prof_id: %u, ptg: %u\n", + associated_flow->tcams[j].idx, + associated_flow->tcams[j].used, + associated_flow->tcams[j] + .prof_id, + associated_flow->tcams[j].ptg); + } + } + } + } + LOG_DEV_INFO("vsig info dump end, block id %u\n", ppp_ctxt->block_id); +} + +void sxe2_flow_prof_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + u32 i = 0, j = 0; + + LOG_DEV_INFO("profile info dump start, block id %u\n", ppp_ctxt->block_id); + for (i = 0; i < ppp_ctxt->hw_prof_num; i++) { + if (ppp_ctxt->hw_prof[i].avail) { + LOG_DEV_INFO("prof[%u], mask_sel: 0x%x\n", i, + ppp_ctxt->hw_prof[i].fv_masks_sel); + for (j = 0; j < ppp_ctxt->hw_fv_num; j++) { + LOG_DEV_INFO("\tfv[%u], protocol: %u, offset: %u\n", + j, ppp_ctxt->hw_prof[i].fv[j].prot_id, + ppp_ctxt->hw_prof[i].fv[j].off); + } + } + } + LOG_DEV_INFO("profile info dump end, block id %u\n", ppp_ctxt->block_id); +} + +void sxe2_flow_mask_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + u32 i = 0; + + LOG_DEV_INFO("mask info dump start, block id %u\n", ppp_ctxt->block_id); + for (i = 0; i < SXE2_MAX_FV_MASK; i++) { + LOG_DEV_INFO("mask[%u], fv_idx: %u, mask_val: 0x%x\n", i, + ppp_ctxt->fv_mask[i].mask_idx, + ppp_ctxt->fv_mask[i].mask); + } + LOG_DEV_INFO("mask info dump end, block id %u\n", ppp_ctxt->block_id); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..5532398c5dab145a529f4b7d9367ac8334800bc0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_flow.h @@ -0,0 +1,634 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_flow.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_FLOW_H__ +#define __SXE2_FLOW_H__ + +#include +#include +#include +#include "sxe2_cmd_channel.h" +#include "sxe2_mbx_public.h" + +struct sxe2_adapter; +struct sxe2_rss_ctxt; +struct sxe2_acl_scen_info; +struct sxe2_acl_flow_entry; +struct sxe2_acl_flow_action; + +#if (BITS_PER_LONG == 64) +#define SXE2_PTYPE_MAP_SIZE 16 +#define SXE2_PTYPE_BITMAP(high3, high2, high1, high0, low3, low2, low1, low0) \ + (0x##high3##high2##high1##high0##low3##low2##low1##low0##ULL) +#elif (BITS_PER_LONG == 32) +#define SXE2_PTYPE_MAP_SIZE 32 +#define SXE2_PTYPE_BITMAP(high3, high2, high1, high0, low3, low2, low1, low0) \ + (0x##low3##low2##low1##low0, 0x##high3##high2##high1##high0) +#else +#define SXE2_PTYPE_MAP_SIZE 0 +#define SXE2_PTYPE_BITMAP(high3, high2, high1, high0, low3, low2, low1, low0) \ + (0x##low3##low2##low1##low0, 0x##high3##high2##high1##high0) +#error please modify macro SXE2_PTYPE_MAP_SIZE and SXE2_PTYPE_BITMAP +#endif + +#define SXE2_FV_OFFSET_INVAL 0x1FF +#define SXE2_FV_PORT_ID_INVAL 0xFF +#define SXE2_FV_MASK_NUM 32 +#define SXE2_MAX_FV_WORDS 48 +#define SXE2_MAX_PTYPE_NUM 1024 +#define SXE2_MAX_CDID_NUM 8 +#define SXE2_MAX_PTG_NUM 256 +#define SXE2_MAX_VSIG_NUM 768 +#define SXE2_MAX_VSI_NUM 768 +#define SXE2_MAX_PROF_NUM 256 +#define SXE2_MAX_PTG_PER_PROF_NUM 64 +#define SXE2_MAX_TCAM_PER_PROF_NUM 64 +#define SXE2_MAX_DISSECTOR_NUM 2 +#define SXE2_MAX_RAW_CNT 2 +#define SXE2_MAX_TCAM_NUM 512 +#define SXE2_MAX_FV_MASK 32 +#define SXE2_PPP_DEFAULT_VSIG_IDX 0 +#define SXE2_TCAM_DEFAULT_CD_ID 0 +#define SXE2_TCAM_DEFAULT_FLAGS 0 + +#define SXE2_FNAV_PROF_CNT (128) +#define SXE2_RSS_PROF_CNT (128) +#define SXE2_ACL_PROF_CNT (128) +#define SXE2_FNAV_FV_CNT (30) +#define SXE2_RSS_FV_CNT (24) +#define SXE2_ACL_FV_CNT (32) + +#define SXE2_FLOW_DISSECTOR_SINGLE 1 +#define SXE2_FLOW_DISSECTOR_DOUBLE 2 + +#define SXE2_TCAM_KEY_VAL_SZ 5 +#define SXE2_TCAM_KEY_SZ (2 * SXE2_TCAM_KEY_VAL_SZ) +#define SXE2_KEY_MATCH_MAX_NM_SET_NUM 1 + +#define SXE2_FLOW_FV_SIZE (2) + +#define SXE2_U8_MASK (0xFF) +#define SXE2_U16_MASK (0xFFFF) + +#pragma pack(1) +struct sxe2_fv_word { + u8 prot_id; + u16 off; + u8 resvrd; +}; +#pragma pack() + +enum sxe2_prot_id { + SXE2_PROT_ID_INVAL = 0, + SXE2_PROT_MAC_OF_OR_S = 1, + SXE2_PROT_MAC_O2 = 2, + SXE2_PROT_MAC_IL = 4, + SXE2_PROT_MAC_IN_MAC = 7, + SXE2_PROT_ETYPE_OL = 9, + SXE2_PROT_ETYPE_IL = 10, + SXE2_PROT_PAY = 15, + SXE2_PROT_EVLAN_O = 16, + SXE2_PROT_VLAN_O = 17, + SXE2_PROT_VLAN_IF = 18, + SXE2_PROT_MPLS_OL_MINUS_1 = 27, + SXE2_PROT_MPLS_OL_OR_OS = 28, + SXE2_PROT_MPLS_IL = 29, + SXE2_PROT_IPV4_OF_OR_S = 32, + SXE2_PROT_IPV4_IL = 33, + SXE2_PROT_IPV4_IL_IL = 34, + SXE2_PROT_IPV6_OF_OR_S = 40, + SXE2_PROT_IPV6_IL = 41, + SXE2_PROT_IPV6_IL_IL = 42, + SXE2_PROT_IPV6_NEXT_PROTO = 43, + SXE2_PROT_IPV6_FRAG = 47, + SXE2_PROT_TCP_IL = 49, + SXE2_PROT_UDP_OF = 52, + SXE2_PROT_UDP_IL_OR_S = 53, + SXE2_PROT_GRE_OF = 64, + SXE2_PROT_NSH_F = 84, + SXE2_PROT_ESP_F = 88, + SXE2_PROT_ESP_2 = 89, + SXE2_PROT_SCTP_IL = 96, + SXE2_PROT_ICMP_IL = 98, + SXE2_PROT_ICMPV6_IL = 100, + SXE2_PROT_VRRP_F = 101, + SXE2_PROT_OSPF = 102, + SXE2_PROT_PPPOE = 103, + SXE2_PROT_L2TPV3 = 104, + SXE2_PROT_ECPRI = 105, + SXE2_PROT_PPP = 106, + SXE2_PROT_ATAOE_OF = 114, + SXE2_PROT_CTRL_OF = 116, + SXE2_PROT_LLDP_OF = 117, + SXE2_PROT_ARP_OF = 118, + SXE2_PROT_EAPOL_OF = 120, + SXE2_PROT_META_ID = 255, + SXE2_PROT_INVALID = 255 +}; + +enum sxe2_flow_priority { + 
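/*
+	 * Ordered by header depth: the dissector index selects the OUTER_*
+	 * level for dissector 0 and the INNER_* level otherwise (e.g.
+	 * sxe2_flow_parse_dissectors_hdrs_l4() picks
+	 * SXE2_FLOW_PRIO_LEVEL_INNER_L4 vs SXE2_FLOW_PRIO_LEVEL_OUTER_L4),
+	 * so deeper matches carry the larger priority value.
+	 */
+	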
SXE2_FLOW_PRIO_LEVEL_OUTER_ETH = 0, + SXE2_FLOW_PRIO_LEVLE_OUTER_L3, + SXE2_FLOW_PRIO_LEVEL_OUTER_L3_FRAG, + SXE2_FLOW_PRIO_LEVEL_OUTER_L4, + SXE2_FLOW_PRIO_LEVEL_INNER_ETH, + SXE2_FLOW_PRIO_LEVEL_INNER_L3, + SXE2_FLOW_PRIO_LEVLE_INNER_L3_FRAG, + SXE2_FLOW_PRIO_LEVEL_INNER_L4, + SXE2_FLOW_PRIO_LEVEL_HIGHEST, +}; + +#define SXE2_FLOW_FLD_SZ_ETH_TYPE 2 +#define SXE2_FLOW_FLD_SZ_VLAN 2 +#define SXE2_FLOW_FLD_SZ_IPV4_ADDR 4 +#define SXE2_FLOW_FLD_SZ_IPV6_ADDR 16 +#define SXE2_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4 +#define SXE2_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6 +#define SXE2_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8 +#define SXE2_FLOW_FLD_SZ_IPV4_ID 2 +#define SXE2_FLOW_FLD_SZ_IPV6_ID 4 +#define SXE2_FLOW_FLD_SZ_IP_CHKSUM 2 +#define SXE2_FLOW_FLD_SZ_TCP_CHKSUM 2 +#define SXE2_FLOW_FLD_SZ_UDP_CHKSUM 2 +#define SXE2_FLOW_FLD_SZ_SCTP_CHKSUM 4 +#define SXE2_FLOW_FLD_SZ_IP_DSCP 1 +#define SXE2_FLOW_FLD_SZ_IP_TTL 1 +#define SXE2_FLOW_FLD_SZ_IP_PROT 1 +#define SXE2_FLOW_FLD_SZ_PORT 2 +#define SXE2_FLOW_FLD_SZ_TCP_FLAGS 1 +#define SXE2_FLOW_FLD_SZ_ICMP_TYPE 1 +#define SXE2_FLOW_FLD_SZ_ICMP_CODE 1 +#define SXE2_FLOW_FLD_SZ_ARP_OPER 2 +#define SXE2_FLOW_FLD_SZ_GRE_KEYID 4 +#define SXE2_FLOW_FLD_SZ_GTP_TEID 4 +#define SXE2_FLOW_FLD_SZ_GTP_QFI 2 +#define SXE2_FLOW_FLD_SZ_PPPOE_SESS_ID 2 +#define SXE2_FLOW_FLD_SZ_PFCP_SEID 8 +#define SXE2_FLOW_FLD_SZ_L2TPV3_SESS_ID 4 +#define SXE2_FLOW_FLD_SZ_ESP_SPI 4 +#define SXE2_FLOW_FLD_SZ_AH_SPI 4 +#define SXE2_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 +#define SXE2_FLOW_FLD_SZ_VXLAN_VNI 4 +#define SXE2_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2 +#define SXE2_FLOW_FLD_SZ_L2TPV2_SESS_ID 2 +#define SXE2_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2 +#define SXE2_FLOW_FLD_SZ_GENEVE_VNI 4 +#define SXE2_FLOW_FLD_SZ_GTPU_TEID 4 +#define SXE2_FLOW_FLD_SZ_GRE_TNI 4 + +struct sxe2_flow_fld_info { + enum sxe2_flow_hdr hdr; + u16 off; + u16 size; + u16 mask; +}; + +#define SXE2_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) \ + { \ + .hdr = _hdr, .off = (_offset_bytes)*BITS_PER_BYTE, \ + .size = (_size_bytes)*BITS_PER_BYTE, .mask = 0, \ + } + +#define SXE2_FLOW_FLD_INFO_MASK(_hdr, _offset_bytes, _size_bytes, _mask) \ + { \ + .hdr = _hdr, .off = (_offset_bytes)*BITS_PER_BYTE, \ + .size = (_size_bytes)*BITS_PER_BYTE, .mask = _mask, \ + } + +struct sxe2_flow_fld_xtrct { + u8 prot_id; + u16 off; + u8 idx; + u8 disp; + u16 mask; +}; + +enum sxe2_flow_fld_type { + SXE2_FLOW_FLD_TYPE_VAL, + SXE2_FLOW_FLD_TYPE_RANGE, +}; + +struct sxe2_flow_fld_val { + u16 val; + u16 mask; + u16 len; +}; + +struct sxe2_flow_fld { + enum sxe2_flow_fld_type type; + struct sxe2_flow_fld_val fld_val; + struct sxe2_flow_fld_val last_val; + struct sxe2_flow_fld_xtrct xtrct; +}; + +struct sxe2_flow_raw { + u16 offset; + struct sxe2_flow_fld fld; +}; + +struct sxe2_flow_dissector_info { + DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(fields, + SXE2_FLOW_FLD_ID_MAX); + struct sxe2_flow_fld fld[SXE2_FLOW_FLD_ID_MAX]; + struct sxe2_flow_raw raw[SXE2_MAX_RAW_CNT]; + u8 raw_cnt; +}; + +struct sxe2_prof_ptg_info { + u8 ptg_cnt; + u8 ptg[SXE2_MAX_PTG_PER_PROF_NUM]; +}; + +struct sxe2_prof_tcam_full_key { + __le64 vsig : 12; + __le64 flg : 16; + __le64 ptg : 8; + __le64 cdid : 4; + __le64 pad : 24; +}; +#pragma pack(1) +struct sxe2_prof_tcam_entry { + __le16 addr; + u8 key[SXE2_TCAM_KEY_SZ]; + u8 prof_id; +}; +#pragma pack() + +struct sxe2_prof_tcam_info { + u16 idx; + u8 ptg; + u8 prof_id; + bool used; +}; + +struct sxe2_flow_info_node { + struct list_head l_node; + + u8 prof_id; + + u8 dissector_cnt; + + u8 priority; + + struct sxe2_flow_dissector_info 
dissectors[SXE2_MAX_DISSECTOR_NUM]; + union { + struct sxe2_acl_scen_info *scen; + bool symm; + } cfg; + struct sxe2_prof_ptg_info ptg_info; + DECLARE_BITMAP(used_vsi, SXE2_MAX_VSI_NUM); + + struct mutex acl_entry_lock; + struct list_head acl_entry; +}; + +struct sxe2_flow_hw_prof { + struct sxe2_fv_word fv[SXE2_MAX_FV_WORDS]; + u32 fv_masks_sel; + u16 ref_cnt; + bool avail; +}; + +struct sxe2_fv_mask { + u16 mask_idx; + u16 mask; + DECLARE_BITMAP(filds, SXE2_FLOW_FLD_ID_MAX); +}; + +struct sxe2_associated_flow_node { + struct list_head l_node; + struct sxe2_flow_info_node *flow_ptr; + struct sxe2_prof_tcam_info tcams[SXE2_MAX_TCAM_PER_PROF_NUM]; + u16 tcam_cnt; +}; + +struct sxe2_vsi_group { + struct list_head associated_flow_list; + DECLARE_BITMAP(vsis, SXE2_MAX_VSI_NUM); + u16 vsi_cnt; + bool used; +}; + +struct sxe2_vsi_to_vsig { + u16 idx; +}; + +struct sxe2_ptype_to_group { + u8 idx; +}; + +struct sxe2_ppp_common_ctxt { + struct list_head flow_list; + struct mutex flow_list_lock; + struct sxe2_ptype_to_group + pt_to_grp[SXE2_MAX_PTYPE_NUM]; + struct sxe2_vsi_group vsig + [SXE2_MAX_VSIG_NUM]; + struct sxe2_vsi_to_vsig + vsi_to_grp[SXE2_MAX_VSI_NUM]; + + struct sxe2_flow_hw_prof + hw_prof[SXE2_MAX_PROF_NUM]; + struct sxe2_fv_mask fv_mask[SXE2_MAX_FV_MASK]; + struct sxe2_prof_tcam_entry + tcam_entry[SXE2_MAX_TCAM_NUM]; + u8 hw_prof_num; + u8 hw_fv_num; + u8 hw_fv_mask_num; + enum sxe2_block_id block_id; + struct sxe2_adapter *adapter; +}; + +struct sxe2_flow_info_params { + struct sxe2_flow_info_node *flow_info; + struct sxe2_fv_word fv[SXE2_MAX_FV_WORDS]; + u16 fv_mask[SXE2_MAX_FV_WORDS]; + u8 fv_cnt; + DECLARE_BITMAP(ptypes, SXE2_MAX_PTYPE_NUM); + u16 match_size; +}; + +struct sxe2_rss_hash_cfg { + DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hash_flds, + SXE2_FLOW_FLD_ID_MAX); + enum sxe2_rss_cfg_hdr_type hdr_type; + bool symm; +}; + +struct sxe2_rss_cfg { + struct list_head l_node; + DECLARE_BITMAP(vsis, SXE2_MAX_VSI_NUM); + struct sxe2_rss_hash_cfg hash_cfg; +}; + +struct sxe2_rss_symm_fv_pair { + u16 src_fld; + u16 dst_fld; + u8 fld_len; +}; + +enum sxe2_og_chg_type { + SXE2_OG_CHG_TYPE_XLT2, + SXE2_OG_CHG_TYPE_TCAM, + SXE2_OG_CHG_TYPE_ES, + SXE2_OG_CHG_TYPE_MAX, +}; + +struct sxe2_og_chg { + struct list_head l_entry; + enum sxe2_og_chg_type type; + union og_chg_info { + struct sxe2_og_chg_xlt2 { + u16 vsi_hw_idx; + u16 vsig; + } xlt2; + struct sxe2_og_chg_tcam { + u16 tcam_idx; + u8 prof_id; + } tcam; + struct sxe2_og_chg_es { + u8 prof_id; + } es; + } info; +}; + +#define SXE2_FLD_BIT (32) +#define SXE2_FLD_WIDTH (32) +struct sxe2_ddp_fnav_mask { + u32 val : 16; + u32 rsv : 16; + u32 fldbit_l; + u32 fldbit_h; +}; + +struct sxe2_ddp_rxft_ptg { + u32 ptg0 : 8; + u32 ptg1 : 8; + u32 ptg2 : 8; + u32 ptg3 : 8; +}; + +struct sxe2_ddp_acl_ptg { + u32 ptg0 :8; + u32 ptg1 :8; + u32 ptg2 :8; + u32 ptg3 :8; +}; + +struct sxe2_ptype_map { + unsigned long sxe2_ptypes_mac_ofos_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_mac_il_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_mac_ofos_with_l3 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_mac_il_with_l3 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_mac_ofos_no_l3 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_mac_il_no_l3 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_ofos_with_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_il_with_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_ofos_with_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long 
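/* every array in this struct is a ptype bitmap covering SXE2_MAX_PTYPE_NUM bits (SXE2_PTYPE_MAP_SIZE longs) */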
sxe2_ptypes_ipv6_il_with_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_ofos_no_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_il_no_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_ofos_no_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_il_no_l4 + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_udp_ofos + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_udp_il + [SXE2_PTYPE_MAP_SIZE]; + + unsigned long sxe2_ptypes_tcp_ofos[SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_tcp_il[SXE2_PTYPE_MAP_SIZE]; + + unsigned long sxe2_ptypes_sctp_ofos[SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_sctp_il[SXE2_PTYPE_MAP_SIZE]; + + unsigned long sxe2_ptypes_vxlan_vni + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_gre_of + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_ofos_frag + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_il_frag + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_ofos_frag + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_il_frag + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_ofos_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv4_il_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_ofos_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_ipv6_il_all + [SXE2_PTYPE_MAP_SIZE]; + unsigned long sxe2_ptypes_gtpu + [SXE2_PTYPE_MAP_SIZE]; +}; + +enum sxe2_fnav_seg_type { + SXE2_FNAV_SEG_NON_TUN = 0, + SXE2_FNAV_SEG_TUN, + SXE2_FNAV_SEG_MAX, +}; + +struct sxe2_fnav_flow_raw { + u16 offset; + u8 len; +}; + +struct sxe2_fnav_flow_seg { + DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(fields, SXE2_FLOW_FLD_ID_MAX); + struct sxe2_fnav_flow_raw raw[SXE2_MAX_RAW_CNT]; + u8 raw_cnt; + DECLARE_BITMAP(vsis, SXE2_MAX_VSI_NUM); + bool is_tunnel; + struct sxe2_flow_info_node *flow_ptr; +}; + +struct sxe2_fnav_vsi_used_cnt { + u16 vsi_id_sw; + u32 filter_cnt; +}; + +struct sxe2_fnav_flow_cfg { + struct list_head l_node; + struct sxe2_fnav_flow_seg *seg[SXE2_FNAV_SEG_MAX]; + u32 filter_cnt[SXE2_FNAV_SEG_MAX]; + bool full_match; + enum sxe2_fnav_flow_type flow_type; + struct sxe2_fnav_vsi_used_cnt peer_vsi_used; + struct sxe2_fnav_vsi_used_cnt self_vsi_used; +}; + +void sxe2_flow_set_diss_fld(struct sxe2_flow_dissector_info *dissector, + enum sxe2_flow_fld_id fld, u16 val, u16 mask, + u16 len); + +void sxe2_flow_add_diss_raw(struct sxe2_flow_dissector_info *dissector, u16 off, + u16 val, u16 mask, u8 len); + +struct sxe2_flow_info_node * +sxe2_find_flow(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, u8 dissectors_cnt); + +s32 sxe2_flow_creat(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, struct sxe2_flow_info_node **flow); + +s32 sxe2_flow_delete(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow); + +s32 sxe2_flow_assoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx); + +s32 sxe2_flow_disassoc_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx); + +s32 sxe2_add_rss_flow(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg); + +s32 sxe2_rss_delete_vsi_flows_for_vfr(struct sxe2_rss_ctxt *rss_ctxt, + u16 vsi_sw_idx); + +s32 sxe2_rss_delete_vsi_flows(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx); + +void sxe2_flow_ppp_comm_ctxt_init(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_adapter *adapter, + enum 
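/* block_id selects the PPP hardware block this context drives, e.g. SXE2_HW_BLOCK_ID_FNAV */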
sxe2_block_id block_id); +void sxe2_flow_ppp_comm_ctxt_deinit(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +void sxe2_flow_ppp_comm_ctxt_clean(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +void sxe2_rss_ppp_ctxt_clean(struct sxe2_rss_ctxt *rss_ctxt); + +s32 sxe2_rss_add_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg); + +s32 sxe2_rss_rem_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx, + const struct sxe2_rss_hash_cfg *cfg); + +void sxe2_rss_get_hash_cfg_with_hdrs(struct sxe2_rss_ctxt *rss_ctxt, + u16 vsi_sw_idx, unsigned long *headers, + unsigned long *hash_flds); + +s32 sxe2_rss_replay_hash_cfg(struct sxe2_rss_ctxt *rss_ctxt, u16 vsi_sw_idx); + +void sxe2_rss_comm_init(struct sxe2_rss_ctxt *rss_ctxt); + +void sxe2_rss_comm_deinit(struct sxe2_rss_ctxt *rss_ctxt); + +s32 sxe2_flow_update_fv_mask_sel(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id, u32 mask_sel); + +s32 sxe2_flow_default_mask_get(enum sxe2_block_id block_id, + struct sxe2_adapter *adapter, + enum sxe2_flow_fld_id fld_id, u16 *mask_idx, + u16 *fv_idx); + +void sxe2_rss_delete_vsi_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, + u16 vsi_sw_idx); + +void sxe2_flow_xlt2_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +void sxe2_flow_vsig_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +void sxe2_flow_prof_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +void sxe2_flow_mask_dump(struct sxe2_ppp_common_ctxt *ppp_ctxt); + +s32 sxe2_flow_find_vsig_with_vsi(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 *vsig_idx); + +s32 sxe2_flow_op_move_vsi_to_vsig(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx, u16 vsig_idx, + struct list_head *op_list); + +s32 sxe2_fwc_update_profile(struct sxe2_ppp_common_ctxt *ppp_ctxt, + enum sxe2_block_id blk, struct list_head *chgs); + +s32 sxe2_flow_cfg_clear_muti_vsi_in_vsig(struct sxe2_adapter *adapter, + struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 vsi_sw_idx); + +s32 sxe2_flow_cfg_tcam_entry(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u16 tcam_idx, u8 prof_id, u8 ptg_idx, + u16 vsig_idx, u8 cdid, u16 flags, + u8 vl_mask[SXE2_TCAM_KEY_VAL_SZ], + u8 dc_mask[SXE2_TCAM_KEY_VAL_SZ], + u8 nm_mask[SXE2_TCAM_KEY_VAL_SZ]); + +s32 sxe2_rss_save_cfg_list(struct sxe2_rss_ctxt *rss_ctxt, + u16 vsi_sw_idx, + struct sxe2_flow_info_node *flow); + +s32 sxe2_flow_acl_format_lut_act_entry(struct sxe2_adapter *adapter, struct sxe2_acl_flow_entry *flow_entry, + struct sxe2_flow_info_node *flow, struct sxe2_acl_flow_action *acts, u8 *data); + +s32 sxe2_flow_assoc_vsi_fnav(struct sxe2_ppp_common_ctxt *ppp_ctxt, + struct sxe2_flow_info_node *flow, u16 vsi_sw_idx, enum sxe2_fnav_flow_type flow_type); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.c new file mode 100644 index 0000000000000000000000000000000000000000..cb414f420a028afa722e2e22bec618dbeba97f60 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.c @@ -0,0 +1,4668 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_fnav.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include "sxe2_common.h" +#include "sxe2_cmd.h" +#include "sxe2_log.h" +#include "sxe2_fnav.h" +#include "sxe2_mbx_msg.h" +#include "sxe2_arfs.h" +#include "sxe2_tx.h" + +#define SXE2_FNAV_FLD_OFF(m) offsetof(struct sxe2_fnav_filter_full_key, m) + +#define SXE2_U8_MASK (0xFF) +#define SXE2_U16_MASK (0xFFFF) +#define SXE2_FNAV_TUN_DISSECT_CNT (2) +#define SXE2_FNAV_NO_TUN_DISSECT_CNT (1) +#define SXE2_FNAV_IPV4_PKT_FLAG_MF_OFFSET (20) + +#define SXE2_VXLAN_UDP_DST_PORT (4789) + +struct sxe2_fnav_fld_info { + u32 off; + u32 off_inner; + u8 len; + u8 disp; + u16 mask; +}; + +static const struct sxe2_fnav_fld_info sxe2_flds_info[SXE2_FLOW_FLD_ID_MAX] = { + [SXE2_FLOW_FLD_ID_ETH_TYPE] = {.off = SXE2_FNAV_FLD_OFF(eth.h_proto), + .off_inner = + SXE2_FNAV_FLD_OFF(eth_inner.h_proto), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_ETH_SA] = {.off = SXE2_FNAV_FLD_OFF(eth.h_source), + .off_inner = + SXE2_FNAV_FLD_OFF(eth_inner.h_source), + .len = 6, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_ETH_DA] = {.off = SXE2_FNAV_FLD_OFF(eth.h_dest), + .off_inner = + SXE2_FNAV_FLD_OFF(eth_inner.h_dest), + .len = 6, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_S_TCI] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.s_vlan_tci), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.s_vlan_tci), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_C_TCI] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.c_vlan_tci), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.c_vlan_tci), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_IPV4_SA] = {.off = SXE2_FNAV_FLD_OFF(ip.v4.src_ip), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v4.src_ip), + .len = 4, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_IPV4_DA] = {.off = SXE2_FNAV_FLD_OFF(ip.v4.dst_ip), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v4.dst_ip), + .len = 4, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_IPV4_TOS] = {.off = SXE2_FNAV_FLD_OFF(ip.v4.tos), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v4.tos), + .len = 1, + .disp = 0, + .mask = 0x00ff}, + [SXE2_FLOW_FLD_ID_IPV4_PROT] = { + .off = SXE2_FNAV_FLD_OFF(ip.v4.proto), + .off_inner = SXE2_FNAV_FLD_OFF(ip_inner.v4.proto), + .len = 1, + .disp = 0, + .mask = 0x00ff}, + [SXE2_FLOW_FLD_ID_IPV4_TTL] = {.off = SXE2_FNAV_FLD_OFF(ip.v4.ttl), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v4.ttl), + .len = 1, + .disp = 8, + .mask = 0xff00}, + [SXE2_FLOW_FLD_ID_IPV6_SA] = {.off = SXE2_FNAV_FLD_OFF(ip.v6.src_ip), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v6.src_ip), + .len = 16, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_IPV6_DA] = {.off = SXE2_FNAV_FLD_OFF(ip.v6.dst_ip), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v6.dst_ip), + .len = 16, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_IPV6_DSCP] = {.off = SXE2_FNAV_FLD_OFF(ip.v6.tc), + .off_inner = + SXE2_FNAV_FLD_OFF(ip_inner.v6.tc), + .len = 1, + .disp = 4, + .mask = 0x0ff0}, + [SXE2_FLOW_FLD_ID_IPV6_PROT] = { + .off = SXE2_FNAV_FLD_OFF(ip.v6.proto), + .off_inner = SXE2_FNAV_FLD_OFF(ip_inner.v6.proto), + .len = 1, + .disp = 8, + .mask = 0xff00}, + [SXE2_FLOW_FLD_ID_IPV6_TTL] = {.off = SXE2_FNAV_FLD_OFF(ip.v6.hlim), + .off_inner = SXE2_FNAV_FLD_OFF(ip_inner.v6.hlim), + .len = 1, + .disp = 0, + .mask = 0x00ff}, + [SXE2_FLOW_FLD_ID_TCP_SRC_PORT] = { + .off = SXE2_FNAV_FLD_OFF(l4.src_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.src_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_TCP_DST_PORT] = { + .off = 
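/* TCP/UDP/SCTP all key off the shared l4.src_port/l4.dst_port slots of the full key */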
SXE2_FNAV_FLD_OFF(l4.dst_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.dst_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_UDP_SRC_PORT] = { + .off = SXE2_FNAV_FLD_OFF(l4.src_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.src_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_UDP_DST_PORT] = { + .off = SXE2_FNAV_FLD_OFF(l4.dst_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.dst_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_SCTP_SRC_PORT] = { + .off = SXE2_FNAV_FLD_OFF(l4.src_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.src_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_SCTP_DST_PORT] = { + .off = SXE2_FNAV_FLD_OFF(l4.dst_port), + .off_inner = SXE2_FNAV_FLD_OFF(l4_inner.dst_port), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_VXLAN_VNI] = { + .off = SXE2_FNAV_FLD_OFF(tunnel_data.vxlan_vni), + .off_inner = + SXE2_FNAV_FLD_OFF(tunnel_data.vxlan_vni), + .len = 4, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_S_TPID] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.vlan_type), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.vlan_type), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_C_TPID] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.vlan_type), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.vlan_type), + .len = 2, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_S_VID] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.s_vlan_vid), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.s_vlan_vid), + .len = 2, + .disp = 0, + .mask = 0x0fff}, + [SXE2_FLOW_FLD_ID_C_VID] = { + .off = SXE2_FNAV_FLD_OFF(ext_data.c_vlan_vid), + .off_inner = + SXE2_FNAV_FLD_OFF(ext_data_inner.c_vlan_vid), + .len = 2, + .disp = 0, + .mask = 0x0fff}, + [SXE2_FLOW_FLD_ID_GENEVE_VNI] = { + .off = SXE2_FNAV_FLD_OFF(tunnel_data.geneve_vni), + .off_inner = + SXE2_FNAV_FLD_OFF(tunnel_data.geneve_vni), + .len = 4, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_GTPU_TEID] = { + .off = SXE2_FNAV_FLD_OFF(tunnel_data.gtpu_teid), + .off_inner = + SXE2_FNAV_FLD_OFF(tunnel_data.gtpu_teid), + .len = 4, + .disp = 0, + .mask = 0}, + [SXE2_FLOW_FLD_ID_NVGRE_TNI] = { + .off = SXE2_FNAV_FLD_OFF(tunnel_data.gre_tni), + .off_inner = SXE2_FNAV_FLD_OFF(tunnel_data.gre_tni), + .len = 4, + .disp = 0, + .mask = 0}, +}; + +static const u8 sxe2_fnav_eth_pkt[22] = {0x00}; + +static const u8 sxe2_fnav_tcp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, + 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00}; + +static const u8 sxe2_fnav_udp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_sctp4_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x40, 0x00, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ip4_pkt[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x40, + 0x00, 0x40, 0x10, 0x00, 
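/* address and port bytes stay zero in these templates; sxe2_pkt_insert_*() patches them per filter */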
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +static const u8 sxe2_fnav_tcp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_udp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_sctp6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ip6_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_udp4_vxlan_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x4e, 0x00, 0x00, + 0x40, 0x00, 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ipv6_gre_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x3a, + 0x2f, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0x58}; + +static const u8 sxe2_fnav_tcp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_udp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
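/* tunnel template: outer IPv4/UDP encapsulation followed by the zeroed inner Ethernet/IP frame */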
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_sctp4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x20, 0x00, 0x01, 0x00, 0x00, 0x40, 0x84, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ip4_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_tcp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_udp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, +}; + +static const u8 sxe2_fnav_sctp6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ip6_tun_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x45, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, + 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 sxe2_fnav_ip4_frag_pkt[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x20, + 0x00, 0x40, 0x10, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +static const u8 sxe2_fnav_ip6_frag_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x86, 0xDD, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2C, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3B, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const struct sxe2_fnav_base_pkt sxe2_fnav_pkt[] = { + { + SXE2_FNAV_FLOW_TYPE_ETH, + sizeof(sxe2_fnav_eth_pkt), + sxe2_fnav_eth_pkt, + sizeof(sxe2_fnav_eth_pkt), + sxe2_fnav_eth_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV4_TCP, + sizeof(sxe2_fnav_tcp4_pkt), + sxe2_fnav_tcp4_pkt, + sizeof(sxe2_fnav_tcp4_tun_pkt), + sxe2_fnav_tcp4_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV4_UDP, + sizeof(sxe2_fnav_udp4_pkt), + sxe2_fnav_udp4_pkt, + sizeof(sxe2_fnav_udp4_tun_pkt), + sxe2_fnav_udp4_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV4_SCTP, + sizeof(sxe2_fnav_sctp4_pkt), + sxe2_fnav_sctp4_pkt, + sizeof(sxe2_fnav_sctp4_tun_pkt), + sxe2_fnav_sctp4_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV4_OTHER, + sizeof(sxe2_fnav_ip4_pkt), + sxe2_fnav_ip4_pkt, + sizeof(sxe2_fnav_ip4_tun_pkt), + sxe2_fnav_ip4_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV6_TCP, + sizeof(sxe2_fnav_tcp6_pkt), + sxe2_fnav_tcp6_pkt, + sizeof(sxe2_fnav_tcp6_tun_pkt), + sxe2_fnav_tcp6_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV6_UDP, + sizeof(sxe2_fnav_udp6_pkt), + sxe2_fnav_udp6_pkt, + sizeof(sxe2_fnav_udp6_tun_pkt), + sxe2_fnav_udp6_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV6_SCTP, + sizeof(sxe2_fnav_sctp6_pkt), + sxe2_fnav_sctp6_pkt, + sizeof(sxe2_fnav_sctp6_tun_pkt), + sxe2_fnav_sctp6_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER, + sizeof(sxe2_fnav_ip6_pkt), + sxe2_fnav_ip6_pkt, + sizeof(sxe2_fnav_ip6_tun_pkt), + sxe2_fnav_ip6_tun_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_FRAG_IPV4, + sizeof(sxe2_fnav_ip4_frag_pkt), + sxe2_fnav_ip4_frag_pkt, + sizeof(sxe2_fnav_ip4_frag_pkt), + sxe2_fnav_ip4_frag_pkt, + }, + { + SXE2_FNAV_FLOW_TYPE_FRAG_IPV6, + sizeof(sxe2_fnav_ip6_frag_pkt), + sxe2_fnav_ip6_frag_pkt, + sizeof(sxe2_fnav_ip6_frag_pkt), + sxe2_fnav_ip6_frag_pkt, + }, +}; + +#define 
SXE2_FNAV_PKT_NUM ARRAY_SIZE(sxe2_fnav_pkt) + +void sxe2_fnav_flow_ctxt_init(struct sxe2_adapter *adapter) +{ + sxe2_flow_ppp_comm_ctxt_init(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp, + adapter, SXE2_HW_BLOCK_ID_FNAV); +} + +static void sxe2_fnav_flow_ctxt_deinit(struct sxe2_adapter *adapter) +{ + sxe2_flow_ppp_comm_ctxt_deinit(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); +} + +void sxe2_fnav_enter_reset(struct sxe2_adapter *adapter, bool to_reset) +{ + mutex_lock(&adapter->fnav_ctxt.fnav_state_lock); + if (to_reset) + adapter->fnav_ctxt.state = SXE2_FNAV_STATE_RESET; + else + adapter->fnav_ctxt.state = SXE2_FNAV_STATE_READY; + mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock); +} + +struct sxe2_fnav_flow_cfg * +sxe2_fnav_find_flow_cfg_by_flow_type(struct sxe2_vsi *vsi, + enum sxe2_fnav_flow_type flow_type) +{ + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + struct sxe2_fnav_flow_cfg *flow_cfg_find = NULL; + + list_for_each_entry(flow_cfg, &vsi->fnav.flow_cfg_list, l_node) { + if (flow_type == flow_cfg->flow_type) { + flow_cfg_find = flow_cfg; + break; + } + if (flow_type < flow_cfg->flow_type) + break; + } + + return flow_cfg_find; +} + +STATIC void sxe2_fnav_flow_cfg_add_list(struct sxe2_vsi *vsi, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + struct sxe2_fnav_flow_cfg *flow_tmp, *pre = NULL; + struct sxe2_vsi_fnav *vsi_fnav = &vsi->fnav; + + list_for_each_entry(flow_tmp, &vsi_fnav->flow_cfg_list, l_node) { + if (flow_tmp->flow_type >= flow_cfg->flow_type) + break; + pre = flow_tmp; + } + + if (pre) + list_add(&flow_cfg->l_node, &pre->l_node); + else + list_add(&flow_cfg->l_node, &vsi_fnav->flow_cfg_list); +} + +void sxe2_fnav_flow_ctxt_clean(struct sxe2_adapter *adapter) +{ + u32 i, j; + struct sxe2_vsi *vsi; + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + + sxe2_flow_ppp_comm_ctxt_clean(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); + + for (i = 0; i < adapter->vsi_ctxt.cnt; i++) { + vsi = adapter->vsi_ctxt.vsi[i]; + if (vsi && sxe2_vsi_rxft_support_get(vsi)) { + mutex_lock(&vsi->fnav.flow_cfg_lock); + list_for_each_entry(flow_cfg, &vsi->fnav.flow_cfg_list, + l_node) { + for (j = 0; j < SXE2_FNAV_SEG_MAX; j++) { + if (flow_cfg->seg[j]) + flow_cfg->seg[j]->flow_ptr = NULL; + } + } + mutex_unlock(&vsi->fnav.flow_cfg_lock); + } + } + + adapter->fnav_ctxt.space_bcnt = 0; + adapter->fnav_ctxt.space_gcnt = 0; +} + +static void sxe2_fnav_filter_ctxt_init(struct sxe2_adapter *adapter) +{ + u32 i; + struct sxe2_fnav_context *fnav_ctxt = &adapter->fnav_ctxt; + + fnav_ctxt->pkt_err_cnt = 0; + fnav_ctxt->space_gcnt = 0; + fnav_ctxt->space_bcnt = 0; + + for (i = 0; i < SXE2_FNAV_FLTR_HLIST_CNT; i++) + INIT_HLIST_HEAD(&fnav_ctxt->filter_hlist[i]); + + fnav_ctxt->state = SXE2_FNAV_STATE_READY; + mutex_init(&fnav_ctxt->fnav_state_lock); + mutex_init(&fnav_ctxt->fnav_space_lock); + mutex_init(&fnav_ctxt->filter_lock); +} + +s32 sxe2_flow_fnav_update_hw_prof_fv_mask(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id, u16 *masks) +{ + struct sxe2_adapter *adapter = ppp_ctxt->adapter; + s32 ret = 0; + u32 mask_sel = 0; + u16 i; + + for (i = 0; i < ppp_ctxt->hw_fv_num; i++) { + if (masks[i] && masks[i] != SXE2_U16_MASK) + mask_sel |= (u32)BIT(i); + } + + if (mask_sel == 0) + goto l_end; + + ret = sxe2_flow_update_fv_mask_sel(ppp_ctxt, prof_id, mask_sel); + if (ret) + LOG_ERROR_BDF("failed to update fv mask sel.\n"); + +l_end: + return ret; +} + +static void sxe2_fnav_filter_ctxt_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_fnav_context *fnav_ctxt = &adapter->fnav_ctxt; + + fnav_ctxt->state = 
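/* mark the context unusable before its locks are destroyed */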
SXE2_FNAV_STATE_UNINIT; + mutex_destroy(&fnav_ctxt->fnav_state_lock); + mutex_destroy(&fnav_ctxt->fnav_space_lock); + mutex_destroy(&fnav_ctxt->filter_lock); +} + +void sxe2_fnav_ctxt_init(struct sxe2_adapter *adapter) +{ + clear_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags); + if (adapter->caps_ctxt.fnav_space_bsize > 0 || + adapter->caps_ctxt.fnav_space_gsize > 0) { + set_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags); + } + sxe2_fnav_flow_ctxt_init(adapter); + sxe2_fnav_filter_ctxt_init(adapter); + sxe2_fnav_stat_ctxt_init(adapter); +} + +void sxe2_fnav_ctxt_deinit(struct sxe2_adapter *adapter) +{ + sxe2_fnav_filter_ctxt_deinit(adapter); + sxe2_fnav_flow_ctxt_deinit(adapter); + sxe2_fnav_stat_ctxt_deinit(adapter); + clear_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags); +} + +bool sxe2_fnav_flow_seg_compare(struct sxe2_fnav_flow_seg *seg_a, + struct sxe2_fnav_flow_seg *seg_b) +{ + u8 i = 0; + + if (seg_a->is_tunnel != seg_b->is_tunnel) + return false; + + if (!bitmap_equal(seg_a->headers, seg_b->headers, SXE2_FLOW_HDR_MAX)) + return false; + + if (!bitmap_equal(seg_a->fields, seg_b->fields, SXE2_FLOW_FLD_ID_MAX)) + return false; + + if (seg_a->raw_cnt != seg_b->raw_cnt) + return false; + + for (i = 0; i < seg_a->raw_cnt; i++) { + if (seg_a->raw[i].offset != seg_b->raw[i].offset) + return false; + if (seg_a->raw[i].len != seg_b->raw[i].len) + return false; + } + + return true; +} + +static void sxe2_fnav_gen_dissector_info(struct sxe2_flow_dissector_info *dissectors, + u8 dissectors_cnt, + struct sxe2_fnav_flow_seg *seg) +{ + u64 i; + struct sxe2_flow_dissector_info *dissector; + + dissector = &dissectors[dissectors_cnt - 1]; + + for_each_set_bit(i, seg->fields, SXE2_FLOW_FLD_ID_MAX) { + sxe2_flow_set_diss_fld(dissector, (enum sxe2_flow_fld_id)i, + SXE2_U16_MASK, SXE2_U16_MASK, SXE2_U16_MASK); + } + + bitmap_or(dissector->headers, dissector->headers, seg->headers, + SXE2_FLOW_HDR_MAX); + + for (i = 0; i < seg->raw_cnt; i++) { + sxe2_flow_add_diss_raw(dissector, seg->raw[i].offset, SXE2_U16_MASK, + SXE2_U16_MASK, seg->raw[i].len); + } +} + +STATIC struct sxe2_flow_info_node * +sxe2_fnav_hw_flow_add(struct sxe2_adapter *adapter, struct sxe2_fnav_flow_seg *segs, + s32 *result) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + struct sxe2_flow_dissector_info *dissectors = NULL; + bool is_tunnel = segs->is_tunnel; + u8 dissectors_cnt = is_tunnel ? 
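/* tunnelled flows need two dissectors (outer + inner), plain flows only one */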
SXE2_FNAV_TUN_DISSECT_CNT + : SXE2_FNAV_NO_TUN_DISSECT_CNT; + struct sxe2_flow_info_node *flow = NULL; + u16 i = 0; + struct sxe2_fnav_flow_seg *seg = NULL; + + dissectors = devm_kcalloc(dev, dissectors_cnt, sizeof(*dissectors), + GFP_KERNEL); + if (!dissectors) + goto l_end; + + for (i = 0; i < dissectors_cnt; i++) { + seg = &segs[i]; + LOG_DEV_DEBUG("add a flow, header:0x%lX, field[0]:0x%lX, is_tun:%d.\n", + seg->headers[0], seg->fields[0], seg->is_tunnel); + sxe2_fnav_gen_dissector_info(dissectors, (u8)(i + 1), seg); + } + + flow = sxe2_find_flow(ppp_ctxt, dissectors, dissectors_cnt); + if (flow) { + LOG_DEBUG_BDF("find a flow with seg cfg.\n"); + goto l_end; + } + + ret = sxe2_flow_creat(ppp_ctxt, dissectors, dissectors_cnt, &flow); + if (ret) { + LOG_ERROR_BDF("create a flow with seg cfg failed, ret:%d\n", ret); + *result = ret; + } + +l_end: + if (dissectors) + devm_kfree(dev, dissectors); + + return flow; +} + +STATIC bool sxe2_fnav_flow_segs_compare(struct sxe2_fnav_flow_seg *seg_a, + struct sxe2_fnav_flow_seg *seg_b, u8 seg_cnt) +{ + u8 i = 0; + + for (i = 0; i < seg_cnt; i++) { + if (!sxe2_fnav_flow_seg_compare(&seg_a[i], &seg_b[i])) + return false; + } + return true; +} + +s32 sxe2_fnav_hw_flow_del(struct sxe2_adapter *adapter, + struct sxe2_flow_info_node *flow) +{ + s32 ret = 0; + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + + if (!flow) + return 0; + + if (bitmap_empty((unsigned long *)flow->used_vsi, SXE2_MAX_VSI_NUM)) + ret = sxe2_flow_delete(ppp_ctxt, flow); + + return ret; +} + +STATIC bool sxe2_eth_fnav_support_flow_type(enum sxe2_fnav_flow_type flow_type) +{ + bool is_support = false; + + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + is_support = true; + break; + default: + break; + } + return is_support; +} + +STATIC bool sxe2_fnav_filter_mutli_vsi_dup(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + bool is_dup = false; + struct sxe2_vsi *other_vsi = NULL; + u16 other_vsi_id; + u16 other_filter_cnt = 0; + + if (!sxe2_eth_fnav_support_flow_type(filter->flow_type)) + goto l_end; + + if (flow_cfg->self_vsi_used.vsi_id_sw == filter->rule_vsi_sw) { + other_vsi_id = flow_cfg->peer_vsi_used.vsi_id_sw; + other_filter_cnt = flow_cfg->peer_vsi_used.filter_cnt; + } else { + other_vsi_id = flow_cfg->self_vsi_used.vsi_id_sw; + other_filter_cnt = flow_cfg->self_vsi_used.filter_cnt; + } + + if (other_vsi_id != SXE2_INVAL_U16 && other_filter_cnt != 0) + other_vsi = adapter->vsi_ctxt.vsi[other_vsi_id]; + + if (!other_vsi) + goto l_end; + + if (sxe2_comm_fnav_filter_search_for_dup(other_vsi, filter)) { + LOG_ERROR_BDF("sxe2 fnav vsi:%u has dup filter with other vsi:%u\n", + filter->rule_vsi_sw, other_vsi_id); + is_dup = true; + } +l_end: + return is_dup; +} + +s32 sxe2_fnav_flow_cfg_add(struct sxe2_vsi *vsi, struct sxe2_fnav_flow_cfg *flow_cfg, + struct sxe2_fnav_flow_seg *seg) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + s32 ret_tmp = 0; + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + enum sxe2_fnav_flow_type flow_type = flow_cfg->flow_type; + u8 level = seg->is_tunnel ? 
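/* pick the tunnel or non-tunnel slot of flow_cfg->seg[] */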
SXE2_FNAV_SEG_TUN : SXE2_FNAV_SEG_NON_TUN; + struct sxe2_fnav_flow_seg *seg_old = flow_cfg->seg[level]; + struct sxe2_flow_info_node *flow; + u8 seg_cnt = seg->is_tunnel ? 2 : 1; + + if (seg_old) { + if (sxe2_fnav_flow_segs_compare(seg_old, seg, seg_cnt)) { + LOG_INFO_BDF("both segs are same, flow_type:%d, tun:%d.\n", + flow_type, seg->is_tunnel); + return -EEXIST; + } + + if (flow_cfg->filter_cnt[level] != 0 || + sxe2_arfs_flow_cfg_used(adapter, vsi->id_in_pf, flow_type)) { + LOG_ERROR_BDF("filter_cnt(%u) is not zero or arfs is using this flow\n", + flow_cfg->filter_cnt[level]); + return -EINVAL; + } + + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, seg->is_tunnel); + if (ret) { + LOG_ERROR_BDF("flow cfg del failed, ret:%d\n", ret); + return ret; + } + } + + flow = sxe2_fnav_hw_flow_add(adapter, seg, &ret_tmp); + if (!flow) { + LOG_ERROR_BDF("fnav hw flow add failed, ret:%d\n", ret_tmp); + ret = ret_tmp ? ret_tmp : -EIO; + goto flow_add_failed; + } + + ret = sxe2_flow_assoc_vsi_fnav(ppp_ctxt, flow, vsi->id_in_pf, flow_type); + if (ret) { + LOG_ERROR_BDF("fnav hw flow associate main vsi failed, ret:%d\n", + ret); + goto assoc_main_vsi_failed; + } + + set_bit(vsi->id_in_pf, seg->vsis); + seg->flow_ptr = flow; + flow_cfg->seg[level] = seg; + + return 0; + +assoc_main_vsi_failed: + (void)sxe2_fnav_hw_flow_del(adapter, flow); +flow_add_failed: + return ret; +} + +s32 sxe2_fnav_flow_cfg_del(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_cfg *flow_cfg, bool is_tunnel) +{ + s32 ret = 0; + u64 vsi_sw_id; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + u8 level = is_tunnel ? SXE2_FNAV_SEG_TUN : SXE2_FNAV_SEG_NON_TUN; + struct sxe2_flow_info_node *flow; + + if (!flow_cfg->seg[level]) + return 0; + + if (!flow_cfg->seg[level]->flow_ptr) + return 0; + + flow = flow_cfg->seg[level]->flow_ptr; + + for_each_set_bit(vsi_sw_id, flow_cfg->seg[level]->vsis, SXE2_MAX_VSI_NUM) { + ret = sxe2_flow_disassoc_vsi(ppp_ctxt, flow, (u16)vsi_sw_id); + if (ret) { + LOG_ERROR_BDF("fnav hw flow disassociate vsi failed,\t" + "vsi_sw_id:%u ret:%d\n", + (u16)vsi_sw_id, ret); + goto l_end; + } + clear_bit((u16)vsi_sw_id, flow_cfg->seg[level]->vsis); + } + + ret = sxe2_fnav_hw_flow_del(adapter, flow); + if (ret) { + LOG_ERROR_BDF("fnav hw flow del failed, vsi_sw_id:%u ret:%d\n", + (u16)vsi_sw_id, ret); + goto l_end; + } + + devm_kfree(dev, flow_cfg->seg[level]); + flow_cfg->seg[level] = NULL; + +l_end: + return ret; +} + +STATIC s32 sxe2_fnav_flow_cfg_replay(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_seg *seg) +{ + s32 ret = 0; + s32 ret_tmp = 0; + u64 i; + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + struct sxe2_flow_info_node *flow; + + flow = sxe2_fnav_hw_flow_add(adapter, seg, &ret_tmp); + if (!flow) { + LOG_ERROR_BDF("fnav hw flow add failed, ret:%d\n", ret_tmp); + ret = ret_tmp ? 
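/* report the recorded creation error when there is one, otherwise a generic -EIO */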
ret_tmp : -EIO; + goto flow_add_failed; + } + + for_each_set_bit(i, seg->vsis, SXE2_MAX_VSI_NUM) { + ret = sxe2_flow_assoc_vsi(ppp_ctxt, flow, (u16)i); + if (ret) { + LOG_ERROR_BDF("fnav hw flow associate vsi[%u] failed, ret:%d\n", + (u16)i, ret); + goto assoc_vsi_failed; + } + } + + seg->flow_ptr = flow; + + return 0; + +assoc_vsi_failed: + (void)sxe2_fnav_hw_flow_del(adapter, flow); +flow_add_failed: + return ret; +} + +static s32 +sxe2_fnav_default_flow_seg_fill_by_type(enum sxe2_fnav_flow_type flow_type, + struct sxe2_fnav_flow_seg *seg) +{ + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + set_bit(SXE2_FLOW_HDR_TCP, seg->headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, seg->fields); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + set_bit(SXE2_FLOW_HDR_IPV4, seg->headers); + set_bit(SXE2_FLOW_HDR_UDP, seg->headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, seg->fields); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + set_bit(SXE2_FLOW_HDR_IPV6, seg->headers); + set_bit(SXE2_FLOW_HDR_TCP, seg->headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, seg->fields); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + set_bit(SXE2_FLOW_HDR_ETH, seg->headers); + set_bit(SXE2_FLOW_HDR_IPV6, seg->headers); + set_bit(SXE2_FLOW_HDR_UDP, seg->headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, seg->fields); + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, seg->fields); + break; + default: + LOG_ERROR("flow type is not supported."); + return -EINVAL; + } + + seg->is_tunnel = false; + + return 0; +} + +static s32 sxe2_fnav_default_flow_set_by_type(struct sxe2_vsi *vsi, + enum sxe2_fnav_flow_type flow_type) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_seg *seg = NULL; + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + bool new_alloc_flow = false; + + flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type); + if (flow_cfg) { + if (flow_cfg->seg[SXE2_FNAV_SEG_NON_TUN] || + flow_cfg->seg[SXE2_FNAV_SEG_TUN]) { + ret = -EINVAL; + goto l_end; + } + } else { + flow_cfg = devm_kzalloc(dev, sizeof(*flow_cfg), GFP_KERNEL); + flow_cfg->flow_type = flow_type; + flow_cfg->peer_vsi_used.vsi_id_sw = + SXE2_INVAL_U16; + flow_cfg->peer_vsi_used.filter_cnt = 0; + flow_cfg->self_vsi_used.vsi_id_sw = vsi->id_in_pf; + flow_cfg->self_vsi_used.filter_cnt = 0; + new_alloc_flow = true; + } + + LOG_INFO_BDF("add default fnav cfg, flow type: %d\n", flow_type); + + seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); + if (!seg) { + ret = -ENOMEM; + goto l_free; + } + + ret = sxe2_fnav_default_flow_seg_fill_by_type(flow_type, seg); + + if (ret) { + LOG_ERROR_BDF("default flow seg fill failed, ret:%d\n", ret); + goto l_free; + } + + ret = sxe2_fnav_flow_cfg_add(vsi, flow_cfg, seg); + if (ret) { + 
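/* failure path frees only seg at l_free; a freshly allocated flow_cfg is left to devm cleanup */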
LOG_ERROR_BDF("outer rule add failed, ret:%d\n", ret); + goto l_free; + } + + if (!ret) { + flow_cfg->full_match = true; + if (new_alloc_flow) + sxe2_fnav_flow_cfg_add_list(vsi, flow_cfg); + } else { + if (new_alloc_flow) + devm_kfree(dev, flow_cfg); + } + + goto l_end; + +l_free: + if (seg) + devm_kfree(dev, seg); +l_end: + return ret; +} + +s32 sxe2_fnav_default_flow_set(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + + mutex_lock(&vsi->fnav.flow_cfg_lock); + + ret = sxe2_fnav_default_flow_set_by_type(vsi, SXE2_FNAV_FLOW_TYPE_IPV4_TCP); + if (ret) + LOG_WARN_BDF("tcp4 flow full match set failed, ret:%d\n", ret); + + ret = sxe2_fnav_default_flow_set_by_type(vsi, SXE2_FNAV_FLOW_TYPE_IPV4_UDP); + if (ret) + LOG_WARN_BDF("udp4 flow full match set failed, ret:%d\n", ret); + + ret = sxe2_fnav_default_flow_set_by_type(vsi, SXE2_FNAV_FLOW_TYPE_IPV6_TCP); + if (ret) + LOG_WARN_BDF("tcp6 flow full match set failed, ret:%d\n", ret); + + ret = sxe2_fnav_default_flow_set_by_type(vsi, SXE2_FNAV_FLOW_TYPE_IPV6_UDP); + if (ret) + LOG_WARN_BDF("udp6 flow full match set failed, ret:%d\n", ret); + + mutex_unlock(&vsi->fnav.flow_cfg_lock); + return ret; +} + +bool sxe2_fnav_flow_sup_arfs(enum sxe2_fnav_flow_type flow_type) +{ + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + return true; + default: + return false; + } +} + +void sxe2_fnav_filter_hash(struct sxe2_fnav_filter *filter) +{ + u32 hash = 0; + u16 offset = 0; + u8 hash_fld[SXE2_FNAV_HASH_FLD_MAX_SIZE]; + + if (filter->hashed) + return; + + switch (filter->flow_type) { + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP: + memcpy(hash_fld + offset, &filter->full_key.ip.v4.src_ip, + SXE2_FNAV_IPV4_ADDR_SIZE); + offset += SXE2_FNAV_IPV4_ADDR_SIZE; + memcpy(hash_fld + offset, &filter->full_key.ip.v4.dst_ip, + SXE2_FNAV_IPV4_ADDR_SIZE); + offset += SXE2_FNAV_IPV4_ADDR_SIZE; + memcpy(hash_fld + offset, &filter->full_key.l4.src_port, + SXE2_FNAV_L4_PORT_SIZE); + offset += SXE2_FNAV_L4_PORT_SIZE; + memcpy(hash_fld + offset, &filter->full_key.l4.dst_port, + SXE2_FNAV_L4_PORT_SIZE); + offset += SXE2_FNAV_L4_PORT_SIZE; + hash = jhash(hash_fld, SXE2_FNAV_IP4_HASH_FLD_SIZE, 0); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP: + memcpy(hash_fld + offset, &filter->full_key.ip.v6.src_ip, + SXE2_FNAV_IPV6_ADDR_SIZE); + offset += SXE2_FNAV_IPV6_ADDR_SIZE; + memcpy(hash_fld + offset, &filter->full_key.ip.v6.dst_ip, + SXE2_FNAV_IPV6_ADDR_SIZE); + offset += SXE2_FNAV_IPV6_ADDR_SIZE; + memcpy(hash_fld + offset, &filter->full_key.l4.src_port, + SXE2_FNAV_L4_PORT_SIZE); + offset += SXE2_FNAV_L4_PORT_SIZE; + memcpy(hash_fld + offset, &filter->full_key.l4.dst_port, + SXE2_FNAV_L4_PORT_SIZE); + offset += SXE2_FNAV_L4_PORT_SIZE; + hash = jhash(hash_fld, SXE2_FNAV_IP6_HASH_FLD_SIZE, 0); + break; + default: + break; + } + + filter->hash_val = hash; + filter->hashed = true; +} + +void sxe2_fnav_flow_cfg_free(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_cfg *flow_cfg, *cfg_tmp; + u16 j = 0; + + mutex_lock(&vsi->fnav.flow_cfg_lock); + + list_for_each_entry_safe(flow_cfg, cfg_tmp, 
&vsi->fnav.flow_cfg_list, l_node) { + for (j = 0; j < SXE2_FNAV_SEG_MAX; j++) { + if (flow_cfg->seg[j]) { + devm_kfree(dev, flow_cfg->seg[j]); + flow_cfg->seg[j] = NULL; + } + flow_cfg->filter_cnt[j] = 0; + } + list_del(&flow_cfg->l_node); + devm_kfree(dev, flow_cfg); + } + + mutex_unlock(&vsi->fnav.flow_cfg_lock); +} + +s32 sxe2_fnav_flow_cfg_clear_by_vsi(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + u16 vsi_sw_idx = vsi->id_in_pf; + s32 ret = 0; + u32 j; + struct sxe2_ppp_common_ctxt *ppp_ctxt = + &adapter->fnav_ctxt.fnav_flow_ctxt.ppp; + u16 vsig_idx; + bool only_vsi; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_cfg *flow_cfg, *cfg_tmp; + + ret = sxe2_flow_find_vsig_with_vsi(ppp_ctxt, vsi_sw_idx, &vsig_idx); + if (ret != 0) + goto l_end; + + if (vsig_idx == SXE2_PPP_DEFAULT_VSIG_IDX) + goto l_delete; + + only_vsi = (ppp_ctxt->vsig[vsig_idx].vsi_cnt == 1); + if (!only_vsi) { + ret = sxe2_flow_cfg_clear_muti_vsi_in_vsig(adapter, ppp_ctxt, + vsi_sw_idx); + if (ret) + goto l_end; + else + goto l_delete; + } + + mutex_lock(&vsi->fnav.flow_cfg_lock); + list_for_each_entry_safe(flow_cfg, cfg_tmp, &vsi->fnav.flow_cfg_list, l_node) { + for (j = 0; j < SXE2_FNAV_SEG_MAX; j++) { + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, j); + if (ret) { + mutex_unlock(&vsi->fnav.flow_cfg_lock); + LOG_ERROR_BDF("flow cfg del failed, j:%u\n", j); + goto l_end; + } + } + list_del(&flow_cfg->l_node); + devm_kfree(dev, flow_cfg); + } + mutex_unlock(&vsi->fnav.flow_cfg_lock); + +l_end: + return ret; +l_delete: + sxe2_fnav_flow_cfg_free(vsi); + goto l_end; +} + +s32 sxe2_pf_eth_fnav_init(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi *vsi = NULL; + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + vsi = adapter->vsi_ctxt.main_vsi; + adapter->arfs_ctxt.vsi_id_in_pf = vsi->id_in_pf; + + ret = sxe2_ctrl_vsi_init(adapter); + if (ret) { + LOG_DEV_ERR("ctrl vsi init failed, ret=%d\n", ret); + goto l_end; + } + + if (test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) { + ret = sxe2_fnav_default_flow_set(adapter); + if (ret) + LOG_DEV_WARN("fnav default flow set failed, ret=%d\n", ret); + sxe2_fnav_reserve_stat_idx_alloc(adapter); + } +l_end: + return ret; +} + +void sxe2_pf_eth_fnav_deinit(struct sxe2_adapter *adapter) +{ + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return; + + (void)sxe2_pf_fnav_flow_cfg_clear(adapter); + sxe2_ctrl_vsi_deinit(adapter); +} + +s32 sxe2_pf_eth_fnav_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + ret = sxe2_vsi_rebuild_by_type(adapter, SXE2_VSI_T_CTRL, true); + if (ret) { + LOG_DEV_ERR("ctrl vsi rebuild failed, ret:%d\n", ret); + goto l_end; + } + + ret = sxe2_vsi_enable_by_type(adapter, SXE2_VSI_T_CTRL); + if (ret) { + LOG_DEV_ERR("sxe2_vsi_enable_by_type failed, ret:%d\n", ret); + goto l_end; + } + + if (test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) { + ret = sxe2_fnav_rule_reply(adapter); + if (ret) { + LOG_DEV_ERR("fnav rule failed, ret:%d\n", ret); + goto l_end; + } + } + + LOG_INFO_BDF("sxe2 pf eth fnav rebuild done. 
ret:%d.\n", ret); + +l_end: + return ret; +} + +s32 sxe2_pf_fnav_flow_cfg_clear(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + u32 j = 0; + u32 filter_cnt = 0; + struct sxe2_vsi *vsi = NULL; + struct sxe2_fnav_flow_cfg *flow_cfg, *cfg_tmp; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + vsi = adapter->vsi_ctxt.main_vsi; + + mutex_lock(&vsi->fnav.flow_cfg_lock); + list_for_each_entry(flow_cfg, &vsi->fnav.flow_cfg_list, l_node) { + filter_cnt += flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN]; + filter_cnt += flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN]; + } + mutex_unlock(&vsi->fnav.flow_cfg_lock); + if (filter_cnt == 0) { + ret = sxe2_fnav_flow_cfg_clear_by_vsi(vsi); + if (ret) + LOG_ERROR_BDF("sxe2 pf fnav flow cfg clear batch failed.\n"); + goto l_end; + } + + mutex_lock(&vsi->fnav.flow_cfg_lock); + list_for_each_entry_safe(flow_cfg, cfg_tmp, &vsi->fnav.flow_cfg_list, l_node) { + filter_cnt = 0; + for (j = 0; j < SXE2_FNAV_SEG_MAX; j++) { + filter_cnt += flow_cfg->filter_cnt[j]; + if (flow_cfg->filter_cnt[j] == 0) { + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, j); + if (ret) { + mutex_unlock(&vsi->fnav.flow_cfg_lock); + LOG_ERROR_BDF("flow cfg del failed, j:%u\n", + j); + goto l_end; + } + } + } + + if (filter_cnt == 0) { + list_del(&flow_cfg->l_node); + devm_kfree(dev, flow_cfg); + } + } + mutex_unlock(&vsi->fnav.flow_cfg_lock); + +l_end: + return ret; +} + +STATIC s32 sxe2_fnav_inputset_fill(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_seg *segs, + struct sxe2_fnav_filter *filter) +{ + s32 ret = 0; + u32 off = 0; + u64 fld = 0; + struct sxe2_fnav_flow_seg *seg = NULL; + u8 *inputset = filter->inputset; + __be16 *val; + u16 val_tmp; + __be16 val_tmp_be; + u16 def_mask_id = 0; + u16 def_fv_id = 0; + u8 def_mask_cnt = adapter->fnav_ctxt.fnav_flow_ctxt.ppp.hw_fv_mask_num; + u8 seg_cnt = 0; + u8 i = 0; + u32 fld_off = 0; + DECLARE_BITMAP(fields, SXE2_FLOW_FLD_ID_MAX); + + if (!segs) { + LOG_ERROR_BDF("flow cfg is NULL.\n"); + return -EINVAL; + } + + if (segs->is_tunnel) + seg_cnt = 2; + else + seg_cnt = 1; + + for (i = 0; i < seg_cnt; i++) { + seg = &segs[i]; + bitmap_zero(fields, SXE2_FLOW_FLD_ID_MAX); + bitmap_copy(fields, seg->fields, SXE2_FLOW_FLD_ID_MAX); + + for_each_set_bit(fld, fields, SXE2_FLOW_FLD_ID_MAX) { + fld_off = i ? 
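/* the second pass (i == 1) reads the inner-header offsets from the field table */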
sxe2_flds_info[fld].off_inner + : sxe2_flds_info[fld].off; + if (sxe2_flds_info[fld].mask) { + ret = sxe2_flow_default_mask_get(SXE2_HW_BLOCK_ID_FNAV, + adapter, + (enum sxe2_flow_fld_id)fld, + &def_mask_id, &def_fv_id); + if (ret) { + LOG_ERROR_BDF("failed to parse default mask.\n"); + goto l_end; + } + val = (__be16 *)(inputset + ((u64)def_fv_id * 2)); + if (sxe2_flds_info[fld].len == 2) { + val_tmp_be = *((u16 *)((u8 *)(&filter->full_key) + + fld_off)); + val_tmp_be = val_tmp_be & + cpu_to_be16(sxe2_flds_info[fld] + .mask); + *val = val_tmp_be; + } else { + val_tmp = *((u16 *)((u8 *)(&filter->full_key) + + fld_off)); + val_tmp = val_tmp + << sxe2_flds_info[fld].disp; + val_tmp = val_tmp & sxe2_flds_info[fld].mask; + *val = cpu_to_be16(val_tmp); + } + } else { + if ((u32)(off + sxe2_flds_info[fld].len) > + (u32)(((u16)SXE2_FNAV_INPUT_CNT - def_mask_cnt) * + (u8)SXE2_FLOW_FV_SIZE)) { + ret = -ENOSPC; + LOG_ERROR_BDF("used inputset out of max limit, len=%u.\n", + off + sxe2_flds_info[fld].len); + goto l_end; + } + memcpy(inputset + off, + ((u8 *)&filter->full_key) + fld_off, + sxe2_flds_info[fld].len); + off += sxe2_flds_info[fld].len; + } + clear_bit((int)fld, fields); + } + } + + if (filter->full_key.has_flex_filed) { + if ((u32)(off + sizeof(filter->full_key.flex_word)) > + (u32)(((u16)SXE2_FNAV_INPUT_CNT - def_mask_cnt) * + (u16)SXE2_FLOW_FV_SIZE)) { + ret = -ENOSPC; + LOG_ERROR_BDF("used inputset out of max limit, len=%lu.\n", + off + sizeof(filter->full_key.flex_word)); + goto l_end; + } + memcpy(inputset + off, &filter->full_key.flex_word, + sizeof(filter->full_key.flex_word)); + } + +l_end: + return ret; +} + +s32 sxe2_fnav_filter_inputset_fill(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct sxe2_fnav_flow_seg *segs = NULL; + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL) + segs = flow_cfg->seg[SXE2_FNAV_SEG_TUN]; + else + segs = flow_cfg->seg[SXE2_FNAV_SEG_NON_TUN]; + + ret = sxe2_fnav_inputset_fill(adapter, segs, filter); + return ret; +} + +STATIC s32 sxe2_fwc_fnav_filter_update(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_cfg *flow_cfg, + struct sxe2_fnav_filter *filter, bool is_add, + bool tun) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_fwc_fnav_kt_resp kt_resp = {0}; + struct sxe2_fwc_fnav_kt_entry entry = {0}; + struct sxe2_fnav_flow_seg *seg = NULL; + u8 level = tun ? (SXE2_FNAV_TUN_DISSECT_CNT - 1) + : (SXE2_FNAV_NO_TUN_DISSECT_CNT - 1); + u8 space; + + space = tun ? filter->tunn_fd_space : filter->fd_space; + + seg = flow_cfg->seg[level]; + if (!seg || !seg->flow_ptr) { + ret = -EINVAL; + LOG_ERROR_BDF("seg or seh->flow_ptr is NULL. filter_loc: %u,\t" + "flow_type: %d, is_add: %d, tun: %d\n", + filter->filter_loc, filter->flow_type, is_add, tun); + goto l_out; + } + + entry.is_add = (u8)is_add; + entry.prof_id = (u8)(seg->flow_ptr->prof_id); + entry.ori_vsi = cpu_to_le16(filter->ori_vsi_hw); + memcpy(entry.inputset, filter->inputset, + SXE2_FNAV_INPUT_CNT * SXE2_FLOW_FV_SIZE); + entry.vsi = cpu_to_le16(filter->dst_vsi_hw); + entry.fdid = cpu_to_le32(filter->filter_loc); + entry.fdid_prio = filter->fdid_prio; + entry.flow_id = cpu_to_le16((u16)filter->flow_type); + entry.toq_prio = filter->act_prio; + entry.drop = filter->act_type == SXE2_FNAV_ACT_DROP ? 
1 : 0; + entry.qindex = cpu_to_le16(filter->q_index); + entry.stat_cnt = cpu_to_le16(filter->stat_index); + entry.stat_ena = filter->stat_ctrl; + entry.to_queue = filter->q_region; + entry.fd_space = space; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_FILTER_UPDATE, &entry, + sizeof(entry), &kt_resp, sizeof(kt_resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("update fnav filter failed, is_add:%d, tun:%d, ret:%d\n", + is_add, tun, ret); + } else { + adapter->fnav_ctxt.space_bcnt = + (u16)(le32_to_cpu(kt_resp.bcnt_global)); + adapter->fnav_ctxt.space_gcnt = (u16)(le32_to_cpu(kt_resp.gcnt_pf)); + } + +l_out: + return ret; +} + +enum sxe2_fnav_flow_type +sxe2_arfs_flow_to_fnav_flow(enum sxe2_fnav_flow_type flow_type) +{ + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP: + return SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP: + return SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP: + return SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + case SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP: + return SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + default: + return flow_type; + } +} + +s32 sxe2_pf_fnav_hw_filter_update(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, bool is_add, + bool is_update, + enum sxe2_fnav_filter_update_type update_type) +{ + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + s32 ret = 0; + enum sxe2_fnav_flow_type flow_type; + + if (!vsi) + return 0; + + if (update_type == SXE2_FNAV_FILTER_UPDATE_ADMIN) { + flow_type = sxe2_arfs_flow_to_fnav_flow(filter->flow_type); + flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type); + if (!flow_cfg) { + ret = -EINVAL; + goto l_end; + } + ret = sxe2_fnav_hw_filter_update_with_admin(vsi, filter, flow_cfg, + is_add, false); + if (ret) + goto l_end; + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + ret = sxe2_fnav_hw_filter_update_with_admin(vsi, + filter, + flow_cfg, + is_add, + true); + if (ret && is_add) + (void)sxe2_fnav_hw_filter_update_with_admin(vsi, + filter, + flow_cfg, + false, + false); + } + } else { + ret = sxe2_fnav_hw_filter_update_with_pkt(vsi, filter, is_add, + is_update, false); + if (ret) + goto l_end; + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + ret = sxe2_fnav_hw_filter_update_with_pkt(vsi, + filter, + is_add, + is_update, + true); + if (ret && is_add && !is_update) + (void)sxe2_fnav_hw_filter_update_with_pkt(vsi, + filter, + false, + false, + false); + } + } + +l_end: + return ret; +} + +s32 sxe2_fnav_hw_filter_update_with_admin(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg, + bool is_add, bool is_tunn) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + u8 fd_space; + + mutex_lock(&adapter->fnav_ctxt.fnav_space_lock); + + if (is_add) { + if (adapter->fnav_ctxt.space_gcnt < vsi->fnav.space_gsize) + fd_space = SXE2_FNAV_FD_SPACE_FROM_GUAR; + else + fd_space = SXE2_FNAV_FD_SPACE_FROM_BEST_EFFORT; + + if (is_tunn) + filter->tunn_fd_space = fd_space; + else + filter->fd_space = fd_space; + } else { + fd_space = is_tunn ? filter->tunn_fd_space : filter->fd_space; + } + + ret = sxe2_fwc_fnav_filter_update(adapter, flow_cfg, filter, is_add, + is_tunn); + + mutex_unlock(&adapter->fnav_ctxt.fnav_space_lock); + + return ret; +} + +STATIC void sxe2_fnav_get_prgm_desc(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, + struct sxe2_tx_fnav_desc *desc, bool is_add, + bool is_tunn) +{ + u64 qword; + u8 space; + + space = is_tunn ? 
filter->tunn_fd_space : filter->fd_space; + + qword = (((u64)(filter->q_index)) << SXE2_FNAV_TX_DESC_QW0_Q_INDEX_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_Q_INDEX_MASK; + qword |= (0ULL << SXE2_FNAV_TX_DESC_QW0_COMP_Q_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_COMP_Q_MASK; + qword |= (SXE2_FNAV_TX_DESC_QW0_COMP_RPT_FAIL + << SXE2_FNAV_TX_DESC_QW0_COMP_RPT_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_COMP_RPT_MASK; + qword |= (((u64)(space)) << SXE2_FNAV_TX_DESC_QW0_FD_SPACE_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_FD_SPACE_MASK; + qword |= (((u64)(filter->stat_index)) + << SXE2_FNAV_TX_DESC_QW0_STAT_CNT_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_STAT_CNT_MASK; + qword |= (SXE2_FNAV_TX_DESC_QW0_STAT_ENA_PKTS + << SXE2_FNAV_TX_DESC_QW0_STAT_ENA_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_STAT_ENA_MASK; + qword |= (0ULL << SXE2_FNAV_TX_DESC_QW0_EVICT_ENA_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_EVICT_ENA_MASK; + qword |= (((u64)(filter->q_region)) << SXE2_FNAV_TX_DESC_QW0_TOQ_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_TOQ_MASK; + qword |= (((u64)(filter->act_prio)) + << SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_MASK; + qword |= (((u64)(filter->act_type == SXE2_FNAV_ACT_DROP ? 1ULL : 0ULL)) + << SXE2_FNAV_TX_DESC_QW0_DROP_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_DROP_MASK; + qword |= (((u64)(filter->flow_type)) + << SXE2_FNAV_TX_DESC_QW0_FLOW_ID_SHIFT) & + SXE2_FNAV_TX_DESC_QW0_FLOW_ID_MASK; + desc->qidx_compq_space_stat = cpu_to_le64(qword); + + qword = (SXE2_TX_DESC_DTYPE_FLTR_PROG << SXE2_FNAV_TX_DESC_QW1_DTYPE_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_DTYPE_MASK; + qword |= ((is_add ? SXE2_FNAV_TX_DESC_QW1_PCMD_UPDATE + : SXE2_FNAV_TX_DESC_QW1_PCMD_REMOVE) + << SXE2_FNAV_TX_DESC_QW1_PCMD_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_PCMD_MASK; + qword |= (((u64)(filter->dst_vsi_hw)) + << SXE2_FNAV_TX_DESC_QW1_FD_VSI_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_FD_VSI_MASK; + qword |= (0ULL << SXE2_FNAV_TX_DESC_QW1_SWAP_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_SWAP_MASK; + qword |= (((u64)(filter->fdid_prio)) + << SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_MASK; + qword |= (SXE2_FNAV_TX_DESC_QW1_FDID_MDID_FNAV + << SXE2_FNAV_TX_DESC_QW1_FDID_MDID_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_FDID_MDID_MASK; + qword |= (((u64)(filter->filter_loc)) << SXE2_FNAV_TX_DESC_QW1_FDID_SHIFT) & + SXE2_FNAV_TX_DESC_QW1_FDID_MASK; + desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword); +} + +STATIC void sxe2_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr) +{ + int idx; + + for (idx = 0; idx < SXE2_IPV6_ADDR_LEN_TO_U32; idx++) + memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx], + sizeof(*addr)); +} + +STATIC void sxe2_pkt_insert_u8(u8 *pkt, int offset, u8 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +STATIC void sxe2_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data) +{ + u8 high, low; + + high = (data >> 4) + (*(pkt + offset) & 0xF0); + memcpy(pkt + offset, &high, sizeof(high)); + + low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4); + memcpy(pkt + offset + 1, &low, sizeof(low)); +} + +STATIC void sxe2_pkt_insert_u16(u8 *pkt, int offset, __be16 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +STATIC void sxe2_pkt_insert_u32(u8 *pkt, int offset, __be32 data) +{ + memcpy(pkt + offset, &data, sizeof(data)); +} + +STATIC void sxe2_pkt_insert_mac_addr(u8 *pkt, u8 *addr) +{ + ether_addr_copy(pkt, addr); +} + +STATIC enum sxe2_fnav_flow_type +sxe2_flow_type_translate_type(struct sxe2_fnav_filter *filter) +{ + enum sxe2_fnav_flow_type flow; + + if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_IPV4_OTHER) { + switch (filter->full_key.ip.v4.proto) { + case 
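+ /*
+ * For the two *_OTHER flow types the L4 protocol is read from the
+ * filter key itself, so an "other" filter that actually carries
+ * TCP/UDP/SCTP is refined to the specific flow type. The result is
+ * later used to pick a matching template packet from sxe2_fnav_pkt[].
+ */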
IPPROTO_TCP: + flow = SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + break; + case IPPROTO_UDP: + flow = SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + break; + case IPPROTO_SCTP: + flow = SXE2_FNAV_FLOW_TYPE_IPV4_SCTP; + break; + default: + flow = SXE2_FNAV_FLOW_TYPE_IPV4_OTHER; + break; + } + } else if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_IPV6_OTHER) { + switch (filter->full_key.ip.v6.proto) { + case IPPROTO_TCP: + flow = SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + break; + case IPPROTO_UDP: + flow = SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + break; + case IPPROTO_SCTP: + flow = SXE2_FNAV_FLOW_TYPE_IPV6_SCTP; + break; + default: + flow = SXE2_FNAV_FLOW_TYPE_IPV6_OTHER; + break; + } + } else if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP) { + flow = SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + } else if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP) { + flow = SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + } else if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP) { + flow = SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + } else if (filter->flow_type == SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP) { + flow = SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + } else { + flow = filter->flow_type; + } + return flow; +} + +STATIC void sxe2_pkt_insert_by_flow_eth(struct sxe2_fnav_filter *filter, bool frag, + u8 *loc) +{ + sxe2_pkt_insert_mac_addr(loc, filter->full_key.eth.h_dest); + sxe2_pkt_insert_mac_addr(loc + ETH_ALEN, filter->full_key.eth.h_source); + if (filter->full_key.ext_data.s_vlan_tci || + filter->full_key.ext_data.vlan_type) { + sxe2_pkt_insert_u16(loc, SXE2_ETH_TYPE_F_OFFSET, + filter->full_key.ext_data.vlan_type); + sxe2_pkt_insert_u16(loc, SXE2_ETH_VLAN_TCI_OFFSET, + filter->full_key.ext_data.s_vlan_tci); + sxe2_pkt_insert_u16(loc, SXE2_ETH_TYPE_VLAN_OFFSET, + filter->full_key.eth.h_proto); + } else { + sxe2_pkt_insert_u16(loc, SXE2_ETH_TYPE_F_OFFSET, + filter->full_key.eth.h_proto); + } +} + +STATIC void sxe2_pkt_insert_by_flow_ipv4_tcp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_u32(loc, SXE2_IPV4_SRC_ADDR_OFFSET, + filter->full_key.ip.v4.src_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_TCP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u32(loc, SXE2_IPV4_DST_ADDR_OFFSET, + filter->full_key.ip.v4.dst_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_TCP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TOS_OFFSET, filter->full_key.ip.v4.tos); + if (filter->full_key.ip.v4.ttl) { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, + filter->full_key.ip.v4.ttl); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, SXE2_U8_MASK); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); + if (frag) + loc[SXE2_FNAV_IPV4_PKT_FLAG_MF_OFFSET] = SXE2_FNAV_IPV4_PKT_FLAG_MF; +} + +STATIC void sxe2_pkt_insert_by_flow_ipv4_udp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_u32(loc, SXE2_IPV4_SRC_ADDR_OFFSET, + filter->full_key.ip.v4.src_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_UDP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u32(loc, SXE2_IPV4_DST_ADDR_OFFSET, + filter->full_key.ip.v4.dst_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_UDP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TOS_OFFSET, filter->full_key.ip.v4.tos); + if (filter->full_key.ip.v4.ttl) { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, + filter->full_key.ip.v4.ttl); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, SXE2_U8_MASK); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); + 
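+ /*
+ * The template packet begins with its Ethernet header, so loc
+ * (offset 0) holds the destination MAC and loc + ETH_ALEN the source
+ * MAC; every other field is written through the SXE2_*_OFFSET
+ * constants relative to the same base.
+ */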
sxe2_pkt_insert_mac_addr(loc + ETH_ALEN, filter->full_key.ext_data.src_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv4_sctp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_u32(loc, SXE2_IPV4_SRC_ADDR_OFFSET, + filter->full_key.ip.v4.src_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_SCTP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u32(loc, SXE2_IPV4_DST_ADDR_OFFSET, + filter->full_key.ip.v4.dst_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV4_SCTP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TOS_OFFSET, filter->full_key.ip.v4.tos); + if (filter->full_key.ip.v4.ttl) { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, + filter->full_key.ip.v4.ttl); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, SXE2_U8_MASK); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv4_other(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_u32(loc, SXE2_IPV4_SRC_ADDR_OFFSET, + filter->full_key.ip.v4.src_ip); + sxe2_pkt_insert_u32(loc, SXE2_IPV4_DST_ADDR_OFFSET, + filter->full_key.ip.v4.dst_ip); + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TOS_OFFSET, filter->full_key.ip.v4.tos); + if (filter->full_key.ip.v4.ttl) { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, + filter->full_key.ip.v4.ttl); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV4_TTL_OFFSET, SXE2_U8_MASK); + } + sxe2_pkt_insert_u8(loc, SXE2_IPV4_PROTO_OFFSET, + filter->full_key.ip.v4.proto); + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv6_tcp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_SRC_ADDR_OFFSET, + filter->full_key.ip.v6.src_ip); + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_DST_ADDR_OFFSET, + filter->full_key.ip.v6.dst_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV6_TCP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u16(loc, SXE2_IPV6_TCP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8_tc(loc, SXE2_IPV6_TC_OFFSET, filter->full_key.ip.v6.tc); + if (filter->full_key.ip.v6.hlim) { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, + filter->full_key.ip.v6.hlim); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, 1); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv6_udp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_SRC_ADDR_OFFSET, + filter->full_key.ip.v6.src_ip); + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_DST_ADDR_OFFSET, + filter->full_key.ip.v6.dst_ip); + sxe2_pkt_insert_u16(loc, SXE2_IPV6_UDP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u16(loc, SXE2_IPV6_UDP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8_tc(loc, SXE2_IPV6_TC_OFFSET, filter->full_key.ip.v6.tc); + if (filter->full_key.ip.v6.hlim) { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, + filter->full_key.ip.v6.hlim); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, 1); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv6_sctp(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_SRC_ADDR_OFFSET, + filter->full_key.ip.v6.src_ip); + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_DST_ADDR_OFFSET, + filter->full_key.ip.v6.dst_ip); + sxe2_pkt_insert_u16(loc, 
SXE2_IPV6_SCTP_SRC_PORT_OFFSET, + filter->full_key.l4.src_port); + sxe2_pkt_insert_u16(loc, SXE2_IPV6_SCTP_DST_PORT_OFFSET, + filter->full_key.l4.dst_port); + sxe2_pkt_insert_u8_tc(loc, SXE2_IPV6_TC_OFFSET, filter->full_key.ip.v6.tc); + if (filter->full_key.ip.v6.hlim) { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, + filter->full_key.ip.v6.hlim); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, 1); + } + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC void sxe2_pkt_insert_by_flow_ipv6_other(struct sxe2_fnav_filter *filter, + bool frag, u8 *loc) +{ + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_SRC_ADDR_OFFSET, + filter->full_key.ip.v6.src_ip); + sxe2_pkt_insert_ipv6_addr(loc, SXE2_IPV6_DST_ADDR_OFFSET, + filter->full_key.ip.v6.dst_ip); + sxe2_pkt_insert_u8_tc(loc, SXE2_IPV6_TC_OFFSET, filter->full_key.ip.v6.tc); + if (filter->full_key.ip.v6.hlim) { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, + filter->full_key.ip.v6.hlim); + } else { + sxe2_pkt_insert_u8(loc, SXE2_IPV6_HLIM_OFFSET, 1); + } + sxe2_pkt_insert_u8(loc, SXE2_IPV6_PROTO_OFFSET, + filter->full_key.ip.v6.proto); + sxe2_pkt_insert_mac_addr(loc, filter->full_key.ext_data.dst_mac); +} + +STATIC int sxe2_pkt_insert_by_flow(enum sxe2_fnav_flow_type flow, + struct sxe2_fnav_filter *filter, bool frag, + u8 *loc) +{ + switch (flow) { + case SXE2_FNAV_FLOW_TYPE_ETH: + sxe2_pkt_insert_by_flow_eth(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + sxe2_pkt_insert_by_flow_ipv4_tcp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + sxe2_pkt_insert_by_flow_ipv4_udp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + sxe2_pkt_insert_by_flow_ipv4_sctp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + sxe2_pkt_insert_by_flow_ipv4_other(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + sxe2_pkt_insert_by_flow_ipv6_tcp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + sxe2_pkt_insert_by_flow_ipv6_udp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + sxe2_pkt_insert_by_flow_ipv6_sctp(filter, frag, loc); + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + sxe2_pkt_insert_by_flow_ipv6_other(filter, frag, loc); + break; + default: + return -EINVAL; + } + return 0; +} + +s32 sxe2_fnav_gen_prgm_pkt(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, u8 *pkt, bool frag, + bool tun) +{ + enum sxe2_fnav_flow_type flow; + u8 *loc_inner = NULL; + u8 *loc; + u16 idx; + u32 tunn_outer_len; + s32 ret; + + flow = sxe2_flow_type_translate_type(filter); + for (idx = 0; idx < SXE2_FNAV_PKT_NUM; idx++) { + if (sxe2_fnav_pkt[idx].flow_type == flow) + break; + } + if (idx == SXE2_FNAV_PKT_NUM) + return -EINVAL; + + if (!tun) { + switch (flow) { + default: + memcpy(pkt, sxe2_fnav_pkt[idx].pkt, + sxe2_fnav_pkt[idx].pkt_len); + break; + } + loc = pkt; + } else { + if (!sxe2_fnav_pkt[idx].tun_pkt) + return -EINVAL; + switch (flow) { + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + case SXE2_FNAV_FLOW_TYPE_ETH: + tunn_outer_len = sizeof(sxe2_fnav_udp4_vxlan_pkt); + memcpy(pkt, sxe2_fnav_udp4_vxlan_pkt, tunn_outer_len); + loc_inner = &pkt[tunn_outer_len]; + memcpy(loc_inner, sxe2_fnav_pkt[idx].pkt, + sxe2_fnav_pkt[idx].pkt_len); + sxe2_pkt_insert_u16(pkt, + SXE2_IPV4_OUTER_LEN_OFFSET, + cpu_to_be16((u16)(tunn_outer_len + + sxe2_fnav_pkt[idx].pkt_len - + SXE2_FNAV_ETH_LEN))); + 
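+ /*
+ * Worked example of the outer-header length fix-ups (illustrative
+ * numbers only): with a 50-byte VXLAN outer template and a 64-byte
+ * inner template, the outer IPv4 total length written above is
+ * 50 + 64 - SXE2_FNAV_ETH_LEN (presumably the 14-byte Ethernet
+ * header) = 100, and the outer UDP length written below is
+ * 64 + SXE2_FNAV_VXLAN_UDP_LEN, i.e. the inner packet plus the
+ * UDP/VXLAN encapsulation overhead (the constant is assumed to cover
+ * both headers).
+ */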
sxe2_pkt_insert_u16(pkt, + SXE2_IPV4_UDP_LEN_OFFSET, + cpu_to_be16((u16)(sxe2_fnav_pkt[idx].pkt_len + + SXE2_FNAV_VXLAN_UDP_LEN))); + sxe2_pkt_insert_u16(pkt, + SXE2_IPV4_UDP_DST_PORT_OFFSET, + cpu_to_be16((u16)(SXE2_VXLAN_UDP_DST_PORT))); + loc = loc_inner; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + tunn_outer_len = sizeof(sxe2_fnav_ipv6_gre_tun_pkt); + memcpy(pkt, sxe2_fnav_ipv6_gre_tun_pkt, tunn_outer_len); + loc_inner = &pkt[tunn_outer_len]; + memcpy(loc_inner, sxe2_fnav_pkt[idx].pkt, + sxe2_fnav_pkt[idx].pkt_len); + sxe2_pkt_insert_u16(pkt, SXE2_IPV4_OUTER_LEN_OFFSET, + cpu_to_be16(sxe2_fnav_pkt[idx].pkt_len + + SXE2_FNAV_GRE_HEADER_LEN)); + loc = loc_inner; + break; + + default: + return -EINVAL; + } + } + + ret = sxe2_pkt_insert_by_flow(flow, filter, frag, loc); + if (ret) + return ret; + + if (filter->full_key.has_flex_filed) { + loc = pkt; + sxe2_pkt_insert_u16(loc, filter->full_key.flex_offset, + filter->full_key.flex_word); + } + + return 0; +} + +STATIC s32 sxe2_fnav_packet_xmit(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, bool is_add, + bool tun) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_tx_fnav_desc desc; + struct sxe2_vsi *ctrl_vsi; + u8 *pkt; + s32 ret; + + ctrl_vsi = adapter->vsi_ctxt.ctrl_vsi; + if (!ctrl_vsi) + return -EINVAL; + + pkt = devm_kzalloc(dev, SXE2_FNAV_MAX_RAW_PKT_SIZE, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + sxe2_fnav_get_prgm_desc(adapter, filter, &desc, is_add, tun); + ret = sxe2_fnav_gen_prgm_pkt(adapter, filter, pkt, false, tun); + if (ret) + goto l_free; + + ret = sxe2_prgm_fnav_fltr(ctrl_vsi, &desc, pkt); + if (ret) + goto l_free; + + return 0; + +l_free: + devm_kfree(dev, pkt); + return ret; +} + +s32 sxe2_fnav_hw_filter_update_with_pkt(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, bool is_add, + bool is_update, bool is_tunn) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + u8 fd_space; + + mutex_lock(&adapter->fnav_ctxt.fnav_space_lock); + + if (is_add && !is_update) { + if (adapter->fnav_ctxt.space_gcnt < vsi->fnav.space_gsize) + fd_space = SXE2_FNAV_FD_SPACE_FROM_GUAR; + else + fd_space = SXE2_FNAV_FD_SPACE_FROM_BEST_EFFORT; + + if (is_tunn) + filter->tunn_fd_space = fd_space; + else + filter->fd_space = fd_space; + } else { + fd_space = is_tunn ? 
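+ /*
+ * On delete, release the entry from the same pool (guaranteed vs
+ * best-effort) that was recorded when it was added; this is why
+ * fd_space is persisted per filter, separately for the tunnel and
+ * non-tunnel instances. The space_gcnt/space_bcnt mirrors are only
+ * adjusted after the programming packet has been sent successfully,
+ * and an update of an existing entry leaves them untouched.
+ */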
filter->tunn_fd_space : filter->fd_space; + } + + ret = sxe2_fnav_packet_xmit(adapter, filter, is_add, is_tunn); + if (ret == 0 && !is_update) { + if (is_add) { + if (fd_space == SXE2_FNAV_FD_SPACE_FROM_GUAR) + adapter->fnav_ctxt.space_gcnt++; + else + adapter->fnav_ctxt.space_bcnt++; + } else { + if (fd_space == SXE2_FNAV_FD_SPACE_FROM_GUAR) + adapter->fnav_ctxt.space_gcnt--; + else + adapter->fnav_ctxt.space_bcnt--; + } + } + + mutex_unlock(&adapter->fnav_ctxt.fnav_space_lock); + + return ret; +} + +STATIC s32 sxe2_fnav_flow_update_after_filter_del(struct sxe2_vsi *vsi, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN] == 0) { + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, true); + if (ret) { + LOG_WARN_BDF("delete hw filter failed, ret:%d\n", ret); + goto l_end; + } + } + + if (vsi->id_in_pf == adapter->arfs_ctxt.vsi_id_in_pf && + flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN] == 0 && + flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0) { + (void)sxe2_fnav_default_flow_recovery_by_type(vsi, flow_cfg); + + } else if (flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0) { + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, false); + if (ret) { + LOG_WARN_BDF("delete hw filter failed, ret:%d\n", ret); + goto l_end; + } + } + + if (flow_cfg->full_match == false && + flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN] == 0 && + flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0) { + list_del(&flow_cfg->l_node); + devm_kfree(dev, flow_cfg); + } + +l_end: + return ret; +} + +STATIC s32 sxe2_fnav_action_to_vf_all_filter_del(struct sxe2_vsi *vsi, + struct sxe2_vf_node *vf_node) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = NULL; + struct sxe2_fnav_filter *filter, *tmp; + u16 i = 0; + u16 vf_vsi_id; + + if (!vsi) + return ret; + + adapter = vsi->adapter; + for (i = 0; i < SXE2_VF_TYPE_NR; i++) { + vf_vsi_id = vf_node->vsi_id[i]; + mutex_lock(&adapter->fnav_ctxt.filter_lock); + list_for_each_entry_safe(filter, tmp, &vsi->fnav.filter_list, l_node) { + if (filter->ori_vsi_hw != filter->dst_vsi_hw && + filter->dst_vsi_hw == vf_vsi_id) { + ret = sxe2_fnav_filter_del(vsi, filter); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav delete filter\t" + "failed, ori_vsi_id=%u,\t" + "dst_vsi_hw=%u, ret:%d\n", + filter->ori_vsi_hw, + filter->dst_vsi_hw, ret); + } + } + } + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + } + return ret; +} + +s32 sxe2_fwc_fnav_hw_clear(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_HW_CLEAR, NULL, 0, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("clear fnav hw filter failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_fnav_switch(struct sxe2_adapter *adapter, bool is_enable) +{ + s32 ret = 0; + struct sxe2_vsi *vsi = NULL; + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + vsi = adapter->vsi_ctxt.main_vsi; + mutex_lock(&adapter->fnav_ctxt.fnav_state_lock); + if (adapter->fnav_ctxt.state != SXE2_FNAV_STATE_READY) { + ret = -EBUSY; + goto l_end; + } + + if (is_enable) { + if (!test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) { + (void)sxe2_fnav_default_flow_set(adapter); + (void)sxe2_arfs_enable(adapter); + set_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags); + } + goto l_end; + } + + if (!test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) + goto l_end; + + ret = 
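+ /*
+ * Disable path, in dependency order: delete every filter on the main
+ * VSI first, then disable aRFS, then drop the flow profiles.
+ * SXE2_FLAG_FNAV_ENABLE is cleared only once all steps have
+ * succeeded, so a failed teardown leaves the flag set and can be
+ * retried.
+ */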
sxe2_fnav_del_filter_by_vsi(vsi); + if (ret) { + LOG_ERROR_BDF("delete all filter failed, ret:%d", ret); + goto l_end; + } + + sxe2_arfs_disable(adapter); + + ret = sxe2_pf_fnav_flow_cfg_clear(adapter); + if (ret) { + LOG_ERROR_BDF("clear flow cfg failed, ret:%d", ret); + goto l_end; + } + + clear_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags); + +l_end: + mutex_unlock(&adapter->fnav_ctxt.fnav_state_lock); + return ret; +} + +u32 sxe2_fnav_max_filter_cnt_get_by_vsi(struct sxe2_vsi *vsi) +{ + return vsi->fnav.space_gsize + vsi->fnav.space_bsize; +} + +struct sxe2_fnav_filter * +sxe2_fnav_find_filter_by_loc_unlock(struct sxe2_vsi_fnav *vsi_fnav, u32 loc) +{ + struct sxe2_fnav_filter *filter_tmp = NULL; + struct sxe2_fnav_filter *filter_find = NULL; + + list_for_each_entry(filter_tmp, &vsi_fnav->filter_list, l_node) { + if (loc == filter_tmp->filter_loc) { + filter_find = filter_tmp; + break; + } + if (loc < filter_tmp->filter_loc) + break; + } + + return filter_find; +} + +struct sxe2_fnav_filter *sxe2_fnav_find_filter_by_loc_lock(struct sxe2_vsi *vsi, + u32 loc) +{ + struct sxe2_fnav_filter *filter_tmp = NULL; + struct sxe2_fnav_filter *filter_find = NULL; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + list_for_each_entry(filter_tmp, &vsi->fnav.filter_list, l_node) { + if (loc == filter_tmp->filter_loc) { + filter_find = filter_tmp; + break; + } + if (loc < filter_tmp->filter_loc) + break; + } + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + + return filter_find; +} + +STATIC bool sxe2_fnav_filter_cmp(struct sxe2_fnav_filter *fltr_a, + struct sxe2_fnav_filter *fltr_b) +{ + if (fltr_a->flow_type != fltr_b->flow_type) + return false; + + if (!(fltr_a->full_key.has_flex_filed == fltr_b->full_key.has_flex_filed && + fltr_a->full_key.flex_offset == fltr_b->full_key.flex_offset && + fltr_a->full_key.flex_word == fltr_b->full_key.flex_word)) + return false; + + if (memcmp(&fltr_a->full_key.eth, &fltr_b->full_key.eth, + sizeof(fltr_a->full_key.eth))) + return false; + if (memcmp(&fltr_a->full_key.eth_inner, &fltr_b->full_key.eth_inner, + sizeof(fltr_a->full_key.eth_inner))) + return false; + if (memcmp(&fltr_a->full_key.ip, &fltr_b->full_key.ip, + sizeof(fltr_a->full_key.ip))) + return false; + if (memcmp(&fltr_a->full_key.ip_inner, &fltr_b->full_key.ip_inner, + sizeof(fltr_a->full_key.ip_inner))) + return false; + if (memcmp(&fltr_a->full_key.l4, &fltr_b->full_key.l4, + sizeof(fltr_a->full_key.l4))) + return false; + if (memcmp(&fltr_a->full_key.l4_inner, &fltr_b->full_key.l4_inner, + sizeof(fltr_a->full_key.l4_inner))) + return false; + if (memcmp(&fltr_a->full_key.ext_data, &fltr_b->full_key.ext_data, + sizeof(fltr_a->full_key.ext_data))) + return false; + if (memcmp(&fltr_a->full_key.ext_data_inner, &fltr_b->full_key.ext_data_inner, + sizeof(fltr_a->full_key.ext_data_inner))) + return false; + if (memcmp(&fltr_a->full_key.tunnel_data, &fltr_b->full_key.tunnel_data, + sizeof(fltr_a->full_key.tunnel_data))) + return false; + + return true; +} + +bool sxe2_fnav_filter_cmp_with_flow_type(struct sxe2_fnav_filter *fltr_a, + struct sxe2_fnav_filter *fltr_b) +{ + enum sxe2_fnav_flow_type flow_type = fltr_a->flow_type; + + if (fltr_a->flow_type != fltr_b->flow_type) + return false; + + if (!(fltr_a->full_key.has_flex_filed == fltr_b->full_key.has_flex_filed && + fltr_a->full_key.flex_offset == fltr_b->full_key.flex_offset && + fltr_a->full_key.flex_word == fltr_b->full_key.flex_word)) + return false; + + switch (flow_type) { + case 
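+ /*
+ * Unlike sxe2_fnav_filter_cmp() above, which memcmp()s every section
+ * of the full key, this variant compares only the fields that take
+ * part in matching for the given flow type, so two filters that
+ * differ solely in ignored fields are still treated as duplicates.
+ */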
SXE2_FNAV_FLOW_TYPE_ETH: + if (fltr_a->full_key.ext_data.vlan_type == + fltr_b->full_key.ext_data.vlan_type && + fltr_a->full_key.ext_data.s_vlan_tci == + fltr_b->full_key.ext_data.s_vlan_tci && + !memcmp(&fltr_a->full_key.eth, &fltr_b->full_key.eth, + sizeof(fltr_a->full_key.eth))) + return true; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + if (fltr_a->full_key.l4.dst_port == fltr_b->full_key.l4.dst_port && + fltr_a->full_key.l4.src_port == fltr_b->full_key.l4.src_port && + fltr_a->full_key.ip.v4.dst_ip == fltr_b->full_key.ip.v4.dst_ip && + fltr_a->full_key.ip.v4.src_ip == fltr_b->full_key.ip.v4.src_ip && + fltr_a->full_key.ip.v4.tos == fltr_b->full_key.ip.v4.tos) + return true; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + if (fltr_a->full_key.ip.v4.dst_ip == fltr_b->full_key.ip.v4.dst_ip && + fltr_a->full_key.ip.v4.src_ip == fltr_b->full_key.ip.v4.src_ip && + fltr_a->full_key.ip.v4.proto == fltr_b->full_key.ip.v4.proto && + fltr_a->full_key.ip.v4.tos == fltr_b->full_key.ip.v4.tos) + return true; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + if (fltr_a->full_key.l4.dst_port == fltr_b->full_key.l4.dst_port && + fltr_a->full_key.l4.src_port == fltr_b->full_key.l4.src_port && + !memcmp(fltr_a->full_key.ip.v6.dst_ip, + fltr_b->full_key.ip.v6.dst_ip, + sizeof(fltr_a->full_key.ip.v6.dst_ip)) && + !memcmp(fltr_a->full_key.ip.v6.src_ip, + fltr_b->full_key.ip.v6.src_ip, + sizeof(fltr_a->full_key.ip.v6.src_ip)) && + fltr_a->full_key.ip.v6.tc == fltr_b->full_key.ip.v6.tc) + return true; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + if (!memcmp(fltr_a->full_key.ip.v6.dst_ip, + fltr_b->full_key.ip.v6.dst_ip, + sizeof(fltr_a->full_key.ip.v6.dst_ip)) && + !memcmp(fltr_a->full_key.ip.v6.src_ip, + fltr_b->full_key.ip.v6.src_ip, + sizeof(fltr_a->full_key.ip.v6.src_ip)) && + fltr_a->full_key.ip.v6.tc == fltr_b->full_key.ip.v6.tc && + fltr_a->full_key.ip.v6.proto == fltr_b->full_key.ip.v6.proto) + return true; + break; + default: + break; + } + return false; +} + +STATIC bool sxe2_fnav_support_full_match(enum sxe2_fnav_flow_type flow_type) +{ + if ((flow_type == SXE2_FNAV_FLOW_TYPE_IPV4_TCP || + flow_type == SXE2_FNAV_FLOW_TYPE_IPV4_UDP || + flow_type == SXE2_FNAV_FLOW_TYPE_IPV6_TCP || + flow_type == SXE2_FNAV_FLOW_TYPE_IPV6_UDP)) { + return true; + } + + return false; +} + +bool sxe2_fnav_flow_cfg_full_match(struct sxe2_adapter *adapter, + enum sxe2_fnav_flow_type flow_type) +{ + struct sxe2_fnav_flow_cfg *flow_cfg = NULL; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + + if (!vsi) + return false; + + flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type); + if (!flow_cfg) + return false; + + return flow_cfg->full_match; +} + +s32 sxe2_fnav_default_flow_recovery_by_type(struct sxe2_vsi *vsi, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + s32 ret = 0; + enum sxe2_fnav_flow_type flow_type = flow_cfg->flow_type; + struct sxe2_adapter *adapter = vsi->adapter; + + if (sxe2_fnav_support_full_match(flow_type) && flow_cfg->full_match && + test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) { + goto l_end; + } + + ret = sxe2_fnav_flow_cfg_del(adapter, flow_cfg, false); + if (ret) { + LOG_ERROR_BDF("delete outer flow_cfg[%d] failed, ret:%d.\n", + flow_type, ret); + goto l_end; + } + + if (sxe2_fnav_support_full_match(flow_type) && + test_bit(SXE2_FLAG_FNAV_ENABLE, adapter->flags)) { + ret = sxe2_fnav_default_flow_set_by_type(vsi, flow_type); + if 
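+ /*
+ * A failure to re-install the default full-match flow is logged at
+ * warn level only, but the error is still propagated to the caller
+ * through l_end below.
+ */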
(ret) { + LOG_WARN_BDF("flow_cfg[%d] full match set failed, ret:%d\n", + flow_type, ret); + } + } + +l_end: + return ret; +} + +s32 sxe2_fnav_del_filter_by_loc(struct sxe2_vsi *vsi, u32 loc) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = -ENOENT; + struct sxe2_fnav_filter *filter; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + filter = sxe2_fnav_find_filter_by_loc_unlock(&vsi->fnav, loc); + if (filter) { + ret = sxe2_fnav_filter_del(vsi, filter); + if (ret) + LOG_ERROR_BDF("sxe2 fnav delete filter failed, loc=%d, ret:%d\n", + loc, ret); + } + + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + return ret; +} + +static s32 sxe2_fnav_flow_replay(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + u32 j; + struct sxe2_fnav_flow_seg *seg; + struct sxe2_fnav_flow_cfg *flow_tmp = NULL; + + mutex_lock(&vsi->fnav.flow_cfg_lock); + + list_for_each_entry(flow_tmp, &vsi->fnav.flow_cfg_list, l_node) { + for (j = 0; j < SXE2_FNAV_SEG_MAX; j++) { + seg = flow_tmp->seg[j]; + if (seg) { + ret = sxe2_fnav_flow_cfg_replay(adapter, seg); + if (ret) { + LOG_ERROR_BDF("flow cfg replay failed,\t" + "flow_type:%u, level:%u\n", + flow_tmp->flow_type, j); + goto l_end; + } + } + } + } + +l_end: + mutex_unlock(&vsi->fnav.flow_cfg_lock); + return ret; +} + +s32 sxe2_fnav_filter_replay(struct sxe2_vsi *vsi, bool to_vf) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct sxe2_fnav_filter *filter = NULL; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + list_for_each_entry(filter, &vsi->fnav.filter_list, l_node) { + if ((to_vf && filter->ori_vsi_hw == filter->dst_vsi_hw) || + (!to_vf && filter->ori_vsi_hw != filter->dst_vsi_hw)) { + continue; + } + + ret = sxe2_pf_fnav_hw_filter_update(vsi, filter, true, false, + SXE2_FNAV_FILTER_UPDATE_ADMIN); + if (ret) { + LOG_ERROR_BDF("add hw filter failed, ret:%d\n", ret); + goto l_unlock; + } + } + +l_unlock: + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + return ret; +} + +s32 sxe2_fnav_rule_reply(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + s32 ret = 0; + + if (!vsi) + return 0; + + ret = sxe2_fnav_flow_replay(vsi); + if (ret) { + LOG_DEV_ERR("fnav flow replay failed, ret: %d\n", ret); + goto l_end; + } + + (void)sxe2_fnav_default_flow_set(adapter); + + ret = sxe2_fnav_filter_replay(vsi, false); + if (ret) + LOG_DEV_ERR("fnav filter replay failed, ret: %d\n", ret); + +l_end: + return ret; +} + +void sxe2_fwc_fnav_trace_trigger(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_TRACE_TRIGGER, NULL, 0, NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("fnav trace trigger cmd fail, ret=%d\n", ret); +} + +STATIC void +sxe2_fnav_recorder1_hit_info_first(struct sxe2_adapter *adapter, + struct sxe2_fnav_hit_info *hit_info, + struct sxe2_fnav_addition_info *addition_info) +{ + int i; + u32 tmp[10] = {0}; + u32 inputset[16] = {0}; + + LOG_DEV_INFO("fd trace1 hit info : \r\n"); + LOG_DEV_INFO("entry_vld = 0x%x\r\n", hit_info->entry_vld); + + LOG_DEV_INFO("hit_flg = 0x%x\r\n", hit_info->hit_flg); + LOG_DEV_INFO("ck1 = 0x%x\r\n", hit_info->ck1); + LOG_DEV_INFO("ht_index1 = 0x%x\r\n", hit_info->ht_index1); + + tmp[0] = (u32)(hit_info->ck2_3_0 | (hit_info->ck2_12_4 << 4)); + LOG_DEV_INFO("ck2 = 0x%x\r\n", tmp[0]); + LOG_DEV_INFO("ht_index2 = 0x%x\r\n", hit_info->ht_index2); + LOG_DEV_INFO("ht1_avl = 0x%x\r\n", hit_info->ht1_avl); + + tmp[1] = 
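+ /*
+ * Several trace fields are split across hardware dword boundaries
+ * and are stitched back together with shift-and-OR, as for ck2
+ * above: with illustrative values ck2_3_0 = 0x5 (bits 3:0) and
+ * ck2_12_4 = 0x1a3 (bits 12:4), the reassembled value is
+ * 0x5 | (0x1a3 << 4) = 0x1a35. ht2_avl below is rebuilt the same
+ * way from its 4-bit low and high halves.
+ */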
(u32)(hit_info->ht2_avl_3_0 | (hit_info->ht2_avl_7_4 << 4)); + LOG_DEV_INFO("ht2_avl = 0x%x\r\n", tmp[1]); + LOG_DEV_INFO("kt_index = 0x%x\r\n", hit_info->kt_index); + + LOG_DEV_INFO("qindex = 0x%x\r\n", hit_info->qindex); + + tmp[2] = (u32)(hit_info->stat_0 | (hit_info->stat_13_1 << 1)); + LOG_DEV_INFO("stat = 0x%x\r\n", tmp[2]); + LOG_DEV_INFO("stat_ena = 0x%x\r\n", hit_info->stat_ena); + LOG_DEV_INFO("evict_ena = 0x%x\r\n", hit_info->evict_ena); + LOG_DEV_INFO("to_queue = 0x%x\r\n", hit_info->toqueue); + LOG_DEV_INFO("to_queue_prio = 0x%x\r\n", hit_info->toqueue_prio); + LOG_DEV_INFO("ad_drop = 0x%x\r\n", hit_info->ad_drop); + + tmp[3] = (u32)(hit_info->fdid_8_0 | (hit_info->fdid_31_9 << 9)); + LOG_DEV_INFO("fdid = 0x%x\r\n", tmp[3]); + LOG_DEV_INFO("fdid_prio = 0x%x\r\n", hit_info->fdid_prio); + + tmp[4] = (u32)(hit_info->flow_id_5_0 | (hit_info->flow_id_15_6 << 6)); + LOG_DEV_INFO("flow_id = 0x%x\r\n", tmp[4]); + LOG_DEV_INFO("ad_fd_vsi = 0x%x\r\n", hit_info->ad_fd_vsi); + LOG_DEV_INFO("gl_space = 0x%x\r\n", hit_info->gl_space); + LOG_DEV_INFO("pf_space = 0x%x\r\n", hit_info->pf_space); + LOG_DEV_INFO("vsi_spcae = 0x%x\r\n", hit_info->vsi_space); + + tmp[5] = (u32)(hit_info->profile_id_4_0 | (hit_info->profile_id_6_5 << 5)); + LOG_DEV_INFO("profile_id = 0x%x\r\n", tmp[5]); + LOG_DEV_INFO("vsi = 0x%x\r\n", hit_info->vsi); + + inputset[0] = (hit_info->inset_26_0 << 5) | + ((hit_info->inset_58_27 & 0xf8000000) >> 27); + inputset[1] = ((hit_info->inset_58_27 & 0x7ffffff) << 5) | + ((hit_info->inset_90_59 & 0xf8000000) >> 27); + inputset[2] = ((hit_info->inset_90_59 & 0x7ffffff) << 5) | + ((hit_info->inset_122_91 & 0xf8000000) >> 27); + inputset[3] = ((hit_info->inset_122_91 & 0x7ffffff) << 5) | + ((hit_info->inset_154_123 & 0xf8000000) >> 27); + inputset[4] = ((hit_info->inset_154_123 & 0x7ffffff) << 5) | + ((hit_info->inset_186_155 & 0xf8000000) >> 27); + inputset[5] = ((hit_info->inset_186_155 & 0x7ffffff) << 5) | + ((hit_info->inset_218_187 & 0xf8000000) >> 27); + inputset[6] = ((hit_info->inset_218_187 & 0x7ffffff) << 5) | + ((hit_info->inset_250_219 & 0xf8000000) >> 27); + inputset[7] = ((hit_info->inset_250_219 & 0x7ffffff) << 5) | + ((hit_info->inset_282_251 & 0xf8000000) >> 27); + inputset[8] = ((hit_info->inset_282_251 & 0x7ffffff) << 5) | + ((hit_info->inset_314_283 & 0xf8000000) >> 27); + inputset[9] = ((hit_info->inset_314_283 & 0x7ffffff) << 5) | + ((hit_info->inset_346_315 & 0xf8000000) >> 27); + inputset[10] = ((hit_info->inset_346_315 & 0x7ffffff) << 5) | + ((hit_info->inset_378_347 & 0xf8000000) >> 27); + inputset[11] = ((hit_info->inset_378_347 & 0x7ffffff) << 5) | + ((hit_info->inset_410_379 & 0xf8000000) >> 27); + inputset[12] = ((hit_info->inset_410_379 & 0x7ffffff) << 5) | + ((hit_info->inset_442_411 & 0xf8000000) >> 27); + inputset[13] = ((hit_info->inset_442_411 & 0x7ffffff) << 5) | + ((hit_info->inset_474_443 & 0xf8000000) >> 27); + inputset[14] = ((hit_info->inset_474_443 & 0x7ffffff) << 5) | + ((hit_info->inset_506_475 & 0xf8000000) >> 27); + inputset[15] = ((hit_info->inset_506_475 & 0x7ffffff) << 5) | + hit_info->inset_511_507; + + for (i = 0; i < 16; i++) { + LOG_DEV_INFO("inputset[%02d] = 0x%04x\r\n", 2 * i, + (inputset[i] >> 16) & 0xffff); + LOG_DEV_INFO("inputset[%02d] = 0x%04x\r\n", 2 * i + 1, + inputset[i] & 0xffff); + } +} + +STATIC void +sxe2_fnav_recorder1_hit_info_detail(struct sxe2_adapter *adapter, + struct sxe2_fnav_hit_info *hit_info, + struct sxe2_fnav_addition_info *addition_info) +{ + u32 tmp[10] = {0}; + + switch (hit_info->fail_sts) { + case 
0:
+ LOG_DEV_INFO("fail_sts = 0 : fd hit / edit fd table succ\r\n");
+ break;
+ case 1:
+ LOG_DEV_INFO("fail_sts = 1 : config not enabled\r\n");
+ break;
+ case 2:
+ LOG_DEV_INFO("fail_sts = 2 : no global/pf/vsi config space left\r\n");
+ break;
+ case 3:
+ LOG_DEV_INFO("fail_sts = 3 : FD prog pkt dropped by sw/acl, or\t"
+ "internal serious conflict occurred\r\n");
+ break;
+ case 4:
+ LOG_DEV_INFO("fail_sts = 4 : entry does not exist, delete failed\r\n");
+ break;
+ case 5:
+ LOG_DEV_INFO("fail_sts = 5 : entry already exists, add failed\r\n");
+ break;
+ case 6:
+ LOG_DEV_INFO("fail_sts = 6 : bucket and fkot both full\r\n");
+ break;
+ case 7:
+ LOG_DEV_INFO("fail_sts = 7 : rxft bypass\r\n");
+ break;
+ }
+
+ switch (hit_info->cmd) {
+ case 0:
+ LOG_DEV_INFO("cmd = 0 : idle state\r\n");
+ break;
+ case 1:
+ LOG_DEV_INFO("cmd = 1 : init table\r\n");
+ break;
+ case 2:
+ LOG_DEV_INFO("cmd = 2 : fd program packet add table\r\n");
+ break;
+ case 3:
+ LOG_DEV_INFO("cmd = 3 : fd program packet del table\r\n");
+ break;
+ case 4:
+ LOG_DEV_INFO("cmd = 4 : firmware add table\r\n");
+ break;
+ case 5:
+ LOG_DEV_INFO("cmd = 5 : firmware del table\r\n");
+ break;
+ case 6:
+ LOG_DEV_INFO("cmd = 6 : tcp fin/rst del table\r\n");
+ break;
+ case 7:
+ LOG_DEV_INFO("cmd = 7 : hardware pfr/vmr del table\r\n");
+ break;
+ case 8:
+ LOG_DEV_INFO("cmd = 8 : firmware pfr/vmr del table\r\n");
+ break;
+ case 9:
+ LOG_DEV_INFO("cmd = 9 : hardware search table\r\n");
+ break;
+ case 10:
+ LOG_DEV_INFO("cmd = 10 : firmware search table\r\n");
+ break;
+ case 11:
+ LOG_DEV_INFO("cmd = 11 : firmware write table(designated address)\r\n");
+ break;
+ case 12:
+ LOG_DEV_INFO("cmd = 12 : firmware read table\r\n");
+ break;
+ }
+ tmp[6] = (u32)(hit_info->thread_id_5_0 | (hit_info->thread_id_6 << 6));
+ LOG_DEV_INFO("thread_id = 0x%x\r\n", tmp[6]);
+ LOG_DEV_INFO("pf = 0x%x\r\n", hit_info->pf);
+ LOG_DEV_INFO("vf_vm = 0x%x\r\n", hit_info->vf_vm);
+ LOG_DEV_INFO("function_type = 0x%x\r\n", hit_info->function_type);
+ LOG_DEV_INFO("bypass_ft = 0x%x\r\n", hit_info->bypass_ft);
+ switch (hit_info->pcmd) {
+ case 0:
+ LOG_DEV_INFO("pcmd = 0, update table (whether or not entry exists)\r\n");
+ break;
+ case 1:
+ LOG_DEV_INFO("pcmd = 1, delete table\r\n");
+ break;
+ case 2:
+ LOG_DEV_INFO("pcmd = 2, add table\r\n");
+ break;
+ case 3:
+ LOG_DEV_INFO("pcmd = 3, substitute table (fails if entry does not exist)\r\n");
+ break;
+ }
+ LOG_DEV_INFO("comp_report = 0x%x\r\n", hit_info->comp_report);
+ LOG_DEV_INFO("fd_vsi = 0x%x\r\n", hit_info->fd_vsi);
+ LOG_DEV_INFO("comp_queue = 0x%x\r\n", hit_info->comp_queue);
+ LOG_DEV_INFO("not_enabled = 0x%x\r\n", hit_info->not_enabled);
+ LOG_DEV_INFO("bad_profileid = 0x%x\r\n", hit_info->bad_profile_id);
+ LOG_DEV_INFO("drop = 0x%x\r\n", hit_info->drop);
+ LOG_DEV_INFO("round_drop = 0x%x\r\n", hit_info->round_drop);
+ LOG_DEV_INFO("round_cnt = 0x%x\r\n", hit_info->round_cnt);
+}
+
+STATIC void
+sxe2_fnav_recorder1_additon_info(struct sxe2_adapter *adapter,
+ struct sxe2_fnav_addition_info *addition_info)
+{
+ LOG_DEV_INFO("fd trace1 addition info : \r\n");
+ LOG_DEV_INFO("fd_profile_id = 0x%x\r\n", addition_info->fd_profile_id);
+ if (addition_info->hit_flg & 0x1)
+ LOG_DEV_INFO("queue_hit_flag[0] vld , fd default action hit\r\n");
+ else if (addition_info->hit_flg & 0x2)
+ LOG_DEV_INFO("queue_hit_flag[1] vld , fd kt/fkot hit\r\n");
+ else if (addition_info->hit_flg & 0x4)
+ LOG_DEV_INFO("queue_hit_flag[2] vld , acl hit\r\n");
+ else if (addition_info->hit_flg & 0x8)
+ LOG_DEV_INFO("queue_hit_flag[3] vld , sw hit\r\n");
+ else
+ LOG_DEV_INFO("queue_hit_flag no vld , no fd/sw/acl select\r\n"); + + if (addition_info->rlt_sel == 0) + LOG_DEV_INFO("queue_sel_result = 0x0, queue/queue region non select\r\n"); + else if (addition_info->rlt_sel == 1) + LOG_DEV_INFO("queue_sel_result = 0x1, queue/queue region from fd\r\n"); + else if (addition_info->rlt_sel == 2) + LOG_DEV_INFO("queue_sel_result = 0x2, queue/queue region from acl/sw\r\n"); + + LOG_DEV_INFO("dst_vsi = 0x%x\r\n", addition_info->dst_vsi); + + LOG_DEV_INFO("rlt_queue = 0x%x\r\n", + addition_info->rlt_queue_8_0 | + (addition_info->rlt_queue_10_9 << 9)); + LOG_DEV_INFO("rlt_toqueue = 0x%x\r\n", addition_info->rlt_toqueue); + LOG_DEV_INFO("rlt_toqueue_pri = 0x%x\r\n", addition_info->rlt_toqueue_pri); + LOG_DEV_INFO("drop = 0x%x\r\n", addition_info->drop); + LOG_DEV_INFO("cmd = 0x%x\r\n", addition_info->cmd); + LOG_DEV_INFO("bypass_absq = 0x%x\r\n", addition_info->bypass_absq); + LOG_DEV_INFO("fd_search_ena = 0x%x\r\n", addition_info->fd_search_ena); + LOG_DEV_INFO("pkt_id = 0x%x\r\n", addition_info->pkt_id); + LOG_DEV_INFO("deflt_qindx_pri = 0x%x\r\n", addition_info->deflt_qindx_pri); + LOG_DEV_INFO("sa_toqueue_pri = 0x%x\r\n", addition_info->sa_toqueue_pri); + LOG_DEV_INFO("search_rss_fs_hit = 0x%x\r\n", + addition_info->search_rss_fs_hit); + LOG_DEV_INFO("bypass_rss = 0x%x\r\n", addition_info->bypass_rss); +} + +STATIC void +sxe2_fnav_recorder1_hit_info(struct sxe2_adapter *adapter, + struct sxe2_fnav_hit_info *hit_info, + struct sxe2_fnav_addition_info *addition_info) +{ + if (hit_info) { + sxe2_fnav_recorder1_hit_info_first(adapter, hit_info, addition_info); + sxe2_fnav_recorder1_hit_info_detail(adapter, hit_info, + addition_info); + } + + if (addition_info) + sxe2_fnav_recorder1_additon_info(adapter, addition_info); +} + +void sxe2_fwc_fnav_trace_recorder(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_fnav_trace_recorder recorder = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_TRACE_RECORDER, NULL, 0, + &recorder, + sizeof(struct sxe2_fnav_trace_recorder)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fnav trace recorder cmd fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("****fnav trace recorder start****"); + LOG_DEV_INFO("status0: %u\n", recorder.trace_status0); + LOG_DEV_INFO("status1: %u\n", recorder.trace_status1); + if (recorder.trace_status1 == 0) { + sxe2_fnav_recorder1_hit_info(adapter, &recorder.hit_info, + &recorder.addition_info); + } + LOG_DEV_INFO("status2: %u\n", recorder.trace_status2); + LOG_DEV_INFO("****fnav trace recorder end****"); +} + +void sxe2_fwc_fnav_hw_sts(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_fnav_hit_info hit_info = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_HW_STS, NULL, 0, &hit_info, + sizeof(struct sxe2_fnav_hit_info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("fnav hw sts cmd fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("****fnav hw sts start****"); + + sxe2_fnav_recorder1_hit_info(adapter, &hit_info, NULL); + + LOG_DEV_INFO("****fnav hw sts end****"); +} + +s32 sxe2_fwc_fnav_space_cnt_get(struct sxe2_adapter *adapter, u16 vsi_id, + u32 *gcnt_global, u32 *bcnt_global, u32 *gcnt_pf, + u32 *bcnt_pf, u32 *gcnt_vsi, u32 *bcnt_vsi) +{ + s32 ret; + struct sxe2_fnav_space_cnt space_cnt = {0}; + struct sxe2_cmd_params cmd = {}; + + space_cnt.vsi_id = cpu_to_le16(vsi_id); + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_SPACE_CNT_GET, 
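+ /*
+ * The same space_cnt structure serves as both the request (vsi_id
+ * filled in above) and the response buffer the firmware writes the
+ * six little-endian counters into; they are converted with
+ * le32_to_cpu() once the command completes.
+ */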
&space_cnt, + sizeof(space_cnt), &space_cnt, sizeof(space_cnt)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav get global space cnt failed, ret=%d\n", + ret); + ret = -EIO; + goto l_end; + } + + *gcnt_global = le32_to_cpu(space_cnt.gcnt_global); + *bcnt_global = le32_to_cpu(space_cnt.bcnt_global); + *gcnt_pf = le32_to_cpu(space_cnt.gcnt_pf); + *bcnt_pf = le32_to_cpu(space_cnt.bcnt_pf); + *gcnt_vsi = le32_to_cpu(space_cnt.gcnt_vsi); + *bcnt_vsi = le32_to_cpu(space_cnt.bcnt_vsi); + +l_end: + return ret; +} + +u32 sxe2_fnav_num_avail_filter(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + u32 bcnt_global = 0; + u32 gcnt_global = 0; + u32 bcnt_pf = 0; + u32 gcnt_pf = 0; + u32 bcnt_vsi = 0; + u32 gcnt_vsi = 0; + s32 ret = 0; + u32 avail_cnt = 0; + + ret = sxe2_fwc_fnav_space_cnt_get(adapter, vsi->idx_in_dev, &gcnt_global, + &bcnt_global, &gcnt_pf, &bcnt_pf, + &gcnt_vsi, &bcnt_vsi); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav get space cnt failed, ret=%d.\n", ret); + goto l_end; + } + + adapter->fnav_ctxt.space_bcnt = (u16)bcnt_global; + adapter->fnav_ctxt.space_gcnt = (u16)gcnt_pf; + + if (vsi->type == SXE2_VSI_T_VF || vsi->type == SXE2_VSI_T_DPDK_VF) + avail_cnt = vsi->fnav.space_bsize - bcnt_global; + else + avail_cnt = vsi->fnav.space_bsize - bcnt_global + + vsi->fnav.space_gsize - gcnt_pf; + + LOG_DEBUG_BDF("space bsize=%u, space gsize=%u, bcnt_global =%u, gcnt=%u, ret=%d.\n", + vsi->fnav.space_bsize, vsi->fnav.space_gsize, bcnt_global, + gcnt_pf, ret); + +l_end: + return avail_cnt; +} + +s32 sxe2_fnav_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter) +{ + u16 i = 0; + u16 j = 0; + u16 table_idx = 0; + s32 ret = 0; + u16 per_size = 0; + u16 ddp_max_cnt; + u8 port_idx = adapter->port_idx; + + per_size = sizeof(struct sxe2_ddp_rxft_ptg); + ddp_max_cnt = (SXE2_MAX_PTYPE_NUM * SXE2_MAX_CDID_NUM) / per_size; + if (!data || base_id >= ddp_max_cnt || cnt > ddp_max_cnt) { + LOG_ERROR_BDF("sxe2 fnav ptg parse from ddp failed, port_idx=%u !\n", + port_idx); + ret = -EINVAL; + goto l_end; + } + + table_idx = (u16)((u32)base_id * per_size); + for (i = 0; i < cnt; i++) { + for (j = 0; j < per_size; j++) { + if (table_idx >= (port_idx * SXE2_MAX_PTYPE_NUM) && + table_idx < ((port_idx + 1) * SXE2_MAX_PTYPE_NUM)) { + adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .pt_to_grp[table_idx % + SXE2_MAX_PTYPE_NUM] + .idx = *data; + } + table_idx++; + data++; + } + } + LOG_INFO_BDF("sxe2 fnav ptg parse from ddp, port_idx=%u !\n", port_idx); + +l_end: + return ret; +} + +s32 sxe2_fnav_mask_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter) +{ + struct sxe2_ddp_fnav_mask *ddp_mask = NULL; + u16 i = 0; + u16 j = 0; + u16 mask_idx = 0; + s32 ret = 0; + + if (!data || base_id >= SXE2_MAX_FV_MASK || + ((base_id + cnt) > SXE2_MAX_FV_MASK)) { + ret = -EINVAL; + goto l_end; + } + + for (i = 0; i < cnt; i++) { + mask_idx = base_id + i; + ddp_mask = (struct sxe2_ddp_fnav_mask *)data; + + adapter->fnav_ctxt.fnav_flow_ctxt.ppp.fv_mask[mask_idx].mask = + ddp_mask->val; + adapter->fnav_ctxt.fnav_flow_ctxt.ppp.fv_mask[mask_idx].mask_idx = + mask_idx; + + bitmap_zero(adapter->fnav_ctxt.fnav_flow_ctxt.ppp.fv_mask[mask_idx] + .filds, + SXE2_FLOW_FLD_ID_MAX); + + for (j = 0; j < min_t(u16, SXE2_FLD_BIT, SXE2_FLD_WIDTH); j++) { + if (ddp_mask->fldbit_l & BIT(j)) + set_bit(j, adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .fv_mask[mask_idx] + .filds); + } + + for (j = 0; j < min_t(u16, (SXE2_FLOW_FLD_ID_MAX - SXE2_FLD_BIT), + 
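+ /*
+ * The per-mask field bitmap arrives from the DDP package as two
+ * words: fldbit_l carries the first SXE2_FLD_BIT field bits and
+ * fldbit_h the remainder, each loop clamped to SXE2_FLD_WIDTH so a
+ * package cannot set bits beyond the word width.
+ */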
SXE2_FLD_WIDTH); + j++) { + if (ddp_mask->fldbit_h & BIT(j)) + set_bit(j + SXE2_FLD_BIT, + adapter->fnav_ctxt.fnav_flow_ctxt.ppp + .fv_mask[mask_idx] + .filds); + } + data += sizeof(struct sxe2_ddp_fnav_mask); + } + +l_end: + return ret; +} + +s32 sxe2_fnav_stat_idx_alloc_with_lock(struct sxe2_adapter *adapter, u16 vsi_id, + u16 *stat_index, bool need_update) +{ + struct sxe2_fnav_stat_node *stat_node = NULL; + struct sxe2_fnav_stat_node *tmp, *pre = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + u16 stat_base_id = adapter->fnav_ctxt.fnav_stat_ctxt.stat_base; + u16 stat_num = adapter->fnav_ctxt.fnav_stat_ctxt.stat_num; + u16 count = adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt; + u16 new_index = stat_base_id; + + if (count >= stat_num) + return -ENOMEM; + + mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + list_for_each_entry(tmp, &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list, l_node) { + if (tmp->stat_index > new_index) + break; + else if (tmp->stat_index == new_index) + new_index++; + + pre = tmp; + } + + stat_node = devm_kzalloc(dev, sizeof(*stat_node), GFP_KERNEL); + if (!stat_node) { + LOG_ERROR_BDF("no memory.\n"); + ret = -ENOMEM; + goto l_end; + } + stat_node->stat_index = new_index; + stat_node->vsi_id = vsi_id; + stat_node->need_update = need_update; + if (pre) { + list_add(&stat_node->l_node, &pre->l_node); + } else { + list_add(&stat_node->l_node, + &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list); + } + adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt++; + *stat_index = new_index; + +l_end: + mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + return ret; +} + +s32 sxe2_fnav_stat_idx_free_with_lock(struct sxe2_adapter *adapter, u16 stat_index) +{ + struct sxe2_fnav_stat_node *stat_node = NULL; + struct sxe2_fnav_stat_node *tmp = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + u16 stat_base_id = adapter->fnav_ctxt.fnav_stat_ctxt.stat_base; + u16 stat_num = adapter->fnav_ctxt.fnav_stat_ctxt.stat_num; + struct sxe2_fwc_fnav_stats_resp resp = {}; + + if (stat_index < stat_base_id || stat_index >= stat_base_id + stat_num) { + return -EINVAL; + } + + ret = sxe2_fnav_hw_stats_get(adapter, stat_index, true, + SXE2_FNAV_COUNTER_BANK_ALL, &resp); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav now has valid stat index! 
idx: %u\n", + stat_index); + goto l_end; + } + + mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + list_for_each_entry_safe(stat_node, tmp, + &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list, + l_node) { + if (stat_node->stat_index == stat_index) { + list_del(&stat_node->l_node); + devm_kfree(dev, stat_node); + break; + } + } + adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt--; + mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + +l_end: + return ret; +} + +STATIC void sxe2_fnav_stat_idx_sw_clear(struct sxe2_adapter *adapter) +{ + struct sxe2_fnav_stat_node *stat_node = NULL; + struct sxe2_fnav_stat_node *tmp = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + list_for_each_entry_safe(stat_node, tmp, + &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list, + l_node) { + list_del(&stat_node->l_node); + devm_kfree(dev, stat_node); + } + adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt = 0; + mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); +} + +void sxe2_fnav_reserve_stat_idx_alloc(struct sxe2_adapter *adapter) +{ + u32 i = 0; + u16 stat_idx = 0; + + for (i = 0; i < SXE2_FNAV_STAT_PF_MAX; i++) { + (void)sxe2_fnav_stat_idx_alloc_with_lock(adapter, + adapter->vsi_ctxt.main_vsi->id_in_pf, + &stat_idx, false); + adapter->fnav_ctxt.fnav_stat_ctxt.stat_rsv_idx[i] = stat_idx; + } +} + +void sxe2_fnav_stat_ctxt_init(struct sxe2_adapter *adapter) +{ + u32 i = 0; + struct sxe2_fnav_stat_ctxt *stat_ctxt = &adapter->fnav_ctxt.fnav_stat_ctxt; + + mutex_init(&stat_ctxt->fnav_stat_lock); + stat_ctxt->stat_base = adapter->caps_ctxt.fnav_stat_base; + stat_ctxt->stat_num = adapter->caps_ctxt.fnav_stat_num; + INIT_LIST_HEAD(&stat_ctxt->fnav_stat_list); + for (i = 0; i < SXE2_FNAV_STAT_PF_MAX; i++) + stat_ctxt->stat_rsv_idx[i] = SXE2_FNAV_INVALID_STAT_IDX; + memset(stat_ctxt->vsi_fnav_match, 0, sizeof(stat_ctxt->vsi_fnav_match)); +} + +void sxe2_fnav_stat_ctxt_deinit(struct sxe2_adapter *adapter) +{ + u32 i = 0; + struct sxe2_fnav_stat_ctxt *stat_ctxt = &adapter->fnav_ctxt.fnav_stat_ctxt; + + sxe2_fnav_stat_idx_sw_clear(adapter); + + for (i = 0; i < SXE2_FNAV_STAT_PF_MAX; i++) + stat_ctxt->stat_rsv_idx[i] = SXE2_FNAV_INVALID_STAT_IDX; + + mutex_destroy(&stat_ctxt->fnav_stat_lock); +} + +void sxe2_fnav_xlt2_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_xlt2_dump(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); +} + +void sxe2_fnav_vsig_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_vsig_dump(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); +} + +void sxe2_fnav_prof_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_prof_dump(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); +} + +void sxe2_fnav_mask_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_mask_dump(&adapter->fnav_ctxt.fnav_flow_ctxt.ppp); +} + +void sxe2_fnav_stats_dump(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + u32 gl_bcnt = 0; + u32 gl_gcnt = 0; + u32 pf_bcnt = 0; + u32 pf_gcnt = 0; + u32 vsi_bcnt = 0; + u32 vsi_gcnt = 0; + + LOG_DEV_INFO("fnav stats dump start\n"); + + ret = sxe2_fwc_fnav_space_cnt_get(adapter, + adapter->vsi_ctxt.main_vsi->idx_in_dev, + &gl_gcnt, + &gl_bcnt, &pf_gcnt, &pf_bcnt, + &vsi_gcnt, &vsi_bcnt); + if (ret) { + LOG_DEV_ERR("read space failed, ret %d\n", ret); + goto l_end; + } + + LOG_DEV_INFO("fnav gsize: %u, bsize: %u, gcnt: %u, bcnt: %u\n", + adapter->vsi_ctxt.main_vsi->fnav.space_gsize, + adapter->vsi_ctxt.main_vsi->fnav.space_bsize, + adapter->fnav_ctxt.space_gcnt, adapter->fnav_ctxt.space_bcnt); + LOG_DEV_INFO("fnav 
global space gcnt: %u, bcnt: %u\n", gl_gcnt, gl_bcnt); + LOG_DEV_INFO("fnav pf space gcnt: %u, bcnt: %u\n", pf_gcnt, pf_bcnt); + LOG_DEV_INFO("fnav vsi space gcnt: %u, bcnt: %u\n", vsi_gcnt, vsi_bcnt); + LOG_DEV_INFO("fnav pkt error cnt: %llu\n", adapter->fnav_ctxt.pkt_err_cnt); + +l_end: + LOG_DEV_INFO("fnav stats dump end\n"); +} + +void sxe2_comm_fnav_msg_convert_fld(unsigned long *flds, + struct sxe2_fnav_comm_proto_hdr *proto_hdr) +{ + DECLARE_BITMAP(tmp_flds_bit, SXE2_FLOW_FLD_ID_MAX); + u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + u32 i = 0; + + bitmap_zero(tmp_flds_bit, SXE2_FLOW_FLD_ID_MAX); + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++) + tmp_flds[i] = le32_to_cpu(proto_hdr->flds[i]); + + bitmap_from_arr32(tmp_flds_bit, tmp_flds, SXE2_FLOW_FLD_ID_MAX); + + bitmap_or(flds, flds, tmp_flds_bit, SXE2_FLOW_FLD_ID_MAX); +} + +STATIC s32 sxe2_fnav_flow_cfg_update(struct sxe2_vsi *vsi, + struct sxe2_fnav_flow_cfg *flow_cfg, + struct sxe2_fnav_flow_seg *segs_tmp, + bool is_tunnel) +{ + struct sxe2_adapter *adapter = vsi->adapter; + int ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_fnav_flow_seg *segs = NULL; + + segs = devm_kzalloc(dev, sizeof(*segs) * SXE2_FNAV_SEG_MAX, GFP_KERNEL); + if (!segs) { + LOG_ERROR_BDF("no memory for seg.\n"); + ret = -ENOMEM; + goto l_end; + } + + if (is_tunnel) + memcpy(segs, segs_tmp, sizeof(*segs) * SXE2_FNAV_SEG_MAX); + else + memcpy(segs, segs_tmp, + sizeof(*segs)); + + segs->is_tunnel = is_tunnel; + + ret = sxe2_fnav_flow_cfg_add(vsi, flow_cfg, segs); + if (ret == -EEXIST) { + goto l_free; + } else if (ret) { + LOG_ERROR_BDF("tunnel rule add failed, ret:%d, vsi id=%u.\n", ret, + vsi->id_in_pf); + goto l_free; + } + +l_end: + return ret; +l_free: + if (segs) + devm_kfree(dev, segs); + + goto l_end; +} + +STATIC void sxe2_fnav_hlist_add_for_arfs(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter) +{ + u16 vsi_id = filter->ori_vsi_sw; + u16 arfs_vsi_id = adapter->arfs_ctxt.vsi_id_in_pf; + + if (vsi_id == arfs_vsi_id) { + if (sxe2_fnav_flow_sup_arfs(filter->flow_type)) { + sxe2_fnav_filter_hash(filter); + hlist_add_head(&filter->hl_node, + &adapter->fnav_ctxt.filter_hlist + [filter->hash_val & + SXE2_FNAV_FLTR_HLIST_MASK]); + } + } +} + +STATIC void sxe2_fnav_hlist_del_for_arfs(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter) +{ + u16 vsi_id = filter->ori_vsi_sw; + u16 arfs_vsi_id = adapter->arfs_ctxt.vsi_id_in_pf; + + if (vsi_id == arfs_vsi_id) { + if (sxe2_fnav_flow_sup_arfs(filter->flow_type)) { + hlist_del(&filter->hl_node); + if (filter->conflict) + sxe2_monitor_work_schedule(adapter); + } + } +} + +STATIC bool sxe2_fnav_flow_parse_full_match(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_seg *segs) +{ + struct sxe2_adapter *adapter = vsi->adapter; + bool full_match = false; + DECLARE_BITMAP(full_match_flds, SXE2_FLOW_FLD_ID_MAX); + + if (vsi->id_in_pf != adapter->arfs_ctxt.vsi_id_in_pf) + goto l_end; + + if (!sxe2_fnav_support_full_match(filter->flow_type)) + goto l_end; + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL) + goto l_end; + + if (filter->full_key.has_flex_filed) + goto l_end; + + switch (filter->flow_type) { + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + bitmap_zero(full_match_flds, SXE2_FLOW_FLD_ID_MAX); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, full_match_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, full_match_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, full_match_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, full_match_flds); + 
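+ /*
+ * "Full match" means the segment keys on exactly the 4-tuple
+ * (source/destination address plus source/destination port) with no
+ * flex word and no tunnel; per the checks above it is only
+ * considered on the aRFS VSI.
+ */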
break;
+ case SXE2_FNAV_FLOW_TYPE_IPV4_UDP:
+ bitmap_zero(full_match_flds, SXE2_FLOW_FLD_ID_MAX);
+ set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, full_match_flds);
+ break;
+ case SXE2_FNAV_FLOW_TYPE_IPV6_TCP:
+ bitmap_zero(full_match_flds, SXE2_FLOW_FLD_ID_MAX);
+ set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, full_match_flds);
+ break;
+ case SXE2_FNAV_FLOW_TYPE_IPV6_UDP:
+ bitmap_zero(full_match_flds, SXE2_FLOW_FLD_ID_MAX);
+ set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, full_match_flds);
+ set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, full_match_flds);
+ break;
+ default:
+ break;
+ }
+
+ if (bitmap_equal(full_match_flds, segs->fields, SXE2_FLOW_FLD_ID_MAX))
+ full_match = true;
+
+l_end:
+ return full_match;
+}
+
+STATIC struct sxe2_fnav_flow_cfg *
+sxe2_fnav_flow_update_before_filter_add(struct sxe2_vsi *vsi,
+ struct sxe2_fnav_filter *filter,
+ struct sxe2_fnav_flow_seg *segs, s32 *result)
+{
+ struct sxe2_adapter *adapter = vsi->adapter;
+ struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+ int ret = 0;
+ enum sxe2_fnav_flow_type flow_type;
+ struct sxe2_fnav_flow_cfg *flow_cfg = NULL;
+ bool outer_new = false;
+ bool full_match;
+ bool new_alloc_flow = false;
+ struct sxe2_fnav_flow_cfg *flow_cfg_out = NULL;
+
+ flow_type = filter->flow_type;
+ if (flow_type == SXE2_FNAV_FLOW_TYPE_NONE) {
+ LOG_ERROR_BDF("invalid flow type %d, vsi id=%u.\n", flow_type,
+ vsi->id_in_pf);
+ ret = -EINVAL;
+ goto l_end;
+ }
+
+ full_match = sxe2_fnav_flow_parse_full_match(vsi, filter, segs);
+
+ flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type);
+ if (!flow_cfg) {
+ flow_cfg = devm_kzalloc(dev, sizeof(*flow_cfg), GFP_KERNEL);
+ if (!flow_cfg) {
+ ret = -ENOMEM;
+ goto l_end;
+ }
+ flow_cfg->flow_type = flow_type;
+ flow_cfg->peer_vsi_used.vsi_id_sw = SXE2_INVAL_U16;
+ flow_cfg->peer_vsi_used.filter_cnt = 0;
+ flow_cfg->self_vsi_used.vsi_id_sw = vsi->id_in_pf;
+ flow_cfg->self_vsi_used.filter_cnt = 0;
+ new_alloc_flow = true;
+ }
+
+ if (sxe2_fnav_filter_mutli_vsi_dup(adapter, filter, flow_cfg)) {
+ ret = -EEXIST;
+ goto l_free;
+ }
+
+ if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_NO_TUNNEL ||
+ filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) {
+ ret = sxe2_fnav_flow_cfg_update(vsi, flow_cfg, segs, false);
+ if (ret == 0) {
+ outer_new = true;
+ } else if (ret == -EEXIST) {
+ LOG_INFO_BDF("no tunnel rule exist, flow type=%d vsi id=%u.\n",
+ flow_type, vsi->id_in_pf);
+ ret = 0;
+ } else if (ret) {
+ LOG_ERROR_BDF("no tunnel rule add failed, ret:%d, vsi id=%u.\n",
+ ret, vsi->id_in_pf);
+ goto l_free;
+ }
+ }
+
+ if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL ||
+ filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) {
+ if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY)
+ memset(segs, 0, sizeof(struct sxe2_fnav_flow_seg));
+ ret = sxe2_fnav_flow_cfg_update(vsi, flow_cfg, segs, true);
+ if (ret == -EEXIST) {
+ LOG_DEBUG_BDF("tunnel rule exist.\n");
+ ret = 0;
+ } else if (ret) {
+ if (outer_new)
+ (void)sxe2_fnav_flow_cfg_del(adapter, flow_cfg,
+ false);
+ LOG_ERROR_BDF("tunnel rule add failed, ret:%d\n", ret);
+ goto l_free;
+ }
+ }
+
+ flow_cfg->full_match = full_match;
+ if (new_alloc_flow)
+ sxe2_fnav_flow_cfg_add_list(vsi, flow_cfg);
+
+ 
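+ /*
+ * Success: hand the (possibly newly allocated and just listed)
+ * flow_cfg back to the caller; on any failure above, only a cfg
+ * allocated in this call is freed and the errno is returned through
+ * *result.
+ */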
flow_cfg_out = flow_cfg; + goto l_end; + +l_free: + if (new_alloc_flow) + devm_kfree(dev, flow_cfg); +l_end: + *result = ret; + return flow_cfg_out; +} + +void sxe2_fnav_filter_add_list_by_loc(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter) +{ + struct sxe2_fnav_filter *filter_tmp, *pre = NULL; + struct sxe2_vsi_fnav *vsi_fnav = &vsi->fnav; + + list_for_each_entry(filter_tmp, &vsi_fnav->filter_list, l_node) { + if (filter_tmp->filter_loc >= filter->filter_loc) + break; + pre = filter_tmp; + } + + if (pre) + list_add(&filter->l_node, &pre->l_node); + else + list_add(&filter->l_node, &vsi_fnav->filter_list); + + vsi_fnav->filter_cnt++; +} + +STATIC void sxe2_fnav_filter_add_list(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, u32 *id) +{ + struct sxe2_fnav_filter *filter_tmp, *pre = NULL; + u32 flow_id = 0; + struct sxe2_vsi_fnav *vsi_fnav = &vsi->fnav; + + flow_id = (u32)find_first_zero_bit(vsi_fnav->flow_ids, + SXE2_FNAV_MAX_FILTERS); + set_bit((int)flow_id, vsi_fnav->flow_ids); + *id = flow_id; + filter->vsi_flow_id = flow_id; + + list_for_each_entry(filter_tmp, &vsi_fnav->filter_list, l_node) { + if (filter_tmp->vsi_flow_id >= filter->vsi_flow_id) + break; + pre = filter_tmp; + } + if (pre) + list_add(&filter->l_node, &pre->l_node); + else + list_add(&filter->l_node, &vsi_fnav->filter_list); + + vsi_fnav->filter_cnt++; +} + +STATIC s32 sxe2_comm_fnav_hw_filter_update(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_NO_TUNNEL || + filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + ret = sxe2_fnav_hw_filter_update_with_admin(vsi, filter, flow_cfg, + true, false); + if (ret) { + if (ret == -ENOSPC) { + LOG_ERROR_BDF("add hw filter failed, no space left\n"); + } else { + LOG_ERROR_BDF("add hw filter failed, ret:%d\n", ret); + } + goto l_unlock; + } + flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN]++; + if (filter->rule_vsi_sw != vsi->id_in_pf) { + flow_cfg->peer_vsi_used.vsi_id_sw = filter->rule_vsi_sw; + flow_cfg->peer_vsi_used.filter_cnt++; + } else { + flow_cfg->self_vsi_used.vsi_id_sw = filter->rule_vsi_sw; + flow_cfg->self_vsi_used.filter_cnt++; + } + } + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL || + filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + ret = sxe2_fnav_hw_filter_update_with_admin(vsi, filter, flow_cfg, + true, true); + if (ret) { + if (ret == -ENOSPC) { + LOG_ERROR_BDF("add hw filter failed, no space left\n"); + } else { + LOG_ERROR_BDF("add hw filter failed, ret:%d\n", ret); + } + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) { + (void)sxe2_fnav_hw_filter_update_with_admin(vsi, + filter, + flow_cfg, + false, + false); + flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN]--; + } + goto l_unlock; + } + flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN]++; + } + + sxe2_fnav_hlist_add_for_arfs(adapter, filter); + +l_unlock: + return ret; +} + +struct sxe2_fnav_filter * +sxe2_comm_fnav_filter_search_for_dup(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter) +{ + bool ret; + struct sxe2_fnav_filter *filter_tmp = NULL; + struct sxe2_fnav_filter *filter_find = NULL; + + list_for_each_entry(filter_tmp, &vsi->fnav.filter_list, l_node) { + ret = sxe2_fnav_filter_cmp(filter, filter_tmp); + if (ret) { + filter_find = filter_tmp; + break; + } + } + + return filter_find; +} + +s32 sxe2_fnav_filter_add_hw(struct sxe2_vsi *vsi, struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_seg *segs) +{ + struct sxe2_adapter 
*adapter = NULL;
+	s32 ret = 0;
+	struct sxe2_fnav_flow_cfg *flow_cfg = NULL;
+
+	if (!vsi) {
+		ret = -EINVAL;
+		LOG_ERROR("sxe2 fnav filter add failed, vsi is invalid.\n");
+		goto l_end;
+	}
+
+	adapter = vsi->adapter;
+
+	mutex_lock(&vsi->fnav.flow_cfg_lock);
+
+	flow_cfg = sxe2_fnav_flow_update_before_filter_add(vsi, filter, segs, &ret);
+	if (!flow_cfg) {
+		LOG_ERROR_BDF("sxe2 fnav flow cfg update before add fail ret: %d !\t"
+			      "vsi type: %u, idx: %u\n",
+			      ret, vsi->type, vsi->id_in_pf);
+		goto l_unlock;
+	}
+
+	ret = sxe2_fnav_filter_inputset_fill(vsi, filter, flow_cfg);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 fnav inputset fill fail ret: %d ! vsi type: %u,\t"
+			      "idx: %u\n",
+			      ret, vsi->type, vsi->id_in_pf);
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	ret = sxe2_comm_fnav_hw_filter_update(vsi, filter, flow_cfg);
+	if (ret) {
+		(void)sxe2_fnav_flow_update_after_filter_del(vsi, flow_cfg);
+		LOG_ERROR_BDF("sxe2 fnav update filter fail ret: %d ! vsi type: %u,\t"
+			      "idx: %u\n",
+			      ret, vsi->type, vsi->id_in_pf);
+	}
+
+l_unlock:
+	mutex_unlock(&vsi->fnav.flow_cfg_lock);
+l_end:
+	return ret;
+}
+
+s32 sxe2_fnav_filter_del_hw(struct sxe2_vsi *vsi, struct sxe2_fnav_filter *filter)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_fnav_flow_cfg *flow_cfg = NULL;
+	enum sxe2_fnav_flow_type flow_type;
+
+	if (!filter)
+		goto l_end;
+
+	mutex_lock(&vsi->fnav.flow_cfg_lock);
+
+	flow_type = sxe2_arfs_flow_to_fnav_flow(filter->flow_type);
+	flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type);
+	if (!flow_cfg)
+		goto l_unlock;
+
+	if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_NO_TUNNEL ||
+	    filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) {
+		ret = sxe2_fnav_hw_filter_update_with_admin(vsi, filter, flow_cfg,
+							    false, false);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 fnav delete outer hw filter failed, ret:%d\n",
+				      ret);
+			goto l_unlock;
+		}
+		flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN]--;
+		if (filter->rule_vsi_sw != vsi->id_in_pf)
+			flow_cfg->peer_vsi_used.filter_cnt--;
+		else
+			flow_cfg->self_vsi_used.filter_cnt--;
+	}
+
+	if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL ||
+	    filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) {
+		if (filter->tunn_flag) {
+			ret = sxe2_fnav_hw_filter_update_with_admin(vsi, filter,
+								    flow_cfg,
+								    false, true);
+			if (ret) {
+				LOG_ERROR_BDF("sxe2 fnav del inner hw filter failed, ret:%d\n",
+					      ret);
+				goto l_unlock;
+			}
+		}
+		flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN]--;
+	}
+
+	ret = sxe2_fnav_flow_update_after_filter_del(vsi, flow_cfg);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 fnav update flow cfg after del failed, ret:%d\n",
+			      ret);
+		goto l_unlock;
+	}
+	sxe2_fnav_hlist_del_for_arfs(adapter, filter);
+
+l_unlock:
+	mutex_unlock(&vsi->fnav.flow_cfg_lock);
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_fnav_filter_del_sw(struct sxe2_vsi *vsi,
+				   struct sxe2_fnav_filter *filter)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = NULL;
+	struct sxe2_fnav_flow_cfg *flow_cfg = NULL;
+	enum sxe2_fnav_flow_type flow_type;
+	struct device *dev = NULL;
+	u16 arfs_vsi_id;
+
+	if (!filter || !vsi)
+		goto l_end;
+
+	adapter = vsi->adapter;
+	dev = SXE2_ADAPTER_TO_DEV(adapter);
+	arfs_vsi_id = adapter->arfs_ctxt.vsi_id_in_pf;
+
+	mutex_lock(&vsi->fnav.flow_cfg_lock);
+	flow_type = sxe2_arfs_flow_to_fnav_flow(filter->flow_type);
+	flow_cfg = sxe2_fnav_find_flow_cfg_by_flow_type(vsi, flow_type);
+	if (!flow_cfg)
+		goto l_unlock;
+
+	if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_NO_TUNNEL ||
+	    filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) {
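+		/*
+		 * Software-only teardown: only the per-flow bookkeeping is
+		 * rolled back here; the hardware filter itself is not
+		 * touched (used when a VSI is freed without clearing hw).
+		 */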
flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN]--; + if (filter->rule_vsi_sw != vsi->id_in_pf) + flow_cfg->peer_vsi_used.filter_cnt--; + else + flow_cfg->self_vsi_used.filter_cnt--; + } + + if (filter->tunn_flag == SXE2_FNAV_TUN_FLAG_TUNNEL || + filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY) + flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN]--; + + if (flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN] == 0 && + flow_cfg->seg[SXE2_FNAV_SEG_TUN]) { + devm_kfree(dev, flow_cfg->seg[SXE2_FNAV_SEG_TUN]); + flow_cfg->seg[SXE2_FNAV_SEG_TUN] = NULL; + } + + if (flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0 && + flow_cfg->seg[SXE2_FNAV_SEG_NON_TUN]) { + devm_kfree(dev, flow_cfg->seg[SXE2_FNAV_SEG_NON_TUN]); + flow_cfg->seg[SXE2_FNAV_SEG_NON_TUN] = NULL; + } + + if (flow_cfg->filter_cnt[SXE2_FNAV_SEG_TUN] == 0 && + flow_cfg->filter_cnt[SXE2_FNAV_SEG_NON_TUN] == 0) { + list_del(&flow_cfg->l_node); + devm_kfree(dev, flow_cfg); + } + + if (filter->ori_vsi_sw == arfs_vsi_id && + sxe2_fnav_flow_sup_arfs(filter->flow_type)) + hlist_del(&filter->hl_node); + +l_unlock: + mutex_unlock(&vsi->fnav.flow_cfg_lock); +l_end: + return ret; +} + +s32 sxe2_fnav_filter_del(struct sxe2_vsi *rule_vsi, struct sxe2_fnav_filter *filter) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = rule_vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 ori_vsi_id = 0; + struct sxe2_vsi *ori_vsi = NULL; + u32 flow_id = 0; + + ori_vsi_id = filter->ori_vsi_sw; + flow_id = filter->vsi_flow_id; + ori_vsi = adapter->vsi_ctxt.vsi[ori_vsi_id]; + + if (!ori_vsi) { + ret = -EINVAL; + LOG_ERROR_BDF("sxe2 fnav del filter failed, ori vsi is null !!!,\t" + "ori_vsi_id=%u, rule_vsi_id=%u", + ori_vsi_id, rule_vsi->id_in_pf); + goto l_delete; + } + + ret = sxe2_fnav_filter_del_hw(ori_vsi, filter); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav delete filter failed, flow_id=%u,\t" + "ori_vsi_id=%u, ret:%d\n", + flow_id, ori_vsi->id_in_pf, ret); + goto l_end; + } + +l_delete: + rule_vsi->fnav.filter_cnt--; + list_del(&filter->l_node); + devm_kfree(dev, filter); + clear_bit((int)flow_id, rule_vsi->fnav.flow_ids); + +l_end: + return ret; +} + +struct sxe2_fnav_filter * +sxe2_fnav_find_filter_by_flow_id_unlock(struct sxe2_vsi_fnav *vsi_fnav, u32 flow_id) +{ + struct sxe2_fnav_filter *filter_tmp = NULL; + struct sxe2_fnav_filter *filter_find = NULL; + + list_for_each_entry(filter_tmp, &vsi_fnav->filter_list, l_node) { + if (flow_id == filter_tmp->vsi_flow_id) { + filter_find = filter_tmp; + break; + } + if (flow_id < filter_tmp->vsi_flow_id) + break; + } + + return filter_find; +} + +s32 sxe2_fnav_del_filter_by_flow_id(struct sxe2_adapter *adapter, u16 rule_vsi_id, + u32 flow_id) +{ + s32 ret = -ENOENT; + struct sxe2_fnav_filter *filter = NULL; + struct sxe2_vsi *rule_vsi = NULL; + + mutex_lock(&adapter->vsi_ctxt.lock); + + rule_vsi = adapter->vsi_ctxt.vsi[rule_vsi_id]; + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + filter = sxe2_fnav_find_filter_by_flow_id_unlock(&rule_vsi->fnav, flow_id); + if (filter) { + ret = sxe2_fnav_filter_del(rule_vsi, filter); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav delete filter failed, flow_id=%u,\t" + "rule_vsi_id=%u, ret:%d\n", + flow_id, rule_vsi_id, ret); + } + } + + mutex_unlock(&adapter->fnav_ctxt.filter_lock); + + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_fnav_hw_stats_get(struct sxe2_adapter *adapter, u16 stat_index, + u32 is_clear, enum sxe2_fnav_counter_bank_type bank_type, + struct sxe2_fwc_fnav_stats_resp *resp) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct 
sxe2_fwc_fnav_stats_req req = {}; + + req.is_clear = (u8)is_clear; + req.counter_idx = cpu_to_le16(stat_index); + req.bank_type = (u8)bank_type; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_STATS_GET, &req, sizeof(req), + resp, sizeof(struct sxe2_fwc_fnav_stats_resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav get state failed, stat_index=%u, ret=%d", + stat_index, ret); + } + return ret; +} + +void sxe2_fnav_match_stats_get(struct sxe2_adapter *adapter, u16 stat_index, + u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_fwc_fnav_stats_resp resp = {}; + u64 fnav_match = 0; + + if (stat_index == SXE2_FNAV_INVALID_STAT_IDX || vsi_id >= SXE2_MAX_VSI_NUM) + return; + + ret = sxe2_fnav_hw_stats_get(adapter, stat_index, true, + SXE2_FNAV_COUNTER_BANK_0, &resp); + if (!ret) + fnav_match = le64_to_cpu(resp.stats[0]); + + adapter->fnav_ctxt.fnav_stat_ctxt.vsi_fnav_match[vsi_id] += fnav_match; +} + +void sxe2_fnav_match_stats_update_batch(struct sxe2_adapter *adapter) +{ + struct sxe2_fnav_stat_node *stat_node = NULL; + struct sxe2_fnav_stat_ctxt *stat_ctxt = &adapter->fnav_ctxt.fnav_stat_ctxt; + u16 stat_cnt = 0; + u32 req_size = 0; + u32 rsp_size = 0; + s32 ret = 0; + u16 i = 0; + u16 vsi_id = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_fnav_match_req_batch *req = NULL; + struct sxe2_fwc_fnav_match_rsp_batch *rsp = NULL; + + mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + + list_for_each_entry(stat_node, &stat_ctxt->fnav_stat_list, l_node) { + if (stat_node->need_update) + stat_cnt++; + } + req_size = sizeof(struct sxe2_fwc_fnav_match_req_batch) + + stat_cnt * sizeof(struct sxe2_fwc_fnav_match_req); + rsp_size = sizeof(struct sxe2_fwc_fnav_match_rsp_batch) + + stat_cnt * sizeof(u64); + req = kzalloc(req_size, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto l_free; + } + rsp = kzalloc(rsp_size, GFP_KERNEL); + if (!rsp) { + ret = -ENOMEM; + goto l_free; + } + + req->stat_cnt = cpu_to_le16(stat_cnt); + req->is_clear = true; + req->bank_type = (u8)SXE2_FNAV_COUNTER_BANK_0; + list_for_each_entry(stat_node, &stat_ctxt->fnav_stat_list, l_node) { + if (stat_node->need_update) { + req->match_req[i].vsi_id = stat_node->vsi_id; + req->match_req[i].stat_idx = + cpu_to_le16(stat_node->stat_index); + i++; + } + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_MATCH_GET_BATCH, req, req_size, + rsp, rsp_size); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav get match batch failed, ret=%d", ret); + goto l_free; + } + + for (i = 0; i < stat_cnt; i++) { + vsi_id = req->match_req[i].vsi_id; + adapter->fnav_ctxt.fnav_stat_ctxt.vsi_fnav_match[vsi_id] += + le64_to_cpu(rsp->fnav_match[i]); + } + +l_free: + kfree(req); + kfree(rsp); + mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); +} + +STATIC s32 sxe2_fnav_stats_clear_by_vsi(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_fnav_stat_node *stat_node = NULL; + struct sxe2_fnav_stat_node *tmp = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + u16 vsi_id = vsi->id_in_pf; + struct sxe2_fwc_fnav_stats_resp resp = {}; + + mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock); + + list_for_each_entry_safe(stat_node, tmp, + &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list, + l_node) { + if (stat_node->vsi_id == vsi_id) { + ret = sxe2_fnav_hw_stats_get(adapter, + stat_node->stat_index, true, + SXE2_FNAV_COUNTER_BANK_ALL, &resp); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav stats clear 
failed,\t"
+					      "stat_index=%u, ret=%d",
+					      stat_node->stat_index, ret);
+				break;
+			}
+			list_del(&stat_node->l_node);
+			devm_kfree(dev, stat_node);
+			adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt--;
+		}
+	}
+
+	mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock);
+	return ret;
+}
+
+void sxe2_fnav_stats_free_by_vsi(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_fnav_stat_node *stat_node = NULL;
+	struct sxe2_fnav_stat_node *tmp = NULL;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	u16 vsi_id = vsi->id_in_pf;
+
+	mutex_lock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock);
+
+	list_for_each_entry_safe(stat_node, tmp,
+				 &adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_list,
+				 l_node) {
+		if (stat_node->vsi_id == vsi_id) {
+			list_del(&stat_node->l_node);
+			devm_kfree(dev, stat_node);
+			adapter->fnav_ctxt.fnav_stat_ctxt.stat_cnt--;
+		}
+	}
+
+	mutex_unlock(&adapter->fnav_ctxt.fnav_stat_ctxt.fnav_stat_lock);
+}
+
+s32 sxe2_fnav_del_filter_by_vsi(struct sxe2_vsi *rule_vsi)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = rule_vsi->adapter;
+	struct sxe2_fnav_filter *filter, *tmp;
+
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+	list_for_each_entry_safe(filter, tmp, &rule_vsi->fnav.filter_list, l_node) {
+		ret = sxe2_fnav_filter_del(rule_vsi, filter);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 fnav delete filter failed,\t"
+				      "rule_vsi_id=%u, ret:%d\n",
+				      rule_vsi->id_in_pf, ret);
+			break;
+		}
+	}
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+	return ret;
+}
+
+void sxe2_fnav_filter_free_by_vsi(struct sxe2_vsi *vsi)
+{
+	struct sxe2_vsi_fnav *vsi_fnav = NULL;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_fnav_filter *filter, *tmp;
+	struct sxe2_vsi *ori_vsi = NULL;
+
+	vsi_fnav = &vsi->fnav;
+	mutex_lock(&adapter->fnav_ctxt.filter_lock);
+	list_for_each_entry_safe(filter, tmp, &vsi_fnav->filter_list, l_node) {
+		ori_vsi = adapter->vsi_ctxt.vsi[filter->ori_vsi_sw];
+		(void)sxe2_fnav_filter_del_sw(ori_vsi, filter);
+		list_del(&filter->l_node);
+		devm_kfree(dev, filter);
+	}
+	vsi_fnav->filter_cnt = 0;
+	mutex_unlock(&adapter->fnav_ctxt.filter_lock);
+
+	bitmap_zero(vsi_fnav->flow_ids, SXE2_FNAV_MAX_FILTERS);
+}
+
+void sxe2_fnav_clean_by_vsi(struct sxe2_vsi *vsi, bool need_clear_hw)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = NULL;
+
+	if (!vsi)
+		return;
+
+	adapter = vsi->adapter;
+
+	if (need_clear_hw) {
+		ret = sxe2_fnav_del_filter_by_vsi(vsi);
+		if (ret)
+			LOG_ERROR_BDF("sxe2 fnav clean filter del failed, vsi_id:%d ret:%d !\n",
+				      vsi->id_in_pf, ret);
+
+		ret = sxe2_fnav_stats_clear_by_vsi(vsi);
+		if (ret)
+			LOG_ERROR_BDF("sxe2 fnav clean fnav stat idx fail vsi_id:%d ret:%d !\n",
+				      vsi->id_in_pf, ret);
+	} else {
+		sxe2_fnav_stats_free_by_vsi(vsi);
+		sxe2_fnav_filter_free_by_vsi(vsi);
+		sxe2_fnav_flow_cfg_free(vsi);
+	}
+
+	LOG_INFO_BDF("sxe2 fnav clean is done, vsi_id=%u, vsi_type=%u, clear_hw=%u.\n",
+		     vsi->id_in_pf, vsi->type, need_clear_hw);
+}
+
+void sxe2_fnav_vf_cfg_clear(struct sxe2_adapter *adapter)
+{
+	struct sxe2_vf_node *vf_node;
+	struct sxe2_vsi *dpdk_pf_vsi = NULL;
+	struct sxe2_vsi *main_vsi = adapter->vsi_ctxt.main_vsi;
+	u16 vf_idx;
+
+	sxe2_for_each_vf(adapter, vf_idx) {
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		mutex_lock(&adapter->vsi_ctxt.lock);
+		vf_node = SXE2_VF_NODE(adapter, vf_idx);
+		if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_KERNEL) {
+			dpdk_pf_vsi = sxe2_vsi_get_by_type_unlock(adapter,
+								  SXE2_VSI_T_DPDK_PF);
+			if (dpdk_pf_vsi)
(void)sxe2_fnav_action_to_vf_all_filter_del(dpdk_pf_vsi, + vf_node); + } + if (main_vsi) + (void)sxe2_fnav_action_to_vf_all_filter_del(main_vsi, + vf_node); + mutex_unlock(&adapter->vsi_ctxt.lock); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + + memset(adapter->fnav_ctxt.fnav_stat_ctxt.vsi_fnav_match, 0, + sizeof(adapter->fnav_ctxt.fnav_stat_ctxt.vsi_fnav_match)); +} + +STATIC void sxe2_fnav_stat_ctrl_convert(enum sxe2_fnav_stat_ctrl_type ori_stat_ctrl, + u32 *stat_ctrl) +{ + switch (ori_stat_ctrl) { + case SXE2_FNAV_STAT_ENA_NONE: + *stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_NONE; + break; + case SXE2_FNAV_STAT_ENA_PKTS: + *stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_PKTS; + break; + case SXE2_FNAV_STAT_ENA_BYTES: + *stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_BYTES; + break; + case SXE2_FNAV_STAT_ENA_ALL: + *stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_ALL; + break; + default: + *stat_ctrl = SXE2_FNAV_TX_DESC_QW0_STAT_ENA_NONE; + break; + } +} + +STATIC s32 sxe2_comm_fnav_parse_action(struct sxe2_fnav_filter *filter, + struct sxe2_fnav_comm_full_msg *full_msg) +{ + enum sxe2_fnav_act_type act_type = 0; + enum sxe2_fnav_stat_ctrl_type ctrl_type = 0; + u32 stat_ctrl = 0; + u32 dest_num = 0; + u32 mark_num = 0; + s32 ret = 0; + u32 i = 0; + + if (full_msg->action_cnt > SXE2_FNAV_MAX_NUM_ACTIONS) + return -EINVAL; + + for (i = 0; i < full_msg->action_cnt; i++) { + act_type = le32_to_cpu(full_msg->action[i].type); + switch (act_type) { + case SXE2_FNAV_ACTION_DROP: + dest_num++; + filter->act_type = SXE2_FNAV_ACT_DROP; + break; + case SXE2_FNAV_ACTION_PASSTHRU: + dest_num++; + filter->act_type = SXE2_FNAV_ACT_OTHER; + filter->act_prio = SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_ZERO; + break; + case SXE2_FNAV_ACTION_QUEUE: + dest_num++; + filter->act_type = SXE2_FNAV_ACT_QINDEX; + filter->q_index = + le16_to_cpu(full_msg->action[i].act_queue.q_index); + break; + case SXE2_FNAV_ACTION_Q_REGION: + dest_num++; + filter->act_type = SXE2_FNAV_ACT_QGROUP; + filter->q_index = + le16_to_cpu(full_msg->action[i].act_q_region.q_index); + filter->q_region = full_msg->action[i].act_q_region.region; + break; + case SXE2_FNAV_ACTION_MARK: + mark_num++; + filter->filter_loc = + le32_to_cpu(full_msg->action[i].act_mark.mark_id); + filter->fdid_prio = SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_THREE; + break; + case SXE2_FNAV_ACTION_COUNT: + ctrl_type = le32_to_cpu(full_msg->action[i].act_count.stat_ctrl); + sxe2_fnav_stat_ctrl_convert(ctrl_type, &stat_ctrl); + filter->stat_ctrl = (u8)stat_ctrl; + filter->stat_index = + (u16)(le32_to_cpu(full_msg->action[i].act_count.stat_index)); + break; + default: + return -EINVAL; + } + } + if (dest_num == 0 || dest_num >= 2) + return -EINVAL; + + if (mark_num >= 2) + return -EINVAL; + + return ret; +} + +STATIC s32 sxe2_comm_fnav_parse_proto(struct sxe2_fnav_filter *filter, + struct sxe2_fnav_comm_full_msg *full_msg) +{ + struct sxe2_fnav_comm_proto_hdr *hdr; + enum sxe2_flow_hdr hdr_type = 0; + s32 ret = 0; + u32 i = 0; + DECLARE_BITMAP(flds_bit, SXE2_FLOW_FLD_ID_MAX); + + if (full_msg->proto_cnt > SXE2_FNAV_MAX_NUM_PROTO_HDRS) + return -EINVAL; + + for (i = 0; i < full_msg->proto_cnt; i++) { + hdr = &full_msg->proto_hdr[i]; + hdr_type = hdr->type; + switch (hdr_type) { + case SXE2_FLOW_HDR_ETH: + if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER) { + filter->full_key.eth_inner.h_proto = hdr->eth.etype; + (void)memcpy(filter->full_key.eth_inner.h_source, + &hdr->eth.src, sizeof(hdr->eth.src)); + (void)memcpy(filter->full_key.eth_inner.h_dest, + &hdr->eth.dst, sizeof(hdr->eth.dst)); + } 
else { + filter->full_key.eth.h_proto = hdr->eth.etype; + (void)memcpy(filter->full_key.eth.h_source, + &hdr->eth.src, sizeof(hdr->eth.src)); + (void)memcpy(filter->full_key.eth.h_dest, + &hdr->eth.dst, sizeof(hdr->eth.dst)); + } + break; + case SXE2_FLOW_HDR_IPV4: + if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER) { + filter->full_key.ip_inner.v4.src_ip = + hdr->ipv4.saddr; + filter->full_key.ip_inner.v4.dst_ip = + hdr->ipv4.daddr; + filter->full_key.ip_inner.v4.tos = hdr->ipv4.tos; + filter->full_key.ip_inner.v4.proto = hdr->ipv4.proto; + filter->full_key.ip_inner.v4.ttl = hdr->ipv4.ttl; + } else { + filter->full_key.ip.v4.src_ip = hdr->ipv4.saddr; + filter->full_key.ip.v4.dst_ip = hdr->ipv4.daddr; + filter->full_key.ip.v4.tos = hdr->ipv4.tos; + filter->full_key.ip.v4.proto = hdr->ipv4.proto; + filter->full_key.ip.v4.ttl = hdr->ipv4.ttl; + } + break; + case SXE2_FLOW_HDR_IPV6: + bitmap_zero(flds_bit, SXE2_FLOW_FLD_ID_MAX); + sxe2_comm_fnav_msg_convert_fld(flds_bit, hdr); + if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER) { + (void)memcpy(filter->full_key.ip_inner.v6.src_ip, + &hdr->ipv6.src_ip, + sizeof(hdr->ipv6.src_ip)); + (void)memcpy(filter->full_key.ip_inner.v6.dst_ip, + &hdr->ipv6.dst_ip, + sizeof(hdr->ipv6.dst_ip)); + filter->full_key.ip_inner.v6.tc = hdr->ipv6.tc; + filter->full_key.ip_inner.v6.proto = hdr->ipv6.proto; + filter->full_key.ip_inner.v6.hlim = hdr->ipv6.hlim; + if ((!test_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, + flds_bit)) && + filter->flow_type == + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER) { + filter->full_key.ip_inner.v6.proto = + IPPROTO_NONE; + } + } else { + (void)memcpy(filter->full_key.ip.v6.src_ip, + &hdr->ipv6.src_ip, + sizeof(hdr->ipv6.src_ip)); + (void)memcpy(filter->full_key.ip.v6.dst_ip, + &hdr->ipv6.dst_ip, + sizeof(hdr->ipv6.dst_ip)); + filter->full_key.ip.v6.tc = hdr->ipv6.tc; + filter->full_key.ip.v6.proto = hdr->ipv6.proto; + filter->full_key.ip.v6.hlim = hdr->ipv6.hlim; + if ((!test_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, + flds_bit)) && + filter->flow_type == + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER) { + filter->full_key.ip.v6.proto = IPPROTO_NONE; + } + } + break; + case SXE2_FLOW_HDR_TCP: + case SXE2_FLOW_HDR_UDP: + case SXE2_FLOW_HDR_SCTP: + if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER) { + filter->full_key.l4_inner.src_port = + hdr->l4.src_port; + filter->full_key.l4_inner.dst_port = + hdr->l4.dst_port; + } else { + filter->full_key.l4.src_port = hdr->l4.src_port; + filter->full_key.l4.dst_port = hdr->l4.dst_port; + } + break; + case SXE2_FLOW_HDR_VLAN: + bitmap_zero(flds_bit, SXE2_FLOW_FLD_ID_MAX); + sxe2_comm_fnav_msg_convert_fld(flds_bit, hdr); + if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER) { + if (test_bit(SXE2_FLOW_FLD_ID_S_TCI, flds_bit)) + filter->full_key.ext_data_inner.s_vlan_tci = + hdr->vlan.vlan_tci; + else if (test_bit(SXE2_FLOW_FLD_ID_C_TCI, flds_bit)) + filter->full_key.ext_data_inner.c_vlan_tci = + hdr->vlan.vlan_tci; + else if (test_bit(SXE2_FLOW_FLD_ID_S_VID, flds_bit)) + filter->full_key.ext_data_inner.s_vlan_vid = + hdr->vlan.vlan_vid; + else if (test_bit(SXE2_FLOW_FLD_ID_C_VID, flds_bit)) + filter->full_key.ext_data_inner.c_vlan_vid = + hdr->vlan.vlan_vid; + + filter->full_key.ext_data_inner.vlan_type = + hdr->vlan.vlan_type; + } else { + if (test_bit(SXE2_FLOW_FLD_ID_S_TCI, flds_bit)) + filter->full_key.ext_data.s_vlan_tci = + hdr->vlan.vlan_tci; + else if (test_bit(SXE2_FLOW_FLD_ID_C_TCI, flds_bit)) + filter->full_key.ext_data.c_vlan_tci = + hdr->vlan.vlan_tci; + else if (test_bit(SXE2_FLOW_FLD_ID_S_VID, flds_bit)) + 
filter->full_key.ext_data.s_vlan_vid = + hdr->vlan.vlan_vid; + else if (test_bit(SXE2_FLOW_FLD_ID_C_VID, flds_bit)) + filter->full_key.ext_data.c_vlan_vid = + hdr->vlan.vlan_vid; + + filter->full_key.ext_data.vlan_type = + hdr->vlan.vlan_type; + } + break; + case SXE2_FLOW_HDR_VXLAN: + filter->full_key.tunnel_data.vxlan_vni = hdr->vxlan.vni; + break; + case SXE2_FLOW_HDR_GENEVE: + filter->full_key.tunnel_data.geneve_vni = hdr->geneve.vni; + break; + case SXE2_FLOW_HDR_GTPU: + filter->full_key.tunnel_data.gtpu_teid = hdr->gtpu.teid; + break; + case SXE2_FLOW_HDR_GRE: + filter->full_key.tunnel_data.gre_tni = hdr->gre.tni; + break; + case SXE2_FLOW_HDR_IPV_FRAG: + case SXE2_FLOW_HDR_IPV_OTHER: + break; + default: + return -EINVAL; + } + } + return ret; +} + +STATIC s32 sxe2_comm_fnav_filter_parse(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, + u16 ori_vsi_id, u16 dst_vsi_id, + u16 rule_vsi_id, + struct sxe2_fnav_comm_full_msg *full_msg) +{ + s32 ret = 0; + + if (!adapter->vsi_ctxt.vsi[ori_vsi_id] || + !adapter->vsi_ctxt.vsi[dst_vsi_id] || + adapter->vsi_ctxt.vsi[ori_vsi_id]->idx_in_dev >= SXE2_VSI_NUM || + adapter->vsi_ctxt.vsi[dst_vsi_id]->idx_in_dev >= SXE2_VSI_NUM) { + return -EINVAL; + } + + filter->ori_vsi_sw = ori_vsi_id; + filter->ori_vsi_hw = adapter->vsi_ctxt.vsi[ori_vsi_id]->idx_in_dev; + filter->dst_vsi_hw = adapter->vsi_ctxt.vsi[dst_vsi_id]->idx_in_dev; + filter->rule_vsi_sw = rule_vsi_id; + + filter->flow_type = le32_to_cpu(full_msg->flow_type); + + ret = sxe2_comm_fnav_parse_proto(filter, full_msg); + if (ret) + return ret; + + if (full_msg->filter_loc != SXE2_VF_FNAV_INVALID_LOC) { + filter->filter_loc = le32_to_cpu(full_msg->filter_loc); + filter->fdid_prio = SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_THREE; + } + + filter->tunn_flag = (u8)(le32_to_cpu(full_msg->tunn_flag)); + + filter->full_key.has_flex_filed = + full_msg->usr_data.has_flex_filed == 1 ? 
true : false;
+	filter->full_key.flex_offset = full_msg->usr_data.flex_offset;
+	filter->full_key.flex_word = full_msg->usr_data.flex_word;
+
+	filter->act_prio = SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_THREE;
+	filter->complete_report = SXE2_FNAV_TX_DESC_QW0_COMP_RPT_FAIL;
+
+	filter->fd_space = (u8)SXE2_FNAV_FD_SPACE_FROM_BEST_EFFORT;
+	filter->tunn_fd_space = (u8)SXE2_FNAV_FD_SPACE_FROM_BEST_EFFORT;
+
+	ret = sxe2_comm_fnav_parse_action(filter, full_msg);
+	if (ret)
+		return ret;
+
+	INIT_HLIST_NODE(&filter->hl_node);
+
+	LOG_INFO_BDF("sxe2 fnav parse pattern success, flow type=%d, ori_vsi=%u, dst_vsi=%u,\n"
+		     "act type=%d, q index=%u, loc=%u, has flex=%u, tunn_flag=%u.\n",
+		     filter->flow_type, filter->ori_vsi_hw, filter->dst_vsi_hw,
+		     filter->act_type, filter->q_index, filter->filter_loc,
+		     filter->full_key.has_flex_filed, filter->tunn_flag);
+
+	return 0;
+}
+
+void sxe2_eth_fnav_outer_hdr_set_eth(enum sxe2_fnav_flow_type flow_type,
+				     struct sxe2_fnav_flow_seg *seg_outer)
+{
+	if (sxe2_eth_fnav_support_flow_type(flow_type))
+		set_bit(SXE2_FLOW_HDR_ETH, seg_outer->headers);
+}
+
+STATIC void sxe2_comm_fnav_seg_parse(struct sxe2_vsi *rule_vsi,
+				     struct sxe2_fnav_comm_full_msg *full_msg,
+				     struct sxe2_fnav_flow_seg *segs)
+{
+	struct sxe2_fnav_flow_seg *seg_outer = NULL;
+	struct sxe2_fnav_flow_seg *seg_inner = NULL;
+	struct sxe2_fnav_comm_proto_hdr *hdr = NULL;
+	enum sxe2_flow_hdr hdr_type = 0;
+	u32 i = 0;
+
+	memset(segs, 0, sizeof(struct sxe2_fnav_flow_seg) * SXE2_FNAV_SEG_MAX);
+	seg_outer = &segs[0];
+	seg_inner = &segs[1];
+
+	for (i = 0; i < full_msg->proto_cnt; i++) {
+		hdr = &full_msg->proto_hdr[i];
+		hdr_type = le32_to_cpu(hdr->type);
+		if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_OUTER ||
+		    hdr->tunnel_level == SXE2_FNAV_TUNNEL_ANY) {
+			set_bit((int)hdr_type, seg_outer->headers);
+			sxe2_comm_fnav_msg_convert_fld(seg_outer->fields, hdr);
+		}
+		if (hdr->tunnel_level == SXE2_FNAV_TUNNEL_INNER ||
+		    hdr->tunnel_level == SXE2_FNAV_TUNNEL_ANY) {
+			set_bit((int)hdr_type, seg_inner->headers);
+			sxe2_comm_fnav_msg_convert_fld(seg_inner->fields, hdr);
+		}
+	}
+
+	if (rule_vsi->type == SXE2_VSI_T_VF)
+		sxe2_eth_fnav_outer_hdr_set_eth(le32_to_cpu(full_msg->flow_type),
+						seg_outer);
+
+	if (full_msg->usr_data.has_flex_filed) {
+		seg_outer->raw[0].offset = full_msg->usr_data.flex_offset;
+		seg_outer->raw[0].len = SXE2_FNAV_FLEX_WROD_SIZE;
+		seg_outer->raw_cnt = 1;
+
+		seg_inner->raw[0].offset = full_msg->usr_data.flex_offset;
+		seg_inner->raw[0].len = SXE2_FNAV_FLEX_WROD_SIZE;
+		seg_inner->raw_cnt = 1;
+	}
+}
+
+s32 sxe2_comm_add_fnav_filter(struct sxe2_adapter *adapter, u16 ori_vsi_id,
+			      u16 dst_vsi_id, u16 rule_vsi_id,
+			      struct sxe2_fnav_comm_full_msg *full_msg, u32 *flow_id)
+{
+	s32 ret = 0;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	struct sxe2_vsi *ori_vsi = NULL;
+	struct sxe2_vsi *rule_vsi = NULL;
+	struct sxe2_fnav_filter *filter = NULL;
+	struct sxe2_fnav_flow_seg segs[SXE2_FNAV_SEG_MAX];
+	u32 need_filter = 0;
+	struct sxe2_fnav_filter *dup_filter = NULL;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	ori_vsi = adapter->vsi_ctxt.vsi[ori_vsi_id];
+	rule_vsi = adapter->vsi_ctxt.vsi[rule_vsi_id];
+
+	if (!ori_vsi || !rule_vsi) {
+		LOG_ERROR_BDF("vsi is null.\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	filter = devm_kzalloc(dev, sizeof(*filter), GFP_KERNEL);
+	if (!filter) {
+		LOG_ERROR_BDF("no memory.\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	ret = sxe2_comm_fnav_filter_parse(adapter, filter, ori_vsi_id, dst_vsi_id,
+					  rule_vsi_id, full_msg);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 fnav 
parse pattern fail ret: %d ! vsi type: %u, idx: %u\n", + ret, rule_vsi->type, rule_vsi->id_in_pf); + goto l_end; + } + + need_filter = filter->tunn_flag == SXE2_FNAV_TUN_FLAG_ANY ? 2 : 1; + if (sxe2_fnav_num_avail_filter(ori_vsi) < need_filter) { + LOG_ERROR_BDF("sxe2 fnav do not has avail filter.\n"); + ret = -ENOSPC; + goto l_end; + } + + mutex_lock(&adapter->fnav_ctxt.filter_lock); + + dup_filter = sxe2_comm_fnav_filter_search_for_dup(rule_vsi, filter); + if (dup_filter && dup_filter->ori_vsi_hw == filter->ori_vsi_hw) { + LOG_ERROR_BDF("sxe2 fnav has dup filter.\n"); + ret = -EEXIST; + goto l_unlock; + } + + sxe2_comm_fnav_seg_parse(rule_vsi, full_msg, segs); + + ret = sxe2_fnav_filter_add_hw(ori_vsi, filter, segs); + if (ret) { + LOG_ERROR_BDF("sxe2 fnav add filter fail ret: %d ! vsi type: %u, idx: %u\n", + ret, rule_vsi->type, rule_vsi->id_in_pf); + goto l_unlock; + } + + sxe2_fnav_filter_add_list(rule_vsi, filter, flow_id); + +l_unlock: + mutex_unlock(&adapter->fnav_ctxt.filter_lock); +l_end: + if (ret && filter) + devm_kfree(dev, filter); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.h new file mode 100644 index 0000000000000000000000000000000000000000..50a333c62b4d647d1fa5fbe2fa11e8fefb0a7499 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_fnav.h @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_fnav.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_FNAV_H__ +#define __SXE2_FNAV_H__ + +#include +#include +#include +#include + +#include "sxe2_vsi.h" +#include "sxe2_flow.h" + +#ifdef SXE2_TEST +#define STATIC +#else +#define STATIC static +#endif + +#define SXE2_FNAV_FLEX_WROD_SIZE 2 + +#define SXE2_FNAV_TUN_PKT_OFF (50) +#define SXE2_FNAV_VXLAN_UDP_LEN (16) +#define SXE2_FNAV_ETH_LEN (14) +#define SXE2_FNAV_GRE_HEADER_LEN (4) + +#define SXE2_FNAV_MAX_RAW_PKT_SIZE (512 + SXE2_FNAV_TUN_PKT_OFF) + +#define SXE2_ETH_TYPE_F_OFFSET 12 +#define SXE2_ETH_VLAN_TCI_OFFSET 14 +#define SXE2_ETH_TYPE_VLAN_OFFSET 16 + +#define SXE2_IPV4_OUTER_LEN_OFFSET 16 + +#define SXE2_IPV4_SRC_ADDR_OFFSET 26 +#define SXE2_IPV4_DST_ADDR_OFFSET 30 +#define SXE2_IPV4_TCP_SRC_PORT_OFFSET 34 +#define SXE2_IPV4_TCP_DST_PORT_OFFSET 36 +#define SXE2_IPV4_UDP_SRC_PORT_OFFSET 34 +#define SXE2_IPV4_UDP_DST_PORT_OFFSET 36 +#define SXE2_IPV4_SCTP_SRC_PORT_OFFSET 34 +#define SXE2_IPV4_SCTP_DST_PORT_OFFSET 36 +#define SXE2_IPV4_UDP_LEN_OFFSET 38 +#define SXE2_IPV4_PROTO_OFFSET 23 +#define SXE2_IPV6_SRC_ADDR_OFFSET 22 +#define SXE2_IPV6_DST_ADDR_OFFSET 38 +#define SXE2_IPV6_TCP_SRC_PORT_OFFSET 54 +#define SXE2_IPV6_TCP_DST_PORT_OFFSET 56 +#define SXE2_IPV6_UDP_SRC_PORT_OFFSET 54 +#define SXE2_IPV6_UDP_DST_PORT_OFFSET 56 +#define SXE2_IPV6_SCTP_SRC_PORT_OFFSET 54 +#define SXE2_IPV6_SCTP_DST_PORT_OFFSET 56 + +#define SXE2_MAC_ETHTYPE_OFFSET 12 +#define SXE2_IPV4_TOS_OFFSET 15 +#define SXE2_IPV4_ID_OFFSET 18 +#define SXE2_IPV4_TTL_OFFSET 22 +#define SXE2_IPV6_TC_OFFSET 14 +#define SXE2_IPV6_HLIM_OFFSET 21 +#define SXE2_IPV6_PROTO_OFFSET 20 +#define SXE2_IPV6_ID_OFFSET 58 +#define SXE2_IPV4_NO_MAC_TOS_OFFSET 1 +#define SXE2_IPV4_NO_MAC_TTL_OFFSET 8 +#define SXE2_IPV4_NO_MAC_PROTO_OFFSET 9 +#define SXE2_IPV4_NO_MAC_SRC_ADDR_OFFSET 12 +#define SXE2_IPV4_NO_MAC_DST_ADDR_OFFSET 16 +#define SXE2_TCP4_NO_MAC_SRC_PORT_OFFSET 20 +#define 
SXE2_TCP4_NO_MAC_DST_PORT_OFFSET 22 +#define SXE2_UDP4_NO_MAC_SRC_PORT_OFFSET 20 +#define SXE2_UDP4_NO_MAC_DST_PORT_OFFSET 22 +#define SXE2_IPV6_NO_MAC_TC_OFFSET 0 +#define SXE2_IPV6_NO_MAC_HLIM_OFFSET 7 +#define SXE2_IPV6_NO_MAC_PROTO_OFFSET 6 +#define SXE2_IPV6_NO_MAC_SRC_ADDR_OFFSET 8 +#define SXE2_IPV6_NO_MAC_DST_ADDR_OFFSET 24 +#define SXE2_TCP6_NO_MAC_SRC_PORT_OFFSET 40 +#define SXE2_TCP6_NO_MAC_DST_PORT_OFFSET 42 +#define SXE2_UDP6_NO_MAC_SRC_PORT_OFFSET 40 +#define SXE2_UDP6_NO_MAC_DST_PORT_OFFSET 42 +#define SXE2_IPV4_GTPU_TEID_OFFSET 46 +#define SXE2_IPV4_GTPU_QFI_OFFSET 56 +#define SXE2_IPV6_GTPU_TEID_OFFSET 66 +#define SXE2_IPV6_GTPU_QFI_OFFSET 76 +#define SXE2_IPV4_GTPOGRE_TEID_OFFSET 70 +#define SXE2_IPV4_GTPOGRE_QFI_OFFSET 80 +#define SXE2_IPV6_GTPOGRE_TEID_OFFSET 90 +#define SXE2_IPV6_GTPOGRE_QFI_OFFSET 100 +#define SXE2_IPV4_L2TPV3_SESS_ID_OFFSET 34 +#define SXE2_IPV6_L2TPV3_SESS_ID_OFFSET 54 +#define SXE2_IPV4_ESP_SPI_OFFSET 34 +#define SXE2_IPV6_ESP_SPI_OFFSET 54 +#define SXE2_IPV4_AH_SPI_OFFSET 38 +#define SXE2_IPV6_AH_SPI_OFFSET 58 +#define SXE2_IPV4_NAT_T_ESP_SPI_OFFSET 42 +#define SXE2_IPV6_NAT_T_ESP_SPI_OFFSET 62 +#define SXE2_IPV4_VXLAN_VNI_OFFSET 46 +#define SXE2_ECPRI_TP0_PC_ID_OFFSET 18 +#define SXE2_IPV4_UDP_ECPRI_TP0_PC_ID_OFFSET 46 +#define SXE2_IPV4_L2TPV2_SESS_ID_OFFSET 46 +#define SXE2_IPV6_L2TPV2_SESS_ID_OFFSET 66 +#define SXE2_IPV4_L2TPV2_LEN_SESS_ID_OFFSET 48 +#define SXE2_IPV6_L2TPV2_LEN_SESS_ID_OFFSET 68 + +#define SXE2_FNAV_IPV4_PKT_FLAG_MF 0x20 +#define SXE2_FNAV_IPV4_PKT_FLAG_MF_SHIFT 8 +#define SXE2_FNAV_IPV4_PKT_FLAG_DF 0x40 + +#define SXE2_FNAV_INVALID_STAT_IDX 0xFFFF + +#define SXE2_FNAV_L4_PROT_TCP 6 +#define SXE2_FNAV_L4_PROT_UDP 17 +#define SXE2_FNAV_L4_PROT_SCTP 132 + +enum sxe2_fnav_stat_idx { + SXE2_FNAV_STAT_PF, + SXE2_FNAV_STAT_CH, + SXE2_ARFS_STAT_TCP4, + SXE2_ARFS_STAT_UDP4, + SXE2_ARFS_STAT_TCP6, + SXE2_ARFS_STAT_UDP6, + SXE2_FNAV_STAT_PF_MAX, +}; + +enum sxe2_hw_fnav_act_type { + SXE2_FNAV_ACT_DROP, + SXE2_FNAV_ACT_QINDEX, + SXE2_FNAV_ACT_QGROUP, + SXE2_FNAV_ACT_OTHER, +}; + +#define SXE2_FNAV_FLTR_HLIST_CNT 1024 +#define SXE2_FNAV_FLTR_HLIST_MASK (SXE2_FNAV_FLTR_HLIST_CNT - 1) +#define SXE2_FNAV_HASH_FLD_MAX_SIZE 36 +#define SXE2_FNAV_IPV4_ADDR_SIZE 4 +#define SXE2_FNAV_IPV6_ADDR_SIZE 16 +#define SXE2_FNAV_L4_PORT_SIZE 2 +#define SXE2_FNAV_IP4_HASH_FLD_SIZE \ + ((SXE2_FNAV_IPV4_ADDR_SIZE + SXE2_FNAV_L4_PORT_SIZE) * 2) +#define SXE2_FNAV_IP6_HASH_FLD_SIZE \ + ((SXE2_FNAV_IPV6_ADDR_SIZE + SXE2_FNAV_L4_PORT_SIZE) * 2) + +struct sxe2_fnav_base_pkt { + enum sxe2_fnav_flow_type flow_type; + u16 pkt_len; + const u8 *pkt; + u16 tun_pkt_len; + const u8 *tun_pkt; +}; + +struct sxe2_fnav_v4 { + __be32 dst_ip; + __be32 src_ip; + __be32 l4_header; + __be32 sec_parm_idx; + u8 tos; + u8 ip_ver; + u8 proto; + u8 ttl; + __be16 packet_id; +}; + +#define SXE2_IPV6_ADDR_LEN_TO_U32 4 + +struct sxe2_fnav_v6 { + __be32 dst_ip[SXE2_IPV6_ADDR_LEN_TO_U32]; + __be32 src_ip[SXE2_IPV6_ADDR_LEN_TO_U32]; + __be32 l4_header; + __be32 sec_parm_idx; + u8 tc; + u8 proto; + u8 hlim; + __be32 packet_id; +}; + +struct sxe2_fnav_l4 { + __be16 dst_port; + __be16 src_port; +}; + +struct sxe2_fnav_extra { + u8 dst_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + __be16 ether_type; + __be32 usr_def[2]; + __be16 vlan_type; + __be16 s_vlan_tci; + __be16 c_vlan_tci; + __be16 s_vlan_vid; + __be16 c_vlan_vid; +}; + +struct sxe2_fnav_ipsec { + __be32 sec_parm_idx; +}; + +struct sxe2_fnav_l2tpv3 { + __be32 session_id; +}; + +struct sxe2_fnav_tunnel_id { + union { + __be32 vxlan_vni; + 
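	/* tunnel identifier in network byte order; the members below
+	 * alias vxlan_vni, and only the one matching the encapsulation
+	 * (VXLAN/GENEVE VNI, GTP-U TEID, GRE TNI) is meaningful
+	 */
+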
__be32 geneve_vni; + __be32 gtpu_teid; + __be32 gre_tni; + }; +}; + +struct sxe2_fnav_filter_full_key { + struct ethhdr eth, eth_mask; + struct ethhdr eth_inner, eth_mask_inner; + + union { + struct sxe2_fnav_v4 v4; + struct sxe2_fnav_v6 v6; + } ip, mask; + + union { + struct sxe2_fnav_v4 v4; + struct sxe2_fnav_v6 v6; + } ip_inner, mask_inner; + + struct sxe2_fnav_l4 l4, l4_mask; + struct sxe2_fnav_l4 l4_inner, l4_mask_inner; + + struct sxe2_fnav_extra ext_data; + struct sxe2_fnav_extra ext_mask; + + struct sxe2_fnav_extra ext_data_inner; + struct sxe2_fnav_extra ext_mask_inner; + + struct sxe2_fnav_tunnel_id tunnel_data; + struct sxe2_fnav_tunnel_id tunnel_mask; + + bool has_flex_filed; + u16 flex_offset; + __be16 flex_word; + + bool flow_ext; +}; + +struct sxe2_fnav_filter { + struct list_head l_node; + struct hlist_node + hl_node; + u32 hash_val; + bool hashed; + bool conflict; + u32 filter_loc; + u8 fdid_prio; + u16 q_index; + u16 origin_q_index; + u8 q_region; + u8 act_prio; + u16 ori_vsi_sw; + u16 ori_vsi_hw; + u16 dst_vsi_hw; + u16 rule_vsi_sw; + enum sxe2_hw_fnav_act_type + act_type; + enum sxe2_fnav_flow_type flow_type; + struct sxe2_fnav_filter_full_key full_key; + u8 complete_report; + u8 stat_ctrl; + u16 stat_index; + u8 inputset[SXE2_FNAV_INPUT_CNT * 2]; + u8 fd_space; + u8 tunn_fd_space; + u32 vsi_flow_id; + u8 tunn_flag; + u16 vf_idx; +}; + +enum sxe2_fnav_filter_update_type { + SXE2_FNAV_FILTER_UPDATE_ADMIN = 0, + SXE2_FNAV_FILTER_UPDATE_PKT, +}; + +enum sxe2_fnav_fd_space_type { + SXE2_FNAV_FD_SPACE_FROM_GUAR = 0, + SXE2_FNAV_FD_SPACE_FROM_BEST_EFFORT, + SXE2_FNAV_FD_SPACE_FROM_GUAR_1ST_BE_2ND, + SXE2_FNAV_FD_SPACE_FROM_BE_1ST_GUAR_2ND, +}; + +struct sxe2_fnav_flow_context { + struct sxe2_ppp_common_ctxt ppp; +}; + +enum sxe2_fnav_state { + SXE2_FNAV_STATE_UNINIT, + SXE2_FNAV_STATE_READY, + SXE2_FNAV_STATE_RESET, +}; + +struct sxe2_fnav_stat_node { + struct list_head l_node; + u16 vsi_id; + u16 stat_index; + bool need_update; +}; + +struct sxe2_fnav_stat_ctxt { + u16 stat_base; + u16 stat_num; + u16 stat_cnt; + u16 stat_rsv_idx[SXE2_FNAV_STAT_PF_MAX]; + u64 vsi_fnav_match[SXE2_MAX_VSI_NUM]; + struct mutex fnav_stat_lock; + struct list_head + fnav_stat_list; +}; + +struct sxe2_fnav_context { + u16 space_gcnt; + u16 space_bcnt; + u64 pkt_err_cnt; + struct mutex filter_lock; + struct mutex fnav_space_lock; + struct hlist_head filter_hlist[SXE2_FNAV_FLTR_HLIST_CNT]; + enum sxe2_fnav_state state; + struct mutex fnav_state_lock; + + struct sxe2_fnav_flow_context fnav_flow_ctxt; + struct sxe2_fnav_stat_ctxt fnav_stat_ctxt; +}; + +void sxe2_fnav_ctxt_init(struct sxe2_adapter *adapter); + +void sxe2_fnav_ctxt_deinit(struct sxe2_adapter *adapter); + +void sxe2_fnav_enter_reset(struct sxe2_adapter *adapter, bool to_reset); + +void sxe2_fnav_flow_ctxt_clean(struct sxe2_adapter *adapter); + +s32 sxe2_fnav_flow_cfg_del(struct sxe2_adapter *adapter, + struct sxe2_fnav_flow_cfg *flow_cfg, bool is_tunnel); + +s32 sxe2_fnav_flow_cfg_add(struct sxe2_vsi *vsi, struct sxe2_fnav_flow_cfg *flow_cfg, + struct sxe2_fnav_flow_seg *seg); + +s32 sxe2_fnav_default_flow_set(struct sxe2_adapter *adapter); + +bool sxe2_fnav_flow_sup_arfs(enum sxe2_fnav_flow_type flow_type); + +void sxe2_fnav_filter_hash(struct sxe2_fnav_filter *filter); + +s32 sxe2_pf_fnav_flow_cfg_clear(struct sxe2_adapter *adapter); + +s32 sxe2_fnav_switch(struct sxe2_adapter *adapter, bool is_enable); + +u32 sxe2_fnav_max_filter_cnt_get_by_vsi(struct sxe2_vsi *vsi); + +struct sxe2_fnav_filter 
+*sxe2_fnav_find_filter_by_loc_unlock(struct sxe2_vsi_fnav *fnav_filter_ctxt, u32 loc); + +struct sxe2_fnav_filter * +sxe2_fnav_find_filter_by_loc_lock(struct sxe2_vsi *vsi, u32 loc); + +bool sxe2_fnav_filter_cmp_with_flow_type(struct sxe2_fnav_filter *fltrA, + struct sxe2_fnav_filter *fltrB); + +bool sxe2_fnav_flow_cfg_full_match(struct sxe2_adapter *adapter, + enum sxe2_fnav_flow_type flow_type); + +s32 sxe2_fnav_filter_inputset_fill(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg); + +s32 sxe2_fnav_hw_filter_update_with_admin(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_cfg *flow_cfg, + bool is_add, bool is_tunn); + +s32 sxe2_fnav_hw_filter_update_with_pkt(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + bool is_add, bool is_update, bool is_tunn); + +s32 sxe2_fnav_default_flow_recovery_by_type(struct sxe2_vsi *vsi, + struct sxe2_fnav_flow_cfg *flow_cfg); + +s32 sxe2_fnav_del_filter_by_loc(struct sxe2_vsi *vsi, u32 loc); + +s32 sxe2_fwc_fnav_hw_clear(struct sxe2_adapter *adapter); + +s32 sxe2_fnav_rule_reply(struct sxe2_adapter *adapter); + +void sxe2_fwc_fnav_trace_trigger(struct sxe2_adapter *adapter); + +void sxe2_fwc_fnav_trace_recorder(struct sxe2_adapter *adapter); + +void sxe2_fwc_fnav_hw_sts(struct sxe2_adapter *adapter); + +bool sxe2_fnav_flow_seg_compare(struct sxe2_fnav_flow_seg *seg_a, + struct sxe2_fnav_flow_seg *seg_b); + +s32 sxe2_fnav_hw_flow_del(struct sxe2_adapter *adapter, + struct sxe2_flow_info_node *flow); + +s32 sxe2_vf_fnav_filter_inputset_fill(struct sxe2_vf_node *vf, + struct sxe2_fnav_filter *filter); + +s32 sxe2_fnav_filter_clean_for_vf(struct sxe2_vf_node *vf, bool is_vfr); + +u32 sxe2_fnav_num_avail_filter(struct sxe2_vsi *vsi); + +s32 sxe2_flow_fnav_update_hw_prof_fv_mask(struct sxe2_ppp_common_ctxt *ppp_ctxt, + u8 prof_id, u16 *masks); + +s32 sxe2_fnav_gen_prgm_pkt(struct sxe2_adapter *adapter, + struct sxe2_fnav_filter *filter, u8 *pkt, bool frag, + bool tun); + +s32 sxe2_pf_fnav_hw_filter_update(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, bool is_add, bool is_update, + enum sxe2_fnav_filter_update_type update_type); + +s32 sxe2_fnav_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter); + +s32 sxe2_fnav_mask_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter); + +void sxe2_fnav_flow_ctxt_init(struct sxe2_adapter *adapter); + +s32 sxe2_fwc_fnav_space_cnt_get(struct sxe2_adapter *adapter, u16 vsi_id, + u32 *gcnt_global, u32 *bcnt_global, u32 *gcnt_pf, u32 *bcnt_pf, + u32 *gcnt_vsi, u32 *bcnt_vsi); + +s32 sxe2_fnav_stat_idx_alloc_with_lock(struct sxe2_adapter *adapter, + u16 vsi_id, u16 *stat_index, bool need_update); + +s32 sxe2_fnav_stat_idx_free_with_lock(struct sxe2_adapter *adapter, + u16 stat_index); + +void sxe2_fnav_reserve_stat_idx_alloc(struct sxe2_adapter *adapter); + +void sxe2_fnav_stat_ctxt_init(struct sxe2_adapter *adapter); + +void sxe2_fnav_stat_ctxt_deinit(struct sxe2_adapter *adapter); + +void sxe2_fnav_xlt2_dump(struct sxe2_adapter *adapter); + +void sxe2_fnav_vsig_dump(struct sxe2_adapter *adapter); + +void sxe2_fnav_prof_dump(struct sxe2_adapter *adapter); + +void sxe2_fnav_mask_dump(struct sxe2_adapter *adapter); + +void sxe2_fnav_stats_dump(struct sxe2_adapter *adapter); + +s32 sxe2_fnav_filter_replay(struct sxe2_vsi *vsi, bool to_vf); + +void sxe2_comm_fnav_msg_convert_fld(unsigned long *flds, + struct sxe2_fnav_comm_proto_hdr *proto_hdr); + +struct sxe2_fnav_filter * 
+sxe2_comm_fnav_filter_search_for_dup(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter); + +s32 sxe2_fnav_filter_del_hw(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter); + +struct sxe2_fnav_filter +*sxe2_fnav_find_filter_by_flow_id_unlock(struct sxe2_vsi_fnav *vsi_fnav, + u32 flow_id); + +s32 sxe2_fnav_del_filter_by_flow_id(struct sxe2_adapter *adapter, + u16 rule_vsi_id, u32 flow_id); + +s32 sxe2_fnav_hw_stats_get(struct sxe2_adapter *adapter, u16 stat_index, + u32 is_clear, + enum sxe2_fnav_counter_bank_type bank_type, + struct sxe2_fwc_fnav_stats_resp *resp); + +void sxe2_fnav_match_stats_get(struct sxe2_adapter *adapter, u16 stat_index, u16 vsi_id); + +void sxe2_fnav_match_stats_update_batch(struct sxe2_adapter *adapter); + +void sxe2_fnav_vf_cfg_clear(struct sxe2_adapter *adapter); + +s32 sxe2_comm_add_fnav_filter(struct sxe2_adapter *adapter, + u16 ori_vsi_id, u16 dst_vsi_id, u16 rule_vsi_id, + struct sxe2_fnav_comm_full_msg *full_msg, u32 *flow_id); + +void sxe2_fnav_clean_by_vsi(struct sxe2_vsi *vsi, bool need_clear_hw); + +s32 sxe2_fnav_del_filter_by_vsi(struct sxe2_vsi *vsi); + +s32 sxe2_fnav_filter_del(struct sxe2_vsi *rule_vsi, struct sxe2_fnav_filter *filter); + +void sxe2_fnav_filter_free_by_vsi(struct sxe2_vsi *vsi); + +void sxe2_fnav_flow_cfg_free(struct sxe2_vsi *vsi); + +s32 sxe2_fnav_filter_add_hw(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter, + struct sxe2_fnav_flow_seg *segs); + +struct sxe2_fnav_flow_cfg +*sxe2_fnav_find_flow_cfg_by_flow_type(struct sxe2_vsi *vsi, + enum sxe2_fnav_flow_type flow_type); + +enum sxe2_fnav_flow_type sxe2_arfs_flow_to_fnav_flow(enum sxe2_fnav_flow_type flow_type); + +void sxe2_fnav_filter_add_list_by_loc(struct sxe2_vsi *vsi, + struct sxe2_fnav_filter *filter); + +s32 sxe2_pf_eth_fnav_init(struct sxe2_adapter *adapter); + +void sxe2_pf_eth_fnav_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_pf_eth_fnav_rebuild(struct sxe2_adapter *adapter); + +s32 sxe2_fnav_flow_cfg_clear_by_vsi(struct sxe2_vsi *vsi); + +void sxe2_fnav_stats_free_by_vsi(struct sxe2_vsi *vsi); + +void sxe2_eth_fnav_outer_hdr_set_eth(enum sxe2_fnav_flow_type flow_type, + struct sxe2_fnav_flow_seg *seg_outer); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.c new file mode 100644 index 0000000000000000000000000000000000000000..160ca71402850d9163e35c737419685bcfea2a9d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_host_cli.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_host_cli.h" +#include "sxe2_ioctl.h" +#include "sxe2_cli_drv_priv.h" + +#define SXE2_CLI_CMD_DFLT_TIMEOUT (30) + +#define SXE2_CLI_CMD_DFLT_TIMEOUT_MS \ + (30000) + +#define SXE2_MAX_IOCTL_CMDS (1) + +#define SXE2_IOCTL_MEMDUP_LEN (56) + +STATIC dev_t sxe2_cdev_major; +STATIC struct class *sxe2_cdev_class; +STATIC struct sxe2_cli_dev_mgr sxe2_cdev_mgr; + +STATIC struct mutex sxe2_minor_lock; +STATIC DEFINE_IDR(sxe2_minor_idr); + +struct sxe2_cli_dev_mgr *sxe2_cdev_mgr_get(void) +{ + return &sxe2_cdev_mgr; +} + +STATIC s32 sxe2_cli_open(struct inode *inode, struct file *filep) +{ + struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get(); + s32 ret = 0; + struct sxe2_cli_dev_mgr_data *cdev_mgr; + + cdev_mgr = container_of(inode->i_cdev, struct sxe2_cli_dev_mgr_data, + cdev_info.cdev); + + filep->private_data = cdev_mgr; + + mutex_lock(&cli_dev_mgr->lock); + + atomic_inc(&cdev_mgr->ref_count); + + if (cdev_mgr->status == SXE2_CDEV_STATUS_UNACCESS) { + ret = -EACCES; + atomic_dec(&cdev_mgr->ref_count); + goto l_unlock; + } + +l_unlock: + mutex_unlock(&cli_dev_mgr->lock); + return ret; +} + +STATIC s32 sxe2_cli_close(struct inode *inode, struct file *filep) +{ + struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get(); + struct sxe2_cli_dev_mgr_data *cdev_mgr = + (struct sxe2_cli_dev_mgr_data *)filep->private_data; + s32 ref_count = 0; + + mutex_lock(&cli_dev_mgr->lock); + ref_count = atomic_dec_return(&cdev_mgr->ref_count); + if (ref_count == 0) { + mutex_unlock(&cli_dev_mgr->lock); + wake_up(&cdev_mgr->waitq); + } else { + mutex_unlock(&cli_dev_mgr->lock); + } + + return 0; +} + +STATIC s32 sxe2_do_cli_cmd(struct sxe2_adapter *adapter, unsigned int cmd_code, + unsigned long arg) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_ioctl_sync_cmd *cmd_buf; + + void __user *argp = (void __user *)arg; + + cmd_buf = memdup_user(argp, SXE2_IOCTL_MEMDUP_LEN); + if (IS_ERR(cmd_buf)) { + ret = (s32)PTR_ERR(cmd_buf); + LOG_ERROR_BDF("memdup_user mem failed, ret=%d\n", ret); + (void)cmd_buf; + goto l_end; + } + + LOG_DEBUG_BDF("get user cmd: trace_id=0x%llx, in_len=%u, out_len=%u, cli_ver 0x%x\n", + cmd_buf->trace_id, cmd_buf->in_len, cmd_buf->out_len, + cmd_buf->ver); + + if (SXE2_MK_VER_MAJOR(cmd_buf->ver) != SXE2_DRV_CLI_VER_MAJOR) { + ret = -EOPNOTSUPP; + goto l_free; + } + + cmd.opcode = SXE2_CMD_MAX; + cmd.req_data = cmd_buf->in_data; + cmd.req_len = (u16)cmd_buf->in_len; + cmd.resp_data = cmd_buf->out_data; + cmd.resp_len = (u16)cmd_buf->out_len; + cmd.is_interruptible = true; + cmd.timeout = cmd_buf->timeout ? 
cmd_buf->timeout + : SXE2_CLI_CMD_DFLT_TIMEOUT; + cmd.trace_id = cmd_buf->trace_id; + + if (cmd_code == SXE2_CMD_IOCTL_SYNC_CMD) + ret = sxe2_cmd_cli_exec(adapter, &cmd); + else if (cmd_code == SXE2_CMD_IOCTL_SYNC_DRV_CMD) + ret = sxe2_cmd_cli_drv_exec(adapter, &cmd); + else + ret = -EFAULT; + + if (ret) { + LOG_ERROR_BDF("sxe2 cli cmd(%d) trace_id=0x%llx error, ret=%d\n", + cmd_code, cmd.trace_id, ret); + goto l_free; + } + + cmd_buf->ver = SXE2_DRV_CLI_VER; + if (ret == 0 && copy_to_user(argp, cmd_buf, sizeof(*cmd_buf))) { + LOG_ERROR_BDF("copy_to_user failed, len=%zu\n", sizeof(*cmd_buf)); + ret = -EFAULT; + goto l_free; + } + +l_free: + kfree(cmd_buf); +l_end: + return ret; +} + +STATIC long sxe2_cli_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + long ret; + struct sxe2_adapter *adapter = NULL; + struct sxe2_cli_dev_mgr_data *cdev_mgr = NULL; + struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get(); + + if (!filep || cmd == 0 || arg == 0) { + LOG_ERROR("filep=%p cmd=%d arg=%ld\n", filep, cmd, arg); + ret = -EINVAL; + goto l_end; + } + + cdev_mgr = (struct sxe2_cli_dev_mgr_data *)filep->private_data; + + mutex_lock(&cli_dev_mgr->lock); + if (cdev_mgr->status == SXE2_CDEV_STATUS_UNACCESS) { + mutex_unlock(&cli_dev_mgr->lock); + ret = -EACCES; + goto l_end; + } + mutex_unlock(&cli_dev_mgr->lock); + + adapter = (struct sxe2_adapter *)cdev_mgr->adapter; + + LOG_DEBUG_BDF("driver ioctl cmd=%x, arg=0x%lx\n", cmd, arg); + + if (down_interruptible(&adapter->cdev_mgr->cdev_info.cdev_sem)) { + LOG_WARN_BDF("ioctl concurrency full\n"); + ret = -ERESTARTSYS; + goto l_end; + } + LOG_DEBUG_BDF("driver ioctl cmd=%x, arg=0x%lx get sem\n", cmd, arg); + + switch (cmd) { + case SXE2_CMD_IOCTL_SYNC_CMD: + case SXE2_CMD_IOCTL_SYNC_DRV_CMD: + ret = sxe2_do_cli_cmd(adapter, cmd, arg); + break; + default: + ret = -EINVAL; + LOG_ERROR_BDF("unknown ioctl cmd, filep=%p, cmd=%d,arg=0x%8.8lx\n", + filep, cmd, arg); + break; + } + + if (ret) { + LOG_ERROR_BDF("filep=%p, cmd=%d, arg=%lx, ret=%ld\n", filep, cmd, + arg, ret); + } + + up(&adapter->cdev_mgr->cdev_info.cdev_sem); + +l_end: + LOG_DEBUG_BDF("driver ioctl cmd=%x, arg=0x%lx end, ret:%ld\n", cmd, arg, + ret); + return ret; +} + +const struct file_operations sxe2_cdev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sxe2_cli_ioctl, + .open = sxe2_cli_open, + .release = sxe2_cli_close, +}; + +s32 sxe2_cli_cdev_register(void) +{ + s32 ret; + u16 i; + + memset(&sxe2_cdev_mgr, 0, sizeof(sxe2_cdev_mgr)); + mutex_init(&sxe2_cdev_mgr.lock); + for (i = 0; i < SXE2_CLI_DEV_MGR_DATA_SIZE; i++) { + sxe2_cdev_mgr.cdev_mgr[i].id = i; + sema_init(&sxe2_cdev_mgr.cdev_mgr[i].cdev_info.cdev_sem, + SXE2_MAX_IOCTL_CMDS); + } + + ret = alloc_chrdev_region(&sxe2_cdev_major, 0, SXE2_MAX_DEVICES_NUM, + SXE2_CHRDEV_NAME); + if (ret) { + LOG_ERROR("alloc cdev number failed: %d\n", ret); + goto l_alloc_cdev_failed; + } + + sxe2_cdev_class = class_create(THIS_MODULE, SXE2_CHRDEV_CLASS_NAME); + if (IS_ERR(sxe2_cdev_class)) { + ret = (s32)PTR_ERR(sxe2_cdev_class); + LOG_ERROR("create cdev class failed: %d\n", ret); + goto l_create_class_failed; + } + + mutex_init(&sxe2_minor_lock); + + return 0; + +l_create_class_failed: + unregister_chrdev_region(sxe2_cdev_major, SXE2_MAX_DEVICES_NUM); +l_alloc_cdev_failed: + return ret; +} + +void sxe2_cli_cdev_unregister(void) +{ + class_destroy(sxe2_cdev_class); + unregister_chrdev_region(sxe2_cdev_major, SXE2_MAX_DEVICES_NUM); + idr_destroy(&sxe2_minor_idr); + + mutex_destroy(&sxe2_minor_lock); + 
	mutex_destroy(&sxe2_cdev_mgr.lock);
+}
+
+STATIC s32 sxe2_minor_get(s32 *dev_minor)
+{
+	s32 ret;
+
+	mutex_lock(&sxe2_minor_lock);
+	ret = idr_alloc(&sxe2_minor_idr, NULL, 0, (s32)SXE2_MAX_DEVICES_NUM,
+			GFP_KERNEL);
+	if (ret >= 0) {
+		*dev_minor = ret;
+		ret = 0;
+	}
+	mutex_unlock(&sxe2_minor_lock);
+	return ret;
+}
+
+STATIC void sxe2_minor_free(s32 dev_minor)
+{
+	mutex_lock(&sxe2_minor_lock);
+	idr_remove(&sxe2_minor_idr, dev_minor);
+	mutex_unlock(&sxe2_minor_lock);
+}
+
+STATIC void sxe2_cli_cdev_mgr_init(struct sxe2_adapter *adapter)
+{
+	atomic_set(&adapter->cdev_mgr->ref_count, 0);
+	adapter->cdev_mgr->status = SXE2_CDEV_STATUS_NORMAL;
+
+	init_waitqueue_head(&adapter->cdev_mgr->waitq);
+}
+
+STATIC s32 sxe2_cli_cdev_mgr_get(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	unsigned long offset;
+	struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get();
+	unsigned long *map = cli_dev_mgr->map;
+
+	mutex_lock(&cli_dev_mgr->lock);
+
+	offset = bitmap_find_next_zero_area(map, SXE2_CLI_DEV_MGR_DATA_SIZE, 0,
+					    SXE2_CLI_DEV_MGR_DATA_CNT, 0);
+	if (offset >= SXE2_CLI_DEV_MGR_DATA_SIZE) {
+		LOG_INFO("get cdev mgr(%lu) over max pf count(%d).\n", offset,
+			 SXE2_CLI_DEV_MGR_DATA_SIZE);
+		ret = -EPERM;
+		goto end;
+	}
+
+	bitmap_set(map, (u32)offset, SXE2_CLI_DEV_MGR_DATA_CNT);
+	adapter->cdev_mgr = &cli_dev_mgr->cdev_mgr[offset];
+	cli_dev_mgr->cdev_mgr[offset].adapter = adapter;
+	sxe2_cli_cdev_mgr_init(adapter);
+
+end:
+	mutex_unlock(&cli_dev_mgr->lock);
+	return ret;
+}
+
+STATIC void sxe2_cli_cdev_wait_clear(struct sxe2_adapter *adapter)
+{
+	struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get();
+	unsigned long timeout = msecs_to_jiffies(SXE2_CLI_CMD_DFLT_TIMEOUT_MS);
+	long ret;
+
+	if (!adapter->cdev_mgr)
+		return;
+
+	mutex_lock(&cli_dev_mgr->lock);
+	adapter->cdev_mgr->status = SXE2_CDEV_STATUS_UNACCESS;
+	mutex_unlock(&cli_dev_mgr->lock);
+
+	do {
+		ret = wait_event_timeout(adapter->cdev_mgr->waitq,
+					 atomic_read(&adapter->cdev_mgr->ref_count) == 0,
+					 timeout);
+		if (!ret) {
+			LOG_INFO_BDF("cdev(%d) wait ref count time out.",
+				     adapter->cdev_mgr->id);
+		}
+	} while (atomic_read(&adapter->cdev_mgr->ref_count) != 0);
+}
+
+STATIC void sxe2_cli_cdev_mgr_put(struct sxe2_adapter *adapter)
+{
+	struct sxe2_cli_dev_mgr *cli_dev_mgr = sxe2_cdev_mgr_get();
+	unsigned long *map = cli_dev_mgr->map;
+	u16 dev_mgr_id;
+
+	if (adapter->cdev_mgr) {
+		dev_mgr_id = adapter->cdev_mgr->id;
+		adapter->cdev_mgr->adapter = NULL;
+		adapter->cdev_mgr = NULL;
+		bitmap_clear(map, dev_mgr_id, SXE2_CLI_DEV_MGR_DATA_CNT);
+	}
+}
+
+s32 sxe2_cli_cdev_create(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	s32 dev_major, dev_minor;
+	struct pci_dev *pdev = adapter->pdev;
+	struct sxe2_cdev_info *cdev_info = NULL;
+
+	ret = sxe2_cli_cdev_mgr_get(adapter);
+	if (ret) {
+		LOG_DEV_ERR("get cli cdev mgr failed, ret=%d\n", ret);
+		goto l_cdev_mgr_get_failed;
+	}
+
+	ret = sxe2_minor_get(&dev_minor);
+	if (ret) {
+		LOG_DEV_ERR("cdev minor get failed, ret=%d\n", ret);
+		goto l_get_minor_failed;
+	}
+
+	cdev_info = &adapter->cdev_mgr->cdev_info;
+	dev_major = (s32)MAJOR(sxe2_cdev_major);
+	cdev_info->dev_no = (dev_t)MKDEV(dev_major, dev_minor);
+	cdev_init(&cdev_info->cdev, &sxe2_cdev_fops);
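+	/*
+	 * cdev_init() already points cdev->ops at sxe2_cdev_fops, so the
+	 * explicit ops assignment below is redundant but harmless; owner
+	 * must still be set so the module stays pinned while the char
+	 * device is open.
+	 */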
+ cdev_info->cdev.owner = THIS_MODULE; + cdev_info->cdev.ops = &sxe2_cdev_fops; + + LOG_INFO_BDF("cdev_add: dev_major: %d, dev_minor: %d.\n", dev_major, + dev_minor); + + ret = cdev_add(&cdev_info->cdev, cdev_info->dev_no, 1); + if (ret) { + LOG_DEV_ERR("failed to add cdev dev_no=%ld, ret=%d\n", + (unsigned long)cdev_info->dev_no, ret); + goto l_add_cdev_failed; + } + + cdev_info->device = + device_create(sxe2_cdev_class, NULL, cdev_info->dev_no, NULL, + SXE2_CHRDEV_NAME "-%04x:%02x:%02x.%x", + pci_domain_nr(pdev->bus), pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (IS_ERR(cdev_info->device)) { + ret = (s32)PTR_ERR(cdev_info->device); + LOG_DEV_ERR("failed to create device, dev_no=%ld\n", + (unsigned long)cdev_info->dev_no); + goto l_create_dev_failed; + } + + LOG_INFO("create char dev[%p] dev_no[major:minor=%u:%u] on pci_dev[%p]\t" + "belongs to class dev[%p] success\n", + &cdev_info->cdev, dev_major, dev_minor, adapter->pdev, + cdev_info->device); + + return 0; + +l_create_dev_failed: + cdev_del(&cdev_info->cdev); +l_add_cdev_failed: + sxe2_minor_free(dev_minor); +l_get_minor_failed: + sxe2_cli_cdev_mgr_put(adapter); +l_cdev_mgr_get_failed: + return ret; +} + +void sxe2_cli_cdev_delete(struct sxe2_adapter *adapter) +{ + s32 dev_minor; + struct sxe2_cdev_info *cdev_info = &adapter->cdev_mgr->cdev_info; + + dev_minor = (s32)MINOR(cdev_info->dev_no); + + sxe2_cli_cdev_wait_clear(adapter); + + LOG_INFO("delete char dev[%p], dev_no[major:minor=%u:%u]\n", + &cdev_info->cdev, MAJOR(cdev_info->dev_no), dev_minor); + + device_destroy(sxe2_cdev_class, cdev_info->dev_no); + cdev_del(&cdev_info->cdev); + sxe2_minor_free(dev_minor); + + sxe2_cli_cdev_mgr_put(adapter); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.h new file mode 100644 index 0000000000000000000000000000000000000000..720e97f0f790497417b7d2affb3784e6cc093eab --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_host_cli.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_host_cli.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HOST_CLI_H__ +#define __SXE2_HOST_CLI_H__ + +#include +#include +#include + +#include "sxe2.h" +#include "sxe2_cdev.h" + +#define SXE2_CHRDEV_NAME "sxe2-cli" +#define SXE2_MAX_DEVICES_NUM BIT(MINORBITS) +#define SXE2_CHRDEV_CLASS_NAME SXE2_CHRDEV_NAME + +s32 sxe2_cli_cdev_register(void); + +void sxe2_cli_cdev_unregister(void); + +s32 sxe2_cli_cdev_create(struct sxe2_adapter *adapter); + +void sxe2_cli_cdev_delete(struct sxe2_adapter *adapter); + +struct sxe2_cli_dev_mgr *sxe2_cdev_mgr_get(void); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.c new file mode 100644 index 0000000000000000000000000000000000000000..6e8ffcaf492a3181fbeeb319229865d13ca24965 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
 + * + * @file: sxe2_ipsec.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_netdev.h" +#include "sxe2_log.h" +#include "sxe2_ipsec.h" +#include "sxe2_rx.h" +#include "sxe2_tx.h" +#include "sxe2_spec.h" + +#ifndef SADB_X_EALG_SM4CBC +#define SADB_X_EALG_SM4CBC 24 +#endif + +#ifndef SADB_X_AALG_SM3_256HMAC +#define SADB_X_AALG_SM3_256HMAC 10 +#endif + +#define SXE2_IPSEC_AES_KEY_BITS (256) +#define SXE2_IPSEC_SHA256_AUTH_BITS (256) +#define SXE2_IPSEC_SHA256_TRUNC_BITS (128) + +#define SXE2_IPSEC_SM4_KEY_BITS (128) +#define SXE2_IPSEC_SM3_AUTH_BITS (256) +#define SXE2_IPSEC_SM3_TRUNC_BITS (96) + +#define SXE2_IPSEC_TX_ENCRYPT (BIT(0)) +#define SXE2_IPSEC_TX_ENGINE_SM4 (BIT(1)) + +#define SXE2_IPSEC_RX_VALID (BIT(0)) +#define SXE2_IPSEC_RX_IPV6 (BIT(2)) +#define SXE2_IPSEC_RX_DECRYPT (BIT(3)) +#define SXE2_IPSEC_RX_ENGINE_SM4 (BIT(4)) + +#define SXE2_IPSEC_ALG_SM4 (1) +#define SXE2_IPSEC_ALG_AES (0) + +enum sxe2_rx_ipsec_status { + SXE2_IPSEC_DECRYPT_INTIGRITY_SUCCESS, + SXE2_IPSEC_PACKET_TOOLONG_OVER_2K, + SXE2_IPSEC_SPI_IP_NOT_MATCHED, + SXE2_IPSEC_IP_KEY_INVALID, + SXE2_IPSEC_PAYLOAD_ICV_NOTALIGNED, + SXE2_IPSEC_ICV_CHECK_FAIL, + SXE2_IPSEC_DISABLED_BYPASS, + SXE2_IPSEC_MACSEC_BYPASS +}; + +static s32 sxe2_ipsec_xs_validate(struct xfrm_state *xs) +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + + if (xs->props.aalgo != SADB_AALG_NONE) { + if (!xs->aalg) { + LOG_ERROR_BDF("auth instance error.\n"); + + return -EINVAL; + } + + if (xs->props.aalgo != SADB_X_AALG_SHA2_256HMAC && + xs->props.aalgo != SADB_X_AALG_SM3_256HMAC) { + LOG_ERROR_BDF("sxe2 only supports hmac(sha256) or sm3(256); " + "otherwise do not use auth\n"); + + return -EINVAL; + } + } + + if (!xs->ealg) { + LOG_ERROR_BDF("sxe2 cannot offload without ealg\n"); + + return -EINVAL; + } + + if (xs->props.ealgo != SADB_X_EALG_AESCBC && + xs->props.ealgo != SADB_X_EALG_SM4CBC) { + LOG_ERROR_BDF("sxe2 only supports cbc(aes) or cbc(sm4)\n"); + return -EINVAL; + } + + if (xs->props.calgo != SADB_X_CALG_NONE) { + LOG_ERROR_BDF("sxe2 does not support compressed xfrm offloads.\n"); + return -EINVAL; + } + + if (xs->props.family != AF_INET && xs->props.family != AF_INET6) { + LOG_ERROR_BDF("sxe2 only supports IPv4/6 xfrm offloads.\n"); + return -EINVAL; + } + + if (xs->props.mode != XFRM_MODE_TUNNEL) { + LOG_ERROR_BDF("sxe2 only supports tunnel xfrm offloads\n"); + return -EINVAL; + } + + if (xs->id.proto != IPPROTO_ESP) { + LOG_ERROR_BDF("sxe2 only supports ESP xfrm offloads\n"); + return -EINVAL; + } + + if (xs->ealg->alg_key_len != SXE2_IPSEC_SM4_KEY_BITS && + xs->ealg->alg_key_len != SXE2_IPSEC_AES_KEY_BITS) { + LOG_ERROR_BDF("sxe2 only supports a 256-bit or 128-bit key\n"); + return -EINVAL; + } + + if (xs->aalg && + xs->aalg->alg_key_len != SXE2_IPSEC_SM3_AUTH_BITS && + xs->aalg->alg_key_len != SXE2_IPSEC_SHA256_AUTH_BITS) { + LOG_ERROR_BDF("sxe2 aalg auth key length check failed, keylen:%d\n", + xs->aalg->alg_key_len); + return -EINVAL; + } + + if (xs->aalg && + xs->aalg->alg_trunc_len != SXE2_IPSEC_SHA256_TRUNC_BITS && + xs->aalg->alg_trunc_len != SXE2_IPSEC_SM3_TRUNC_BITS) { + LOG_ERROR_BDF("sxe2 aalg trunc length check failed, trunc_len:%d\n", + xs->aalg->alg_trunc_len); + return -EINVAL; + } + + return 0; +} + +STATIC int
sxe2_ipsec_parse_proto_keys(struct xfrm_state *xs, u8 *ealg_key, + u8 *aalg_key) +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + + if (!xs->ealg) { + LOG_ERROR_BDF("null ealg.\n"); + return -EINVAL; + } + + if (xs->props.ealgo == SADB_X_EALG_AESCBC) { + if (xs->ealg->alg_key_len != SXE2_IPSEC_AES_KEY_BITS) { + LOG_ERROR_BDF("failed to check ipsec keylen[%d]\n", + xs->ealg->alg_key_len); + return -EINVAL; + } + + memcpy(ealg_key, xs->ealg->alg_key, SXE2_IPSEC_AESKEY_LENTH); + + if (xs->aalg) { + if (xs->aalg->alg_key_len != SXE2_IPSEC_SHA256_AUTH_BITS || + xs->aalg->alg_trunc_len != SXE2_IPSEC_SHA256_TRUNC_BITS) { + LOG_ERROR_BDF("failed to check auth key len [%d = %d] or " + "auth trunc len[%d = %d]\n", + xs->aalg->alg_key_len, + SXE2_IPSEC_SHA256_AUTH_BITS, + xs->aalg->alg_trunc_len, + SXE2_IPSEC_SHA256_TRUNC_BITS); + + return -EINVAL; + } + + memcpy(aalg_key, xs->aalg->alg_key, SXE2_IPSEC_AESAUTH_LENTH); + } + } else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) { + if (xs->ealg->alg_key_len != SXE2_IPSEC_SM4_KEY_BITS) { + LOG_ERROR_BDF("failed to check ipsec keylen[%d]\n", + xs->ealg->alg_key_len); + return -EINVAL; + } + memcpy(ealg_key, xs->ealg->alg_key, SXE2_IPSEC_SMKEY_LENGTH); + if (xs->aalg) { + if ((xs->aalg->alg_key_len != SXE2_IPSEC_SM3_AUTH_BITS) || + (xs->aalg->alg_trunc_len != SXE2_IPSEC_SM3_TRUNC_BITS)) { + LOG_ERROR_BDF("failed to check auth key len[%d] or trunc len[%d]\n", + xs->aalg->alg_key_len, + xs->aalg->alg_trunc_len); + return -EINVAL; + } + memcpy(aalg_key, xs->aalg->alg_key, SXE2_IPSEC_SMAUTH_LENTH); + } + } else { + LOG_ERROR_BDF("wrong ealg name:%s.\n", xs->ealg->alg_name); + return -EINVAL; + } + + return 0; +} + +bool sxe2_is_ipsec_offload_enable(struct net_device *netdev) +{ + return netdev->features & NETIF_F_HW_ESP; +} + +static s32 sxe2_ipsec_empty_rxtable_find(struct sxe2_adapter *adapter) +{ + u32 i; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + if (ipsec->rx_sa_cnt == ipsec->max_rx_sa_cnt) + goto out_full; + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) { + if (!ipsec->rx_sa_table[i].used) { + LOG_DEBUG_BDF("get empty rx sa cnt is %d\n", i); + ipsec->rx_sa_table[i].index = (u16)i; + return ipsec->rx_sa_table[i].index; + } + } + +out_full: + LOG_ERROR_BDF("failed to get empty rx sa, table is full\n"); + + return -ENOSPC; +} + +s32 sxe2_fwc_ipsec_rx_sa_set(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_set_req *req, + struct sxe2_fwc_ipsec_rxsa_add_resp *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_RXSA_SET, req, + sizeof(*req), resp, sizeof(*resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to set rx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_fwc_ipsec_rx_sa_add(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_add_req *req, + struct sxe2_fwc_ipsec_rxsa_add_resp *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_RXSA_ADD, req, + sizeof(*req), resp, sizeof(*resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to add rx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_ipsec_rx_sa_add(struct
sxe2_adapter *adapter, + struct sxe2_rx_sa *rsa) +{ + s32 ret; + u32 i; + u32 mode = 0; + struct sxe2_fwc_ipsec_rxsa_add_req req; + struct sxe2_fwc_ipsec_rxsa_add_resp resp; + + if (rsa->engine) + mode |= SXE2_IPSEC_RX_ENGINE_SM4; + + if (rsa->is_auth) + mode |= SXE2_IPSEC_RX_DECRYPT; + + if (rsa->ipv6) + mode |= SXE2_IPSEC_RX_IPV6; + + req.mode = cpu_to_le32(mode); + req.spi = cpu_to_le32(rsa->spi); + + LOG_DEBUG_BDF("mode:%d ,spi:0x%x\n", req.mode, req.spi); + for (i = 0; i < SXE2_IPV6_ADDR_LEN; i++) + req.ipaddr[i] = cpu_to_le32(rsa->ipaddr[i]); + + memcpy(req.encrypt_keys, rsa->enc_key, SXE2_IPSEC_KEY_LEN); + memcpy(req.auth_keys, rsa->auth_key, SXE2_IPSEC_KEY_LEN); + + req.func_type = rsa->is_vf ? SXE2_VF : SXE2_PF; + req.func_id = rsa->is_vf ? (u8)(adapter->vf_ctxt.vfid_base + rsa->vf_id) : adapter->pf_idx; + req.drv_id = 0; + + ret = sxe2_fwc_ipsec_rx_sa_add(adapter, &req, &resp); + if (ret) { + LOG_ERROR_BDF("failed to add rx sa, ret=%d\n", ret); + ret = -EIO; + goto out; + } + + rsa->hw_index = le32_to_cpu(resp.sa_idx); + rsa->ip_id = resp.ip_id; + LOG_DEBUG_BDF("add rx sa ok - sa_idx:%d ip_id:%d\n", rsa->hw_index, + rsa->ip_id); + +out: + + return ret; +} + +static s32 sxe2_ipsec_rx_state_add(struct sxe2_adapter *adapter, + struct xfrm_state *xs, bool is_vf, u32 vf_id, + u32 *hw_index) +{ + s32 ret; + u32 i; + struct sxe2_rx_sa rsa; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + unsigned long flags; + + ret = sxe2_ipsec_empty_rxtable_find(adapter); + if (ret < 0) { + LOG_ERROR_BDF("failed to get empty rx_sa idx.\n"); + return ret; + } + + (void)memset(&rsa, 0x0, sizeof(rsa)); + rsa.xs = xs; + + if (xs->aalg) + rsa.is_auth = true; + else + rsa.is_auth = false; + + if (xs->props.ealgo == SADB_X_EALG_AESCBC) { + rsa.engine = SXE2_IPSEC_ALG_AES; + } else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) { + rsa.engine = SXE2_IPSEC_ALG_SM4; + } else { + rsa.engine = SXE2_IPSEC_ALG_AES; + LOG_ERROR_BDF("wrong ealg[%s]\n", xs->ealg->alg_name); + } + + if (xs->props.family == AF_INET6) { + rsa.ipv6 = true; + for (i = 0; i < SXE2_IPV6_ADDR_LEN; i++) + rsa.ipaddr[i] = xs->id.daddr.a6[i]; + + } else { + rsa.ipv6 = false; + rsa.ipaddr[0] = xs->id.daddr.a4; + } + + rsa.index = (u16)ret; + rsa.used = true; + rsa.spi = xs->id.spi; + LOG_DEBUG_BDF("xs spi:0x%x rsa.spi:0x%x\n", be32_to_cpu(xs->id.spi), + be32_to_cpu(rsa.spi)); + ret = sxe2_ipsec_parse_proto_keys(xs, rsa.enc_key, rsa.auth_key); + if (ret) { + LOG_ERROR_BDF("failed to get key data for Rx SA table\n"); + return ret; + } + if (is_vf) { + rsa.is_vf = true; + rsa.vf_id = vf_id; + } + ret = sxe2_ipsec_rx_sa_add(adapter, &rsa); + if (ret) { + LOG_ERROR_BDF("failed to add rx sa ret=%d.\n", ret); + memset(&rsa, 0x0, sizeof(rsa)); + return ret; + } + LOG_DEBUG_BDF("add fw rx_sa[index:%d] ok.\n", rsa.hw_index); + + xs->xso.offload_handle = rsa.index; + *hw_index = rsa.hw_index; + (void)memcpy(&ipsec->rx_sa_table[rsa.index], &rsa, sizeof(rsa)); + + spin_lock_irqsave(&ipsec->rx_hlist_lock, flags); + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_sa_table[rsa.index].hlist, + rsa.xs->id.spi); + ipsec->rx_sa_cnt++; + spin_unlock_irqrestore(&ipsec->rx_hlist_lock, flags); + LOG_DEBUG_BDF("[count++]current rx sa cnt is %d\n", ipsec->rx_sa_cnt); + + return 0; +} + +static s32 sxe2_ipsec_empty_txtable_find(struct sxe2_adapter *adapter) +{ + u32 i; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + if (ipsec->tx_sa_cnt == ipsec->max_tx_sa_cnt) + goto out_full; + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) { + if 
(!ipsec->tx_sa_table[i].used) { + LOG_DEBUG_BDF("get empty tx sa cnt is %d\n", i); + return (s32)i; + } + } +out_full: + LOG_ERROR_BDF("failed to get empty tx sa, table is full\n"); + return -ENOSPC; +} + +s32 sxe2_fwc_ipsec_tx_sa_set(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_set_req *req, + struct sxe2_fwc_ipsec_txsa_add_resp *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_TXSA_SET, req, + sizeof(*req), resp, sizeof(*resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to set tx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_fwc_ipsec_tx_sa_add(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_add_req *req, + struct sxe2_fwc_ipsec_txsa_add_resp *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_TXSA_ADD, req, + sizeof(*req), resp, sizeof(*resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to add tx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_ipsec_tx_sa_add(struct sxe2_adapter *adapter, + struct sxe2_tx_sa *sa) +{ + s32 ret; + u32 mode = 0; + u32 i; + struct sxe2_fwc_ipsec_txsa_add_req req; + struct sxe2_fwc_ipsec_txsa_add_resp resp; + + if (sa->engine) + mode |= SXE2_IPSEC_TX_ENGINE_SM4; + + if (sa->is_auth) + mode |= SXE2_IPSEC_TX_ENCRYPT; + + req.mode = cpu_to_le32(mode); + + for (i = 0; i < SXE2_IPSEC_KEY_LEN; i++) { + req.encrypt_keys[i] = sa->enc_key[i]; + req.auth_keys[i] = sa->auth_key[i]; + } + + req.func_type = sa->is_vf ? SXE2_VF : SXE2_PF; + req.func_id = sa->is_vf ? (u8)(adapter->vf_ctxt.vfid_base + sa->vf_id) : adapter->pf_idx; + req.drv_id = 0; + + ret = sxe2_fwc_ipsec_tx_sa_add(adapter, &req, &resp); + if (ret) { + LOG_ERROR_BDF("failed to add tx sa, ret=%d\n", ret); + ret = -EIO; + goto out; + } + sa->hw_index = le32_to_cpu(resp.index); + +out: + + return ret; +} + +static s32 sxe2_ipsec_tx_state_add(struct sxe2_adapter *adapter, + struct xfrm_state *xs, bool is_vf, u32 vf_id, + u32 *hw_index) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + struct sxe2_tx_sa tsa; + + ret = sxe2_ipsec_empty_txtable_find(adapter); + if (ret < 0) { + LOG_ERROR_BDF("no more tx sa space.\n"); + return ret; + } + + (void)memset(&tsa, 0x0, sizeof(tsa)); + tsa.index = (u16)ret; + tsa.xs = xs; + tsa.used = true; + + if (xs->aalg) + tsa.is_auth = true; + else + tsa.is_auth = false; + + if (xs->props.ealgo == SADB_X_EALG_AESCBC) { + tsa.engine = SXE2_IPSEC_ALG_AES; + } else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) { + tsa.engine = SXE2_IPSEC_ALG_SM4; + } else { + tsa.engine = SXE2_IPSEC_ALG_AES; + LOG_ERROR_BDF("wrong ealg[%s]\n", xs->ealg->alg_name); + } + + ret = sxe2_ipsec_parse_proto_keys(xs, tsa.enc_key, tsa.auth_key); + if (ret) { + LOG_ERROR_BDF("failed to get key data for Tx SA table\n"); + memset(&tsa, 0x0, sizeof(tsa)); + return ret; + } + + tsa.spi = xs->id.spi; + + if (is_vf) { + tsa.is_vf = true; + tsa.vf_id = vf_id; + } + + ret = sxe2_ipsec_tx_sa_add(adapter, &tsa); + if (ret) { + LOG_ERROR_BDF("failed to request tx sa entry\n"); + memset(&tsa, 0x0, sizeof(tsa)); + return ret; + } + LOG_DEBUG_BDF("add fw tx_sa[index:%d] ok.\n", tsa.index); + + xs->xso.offload_handle = tsa.index; + *hw_index = tsa.hw_index; + (void)memcpy(&ipsec->tx_sa_table[tsa.index], &tsa, sizeof(tsa)); + ipsec->tx_sa_cnt++; + LOG_DEBUG_BDF("[count++]current tx sa cnt is %d\n", ipsec->tx_sa_cnt); +
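+ /* xso.offload_handle keeps the driver-side table index; the firmware slot is reported back through *hw_index */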
+ return 0; +} + +#ifdef IPSEC_STATE_ADD_API_NEED_1_PARAMS +static int sxe2_ipsec_state_add(struct xfrm_state *xs) +#else +static int sxe2_ipsec_state_add(struct xfrm_state *xs, + struct netlink_ext_ack *extack) +#endif +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + s32 ret; + u32 hw_index; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + + if (adapter->ipsec_ctxt.status != SXE2_IPSEC_READY) { + LOG_ERROR_BDF("failed to add sa, because ipsec status is wrong\n"); + ret = -EBUSY; + goto out; + } + + if (!sxe2_is_ipsec_offload_enable(netdev)) { + LOG_ERROR_BDF("failed to add sa, because ipsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + ret = sxe2_ipsec_xs_validate(xs); + if (ret) + goto out; + +#ifdef IPSEC_HAVE_XS_XSO_DIR + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { +#else + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { +#endif + ret = sxe2_ipsec_rx_state_add(adapter, xs, false, 0, &hw_index); + if (ret) + LOG_ERROR_BDF("failed to add rx sa\n"); + } else { + ret = sxe2_ipsec_tx_state_add(adapter, xs, false, 0, &hw_index); + if (ret) + LOG_ERROR_BDF("failed to add tx sa\n"); + } + +out: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + + return ret; +} + +static struct sxe2_rx_sa *sxe2_ipsec_rx_sa_find_rcu(struct sxe2_adapter *adapter, + u32 hw_idx, u32 spi) +{ + struct sxe2_rx_sa *rsa = NULL; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + bool hit = false; + + if (!ipsec->rx_sa_table) { + LOG_ERROR_BDF("rx sa table is NULL.\n"); + return NULL; + } + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + { + if (!rsa->used) + continue; + + if (hw_idx == rsa->index) { + LOG_DEBUG_BDF("rsa use:%d sa:[%d %d]\n", rsa->used, + rsa->hw_index, hw_idx); + hit = true; + break; + } + } + rcu_read_unlock(); + + if (!hit) { + LOG_ERROR_BDF("failed to find rsa[sa:%d,spi:%d]\n", hw_idx, spi); + return NULL; + } + return rsa; +} + +static struct sxe2_rx_sa *sxe2_ipsec_rx_sa_find(struct sxe2_adapter *adapter, + u32 hw_idx, u32 spi) +{ + struct sxe2_rx_sa *rsa = NULL; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 i; + + if (!ipsec->rx_sa_table) { + LOG_ERROR_BDF("rx sa table is NULL.\n"); + return NULL; + } + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) { + rsa = &ipsec->rx_sa_table[i]; + if (!rsa->used) + continue; + if (hw_idx == rsa->index && spi == rsa->spi) { + LOG_DEBUG_BDF("rsa use:%d sa:[%d %d]\n", rsa->used, + rsa->hw_index, hw_idx); + return rsa; + } + } + + LOG_ERROR_BDF("failed to find rsa[sa:%d,spi:%d]\n", hw_idx, spi); + + return NULL; +} + +s32 sxe2_fwc_ipsec_rx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_del_req *req) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_RXSA_DEL, req, + sizeof(*req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to delete rx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_ipsec_rx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_rx_sa *rsa) +{ + s32 ret; + struct sxe2_fwc_ipsec_rxsa_del_req req; + + req.sa_idx = cpu_to_le16((u16)rsa->hw_index); + req.spi = cpu_to_le32(rsa->spi); + req.ip_id = (u8)rsa->ip_id; + req.func_type = rsa->is_vf ? SXE2_VF : SXE2_PF; + req.func_id = rsa->is_vf ?
(u8)(adapter->vf_ctxt.vfid_base + rsa->vf_id) : adapter->pf_idx; + req.drv_id = 0; + + ret = sxe2_fwc_ipsec_rx_sa_del(adapter, &req); + if (ret) { + LOG_ERROR_BDF("failed to delete rx sa, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static void sxe2_ipsec_vf_xs_free(struct xfrm_state *xs) +{ + if (!xs) + return; + + if (xs->ealg) { + kfree_sensitive(xs->ealg); + xs->ealg = NULL; + } + + if (xs->aalg) { + kfree_sensitive(xs->aalg); + xs->aalg = NULL; + } + + kfree_sensitive(xs); +} + +void sxe2_ipsec_rx_state_free(struct sxe2_adapter *adapter, struct sxe2_rx_sa *rsa) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + + if (!rsa || !rsa->used) { + LOG_ERROR_BDF("invalid rx sa selected\n"); + return; + } + + ret = sxe2_ipsec_rx_sa_del(adapter, rsa); + + if (ret) + LOG_ERROR_BDF("failed to delete rx_sa[%d], ret:%d\n", + rsa->hw_index, ret); + else + LOG_DEBUG_BDF("delete fw rx_sa[%d] ok.\n", rsa->hw_index); + + rsa->used = false; + + if (rsa->is_vf) + sxe2_ipsec_vf_xs_free(rsa->xs); + + synchronize_rcu(); + (void)memset(rsa, 0, sizeof(struct sxe2_rx_sa)); + + ipsec->rx_sa_cnt--; + + LOG_DEBUG_BDF("[count--]current rx sa cnt is %d\n", ipsec->rx_sa_cnt); +} + +s32 sxe2_fwc_ipsec_tx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_del_req *req) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_TXSA_DEL, req, + sizeof(*req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to delete tx sa with adminq, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_ipsec_tx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_tx_sa *tsa) +{ + s32 ret; + struct sxe2_fwc_ipsec_txsa_del_req req; + + req.sa_idx = cpu_to_le16((u16)tsa->hw_index); + req.func_type = tsa->is_vf ? SXE2_VF : SXE2_PF; + req.func_id = tsa->is_vf ?
(u8)(adapter->vf_ctxt.vfid_base + tsa->vf_id) : adapter->pf_idx; + req.drv_id = 0; + + ret = sxe2_fwc_ipsec_tx_sa_del(adapter, &req); + if (ret) { + LOG_ERROR_BDF("failed to delete tx sa, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static struct sxe2_tx_sa *sxe2_ipsec_tx_sa_find(struct sxe2_adapter *adapter, + u32 index) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct sxe2_tx_sa *tsa; + + if (!ipsec->tx_sa_table) { + LOG_ERROR_BDF("tx sa table is NULL\n"); + goto out; + } + + tsa = &ipsec->tx_sa_table[index]; + if (!tsa->used) { + LOG_ERROR_BDF("request sa is not used\n"); + goto out; + } + + return tsa; + +out: + return NULL; +} + +void sxe2_ipsec_tx_state_free(struct sxe2_adapter *adapter, struct sxe2_tx_sa *tsa) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + + if (!tsa || !tsa->used) { + LOG_ERROR_BDF("invalid tx sa selected\n"); + return; + } + + ret = sxe2_ipsec_tx_sa_del(adapter, tsa); + if (ret) { + LOG_ERROR_BDF("failed to delete tx_sa[%d] from fw, ret=%d\n", + tsa->hw_index, ret); + } else { + LOG_DEBUG_BDF("delete tx_sa[%d] from fw ok.\n", tsa->hw_index); + } + tsa->hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + tsa->used = false; + + if (tsa->is_vf && tsa->xs) + sxe2_ipsec_vf_xs_free(tsa->xs); + + (void)memset(tsa, 0, sizeof(struct sxe2_tx_sa)); + ipsec->tx_sa_cnt--; + LOG_DEBUG_BDF("[count--]current tx sa cnt is %d\n", ipsec->tx_sa_cnt); +} + +static void sxe2_ipsec_state_delete(struct xfrm_state *xs) +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_rx_sa *rx_sa = NULL; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + unsigned long flags; + +#ifdef IPSEC_HAVE_XS_XSO_DIR + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { +#else + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { +#endif + LOG_DEBUG_BDF("delete rx sa.\n"); + rx_sa = sxe2_ipsec_rx_sa_find_rcu(adapter, + (u32)xs->xso.offload_handle, xs->id.spi); + if (!rx_sa) { + LOG_ERROR_BDF("failed to find rxsa, spi = 0x%x, sa idx = 0x%lx\n", + xs->id.spi, xs->xso.offload_handle); + return; + } + + spin_lock_irqsave(&ipsec->rx_hlist_lock, flags); + hash_del_rcu(&rx_sa->hlist); + spin_unlock_irqrestore(&ipsec->rx_hlist_lock, flags); + } else { + LOG_DEBUG_BDF("delete tx sa.\n"); + } +} + +static void sxe2_ipsec_state_free(struct xfrm_state *xs) +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_rx_sa *rx_sa = NULL; + struct sxe2_tx_sa *tx_sa = NULL; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (adapter->ipsec_ctxt.status == SXE2_IPSEC_RESETTING) + LOG_WARN_BDF("try to free sa when nic is resetting.\n"); + +#ifdef IPSEC_HAVE_XS_XSO_DIR + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { +#else + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { +#endif + LOG_DEBUG_BDF("free rx sa.\n"); + rx_sa = sxe2_ipsec_rx_sa_find(adapter, (u32)xs->xso.offload_handle, xs->id.spi); + if (!rx_sa) { + LOG_ERROR_BDF("failed to find rxsa, spi = 0x%x, sa idx = 0x%lx\n", + xs->id.spi, xs->xso.offload_handle); + goto out; + } + + sxe2_ipsec_rx_state_free(adapter, rx_sa); + } else { + LOG_DEBUG_BDF("free tx sa.\n"); + tx_sa = sxe2_ipsec_tx_sa_find(adapter,
(u32)xs->xso.offload_handle); + if (!tx_sa) { + LOG_ERROR_BDF("failed to find txsa ,sa idx = 0x%lx\n", + xs->xso.offload_handle); + goto out; + } + + sxe2_ipsec_tx_state_free(adapter, tx_sa); + } + +out: + + mutex_unlock(&adapter->ipsec_ctxt.context_lock); +} + +static bool sxe2_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + if (xs->props.family == AF_INET) { + if (ip_hdr(skb)->ihl != 5) + return false; + + } else { + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + if (skb->len > SXE2_IPSEC_PAYLOAD_LIMIT) + return false; + + return true; +} + +const struct xfrmdev_ops sxe2_ipsec_ops = { + .xdo_dev_state_add = sxe2_ipsec_state_add, + .xdo_dev_state_delete = sxe2_ipsec_state_delete, + .xdo_dev_state_free = sxe2_ipsec_state_free, + .xdo_dev_offload_ok = sxe2_ipsec_offload_ok, +}; + +s32 sxe2_ipsec_fwc_get_ipsec_capa(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_capa_resq *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_GET_CAPA, NULL, 0, resp, + sizeof(*resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to get ipsec capacity with adminq, ret=%d\n", + ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_ipsec_get_ipsec_capa(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_fwc_ipsec_capa_resq resp; + + ret = sxe2_ipsec_fwc_get_ipsec_capa(adapter, &resp); + if (ret) { + LOG_ERROR_BDF("failed to init ipsec, ret=%d\n", ret); + ret = -EIO; + goto out; + } + + adapter->ipsec_ctxt.max_tx_sa_cnt = resp.tx_sa_cnt; + adapter->ipsec_ctxt.max_rx_sa_cnt = resp.rx_sa_cnt; +out: + return ret; +} + +s32 sxe2_ipsec_init(struct sxe2_adapter *adapter) +{ + struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + u32 size, i; + + memset(ipsec, 0, sizeof(struct sxe2_ipsec_context)); + ipsec->status = SXE2_IPSEC_UNINITIALIZED; + mutex_init(&adapter->ipsec_ctxt.context_lock); + ret = sxe2_ipsec_get_ipsec_capa(adapter); + if (ret) { + LOG_ERROR_BDF("failed to get ipsec capability.\n"); + goto l_out; + } + + LOG_DEBUG_BDF("[init]current ipsec depth:[tx:%d rx:%d]\n", + ipsec->max_tx_sa_cnt, ipsec->max_rx_sa_cnt); + + if (ipsec->max_tx_sa_cnt == 0 && ipsec->max_rx_sa_cnt == 0) { + ret = 0; + goto l_disable; + } + + size = (u32)sizeof(struct sxe2_rx_sa) * ipsec->max_rx_sa_cnt; + ipsec->rx_sa_table = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_sa_table) { + LOG_ERROR_BDF("ipsec rx sa table mem:%uB alloc fail.\n", size); + ret = -ENOMEM; + goto l_out; + } + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) + ipsec->rx_sa_table[i].hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + + size = (u32)sizeof(struct sxe2_tx_sa) * ipsec->max_tx_sa_cnt; + ipsec->tx_sa_table = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_sa_table) { + LOG_ERROR_BDF("ipsec tx sa table mem:%uB alloc fail.\n", size); + ret = -ENOMEM; + goto l_free_rx_sa; + } + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) + ipsec->tx_sa_table[i].hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + + spin_lock_init(&adapter->ipsec_ctxt.rx_hlist_lock); + ipsec->rx_sa_cnt = 0; + ipsec->tx_sa_cnt = 0; + hash_init(ipsec->rx_sa_list); + + (void)memset(&ipsec->hw_stats, 0x0, sizeof(struct sxe2_ipsec_hw_stats)); + + netdev->xfrmdev_ops = &sxe2_ipsec_ops; + ipsec->status = SXE2_IPSEC_READY; + +l_disable: + return ret; + +l_free_rx_sa: + kfree(ipsec->rx_sa_table); + ipsec->rx_sa_table = NULL; + +l_out: + LOG_ERROR_BDF("failed to init ipsec\n"); + ipsec->status = SXE2_IPSEC_ERROR; 
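+ /* init failed: tear down the context lock created at the top of this function */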
+ mutex_destroy(&adapter->ipsec_ctxt.context_lock); + + return ret; +} + +void sxe2_ipsec_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + ipsec->status = SXE2_IPSEC_UNINITIALIZED; + kfree(ipsec->rx_sa_table); + ipsec->rx_sa_table = NULL; + + kfree(ipsec->tx_sa_table); + ipsec->tx_sa_table = NULL; + + mutex_destroy(&ipsec->context_lock); +} + +s32 sxe2_ipsec_tx(struct sxe2_queue *txq, struct sxe2_tx_buf *first, + struct sxe2_tx_offload_info *offload) +{ + struct sxe2_netdev_priv *priv = netdev_priv(txq->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct xfrm_state *xs; + struct sec_path *sp; + struct sxe2_tx_sa *tsa; + + u32 index; + u64 qw1 = offload->ctxt_desc_qw1; + + if (skb_is_gso(first->skb)) { + LOG_ERROR_BDF("failed to offload ipsec,because tso is on.\n"); + goto out_failed; + } + + sp = skb_sec_path(first->skb); + if (unlikely(!sp->len)) { + LOG_ERROR_BDF("failed to get sec path length = %d\n", sp->len); + txq->stats->ipsec_stats.tx_error_invalid_sp++; + goto out_failed; + } + + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + LOG_ERROR_BDF("failed to get xfrm state,xs=%p\n", xs); + txq->stats->ipsec_stats.tx_error_invalid_state++; + goto out_failed; + } + + index = (u32)xs->xso.offload_handle; + tsa = sxe2_ipsec_tx_sa_find(adapter, index); + if (unlikely(!tsa || !tsa->used)) { + LOG_ERROR_BDF("unavailable sa_idx=%d\n", index); + txq->stats->ipsec_stats.tx_error_invalid_state++; + goto out_failed; + } + + qw1 |= (u64)SXE2_TX_DESC_DTYPE_CTXT; + qw1 |= 0x1 << SXE2_TXCD_QW1_IPSEC_EN_S; + + if (tsa->is_auth == false) + qw1 |= 0x1 << SXE2_TXCD_QW1_IPSEC_MODE_S; + + qw1 |= tsa->engine << SXE2_TXCD_QW1_IPSEC_ENGINE_MODE_S; + qw1 |= (tsa->hw_index << SXE2_TXCD_QW1_IPSEC_SA_IDX_S) & + SXE2_TXCD_QW1_IPSEC_SA_IDX_M; + + offload->ctxt_desc_qw1 = qw1; + offload->ctxt_desc_ipsec_offset = (u16)skb_transport_offset(first->skb); +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("tx ipsec offload succeed!qw1:0x%llx, " + "ipsecoffset:0x%x,sa_index:%d\n", + offload->ctxt_desc_qw1, offload->ctxt_desc_ipsec_offset, + tsa->hw_index); + } +#endif + + txq->stats->ipsec_stats.tx_offload_success++; + return 0; + +out_failed: + + return -1; +} + +static bool sxe2_update_ipsec_stats(struct sxe2_adapter *adapter, + union sxe2_rx_desc *desc, struct sxe2_queue *rxq) +{ + bool ret = false; + u8 ipsec_status = 0; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + ipsec_status = SXE2_RX_DESC_IPSEC_STATUS_GET(le32_to_cpu(desc->wb.status_lrocnt_fdpf_id)); + switch (ipsec_status) { + case SXE2_IPSEC_DECRYPT_INTIGRITY_SUCCESS: + ipsec->hw_stats.rx_decrypt_success++; + ret = false; + break; + case SXE2_IPSEC_PACKET_TOOLONG_OVER_2K: + ipsec->hw_stats.over_2k++; + ret = true; + break; + case SXE2_IPSEC_SPI_IP_NOT_MATCHED: + ipsec->hw_stats.spi_ip_not_matched++; + ret = true; + break; + case SXE2_IPSEC_IP_KEY_INVALID: + ipsec->hw_stats.ip_key_invalid++; + ret = true; + break; + case SXE2_IPSEC_PAYLOAD_ICV_NOTALIGNED: + ipsec->hw_stats.not_aligned++; + ret = true; + break; + case SXE2_IPSEC_ICV_CHECK_FAIL: + ipsec->hw_stats.icv_fail++; + ret = true; + break; + case SXE2_IPSEC_DISABLED_BYPASS: + ipsec->hw_stats.bypass_disable++; + ret = true; + break; + case SXE2_IPSEC_MACSEC_BYPASS: + ipsec->hw_stats.bypass_macsec++; + ret = true; + break; + } + + if (ret && ipsec_status != SXE2_IPSEC_DISABLED_BYPASS && + ipsec_status != SXE2_IPSEC_MACSEC_BYPASS) { + LOG_ERROR_BDF("ipsec offload 
failed,status is %d\n", ipsec_status); + rxq->stats->ipsec_stats.rx_error_decrypt_fail++; + } + + return ret; +} + +void sxe2_ipsec_rx(struct sxe2_queue *rxq, union sxe2_rx_desc *desc, + struct sk_buff *skb, u16 ptype) +{ + struct sxe2_netdev_priv *priv = netdev_priv(rxq->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct xfrm_state *xs = NULL; + struct xfrm_offload *xo = NULL; + struct sxe2_rx_sa *rsa; +#ifndef IPSEC_NEED_USE_SECPATH_DUP + struct sec_path *sp; +#endif + __be32 spi; + bool is_ipv4; + bool is_ipv6; + struct iphdr *ip4 = NULL; + struct ipv6hdr *ip6 = NULL; + u8 *c_hdr; + struct sxe2_rx_ptype_info ptype_info; + + if (sxe2_update_ipsec_stats(adapter, desc, rxq)) + return; + + ptype_info = sxe2_rx_ptype_parse(ptype); + + is_ipv4 = (ptype_info.outer_ip == SXE2_PTYPE_OUTER_IP && + ptype_info.outer_ip_ver == SXE2_PTYPE_OUTER_IPV4); + + is_ipv6 = (ptype_info.outer_ip == SXE2_PTYPE_OUTER_IP && + ptype_info.outer_ip_ver == SXE2_PTYPE_OUTER_IPV6); + + if (is_ipv4) { + ip4 = (struct iphdr *)(skb->data); + c_hdr = (u8 *)ip4 + (size_t)(ip4->ihl * 4); + } else if (is_ipv6) { + ip6 = (struct ipv6hdr *)(skb->data); + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + LOG_DEBUG_BDF("rx ipsec offload failed.ptype:%d!\n", ptype); + rxq->stats->ipsec_stats.rx_error_invalid_ptype++; + return; + } + + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + { + if (spi == rsa->xs->id.spi) { + if (is_ipv4) { + if (ip4->daddr == rsa->xs->id.daddr.a4) { + xs = rsa->xs; + xfrm_state_hold(xs); + break; + } + } else if (is_ipv6) { + if (!ipv6_addr_cmp(&rsa->xs->id.daddr.in6, + &ip6->daddr)) { + xs = rsa->xs; + xfrm_state_hold(xs); + break; + } + } + } + } + rcu_read_unlock(); + + if (unlikely(!xs)) { + LOG_ERROR_BDF("ipsec offload failed, xs is NULL!\n"); + rxq->stats->ipsec_stats.rx_error_invalid_state++; + return; + } + +#ifdef IPSEC_NEED_USE_SECPATH_DUP + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) { + LOG_ERROR_BDF("rx ipsec offload failed,sp is NULL!\n"); + return; + } + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; +#else + sp = secpath_set(skb); + if (unlikely(!sp)) { + LOG_ERROR_BDF("rx ipsec offload failed,sp is NULL!\n"); + rxq->stats->ipsec_stats.rx_error_invalid_sp++; + + return; + } + + sp->xvec[sp->len++] = xs; + sp->olen++; +#endif + + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + xo->status = CRYPTO_SUCCESS; + + rxq->stats->ipsec_stats.rx_offload_success++; +} + +void sxe2_ipsec_stop(struct sxe2_adapter *adapter) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 i; + s32 ret; + struct sxe2_rx_sa *rsa; + struct sxe2_tx_sa *tsa; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (ipsec->status != SXE2_IPSEC_READY) + goto out_unlock; + + ipsec->status = SXE2_IPSEC_RESETTING; + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) { + tsa = &ipsec->tx_sa_table[i]; + if (!tsa) + continue; + + if (!tsa->used) + continue; + + if (tsa->is_vf) + continue; + + ret = sxe2_ipsec_tx_sa_del(adapter, tsa); + if (ret) + LOG_ERROR_BDF("failed to delete tx sa[%d],ret=%d\n", + tsa->hw_index, ret); + else + LOG_DEBUG_BDF("delete fw tx_sa[index:%d] ok.\n", + tsa->hw_index); + tsa->hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + } + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) { + rsa = &ipsec->rx_sa_table[i]; + if (!rsa) + continue; + + if (!rsa->used) + continue; + + if (rsa->is_vf) + continue; + + ret = 
sxe2_ipsec_rx_sa_del(adapter, rsa); + if (ret) + LOG_ERROR_BDF("failed to delete rx sa[%d],ret=%d\n", + rsa->hw_index, ret); + else + LOG_DEBUG_BDF("delete fw rx_sa[index:%d] ok.\n", i); + rsa->hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + } +out_unlock: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); +} + +void sxe2_ipsec_sa_clean(struct sxe2_adapter *adapter) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 i; + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) + ipsec->rx_sa_table[i].hw_index = SXE2_IPSEC_HW_INDEX_INVALID; + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) + ipsec->tx_sa_table[i].hw_index = SXE2_IPSEC_HW_INDEX_INVALID; +} + +s32 sxe2_ipsec_rebuild(struct sxe2_adapter *adapter) +{ + u32 i; + s32 ret; + struct sxe2_tx_sa *tsa; + struct sxe2_rx_sa *rsa; + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (ipsec->status != SXE2_IPSEC_RESETTING) { + LOG_DEBUG_BDF("ipsec rebuild after initial failed\n"); + goto out_unlock; + } + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) { + tsa = &ipsec->tx_sa_table[i]; + if (!tsa->used) + continue; + if (tsa->is_vf) + continue; + + ret = sxe2_ipsec_tx_sa_add(adapter, tsa); + if (ret) { + sxe2_ipsec_state_delete(tsa->xs); + tsa->used = false; + ipsec->tx_sa_cnt--; + LOG_DEV_ERR("failed to request Tx SA[spi:0x%x] in reset, " + "please del this sa ret: %d\n", + be32_to_cpu(tsa->spi), ret); + ret = -EIO; + } else { + LOG_DEBUG_BDF("add fw tx_sa[index:%d] ok.\n", i); + } + } + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) { + rsa = &ipsec->rx_sa_table[i]; + if (!rsa->used) + continue; + if (rsa->is_vf) + continue; + + ret = sxe2_ipsec_rx_sa_add(adapter, rsa); + if (ret) { + sxe2_ipsec_state_delete(rsa->xs); + rsa->used = false; + ipsec->rx_sa_cnt--; + LOG_DEV_ERR("failed to request Rx SA[spi:0x%x] in reset, " + "please del this sa, ret: %d\n", + be32_to_cpu(rsa->spi), ret); + ret = -EIO; + } else { + LOG_DEBUG_BDF("add fw rx_sa[index:%d] ok.\n", i); + } + } + + ipsec->status = SXE2_IPSEC_READY; + +out_unlock: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + + return 0; +} + +bool sxe2_is_ipsec_can_not_disable(struct sxe2_adapter *adapter) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + return (!!ipsec->rx_sa_cnt) || (!!ipsec->tx_sa_cnt); +} + +bool sxe2_ipsec_conflict_features_check(struct sxe2_adapter *adapter, + struct net_device *netdev) +{ + netdev_features_t tso_features = + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | +#ifdef NETIF_F_GSO_UDP_L4 + NETIF_F_GSO_UDP_L4 | +#endif + NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6; + + netdev_features_t features = netdev->features; + + if (features & NETIF_F_LRO) + return true; + + if (features & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)) + return true; + + if (features & (NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM)) { + return true; + } + + if (features & (tso_features)) + return true; + +#ifdef HAVE_MACSEC_SUPPORT + if (features & NETIF_F_HW_MACSEC) + return true; +#endif + + return false; +} + +s32 sxe2_ipsec_vf_sa_add(struct sxe2_adapter *adapter, u32 vf_id, + struct sxe2_vf_ipsec_sa_add_msg *req, u32 *hw_index, + bool is_restore) +{ + s32 ret = 0; + u32 enc_size, auth_size, trunc_size; + struct xfrm_state *xs = NULL; + xs = kzalloc(sizeof(*xs), GFP_KERNEL); + if (!xs) { + ret = -ENOMEM; + return ret; + } + + if (req->dir == SXE2_IPSEC_DIR_RX) { +#ifdef 
IPSEC_HAVE_XS_XSO_DIR + xs->xso.dir = XFRM_DEV_OFFLOAD_IN; +#else + xs->xso.flags |= XFRM_OFFLOAD_INBOUND; +#endif + } + + xs->id.spi = req->spi; + + if (req->mode & SXE2_MBX_IPSEC_IPV6) { + xs->props.family = AF_INET6; + (void)memcpy(&xs->id.daddr.a6, req->addr, sizeof(xs->id.daddr.a6)); + } else { + xs->props.family = AF_INET; + (void)memcpy(&xs->id.daddr.a4, req->addr, sizeof(xs->id.daddr.a4)); + } + +#ifdef IPSEC_HAVE_REAL_DEV + xs->xso.dev = adapter->vsi_ctxt.main_vsi->netdev; + xs->xso.real_dev = adapter->vsi_ctxt.main_vsi->netdev; +#else + xs->xso.dev = adapter->vsi_ctxt.main_vsi->netdev; +#endif + + if (req->mode & SXE2_MBX_IPSEC_SM4) { + xs->props.ealgo = SADB_X_EALG_SM4CBC; + enc_size = SXE2_IPSEC_SM4_KEY_BITS; + auth_size = SXE2_IPSEC_SM3_AUTH_BITS; + trunc_size = SXE2_IPSEC_SM3_TRUNC_BITS; + } else { + xs->props.ealgo = SADB_X_EALG_AESCBC; + enc_size = SXE2_IPSEC_AES_KEY_BITS; + auth_size = SXE2_IPSEC_SHA256_AUTH_BITS; + trunc_size = SXE2_IPSEC_SHA256_TRUNC_BITS; + } + + xs->ealg = kzalloc(sizeof(*xs->ealg) + enc_size / BITS_PER_BYTE, GFP_KERNEL); + if (!xs->ealg) { + ret = -ENOMEM; + goto err; + } + + xs->ealg->alg_key_len = enc_size; + (void)memcpy(xs->ealg->alg_key, req->enc_key, enc_size / BITS_PER_BYTE); + + if (req->mode & SXE2_MBX_IPSEC_AUTH) { + xs->aalg = kzalloc(sizeof(*xs->aalg) + auth_size / BITS_PER_BYTE, + GFP_KERNEL); + if (!xs->aalg) { + ret = -ENOMEM; + goto err; + } + + xs->aalg->alg_key_len = auth_size; + (void)memcpy(xs->aalg->alg_key, req->auth_key, + auth_size / BITS_PER_BYTE); + xs->aalg->alg_trunc_len = trunc_size; + } + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + + if (req->dir == SXE2_IPSEC_DIR_RX) { + ret = sxe2_ipsec_rx_state_add(adapter, xs, true, vf_id, hw_index); + if (ret) + goto err_unlock; + } else { + ret = sxe2_ipsec_tx_state_add(adapter, xs, true, vf_id, hw_index); + if (ret) + goto err_unlock; + } + + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + + return ret; + +err_unlock: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + +err: + sxe2_ipsec_vf_xs_free(xs); + + return ret; +} + +s32 sxe2_ipsec_vf_sa_free(struct sxe2_adapter *adapter, u32 vf_id, + struct sxe2_vf_ipsec_sa_del_msg *req) +{ + struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret = 0; + u32 i = 0; + u32 hw_index = req->sa_idx; + struct sxe2_tx_sa *tsa = NULL; + struct sxe2_rx_sa *rsa = NULL; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (req->dir == SXE2_IPSEC_DIR_RX) { + if (hw_index >= ipsec->max_rx_sa_cnt) { + ret = -EINVAL; + goto out_unlock; + } + + for (i = 0; i < ipsec->max_rx_sa_cnt; i++) { + if (ipsec->rx_sa_table[i].used && + ipsec->rx_sa_table[i].is_vf && + ipsec->rx_sa_table[i].vf_id == vf_id && + ipsec->rx_sa_table[i].hw_index == hw_index) { + rsa = &ipsec->rx_sa_table[i]; + break; + } + } + + if (!rsa) { + LOG_ERROR_BDF("failed to delete vf sa, sa index:%d, vf id:%d\n", + hw_index, vf_id); + ret = -ENOENT; + goto out_unlock; + } + + sxe2_ipsec_rx_state_free(adapter, rsa); + } else { + if (hw_index >= ipsec->max_tx_sa_cnt) { + ret = -EINVAL; + goto out_unlock; + } + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) { + if (ipsec->tx_sa_table[i].used && + ipsec->tx_sa_table[i].is_vf && + ipsec->tx_sa_table[i].vf_id == vf_id && + ipsec->tx_sa_table[i].hw_index == hw_index) { + tsa = &ipsec->tx_sa_table[i]; + break; + } + } + + if (!tsa) { + ret = -ENOENT; + goto out_unlock; + } + + sxe2_ipsec_tx_state_free(adapter, tsa); + } + +out_unlock: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + + return ret; +} + +static s32
sxe2_fwc_ipsec_drv_clear(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_drv_clr_req *req) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_IPSEC_DRV_CLEAR, req, + sizeof(*req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to clear driver sa, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_dpdk_ipsec_resource_release(struct sxe2_adapter *adapter, + struct sxe2_obj *obj) +{ + struct sxe2_fwc_ipsec_drv_clr_req req; + + req.func_type = obj->func_type; + req.func_id = obj->func_type == SXE2_PF ? obj->pf_id : obj->vf_id; + req.drv_id = (u8)((obj->drv_type << 6) | obj->drv_id); + return sxe2_fwc_ipsec_drv_clear(adapter, &req); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.h new file mode 100644 index 0000000000000000000000000000000000000000..16a8d168a579239d31485114591882da73a1b321 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ipsec.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ipsec.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_IPSEC_H__ +#define __SXE2_IPSEC_H__ + +#include +#ifdef NEED_COMPAT_DIM +#include "sxe2_compat_dim.h" +#else +#include +#endif +#include +#include +#include +#include "sxe2_cmd.h" + +#define SXE2_IPSEC_KEY_LENGTH (32) + +#define SXE2_IPSEC_AESKEY_LENTH (32) +#define SXE2_IPSEC_AESAUTH_LENTH (32) +#define SXE2_IPSEC_SMKEY_LENGTH (16) +#define SXE2_IPSEC_SMAUTH_LENTH (32) + +#define SXE2_IPSEC_PAYLOAD_LIMIT (2048) + +#define SXE2_IPV6_ADDR_LEN (4) + +#define SXE2_IPSEC_HW_INDEX_INVALID (0xFFFFFFFF) + +enum sxe2_ptype_outer_protocol; +struct sxe2_vt_ipsec_msg; +struct sxe2_adapter; +union sxe2_rx_desc; +struct sxe2_tx_offload_info; + +enum sxe2_ipsec_status { + SXE2_IPSEC_UNINITIALIZED, + SXE2_IPSEC_READY, + SXE2_IPSEC_RESETTING, + SXE2_IPSEC_ERROR, +}; + +struct sxe2_rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + __be32 spi; + u16 index; + u32 ipaddr[SXE2_IPV6_ADDR_LEN]; + u8 auth_key[SXE2_IPSEC_KEY_LENGTH]; + u8 enc_key[SXE2_IPSEC_KEY_LENGTH]; + bool ipv6; + bool is_auth; + bool engine; + bool used; + u32 hw_index; + u32 ip_id; + bool is_vf; + u32 vf_id; +}; + +struct sxe2_tx_sa { + struct xfrm_state *xs; + u16 index; + u8 auth_key[SXE2_IPSEC_KEY_LENGTH]; + u8 enc_key[SXE2_IPSEC_KEY_LENGTH]; + bool is_auth; + bool engine; + bool used; + u32 hw_index; + bool is_vf; + u32 vf_id; + __be32 spi; +}; + +struct sxe2_ipsec_hw_stats { + u64 rx_decrypt_success; + u64 over_2k; + u64 spi_ip_not_matched; + u64 ip_key_invalid; + u64 not_aligned; + u64 icv_fail; + u64 bypass_disable; + u64 bypass_macsec; +}; + +struct sxe2_ipsec_sw_stats { + u64 tx_error_invalid_sp; + u64 tx_error_invalid_state; + u64 tx_offload_success; + + u64 rx_error_invalid_sp; + u64 rx_error_invalid_state; + u64 rx_error_invalid_ptype; + u64 rx_error_decrypt_fail; + u64 rx_offload_success; +}; + +struct sxe2_ipsec_context { + u32 max_tx_sa_cnt; + u32 max_rx_sa_cnt; + struct sxe2_tx_sa *tx_sa_table; + struct sxe2_rx_sa *rx_sa_table; + u32 rx_sa_cnt; + u32 tx_sa_cnt; + enum sxe2_ipsec_status status; + struct sxe2_ipsec_hw_stats hw_stats; + DECLARE_HASHTABLE(rx_sa_list, 12); + struct mutex context_lock; + spinlock_t rx_hlist_lock; +}; + +s32 sxe2_fwc_ipsec_init(struct sxe2_adapter *adapter); + +s32 sxe2_ipsec_init(struct sxe2_adapter *adapter); + +void 
sxe2_ipsec_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_ipsec_tx(struct sxe2_queue *txq, struct sxe2_tx_buf *first, + struct sxe2_tx_offload_info *offload); +void sxe2_ipsec_rx(struct sxe2_queue *rxq, union sxe2_rx_desc *desc, + struct sk_buff *skb, u16 ptype); + +void sxe2_ipsec_stop(struct sxe2_adapter *adapter); + +void sxe2_ipsec_sa_clean(struct sxe2_adapter *adapter); + +s32 sxe2_ipsec_rebuild(struct sxe2_adapter *adapter); + +bool sxe2_is_ipsec_can_not_disable(struct sxe2_adapter *adapter); + +bool sxe2_ipsec_conflict_features_check(struct sxe2_adapter *adapter, + struct net_device *netdev); + +bool sxe2_is_ipsec_offload_enable(struct net_device *netdev); + +s32 sxe2_ipsec_vf_sa_add(struct sxe2_adapter *adapter, u32 vf_id, + struct sxe2_vf_ipsec_sa_add_msg *req, u32 *hw_index, + bool is_restore); + +s32 sxe2_ipsec_vf_sa_free(struct sxe2_adapter *adapter, u32 vf_id, + struct sxe2_vf_ipsec_sa_del_msg *req); + +void sxe2_ipsec_vf_sa_clear(struct sxe2_adapter *adapter, u32 vf_id); + +void sxe2_ipsec_rx_state_free(struct sxe2_adapter *adapter, + struct sxe2_rx_sa *rsa); + +void sxe2_ipsec_tx_state_free(struct sxe2_adapter *adapter, + struct sxe2_tx_sa *tsa); + +s32 sxe2_ipsec_fwc_get_ipsec_capa(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_capa_resq *resp); + +s32 sxe2_fwc_ipsec_tx_sa_add(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_add_req *req, + struct sxe2_fwc_ipsec_txsa_add_resp *resp); + +s32 sxe2_fwc_ipsec_tx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_del_req *req); + +s32 sxe2_fwc_ipsec_tx_sa_set(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_txsa_set_req *req, + struct sxe2_fwc_ipsec_txsa_add_resp *resp); + +s32 sxe2_fwc_ipsec_rx_sa_add(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_add_req *req, + struct sxe2_fwc_ipsec_rxsa_add_resp *resp); + +s32 sxe2_fwc_ipsec_rx_sa_del(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_del_req *req); + +s32 sxe2_fwc_ipsec_rx_sa_set(struct sxe2_adapter *adapter, + struct sxe2_fwc_ipsec_rxsa_set_req *req, + struct sxe2_fwc_ipsec_rxsa_add_resp *resp); + +s32 sxe2_dpdk_ipsec_resource_release(struct sxe2_adapter *adapter, + struct sxe2_obj *obj); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..3cf048088e8f0f909aa9778b53cda9575c75b4e2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.c @@ -0,0 +1,947 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_irq.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_hw.h" +#include "sxe2_common.h" +#include "sxe2_log.h" +#include "sxe2_monitor.h" +#include "sxe2_rx.h" +#include "sxe2_tx.h" +#include "sxe2_xsk.h" +#include "sxe2_irq.h" + +#define SXE2_MSIX_SCHEME_CNT 9 +#define SXE2_DIM_DFLT_PROFILE_IDX 1 + +#define SXE2_MSIX_SCHEME_SAFE_MOD_INDEX 0 +#define SXE2_MSIX_SCHEME_NORMAL_MOD_INDEX 1 + +#define SXE2_MSIX_CNT_WITH_2_PORT 1024 +#define SXE2_ADQ_MAX_MSIXS 256 + +#define SXE2_DYNAMIC_STATE_ITR 4 + +static inline bool sxe2_judge_over_size(struct sxe2_adapter *adapter, u32 scheme_cnt) +{ + if (scheme_cnt >= SXE2_MSIX_SCHEME_CNT) { + LOG_INFO_BDF("msix scheme count(%d) init over max size(%d)\n", + scheme_cnt, SXE2_MSIX_SCHEME_CNT); + return true; + } + return false; +} + +static const u16 tx_itr_profile[] = { + 2, + 8, + 40, + 128, + 256 +}; + +static const u16 rx_itr_profile[] = { + 2, + 8, + 16, + 32, + 64 +}; + +irqreturn_t sxe2_msix_ring_irq_handler(int __always_unused irq, void *data) +{ + struct sxe2_irq_data *irq_data = (struct sxe2_irq_data *)data; + + if (!SXE2_IRQ_HAS_TXQ(irq_data) && !SXE2_IRQ_HAS_RXQ(irq_data)) + goto l_end; + + irq_data->event_ctr++; + + napi_schedule(&irq_data->napi); +l_end: + return IRQ_HANDLED; +} + +irqreturn_t sxe2_msix_lb_rx_irq_handler(int __always_unused irq, void *data) +{ + return IRQ_HANDLED; +} + +irqreturn_t sxe2_msix_ctrl_vsi_handler(int __always_unused irq, void *data) +{ + struct sxe2_irq_data *irq_data = (struct sxe2_irq_data *)data; + +#define FNAV_RX_DESC_CLEAN_BUDGET 64 + if (SXE2_IRQ_HAS_RXQ(irq_data)) + (void)sxe2_rxq_irq_clean(irq_data->rx.list.next, + FNAV_RX_DESC_CLEAN_BUDGET); + + if (SXE2_IRQ_HAS_TXQ(irq_data)) + sxe2_ctrl_txq_irq_clean(irq_data->tx.list.next); + + return IRQ_HANDLED; +} + +STATIC s32 sxe2_msix_entries_alloc(struct sxe2_adapter *adapter, u16 msix_cnt) +{ + s32 ret = 0; + u16 i; + + adapter->irq_ctxt.msix_entries = + devm_kcalloc(&adapter->pdev->dev, msix_cnt, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->irq_ctxt.msix_entries) { + ret = -ENOMEM; + LOG_DEV_ERR("msi-x irq entry num:%u per size:%lu kcalloc failed, ret=%d\n", + msix_cnt, sizeof(struct msix_entry), ret); + goto l_end; + } + + for (i = 0; i < msix_cnt; i++) + adapter->irq_ctxt.msix_entries[i].entry = i; + +l_end: + return ret; +} + +static void sxe2_msix_entries_free(struct sxe2_adapter *adapter) +{ + if (!adapter->irq_ctxt.msix_entries) + return; + devm_kfree(&adapter->pdev->dev, adapter->irq_ctxt.msix_entries); + adapter->irq_ctxt.msix_entries = NULL; +} + +STATIC s32 sxe2_msix_enable(struct sxe2_adapter *adapter, u16 min_msix, u16 msix_cnt) +{ + s32 ret; + + ret = sxe2_msix_entries_alloc(adapter, msix_cnt); + if (ret) + goto l_end; + + ret = pci_enable_msix_range(adapter->pdev, adapter->irq_ctxt.msix_entries, + min_msix, msix_cnt); + if (ret < 0) { + LOG_ERROR_BDF("enable msix range[%d-%d] failed, ret=%d\n", min_msix, + msix_cnt, ret); + goto l_ena_failed; + } + return ret; + +l_ena_failed: + sxe2_msix_entries_free(adapter); +l_end: + return ret; +} + +STATIC void sxe2_msix_deinit(struct sxe2_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + sxe2_msix_entries_free(adapter); +} + +STATIC void sxe2_safe_mode_irq_num_init(struct sxe2_adapter *adapter) +{ + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + + irq_layout->event = SXE2_EVENT_MSIX_CNT; + irq_layout->fnav = 0; + irq_layout->eswitch = 0; + 
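/* safe mode: only the event vector and the minimum lan vectors are reserved; the remaining feature vectors stay at zero */ +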
irq_layout->dpdk_eswitch = 0; + irq_layout->lan = SXE2_LAN_MSIX_MIN_CNT; + irq_layout->dpdk = 0; + irq_layout->rdma = 0; + irq_layout->macvlan = 0; + irq_layout->sriov = 0; +} + +STATIC void sxe2_safe_mode_irq_layout_init(struct sxe2_adapter *adapter) +{ + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u16 max_msix = adapter->irq_ctxt.max_cnt; + + irq_layout->event_offset = SXE2_EVENT_IRQ_IDX; + irq_layout->fnav_offset = irq_layout->event_offset + irq_layout->event; + irq_layout->eswitch_offset = irq_layout->fnav_offset + irq_layout->fnav; + irq_layout->lan_offset = irq_layout->eswitch_offset + irq_layout->eswitch; + irq_layout->rdma_offset = irq_layout->lan_offset + irq_layout->lan; + irq_layout->macvlan_offset = irq_layout->rdma_offset + irq_layout->rdma; + irq_layout->sriov_offset = max_msix; +} + +STATIC void sxe2_irq_num_init(struct sxe2_adapter *adapter) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 local_cpu_cnt = sxe2_local_cpus_cnt_get(dev); + u32 standard_cpu_cnt = sxe2_standardize_cpu_cnt(local_cpu_cnt); + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u32 mode = (u32)sxe2_com_mode_get(adapter); + + irq_layout->event = SXE2_EVENT_MSIX_CNT; + irq_layout->lb = SXE2_LB_RXQ_MSIX_CNT; + irq_layout->fnav = SXE2_FNAV_MSIX_CNT; + irq_layout->eswitch = SXE2_ESWITCH_MSIX_CNT; + + if (mode == SXE2_COM_MODULE_KERNEL) { + irq_layout->fnav = SXE2_FNAV_MSIX_CNT; + irq_layout->lan = (u16)max_t(int, standard_cpu_cnt, + SXE2_LAN_MSIX_MIN_CNT); + irq_layout->rdma = (u16)(standard_cpu_cnt > SXE2_RDMA_MSIX_MIN_CNT ? + standard_cpu_cnt + SXE2_RDMA_AEQ_MSIX_CNT : + SXE2_RDMA_MSIX_MIN_CNT); + } else if (mode == SXE2_COM_MODULE_DPDK) { + irq_layout->dpdk = SXE2_DPDK_MSIX_MAX_CNT; + irq_layout->dpdk_eswitch = SXE2_DPDK_ESWITCH_MSIX_CNT; + } else { + irq_layout->fnav = SXE2_FNAV_MSIX_CNT; + irq_layout->lan = (u16)max_t(int, standard_cpu_cnt, + SXE2_LAN_MSIX_MIN_CNT); + irq_layout->rdma = (u16)(standard_cpu_cnt > SXE2_RDMA_MSIX_MIN_CNT ? 
+ standard_cpu_cnt + SXE2_RDMA_AEQ_MSIX_CNT : + SXE2_RDMA_MSIX_MIN_CNT); + irq_layout->dpdk = SXE2_DPDK_MSIX_DFLT_CNT; + irq_layout->dpdk_eswitch = SXE2_DPDK_ESWITCH_MSIX_CNT; + } + + irq_layout->macvlan = 0; + irq_layout->sriov = 0; +} + +STATIC void sxe2_irq_layout_init(struct sxe2_adapter *adapter) +{ + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u16 max_msix = adapter->irq_ctxt.max_cnt; + + irq_layout->event_offset = SXE2_EVENT_IRQ_IDX; + irq_layout->lb_offset = irq_layout->event_offset + irq_layout->event; + irq_layout->fnav_offset = irq_layout->lb_offset + irq_layout->lb; + irq_layout->eswitch_offset = irq_layout->fnav_offset + irq_layout->fnav; + + irq_layout->lan_offset = irq_layout->eswitch_offset + irq_layout->eswitch; + irq_layout->rdma_offset = irq_layout->lan_offset + irq_layout->lan; + irq_layout->dpdk_offset = irq_layout->rdma_offset + irq_layout->rdma; + irq_layout->dpdk_eswitch_offset = irq_layout->dpdk_offset + irq_layout->dpdk; + + irq_layout->macvlan_offset = + irq_layout->dpdk_eswitch_offset + irq_layout->dpdk_eswitch; + irq_layout->sriov_offset = max_msix; +} + +STATIC void sxe2_msix_irq_num_init(struct sxe2_adapter *adapter) +{ + if (sxe2_is_safe_mode(adapter)) + sxe2_safe_mode_irq_num_init(adapter); + else + sxe2_irq_num_init(adapter); +} + +STATIC void sxe2_msix_irq_layout_init(struct sxe2_adapter *adapter) +{ + if (sxe2_is_safe_mode(adapter)) + sxe2_safe_mode_irq_layout_init(adapter); + else + sxe2_irq_layout_init(adapter); +} + +STATIC u16 sxe2_msix_num_calc(struct sxe2_adapter *adapter) +{ + u16 irq_cnt = 0; + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + + irq_cnt += irq_layout->lan + irq_layout->event + irq_layout->rdma + + irq_layout->eswitch + irq_layout->fnav + irq_layout->lb + + irq_layout->dpdk + irq_layout->dpdk_eswitch; + + return irq_cnt; +} + +STATIC void sxe2_msix_adjust(struct sxe2_adapter *adapter, u16 max_msix) +{ + u16 left_cnt; + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u32 mode = (u32)sxe2_com_mode_get(adapter); + + s32 expected_cnt = sxe2_msix_num_calc(adapter); + + if (sxe2_is_safe_mode(adapter)) + goto l_end; + + left_cnt = (u16)(max_msix - SXE2_EVENT_MSIX_CNT); + if (expected_cnt > max_msix) { + if (mode == SXE2_COM_MODULE_KERNEL) { + left_cnt = (u16)(left_cnt - SXE2_FNAV_MSIX_CNT - + SXE2_ESWITCH_MSIX_CNT); + + irq_layout->lan = left_cnt / 2; + irq_layout->rdma = (u16)(left_cnt - irq_layout->lan); + } else if (mode == SXE2_COM_MODULE_DPDK) { + irq_layout->dpdk = + (u16)(left_cnt - (u16)SXE2_ESWITCH_MSIX_CNT - + (u16)SXE2_DPDK_ESWITCH_MSIX_CNT); + } else { + left_cnt = (u16)(left_cnt - SXE2_FNAV_MSIX_CNT - + SXE2_DPDK_MSIX_MIN_CNT - + SXE2_DPDK_ESWITCH_MSIX_CNT - + SXE2_ESWITCH_MSIX_CNT); + irq_layout->lan = left_cnt / 2; + irq_layout->rdma = (u16)(left_cnt - irq_layout->lan); + irq_layout->dpdk = SXE2_DPDK_MSIX_MIN_CNT; + } + } + +l_end: + return; +} + +STATIC s32 sxe2_msix_init(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + s32 expected_cnt; + s32 applied_cnt = 0; + u16 min_msix; + u16 max_msix = adapter->irq_ctxt.max_cnt; + + sxe2_msix_irq_num_init(adapter); + + min_msix = sxe2_min_msix_num_calc(adapter); + + expected_cnt = sxe2_msix_num_calc(adapter) + SXE2_MAX_NUM_VMDQ_VSI; + + applied_cnt = min_t(s32, expected_cnt, (s32)max_msix); + + applied_cnt = sxe2_msix_enable(adapter, min_msix, (u16)applied_cnt); + if (applied_cnt < 0) { + ret = applied_cnt; + goto l_end; + } + + if (applied_cnt < (expected_cnt - SXE2_MAX_NUM_VMDQ_VSI)) { + 
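/* fewer msix vectors granted than requested: give up VMDq capability and rebalance the irq layout */ +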
clear_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags); + adapter->irq_ctxt.fixed_cnt = (u16)applied_cnt; + sxe2_msix_adjust(adapter, (u16)applied_cnt); + } else { + adapter->irq_ctxt.fixed_cnt = sxe2_msix_num_calc(adapter); + } + + adapter->irq_ctxt.avail_cnt = (u16)expected_cnt; + + sxe2_msix_irq_layout_init(adapter); + +l_end: + return ret; +} + +s32 sxe2_irq_init(struct sxe2_adapter *adapter) +{ + s32 ret; + + ret = sxe2_msix_init(adapter); + if (ret) + goto l_end; + + ret = sxe2_event_irq_request(adapter); + if (ret) + goto l_failed; + + return ret; + +l_failed: + sxe2_msix_deinit(adapter); +l_end: + return ret; +} + +STATIC s32 sxe2_msix_resume(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + s32 applied_cnt = 0; + u16 min_msix; + + min_msix = sxe2_min_msix_num_calc(adapter); + + applied_cnt = sxe2_msix_enable(adapter, min_msix, + adapter->irq_ctxt.avail_cnt); + if (applied_cnt < + adapter->irq_ctxt.avail_cnt) { + ret = -ENOMEM; + goto l_end; + } + +l_end: + return ret; +} + +s32 sxe2_irq_resume(struct sxe2_adapter *adapter) +{ + s32 ret; + + ret = sxe2_msix_resume(adapter); + if (ret != 0) + goto l_failed; + + ret = sxe2_event_irq_request(adapter); + if (ret != 0) + goto l_failed; + + return ret; + +l_failed: + sxe2_msix_deinit(adapter); + return ret; +} + +void sxe2_irq_deinit(struct sxe2_adapter *adapter) +{ + sxe2_event_irq_free(adapter); + sxe2_msix_deinit(adapter); +} + +#define EVENT_INDEX0 (0) +#define EVENT_INDEX1 (1) +#define EVENT_INDEX2 (2) +STATIC void sxe2_event_irq_common_handler(struct sxe2_adapter *adapter, u32 oicr) +{ + u32 fw_event; + struct sxe2_hw *hw = &adapter->hw; + s32 gltsyn_stat; + + if (oicr & SXE2_PF_INT_OICR_FW) { + fw_event = sxe2_hw_fw_irq_cause_get(hw); + if (fw_event & SXE2_PF_INT_RDMA_AEQ_OVERFLOW) { + LOG_ERROR_BDF("recv fw rdma aeq overflow irq cause.\n"); + set_bit(SXE2_FLAG_RDMA_AEQ_OVERFLOW, adapter->flags); + sxe2_monitor_work_schedule(adapter); + } + + if (fw_event & SXE2_PF_INT_CGMAC_LINK_CHG) { + LOG_INFO_BDF("lsc irq occur.\n"); + set_bit(SXE2_FLAG_LINK_CHECK, adapter->flags); + sxe2_monitor_work_schedule(adapter); + } + + if (fw_event & SXE2_PF_INT_VFLR_DONE) { + LOG_INFO_BDF("vflr irq occur.\n"); + set_bit(SXE2_FLAG_VFLR_PENDING, adapter->flags); + sxe2_dev_ctrl_work_schedule(adapter); + } + } + + if (oicr & SXE2_PF_INT_OICR_FWQ_INT || oicr & SXE2_PF_INT_OICR_MBXQ_INT) + sxe2_rq_recv_work_schedule(adapter); + + if (oicr & SXE2_PF_INT_OICR_ECC_ERR) + adapter->dev_ctrl_ctxt.print_flag |= SXE2_PRINT_ECC_ERROR; + + if ((oicr & SXE2_PF_INT_OICR_INT_CFG_ADDR_ERR) || + (oicr & SXE2_PF_INT_OICR_INT_CFG_DATA_ERR) || + (oicr & SXE2_PF_INT_OICR_INT_CFG_ADR_UNRANGE)) { + adapter->dev_ctrl_ctxt.print_flag |= SXE2_PRINT_REG_CFG_ERR; + } + + if (oicr & SXE2_PF_INT_OICR_INT_RAM_CONFLICT) + adapter->dev_ctrl_ctxt.print_flag |= SXE2_PRINT_RAM_CONFLICT; + + if (oicr & SXE2_PF_INT_OICR_TSYN_TX) + sxe2_ptp_txts_process(adapter); + + if (oicr & SXE2_PF_INT_OICR_TSYN_EVENT) { + gltsyn_stat = sxe2_hw_ptp_stat_get(&adapter->hw); + if (gltsyn_stat & GLTSYN_STAT_EVENT0_M) + set_bit(EVENT_INDEX0, adapter->ptp_ctxt.extts.irq); + + if (gltsyn_stat & GLTSYN_STAT_EVENT1_M) + set_bit(EVENT_INDEX1, adapter->ptp_ctxt.extts.irq); + + if (gltsyn_stat & GLTSYN_STAT_EVENT2_M) + set_bit(EVENT_INDEX2, adapter->ptp_ctxt.extts.irq); + + sxe2_ptp_extts_intr(adapter); + } + + if (oicr & SXE2_PF_INT_OICR_LAN_TX_ERR) + set_bit(SXE2_FLAG_MDD_TX_PENDING, adapter->flags); + + if (oicr & SXE2_PF_INT_OICR_LAN_RX_ERR) + set_bit(SXE2_FLAG_MDD_RX_PENDING, adapter->flags); + + if (oicr & 
SXE2_PF_INT_OICR_LAN_TX_ERR || oicr & SXE2_PF_INT_OICR_LAN_RX_ERR) + sxe2_monitor_work_schedule(adapter); +} + +STATIC irqreturn_t sxe2_msix_event_irq_handler(int irq, void *data) +{ + struct sxe2_adapter *adapter = data; + struct sxe2_hw *hw = &adapter->hw; + u32 oicr, ena_mask; +#ifndef SXE2_HARDWARE_SIM + + if (unlikely(sxe2_corer_check(adapter))) { + sxe2_dev_ctrl_work_schedule(adapter); + goto l_end; + } +#endif + oicr = sxe2_hw_evt_irq_cause_get(hw); + ena_mask = sxe2_hw_evt_irq_mask_get(hw); + + ena_mask |= SXE2_PF_INT_OICR_FWQ_INT | SXE2_PF_INT_OICR_MBXQ_INT; + + ena_mask |= SXE2_PF_INT_OICR_INT_CFG_ADDR_ERR | + SXE2_PF_INT_OICR_INT_CFG_DATA_ERR | + SXE2_PF_INT_OICR_INT_CFG_ADR_UNRANGE | SXE2_PF_INT_OICR_ECC_ERR | + SXE2_PF_INT_OICR_INT_RAM_CONFLICT; + + LOG_DEBUG_BDF("event irq oicr:0x%x, ena_mask: 0x%x.\n", oicr, ena_mask); + oicr &= ena_mask; + adapter->irq_ctxt.event_irq_cnt++; + + sxe2_event_irq_common_handler(adapter, oicr); +#ifndef SXE2_HARDWARE_SIM +l_end: +#endif + sxe2_hw_irq_enable(hw, SXE2_EVENT_IRQ_IDX); + return IRQ_HANDLED; +} + +void sxe2_event_irq_enable(struct sxe2_adapter *adapter) +{ + struct sxe2_hw *hw = &adapter->hw; + u32 value = 0; + u16 itr_idx = SXE2_ITR_IDX_0; + + value = SXE2_PF_INT_OICR_VFLR | SXE2_PF_INT_OICR_SWINT | + SXE2_PF_INT_OICR_FW | SXE2_PF_INT_OICR_LAN_TX_ERR | + SXE2_PF_INT_OICR_LAN_RX_ERR | SXE2_PF_INT_OICR_GRST; + + sxe2_hw_evt_irq_cfg(hw, value, itr_idx, SXE2_EVENT_IRQ_IDX); + sxe2_hw_fwq_irq_cfg(hw, itr_idx, SXE2_EVENT_IRQ_IDX); + sxe2_hw_mbxq_irq_cfg(hw, itr_idx, SXE2_EVENT_IRQ_IDX); + (void)sxe2_flush(hw); + + sxe2_hw_irq_itr_set(hw, SXE2_EVENT_IRQ_IDX, itr_idx, SXE2_ITR_20K); + (void)sxe2_flush(hw); + + sxe2_hw_irq_enable(hw, SXE2_EVENT_IRQ_IDX); + + adapter->cmd_channel_ctxt.mode = SXE2_CMD_NOTIFY; +} + +void sxe2_event_irq_disable(struct sxe2_adapter *adapter) +{ + struct sxe2_hw *hw = &adapter->hw; + struct msix_entry *msix_entries = NULL; + + adapter->cmd_channel_ctxt.mode = SXE2_CMD_POLLING; + + msix_entries = adapter->irq_ctxt.msix_entries; + sxe2_hw_fwq_irq_clear(hw); + sxe2_hw_mbxq_irq_clear(hw); + + sxe2_hw_evt_irq_clear(hw); + + if (msix_entries) + synchronize_irq(msix_entries[SXE2_EVENT_IRQ_IDX].vector); + + sxe2_hw_irq_disable(hw, SXE2_EVENT_IRQ_IDX); + sxe2_hw_irq_itr_set(hw, SXE2_EVENT_IRQ_IDX, SXE2_ITR_IDX_0, 0); + + (void)sxe2_flush(hw); +} + +s32 sxe2_event_irq_request(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct msix_entry *msix_entries = adapter->irq_ctxt.msix_entries; + + (void)snprintf(adapter->irq_ctxt.event_int_name, + sizeof(adapter->irq_ctxt.event_int_name) - 1, "%s-%s:event", + dev_driver_string(dev), dev_name(dev)); + ret = devm_request_irq(dev, msix_entries[SXE2_EVENT_IRQ_IDX].vector, + sxe2_msix_event_irq_handler, 0, + adapter->irq_ctxt.event_int_name, adapter); + if (ret) { + LOG_DEV_ERR("devm_request_irq for %s failed, ret=%d\n", + adapter->irq_ctxt.event_int_name, ret); + goto l_end; + } + + sxe2_event_irq_enable(adapter); + +l_end: + return ret; +} + +void sxe2_event_irq_free(struct sxe2_adapter *adapter) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct msix_entry *msix_entries = adapter->irq_ctxt.msix_entries; + + sxe2_event_irq_disable(adapter); + + if (msix_entries) + devm_free_irq(dev, msix_entries[SXE2_EVENT_IRQ_IDX].vector, adapter); + + memset(adapter->irq_ctxt.event_int_name, 0, + sizeof(adapter->irq_ctxt.event_int_name)); +} + +s32 sxe2_irq_offset_get(struct sxe2_adapter *adapter, u16 cnt, u8 vsi_type) +{ + + s32 
ret = 0; + s32 offset; + unsigned long *map = adapter->irq_ctxt.map; + u16 size = adapter->irq_ctxt.avail_cnt; + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u16 start_idx = irq_layout->macvlan_offset; + + if (cnt > size) { + ret = -ENOMEM; + LOG_ERROR_BDF("get %d irq from %u failed, ret=%d\n", cnt, size, ret); + goto l_end; + } + + switch (vsi_type) { + case SXE2_VSI_T_PF: + offset = irq_layout->lan_offset; + break; + case SXE2_VSI_T_LB: + offset = irq_layout->lb_offset; + break; + case SXE2_VSI_T_CTRL: + offset = irq_layout->fnav_offset; + break; + case SXE2_VSI_T_RDMA: + offset = irq_layout->rdma_offset; + break; + case SXE2_VSI_T_ESW: + offset = irq_layout->eswitch_offset; + break; + case SXE2_VSI_T_DPDK_PF: + offset = irq_layout->dpdk_offset; + break; + case SXE2_VSI_T_DPDK_ESW: + offset = irq_layout->dpdk_eswitch_offset; + break; + case SXE2_VSI_T_MACVLAN: + mutex_lock(&adapter->irq_ctxt.lock); + offset = (s32)bitmap_find_next_zero_area(map, + (s32)size, (s32)start_idx, (s32)cnt, 0); + if (offset >= (s32)size) { + ret = -ENOMEM; + LOG_ERROR_BDF("get %d irqs from map(size %u) failed, ret=%d\n", + cnt, size, ret); + mutex_unlock(&adapter->irq_ctxt.lock); + goto l_end; + } + bitmap_set(map, (unsigned int)offset, (unsigned int)cnt); + irq_layout->macvlan++; + mutex_unlock(&adapter->irq_ctxt.lock); + break; + default: + offset = -1; + break; + } + + if (offset >= (s32)size) { + ret = -ENOMEM; + LOG_ERROR_BDF("get %d irqs from map(size %u) failed, ret=%d\n", cnt, + size, ret); + goto l_end; + } + ret = offset; + +l_end: + return ret; +} + +void sxe2_irq_rate_limit_init(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + u16 rate_limit; + + if ((SXE2_IS_ITR_DYNAMIC(&irq_data->tx) || + SXE2_IS_ITR_DYNAMIC(&irq_data->rx)) && + irq_data->vsi != irq_data->vsi->adapter->eswitch_ctxt.esw_vsi) { + rate_limit = SXE2_DYNAMIC_STATE_ITR; + } else { + rate_limit = irq_data->rate_limit; + } + + sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf, rate_limit); +} + +static void sxe2_dim_work_tx(struct work_struct *work) +{ + struct sxe2_irq_data *irq_data; + struct sxe2_q_container *qc; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + irq_data = (struct sxe2_irq_data *)dim->priv; + qc = &irq_data->tx; + + WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_itr_profile)); + SXE2_BUG_ON(dim->profile_ix >= ARRAY_SIZE(tx_itr_profile)); + + itr = tx_itr_profile[dim->profile_ix]; + + sxe2_itr_set(irq_data, qc, itr); + + dim->state = DIM_START_MEASURE; +} + +static void sxe2_dim_work_rx(struct work_struct *work) +{ + struct sxe2_irq_data *irq_data; + struct sxe2_q_container *qc; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + irq_data = (struct sxe2_irq_data *)dim->priv; + qc = &irq_data->rx; + + WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_itr_profile)); + SXE2_BUG_ON(dim->profile_ix >= ARRAY_SIZE(rx_itr_profile)); + + itr = rx_itr_profile[dim->profile_ix]; + + sxe2_itr_set(irq_data, qc, itr); + + dim->state = DIM_START_MEASURE; +} + +void sxe2_dim_init(struct sxe2_irq_data *irq_data) +{ + struct sxe2_q_container *qc; + u16 itr; + + qc = &irq_data->tx; + INIT_WORK(&qc->dim.work, sxe2_dim_work_tx); + qc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qc->dim.profile_ix = SXE2_DIM_DFLT_PROFILE_IDX; + qc->dim.priv = irq_data; + + itr = (u16)(SXE2_IS_ITR_DYNAMIC(qc) ? 
tx_itr_profile[qc->dim.profile_ix] + : qc->itr_setting); + + sxe2_itr_set(irq_data, qc, itr); + + qc = &irq_data->rx; + INIT_WORK(&qc->dim.work, sxe2_dim_work_rx); + qc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qc->dim.profile_ix = SXE2_DIM_DFLT_PROFILE_IDX; + qc->dim.priv = irq_data; + + itr = (u16)(SXE2_IS_ITR_DYNAMIC(qc) ? rx_itr_profile[qc->dim.profile_ix] + : qc->itr_setting); + sxe2_itr_set(irq_data, qc, itr); +} + +static void sxe2_net_dim(u16 event_ctr, u64 packets, u64 bytes, struct dim *dim) +{ + struct dim_sample dim_sample = {}; + + dim_update_sample(event_ctr, packets, bytes, &dim_sample); + dim_sample.comp_ctr = 0; + + if (ktime_ms_delta(dim_sample.time, dim->start_sample.time) >= 1000) + dim->state = DIM_START_MEASURE; + + net_dim(dim, dim_sample); +} + +void sxe2_dynamic_itr(struct sxe2_irq_data *irq_data) +{ + struct sxe2_q_container *tqc = &irq_data->tx; + struct sxe2_q_container *rqc = &irq_data->rx; + struct sxe2_queue *queue; + + if (SXE2_IS_ITR_DYNAMIC(tqc)) { + u64 packets = 0, bytes = 0; + + sxe2_for_each_queue(queue, irq_data->tx.list) + { + packets += queue->stats->packets; + bytes += queue->stats->bytes; + } + sxe2_net_dim(irq_data->event_ctr, packets, bytes, &irq_data->tx.dim); + } + + if (SXE2_IS_ITR_DYNAMIC(rqc)) { + u64 packets = 0, bytes = 0; + + sxe2_for_each_queue(queue, irq_data->rx.list) + { + packets += queue->stats->packets; + bytes += queue->stats->bytes; + } + sxe2_net_dim(irq_data->event_ctr, packets, bytes, &irq_data->rx.dim); + } +} + +void sxe2_irq_itr_init(struct sxe2_irq_data *irq_data) +{ + sxe2_dim_init(irq_data); +} + +void sxe2_trigger_soft_intr(struct sxe2_hw *hw, struct sxe2_irq_data *irq_data) +{ + u32 value; + + irq_data->multiple_polling = false; + value = SXE2_VF_DYN_CTL_INTENABLE | + SXE2_VF_DYN_CTL_CLEARPBA | + (SXE2_ITR_IDX_2 << SXE2_VF_DYN_CTL_ITR_IDX_S) | + (SXE2_ITR_20K / hw->hw_cfg.itr_gran) + << SXE2_VF_DYN_CTL_INTERVAL_S | + SXE2_VF_DYN_CTL_SWINT_TRIG | + SXE2_ITR_IDX_2 << SXE2_VF_DYN_CTL_SW_ITR_IDX_S | + SXE2_VF_DYN_CTL_SW_ITR_IDX_ENABLE; + + sxe2_hw_irq_dyn_ctl(hw, irq_data->idx_in_pf, value); +} + +STATIC void sxe2_napi_irq_enable(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + + if (irq_data->multiple_polling) + sxe2_trigger_soft_intr(hw, irq_data); + else + sxe2_hw_irq_enable(hw, irq_data->idx_in_pf); +} + +int sxe2_napi_poll(struct napi_struct *napi, int weight) +{ + struct sxe2_irq_data *irq_data = + container_of(napi, struct sxe2_irq_data, napi); + struct sxe2_queue *txq; + struct sxe2_queue *rxq; + int total_cleaned = 0; + int budget_per_ring; + bool complete = true; + s32 clean; + + sxe2_for_each_queue(txq, irq_data->tx.list) + { +#ifdef HAVE_AF_XDP_ZC_SUPPORT + bool wd = txq->xsk_pool ? sxe2_txq_irq_clean_zc(txq, weight) + : sxe2_txq_irq_clean(txq, weight); +#else + bool wd = sxe2_txq_irq_clean(txq, weight); +#endif + if (!wd) + complete = false; + } + + if (unlikely(weight <= 0)) + return weight; + + if (unlikely(irq_data->rx.list.cnt > 1)) + budget_per_ring = max_t(int, ((u32)weight / irq_data->rx.list.cnt), + 1); + else + budget_per_ring = weight; + + sxe2_trace(irq_rxclean_begin, irq_data, total_cleaned); + sxe2_for_each_queue(rxq, irq_data->rx.list) + { + sxe2_trace(rxq_clean_begin, rxq); +#ifdef HAVE_AF_XDP_ZC_SUPPORT + clean = rxq->xsk_pool ? 
sxe2_rx_irq_clean_zc(rxq, budget_per_ring) + : sxe2_rxq_irq_clean(rxq, budget_per_ring); +#else + clean = sxe2_rxq_irq_clean(rxq, budget_per_ring); +#endif + sxe2_trace(rxq_clean_end, rxq, clean); + total_cleaned += clean; + if (clean >= budget_per_ring) + complete = false; + } + sxe2_trace(irq_rxclean_end, irq_data, total_cleaned); + + if (!complete) { + irq_data->multiple_polling = true; + return weight; + } + + if (napi_complete_done(napi, total_cleaned)) { + sxe2_dynamic_itr(irq_data); + sxe2_napi_irq_enable(irq_data); + } + + return min_t(int, total_cleaned, (weight - 1)); +} + +int sxe2_esw_napi_poll(struct napi_struct *napi, int weight) +{ + struct sxe2_irq_data *irq_data = + container_of(napi, struct sxe2_irq_data, napi); + struct sxe2_queue *txq; + struct sxe2_queue *rxq; + int total_cleaned = 0; + bool complete = true; + + sxe2_for_each_queue(txq, irq_data->tx.list) + { + bool wd = sxe2_txq_irq_clean(txq, weight); + + if (!wd) + complete = false; + } + + if (unlikely(weight <= 0)) + return weight; + + sxe2_for_each_queue(rxq, irq_data->rx.list) + { + s32 clean = sxe2_rxq_irq_clean(rxq, weight); + + total_cleaned += clean; + if (clean >= weight) + complete = false; + } + + if (!complete) { + irq_data->multiple_polling = true; + return weight; + } + + if (napi_complete_done(napi, total_cleaned)) + sxe2_napi_irq_enable(irq_data); + + return min_t(int, total_cleaned, (weight - 1)); +} + +s32 sxe2_dpdk_irq_cnt_get(void *adapter) +{ + struct sxe2_adapter *pf_adapter = adapter; + + return pf_adapter->irq_ctxt.irq_layout.dpdk; +} + +s32 sxe2_dpdk_irq_vector_idx_get(void *adapter, u16 irq_idx) +{ + struct sxe2_adapter *pf_adapter = adapter; + u16 offset = pf_adapter->irq_ctxt.irq_layout.dpdk_offset + irq_idx; + + if (!pf_adapter->irq_ctxt.msix_entries) + return -EINVAL; + + return (s32)pf_adapter->irq_ctxt.msix_entries[offset].vector; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..a3e03e1ce1e9d05f092647df81819ffdc1099ba5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_irq.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_irq.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_IRQ_H__
+#define __SXE2_IRQ_H__
+
+#include <linux/interrupt.h>
+#ifdef NEED_COMPAT_DIM
+#include "sxe2_compat_dim.h"
+#else
+#include <linux/dim.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+
+#include "sxe2_queue.h"
+
+struct sxe2_vsi;
+struct sxe2_adapter;
+struct sxe2_hw;
+
+#define SXE2_IRQ_MAX_CNT (2048)
+
+#define SXE2_LAN_MSIX_MIN_CNT (1)
+#define SXE2_EVENT_MSIX_CNT (1)
+
+#define SXE2_MSIX_MIN_CNT \
+	(SXE2_LAN_MSIX_MIN_CNT + SXE2_EVENT_MSIX_CNT)
+#define SXE2_FNAV_MSIX_CNT 1
+#define SXE2_RDMA_MSIX_MIN_CNT 2
+#define SXE2_ESWITCH_MSIX_CNT 1
+#define SXE2_RDMA_AEQ_MSIX_CNT 1
+#define SXE2_LB_RXQ_MSIX_CNT 1
+
+#define SXE2_DPDK_MSIX_MAX_CNT 256
+#define SXE2_DPDK_MSIX_DFLT_CNT 64
+#define SXE2_DPDK_MSIX_MIN_CNT 1
+#define SXE2_DPDK_ESWITCH_MSIX_CNT SXE2_ESWITCH_MSIX_CNT
+
+#define SXE2_NON_SAFEMODE_MIN_MSIX \
+	(SXE2_MSIX_MIN_CNT + SXE2_RDMA_MSIX_MIN_CNT \
+	+ SXE2_FNAV_MSIX_CNT + SXE2_ESWITCH_MSIX_CNT + SXE2_LB_RXQ_MSIX_CNT)
+
+#define SXE2_EVENT_IRQ_IDX 0
+
+#define SXE2_ITR_8K 124
+#define SXE2_ITR_20K 50
+#define SXE2_ITR_MAX 8160
+
+#define SXE2_TX_ITR_IDX SXE2_ITR_IDX_1
+#define SXE2_RX_ITR_IDX SXE2_ITR_IDX_0
+#define SXE2_TX_DFLT_ITR SXE2_ITR_20K
+#define SXE2_RX_DFLT_ITR SXE2_ITR_20K
+
+#define SXE2_IS_ITR_DYNAMIC(qc) \
+	((qc)->itr_mode == SXE2_ITR_DYNAMIC)
+
+#define SXE2_IRQ_HAS_TXQ(irq_data) \
+	((irq_data)->tx.list.next)
+#define SXE2_IRQ_HAS_RXQ(irq_data) \
+	((irq_data)->rx.list.next)
+
+#define SXE2_INT_NAME_MAX_LEN (IFNAMSIZ + 16)
+
+enum sxe2_itr_idx {
+	SXE2_ITR_IDX_0 = 0,
+	SXE2_ITR_IDX_1,
+	SXE2_ITR_IDX_2,
+	SXE2_ITR_IDX_NONE,
+};
+
+enum sxe2_itr_mode {
+	SXE2_ITR_STATIC = 0,
+	SXE2_ITR_DYNAMIC = 1,
+};
+
+struct sxe2_irq_layout {
+	u16 event;
+	u16 event_offset;
+
+	u16 lb;
+	u16 lb_offset;
+
+	u16 fnav;
+	u16 fnav_offset;
+
+	u16 eswitch;
+	u16 eswitch_offset;
+
+	u16 lan;
+	u16 lan_offset;
+
+	u16 rdma;
+	u16 rdma_offset;
+
+	u16 dpdk;
+	u16 dpdk_offset;
+
+	u16 dpdk_eswitch;
+	u16 dpdk_eswitch_offset;
+
+	u16 macvlan;
+	u16 macvlan_offset;
+
+	u16 sriov;
+	u16 sriov_offset;
+};
+
+struct sxe2_irq_context {
+	u16 max_cnt;
+	u16 avail_cnt;
+	u16 fixed_cnt;
+	u16 base_idx_in_dev;
+	u16 rdma_base_idx;
+	struct sxe2_irq_layout irq_layout;
+	struct msix_entry *msix_entries;
+	s8 event_int_name[SXE2_INT_NAME_MAX_LEN];
+	u32 event_irq_cnt;
+	struct mutex lock;
+	DECLARE_BITMAP(map, SXE2_IRQ_MAX_CNT);
+};
+
+struct sxe2_q_container {
+	struct sxe2_list list;
+	struct dim dim;
+	u16 itr_idx;
+	u16 itr_setting;
+	u16 itr_mode;
+};
+
+struct sxe2_irq_data {
+	u16 idx_in_vsi;
+	u16 idx_in_pf;
+	u16 rate_limit;
+	u8 multiple_polling : 1;
+	u16 event_ctr;
+	struct sxe2_vsi *vsi;
+	struct napi_struct napi;
+	struct sxe2_q_container tx;
+	struct sxe2_q_container rx;
+	s8 name[SXE2_INT_NAME_MAX_LEN];
+	cpumask_t affinity_mask;
+	struct irq_affinity_notify affinity_notify;
+};
+
+s32 sxe2_irq_init(struct sxe2_adapter *adapter);
+
+void sxe2_irq_deinit(struct sxe2_adapter *adapter);
+
+s32 sxe2_event_irq_request(struct sxe2_adapter *adapter);
+
+void sxe2_event_irq_free(struct sxe2_adapter *adapter);
+
+void sxe2_event_irq_enable(struct sxe2_adapter *adapter);
+
+void sxe2_event_irq_disable(struct sxe2_adapter *adapter);
+
+s32 sxe2_irq_offset_get(struct sxe2_adapter *adapter, u16 cnt, u8 vsi_type);
+
+irqreturn_t sxe2_msix_ring_irq_handler(int __always_unused irq, void *data);
+
+irqreturn_t sxe2_msix_ctrl_vsi_handler(int __always_unused irq, void *data);
+
+irqreturn_t sxe2_msix_lb_rx_irq_handler(int __always_unused irq, void *data);
+
+void sxe2_dynamic_itr(struct sxe2_irq_data *irq_data);
+
+void sxe2_irq_rate_limit_init(struct sxe2_irq_data *irq_data);
+
+void sxe2_dim_init(struct sxe2_irq_data *irq_data);
+
+void sxe2_irq_itr_init(struct sxe2_irq_data *irq_data);
+
+int sxe2_napi_poll(struct napi_struct *napi, int weight);
+
+int sxe2_esw_napi_poll(struct napi_struct *napi, int weight);
+
+void sxe2_trigger_soft_intr(struct sxe2_hw *hw, struct sxe2_irq_data *irq_data);
+
+s32 sxe2_irq_resume(struct sxe2_adapter *adapter);
+
+s32 sxe2_dpdk_irq_cnt_get(void *adapter);
+
+s32 sxe2_dpdk_irq_vector_idx_get(void *adapter, u16 irq_idx);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.c
new file mode 100644
index 0000000000000000000000000000000000000000..b81e3fdee38a854837187867b1bd928bdee6d3c0
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.c
@@ -0,0 +1,1657 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_lag.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/netdevice.h>
+#include <net/bonding.h>
+#include "sxe2_lag.h"
+#include "sxe2.h"
+#include "sxe2_log.h"
+#include "sxe2_tc.h"
+#include "sxe2_aux_driver.h"
+#include "sxe2_common.h"
+
+#define SXE2_LAG_BOND_MODE_UNSET -1
+#define SXE2_LAG_ADAPTER_IDX_UNSET -1
+
+#define SXE2_LAG_PRIMARY_ID 0
+#define SXE2_LAG_REDUNDANT_ID 1
+
+static DEFINE_IDA(sxe2_lag_ida);
+static struct mutex sxe2_lag_list_mtx;
+static struct sxe2_lag_list sxe2_lag_head;
+
+int allow_repeat_sn;
+module_param(allow_repeat_sn, int, 0644);
+
+MODULE_PARM_DESC(allow_repeat_sn,
+		 "Allow the device to be probed even when its serial number is duplicated.");
+
+void sxe2_lag_init_once(void)
+{
+	mutex_init(&sxe2_lag_list_mtx);
+	INIT_LIST_HEAD(&sxe2_lag_head.node);
+}
+
+void sxe2_lag_deinit_once(void)
+{
+	mutex_destroy(&sxe2_lag_list_mtx);
+}
+
+STATIC void sxe2_lag_list_lock(void)
+{
+	mutex_lock(&sxe2_lag_list_mtx);
+}
+
+STATIC void sxe2_lag_list_unlock(void)
+{
+	mutex_unlock(&sxe2_lag_list_mtx);
+}
+
+STATIC struct sxe2_lag_context *sxe2_lag_alloc(void)
+{
+	struct sxe2_lag_context *lag;
+
+	lag = kzalloc(sizeof(*lag), GFP_ATOMIC);
+	if (!lag)
+		LOG_ERROR("alloc lag ctxt failed.\n");
+
+	return lag;
+}
+
+STATIC struct sxe2_lag_context *sxe2_lag_context_find(struct sxe2_adapter *adapter)
+{
+	struct sxe2_lag_context *lag = NULL;
+	struct list_head *tmp;
+	struct list_head *n;
+	struct sxe2_lag_list *entry = NULL;
+
+	list_for_each_safe(tmp, n, &sxe2_lag_head.node)
+	{
+		entry = list_entry(tmp, struct sxe2_lag_list, node);
+		if (!entry->lag) {
+			LOG_ERROR_BDF("lag is null.\n");
+			continue;
+		}
+
+		if (memcmp(adapter->serial_num, entry->lag->serial_num,
+			   SXE2_SERIAL_NUM_LEN) == 0) {
+			lag = entry->lag;
+			break;
+		}
+	}
+
+	LOG_INFO_BDF("find lag %p.\n", lag);
+
+	return lag;
+}
+
+static bool sxe2_lag_is_primary(struct sxe2_adapter *adapter)
+{
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+	bool ret = false;
+
+	if (adapter == lag->adapters[SXE2_LAG_PRIMARY_ID])
+		ret = true;
+
+	return ret;
+}
+
+bool sxe2_lag_is_bonded(struct sxe2_adapter *adapter)
+{
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+
+	return lag->bonded;
+}
+
+struct sxe2_adapter *sxe2_lag_role_find(struct sxe2_lag_context *lag,
+					int role)
+{
+	struct sxe2_adapter
*adapter = NULL; + + switch (role) { + case SXE2_LAG_ADAPTER_TYPE_PRIMARY: + adapter = lag->adapters[SXE2_LAG_PRIMARY_ID]; + break; + case SXE2_LAG_ADAPTER_TYPE_REDUNDANT: + adapter = lag->adapters[SXE2_LAG_REDUNDANT_ID]; + break; + case SXE2_LAG_ADAPTER_TYPE_ACTIVE: + if (lag->active_id != SXE2_LAG_ADAPTER_IDX_UNSET) + adapter = lag->adapters[lag->active_id]; + break; + default: + break; + } + + return adapter; +} + +static void sxe2_lag_active_set(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = adapter->lag_ctxt; + int i; + + for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) { + if (adapter == lag->adapters[i]) { + lag->active_id = i; + break; + } + } + + LOG_INFO("set lag active idx %d.\n", lag->active_id); +} + +static void sxe2_lag_info_display(struct sxe2_lag_context *lag) +{ + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_adapter *bond_adapter = NULL; + struct sxe2_adapter *active_adapter = NULL; + struct net_device *netdev; + struct net_device *upperdev; + int i; + const char *name; + const char *upper; + const char *primary; + const char *active; + const char *mode; + + active_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_ACTIVE); + if (lag->bond_mode == BOND_MODE_ACTIVEBACKUP) + mode = "act-back"; + else if (lag->bond_mode == SXE2_LAG_BOND_MODE_UNSET) + mode = "unset"; + else + mode = "act-act"; + + LOG_INFO_BDF("lag display: current bond state %d mode %s\n", lag->bonded, + mode); + for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) { + bond_adapter = lag->adapters[i]; + netdev = bond_adapter->vsi_ctxt.main_vsi->netdev; + rcu_read_lock(); + upperdev = netdev_master_upper_dev_get_rcu(netdev); + upper = upperdev ? netdev_name(upperdev) : "unset"; + rcu_read_unlock(); + + name = netdev ? netdev_name(netdev) : "unset"; + primary = sxe2_lag_is_primary(bond_adapter) ? "TRUE" : "FALSE"; + + active = "unset"; + if (active_adapter) { + active = active_adapter->pf_idx == bond_adapter->pf_idx ? 
"TRUE" : "FALSE"; + } + LOG_INFO_BDF("lag display: %s , upper:%s, primary:%s active %s\n", + name, upper, primary, active); + } +} + +static struct sxe2_adapter *sxe2_lag_get_adapter(struct sxe2_lag_context *lag, u8 pf) +{ + return lag->adapters[pf]; +} + +STATIC void sxe2_lag_move_nodes(struct sxe2_lag_context *lag, u8 oldpf, u8 newpf, + bool is_aa, u8 pf_idx) +{ + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_adapter *old_adapter = sxe2_lag_get_adapter(lag, oldpf); + struct sxe2_adapter *new_adapter = sxe2_lag_get_adapter(lag, newpf); + struct aux_rdma_qset_params qset = {}; + int ret; + u16 new_teid; + u8 user_pri; + u8 dest = SXE2_RDMA_PF0; + + if (!old_adapter || !new_adapter) { + LOG_WARN_BDF("Could not locate resources to move node\n"); + return; + } + + sxe2_for_each_user_prio(user_pri) + { + if (is_aa) { + qset.teid = lag->rdma_qsets[user_pri].teid[pf_idx]; + qset.user_pri = user_pri; + qset.qset_id = lag->rdma_qsets[user_pri].qset_id[pf_idx]; + memcpy(qset.tc, lag->rdma_qsets[user_pri].tc, + sizeof(qset.tc)); + if (!qset.teid) + continue; + + ret = sxe2_txsched_qset_node_move(old_adapter, new_adapter, + &qset, &new_teid, + (u8)is_aa); + if (ret) { + LOG_ERROR_BDF("Lag aa move nodes error,ret=%d\n", ret); + return; + } + + if (newpf == SXE2_LAG_PF0) + dest = SXE2_RDMA_PF0; + else if (newpf == SXE2_LAG_PF1) + dest = SXE2_RDMA_PF1; + + lag->rdma_qsets[user_pri].teid[pf_idx] = new_teid; + lag->rdma_qsets[user_pri].qset_port[pf_idx] = dest; + } else { + qset.teid = lag->rdma_qset[user_pri].teid; + qset.user_pri = user_pri; + qset.qset_id = lag->rdma_qset[user_pri].qset_id; + memcpy(qset.tc, lag->rdma_qset[user_pri].tc, + sizeof(qset.tc)); + if (!qset.teid) + continue; + + ret = sxe2_txsched_qset_node_move(old_adapter, new_adapter, + &qset, &new_teid, + (u8)is_aa); + if (ret) { + LOG_ERROR_BDF("Lag move nodes error,ret=%d\n", ret); + return; + } + + if (newpf == SXE2_LAG_PF0) + dest = SXE2_RDMA_PF0; + else if (newpf == SXE2_LAG_PF1) + dest = SXE2_RDMA_PF1; + + lag->rdma_qset[user_pri].teid = new_teid; + lag->rdma_qset[user_pri].qset_port = dest; + } + } +} + +STATIC int sxe2_lag_move_node(struct sxe2_lag_context *lag, u8 oldpf, u8 newpf, + u8 user_pri, bool is_aa, u8 pf_idx) +{ + struct sxe2_adapter *old_adapter = sxe2_lag_get_adapter(lag, oldpf); + struct sxe2_adapter *new_adapter = sxe2_lag_get_adapter(lag, newpf); + struct sxe2_adapter *adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + struct aux_rdma_qset_params qset = {}; + u16 new_teid; + int ret; + u8 dest = SXE2_RDMA_PF0; + + if (!old_adapter || !new_adapter) { + LOG_WARN_BDF("Could not locate resources to move node\n"); + ret = -EINVAL; + goto l_end; + } + + if (is_aa) { + qset.user_pri = user_pri; + qset.teid = lag->rdma_qsets[user_pri].teid[pf_idx]; + qset.qset_id = lag->rdma_qsets[user_pri].qset_id[pf_idx]; + memcpy(qset.tc, lag->rdma_qsets[user_pri].tc, sizeof(qset.tc)); + ret = sxe2_txsched_qset_node_move(old_adapter, new_adapter, &qset, + &new_teid, (u8)is_aa); + if (ret) { + LOG_ERROR_BDF("Lag aa move nodes error,ret=%d\n", ret); + goto l_end; + } + + if (newpf == SXE2_LAG_PF0) + dest = SXE2_RDMA_PF0; + else if (newpf == SXE2_LAG_PF1) + dest = SXE2_RDMA_PF1; + + lag->rdma_qsets[user_pri].teid[pf_idx] = new_teid; + lag->rdma_qsets[user_pri].qset_port[pf_idx] = dest; + } else { + qset.user_pri = user_pri; + qset.teid = lag->rdma_qset[user_pri].teid; + qset.qset_id = lag->rdma_qset[user_pri].qset_id; + memcpy(qset.tc, lag->rdma_qsets[user_pri].tc, sizeof(qset.tc)); + ret = 
sxe2_txsched_qset_node_move(old_adapter, new_adapter, &qset, + &new_teid, (u8)is_aa); + if (ret) { + LOG_ERROR_BDF("Lag move nodes error,ret=%d\n", ret); + goto l_end; + } + + if (newpf == SXE2_LAG_PF0) + dest = SXE2_RDMA_PF0; + else if (newpf == SXE2_LAG_PF1) + dest = SXE2_RDMA_PF1; + + lag->rdma_qset[user_pri].teid = new_teid; + lag->rdma_qset[user_pri].qset_port = dest; + } + +l_end: + return 0; +} + +static void sxe2_lag_unlink(struct sxe2_adapter *adapter) +{ + struct aux_core_dev_info *cdev; + struct sxe2_lag_context *lag = adapter->lag_ctxt; + struct sxe2_adapter *primary_adapter; + + if (sxe2_lag_is_primary(adapter)) { + lag->active_id = SXE2_LAG_ADAPTER_IDX_UNSET; + cdev = &adapter->aux_ctxt.cdev_info; + if (cdev->adev) + cdev->rdma_pf_bitmap &= ~SXE2_RDMA_PF0; + } else { + primary_adapter = sxe2_lag_role_find(lag, + SXE2_LAG_ADAPTER_TYPE_PRIMARY); + if (primary_adapter) { + cdev = &primary_adapter->aux_ctxt.cdev_info; + if (cdev->adev) + cdev->rdma_pf_bitmap &= ~SXE2_RDMA_PF1; + } + } +} + +STATIC s32 sxe2_lag_del_prune_list(struct sxe2_adapter *primary_adapter, + struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *list_itr = NULL; + u16 vsi_id; + + vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_VLAN].rule_head; + list_for_each_entry(list_itr, list_head, list_entry) + { + if (test_bit(vsi_id, list_itr->vsi_list->vsi_map) && + bitmap_weight(list_itr->vsi_list->vsi_map, SXE2_VSI_MAX_CNT) == 1 && + list_itr->vsi_list->need_bond == 1) { + ret = sxe2_vsi_list_update_bond(adapter, + list_itr->vsi_list, + primary_adapter, false); + if (ret) { + LOG_ERROR_BDF("Error adding VSI prune list\n"); + return ret; + } + list_itr->vsi_list->need_bond = 0; + return ret; + } + } + ret = -EEXIST; + return ret; +} + +STATIC s32 sxe2_lag_add_prune_list(struct sxe2_adapter *primary_adapter, + struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *list_itr = NULL; + u16 vsi_id; + + vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_VLAN].rule_head; + list_for_each_entry(list_itr, list_head, list_entry) + { + if ((test_bit(vsi_id, list_itr->vsi_list->vsi_map)) && + (bitmap_weight(list_itr->vsi_list->vsi_map, SXE2_VSI_MAX_CNT) == + 1)) { + list_itr->vsi_list->need_bond = 1; + ret = sxe2_vsi_list_update_bond(adapter, list_itr->vsi_list, + primary_adapter, true); + if (ret) { + LOG_ERROR("Error adding VSI prune list\n"); + return ret; + } + return ret; + } + } + ret = -EEXIST; + return ret; +} + +static s32 sxe2_lag_rdma_create_fltr(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + LOG_INFO_BDF("lag create rdma action start.\n"); + + if (sxe2_lag_is_primary(adapter)) { + struct sxe2_fwc_switch_large_action large_act; + + memset(&large_act, 0, sizeof(large_act)); + large_act.action[0].reg.rsv0 = 0; + large_act.action[0].reg.rsv1 = 0; + large_act.action[0].reg.list = 0; + large_act.action[0].reg.fwd_vsi000 = 0; + large_act.action[0].reg.rsv2 = 0; + large_act.action[0].reg.vsi_list = adapter->vsi_ctxt.main_vsi->idx_in_dev; + large_act.action[0].reg.valid = 1; + large_act.idx = 0; + + ret = sxe2_fwc_switch_large_action_cfg(adapter, &large_act, + SXE2_CMD_SWITCH_LARGE_ACTION_CFG); + if (ret) { + LOG_ERROR_BDF("large action add fail, ret=%d\n", ret); + return ret; + } + } else { + 
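+		/* Non-primary port: install a single bond forwarding rule
+		 * instead of the shared large action owned by the primary.
+		 */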
ret = sxe2_bond_single_rule_setup(adapter, true); + if (ret) { + LOG_ERROR_BDF("single action add fail, ret=%d\n", ret); + return ret; + } + } + + LOG_INFO_BDF("lag create rdma action proc complete.\n"); + + return ret; +} + +static void sxe2_lag_rdma_del_action(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_fwc_switch_large_action lg_act; + + LOG_INFO_BDF("lag del rdma action start.\n"); + if (!sxe2_lag_is_primary(adapter)) { + LOG_ERROR("adapter is not primary.\n"); + goto l_end; + } + + memset(&lg_act, 0, sizeof(lg_act)); + lg_act.action[0].reg.rsv0 = 0; + lg_act.action[0].reg.rsv1 = 0; + lg_act.action[0].reg.list = 0; + lg_act.action[0].reg.fwd_vsi000 = 0; + lg_act.action[0].reg.rsv2 = 0; + lg_act.action[0].reg.vsi_list = + adapter->vsi_ctxt.main_vsi->idx_in_dev; + lg_act.action[0].reg.valid = 0; + lg_act.idx = 0; + + ret = sxe2_fwc_switch_large_action_cfg(adapter, &lg_act, + SXE2_CMD_SWITCH_LARGE_ACTION_CFG); + if (ret) + LOG_ERROR_BDF("large action del fail, ret=%d\n", ret); + + LOG_INFO_BDF("lag del rdma action process complete.\n"); + +l_end: + LOG_INFO_BDF("lag del rdma action end.\n"); +} + +static void sxe2_lag_reclaim_nodes(struct sxe2_lag_context *lag) +{ + struct sxe2_adapter *primary_adapter; + struct sxe2_adapter *redundant_adapter; + + primary_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + if (!primary_adapter) { + LOG_ERROR("find primary failed.\n"); + goto l_end; + } + + redundant_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_REDUNDANT); + if (!redundant_adapter) { + LOG_ERROR("find redundant failed.\n"); + goto l_end; + } + + sxe2_lag_move_nodes(lag, redundant_adapter->pf_idx, primary_adapter->pf_idx, + false, 0); + +l_end: + return; +} + +STATIC void sxe2_lag_aa_reclaim_nodes(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev) +{ + sxe2_lag_move_nodes(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, true, SXE2_LAG_PF1); + + sxe2_lag_move_nodes(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, true, SXE2_LAG_PF0); +} + +void sxe2_lag_aa_reclaim_node(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 user_pri) +{ + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, user_pri, true, + SXE2_LAG_PF1); + + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, user_pri, true, + SXE2_LAG_PF0); +} + +void sxe2_lag_ab_reclaim_node(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 user_pri) +{ + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, user_pri, false, + SXE2_LAG_PF1); +} + +static bool sxe2_lag_monitor_act_back(struct sxe2_lag_context *lag) +{ + bool send = false; + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_adapter *primary_adapter; + struct sxe2_adapter *redundant_adapter; + struct sxe2_adapter *active_adapter; + struct aux_core_dev_info *cdev = NULL; + struct sxe2_lag_dev_info *info = NULL; + u8 primary_pf = SXE2_RDMA_INVALID_PF; + u8 redundant_pf = SXE2_RDMA_INVALID_PF; + u8 active_pf = SXE2_RDMA_INVALID_PF; + u8 old_pf = SXE2_RDMA_INVALID_PF; + s32 i; + + LOG_INFO_BDF("lag mode act-back proc start.\n"); + primary_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + redundant_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_REDUNDANT); + cdev = &primary_adapter->aux_ctxt.cdev_info; + + primary_pf = primary_adapter->pf_idx; + redundant_pf = redundant_adapter->pf_idx; + + if (cdev->rdma_pf_bitmap == SXE2_RDMA_INVALID_PF) { + cdev->rdma_pf_bitmap = SXE2_RDMA_BOTH_PF; + LOG_INFO_BDF("lag update cdev rdma pf_bitmap 0x3.\n"); + } + + for (i = 0; i < 
SXE2_MAX_BOND_DEV_NUM; i++) { + info = &lag->lag_wk.info[i]; + if (info->slave_state == BOND_STATE_ACTIVE) + active_pf = lag->adapters[i]->pf_idx; + } + + if (active_pf == SXE2_RDMA_INVALID_PF) { + LOG_INFO_BDF("lag mode act-back: not found active func.\n"); + goto l_end; + } + + active_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_ACTIVE); + if (active_adapter && active_pf == active_adapter->pf_idx) { + LOG_INFO_BDF("lag mode act-back: active pf already set.\n"); + goto l_end; + } + + if (primary_pf == active_pf) { + cdev->rdma_pf_bitmap |= SXE2_RDMA_PF0; + cdev->rdma_pf_bitmap &= ~SXE2_RDMA_PF1; + sxe2_lag_active_set(primary_adapter); + old_pf = redundant_pf; + + } else { + cdev->rdma_pf_bitmap |= SXE2_RDMA_PF1; + cdev->rdma_pf_bitmap &= ~SXE2_RDMA_PF0; + sxe2_lag_active_set(redundant_adapter); + old_pf = primary_pf; + } + + if (lag->lag_wk.event == SXE2_LAG_EVENT_BONDINFO) { + sxe2_lag_move_nodes(lag, old_pf, active_pf, false, 0); + send = true; + } + LOG_INFO_BDF("lag after send failover bitmap:%d\n", cdev->rdma_pf_bitmap); + +l_end: + + LOG_INFO_BDF("lag mode act-back proc end.\n"); + return send; +} + +void sxe2_lag_aa_failover(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 dest) +{ + struct aux_rdma_multi_qset_params *qsets; + u8 not_all = 0; + u8 i; + + LOG_DEBUG("lag aa failover proc start.\n"); + + if (!(cdev->rdma_pf_bitmap & dest)) + goto l_end; + + not_all = cdev->rdma_pf_bitmap ^ SXE2_RDMA_BOTH_PF; + + sxe2_for_each_user_prio(i) + { + qsets = lag->rdma_qsets; + + if (dest == SXE2_RDMA_PF0) { + if (qsets[i].teid[SXE2_LAG_PF0] && + qsets[i].qset_port[SXE2_LAG_PF0] != dest) { + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, + SXE2_LAG_PF0, i, true, + SXE2_LAG_PF0); + qsets[i].qset_port[SXE2_LAG_PF0] = dest; + } + + if (not_all && qsets[i].teid[SXE2_LAG_PF1] && + qsets[i].qset_port[SXE2_LAG_PF1] != dest) { + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, + SXE2_LAG_PF0, i, true, + SXE2_LAG_PF1); + qsets[i].qset_port[SXE2_LAG_PF1] = dest; + } + } else { + if (qsets[i].teid[SXE2_LAG_PF1] && + qsets[i].qset_port[SXE2_LAG_PF1] != dest) { + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF0, + SXE2_LAG_PF1, i, true, + SXE2_LAG_PF1); + qsets[i].qset_port[SXE2_LAG_PF1] = dest; + } + + if (not_all && qsets[i].teid[SXE2_LAG_PF0] && + qsets[i].qset_port[SXE2_LAG_PF0] != dest) { + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF0, + SXE2_LAG_PF1, i, true, + SXE2_LAG_PF0); + qsets[i].qset_port[SXE2_LAG_PF0] = dest; + } + } + } + + LOG_INFO("lag after send failover bitmap:%d\n", cdev->rdma_pf_bitmap); + +l_end: + LOG_DEBUG("lag aa failover proc end.\n"); +} + +static u8 sxe2_lag_get_move_dest(u8 old_st, u8 new_st) +{ + u8 dest = SXE2_RDMA_INVALID_PF; + + if ((old_st & BIT(0)) != (new_st & BIT(0))) { + if (new_st & BIT(0)) + dest = SXE2_RDMA_PF0; + else + dest = SXE2_RDMA_PF1; + } else if ((old_st & BIT(1)) != (new_st & BIT(1))) { + if (new_st & BIT(1)) + dest = SXE2_RDMA_PF1; + else + dest = SXE2_RDMA_PF0; + } + + return dest; +} + +static bool sxe2_lag_monitor_act_act(struct sxe2_lag_context *lag) +{ + bool send = false; + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_adapter *primary_adapter; + struct aux_core_dev_info *cdev = NULL; + struct sxe2_lag_dev_info *info = NULL; + u8 primary_pf = SXE2_RDMA_INVALID_PF; + u8 dest = SXE2_RDMA_INVALID_PF; + u8 pf_bitmap = 0; + s32 i; + + LOG_INFO_BDF("lag mode act-act: proc start.\n"); + + primary_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + primary_pf = primary_adapter->pf_idx; + cdev = 
&primary_adapter->aux_ctxt.cdev_info; + + for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) { + info = &lag->lag_wk.info[i]; + if (info->slave_link == BOND_LINK_UP) { + if (primary_pf == lag->adapters[i]->pf_idx) { + pf_bitmap |= SXE2_RDMA_PF0; + LOG_INFO_BDF("lag mode act-act: prim change to active.\n"); + } else { + pf_bitmap |= SXE2_RDMA_PF1; + LOG_INFO_BDF("lag mode act-act: redundant change to active.\n"); + } + } else { + if (primary_pf == lag->adapters[i]->pf_idx) { + pf_bitmap &= ~SXE2_RDMA_PF0; + LOG_INFO_BDF("lag mode act-act: prim change to down.\n"); + } else { + pf_bitmap &= ~SXE2_RDMA_PF1; + LOG_INFO_BDF("lag mode act-act: redundant change to down.\n"); + } + } + } + + dest = sxe2_lag_get_move_dest(cdev->rdma_pf_bitmap, pf_bitmap); + if (dest == SXE2_RDMA_INVALID_PF) { + LOG_WARN("lag get node move dest failed, old st %d new st %d.\n", + cdev->rdma_pf_bitmap, pf_bitmap); + goto l_end; + } + + cdev->rdma_pf_bitmap = pf_bitmap; + + if (lag->lag_wk.event == SXE2_LAG_EVENT_BONDINFO) { + sxe2_lag_aa_failover(lag, cdev, dest); + send = true; + } + + LOG_INFO_BDF("lag mode act-act: primary %d redundant %d proc end.\n", + SXE2_LAG_PF0, SXE2_LAG_PF1); +l_end: + return send; +} + +static bool sxe2_lag_is_configurable(struct sxe2_adapter *adapter) +{ + bool ret = true; + struct sxe2_lag_context *lag = adapter->lag_ctxt; + + if (lag->state[SXE2_LAG_PF0] != SXE2_LAG_STATE_READY || + lag->state[SXE2_LAG_PF1] != SXE2_LAG_STATE_READY) + ret = false; + + LOG_DEBUG_BDF("pf state available(%d) for config lag.\n", ret); + + return ret; +} + +static void sxe2_lag_del_devs(struct sxe2_lag_context *lag) +{ + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_adapter *primary_adapter; + struct sxe2_adapter *redundant_adapter; + struct aux_core_dev_info *cdev_info; + + LOG_DEBUG_BDF("lag pf %d in bond, need del rdma aux dev.\n", + adapter->pf_idx); + + primary_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + cdev_info = &primary_adapter->aux_ctxt.cdev_info; + redundant_adapter = sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_REDUNDANT); + + if (cdev_info->bond_mode == SXE2_LAG_MODE_ACTIVE_ACTIVE) + sxe2_lag_aa_reclaim_nodes(lag, cdev_info); + else + sxe2_lag_reclaim_nodes(lag); + + (void)sxe2_lag_del_prune_list(primary_adapter, redundant_adapter); + (void)sxe2_bond_single_rule_setup(redundant_adapter, false); + sxe2_lag_unlink(redundant_adapter); + + sxe2_lag_rdma_del_action(primary_adapter); + sxe2_lag_unlink(primary_adapter); + + if (lag->bond_id != -1) { + ida_simple_remove(&sxe2_lag_ida, lag->bond_id); + lag->bond_id = -1; + } +} + +static void sxe2_lag_add_devs(struct sxe2_lag_context *lag) +{ + struct sxe2_adapter *adapter = lag->adapters[0]; + struct sxe2_lag_work *work = &lag->lag_wk; + struct sxe2_adapter *primary_adapter; + struct sxe2_adapter *redundant_adapter; + struct aux_core_dev_info *cdev_info; + + LOG_DEBUG_BDF("lag enter bond mode %d.\n", work->bond_mode); + + primary_adapter = lag->adapters[SXE2_LAG_PRIMARY_ID]; + lag->bond_id = ida_alloc(&sxe2_lag_ida, GFP_KERNEL); + lag->bond_mode = work->bond_mode; + + sxe2_lag_active_set(primary_adapter); + (void)sxe2_lag_rdma_create_fltr(primary_adapter); + + redundant_adapter = lag->adapters[SXE2_LAG_REDUNDANT_ID]; + (void)sxe2_lag_rdma_create_fltr(redundant_adapter); + (void)sxe2_lag_add_prune_list(primary_adapter, redundant_adapter); + + cdev_info = &primary_adapter->aux_ctxt.cdev_info; + if (lag->bond_mode == BOND_MODE_ACTIVEBACKUP) { + cdev_info->bond_mode = SXE2_LAG_MODE_ACTIVE_BACKUP; + 
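+		/* Evaluate the initial active/backup state and point the
+		 * RDMA pf bitmap at the currently active PF.
+		 */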
(void)sxe2_lag_monitor_act_back(lag); + } else { + cdev_info->bond_mode = SXE2_LAG_MODE_ACTIVE_ACTIVE; + (void)sxe2_lag_monitor_act_act(lag); + } +} + +STATIC void sxe2_lag_aa_alloced_node_move_original(struct sxe2_lag_context *lag, + u8 dest, u8 user_pri) +{ + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF0, SXE2_LAG_PF1, user_pri, true, + SXE2_LAG_PF1); +} + +STATIC void sxe2_lag_aa_alloced_move_node(struct sxe2_lag_context *lag, u8 dest, + u8 user_pri) +{ + if (dest == SXE2_RDMA_PF0) + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, user_pri, + true, SXE2_LAG_PF1); + else if (dest == SXE2_RDMA_PF1) + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF0, SXE2_LAG_PF1, user_pri, + true, SXE2_LAG_PF0); +} + +STATIC void sxe2_lag_ab_alloced_move_node(struct sxe2_lag_context *lag, + u8 dest, u8 user_pri) +{ + if (dest == SXE2_LAG_PF0) + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF1, SXE2_LAG_PF0, user_pri, + false, SXE2_LAG_PF1); + else if (dest == SXE2_LAG_PF1) + (void)sxe2_lag_move_node(lag, SXE2_LAG_PF0, SXE2_LAG_PF1, user_pri, + false, SXE2_LAG_PF0); +} + +void sxe2_lag_alloced_node_move(struct aux_core_dev_info *cdev_info, u8 user_pri, + bool is_aa) +{ + struct sxe2_adapter *adapter = cdev_info->adapter; + struct sxe2_lag_context *lag = adapter->lag_ctxt; + u8 dest; + u8 dflt_bitmap = SXE2_RDMA_BOTH_PF; + u8 active_pf = SXE2_RDMA_INVALID_PF; + + if (is_aa) { + sxe2_lag_aa_alloced_node_move_original(lag, SXE2_RDMA_PF1, user_pri); + dest = sxe2_lag_get_move_dest(dflt_bitmap, + cdev_info->rdma_pf_bitmap); + + if (cdev_info->rdma_pf_bitmap == SXE2_RDMA_INVALID_PF) + cdev_info->rdma_pf_bitmap = SXE2_RDMA_BOTH_PF; + + if (dest != SXE2_RDMA_INVALID_PF) { + sxe2_lag_aa_alloced_move_node(lag, dest, user_pri); + LOG_INFO("lag mode act-act: init st %d cdev st %d.\n", + dflt_bitmap, cdev_info->rdma_pf_bitmap); + } else { + LOG_INFO_BDF("lag mode act-act: no need move nodes.\n"); + } + + } else { + if (lag->lag_wk.info[0].slave_state == BOND_STATE_ACTIVE) + active_pf = lag->adapters[0]->pf_idx; + else if (lag->lag_wk.info[1].slave_state == BOND_STATE_ACTIVE) + active_pf = lag->adapters[1]->pf_idx; + + if (adapter->pf_idx == active_pf || + active_pf == SXE2_RDMA_INVALID_PF) { + LOG_INFO_BDF("lag mode act-back: no need move nodes.\n"); + } else { + sxe2_lag_ab_alloced_move_node(lag, active_pf, user_pri); + LOG_INFO_BDF("lag mode act-back: move nodes from %d to %d.\n", + adapter->pf_idx, active_pf); + } + } +} + +static s32 sxe2_lag_changeupper_proc(struct sxe2_lag_work *work) +{ + s32 ret = 0; + struct sxe2_lag_context *lag = work->lag; + struct sxe2_adapter *adapter = NULL; + struct sxe2_adapter *primary_adapter = NULL; + struct sxe2_adapter *tmp_adapter = NULL; + struct sxe2_adapter *bonded_adapters[SXE2_MAX_BOND_DEV_NUM] = {}; + struct aux_core_dev_info *cdev_info = NULL; + s32 i; + bool bonded; + + mutex_lock(&lag->lock); + adapter = lag->adapters[0]; + + LOG_INFO_BDF("lag changeupper_event_proc start.\n"); + + if (!test_and_clear_bit(SXE2_LAG_FLAGS_WK_PENDING, &lag->flags)) { + LOG_ERROR_BDF("lag: no work bit set.\n"); + mutex_unlock(&lag->lock); + goto l_end; + } + + set_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags); + + bonded = lag->lag_wk.is_bonded; + lag->bonded = bonded; + if (bonded) + sxe2_lag_add_devs(lag); + else + sxe2_lag_del_devs(lag); + + sxe2_lag_info_display(lag); + + work->state = SXE2_LAG_WK_ST_UNSET; + work->event = SXE2_LAG_EVENT_UNSET; + + primary_adapter =sxe2_lag_role_find(lag, SXE2_LAG_ADAPTER_TYPE_PRIMARY); + memcpy(bonded_adapters, lag->adapters, + sizeof(struct sxe2_adapter *) * 
SXE2_MAX_BOND_DEV_NUM); + + mutex_unlock(&lag->lock); + + if (bonded) { + for (i = SXE2_MAX_BOND_DEV_NUM; i > 0; i--) { + tmp_adapter = bonded_adapters[i - 1]; + cdev_info = &tmp_adapter->aux_ctxt.cdev_info; + sxe2_rdma_aux_delete(cdev_info); + } + + (void)sxe2_rdma_aux_add(primary_adapter); + } else { + cdev_info = &primary_adapter->aux_ctxt.cdev_info; + sxe2_rdma_aux_delete(cdev_info); + + cdev_info->bond_mode = SXE2_LAG_MODE_NONE; + for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) { + tmp_adapter = bonded_adapters[i]; + (void)sxe2_rdma_aux_add(tmp_adapter); + } + } + +l_end: + mutex_lock(&lag->lock); + clear_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags); + mutex_unlock(&lag->lock); + LOG_INFO_BDF("lag changeupper_event_proc end.\n"); + + return ret; +} + +static s32 sxe2_lag_bondinfo_proc(struct sxe2_lag_work *work) +{ + s32 ret = 0; + bool send = false; + struct sxe2_lag_context *lag = work->lag; + struct sxe2_adapter *adapter; + unsigned int event; + + mutex_lock(&lag->lock); + adapter = lag->adapters[0]; + + LOG_INFO_BDF("lag bondinfo_event_proc start.\n"); + + if (!test_and_clear_bit(SXE2_LAG_FLAGS_WK_PENDING, &lag->flags)) { + LOG_ERROR_BDF("lag: no work bit set.\n"); + mutex_unlock(&lag->lock); + goto l_end; + } + + set_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags); + + lag->bond_mode = work->bond_mode; + if (lag->bond_mode == BOND_MODE_ACTIVEBACKUP) + send = sxe2_lag_monitor_act_back(lag); + else + send = sxe2_lag_monitor_act_act(lag); + + sxe2_lag_info_display(lag); + + event = lag->lag_wk.event; + work->state = SXE2_LAG_WK_ST_DONE; + work->event = SXE2_LAG_EVENT_UNSET; + + mutex_unlock(&lag->lock); + + if (event == SXE2_LAG_EVENT_BONDINFO && send) + (void)sxe2_rdma_aux_send_failover_event(adapter); + +l_end: + mutex_lock(&lag->lock); + clear_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags); + mutex_unlock(&lag->lock); + LOG_INFO_BDF("lag bondinfo_event_proc end.\n"); + + return ret; +} + +static void sxe2_lag_work_cb(struct work_struct *work) +{ + struct sxe2_lag_work *lag_work; + s32 ret = 0; + + lag_work = container_of(work, struct sxe2_lag_work, task); + LOG_INFO("lag work process event %d.\n", lag_work->event); + + if (lag_work->event == SXE2_LAG_EVENT_CHANGEUPPER) + ret = sxe2_lag_changeupper_proc(lag_work); + else if (lag_work->event == SXE2_LAG_EVENT_BONDINFO) + ret = sxe2_lag_bondinfo_proc(lag_work); + + if (ret) + LOG_WARN("lag work process failed, ret:%d.\n", ret); +} + +STATIC int sxe2_lag_netdev_idx_get(struct sxe2_lag_context *lag, + struct net_device *dev) +{ + int i; + int idx = -1; + struct sxe2_adapter *adapter; + + for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) { + adapter = lag->adapters[i]; + if (adapter && adapter->vsi_ctxt.main_vsi->netdev == dev) { + idx = i; + LOG_DEBUG("find pf %d in bond.\n", adapter->pf_idx); + break; + } + } + + return idx; +} + +STATIC int +sxe2_lag_changeupper_event_fill(struct sxe2_adapter *adapter, + struct netdev_notifier_changeupper_info *info) +{ + int changed = 0; + struct net_device *upper = info->upper_dev; + struct net_device *ndev_tmp; + struct sxe2_lag_context *lag = adapter->lag_ctxt; + struct slave *slave; + bool is_bonded; + bool is_in_lag; + bool mode_supported; + int bond_status = 0; + int num_slaves = 0; + int bond_mode = -1; + int idx = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) + { + idx = sxe2_lag_netdev_idx_get(lag, ndev_tmp); + if (idx >= 0) { + slave = bond_slave_get_rcu(ndev_tmp); + if (slave) + bond_mode = BOND_MODE(slave->bond); + bond_status |= (1 << idx); + } + + num_slaves++; + } + 
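+	/* bond_status now carries one bit per local PF found under this
+	 * upper device; the lag only qualifies when both local PFs (and
+	 * no foreign slave) are enslaved.
+	 */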
+	rcu_read_unlock();
+
+	if (!(bond_status & 0x3))
+		goto l_end;
+
+	is_in_lag = num_slaves == SXE2_MAX_BOND_DEV_NUM && bond_status == 0x3;
+
+	if (bond_mode == BOND_MODE_ACTIVEBACKUP || bond_mode == BOND_MODE_XOR ||
+	    bond_mode == BOND_MODE_8023AD) {
+		lag->lag_wk.bond_mode = bond_mode;
+		mode_supported = true;
+		LOG_INFO_BDF("bond mode %d.\n", bond_mode);
+	} else {
+		mode_supported = false;
+		LOG_ERROR_BDF("unsupported bond mode %d.\n", bond_mode);
+	}
+
+	is_bonded = is_in_lag && mode_supported;
+	if (lag->lag_wk.is_bonded != is_bonded) {
+		lag->lag_wk.is_bonded = is_bonded;
+		changed = 1;
+	}
+
+l_end:
+	if (changed) {
+		lag->lag_wk.event = SXE2_LAG_EVENT_CHANGEUPPER;
+		lag->lag_wk.state = SXE2_LAG_WK_ST_WAIT_PROC;
+	}
+
+	LOG_INFO_BDF("changeupper syn proc done: changed %d\n", changed);
+	return changed;
+}
+
+static int sxe2_lag_bondinfo_event_fill(struct sxe2_adapter *adapter,
+					struct netdev_notifier_bonding_info *info)
+{
+	int changed = 0;
+	struct net_device *event_netdev = info->info.dev;
+	struct sxe2_lag_context *lag = adapter->lag_ctxt;
+	struct sxe2_lag_dev_info *linfo = NULL;
+	struct sxe2_lag_dev_info event_info = {};
+	int idx = 0;
+
+	idx = sxe2_lag_netdev_idx_get(lag, event_netdev);
+	if (idx < 0)
+		goto l_end;
+
+	event_info.slave_state = info->bonding_info.slave.state;
+	event_info.slave_link = info->bonding_info.slave.link;
+
+	lag->lag_wk.bond_mode = info->bonding_info.master.bond_mode;
+	linfo = &lag->lag_wk.info[idx];
+
+	if (memcmp(&event_info, linfo, sizeof(struct sxe2_lag_dev_info)) == 0)
+		goto l_end;
+
+	linfo->slave_state = info->bonding_info.slave.state;
+	linfo->slave_link = info->bonding_info.slave.link;
+
+	if (!lag->bonded)
+		goto l_end;
+
+	changed = 1;
+
+l_end:
+	if (changed && lag->lag_wk.state != SXE2_LAG_WK_ST_WAIT_PROC) {
+		lag->lag_wk.event = SXE2_LAG_EVENT_BONDINFO;
+		lag->lag_wk.state = SXE2_LAG_WK_ST_WAIT_PROC;
+	}
+
+	LOG_INFO_BDF("bondinfo syn proc done: changed %d\n", changed);
+
+	return changed;
+}
+
+STATIC void sxe2_lag_work_sched(struct sxe2_lag_context *lag)
+{
+	int i;
+	struct sxe2_adapter *adapter = NULL;
+
+	for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) {
+		adapter = lag->adapters[i];
+		if (adapter) {
+			if (!sxe2_lag_is_configurable(adapter)) {
+				LOG_INFO_BDF("lag work sched: pf %d is busy.\n",
+					     adapter->pf_idx);
+				return;
+			}
+		} else {
+			LOG_DEBUG_BDF("lag work sched: find idx adapter null.\n");
+			return;
+		}
+	}
+
+	set_bit(SXE2_LAG_FLAGS_WK_PENDING, &lag->flags);
+	if (test_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags)) {
+		LOG_INFO("lag: work is processing, queue work later.\n");
+		return;
+	}
+	(void)queue_work(lag->wkq, &lag->lag_wk.task);
+}
+
+static bool sxe2_lag_safe_mode_check(struct sxe2_lag_context *lag)
+{
+	bool in_safe_mode = false;
+	int i;
+
+	for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) {
+		if (lag->adapters[i] && sxe2_is_safe_mode(lag->adapters[i])) {
+			in_safe_mode = true;
+			LOG_ERROR("pf %d in safe mode.\n", lag->adapters[i]->pf_idx);
+			break;
+		}
+	}
+
+	return in_safe_mode;
+}
+
+static bool sxe2_lag_sriov_check(struct sxe2_lag_context *lag)
+{
+	struct sxe2_adapter *adapter;
+	bool in_sriov = false;
+	int i;
+
+	for (i = 0; i < SXE2_MAX_BOND_DEV_NUM; i++) {
+		adapter = lag->adapters[i];
+		if (adapter) {
+			mutex_lock(&adapter->vf_ctxt.vfs_lock);
+			if (sxe2_vf_is_exist(adapter)) {
+				in_sriov = true;
+				LOG_DEV_ERR("pf %d in sriov.\n",
+					    lag->adapters[i]->pf_idx);
+			}
+			mutex_unlock(&adapter->vf_ctxt.vfs_lock);
+
+			if (in_sriov)
+				break;
+		}
+	}
+
+	return in_sriov;
+}
+
+static int sxe2_lag_event_process(struct
sxe2_adapter *adapter, unsigned long event, + void *ptr) +{ + int ret = NOTIFY_DONE; + int changed = 0; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + if (!net_eq(dev_net(netdev), &init_net)) + goto l_end; + + if (sxe2_lag_safe_mode_check(adapter->lag_ctxt)) + goto l_end; + + if (sxe2_lag_sriov_check(adapter->lag_ctxt)) + goto l_end; + + if (!test_bit(SXE2_FLAG_RDMA_LOADED, adapter->flags)) + goto l_end; + + if (event == NETDEV_CHANGEUPPER) + changed = sxe2_lag_changeupper_event_fill(adapter, ptr); + else + changed = sxe2_lag_bondinfo_event_fill(adapter, ptr); + + if (changed) { + sxe2_lag_work_sched(adapter->lag_ctxt); + } +l_end: + LOG_INFO_BDF("lag work sched : event %lu changed %d.\n", event, changed); + + return ret; +} + +static bool sxe2_lag_event_check(struct sxe2_adapter *adapter, unsigned long event, + void *ptr) +{ + bool support = true; +#ifndef SXE2_CFG_RELEASE + struct netdev_notifier_bonding_info *binfo = ptr; +#endif + struct netdev_notifier_changeupper_info *cinfo = ptr; + + switch (event) { + case NETDEV_CHANGEUPPER: + LOG_INFO_BDF("lag check: changeupper event_netdev %s upper %s\n", + netdev_name(cinfo->info.dev), + netdev_name(cinfo->upper_dev)); + if (!netif_is_lag_master(cinfo->upper_dev)) { + support = false; + LOG_ERROR_BDF("lag link: upper %s is not lag master.\n", + netdev_name(cinfo->upper_dev)); + } + break; + case NETDEV_BONDING_INFO: + LOG_INFO_BDF("lag check: bondinfo event_netdev %s\n", + netdev_name(binfo->info.dev)); + break; + default: + LOG_INFO_BDF("lag check: ignore event %lu.\n", event); + support = false; + } + + return support; +} + +STATIC int sxe2_lag_netdev_event_handler(struct notifier_block *notif_blk, + unsigned long event, void *ptr) +{ + s32 ret = NOTIFY_DONE; + struct sxe2_lag_context *lag; + struct sxe2_adapter *adapter; + + lag = container_of(notif_blk, struct sxe2_lag_context, notif_block); + + mutex_lock(&lag->lock); + adapter = lag->adapters[0]; + + if (!sxe2_lag_event_check(adapter, event, ptr)) + goto l_end; + + switch (event) { + case NETDEV_CHANGEUPPER: + case NETDEV_BONDING_INFO: + (void)sxe2_lag_event_process(adapter, event, ptr); + break; + default: + LOG_DEBUG_BDF("lag ignore netdev notifier event %ld.\n", event); + break; + } + +l_end: + LOG_DEBUG_BDF("lag netdev event handler end.\n"); + mutex_unlock(&lag->lock); + return ret; +} + +STATIC struct sxe2_lag_context *sxe2_lag_ctxt_init(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = NULL; + struct sxe2_lag_list *entry; + size_t size; + + lag = sxe2_lag_alloc(); + if (!lag) { + LOG_ERROR_BDF("alloc lag failed.\n"); + goto l_end; + } + + lag->wkq = alloc_ordered_workqueue("%s-LAG-%s", __WQ_LEGACY | WQ_MEM_RECLAIM, + SXE2_DRV_NAME, adapter->serial_num); + if (!lag->wkq) { + LOG_ERROR_BDF("Failed to create LAG workqueue.\n"); + goto l_free_lag; + } + + mutex_init(&lag->lock); + + lag->bond_mode = SXE2_LAG_BOND_MODE_UNSET; + lag->ref_num = 0; + lag->bonded = false; + memcpy(lag->serial_num, adapter->serial_num, SXE2_SERIAL_NUM_LEN); + + lag->lag_wk.lag = lag; + lag->lag_wk.state = SXE2_LAG_WK_ST_UNSET; + INIT_WORK(&lag->lag_wk.task, sxe2_lag_work_cb); + + size = sizeof(struct sxe2_lag_list); + entry = kzalloc(size, GFP_KERNEL); + if (!entry) { + LOG_ERROR_BDF("alloc lag list node failed.\n"); + goto l_mutex_deinit; + } + + entry->lag = lag; + list_add(&entry->node, &sxe2_lag_head.node); + + LOG_INFO_BDF("lag init success lag 0x%p.\n", lag); + return lag; + +l_mutex_deinit: + mutex_destroy(&lag->lock); + destroy_workqueue(lag->wkq); + 
+l_free_lag: + kfree(lag); + lag = NULL; + +l_end: + return lag; +} + +STATIC void sxe2_lag_ctxt_deinit(struct sxe2_lag_context *lag) +{ + struct list_head *tmp; + struct list_head *n; + struct sxe2_lag_list *entry = NULL; + + cancel_work_sync(&lag->lag_wk.task); + + list_for_each_safe(tmp, n, &sxe2_lag_head.node) { + entry = list_entry(tmp, struct sxe2_lag_list, node); + if (entry->lag == lag) { + list_del(&entry->node); + LOG_DEBUG("del entry of lag %p.\n", lag); + break; + } + + entry = NULL; + } + + if (!entry) { + LOG_ERROR("failed to find entry of lag %p.\n", lag); + goto l_end; + } + + destroy_workqueue(lag->wkq); + + mutex_destroy(&lag->lock); + + kfree(lag); + kfree(entry); + +l_end: + return; +} + +bool sxe2_lag_support(struct sxe2_adapter *adapter) +{ + int pf_cnt = adapter->aux_ctxt.cdev_info.pf_cnt; + bool supported = false; + + if (pf_cnt != SXE2_MAX_BOND_DEV_NUM || allow_repeat_sn) + goto l_end; + + supported = true; +l_end: + + return supported; +} + +int sxe2_lag_init(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = NULL; + int ret = 0; + + LOG_INFO_BDF("lag init pf %d.\n", adapter->pf_idx); + + if (!sxe2_lag_support(adapter)) { + LOG_DEV_INFO("lag is not supported.\n"); + goto l_end; + } + sxe2_lag_list_lock(); + lag = sxe2_lag_context_find(adapter); + if (!lag) { + lag = sxe2_lag_ctxt_init(adapter); + if (!lag) { + ret = -ENOMEM; + LOG_DEV_ERR("lag init failed.\n"); + sxe2_lag_list_unlock(); + goto l_end; + } + } + + mutex_lock(&lag->lock); + if (lag->ref_num >= SXE2_MAX_BOND_DEV_NUM) { + LOG_DEV_ERR("netdev count reached max %d.\n", SXE2_MAX_BOND_DEV_NUM); + ret = -EMLINK; + goto l_unlock; + } + + adapter->lag_ctxt = lag; + lag->adapters[adapter->pf_idx] = adapter; + lag->ref_num++; + + lag->state[adapter->pf_idx] = SXE2_LAG_STATE_READY; + + LOG_INFO_BDF("lag init success use pf %d lag 0x%p serial_num %s.\n", + adapter->pf_idx, lag, lag->serial_num); +l_unlock: + mutex_unlock(&lag->lock); + sxe2_lag_list_unlock(); + +l_end: + + if (lag && lag->ref_num == SXE2_MAX_BOND_DEV_NUM) { + if (ret == 0 && !lag->notif_block.notifier_call) { + lag->notif_block.notifier_call = + sxe2_lag_netdev_event_handler; + ret = register_netdevice_notifier(&lag->notif_block); + if (ret) { + lag->notif_block.notifier_call = NULL; + LOG_DEV_ERR("failed to register netdev event handler!\n"); + return ret; + } + LOG_INFO_BDF("netdev event handler registered\n"); + } else { + LOG_DEV_ERR("SN: %s netdev event handler already registered, " + "maybe Serial Num is repeated, ret: %d.\n", + adapter->serial_num, ret); + } + } + + return ret; +} + +void sxe2_lag_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = adapter->lag_ctxt; + struct notifier_block *notif_blk; + + if (!sxe2_lag_support(adapter)) + goto l_end; + + if (!lag) { + LOG_ERROR_BDF("adapter already unlinked from lag.\n"); + goto l_end; + } + + sxe2_lag_list_lock(); + + if (lag->ref_num == SXE2_MAX_BOND_DEV_NUM) { + notif_blk = &lag->notif_block; + if (notif_blk->notifier_call) { + (void)unregister_netdevice_notifier(notif_blk); + LOG_INFO("LAG event handler unregistered\n"); + } + } + + mutex_lock(&lag->lock); + adapter->lag_ctxt = NULL; + lag->adapters[adapter->pf_idx] = NULL; + lag->ref_num--; + mutex_unlock(&lag->lock); + + if (lag->ref_num == 0) { + sxe2_lag_ctxt_deinit(lag); + LOG_INFO_BDF("lag ctxt deinit success.\n"); + } + + sxe2_lag_list_unlock(); + + LOG_INFO_BDF("lag deinit success.\n"); + +l_end: + return; +} + +static void sxe2_lag_enter_reset(struct sxe2_adapter *adapter, bool to_reset) +{ + struct sxe2_lag_context
*lag = adapter->lag_ctxt; + + if (adapter->pf_idx >= SXE2_MAX_BOND_DEV_NUM) + return; + + mutex_lock(&lag->lock); + if (to_reset) + lag->state[adapter->pf_idx] = SXE2_LAG_STATE_RESET; + else + lag->state[adapter->pf_idx] = SXE2_LAG_STATE_READY; + + LOG_INFO_BDF("lag pf %d state enter %d\n", + adapter->pf_idx, lag->state[adapter->pf_idx]); + mutex_unlock(&lag->lock); +} + +static bool sxe2_lag_enter_ready(struct sxe2_adapter *adapter) +{ + bool ready = false; + struct sxe2_lag_context *lag = adapter->lag_ctxt; + + mutex_lock(&lag->lock); + if (lag->state[SXE2_LAG_PF0] == SXE2_LAG_STATE_READY && + lag->state[SXE2_LAG_PF1] == SXE2_LAG_STATE_READY) + ready = true; + mutex_unlock(&lag->lock); + + return ready; +} + +void sxe2_lag_stop(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = adapter->lag_ctxt; + struct aux_core_dev_info *cdev_info; + struct sxe2_adapter *primary_adapter; + + if (!sxe2_lag_support(adapter)) + goto l_end; + + LOG_DEBUG_BDF("lag stop proc start.\n"); + + cancel_work_sync(&lag->lag_wk.task); + + if (!sxe2_lag_enter_ready(adapter)) { + LOG_DEBUG_BDF("lag stop proc: already in reset.\n"); + goto l_end; + } + + sxe2_lag_enter_reset(adapter, true); + + mutex_lock(&lag->lock); + if (!lag->bonded) { + mutex_unlock(&lag->lock); + goto l_end; + } + + sxe2_lag_del_devs(lag); + primary_adapter = sxe2_lag_role_find(lag, SXE2_LAG_PRIMARY_ID); + mutex_unlock(&lag->lock); + + cdev_info = &primary_adapter->aux_ctxt.cdev_info; + sxe2_rdma_aux_delete(cdev_info); + +l_end: + LOG_DEBUG_BDF("lag stop proc end.\n"); +} + +void sxe2_lag_rebuild(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = adapter->lag_ctxt; + + if (!sxe2_lag_support(adapter)) + goto l_end; + + if (sxe2_is_safe_mode(adapter)) { + LOG_WARN_BDF("running in safe mode, lag does not need rebuild.\n"); + goto l_end; + } + + LOG_DEBUG_BDF("lag rebuild proc start.\n"); + + sxe2_lag_enter_reset(adapter, false); + + mutex_lock(&lag->lock); + if (!lag->bonded) { + mutex_unlock(&lag->lock); + goto l_end; + } + + LOG_DEBUG_BDF("lag pf %d in bond, need rebuild rdma aux dev.\n", + adapter->pf_idx); + + lag->lag_wk.event = SXE2_LAG_EVENT_CHANGEUPPER; + lag->lag_wk.state = SXE2_LAG_WK_ST_WAIT_PROC; + + sxe2_lag_work_sched(adapter->lag_ctxt); + mutex_unlock(&lag->lock); + +l_end: + + LOG_DEBUG_BDF("lag rebuild proc end.\n"); +} + +void sxe2_lag_proc(struct sxe2_adapter *adapter) +{ + struct sxe2_lag_context *lag = adapter->lag_ctxt; + + if (!sxe2_lag_support(adapter)) + goto l_end; + + mutex_lock(&lag->lock); + if (!test_bit(SXE2_LAG_FLAGS_WK_PROCESS, &lag->flags) && + test_bit(SXE2_LAG_FLAGS_WK_PENDING, &lag->flags)) { + (void)queue_work(lag->wkq, &lag->lag_wk.task); + LOG_INFO_BDF("lag: find pending event, queue work now.\n"); + } + + mutex_unlock(&lag->lock); + +l_end: + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.h new file mode 100644 index 0000000000000000000000000000000000000000..80a48e436a089ec404b35bc41de18c73a2f58961 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lag.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_lag.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_LAG_H__ +#define __SXE2_LAG_H__ +#include "sxe2_netdev.h" +#include "sxe2_drv_aux.h" +#include + +#define SXE2_LAG_PF0 0 +#define SXE2_LAG_PF1 1 + +#define SXE2_MAX_PF_NUM 4 +#define SXE2_MAX_BOND_DEV_NUM 2 + +#define SXE2_LAG_EVENT_UNSET 0 +#define SXE2_LAG_EVENT_CHANGEUPPER BIT(0) +#define SXE2_LAG_EVENT_BONDINFO BIT(1) + +#define SXE2_LAG_MODE_NONE 0 +#define SXE2_LAG_MODE_ACTIVE_BACKUP 1 +#define SXE2_LAG_MODE_ACTIVE_ACTIVE 2 + +#define sxe2_for_each_user_prio(i) \ + for ((i) = 0; (i) < SXE2_MAX_USER_PRIORITY; (i)++) + +enum sxe2_lag_flags { + SXE2_LAG_FLAGS_WK_PENDING, + SXE2_LAG_FLAGS_WK_PROCESS, + SXE2_LAG_FLAGS_NBITS +}; + +enum sxe2_lag_adapter_type { + SXE2_LAG_ADAPTER_TYPE_PRIMARY, + SXE2_LAG_ADAPTER_TYPE_REDUNDANT, + SXE2_LAG_ADAPTER_TYPE_ACTIVE, +}; + +enum sxe2_lag_work_state { + SXE2_LAG_WK_ST_UNSET, + SXE2_LAG_WK_ST_WAIT_PROC, + SXE2_LAG_WK_ST_DONE, +}; + +enum sxe2_lag_state { + SXE2_LAG_STATE_UNINIT, + SXE2_LAG_STATE_READY, + SXE2_LAG_STATE_RESET, +}; + +struct sxe2_lag_list { + struct list_head node; + struct sxe2_lag_context *lag; +}; + +struct sxe2_lag_dev_info { + int slave_state; + int slave_link; +}; + +struct sxe2_lag_work { + struct work_struct task; + struct sxe2_lag_context *lag; + enum sxe2_lag_work_state state; + struct sxe2_lag_dev_info + info[SXE2_MAX_BOND_DEV_NUM]; + int bond_mode; + unsigned int event; + unsigned long is_bonded : 1; +}; + +struct sxe2_lag_context { + struct sxe2_adapter *adapters + [SXE2_MAX_BOND_DEV_NUM]; + struct aux_rdma_qset_params + rdma_qset[SXE2_MAX_USER_PRIORITY]; + struct aux_rdma_multi_qset_params + rdma_qsets[SXE2_MAX_USER_PRIORITY]; + struct notifier_block notif_block; + struct mutex lock; + struct workqueue_struct *wkq; + struct sxe2_lag_work lag_wk; + + unsigned long flags; + + u8 serial_num[SXE2_SERIAL_NUM_LEN]; + int active_id; + int ref_num; + int bond_mode; + int bond_id; + bool bonded; + enum sxe2_lag_state state[SXE2_MAX_BOND_DEV_NUM]; +}; + +bool sxe2_lag_support(struct sxe2_adapter *adapter); + +int sxe2_lag_init(struct sxe2_adapter *adapter); + +void sxe2_lag_deinit(struct sxe2_adapter *adapter); + +void sxe2_lag_init_once(void); + +void sxe2_lag_alloced_node_move(struct aux_core_dev_info *cdev_info, + u8 user_pri, bool is_aa); + +void sxe2_lag_deinit_once(void); + +void sxe2_lag_aa_failover(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 dest); +void sxe2_lag_aa_reclaim_node(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 user_pri); + +void sxe2_lag_ab_reclaim_node(struct sxe2_lag_context *lag, + struct aux_core_dev_info *cdev, u8 user_pri); + +bool sxe2_lag_is_bonded(struct sxe2_adapter *adapter); + +void sxe2_lag_stop(struct sxe2_adapter *adapter); + +void sxe2_lag_rebuild(struct sxe2_adapter *adapter); + +void sxe2_lag_proc(struct sxe2_adapter *adapter); + +struct sxe2_adapter *sxe2_lag_role_find(struct sxe2_lag_context *lag, + int role); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.c new file mode 100644 index 0000000000000000000000000000000000000000..2dedfc8f3d2051fba688fdea60099f03248b0cb7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_linkchg.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_hw.h" +#include "sxe2_vsi.h" +#include "sxe2_log.h" +#include "sxe2_linkchg.h" +#include "sxe2_event.h" +#include "sxe2_common.h" +#include "sxe2_ethtool.h" +#include "sxe2_monitor.h" + +#define CGMAC_LINKCHG_INFO(i) (0x002B4000 + (i) * 0x4000) +s32 sxe2_white_list_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len) +{ + enum sxe2_drv_event_code *white_list_ptr = (enum sxe2_drv_event_code *)buf; + enum sxe2_drv_event_code white_list = *white_list_ptr; + + if (white_list == SXE2_EVENT_CODE_SFP_WHITE_LIST) { + LOG_DEV_WARN("an unsupported optical module type was detected\n"); + LOG_DEV_WARN("refer to the sxe2 ethernet adapters and devices user guide " + "for a list of supported modules\n"); + } + + return 0; +} + +s32 sxe2_tx_fault_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len) +{ + enum sxe2_drv_event_code *tx_fault_ptr = (enum sxe2_drv_event_code *)buf; + enum sxe2_drv_event_code tx_fault = *tx_fault_ptr; + + if (tx_fault == SXE2_EVENT_CODE_SFP_TX_FAULT) + LOG_INFO_BDF("optical module tx fault occurred!"); + + return 0; +} + +s32 sxe2_tx_fault_event_count_mib(struct sxe2_adapter *adapter, void *buf, + u32 buf_len) +{ + struct sxe2_tx_fault_count_mib *tx_fault_count = (struct sxe2_tx_fault_count_mib *)buf; + + LOG_DEV_INFO("optical module tx fault count: %llu", + tx_fault_count->tx_fault_count); + + return 0; +} + +void sxe2_link_get_info_config(struct sxe2_adapter *adapter, u8 *link_state, + u32 *link_speed) +{ + *link_state = sxe2_get_pf_link_status(adapter); + if (*link_state == SXE2_LINK_DOWN) + *link_speed = SXE2_LINK_SPEED_UNKNOWN; + else + *link_speed = sxe2_get_link_speed(adapter); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.h new file mode 100644 index 0000000000000000000000000000000000000000..9043f083cedc6e2f06d495320145f7ed1cd9474f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_linkchg.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_linkchg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_LINKCHG_H__ +#define __SXE2_LINKCHG_H__ +#include +#include +#include "sxe2_cmd.h" +#include "sxe2_drv_aux.h" + +s32 sxe2_white_list_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +s32 sxe2_tx_fault_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +s32 sxe2_tx_fault_event_count_mib(struct sxe2_adapter *adapter, void *buf, u32 buf_len); + +void sxe2_link_get_info_config(struct sxe2_adapter *adapter, u8 *link_state, u32 *link_speed); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp.h new file mode 100644 index 0000000000000000000000000000000000000000..332657080d741999182ffadd959f12daf3f5fa68 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_lldp.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_LLDP_H__ +#define __SXE2_LLDP_H__ + +#include "sxe2_cmd.h" +#include "sxe2_log.h" + +static inline void sxe2_lldp_fw_stats(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_lldp_stats lldp_stats = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_FW_STATS, NULL, 0, + &lldp_stats, sizeof(lldp_stats)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("lldp fw state cmd fail, ret=%d\n", ret); + return; + } + LOG_DEV_INFO("\t lldp fw stats\n"); + LOG_DEV_INFO("\t\t lldp_enable=%d\n", lldp_stats.lldp_enable); + LOG_DEV_INFO("\t\t admin_status=%d\n", lldp_stats.admin_status); + LOG_DEV_INFO("\t\t rx_state=%d\n", lldp_stats.rx_state); + LOG_DEV_INFO("\t\t tx_state=%d\n", lldp_stats.tx_state); + + LOG_DEV_INFO("\t\t tx_failed=%d\n", lldp_stats.tx_failed); + LOG_DEV_INFO("\t\t tx_frames_out_total=%d\n", lldp_stats.tx_frames_out_total); + LOG_DEV_INFO("\t\t tx_lldpdu_length_errors=%d\n", lldp_stats.tx_lldpdu_length_errors); + LOG_DEV_INFO("\t\t rx_ageouts_total=%d\n", lldp_stats.rx_ageouts_total); + LOG_DEV_INFO("\t\t rx_frames_discarded_total=%d\n", lldp_stats.rx_frames_discarded_total); + LOG_DEV_INFO("\t\t rx_frames_in_errors_total=%d\n", lldp_stats.rx_frames_in_errors_total); + LOG_DEV_INFO("\t\t rx_frames_in_total=%d\n", lldp_stats.rx_frames_in_total); + LOG_DEV_INFO("\t\t rx_tlvs_discarded_total=%d\n", lldp_stats.rx_tlvs_discarded_total); + LOG_DEV_INFO("\t\t rx_tlvs_unrecognized_total=%d\n", lldp_stats.rx_tlvs_unrecognized_total); +} + +static inline void sxe2_lldp_remote_mibs_dump(struct sxe2_adapter *adapter) +{ + s32 ret, i, j; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_lldp_mibs_info info = { 0 }; + struct sxe2_fwc_lldp_mibs_dump_req dump_req; + struct sxe2_fwc_lldp_mibs_dump_resp *dump_resp; + + dump_resp = kzalloc(sizeof(*dump_resp), GFP_KERNEL); + if (!dump_resp) { + LOG_DEV_ERR("low memory\n"); + return; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_REMOTE_MIBS_INFO, NULL, 0, + &info, sizeof(info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("lldp remote mibs dump cmd fail, ret=%d\n", ret); + goto l_end; + } + LOG_DEV_INFO("\t lldp remote mibs dump_resp\n"); + LOG_DEV_INFO("\t\t lldp remote mibs count %d\n", info.count); + + for (i = 0; i < info.count; i++) { + dump_req.index = (u8)i; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_REMOTE_MIBS_DUMP, + &dump_req, sizeof(dump_req), + dump_resp, sizeof(*dump_resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("lldp remote mibs dump, ret=%d\n", ret); + goto l_end; + } + LOG_DEV_INFO("\t\t lldp remote mib[%d]\n", i); + print_hex_dump(KERN_INFO, "\t\t content: ", DUMP_PREFIX_OFFSET, 16, 1, + dump_resp->buffer, dump_resp->size, true); + memset(dump_resp->buffer, 0, dump_resp->size); + LOG_DEV_INFO("\t\t ets cfg parse:\n"); + LOG_DEV_INFO("\t\t ets cfg willing %d\n", dump_resp->ets_cfg.willing); + LOG_DEV_INFO("\t\t ets cfg cbs %d\n", dump_resp->ets_cfg.cbs); + LOG_DEV_INFO("\t\t ets cfg maxtcs %d\n", dump_resp->ets_cfg.maxtcs); + LOG_DEV_INFO("\t\t ets cfg prioTable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_cfg.prioTable[0], dump_resp->ets_cfg.prioTable[1], + dump_resp->ets_cfg.prioTable[2], dump_resp->ets_cfg.prioTable[3], + dump_resp->ets_cfg.prioTable[4], dump_resp->ets_cfg.prioTable[5], + dump_resp->ets_cfg.prioTable[6], dump_resp->ets_cfg.prioTable[7]); 
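+ /* 802.1Qaz ETS field meanings: prioTable maps each of the eight user priorities to a traffic class, tcbwtable holds the bandwidth share per TC in percent, and tsatable selects the transmission selection algorithm per TC (0 = strict priority, 2 = ETS). */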
+ LOG_DEV_INFO("\t\t ets cfg tcbwtable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_cfg.tcbwtable[0], dump_resp->ets_cfg.tcbwtable[1], + dump_resp->ets_cfg.tcbwtable[2], dump_resp->ets_cfg.tcbwtable[3], + dump_resp->ets_cfg.tcbwtable[4], dump_resp->ets_cfg.tcbwtable[5], + dump_resp->ets_cfg.tcbwtable[6], dump_resp->ets_cfg.tcbwtable[7]); + LOG_DEV_INFO("\t\t ets cfg tsatable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_cfg.tsatable[0], dump_resp->ets_cfg.tsatable[1], + dump_resp->ets_cfg.tsatable[2], dump_resp->ets_cfg.tsatable[3], + dump_resp->ets_cfg.tsatable[4], dump_resp->ets_cfg.tsatable[5], + dump_resp->ets_cfg.tsatable[6], dump_resp->ets_cfg.tsatable[7]); + LOG_DEV_INFO("\t\t ets rec parse:\n"); + LOG_DEV_INFO("\t\t ets rec prioTable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_rec.prioTable[0], dump_resp->ets_rec.prioTable[1], + dump_resp->ets_rec.prioTable[2], dump_resp->ets_rec.prioTable[3], + dump_resp->ets_rec.prioTable[4], dump_resp->ets_rec.prioTable[5], + dump_resp->ets_rec.prioTable[6], dump_resp->ets_rec.prioTable[7]); + LOG_DEV_INFO("\t\t ets rec tcbwtable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_rec.tcbwtable[0], dump_resp->ets_rec.tcbwtable[1], + dump_resp->ets_rec.tcbwtable[2], dump_resp->ets_rec.tcbwtable[3], + dump_resp->ets_rec.tcbwtable[4], dump_resp->ets_rec.tcbwtable[5], + dump_resp->ets_rec.tcbwtable[6], dump_resp->ets_rec.tcbwtable[7]); + LOG_DEV_INFO("\t\t ets rec tsatable 0:%d,1:%d,2:%d,3:%d,4:%d,5:%d,6:%d,7:%d\n", + dump_resp->ets_rec.tsatable[0], dump_resp->ets_rec.tsatable[1], + dump_resp->ets_rec.tsatable[2], dump_resp->ets_rec.tsatable[3], + dump_resp->ets_rec.tsatable[4], dump_resp->ets_rec.tsatable[5], + dump_resp->ets_rec.tsatable[6], dump_resp->ets_rec.tsatable[7]); + LOG_DEV_INFO("\t\t pfc cfg parse:\n"); + LOG_DEV_INFO("\t\t pfc cfg willing %d\n", dump_resp->pfc_cfg.willing); + LOG_DEV_INFO("\t\t pfc cfg mbc %d\n", dump_resp->pfc_cfg.mbc); + LOG_DEV_INFO("\t\t pfc cfg pfccap 0x%02x\n", dump_resp->pfc_cfg.pfccap); + LOG_DEV_INFO("\t\t pfc cfg pfcena 0x%02x\n", dump_resp->pfc_cfg.pfcena); + + LOG_DEV_INFO("\t\t app cfg parse:\n"); + LOG_DEV_INFO("\t\t app cfg numapps %d\n", dump_resp->num_apps); + for (j = 0; j < dump_resp->num_apps; j++) { + LOG_DEV_INFO("\t\t app cfg app[%d] %d,%d,%d\n", j, + dump_resp->app_cfg[j].priority, dump_resp->app_cfg[j].selector, + dump_resp->app_cfg[j].protId); + } + } +l_end: + kfree(dump_resp); +} + +#ifdef SXE2_CFG_DEBUG +static inline void sxe2_lldp_dcbx_agent_on(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_fw_agent req = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, &req, sizeof(req), + NULL, 0); + + req.enable = 1; + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_DEV_ERR("lldp fw state cmd fail, ret=%d\n", ret); +} + +static inline void sxe2_lldp_dcbx_agent_off(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_fw_agent req = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, &req, sizeof(req), + NULL, 0); + + req.enable = 0; + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_DEV_ERR("lldp fw state cmd fail, ret=%d\n", ret); +} + +static inline void sxe2_lldp_dcbx_agent_is_on(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_fw_agent resp = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_DCBX_FW_AGENT_GET, NULL, 0, + 
&resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_DEV_ERR("lldp fw state cmd fail, ret=%d\n", ret); + + LOG_DEV_INFO("\t lldp fw stats\n"); + LOG_DEV_INFO("\t\t lldp dcbx agent is %s\n", resp.enable ? "on" : "off"); +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.c new file mode 100644 index 0000000000000000000000000000000000000000..7d701b14dbe191b450972ac6268585ac47729c68 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.c @@ -0,0 +1,646 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_lldp_tlv.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_dcb.h" +#include "sxe2_dcb_nl.h" +#include "sxe2_lldp_tlv.h" +#include + +static void sxe2_ieee_ets_common_tlv_add(u8 *buf, struct sxe2_dcb_ets_cfg *ets_cfg) +{ + u32 i; + u8 offset = 0; + u8 priority0, priority1; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS / 2; i++) { + priority0 = ets_cfg->prio_tbl[i * 2] & 0xF; + priority1 = ets_cfg->prio_tbl[i * 2 + 1] & 0xF; + buf[offset] = (u8)(priority0 << SXE2_IEEE_ETS_PRIO_1_S) | priority1; + offset++; + } + + sxe2_for_each_tc(i) { + buf[offset] = ets_cfg->tcbw_tbl[i]; + buf[IEEE_8021QAZ_MAX_TCS + offset] = ets_cfg->tsa_tbl[i]; + offset++; + } +} + +static void sxe2_ieee_ets_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 typelen; + u32 ouisubtype; + u8 maxtcwilling = 0; + u8 *buf = tlv->tlvinfo; + struct sxe2_dcb_ets_cfg *etscfg; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((SXE2_IEEE_8021QAZ_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_IEEE_SUBTYPE_ETS_CFG); + + tlv->ouisubtype = htonl(ouisubtype); + + etscfg = &dcbcfg->ets; + + if (etscfg->willing) + maxtcwilling = BIT(SXE2_IEEE_ETS_WILLING_S); + maxtcwilling |= etscfg->maxtcs & SXE2_IEEE_ETS_MAXTC_M; + buf[0] = maxtcwilling; + + sxe2_ieee_ets_common_tlv_add(&buf[1], etscfg); +} + +static void sxe2_ieee_etsrec_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 typelen; + u32 ouisubtype; + u8 *buf = tlv->tlvinfo; + struct sxe2_dcb_ets_cfg *etsrec; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((SXE2_IEEE_8021QAZ_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = htonl(ouisubtype); + + etsrec = &dcbcfg->etsrec; + + sxe2_ieee_ets_common_tlv_add(&buf[1], etsrec); +} + +static void sxe2_ieee_pfc_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_IEEE_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((SXE2_IEEE_8021QAZ_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + buf[0] = 0; + buf[1] = 0; + if (dcbcfg->pfc.willing) + buf[0] = BIT(SXE2_IEEE_PFC_WILLING_S); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(SXE2_IEEE_PFC_MBC_S); + + buf[0] |= (u8)(dcbcfg->pfc.cap & 0xF); + buf[1] = dcbcfg->pfc.enable; +} + +static void sxe2_ieee_app_pri_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 typelen, len, offset = 0; + u8 priority, 
selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + if (dcbcfg->numapps == 0) + return; + ouisubtype = ((SXE2_IEEE_8021QAZ_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = htonl(ouisubtype); + + offset++; + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].prio & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (u8)(priority << SXE2_IEEE_APP_PRIO_S) | selector; + buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; + + offset += 3; + i++; + if (i >= SXE2_DCBX_MAX_APPS) + break; + } + + len = sizeof(tlv->ouisubtype) + 1 + (i * 3); + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | (len & 0x1FF)); + tlv->typelen = htons(typelen); +} + +static void sxe2_dscp_up_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + int i; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_DSCP_UP_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((SXE2_DSCP_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_DSCP_SUBTYPE_DSCP2UP); + tlv->ouisubtype = htonl(ouisubtype); + + for (i = 0; i < SXE2_DSCP_NUM_VAL; i++) { + buf[i] = dcbcfg->dscp_map[i]; + buf[i + SXE2_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i]; + } + + buf[i] = 0; + + buf[i + SXE2_DSCP_IPV6_OFFSET] = 0; +} + +#define SXE2_BYTES_PER_TC 8 +static void sxe2_dscp_enf_tlv_add(struct sxe2_lldp_org_tlv *tlv) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_DSCP_ENF_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((SXE2_DSCP_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_DSCP_SUBTYPE_ENFORCE); + tlv->ouisubtype = htonl(ouisubtype); + + (void)memset(buf, 0, 2 * (IEEE_8021QAZ_MAX_TCS * SXE2_BYTES_PER_TC)); +} + +static void sxe2_dscp_tc_bw_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u32 i; + u16 typelen; + u8 offset = 0; + u32 ouisubtype; + u8 *buf = tlv->tlvinfo; + struct sxe2_dcb_ets_cfg *etscfg; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_DSCP_TC_BW_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((SXE2_DSCP_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_DSCP_SUBTYPE_TCBW); + tlv->ouisubtype = htonl(ouisubtype); + + etscfg = &dcbcfg->ets; + buf[0] = etscfg->maxtcs & SXE2_IEEE_ETS_MAXTC_M; + + offset = 5; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + buf[offset] = etscfg->tcbw_tbl[i]; + buf[offset + IEEE_8021QAZ_MAX_TCS] = etscfg->tsa_tbl[i]; + buf[offset + IEEE_8021QAZ_MAX_TCS * 2] = etscfg->prio_tbl[i]; + offset++; + } +} + +static void sxe2_dscp_pfc_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 typelen; + u32 ouisubtype; + u8 *buf = tlv->tlvinfo; + + typelen = ((SXE2_TLV_TYPE_ORG << SXE2_LLDP_TLV_TYPE_S) | + SXE2_DSCP_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((SXE2_DSCP_OUI << SXE2_LLDP_TLV_OUI_S) | + SXE2_DSCP_SUBTYPE_PFC); + tlv->ouisubtype = htonl(ouisubtype); + + buf[0] = dcbcfg->pfc.cap & 0xF; + buf[1] = dcbcfg->pfc.enable; +} + +STATIC void sxe2_dcb_tlv_add(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg, u16 tlvid) +{ + if (dcbcfg->qos_mode == SXE2_QOS_MODE_VLAN) { + switch (tlvid) { + case SXE2_IEEE_TLV_ID_ETS_CFG: + sxe2_ieee_ets_tlv_add(tlv, dcbcfg); + break; + case SXE2_IEEE_TLV_ID_ETS_REC: + sxe2_ieee_etsrec_tlv_add(tlv, dcbcfg); + break; + case SXE2_IEEE_TLV_ID_PFC_CFG: + 
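+ /* The IEEE PFC TLV body is two octets: byte 0 packs the willing and MBC bits plus the PFC capability, byte 1 is the per-priority enable bitmap (see sxe2_ieee_pfc_tlv_add above). */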
sxe2_ieee_pfc_tlv_add(tlv, dcbcfg); + break; + case SXE2_IEEE_TLV_ID_APP_PRI: + sxe2_ieee_app_pri_tlv_add(tlv, dcbcfg); + break; + default: + break; + } + } else { + switch (tlvid) { + case SXE2_TLV_ID_DSCP_UP: + sxe2_dscp_up_tlv_add(tlv, dcbcfg); + break; + case SXE2_TLV_ID_DSCP_ENF: + sxe2_dscp_enf_tlv_add(tlv); + break; + case SXE2_TLV_ID_DSCP_TC_BW: + sxe2_dscp_tc_bw_tlv_add(tlv, dcbcfg); + break; + case SXE2_TLV_ID_DSCP_TO_PFC: + sxe2_dscp_pfc_tlv_add(tlv, dcbcfg); + break; + default: + break; + } + } +} + +void sxe2_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 typelen; + struct sxe2_lldp_org_tlv *tlv; + u16 len, offset = 0, tlvid = SXE2_TLV_ID_START; + + tlv = (struct sxe2_lldp_org_tlv *)lldpmib; + while (1) { + sxe2_dcb_tlv_add(tlv, dcbcfg, tlvid++); + + typelen = ntohs(tlv->typelen); + len = (typelen & SXE2_LLDP_TLV_LEN_M) >> SXE2_LLDP_TLV_LEN_S; + if (len) + offset += len + SXE2_TLV_HEADER_LEN; + + if (tlvid >= SXE2_TLV_ID_END_OF_LLDPPDU || offset > SXE2_LLDPDU_SIZE) + break; + + if (len) + tlv = (struct sxe2_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelen) + len); + } + + *miblen = offset; +} + +static void sxe2_ieee_ets_common_tlv_parse(u8 *buf, struct sxe2_dcb_ets_cfg *ets_cfg) +{ + u8 offset = 0; + int i; + + for (i = 0; i < 4; i++) { + ets_cfg->prio_tbl[i * 2] = + FIELD_GET(SXE2_IEEE_ETS_PRIO_1_M, buf[offset]); + ets_cfg->prio_tbl[i * 2 + 1] = + FIELD_GET(SXE2_IEEE_ETS_PRIO_0_M, buf[offset]); + offset++; + } + + sxe2_for_each_tc(i) { + ets_cfg->tcbw_tbl[i] = buf[offset]; + ets_cfg->tsa_tbl[i] = buf[IEEE_8021QAZ_MAX_TCS + offset++]; + } +} + +static void sxe2_ieee_etscfg_tlv_parse(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + struct sxe2_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + + etscfg = &dcbcfg->ets; + etscfg->willing = FIELD_GET((u32)SXE2_IEEE_ETS_WILLING_M, buf[0]); + etscfg->cbs = FIELD_GET((u32)SXE2_IEEE_ETS_CBS_M, buf[0]); + etscfg->maxtcs = FIELD_GET(SXE2_IEEE_ETS_MAXTC_M, buf[0]); + + sxe2_ieee_ets_common_tlv_parse(&buf[1], etscfg); +} + +static void sxe2_ieee_etsrec_tlv_parse(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + sxe2_ieee_ets_common_tlv_parse(&buf[1], &dcbcfg->etsrec); +} + +static void sxe2_ieee_pfccfg_tlv_parse(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + dcbcfg->pfc.willing = FIELD_GET((u32)SXE2_IEEE_PFC_WILLING_M, buf[0]); + dcbcfg->pfc.mbc = FIELD_GET((u32)SXE2_IEEE_PFC_MBC_M, buf[0]); + dcbcfg->pfc.cap = FIELD_GET(SXE2_IEEE_PFC_CAP_M, buf[0]); + dcbcfg->pfc.enable = buf[1]; +} + +static void sxe2_ieee_app_tlv_parse(struct sxe2_lldp_org_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 offset = 0; + u16 typelen; + u32 i = 0; + u16 len; + u8 *buf; + + typelen = ntohs(tlv->typelen); + len = FIELD_GET(SXE2_LLDP_TLV_LEN_M, typelen); + buf = tlv->tlvinfo; + + len -= (sizeof(tlv->ouisubtype) + 1); + + offset++; + + while (offset < len) { + dcbcfg->app[i].prio = FIELD_GET(SXE2_IEEE_APP_PRIO_M, buf[offset]); + dcbcfg->app[i].selector = FIELD_GET(SXE2_IEEE_APP_SEL_M, buf[offset]); + dcbcfg->app[i].prot_id = (u16)((buf[offset + 1] << 0x8) | buf[offset + 2]); + offset += 3; + i++; + if (i >= SXE2_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +STATIC void sxe2_ieee_tlv_parse(struct sxe2_lldp_org_tlv *tlv, struct sxe2_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)FIELD_GET(SXE2_LLDP_TLV_SUBTYPE_M, ouisubtype); + switch 
(subtype) { + case SXE2_IEEE_SUBTYPE_ETS_CFG: + sxe2_ieee_etscfg_tlv_parse(tlv, dcbcfg); + break; + case SXE2_IEEE_SUBTYPE_ETS_REC: + sxe2_ieee_etsrec_tlv_parse(tlv, dcbcfg); + break; + case SXE2_IEEE_SUBTYPE_PFC_CFG: + sxe2_ieee_pfccfg_tlv_parse(tlv, dcbcfg); + break; + case SXE2_IEEE_SUBTYPE_APP_PRI: + sxe2_ieee_app_tlv_parse(tlv, dcbcfg); + break; + default: + break; + } +} + +STATIC void sxe2_cee_pgcfg_tlv_parse(struct sxe2_cee_feat_tlv *tlv, struct sxe2_dcbx_cfg *dcbcfg) +{ + struct sxe2_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + int i; + + etscfg = &dcbcfg->ets; + + if (tlv->en_will_err & SXE2_CEE_FEAT_TLV_WILLING_M) + etscfg->willing = 1; + + etscfg->cbs = 0; + for (i = 0; i < 4; i++) { + etscfg->prio_tbl[i * 2] = + FIELD_GET(SXE2_CEE_PGID_PRIO_1_M, buf[offset]); + etscfg->prio_tbl[i * 2 + 1] = + FIELD_GET(SXE2_CEE_PGID_PRIO_0_M, buf[offset]); + offset++; + } + + sxe2_for_each_tc(i) { + etscfg->tcbw_tbl[i] = buf[offset++]; + + if (etscfg->prio_tbl[i] == SXE2_CEE_PGID_STRICT) + dcbcfg->ets.tsa_tbl[i] = SXE2_IEEE_TSA_STRICT; + else + dcbcfg->ets.tsa_tbl[i] = SXE2_IEEE_TSA_ETS; + } + + etscfg->maxtcs = buf[offset]; +} + +STATIC void sxe2_cee_pfccfg_tlv_parse(struct sxe2_cee_feat_tlv *tlv, + struct sxe2_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & SXE2_CEE_FEAT_TLV_WILLING_M) + dcbcfg->pfc.willing = 1; + + dcbcfg->pfc.enable = buf[0]; + dcbcfg->pfc.cap = buf[1]; +} + +STATIC void sxe2_cee_app_tlv_parse(struct sxe2_cee_feat_tlv *tlv, struct sxe2_dcbx_cfg *dcbcfg) +{ + u16 len, typelen, offset = 0; + struct sxe2_cee_app_prio *app; + u8 i; + + typelen = ntohs(tlv->hdr.typelen); + len = FIELD_GET(SXE2_LLDP_TLV_LEN_M, typelen); + + dcbcfg->numapps = len / sizeof(*app); + if (!dcbcfg->numapps) + return; + if (dcbcfg->numapps > SXE2_DCBX_MAX_APPS) + dcbcfg->numapps = SXE2_DCBX_MAX_APPS; + + for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + + app = (struct sxe2_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < SXE2_MAX_USER_PRIORITY; up++) + if (app->prio_map & BIT(up)) + break; + + dcbcfg->app[i].prio = up; + + selector = (app->upper_oui_sel & SXE2_CEE_APP_SELECTOR_M); + switch (selector) { + case SXE2_CEE_APP_SEL_ETHTYPE: + dcbcfg->app[i].selector = SXE2_APP_SEL_ETHTYPE; + break; + case SXE2_CEE_APP_SEL_TCPIP: + dcbcfg->app[i].selector = SXE2_APP_SEL_TCPIP; + break; + default: + dcbcfg->app[i].selector = selector; + } + + dcbcfg->app[i].prot_id = ntohs(app->protocol); + offset += sizeof(*app); + } +} + +STATIC void sxe2_cee_tlv_parse(struct sxe2_lldp_org_tlv *tlv, struct sxe2_dcbx_cfg *dcbcfg) +{ + struct sxe2_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u16 len, tlvlen, typelen; + u32 ouisubtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)FIELD_GET(SXE2_LLDP_TLV_SUBTYPE_M, ouisubtype); + if (subtype != SXE2_CEE_DCBX_TYPE) + return; + + typelen = ntohs(tlv->typelen); + tlvlen = FIELD_GET(SXE2_LLDP_TLV_LEN_M, typelen); + len = sizeof(tlv->typelen) + sizeof(ouisubtype) + + sizeof(struct sxe2_cee_ctrl_tlv); + if (tlvlen <= len) + return; + + sub_tlv = (struct sxe2_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < SXE2_CEE_MAX_FEAT_TYPE) { + u16 sublen; + + typelen = ntohs(sub_tlv->hdr.typelen); + sublen = FIELD_GET(SXE2_LLDP_TLV_LEN_M, typelen); + subtype = FIELD_GET(SXE2_LLDP_TLV_TYPE_M, typelen); + switch (subtype) { + case SXE2_CEE_SUBTYPE_PG_CFG: + sxe2_cee_pgcfg_tlv_parse(sub_tlv, dcbcfg); + break; + case SXE2_CEE_SUBTYPE_PFC_CFG: + sxe2_cee_pfccfg_tlv_parse(sub_tlv, dcbcfg); + 
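+ /* Note: the CEE PFC feature sub-TLV orders its fields opposite to the IEEE PFC TLV parsed above: the enable bitmap comes first (byte 0), the capability second (byte 1). */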
break; + case SXE2_CEE_SUBTYPE_APP_PRI: + sxe2_cee_app_tlv_parse(sub_tlv, dcbcfg); + break; + default: + return; + } + feat_tlv_count++; + sub_tlv = (struct sxe2_cee_feat_tlv *) + ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +static void sxe2_parse_org_tlv(struct sxe2_lldp_org_tlv *tlv, struct sxe2_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = ntohl(tlv->ouisubtype); + oui = FIELD_GET((u32)SXE2_LLDP_TLV_OUI_M, ouisubtype); + + switch (oui) { + case SXE2_IEEE_8021QAZ_OUI: + sxe2_ieee_tlv_parse(tlv, dcbcfg); + break; + case SXE2_CEE_DCBX_OUI: + sxe2_cee_tlv_parse(tlv, dcbcfg); + break; + default: + break; + } +} + +s32 sxe2_lldp_to_dcb_cfg(u8 *lldpmib, struct sxe2_dcbx_cfg *dcbcfg) +{ + struct sxe2_lldp_org_tlv *tlv; + u16 offset = 0; + u16 typelen; + u16 type; + u16 len; + + if (!lldpmib || !dcbcfg) + return -EINVAL; + + tlv = (struct sxe2_lldp_org_tlv *)lldpmib; + while (1) { + typelen = ntohs(tlv->typelen); + type = FIELD_GET(SXE2_LLDP_TLV_TYPE_M, typelen); + len = FIELD_GET(SXE2_LLDP_TLV_LEN_M, typelen); + offset += sizeof(typelen) + len; + + if (type == SXE2_TLV_TYPE_END || offset > SXE2_LLDPDU_SIZE) + break; + + switch (type) { + case SXE2_TLV_TYPE_ORG: + sxe2_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + tlv = (struct sxe2_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + } + + return 0; +} + +STATIC s32 sxe2_fw_dcbx_local_mib_get(struct sxe2_adapter *adapter, + struct sxe2_fwc_local_mib_get *mib) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_LLDP_MIB_GET, NULL, 0, mib, + (sizeof(struct sxe2_fwc_local_mib_get) + SXE2_LLDPDU_SIZE)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("add vsi failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_fw_dcbx_agent_cfg_get(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *dcbcfg) +{ + s32 ret; + struct sxe2_fwc_local_mib_get *mib; + int i; + + mib = devm_kzalloc(&adapter->pdev->dev, (sizeof(struct sxe2_fwc_local_mib_get) + + SXE2_LLDPDU_SIZE), GFP_KERNEL); + if (!mib) + return -ENOMEM; + + ret = sxe2_fw_dcbx_local_mib_get(adapter, mib); + if (!ret) + ret = sxe2_lldp_to_dcb_cfg(mib->mib_buffer, dcbcfg); + + devm_kfree(&adapter->pdev->dev, mib); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + dcbcfg->usr_bw_value[i] = 0; + dcbcfg->hw_bw_value[i] = SXE2_TXSCHED_DFLT_BW; + } + + return ret; +} \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.h new file mode 100644 index 0000000000000000000000000000000000000000..b0d9e3efce2a7f2846e02a98f5d8188e00f4544c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_lldp_tlv.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_lldp_tlv.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_LLDP_TLV_H__ +#define __SXE2_LLDP_TLV_H__ + +#include +#include +#include "sxe2_cmd.h" + +#define SXE2_TLV_STATUS_OPER 0x1 +#define SXE2_TLV_STATUS_SYNC 0x2 +#define SXE2_TLV_STATUS_ERR 0x4 +#define SXE2_APP_PROT_ID_ISCSI_860 0x035c +#define SXE2_APP_SEL_ETHTYPE 0x1 +#define SXE2_APP_SEL_TCPIP 0x2 +#define SXE2_CEE_APP_SEL_ETHTYPE 0x0 +#define SXE2_CEE_APP_SEL_TCPIP 0x1 + +#define SXE2_DCBX_STATUS_NOT_STARTED 0 +#define SXE2_DCBX_STATUS_IN_PROGRESS 1 +#define SXE2_DCBX_STATUS_DONE 2 +#define SXE2_DCBX_STATUS_DIS 7 + +#define SXE2_TLV_TYPE_END 0 +#define SXE2_TLV_TYPE_ORG 127 + +#define SXE2_IEEE_8021QAZ_OUI 0x0080C2 +#define SXE2_IEEE_SUBTYPE_ETS_CFG 9 +#define SXE2_IEEE_SUBTYPE_ETS_REC 10 +#define SXE2_IEEE_SUBTYPE_PFC_CFG 11 +#define SXE2_IEEE_SUBTYPE_APP_PRI 12 + +#define SXE2_CEE_DCBX_OUI 0x001B21 +#define SXE2_CEE_DCBX_TYPE 2 + +#define SXE2_CEE_SUBTYPE_PG_CFG 2 +#define SXE2_CEE_SUBTYPE_PFC_CFG 3 +#define SXE2_CEE_SUBTYPE_APP_PRI 4 +#define SXE2_CEE_MAX_FEAT_TYPE 3 + +#define SXE2_LLDP_TLV_LEN_S 0 +#define SXE2_LLDP_TLV_LEN_M (0x01FF << SXE2_LLDP_TLV_LEN_S) +#define SXE2_LLDP_TLV_TYPE_S 9 +#define SXE2_LLDP_TLV_TYPE_M (0x7F << SXE2_LLDP_TLV_TYPE_S) +#define SXE2_LLDP_TLV_SUBTYPE_S 0 +#define SXE2_LLDP_TLV_SUBTYPE_M (0xFF << SXE2_LLDP_TLV_SUBTYPE_S) +#define SXE2_LLDP_TLV_OUI_S 8 +#define SXE2_LLDP_TLV_OUI_M (0xFFFFFFUL << SXE2_LLDP_TLV_OUI_S) + +#define SXE2_IEEE_ETS_MAXTC_S 0 +#define SXE2_IEEE_ETS_MAXTC_M (0x7 << SXE2_IEEE_ETS_MAXTC_S) +#define SXE2_IEEE_ETS_CBS_S 6 +#define SXE2_IEEE_ETS_CBS_M BIT(SXE2_IEEE_ETS_CBS_S) +#define SXE2_IEEE_ETS_WILLING_S 7 +#define SXE2_IEEE_ETS_WILLING_M BIT(SXE2_IEEE_ETS_WILLING_S) +#define SXE2_IEEE_ETS_PRIO_0_S 0 +#define SXE2_IEEE_ETS_PRIO_0_M (0x7 << SXE2_IEEE_ETS_PRIO_0_S) +#define SXE2_IEEE_ETS_PRIO_1_S 4 +#define SXE2_IEEE_ETS_PRIO_1_M (0x7 << SXE2_IEEE_ETS_PRIO_1_S) +#define SXE2_CEE_PGID_PRIO_0_S 0 +#define SXE2_CEE_PGID_PRIO_0_M (0xF << SXE2_CEE_PGID_PRIO_0_S) +#define SXE2_CEE_PGID_PRIO_1_S 4 +#define SXE2_CEE_PGID_PRIO_1_M (0xF << SXE2_CEE_PGID_PRIO_1_S) +#define SXE2_CEE_PGID_STRICT 15 + +#define SXE2_IEEE_TSA_STRICT 0 +#define SXE2_IEEE_TSA_ETS 2 + +#define SXE2_IEEE_PFC_CAP_S 0 +#define SXE2_IEEE_PFC_CAP_M (0xF << SXE2_IEEE_PFC_CAP_S) +#define SXE2_IEEE_PFC_MBC_S 6 +#define SXE2_IEEE_PFC_MBC_M BIT(SXE2_IEEE_PFC_MBC_S) +#define SXE2_IEEE_PFC_WILLING_S 7 +#define SXE2_IEEE_PFC_WILLING_M BIT(SXE2_IEEE_PFC_WILLING_S) + +#define SXE2_IEEE_APP_SEL_S 0 +#define SXE2_IEEE_APP_SEL_M (0x7 << SXE2_IEEE_APP_SEL_S) +#define SXE2_IEEE_APP_PRIO_S 5 +#define SXE2_IEEE_APP_PRIO_M (0x7 << SXE2_IEEE_APP_PRIO_S) + +#define SXE2_LLDPDU_SIZE 1500 +#define SXE2_TLV_HEADER_LEN 2 + +#define SXE2_IEEE_ETS_TLV_LEN 25 +#define SXE2_IEEE_PFC_TLV_LEN 6 +#define SXE2_IEEE_APP_TLV_LEN 11 + +#define SXE2_DSCP_UP_TLV_LEN 148 +#define SXE2_DSCP_ENF_TLV_LEN 132 +#define SXE2_DSCP_TC_BW_TLV_LEN 33 +#define SXE2_DSCP_PFC_TLV_LEN 6 + +#define SXE2_IEEE_TLV_ID_ETS_CFG 3 +#define SXE2_IEEE_TLV_ID_ETS_REC 4 +#define SXE2_IEEE_TLV_ID_PFC_CFG 5 +#define SXE2_IEEE_TLV_ID_APP_PRI 6 +#define SXE2_TLV_ID_END_OF_LLDPPDU 7 +#define SXE2_TLV_ID_START SXE2_IEEE_TLV_ID_ETS_CFG +#define SXE2_TLV_ID_DSCP_UP 3 +#define SXE2_TLV_ID_DSCP_ENF 4 +#define SXE2_TLV_ID_DSCP_TC_BW 5 +#define SXE2_TLV_ID_DSCP_TO_PFC 6 + +struct sxe2_lldp_org_tlv { + __be16 typelen; + __be32 ouisubtype; + u8 tlvinfo[]; +} __packed; + +struct sxe2_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 
maxver; +}; + +struct sxe2_cee_ctrl_tlv { + struct sxe2_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct sxe2_cee_feat_tlv { + struct sxe2_cee_tlv_hdr hdr; + u8 en_will_err; +#define SXE2_CEE_FEAT_TLV_ENA_M 0x80 +#define SXE2_CEE_FEAT_TLV_WILLING_M 0x40 +#define SXE2_CEE_FEAT_TLV_ERR_M 0x20 + u8 subtype; + u8 tlvinfo[]; +}; + +struct sxe2_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; +#define SXE2_CEE_APP_SELECTOR_M 0x03 + __be16 lower_oui; + u8 prio_map; +} __packed; + +void sxe2_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, + struct sxe2_dcbx_cfg *dcbcfg); + +s32 sxe2_fw_dcbx_agent_cfg_get(struct sxe2_adapter *adapter, + struct sxe2_dcbx_cfg *dcbcfg); + +s32 sxe2_lldp_to_dcb_cfg(u8 *lldpmib, struct sxe2_dcbx_cfg *dcbcfg); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.c new file mode 100644 index 0000000000000000000000000000000000000000..1d236c3d30d61185a13b8748300474308ca8bc1a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.c @@ -0,0 +1,1235 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_macsec.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_netdev.h" +#include "sxe2_log.h" +#include "sxe2_macsec.h" +#include "sxe2_rx.h" +#include "sxe2_tx.h" +#include "sxe2_spec.h" + +#ifdef HAVE_MACSEC_SUPPORT + +static u64 swap_uint64(u64 val) +{ + val = ((val << 8) & 0xFF00FF00FF00FF00) | ((val >> 8) & 0x00FF00FF00FF00FF); + val = ((val << 16) & 0xFFFF0000FFFF0000) | ((val >> 16) & 0x0000FFFF0000FFFF); + return (val << 32) | (val >> 32); +} + +STATIC bool sxe2_macsec_offload_en(struct net_device *netdev) +{ + return netdev->features & NETIF_F_HW_MACSEC; +} + +static bool sxe2_macsec_secy_check(struct macsec_context *ctx) +{ + const struct macsec_secy *secy = ctx->secy; + + if (secy->key_len != SXE2_MACSEC_KEY_LEN) { + LOG_ERROR("MACsec offload is supported only when key_len is %d\n", + SXE2_MACSEC_KEY_LEN); + return false; + } + + if (secy->icv_len != SXE2_MACSEC_KEY_LEN) { + LOG_ERROR("MACsec offload is supported only when icv_len is %d\n", + SXE2_MACSEC_KEY_LEN); + return false; + } + + if (!secy->protect_frames) { + LOG_ERROR("MACsec offload is supported only when protect_frames is set\n"); + return false; + } + + if (secy->replay_window) { + LOG_ERROR("MACsec offload: replay_protect isn't supported\n"); + return false; + } + + if (secy->tx_sc.scb) { + LOG_ERROR("MACsec offload: scb isn't supported\n"); + return false; + } + + return true; +} + +static void sxe2_macsec_txsc_clear(struct sxe2_macsec_context *macsec) +{ + struct sxe2_macsec_txsc *tx_sc = &macsec->tx_sc; + + tx_sc->active = 0; + tx_sc->encrypt = 0; + tx_sc->aisci = 0; + tx_sc->es = 0; +} + +static void sxe2_macsec_rxsc_clear(struct sxe2_macsec_context *macsec) +{ + struct sxe2_macsec_rxsc *rx_sc = macsec->rx_sc; + + rx_sc->active = 0; + rx_sc->protect = 0; + rx_sc->sci = 0; +} + +static void sxe2_macsec_rxsc_release(struct sxe2_macsec_context *macsec) +{ + struct sxe2_macsec_rxsc *rx_sc; + + rx_sc = rcu_replace_pointer(macsec->rx_sc, NULL, 1); + + synchronize_rcu(); + + if (rx_sc) { + if (rx_sc->md_dst) + dst_release(&rx_sc->md_dst->dst); + kfree_rcu(rx_sc, rcu); + }; +} + +static void sxe2_macsec_sa_clear(struct sxe2_macsec_sa *sa) +{ + sa->added = false; + sa->active = false; + sa->an_value = 0; + sa->ssci = 0; + (void)memset(sa->pn, 0, 
sizeof(sa->pn)); + (void)memset(sa->salt, 0, sizeof(sa->salt)); + (void)memset(sa->key, 0, sizeof(sa->key)); +} + +static s32 sxe2_macsec_txsc_cfg(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_txsc *tx_sc = &macsec->tx_sc; + struct sxe2_fw_macsec_txsc txsc; + + txsc.active = (u8)tx_sc->active; + txsc.xpn = (u8)macsec->xpn; + txsc.aisci = (u8)tx_sc->aisci; + txsc.es = (u8)tx_sc->es; + txsc.encrypt = (u8)tx_sc->encrypt; + txsc.sci = cpu_to_le64(macsec->sci); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MACSEC_TXSC_CFG, &txsc, + sizeof(struct sxe2_fw_macsec_txsc), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("macsec txsc config failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_macsec_txsa_cfg(struct sxe2_adapter *adapter, + struct sxe2_macsec_sa *tx_sa) +{ + s32 ret = 0; + u8 i; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fw_macsec_sa txsa; + + txsa.active = (u8)tx_sa->active; + txsa.an_value = tx_sa->an_value; + txsa.ssci = cpu_to_le32(tx_sa->ssci); + for (i = 0; i < MACSEC_PN_LEN_MAX; i++) + txsa.pn[i] = cpu_to_le32(tx_sa->pn[i]); + + for (i = 0; i < MACSEC_SALT_COUNT; i++) + txsa.salt[i] = cpu_to_le32(tx_sa->salt[i]); + + for (i = 0; i < MACSEC_KEY_LEN; i++) + txsa.key[i] = cpu_to_le32(tx_sa->key[i]); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MACSEC_TXSA_CFG, &txsa, + sizeof(struct sxe2_fw_macsec_sa), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("macsec txsa config failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_macsec_rxsc_cfg(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_rxsc *rx_sc = macsec->rx_sc; + struct sxe2_fw_macsec_rxsc rxsc; + + rxsc.active = (u8)rx_sc->active; + rxsc.xpn = (u8)macsec->xpn; + rxsc.protect = (u8)macsec->protect; + rxsc.validate_mode = macsec->validate_frames; + rxsc.sci = cpu_to_le64(rx_sc->sci); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MACSEC_RXSC_CFG, &rxsc, + sizeof(struct sxe2_fw_macsec_rxsc), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("macsec rxsc config failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_macsec_rxsa_cfg(struct sxe2_adapter *adapter, + struct sxe2_macsec_sa *rx_sa) +{ + s32 ret = 0; + u8 i; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fw_macsec_sa rxsa; + + rxsa.active = (u8)rx_sa->active; + rxsa.an_value = rx_sa->an_value; + rxsa.ssci = cpu_to_le32(rx_sa->ssci); + for (i = 0; i < MACSEC_PN_LEN_MAX; i++) + rxsa.pn[i] = cpu_to_le32(rx_sa->pn[i]); + + for (i = 0; i < MACSEC_SALT_COUNT; i++) + rxsa.salt[i] = cpu_to_le32(rx_sa->salt[i]); + + for (i = 0; i < MACSEC_KEY_LEN; i++) + rxsa.key[i] = cpu_to_le32(rx_sa->key[i]); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MACSEC_RXSA_CFG, &rxsa, + sizeof(struct sxe2_fw_macsec_sa), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("macsec rxsa config failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_macsec_add_secy(struct macsec_context *ctx) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_txsc *tx_sc = &macsec->tx_sc; + 
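+ /* add_secy is invoked once per offloaded SecY: the request is validated (key and ICV lengths must equal SXE2_MACSEC_KEY_LEN, protect_frames set, no replay window or SCB), the TX SC parameters are cached in the driver context, and then pushed to firmware via SXE2_CMD_MACSEC_TXSC_CFG. */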
const struct macsec_secy *secy = ctx->secy; + const struct macsec_tx_sc *ctx_tx_sc = &secy->tx_sc; + + LOG_DEBUG_BDF("macsec add secy start...\n"); + + if (!sxe2_macsec_secy_check(ctx)) + return -EINVAL; + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to add secy, because macsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + if (macsec->netdev) { + LOG_ERROR_BDF("macsec secy add error, macsec net_device already exists.\n"); + ret = -EINVAL; + goto out; + } + + macsec->validate_frames = secy->validate_frames; + macsec->xpn = secy->xpn; + macsec->sci = swap_uint64(secy->sci); + macsec->protect = secy->replay_protect; + + tx_sc->active = ctx_tx_sc->active; + tx_sc->aisci = ctx_tx_sc->send_sci; + tx_sc->es = ctx_tx_sc->end_station; + tx_sc->encrypt = ctx_tx_sc->encrypt; + + ret = sxe2_macsec_txsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec update txsc, param config failed.\n"); + goto out; + } + + macsec->netdev = secy->netdev; + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_upd_secy(struct macsec_context *ctx) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_txsc *tx_sc = &macsec->tx_sc; + struct sxe2_macsec_rxsc *rx_sc = macsec->rx_sc; + const struct macsec_secy *secy = ctx->secy; + const struct macsec_tx_sc *ctx_tx_sc = &secy->tx_sc; + + LOG_DEBUG_BDF("macsec upd secy start...\n"); + + if (!sxe2_macsec_secy_check(ctx)) + return -EINVAL; + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to upd secy, because macsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + if (macsec->xpn != secy->xpn || macsec->sci != swap_uint64(secy->sci) || + tx_sc->aisci != ctx_tx_sc->send_sci || + tx_sc->es != ctx_tx_sc->end_station || + tx_sc->encrypt != ctx_tx_sc->encrypt) { + macsec->xpn = secy->xpn; + macsec->sci = swap_uint64(secy->sci); + tx_sc->aisci = ctx_tx_sc->send_sci; + tx_sc->es = ctx_tx_sc->end_station; + tx_sc->encrypt = ctx_tx_sc->encrypt; + + ret = sxe2_macsec_txsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec txsc config failed.\n"); + goto out; + } + } + + if (macsec->validate_frames != secy->validate_frames || + macsec->protect != secy->replay_protect || + macsec->xpn != secy->xpn) { + macsec->validate_frames = secy->validate_frames; + macsec->protect = secy->replay_protect; + + if (!rx_sc) { + LOG_INFO_BDF("rxsc not add, can not update validate_frames, " + "protect or pn/xpn.\n"); + goto out; + } + + ret = sxe2_macsec_rxsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec rxsc config failed.\n"); + goto out; + } + } + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_del_secy(struct macsec_context *ctx) +{ + s32 ret = 0; + u8 i; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_sa *tx_sa; + struct sxe2_macsec_sa *rx_sa; + + LOG_DEBUG_BDF("macsec del secy start...\n"); + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to del secy, because macsec offload is
disable.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + macsec->xpn = 0; + macsec->sci = 0; + macsec->protect = 0; + macsec->validate_frames = 0; + + sxe2_macsec_txsc_clear(macsec); + ret = sxe2_macsec_txsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec delete txsc, delete failed.\n"); + goto out; + } + + for (i = 0; i < SXE2_MACSEC_MAX_SA; i++) { + tx_sa = &macsec->tx_sc.sa[i]; + if (!tx_sa->added) + continue; + + sxe2_macsec_sa_clear(tx_sa); + (void)sxe2_macsec_txsa_cfg(adapter, tx_sa); + } + + if (!macsec->rx_sc) { + LOG_ERROR_BDF("macsec delete rxsc, rxsc is null, not need delete.\n"); + goto netdev_null; + } + + sxe2_macsec_rxsc_clear(macsec); + ret = sxe2_macsec_rxsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec delete rxsc, delete failed.\n"); + goto out; + } + + for (i = 0; i < SXE2_MACSEC_MAX_SA; i++) { + rx_sa = &macsec->rx_sc->sa[i]; + if (!rx_sa->added) + continue; + + sxe2_macsec_sa_clear(rx_sa); + (void)sxe2_macsec_rxsa_cfg(adapter, rx_sa); + } + sxe2_macsec_rxsc_release(macsec); + +netdev_null: + macsec->netdev = NULL; + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_add_txsa(struct macsec_context *ctx) +{ + s32 ret = 0; + u8 sa_id = ctx->sa.assoc_num; + u8 i, j, other_sa; + u8 key[SXE2_MACSEC_KEY_LEN]; + u8 max_id = SXE2_MACSEC_KEY_LEN - 1; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_sa *tx_sa; + const struct macsec_secy *secy = ctx->secy; + const struct macsec_tx_sc *ctx_tx_sc = &secy->tx_sc; + const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa; + + LOG_DEBUG_BDF("macsec add txsa start...\n"); + + if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) { + LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n"); + ret = -EINVAL; + goto out; + } + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to add txsa, because macsec offload is disable.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + tx_sa = &macsec->tx_sc.sa[sa_id]; + + if (tx_sa->added) { + LOG_ERROR_BDF("macsec tx sa add error, tx sa %d already exist.\n", sa_id); + ret = -EEXIST; + goto out; + } + + other_sa = sa_id ? 
0 : 1; + if (macsec->tx_sc.sa[other_sa].added && + macsec->tx_sc.sa[other_sa].active && ctx_tx_sa->active) { + LOG_ERROR_BDF("macsec add txsa, only support one active sa.\n"); + ret = -EOPNOTSUPP; + goto out; + } + + tx_sa->active = ctx_tx_sa->active; + tx_sa->an_value = ctx->sa.assoc_num; + tx_sa->pn[0] = ctx_tx_sa->next_pn_halves.lower; + for (i = 0; i <= max_id; i++) { + j = (u8)(max_id - i); + key[i] = ctx->sa.key[j]; + } + if (macsec->xpn) { + tx_sa->ssci = ctx_tx_sa->ssci; + tx_sa->pn[1] = ctx_tx_sa->next_pn_halves.upper; + (void)memcpy(tx_sa->salt, &ctx_tx_sa->key.salt, MACSEC_SALT_LEN); + } + (void)memcpy(tx_sa->key, key, SXE2_MACSEC_KEY_LEN); + + if (!secy->operational || tx_sa->an_value != ctx_tx_sc->encoding_sa) { + LOG_ERROR_BDF("macsec txsa add failed, operational[%d], " + "an_value[%d], encoding_sa[%d].\n", + secy->operational, tx_sa->an_value, + ctx_tx_sc->encoding_sa); + goto out; + } + + ret = sxe2_macsec_txsa_cfg(adapter, tx_sa); + if (ret) { + LOG_ERROR_BDF("macsec txsa config failed.\n"); + goto out; + } + + tx_sa->added = true; + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_upd_txsa(struct macsec_context *ctx) +{ + s32 ret = 0; + u8 sa_id = ctx->sa.assoc_num; + u8 other_sa; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_sa *tx_sa; + const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa; + + LOG_DEBUG_BDF("macsec upd txsa start...\n"); + + if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) { + LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n"); + return -EINVAL; + } + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to upd txsa, because macsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + tx_sa = &macsec->tx_sc.sa[sa_id]; + + if (!tx_sa->added) { + LOG_ERROR_BDF("macsec tx sa update error, tx sa %d doesn't exist.\n", sa_id); + ret = -EINVAL; + goto out; + } + + if (tx_sa->pn[0] != ctx_tx_sa->next_pn_halves.lower) { + LOG_INFO_BDF("MACsec offload: update TX sa %d PN isn't supported\n", sa_id); + ret = -EINVAL; + goto out; + } + + if (tx_sa->active == ctx_tx_sa->active) { + LOG_INFO_BDF("update tx sa active=%d, no change, skip config.\n", + ctx_tx_sa->active); + goto out; + } + + other_sa = sa_id ?
0 : 1; + if (macsec->tx_sc.sa[other_sa].added && + macsec->tx_sc.sa[other_sa].active && ctx_tx_sa->active) { + LOG_ERROR_BDF("macsec upd txsa, only support one active sa.\n"); + ret = -EOPNOTSUPP; + goto out; + } + + tx_sa->active = ctx_tx_sa->active; + + ret = sxe2_macsec_txsa_cfg(adapter, tx_sa); + if (ret) { + LOG_ERROR_BDF("macsec txsa fill and cfg failed.\n"); + goto out; + } + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_del_txsa(struct macsec_context *ctx) +{ + s32 ret = 0; + u8 sa_id = ctx->sa.assoc_num; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_sa *tx_sa; + + LOG_DEBUG_BDF("macsec del txsa start...\n"); + + if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) { + LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n"); + return -EINVAL; + } + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to del txsa, because macsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + tx_sa = &macsec->tx_sc.sa[sa_id]; + + if (!tx_sa->added) { + LOG_ERROR_BDF("macsec tx sa delete error, tx sa %d doesn't exist.\n", sa_id); + ret = -ENOENT; + goto out; + } + + sxe2_macsec_sa_clear(tx_sa); + + ret = sxe2_macsec_txsa_cfg(adapter, tx_sa); + if (ret) { + LOG_ERROR_BDF("macsec txsa clear and delete failed.\n"); + goto out; + } + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_add_rxsc(struct macsec_context *ctx) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_rxsc *rx_sc; + const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc; + + LOG_DEBUG_BDF("macsec add rxsc start...\n"); + + mutex_lock(&macsec->lock); + + if (!sxe2_macsec_offload_en(ctx->netdev)) { + LOG_ERROR_BDF("failed to add rxsc, because macsec offload is disabled.\n"); + ret = -EINVAL; + goto out; + } + + if (macsec->state != SXE2_MACSEC_STATE_READY) { + ret = -EBUSY; + goto out; + } + + if (macsec->rx_sc) { + LOG_ERROR_BDF("macsec rx sc add error, rx sc already exists.\n"); + ret = -EEXIST; + goto out; + } + + rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); + if (!rx_sc) { + ret = -ENOMEM; + goto out; + } + + rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); + if (!rx_sc->md_dst) { + kfree(rx_sc); + ret = -ENOMEM; + goto out; + } + + rx_sc->active = ctx_rx_sc->active; + rx_sc->sci = swap_uint64(ctx_rx_sc->sci); +#ifdef SUPPORT_MACSEC_INFO_STRUCT + rx_sc->md_dst->u.macsec_info.sci = ctx_rx_sc->sci; +#endif + rcu_assign_pointer(macsec->rx_sc, rx_sc); + + ret = sxe2_macsec_rxsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec add rxsc, param fill or config failed.\n"); + goto out; + } + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +static s32 sxe2_macsec_upd_rxsc(struct macsec_context *ctx) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_rxsc *rx_sc = macsec->rx_sc; + const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc; + + LOG_DEBUG_BDF("macsec upd rxsc start...\n"); + + mutex_lock(&macsec->lock); + + if
+
+static s32 sxe2_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+	s32 ret = 0;
+	struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev);
+	struct sxe2_adapter *adapter = priv->vsi->adapter;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+	struct sxe2_macsec_rxsc *rx_sc;
+	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+
+	LOG_DEBUG_BDF("macsec upd rxsc start...\n");
+
+	mutex_lock(&macsec->lock);
+
+	rx_sc = macsec->rx_sc;
+
+	if (!sxe2_macsec_offload_en(ctx->netdev)) {
+		LOG_ERROR_BDF("failed to upd rxsc, because macsec offload is disabled.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (macsec->state != SXE2_MACSEC_STATE_READY) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!rx_sc) {
+		LOG_INFO_BDF("rxsc not created, update is not supported.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (rx_sc->active == ctx_rx_sc->active) {
+		LOG_INFO_BDF("update rx sc active=%d, no change, so no config.\n",
+			     ctx_rx_sc->active);
+		goto out;
+	}
+
+	rx_sc->active = ctx_rx_sc->active;
+
+	ret = sxe2_macsec_rxsc_cfg(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("macsec upd rxsc, param fill or config failed.\n");
+		goto out;
+	}
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return ret;
+}
+
+static s32 sxe2_macsec_del_rxsc(struct macsec_context *ctx)
+{
+	s32 ret = 0;
+	u8 i;
+	struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev);
+	struct sxe2_adapter *adapter = priv->vsi->adapter;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+	struct sxe2_macsec_sa *rx_sa;
+
+	LOG_DEBUG_BDF("macsec del rxsc start...\n");
+
+	mutex_lock(&macsec->lock);
+
+	if (!sxe2_macsec_offload_en(ctx->netdev)) {
+		LOG_ERROR_BDF("failed to del rxsc, because macsec offload is disabled.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (macsec->state != SXE2_MACSEC_STATE_READY) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!macsec->rx_sc) {
+		LOG_ERROR_BDF("macsec rx sc delete error, rx sc doesn't exist.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sxe2_macsec_rxsc_clear(macsec);
+	ret = sxe2_macsec_rxsc_cfg(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("macsec delete rxsc, delete failed.\n");
+		goto out;
+	}
+
+	for (i = 0; i < SXE2_MACSEC_MAX_SA; i++) {
+		rx_sa = &macsec->rx_sc->sa[i];
+		if (!rx_sa->added)
+			continue;
+
+		sxe2_macsec_sa_clear(rx_sa);
+		(void)sxe2_macsec_rxsa_cfg(adapter, rx_sa);
+	}
+
+	sxe2_macsec_rxsc_release(macsec);
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return ret;
+}
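+
+/*
+ * Teardown order used above: clear and reconfigure the SC first, then
+ * clear each added SA, and only then release the rxsc, so no RX path
+ * can observe a half-torn-down context while macsec->lock is held
+ * (descriptive comment).
+ */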
+
+static s32 sxe2_macsec_add_rxsa(struct macsec_context *ctx)
+{
+	s32 ret = 0;
+	u8 i, j, other_sa;
+	u8 sa_id = ctx->sa.assoc_num;
+	u8 key[SXE2_MACSEC_KEY_LEN];
+	u8 max_id = SXE2_MACSEC_KEY_LEN - 1;
+	struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev);
+	struct sxe2_adapter *adapter = priv->vsi->adapter;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+	struct sxe2_macsec_sa *rx_sa;
+	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+
+	LOG_DEBUG_BDF("macsec add rxsa start...\n");
+
+	if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) {
+		LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&macsec->lock);
+
+	if (!sxe2_macsec_offload_en(ctx->netdev)) {
+		LOG_ERROR_BDF("failed to add rxsa, because macsec offload is disabled.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (macsec->state != SXE2_MACSEC_STATE_READY) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!macsec->rx_sc) {
+		LOG_INFO_BDF("rxsc not created, add rxsa is not supported.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	rx_sa = &macsec->rx_sc->sa[sa_id];
+
+	if (rx_sa->added) {
+		LOG_ERROR_BDF("macsec rx sa add error, rx sa %d already exists.\n", sa_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	other_sa = sa_id ? 0 : 1;
+	if (macsec->rx_sc->sa[other_sa].added &&
+	    macsec->rx_sc->sa[other_sa].active && ctx_rx_sa->active) {
+		LOG_ERROR_BDF("macsec add rxsa, only one active sa is supported.\n");
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	rx_sa->active = ctx_rx_sa->active;
+	rx_sa->an_value = ctx->sa.assoc_num;
+	rx_sa->pn[0] = ctx_rx_sa->next_pn_halves.lower;
+	for (i = 0; i <= max_id; i++) {
+		j = (u8)(max_id - i);
+		key[i] = ctx->sa.key[j];
+	}
+	if (macsec->xpn) {
+		rx_sa->ssci = ctx_rx_sa->ssci;
+		rx_sa->pn[1] = ctx_rx_sa->next_pn_halves.upper;
+		(void)memcpy(rx_sa->salt, &ctx_rx_sa->key.salt, MACSEC_SALT_LEN);
+	}
+	(void)memcpy(rx_sa->key, key, SXE2_MACSEC_KEY_LEN);
+
+	ret = sxe2_macsec_rxsa_cfg(adapter, rx_sa);
+	if (ret) {
+		LOG_ERROR_BDF("macsec rx sa fill and cfg error.\n");
+		goto out;
+	}
+
+	rx_sa->added = true;
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return ret;
+}
+
+static s32 sxe2_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+	s32 ret = 0;
+	u8 sa_id = ctx->sa.assoc_num;
+	u8 other_sa;
+	struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev);
+	struct sxe2_adapter *adapter = priv->vsi->adapter;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+	struct sxe2_macsec_sa *rx_sa;
+	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+
+	LOG_DEBUG_BDF("macsec upd rxsa start...\n");
+
+	if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) {
+		LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&macsec->lock);
+
+	if (!sxe2_macsec_offload_en(ctx->netdev)) {
+		LOG_ERROR_BDF("failed to upd rxsa, because macsec offload is disabled.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (macsec->state != SXE2_MACSEC_STATE_READY) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!macsec->rx_sc) {
+		LOG_INFO_BDF("rxsc not created, update rxsa is not supported.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	rx_sa = &macsec->rx_sc->sa[sa_id];
+
+	if (!rx_sa->added) {
+		LOG_ERROR_BDF("macsec rx sa update error, rx sa %d doesn't exist.\n", sa_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (rx_sa->pn[0] != ctx_rx_sa->next_pn_halves.lower) {
+		LOG_INFO_BDF("MACsec offload: update RX sa %d PN isn't supported\n", sa_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (rx_sa->active == ctx_rx_sa->active) {
+		LOG_INFO_BDF("update rx sa active=%d, no change, so no config.\n",
+			     ctx_rx_sa->active);
+		goto out;
+	}
+
+	other_sa = sa_id ? 0 : 1;
+	if (macsec->rx_sc->sa[other_sa].added &&
+	    macsec->rx_sc->sa[other_sa].active && ctx_rx_sa->active) {
+		LOG_ERROR_BDF("macsec upd rxsa, only one active sa is supported.\n");
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	rx_sa->active = ctx_rx_sa->active;
+
+	ret = sxe2_macsec_rxsa_cfg(adapter, rx_sa);
+	if (ret) {
+		LOG_ERROR_BDF("macsec rx sa fill and cfg error.\n");
+		goto out;
+	}
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return ret;
+}
+
+static s32 sxe2_macsec_del_rxsa(struct macsec_context *ctx)
+{
+	s32 ret = 0;
+	u8 sa_id = ctx->sa.assoc_num;
+	struct sxe2_netdev_priv *priv = netdev_priv(ctx->netdev);
+	struct sxe2_adapter *adapter = priv->vsi->adapter;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+	struct sxe2_macsec_sa *rx_sa;
+
+	LOG_DEBUG_BDF("macsec del rxsa start...\n");
+
+	if (ctx->sa.assoc_num >= SXE2_MACSEC_MAX_SA) {
+		LOG_ERROR_BDF("macsec sa max num = 2, sa_id should be 0 or 1.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&macsec->lock);
+
+	if (!sxe2_macsec_offload_en(ctx->netdev)) {
+		LOG_ERROR_BDF("failed to del rxsa, because macsec offload is disabled.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (macsec->state != SXE2_MACSEC_STATE_READY) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!macsec->rx_sc) {
+		LOG_INFO_BDF("rxsc not created, delete rxsa is not supported.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	rx_sa = &macsec->rx_sc->sa[sa_id];
+
+	if (!rx_sa->added) {
+		LOG_ERROR_BDF("macsec rx sa delete error, rx sa %d doesn't exist.\n", sa_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sxe2_macsec_sa_clear(rx_sa);
+
+	ret = sxe2_macsec_rxsa_cfg(adapter, rx_sa);
+	if (ret) {
+		LOG_ERROR_BDF("macsec rxsa clear and delete failed.\n");
+		goto out;
+	}
+
+out:
+	mutex_unlock(&macsec->lock);
+
+	return ret;
+}
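+
+/*
+ * The mdo_* table below is hooked up in sxe2_macsec_init() through
+ * netdev->macsec_ops; the macsec core invokes these callbacks under
+ * rtnl when offload is selected on a macsec upper device, e.g. (usage
+ * sketch, interface name and key value are examples):
+ *
+ *   ip link add link ens1f0 macsec0 type macsec encrypt on
+ *   ip link set macsec0 type macsec offload mac
+ *   ip macsec add macsec0 tx sa 0 pn 1 on key 00 <16-byte-hex-key>
+ */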
+
+static const struct macsec_ops sxe2_macsec_ops = {
+	.mdo_add_secy = sxe2_macsec_add_secy,
+	.mdo_upd_secy = sxe2_macsec_upd_secy,
+	.mdo_del_secy = sxe2_macsec_del_secy,
+	.mdo_add_txsa = sxe2_macsec_add_txsa,
+	.mdo_upd_txsa = sxe2_macsec_upd_txsa,
+	.mdo_del_txsa = sxe2_macsec_del_txsa,
+	.mdo_add_rxsc = sxe2_macsec_add_rxsc,
+	.mdo_upd_rxsc = sxe2_macsec_upd_rxsc,
+	.mdo_del_rxsc = sxe2_macsec_del_rxsc,
+	.mdo_add_rxsa = sxe2_macsec_add_rxsa,
+	.mdo_upd_rxsa = sxe2_macsec_upd_rxsa,
+	.mdo_del_rxsa = sxe2_macsec_del_rxsa,
+};
+
+static s32 sxe2_macsec_fix_cfg(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MACSEC_FIX_CFG, NULL, 0, NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("macsec fix cfg failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_macsec_init(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct net_device *netdev = adapter->vsi_ctxt.main_vsi->netdev;
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+
+	LOG_DEBUG_BDF("macsec init start...\n");
+
+	mutex_init(&adapter->macsec_ctxt.lock);
+	netdev->macsec_ops = &sxe2_macsec_ops;
+	macsec->netdev = NULL;
+	macsec->rx_sc = NULL;
+
+	ret = sxe2_macsec_fix_cfg(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("macsec init failed.\n");
+		goto l_err;
+	}
+
+	macsec->state = SXE2_MACSEC_STATE_READY;
+
+	return ret;
+
+l_err:
+	macsec->state = SXE2_MACSEC_STATE_UNINIT;
+	netdev->macsec_ops = NULL;
+	mutex_destroy(&adapter->macsec_ctxt.lock);
+	return ret;
+}
+
+void sxe2_macsec_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt;
+
+	LOG_DEBUG_BDF("macsec deinit start...\n");
+
+	macsec->state =
SXE2_MACSEC_STATE_UNINIT; + mutex_destroy(&macsec->lock); +} + +s32 sxe2_macsec_rebuild(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + u8 sa_id; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_rxsc *rx_sc = macsec->rx_sc; + struct sxe2_macsec_sa *tx_sa; + struct sxe2_macsec_sa *rx_sa; + + mutex_lock(&macsec->lock); + + LOG_DEBUG_BDF("macsec rebuild start...\n"); + + if (!macsec->netdev) { + LOG_ERROR_BDF("macsec rebuild txsc, netdev is null, not need config.\n"); + goto out; + } + + ret = sxe2_macsec_txsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec update txsc, param config failed.\n"); + goto out; + } + + for (sa_id = 0; sa_id < SXE2_MACSEC_MAX_SA; sa_id++) { + tx_sa = &macsec->tx_sc.sa[sa_id]; + if (!tx_sa->added) + continue; + + ret = sxe2_macsec_txsa_cfg(adapter, tx_sa); + if (ret) { + LOG_ERROR_BDF("macsec txsa config failed.\n"); + goto out; + } + } + + if (!rx_sc) { + LOG_ERROR_BDF("macsec rebuild rxsc, rxsc is null, not need config.\n"); + goto out; + } + + ret = sxe2_macsec_rxsc_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("macsec add rxsc, param fill or config failed.\n"); + goto out; + } + + for (sa_id = 0; sa_id < SXE2_MACSEC_MAX_SA; sa_id++) { + rx_sa = &macsec->rx_sc->sa[sa_id]; + if (!rx_sa->added) + continue; + + ret = sxe2_macsec_rxsa_cfg(adapter, rx_sa); + if (ret) { + LOG_ERROR_BDF("macsec rx sa fill and cfg error.\n"); + goto out; + } + } + +out: + mutex_unlock(&macsec->lock); + + return ret; +} + +bool sxe2_macsec_offload(struct sxe2_adapter *adapter, struct sk_buff *skb) +{ + bool ret = false; +#ifdef SXE2_UNSUPPORT + struct metadata_dst *md_dst = skb_metadata_dst(skb); + + return md_dst && (md_dst->type == METADATA_MACSEC); +#endif + + if (adapter->vsi_ctxt.main_vsi->netdev->features & NETIF_F_HW_MACSEC) + ret = true; + else + ret = false; + + return ret; +} + +void sxe2_macsec_rx(struct sxe2_queue *rxq, struct sk_buff *skb) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + struct sxe2_macsec_rxsc *rx_sc; + + rcu_read_lock(); + + rx_sc = rcu_dereference(macsec->rx_sc); + if (rx_sc) { + dst_hold(&rx_sc->md_dst->dst); + skb_dst_set(skb, &rx_sc->md_dst->dst); + } + + rcu_read_unlock(); +} + +void sxe2_macsec_enter_reset(struct sxe2_adapter *adapter, bool to_reset) +{ + mutex_lock(&adapter->macsec_ctxt.lock); + if (to_reset) + adapter->macsec_ctxt.state = SXE2_MACSEC_STATE_RESET; + else + adapter->macsec_ctxt.state = SXE2_MACSEC_STATE_READY; + + mutex_unlock(&adapter->macsec_ctxt.lock); +} + +bool sxe2_macsec_conflict_features_check(struct net_device *netdev) +{ + netdev_features_t features = netdev->features; + + if (features & NETIF_F_HW_ESP) + return true; + + return false; +} + +bool sxe2_is_macsec_can_not_disable(struct sxe2_adapter *adapter) +{ + struct sxe2_macsec_context *macsec = &adapter->macsec_ctxt; + + return (!!macsec->netdev); +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.h new file mode 100644 index 0000000000000000000000000000000000000000..57d8934b9735de8596a50e589288c86e233cd4b5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macsec.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_macsec.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_MACSEC_H__
+#define __SXE2_MACSEC_H__
+
+#include "sxe2_compat.h"
+
+#ifdef HAVE_MACSEC_SUPPORT
+#include <net/macsec.h>
+
+#define SXE2_MACSEC_MAX_DEVICE_NUM 1
+
+#define SXE2_MACSEC_MAX_SA 2
+
+#define SXE2_MACSEC_KEY_LEN 16
+
+enum sxe2_macsec_state {
+	SXE2_MACSEC_STATE_UNINIT,
+	SXE2_MACSEC_STATE_READY,
+	SXE2_MACSEC_STATE_RESET,
+};
+
+struct sxe2_macsec_sa {
+	bool added;
+	bool active;
+	u8 an_value;
+	u32 key[4];
+	u32 pn[2];
+	u32 ssci;
+	u32 salt[MACSEC_SALT_COUNT];
+};
+
+struct sxe2_macsec_txsc {
+	bool active;
+	bool encrypt;
+	bool aisci;
+	bool es;
+	struct sxe2_macsec_sa sa[SXE2_MACSEC_MAX_SA];
+};
+
+struct sxe2_macsec_rxsc {
+	struct rcu_head rcu;
+	bool active;
+	bool protect;
+	struct sxe2_macsec_sa sa[SXE2_MACSEC_MAX_SA];
+	sci_t sci;
+	struct metadata_dst *md_dst;
+};
+
+struct sxe2_macsec_context {
+	const struct net_device *netdev;
+	enum sxe2_macsec_state state;
+	struct mutex lock;
+	struct sxe2_macsec_rxsc *rx_sc;
+	struct sxe2_macsec_txsc tx_sc;
+	bool xpn;
+	sci_t sci;
+	bool protect;
+	enum macsec_validation_type validate_frames;
+};
+
+s32 sxe2_macsec_init(struct sxe2_adapter *adapter);
+
+void sxe2_macsec_deinit(struct sxe2_adapter *adapter);
+
+s32 sxe2_macsec_rebuild(struct sxe2_adapter *adapter);
+
+bool sxe2_macsec_offload(struct sxe2_adapter *adapter, struct sk_buff *skb);
+
+void sxe2_macsec_rx(struct sxe2_queue *rxq, struct sk_buff *skb);
+
+s32 sxe2_macsec_rxsc_clear_and_del(struct macsec_context *ctx);
+
+s32 sxe2_macsec_rxsa_fill_and_cfg(u8 sa_id, struct macsec_context *ctx);
+
+s32 sxe2_macsec_rxsa_clear_and_del(u8 sa_id, struct macsec_context *ctx);
+
+s32 sxe2_macsec_txsc_fill_and_cfg(struct macsec_context *ctx);
+
+s32 sxe2_macsec_txsa_upd_cfg(struct macsec_context *ctx);
+
+void sxe2_macsec_enter_reset(struct sxe2_adapter *adapter, bool to_reset);
+
+bool sxe2_macsec_conflict_features_check(struct net_device *netdev);
+
+bool sxe2_is_macsec_can_not_disable(struct sxe2_adapter *adapter);
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.c
new file mode 100644
index 0000000000000000000000000000000000000000..014d4a1ac8d76b76f8533318b22c343f143c62e6
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
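+ *
+ * L2 forwarding offload: when the lower device advertises
+ * NETIF_F_HW_L2FW_DOFFLOAD (toggled with "ethtool -K <if> l2-fwd-offload
+ * on"), the macvlan driver calls ndo_dfwd_add_station()/
+ * ndo_dfwd_del_station(), which land in sxe2_fwd_add_macvlan()/
+ * sxe2_fwd_del_macvlan() below; each offloaded macvlan gets its own VSI
+ * whose queues are grafted into the parent VSI. Usage sketch, interface
+ * names are examples:
+ *
+ *   ip link add link ens1f0 macvl0 type macvlan mode bridge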
+ *
+ * @file: sxe2_macvlan.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/if_macvlan.h>
+#include "sxe2_common.h"
+#include "sxe2_macvlan.h"
+#include "sxe2.h"
+#include "sxe2_log.h"
+#include "sxe2_netdev.h"
+#include "sxe2_xsk.h"
+
+s32 sxe2_vsi_cfg_netdev_tc0(struct sxe2_vsi *vsi)
+{
+	struct net_device *netdev = vsi->netdev;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	int ret;
+
+	if (!netdev)
+		return -EINVAL;
+
+	ret = netdev_set_num_tc(netdev, 1);
+	if (ret) {
+		LOG_ERROR_BDF("Error setting num TC\n");
+		return ret;
+	}
+
+	ret = netdev_set_tc_queue(netdev, 0, vsi->txqs.q_cnt, 0);
+	if (ret) {
+		LOG_ERROR_BDF("Error setting TC queue\n");
+		goto set_tc_queue_err;
+	}
+
+	return 0;
+set_tc_queue_err:
+	WARN_ON_ONCE(netdev_set_num_tc(netdev, 0));
+	return ret;
+}
+
+#ifdef HAVE_NETDEV_SB_DEV
+STATIC s32 sxe2_netdev_sb_chnl_cfg(struct sxe2_vsi *vsi, struct sxe2_vsi *parent_vsi,
+				   struct net_device *vdev, u16 macvlan_id)
+{
+	s32 ret;
+	struct net_device *netdev = parent_vsi->netdev;
+	u16 offset = parent_vsi->txqs.q_cnt + macvlan_id;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	ret = netdev_set_sb_channel(vdev, macvlan_id + 1);
+	if (ret) {
+		LOG_DEV_ERR("Error setting netdev_set_sb_channel %d\n", ret);
+		return ret;
+	}
+
+	ret = netdev_bind_sb_channel_queue(netdev, vdev, 0, vsi->txqs.q_cnt, offset);
+	if (ret) {
+		LOG_DEV_ERR("Error setting netdev_bind_sb_channel_queue %d\n", ret);
+		WARN_ON_ONCE(netdev_set_sb_channel(vdev, 0));
+	}
+
+	return ret;
+}
+#endif
+
+STATIC void sxe2_netdev_sb_chnl_uncfg(struct net_device *netdev,
+				      struct net_device *vdev)
+{
+#ifdef HAVE_NETDEV_SB_DEV
+	netdev_unbind_sb_channel(netdev, vdev);
+	WARN_ON_ONCE(netdev_set_sb_channel(vdev, 0));
+#endif
+}
+
+STATIC void sxe2_fwd_del_macvlan_unlock(struct net_device *netdev, void *accel_priv)
+{
+	struct sxe2_macvlan *mv = accel_priv;
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_vsi *parent_vsi = np->vsi;
+	struct sxe2_vsi *vsi = mv->vsi;
+	struct sxe2_adapter *adapter = parent_vsi->adapter;
+	struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt;
+	struct net_device *vdev = mv->vdev;
+	s32 mv_id = mv->id;
+
+	sxe2_netdev_sb_chnl_uncfg(netdev, vdev);
+
+	macvlan->num_macvlan--;
+	clear_bit(mv_id, macvlan->avail_macvlan);
+	list_del(&mv->list);
+	devm_kfree(&adapter->pdev->dev, mv);
+
+	if (parent_vsi->txqs.q[parent_vsi->txqs.q_cnt + mv_id])
+		parent_vsi->txqs.q[parent_vsi->txqs.q_cnt + mv_id] = NULL;
+
+	if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) {
+		vsi->netdev = NULL;
+		set_bit(SXE2_VSI_S_MACVLAN_DEL, vsi->state);
+		sxe2_monitor_work_schedule(adapter);
+	} else {
+		sxe2_vsi_destroy_unlock(vsi);
+	}
+
+	LOG_DEV_INFO("Delete MACVLAN of %s.\n", vdev->name);
+}
+
+void sxe2_fwd_del_macvlan(struct net_device *netdev, void *accel_priv)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_vsi *parent_vsi = np->vsi;
+	struct sxe2_adapter *adapter = parent_vsi->adapter;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_fwd_del_macvlan_unlock(netdev, accel_priv);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+STATIC s32 sxe2_add_macvlan_check(struct sxe2_adapter *adapter,
+				  struct net_device *vdev)
+{
+	struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt;
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR("cannot do MACVLAN offload. 
device is in Safe Mode\n"); + return -EOPNOTSUPP; + } + + if (macvlan->num_macvlan == macvlan->max_num_macvlan) { + LOG_DEV_ERR("MACVLAN offload limit reached\n"); + return -ENOSPC; + } + + if (vdev->num_rx_queues != SXE2_DFLT_RXQ_VMDQ_VSI || + vdev->num_tx_queues != SXE2_DFLT_TXQ_VMDQ_VSI) { + LOG_DEV_ERR("can not do MACVLAN offload. %s has multiple queues\n", + vdev->name); + return -EOPNOTSUPP; + } + + if (sxe2_usable_txqs_cnt_get(adapter) < SXE2_DFLT_TXQ_VMDQ_VSI || + sxe2_usable_rxqs_cnt_get(adapter) < SXE2_DFLT_RXQ_VMDQ_VSI) { + LOG_DEV_ERR("can not do MACVLAN offload. Not enough queues\n"); + return -ENOSPC; + } + + return 0; +} + +STATIC void *sxe2_fwd_add_macvlan_unlock(struct net_device *netdev, + struct net_device *vdev) +{ + s32 ret; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *parent_vsi = np->vsi; + struct sxe2_vsi *vsi = NULL; + struct sxe2_adapter *adapter = parent_vsi->adapter; + struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt; + struct sxe2_macvlan *mv = NULL; + s32 avail_id, offset, i; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u8 mac[ETH_ALEN]; + + ret = sxe2_add_macvlan_check(adapter, vdev); + if (ret) + return ERR_PTR(ret); + + if (test_bit(SXE2_VSI_S_DISABLE, parent_vsi->state)) { + LOG_DEV_ERR("pf vsi disabled, try later.\n"); + ret = -EBUSY; + goto l_end; + } + + avail_id = (s32)find_first_zero_bit(macvlan->avail_macvlan, + macvlan->max_num_macvlan); + + vsi = sxe2_macvlan_vsi_create(adapter); + if (!vsi) { + LOG_DEV_ERR("Failed to create MACVLAN offload (VMDQ) VSI\n"); + ret = -EIO; + goto l_end; + } + + macvlan->num_macvlan++; + offset = parent_vsi->txqs.q_cnt + avail_id; + vsi->netdev = vdev; + + sxe2_for_each_vsi_txq(vsi, i) + { + parent_vsi->txqs.q[offset + i] = vsi->txqs.q[i]; + } + + ret = sxe2_vsi_cfg_netdev_tc0(vsi); + if (ret) + goto l_vsi_destroy; + +#ifdef HAVE_NETDEV_SB_DEV + ret = sxe2_netdev_sb_chnl_cfg(vsi, parent_vsi, vdev, (u16)avail_id); + if (ret) + goto l_vsi_destroy; +#endif + + sxe2_napi_add(vsi); + + ret = sxe2_vsi_open(vsi); + if (ret) + goto l_sb_chnl_uncfg; + + ether_addr_copy(mac, vdev->dev_addr); + ret = sxe2_mac_rule_add(vsi, mac); + if (ret == -EEXIST) { + LOG_DEV_INFO("can't add MAC filters %pM for VSI %d, error %d\n", mac, + vsi->idx_in_dev, ret); + } else if (ret) { + LOG_DEV_INFO("can't add MAC filters %pM for VSI %d, error %d\n", mac, + vsi->idx_in_dev, ret); + ret = -ENOMEM; + goto l_add_mac_err; + } + + mv = devm_kzalloc(dev, sizeof(*mv), GFP_KERNEL); + if (!mv) { + ret = -ENOMEM; + goto l_mv_init_err; + } + INIT_LIST_HEAD(&mv->list); + mv->parent_vsi = parent_vsi; + mv->vsi = vsi; + mv->id = avail_id; + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &macvlan->macvlan_list); + + set_bit(avail_id, macvlan->avail_macvlan); + + LOG_DEV_INFO("MACVLAN offloads for %s are on\n", vdev->name); + + return mv; + +l_mv_init_err: + sxe2_vsi_fltr_remove(adapter, vsi->idx_in_dev); +l_add_mac_err: + WARN_ON_ONCE(sxe2_vsi_close(vsi)); + sxe2_napi_del(vsi); + vsi->netdev = NULL; +l_sb_chnl_uncfg: + sxe2_netdev_sb_chnl_uncfg(netdev, vdev); +l_vsi_destroy: + macvlan->num_macvlan--; + sxe2_vsi_destroy_unlock(vsi); +l_end: + return ERR_PTR(ret); +} + +void *sxe2_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + void *mv; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *parent_vsi = np->vsi; + struct sxe2_adapter *adapter = parent_vsi->adapter; + s32 ret; + u16 old_txq_cnt = (u16)vdev->real_num_tx_queues; + u16 old_rxq_cnt = 
(u16)vdev->real_num_rx_queues;
+
+	ret = sxe2_netdev_q_cnt_set(vdev, SXE2_DFLT_TXQ_VMDQ_VSI,
+				    SXE2_DFLT_RXQ_VMDQ_VSI, true);
+	if (ret) {
+		LOG_DEV_ERR("macvlan netdev real tx:%u rx:%u set failed:%d\n",
+			    SXE2_DFLT_TXQ_VMDQ_VSI, SXE2_DFLT_RXQ_VMDQ_VSI, ret);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	mv = sxe2_fwd_add_macvlan_unlock(netdev, vdev);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	if (IS_ERR(mv))
+		WARN_ON_ONCE(sxe2_netdev_q_cnt_set(vdev, old_txq_cnt, old_rxq_cnt,
+						   true));
+
+	return mv;
+}
+
+STATIC s32 sxe2_macvlan_enable_check_without_lock(struct sxe2_vsi *vsi, bool init)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	if (init && test_bit(SXE2_VSI_S_DISABLE, vsi->state)) {
+		ret = -EBUSY;
+		goto l_end;
+	}
+
+#ifdef HAVE_XDP_SUPPORT
+	if (sxe2_xdp_is_enable(vsi)) {
+		ret = -EPERM;
+		LOG_DEV_ERR("MACVLAN offload cannot be supported - xdp enabled\n");
+		goto l_end;
+	}
+#endif
+
+	if (!test_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags)) {
+		ret = -EPERM;
+		LOG_DEV_ERR("MACVLAN offload cannot be supported - VMDQ is disabled\n");
+		goto l_end;
+	}
+
+	if (test_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags)) {
+		ret = -EPERM;
+		LOG_DEV_ERR("MACVLAN offload cannot be supported - dcb enabled\n");
+		goto l_end;
+	}
+
+	if (sxe2_is_safe_mode(adapter)) {
+		ret = -EOPNOTSUPP;
+		LOG_DEV_ERR("MACVLAN offload cannot be configured - Device is in Safe Mode\n");
+		goto l_end;
+	}
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		ret = -EOPNOTSUPP;
+		LOG_DEV_ERR("MACVLAN offload cannot be configured - switchdev is enabled\n");
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC struct sxe2_queue **sxe2_macvlan_q_alloc(struct sxe2_vsi *vsi)
+{
+	unsigned int total_q_cnt;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt;
+	struct sxe2_queue **temp_queues = NULL;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	u16 txqs = sxe2_usable_txqs_cnt_get(adapter);
+	u16 rxqs = sxe2_usable_rxqs_cnt_get(adapter);
+
+	macvlan->max_num_macvlan = (u16)min3(txqs, rxqs, (u16)SXE2_MAX_MACVLANS);
+
+	total_q_cnt = vsi->txqs.q_cnt + macvlan->max_num_macvlan;
+
+	temp_queues = devm_kcalloc(dev, total_q_cnt, sizeof(*temp_queues),
+				   GFP_KERNEL);
+	if (!temp_queues)
+		macvlan->max_num_macvlan = 0;
+
+	return temp_queues;
+}
+
+s32 sxe2_macvlan_init(struct sxe2_vsi *vsi, bool init)
+{
+	s32 ret = 0;
+	struct net_device *netdev = vsi->netdev;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt;
+	struct sxe2_queue **temp_queues;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	u32 i;
+
+	if (sxe2_eswitch_mode_read_lock(adapter)) {
+		ret = -EBUSY;
+		goto l_end;
+	}
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	ret = sxe2_macvlan_enable_check_without_lock(vsi, init);
+	if (ret) {
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		goto l_end;
+	}
+
+	temp_queues = sxe2_macvlan_q_alloc(vsi);
+	if (!temp_queues) {
+		ret = -ENOMEM;
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		goto l_end;
+	}
+
+	ret = sxe2_vsi_close(vsi);
+	if (ret) {
+		devm_kfree(dev, temp_queues);
+		macvlan->max_num_macvlan = 0;
+		if (netif_running(vsi->netdev))
+			WARN_ON_ONCE(sxe2_vsi_open(vsi));
+
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		goto l_end;
+	}
+
+	set_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	ret = sxe2_netdev_q_cnt_set(netdev, vsi->txqs.q_cnt, vsi->rxqs.q_cnt, init);
+	if (ret) {
+		clear_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags);
+		devm_kfree(dev,
temp_queues); + macvlan->max_num_macvlan = 0; + if (netif_running(vsi->netdev)) { + mutex_lock(&adapter->vsi_ctxt.lock); + WARN_ON_ONCE(sxe2_vsi_open(vsi)); + mutex_unlock(&adapter->vsi_ctxt.lock); + } + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (init && test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto dis_vsi_err; + } + sxe2_for_each_vsi_txq(vsi, i) + { + temp_queues[i] = vsi->txqs.q[i]; + } + + vsi->origin_txqs = vsi->txqs.q; + vsi->txqs.q = temp_queues; + + if (!init) { + LOG_WARN_BDF("macvlan rebuild no need enable vsi.\n"); + goto l_unlock; + } + + ret = sxe2_vsi_cfg_netdev_tc0(vsi); + if (ret) + goto set_num_tc_err; + + INIT_LIST_HEAD(&macvlan->macvlan_list); + + goto ena_vsi; + +set_num_tc_err: + vsi->txqs.q = vsi->origin_txqs; + vsi->origin_txqs = NULL; + +dis_vsi_err: + clear_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags); + devm_kfree(dev, temp_queues); + macvlan->max_num_macvlan = 0; + +ena_vsi: + if (netif_running(vsi->netdev)) + WARN_ON_ONCE(sxe2_vsi_open(vsi)); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + if (ret) { + WARN_ON_ONCE(sxe2_netdev_q_cnt_set(netdev, vsi->txqs.q_cnt, + vsi->rxqs.q_cnt, init)); + } + +l_end: + sxe2_eswitch_mode_read_unlock(adapter); + return ret; +} + +s32 sxe2_macvlan_deinit(struct sxe2_vsi *vsi, bool locked) +{ + struct sxe2_macvlan *mv, *mv_tmp; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt; + struct sxe2_queue **temp_queues = NULL; + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (locked && test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + LOG_ERROR_BDF("vsi disabled, try later.\n"); + goto l_unlock; + } + + clear_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags); + + (void)sxe2_vsi_close(vsi); + + list_for_each_entry_safe(mv, mv_tmp, &macvlan->macvlan_list, list) { + (void)sxe2_mac_rule_del(adapter, mv->vsi->idx_in_dev, mv->mac); +#ifdef HAVE_NETDEV_SB_DEV + (void)macvlan_release_l2fw_offload(mv->vdev); +#endif + sxe2_fwd_del_macvlan_unlock(mv->parent_vsi->netdev, mv); + } + + ret = netdev_set_num_tc(vsi->netdev, 0); + if (ret) + LOG_ERROR_BDF("set num:0 tc NOK, ret:%d\n", ret); + + mutex_unlock(&adapter->vsi_ctxt.lock); + ret = sxe2_netdev_q_cnt_set(vsi->netdev, vsi->txqs.q_cnt, vsi->rxqs.q_cnt, + locked); + if (ret) { + LOG_ERROR_BDF("netdev txq_cnt:%u rxq_cnt:%u set failed %d.\n", + vsi->txqs.q_cnt, vsi->rxqs.q_cnt, ret); + goto l_end; + } + + macvlan->max_num_macvlan = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + temp_queues = vsi->txqs.q; + vsi->txqs.q = vsi->origin_txqs; + vsi->origin_txqs = NULL; + + if (locked) { + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + LOG_ERROR_BDF("vsi disabled, try later.\n"); + goto l_unlock; + } + + if (netif_running(vsi->netdev) && sxe2_vsi_open(vsi)) + LOG_ERROR_BDF("main vsi enable failed %d.\n", ret); + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + if (temp_queues) + devm_kfree(&adapter->pdev->dev, temp_queues); + +l_end: + return ret; +} + +STATIC void sxe2_macvlan_replay(struct sxe2_adapter *adapter) +{ + struct sxe2_macvlan_context *macvlan = &adapter->macvlan_ctxt; + struct sxe2_macvlan *mv, *mv_temp; + struct sxe2_vsi *parent_vsi; + s32 offset; + u16 i; +#ifdef HAVE_NETDEV_SB_DEV + s32 ret; +#endif + + mutex_lock(&adapter->vsi_ctxt.lock); + + list_for_each_entry_safe(mv, mv_temp, &macvlan->macvlan_list, list) { + parent_vsi = mv->parent_vsi; + offset = parent_vsi->txqs.q_cnt + mv->id; + + sxe2_for_each_vsi_txq(mv->vsi, i) + { + 
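/* During rebuild, graft the child VSI's TX queues back into the
+			 * parent VSI's queue table before re-binding the
+			 * subordinate channel; the slot layout mirrors
+			 * sxe2_fwd_add_macvlan_unlock() (descriptive note).
+			 */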
+			parent_vsi->txqs.q[offset + i] = mv->vsi->txqs.q[i];
+		}
+
+#ifdef HAVE_NETDEV_SB_DEV
+		ret = sxe2_netdev_sb_chnl_cfg(mv->vsi, parent_vsi, mv->vdev,
+					      (u16)mv->id);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2_netdev_sb_chnl_cfg failed: %d.\n", ret);
+			(void)sxe2_mac_rule_del(adapter, mv->vsi->idx_in_dev,
+						mv->mac);
+			(void)macvlan_release_l2fw_offload(mv->vdev);
+			sxe2_fwd_del_macvlan_unlock(mv->parent_vsi->netdev, mv);
+			continue;
+		}
+#endif
+	}
+
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+void sxe2_fwd_del_macvlan_deay(struct sxe2_adapter *adapter)
+{
+	struct sxe2_vsi *vsi;
+	u16 i;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi || vsi->type != SXE2_VSI_T_MACVLAN)
+			continue;
+
+		if (!test_bit(SXE2_VSI_S_MACVLAN_DEL, vsi->state))
+			continue;
+
+		sxe2_vsi_destroy_unlock(vsi);
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+s32 sxe2_macvlan_rebuild(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_vsi *parent_vsi = adapter->vsi_ctxt.main_vsi;
+
+	if (!test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags))
+		return 0;
+
+	ret = sxe2_vsi_rebuild_by_type(adapter, SXE2_VSI_T_MACVLAN, true);
+	if (ret) {
+		LOG_DEV_ERR("sxe2_vsi_rebuild_by_type failed ret:%d.\n", ret);
+		goto l_end;
+	}
+
+	ret = sxe2_macvlan_init(parent_vsi, false);
+	if (ret) {
+		LOG_DEV_ERR("sxe2_macvlan_init failed ret:%d.\n", ret);
+		goto l_err;
+	}
+
+	sxe2_macvlan_replay(adapter);
+
+	rtnl_lock();
+	ret = sxe2_vsi_enable_by_type(adapter, SXE2_VSI_T_MACVLAN);
+	rtnl_unlock();
+	if (ret) {
+		LOG_DEV_ERR("sxe2_vsi_enable_by_type failed ret:%d.\n", ret);
+		goto l_err;
+	}
+
+	goto l_end;
+
+l_err:
+	WARN_ON_ONCE(sxe2_macvlan_deinit(parent_vsi, false));
+l_end:
+	return ret;
+}
+
+bool sxe2_macvlan_is_enabled(struct sxe2_adapter *adapter)
+{
+	return !!(test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags));
+}
+
+#ifdef SXE2_MACVLAN_STATS
+struct sxe2_macvlan *sxe2_get_macvlan(int id, struct sxe2_adapter *adapter)
+{
+	struct sxe2_macvlan *mv;
+
+	if (!(test_bit(id, adapter->macvlan_ctxt.avail_macvlan)))
+		return NULL;
+
+	list_for_each_entry(mv, &adapter->macvlan_ctxt.macvlan_list, list) {
+		if (id == mv->id)
+			return mv;
+	}
+	return NULL;
+}
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9f1d82b6af7d6d61ddf3f1df74d8dcbb54a4511
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_macvlan.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
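+ *
+ * One sxe2_macvlan instance tracks each offloaded macvlan upper device:
+ * its child VSI, its slot id inside avail_macvlan, and the unicast MAC
+ * programmed for it (descriptive note).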
+ *
+ * @file: sxe2_macvlan.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_MACVLAN_H__
+#define __SXE2_MACVLAN_H__
+
+#define SXE2_DFLT_TXQ_VMDQ_VSI (1)
+#define SXE2_DFLT_RXQ_VMDQ_VSI (1)
+#define SXE2_DFLT_VEC_VMDQ_VSI (1)
+#define SXE2_MAX_NUM_VMDQ_VSI (16)
+#define SXE2_MAX_TXQ_VMDQ_VSI (4)
+#define SXE2_MAX_RXQ_VMDQ_VSI (4)
+
+struct sxe2_adapter;
+struct sxe2_vsi;
+
+struct sxe2_macvlan {
+	struct list_head list;
+	s32 id;
+	struct net_device *vdev;
+	struct sxe2_vsi *parent_vsi;
+	struct sxe2_vsi *vsi;
+	u8 mac[ETH_ALEN];
+};
+
+struct sxe2_macvlan_context {
+	DECLARE_BITMAP(avail_macvlan, SXE2_MAX_MACVLANS);
+	struct list_head macvlan_list;
+	u16 num_macvlan;
+	u16 max_num_macvlan;
+};
+
+s32 sxe2_vsi_cfg_netdev_tc0(struct sxe2_vsi *vsi);
+
+bool sxe2_macvlan_is_enabled(struct sxe2_adapter *adapter);
+
+s32 sxe2_macvlan_init(struct sxe2_vsi *vsi, bool init);
+
+s32 sxe2_macvlan_deinit(struct sxe2_vsi *vsi, bool locked);
+
+s32 sxe2_macvlan_rebuild(struct sxe2_adapter *adapter);
+
+void sxe2_fwd_del_macvlan(struct net_device *netdev, void *accel_priv);
+
+void *sxe2_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev);
+
+void sxe2_fwd_del_macvlan_deay(struct sxe2_adapter *adapter);
+
+#ifdef SXE2_MACVLAN_STATS
+struct sxe2_macvlan *sxe2_get_macvlan(int id, struct sxe2_adapter *adapter);
+#endif
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_main.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..b5a48bf47598ddef142a02f97fe00b4449dc6327
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_main.c
@@ -0,0 +1,1665 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_main.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/module.h>
+
+#include "sxe2_compat.h"
+#include "sxe2.h"
+#include "sxe2_hw.h"
+#include "sxe2_log.h"
+#include "sxe2_devlink.h"
+#include "sxe2_version.h"
+#include "sxe2_common.h"
+#include "sxe2_netdev.h"
+#include "sxe2_queue.h"
+#include "sxe2_monitor.h"
+#include "sxe2_sriov.h"
+#include "sxe2_log_export.h"
+#include "sxe2_debugfs.h"
+#include "sxe2_event.h"
+#include "sxe2_dcb.h"
+#include "sxe2_rss.h"
+#include "sxe2_fnav.h"
+#include "sxe2_txsched.h"
+#include "sxe2_ipsec.h"
+#include "sxe2_macsec.h"
+#include "sxe2_ddp.h"
+#include "sxe2_ethtool.h"
+#include "sxe2_lag.h"
+#include "sxe2_xsk.h"
+#include "sxe2_udp_tunnel.h"
+#include "sxe2_com_ioctl.h"
+#include "sxe2_irq.h"
+#include "sxe2_vsi.h"
+
+#define CREATE_TRACE_POINTS
+#include "sxe2_trace.h"
+#undef CREATE_TRACE_POINTS
+
+#define SXE2_DMA_BIT_WIDTH_64 64
+
+STATIC const struct pci_device_id sxe2_pci_tbl[] = {
+	{SXE2_PCI_VENDOR_ID_1, SXE2_PCI_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID,
+	 0, 0, 0},
+	{SXE2_PCI_VENDOR_ID_2, SXE2_PCI_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID,
+	 0, 0, 0},
+	{SXE2_PCI_VENDOR_ID_1, SXE2_PCI_DEVICE_ID_10B3, PCI_ANY_ID,
+	 PCI_ANY_ID, 0, 0, 0},
+	{SXE2_PCI_VENDOR_ID_206F, SXE2_PCI_DEVICE_ID_1, PCI_ANY_ID,
+	 PCI_ANY_ID, 0, 0, 0},
+	{
+		0,
+	}};
+
+STATIC int com_mode = SXE2_COM_MODULE_UNDEFINED;
+module_param(com_mode, int, 0644);
+MODULE_PARM_DESC(com_mode, "driver mode. kernel:0, mixed:2(default)");
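+
+/*
+ * Usage sketch (module name assumed, values are examples):
+ *
+ *   modprobe sxe2 com_mode=2 debug=4 allow_inval_mac=1
+ *
+ * com_mode selects how the PF is shared with DPDK; debug feeds
+ * netif_msg_init() in sxe2_msg_level_init() below.
+ */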
+
+STATIC int debug = -1;
+module_param(debug, int, 0644);
+#ifndef CONFIG_DYNAMIC_DEBUG
+MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), debug_mask (0x8XXXXXXXX)");
+#else
+MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
+#endif
+
+#ifdef SXE2_CFG_DEBUG
+int reg_log;
+module_param(reg_log, int, 0644);
+MODULE_PARM_DESC(reg_log, "reg read/write log, 0-off 1-on.");
+
+int switch_heart_check = 1;
+module_param(switch_heart_check, int, 0644);
+MODULE_PARM_DESC(switch_heart_check,
+		 "heart check switch on/off. switch off:0. switch on:1. default: 1");
+
+s32 g_pf_switch_stats = 1;
+module_param(g_pf_switch_stats, int, 0644);
+MODULE_PARM_DESC(g_pf_switch_stats,
+		 "pf switch stats open/close. open:1. close:0. default: 1");
+#endif
+
+int allow_inval_mac;
+module_param(allow_inval_mac, int, 0644);
+MODULE_PARM_DESC(allow_inval_mac,
+		 "Allow the device to be probed successfully even when the mac addr is invalid.");
+
+static void sxe2_com_ctxt_fill(void *adapter)
+{
+	struct sxe2_adapter *pf_adapter = adapter;
+
+	pf_adapter->com_ctxt.pdev = pf_adapter->pdev;
+	pf_adapter->com_ctxt.func_type = SXE2_PF;
+	pf_adapter->com_ctxt.pf_id = pf_adapter->pf_idx;
+}
+
+int sxe2_com_mode_get(void *adapter)
+{
+	return ((struct sxe2_adapter *)adapter)->drv_mode;
+}
+
+int sxe2_g_com_mode_get(void)
+{
+	return com_mode;
+}
+
+static struct sxe2_com_ops g_com_ops = {
+	.com_ctxt_fill = sxe2_com_ctxt_fill,
+	.cmd_exec = sxe2_com_cmd_send,
+	.get_irq_num = sxe2_dpdk_irq_cnt_get,
+	.get_vector = sxe2_dpdk_irq_vector_idx_get,
+	.release = sxe2_dpdk_resource_release,
+	.com_mode_get = sxe2_com_mode_get,
+};
+
+static inline u32 sxe2_readl(const __iomem void *reg)
+{
+	return readl(reg);
+}
+
+static inline void sxe2_writel(u32 value, __iomem void *reg)
+{
+	writel(value, reg);
+}
+
+STATIC int sxe2_pci_init(struct sxe2_adapter *adapter)
+{
+	int ret;
+	struct pci_dev *pdev = adapter->pdev;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		goto l_end;
+
+	ret = dma_set_mask_and_coherent(&adapter->pdev->dev,
+					DMA_BIT_MASK(SXE2_DMA_BIT_WIDTH_64));
+	if (ret) {
+		LOG_DEV_ERR("device[pci_id %u] 64 dma mask and coherent set failed\n",
+			    adapter->pdev->dev.id);
+		goto l_end;
+	}
+
+#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING
+	pci_enable_pcie_error_reporting(pdev);
+#endif
+
+	pci_set_master(pdev);
+
+	(void)pci_save_state(pdev);
+	pci_set_drvdata(pdev, adapter);
+
+l_end:
+	return ret;
+}
+
+void sxe2_pci_deinit(struct sxe2_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING
+	pci_disable_pcie_error_reporting(adapter->pdev);
+#endif
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+s32 sxe2_hw_cfg_info_get(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	u32 value;
+
+	value = sxe2_hw_irq_gran_info_get(&adapter->hw);
+	if (value == SXE2_REG_INVALID_VALUE)
+		return -EIO;
+
+	adapter->hw.hw_cfg.credit_interval_gran =
+		value & SXE2_PFG_INT_CTL_CREDIT_GRAN
+			?
SXE2_PFG_INT_CTL_CREDIT_GRAN_1 + : SXE2_PFG_INT_CTL_CREDIT_GRAN_0; + adapter->hw.hw_cfg.itr_gran = + (u16)FIELD_GET(SXE2_PFG_INT_CTL_ITR_GRAN, value); + if (!adapter->hw.hw_cfg.itr_gran) + adapter->hw.hw_cfg.itr_gran = SXE2_PFG_INT_CTL_ITR_GRAN_0; + + LOG_DEBUG_BDF("hw cfg info: itr_gran %d, intrl_gran %d.\n", + adapter->hw.hw_cfg.itr_gran, + adapter->hw.hw_cfg.credit_interval_gran); + + return ret; +} + +STATIC s32 sxe2_bar_region_map(struct pci_dev *pdev, struct sxe2_map_info *map) +{ + resource_size_t size, base; + void __iomem *addr; + + if (WARN_ON(map->end <= map->start)) { + LOG_ERROR("map end:0x%llx start:0x%llx invalid.\n", map->end, + map->start); + return -EIO; + } + + size = map->end - map->start; + base = pci_resource_start(pdev, map->bar_idx) + map->start; + addr = ioremap(base, size); + if (!addr) { + LOG_ERROR("%s: remap at offset:%llu size:%llu failed\n", __func__, + map->start, size); + return -EIO; + } + + map->addr = addr; + LOG_INFO("start:0x%llx end:0x%llx size:0x%llx map success addr:%pK.\n", + map->start, map->end, size, addr); + + return 0; +} + +STATIC s32 sxe2_bar_addr_map(struct sxe2_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + struct sxe2_hw_map *map_info; + resource_size_t bar_len; + u32 nr_maps; + u32 i; + s32 ret; + + bar_len = pci_resource_len(pdev, 0); + if (bar_len > SXE2_BAR_RDMA_WB_END) + nr_maps = 2; + else + nr_maps = 1; + + map_info = kzalloc(struct_size(map_info, maps, nr_maps), GFP_KERNEL); + if (!map_info) { + LOG_ERROR_BDF("nr_maps:%u bar_len:%llu map info memory alloc fail.\n", + nr_maps, bar_len); + return -ENOMEM; + } + + map_info->map_cnt = nr_maps; + + ret = pci_request_mem_regions(pdev, dev_driver_string(dev)); + if (ret) { + LOG_DEV_ERR("nr_maps:%u bar_len:%llu pci_request_mem_regions failed.\n", + nr_maps, bar_len); + goto err_free_hw_addr; + } + + for (i = 0; i < nr_maps; i++) { + map_info->maps[i].bar_idx = 0; + if (i == 0) { + map_info->maps[0].start = 0; + map_info->maps[0].end = min_t(resource_size_t, bar_len, + SXE2_BAR_RDMA_WB_START); + } else if (i == 1) { + map_info->maps[1].start = SXE2_BAR_RDMA_WB_END; + map_info->maps[1].end = bar_len; + } + ret = sxe2_bar_region_map(pdev, &map_info->maps[i]); + if (ret) + goto err_release_mem_regions; + } + + adapter->hw.hw_map = (typeof(adapter->hw.hw_map))map_info; + + LOG_INFO_BDF("bar_len:0x%llx map_cnt:%u map_info:%pK.\n", bar_len, nr_maps, + map_info); + return 0; + +err_release_mem_regions: + if (i == 1) + iounmap(map_info->maps[0].addr); + pci_release_mem_regions(pdev); +err_free_hw_addr: + kfree(map_info); + return ret; +} + +STATIC void sxe2_bar_addr_unmap(struct sxe2_adapter *adapter) +{ + struct sxe2_hw_map *hw_map = (struct sxe2_hw_map *)adapter->hw.hw_map; + struct pci_dev *pdev = adapter->pdev; + u32 i; + + if (WARN_ON(!hw_map)) + return; + + adapter->hw.hw_map = NULL; + for (i = 0; i < hw_map->map_cnt; i++) + iounmap(hw_map->maps[i].addr); + kfree(hw_map); + + pci_release_mem_regions(pdev); +} + +STATIC s32 sxe2_request_fw(struct sxe2_adapter *adapter, + const struct firmware **firmware) +{ + s32 err = 0; + struct device *dev = &adapter->pdev->dev; + + err = request_firmware(firmware, SXE2_DDP_PKG_FILE, dev); + if (err) + LOG_DEV_ERR("the DDP package file was not found or could not be read.\t" + "Entering Safe Mode\n"); + + return err; +} + +STATIC s32 sxe2_init_ddp_config(struct sxe2_adapter *adapter) +{ + const struct firmware *firmware = NULL; + s32 err; + + err = sxe2_request_fw(adapter, &firmware); + if (err) + return 
err;
+
+	sxe2_load_pkg(firmware, adapter);
+	release_firmware(firmware);
+	return err;
+}
+
+void sxe2_fw_version_get(struct sxe2_adapter *adapter)
+{
+	u32 fw_ver;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	fw_ver = sxe2_fw_ver_get(hw);
+	hw->fw_ver.build_id = fw_ver & SXE2_FW_VER_BUILD_M;
+	hw->fw_ver.fix_version_id =
+		(fw_ver & SXE2_FW_VER_FIX_M) >> SXE2_FW_VER_FIX_SHIFT;
+	hw->fw_ver.sub_version_id =
+		(fw_ver & SXE2_FW_VER_SUB_M) >> SXE2_FW_VER_SUB_SHIFT;
+	hw->fw_ver.main_version_id =
+		(fw_ver & SXE2_FW_VER_MAIN_M) >> SXE2_FW_VER_MAIN_SHIFT;
+}
+
+static void sxe2_link_status_sync(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FLM_LINK_STATUS_SYNC, NULL, 0, NULL,
+				  0);
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret)
+		LOG_DEV_ERR("sync link status failed, ret=%d\n", ret);
+}
+
+STATIC void sxe2_hw_board_type_get(struct sxe2_adapter *adapter)
+{
+	struct sxe2_hw *hw = &adapter->hw;
+	u32 value;
+
+	value = sxe2_fw_pop_get(hw);
+	LOG_INFO_BDF("pop type reg value=%d.\n", value);
+	hw->is_pop_type = !!value;
+}
+
+STATIC s32 sxe2_hw_base_init(struct sxe2_adapter *adapter)
+{
+	s32 ret;
+	struct sxe2_hw *hw;
+
+	hw = &adapter->hw;
+	hw->adapter = adapter;
+
+	sxe2_dev_ctrl_init_once(adapter);
+
+#if !defined(SXE2_TEST) && !defined(SXE2VF_TEST)
+	ret = sxe2_wait_reset_done(adapter, SXE2_RESET_CORER);
+	if (ret)
+		goto l_end;
+#endif
+	ret = sxe2_stop_drop(adapter);
+	if (ret)
+		goto l_end;
+
+	adapter->dev_ctrl_ctxt.dev_state = SXE2_DEVSTATE_ACCESSIBLE;
+
+	ret = sxe2_bar_addr_map(adapter);
+	if (ret) {
+		LOG_DEV_ERR("pci bar map fail, ret=%d\n", ret);
+		goto l_end;
+	}
+
+	sxe2_hw_reg_handle_init(hw, sxe2_readl, sxe2_writel);
+
+	ret = sxe2_wait_fw_init(adapter);
+	if (ret)
+		goto l_unmap;
+
+	ret = sxe2_reset_sync(adapter, SXE2_RESET_PFR);
+	if (ret) {
+		LOG_DEV_ERR("PFR failed, ret=%d\n", ret);
+		goto l_unmap;
+	}
+
+	sxe2_fw_version_get(adapter);
+
+	ret = sxe2_cmd_channels_init(adapter);
+	if (ret) {
+		LOG_DEV_ERR("init cmd channel failed, ret=%d\n", ret);
+		goto l_unmap;
+	}
+
+	if (sxe2_drv_mode_get(adapter)) {
+		LOG_ERROR_BDF("get drv mode failed, fall back to mixed mode\n");
+		adapter->drv_mode = SXE2_COM_MODULE_MIXED;
+	}
+
+	ret = sxe2_fwc_clear_pf_cfg(adapter);
+	if (ret) {
+		LOG_DEV_ERR("clear pf cfg failed, ret=%d\n", ret);
+		goto l_cmd_channel_release;
+	}
+
+	ret = sxe2_fwc_pxe_disable(adapter);
+	if (ret) {
+		LOG_DEV_ERR("pxe mode disable failed, ret=%d\n", ret);
+		goto l_cmd_channel_release;
+	}
+
+	sxe2_hw_board_type_get(adapter);
+
+	ret = sxe2_hw_cfg_info_get(adapter);
+	if (ret) {
+		LOG_DEV_ERR("get hw cfg failed, ret=%d\n", ret);
+		goto l_cmd_channel_release;
+	}
+
+	set_bit(SXE2_FLAG_SWITCHDEV_CAPABLE, adapter->flags);
+
+	ret = sxe2_hw_mtu_init(adapter, SXE2_MAX_FRAME_SIZE, true);
+	if (ret) {
+		LOG_DEV_ERR("hw mtu init failed, ret=%d\n", ret);
+		goto l_cmd_channel_release;
+	}
+	(void)sxe2_init_ddp_config(adapter);
+
+	ret = sxe2_caps_get(adapter);
+	if (ret) {
+		LOG_DEV_ERR("get device and function caps failed, ret=%d\n", ret);
+		goto l_cmd_channel_release;
+	}
+	sxe2_hw_pf_stats_update(adapter);
+
+	sxe2_link_status_sync(adapter);
+
+	return ret;
+
+l_cmd_channel_release:
+	sxe2_cmd_channels_deinit(adapter);
+l_unmap:
+	sxe2_bar_addr_unmap(adapter);
+l_end:
+	sxe2_dev_ctrl_deinit_once(adapter);
+	return ret;
+}
+
+STATIC void sxe2_hw_base_deinit(struct sxe2_adapter *adapter)
+{
+	sxe2_cmd_channels_deinit(adapter);
+
+	if (!sxe2_hw_is_fault(&adapter->hw)) {
+		if
(sxe2_reset_sync(adapter, SXE2_RESET_PFR)) + LOG_ERROR_BDF("PFR failed.\n"); + } + + sxe2_bar_addr_unmap(adapter); + + sxe2_free_seg(adapter); + + sxe2_dev_ctrl_deinit_once(adapter); +} + +STATIC void sxe2_sw_lock_init(struct sxe2_adapter *adapter) +{ + mutex_init(&adapter->irq_ctxt.lock); + mutex_init(&adapter->q_ctxt.lock); + mutex_init(&adapter->vsi_ctxt.lock); + mutex_init(&adapter->dcb_ctxt.tc_mutex); + mutex_init(&adapter->aux_ctxt.adev_mutex); + mutex_init(&adapter->switch_ctxt.lldp_rule_lock); + spin_lock_init(&adapter->monitor_ctxt.lock); +} + +STATIC void sxe2_sw_lock_deinit(struct sxe2_adapter *adapter) +{ + mutex_destroy(&adapter->switch_ctxt.lldp_rule_lock); + mutex_destroy(&adapter->irq_ctxt.lock); + mutex_destroy(&adapter->q_ctxt.lock); + mutex_destroy(&adapter->vsi_ctxt.lock); + mutex_destroy(&adapter->dcb_ctxt.tc_mutex); + mutex_destroy(&adapter->aux_ctxt.adev_mutex); + mutex_destroy(&adapter->udp_tunnel_ctxt.lock); +} + +STATIC s32 sxe2_sw_vsi_array_alloc(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + adapter->vsi_ctxt.vsi = + devm_kcalloc(dev, adapter->vsi_ctxt.max_cnt, + sizeof(*adapter->vsi_ctxt.vsi), GFP_KERNEL); + if (!adapter->vsi_ctxt.vsi) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc vsis failed, count: %d, size: %zu.\n", + adapter->vsi_ctxt.max_cnt, + sizeof(*adapter->vsi_ctxt.vsi)); + } + + return ret; +} + +STATIC void sxe2_link_ctxt_init(struct sxe2_adapter *adapter) +{ + memset(&adapter->link_ctxt, 0, sizeof(struct sxe2_cmd_link_context)); + mutex_init(&adapter->link_ctxt.link_status_lock); +} + +STATIC void sxe2_link_ctxt_deinit(struct sxe2_adapter *adapter) +{ + mutex_destroy(&adapter->link_ctxt.link_status_lock); + memset(&adapter->link_ctxt, 0, sizeof(struct sxe2_cmd_link_context)); +} + +STATIC s32 sxe2_sw_init_once(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + sxe2_link_ctxt_init(adapter); + + sxe2_sw_lock_init(adapter); + + ret = sxe2_sw_vsi_array_alloc(adapter); + if (ret) + goto l_vsis_alloc_failed; + + sxe2_vf_init(adapter); + + ret = sxe2_switch_context_init(adapter); + if (ret) { + LOG_ERROR_BDF("init switch ctx failed, ret=%d\n", ret); + goto l_switch_ctx_init_failed; + } + sxe2_fnav_ctxt_init(adapter); + (void)sxe2_arfs_init(adapter); + + ret = sxe2_acl_init(adapter); + if (ret) { + LOG_ERROR_BDF("init acl failed, ret=%d\n", ret); + goto l_acl_init_failed; + } + + sxe2_rss_flow_ctxt_init(adapter); + + (void)sxe2_ddp_params_store(adapter); + + ATOMIC_INIT_NOTIFIER_HEAD(&adapter->com_ctxt.irqs.irq_nh); + + return 0; + +l_acl_init_failed: + sxe2_arfs_deinit(adapter); + sxe2_fnav_ctxt_deinit(adapter); + sxe2_switch_context_deinit(adapter); +l_switch_ctx_init_failed: + sxe2_vf_deinit(adapter); + if (adapter->vsi_ctxt.vsi) { + devm_kfree(dev, adapter->vsi_ctxt.vsi); + adapter->vsi_ctxt.vsi = NULL; + } + +l_vsis_alloc_failed: + sxe2_sw_lock_deinit(adapter); + sxe2_link_ctxt_deinit(adapter); + return ret; +} + +STATIC void sxe2_sw_deinit_once(struct sxe2_adapter *adapter) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + sxe2_rss_flow_ctxt_deinit(adapter); + + sxe2_arfs_deinit(adapter); + sxe2_fnav_ctxt_deinit(adapter); + sxe2_acl_deinit(adapter); + + sxe2_vf_deinit(adapter); + + if (adapter->vsi_ctxt.vsi) { + devm_kfree(dev, adapter->vsi_ctxt.vsi); + adapter->vsi_ctxt.vsi = NULL; + } + + sxe2_switch_context_deinit(adapter); + + sxe2_sw_lock_deinit(adapter); + sxe2_link_ctxt_deinit(adapter); +} + +STATIC s32 sxe2_mac_addr_init(struct 
sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + u8 broadcast[ETH_ALEN]; + + ret = sxe2_default_mac_addr_get(vsi, hw->mac_info.perm_addr); + if (ret) + return ret; + + ret = sxe2_cur_mac_addr_set(vsi, hw->mac_info.perm_addr); + if (ret) + return ret; + + if (!is_valid_ether_addr(hw->mac_info.perm_addr)) { + LOG_DEV_INFO("current mac addr:%pM invalid.\n", + hw->mac_info.perm_addr); + } + + eth_hw_addr_set(vsi->netdev, hw->mac_info.perm_addr); + + if (allow_inval_mac == 0) { + (void)mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_mac_addr_add(vsi, hw->mac_info.perm_addr, + SXE2_MAC_OWNER_NETDEV); + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + if (ret) + return ret; + } + + eth_broadcast_addr(broadcast); + ret = sxe2_mac_rule_add(vsi, broadcast); + + return ret; +} + +STATIC void sxe2_log_pkg_init(struct sxe2_adapter *adapter, s32 err) +{ + switch (err) { + case SXE2_DDP_PKG_SUCCESS: + LOG_DEV_INFO("the DDP package was successfully loaded\n"); + break; + case -SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + LOG_DEV_INFO("DDP package already present on device.\n"); + break; + case -SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: + LOG_DEV_ERR("the device has a DDP package that is not supported by\t" + "the driver.\n"); + break; + case -SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + LOG_DEV_INFO("the driver could not load the DDP package file.\t" + "because a compatible DDP package is already present\t" + "on the device.\n"); + break; + case -SXE2_DDP_PKG_FW_MISMATCH: + LOG_DEV_ERR("the firmware loaded on the device is not compatible\t" + "with the DDP package. Please update the device's NVM.\t" + "Entering safe mode.\n"); + break; + case -SXE2_DDP_PKG_INVALID_FILE: + LOG_DEV_ERR("the DDP package file is invalid. Entering Safe\t" + "Mode.\n"); + break; + case -SXE2_DDP_PKG_FILE_VERSION_TOO_HIGH: + LOG_DEV_ERR("the DDP package file version is higher than the driver\t" + "supports. Please use an updated driver. Entering Safe\t" + "Mode.\n"); + break; + case -SXE2_DDP_PKG_FILE_VERSION_TOO_LOW: + LOG_DEV_ERR("the DDP package file version is lower than the driver\t" + "supports.\n"); + break; + case -SXE2_DDP_PKG_NO_SEC_MANIFEST: + LOG_DEV_ERR("the DDP package could not be loaded because its\t" + "security manifest is missing. Please use a valid DDP\t" + "Package. Entering Safe Mode.\n"); + break; + case -SXE2_DDP_PKG_MANIFEST_INVALID: + case -SXE2_DDP_PKG_BUFFER_INVALID: + LOG_DEV_ERR("an error occurred on the device while loading the DDP package.\n" + "The device will be reset.\n"); + break; + case -SXE2_DDP_PKG_ERR: + default: + LOG_DEV_ERR("an unknown error occurred when loading the DDP package.\n" + "Entering Safe Mode.\n"); + break; + } +} + +void sxe2_load_pkg(const struct firmware *firmware, struct sxe2_adapter *adapter) +{ + s32 ret = SXE2_DDP_PKG_ERR; + struct sxe2_hw *hw = &adapter->hw; + + if (firmware && !hw->pkg_copy) { + ret = sxe2_copy_and_init_pkg(adapter, firmware->data, + firmware->size); + sxe2_log_pkg_init(adapter, ret); + } else if (!firmware && hw->pkg_copy) { + ret = sxe2_init_pkg(adapter, hw->pkg_copy, hw->pkg_size); + } else { + LOG_DEV_WARN("The DDP package file failed to load. 
Entering Safe Mode.\n"); + } + + if (!sxe2_is_init_pkg_successful(ret)) { + clear_bit(SXE2_FLAG_ADVANCE_MODE, adapter->flags); + return; + } + + set_bit(SXE2_FLAG_ADVANCE_MODE, adapter->flags); +} + +STATIC s32 sxe2_serial_num_get(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_fwc_serial_num_resp resp = {}; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_PF_SERIAL_GET, NULL, 0, &resp, + sizeof(resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("get serial num failed, ret=%d\n", ret); + ret = -EIO; + } + + memcpy(adapter->serial_num, resp.serial_num, SXE2_SERIAL_NUM_LEN); + + return ret; +} + +STATIC s32 sxe2_init_eth(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_vsi *pf_vsi; + + ret = sxe2_txsched_init(adapter); + if (ret) { + LOG_DEV_ERR("txsched failed, ret=%d\n", ret); + goto l_sched_init_failed; + } + + ret = sxe2_irq_init(adapter); + if (ret) { + LOG_DEV_ERR("init irq failed, ret=%d\n", ret); + goto l_irq_init_failed; + } + + sxe2_queue_init(adapter); + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + ret = sxe2_main_vsi_create(adapter); + if (ret) { + LOG_DEV_ERR("create main vsi failed, ret=%d\n", ret); + goto l_main_vsi_failed; + } + + pf_vsi = adapter->vsi_ctxt.main_vsi; + ret = sxe2_netdev_init(pf_vsi); + if (ret) { + LOG_DEV_ERR("netdev init failed, ret=%d\n", ret); + goto l_netdev_init_failed; + } + ret = sxe2_mac_addr_init(pf_vsi); + if (ret) { + LOG_DEV_ERR("mac filter config failed, ret=%d\n", ret); + goto l_netdev_fltr_failed; + } + sxe2_napi_add(pf_vsi); + } + + ret = sxe2_serial_num_get(adapter); + if (ret) { + LOG_DEV_ERR("get serial num failed, ret=%d\n", ret); + goto l_netdev_fltr_failed; + } + + ret = sxe2_lldp_agent_event_init(adapter); + if (ret) { + LOG_ERROR_BDF("init lldp event failed, ret=%d\n", ret); + goto l_netdev_fltr_failed; + } + + return 0; + +l_netdev_fltr_failed: + sxe2_netdev_deinit(adapter->vsi_ctxt.main_vsi); +l_netdev_init_failed: + sxe2_vsi_destroy(adapter->vsi_ctxt.main_vsi); + adapter->vsi_ctxt.main_vsi = NULL; +l_main_vsi_failed: + sxe2_irq_deinit(adapter); +l_irq_init_failed: +l_sched_init_failed: + sxe2_txsched_deinit(adapter); + + return ret; +} + +STATIC void sxe2_deinit_eth(struct sxe2_adapter *adapter) +{ + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + sxe2_napi_del(adapter->vsi_ctxt.main_vsi); + sxe2_netdev_deinit(adapter->vsi_ctxt.main_vsi); + sxe2_vsi_destroy(adapter->vsi_ctxt.main_vsi); + adapter->vsi_ctxt.main_vsi = NULL; + } + sxe2_irq_deinit(adapter); + sxe2_txsched_deinit(adapter); +} + +STATIC s32 sxe2_init_aux(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + if (!sxe2_is_safe_mode(adapter)) { + ret = sxe2_rdma_aux_init(adapter); + if (ret) { + LOG_DEV_ERR("rdma aux init failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_rdma_aux_add(adapter); + if (ret) { + LOG_DEV_ERR("rdma aux add failed, ret=%d\n", ret); + ret = 0; + } + } else { + LOG_DEV_INFO("running in safe mode,rdma is not supported\n"); + } + +l_end: + return ret; +} + +STATIC s32 sxe2_init_feature_with_netdev(struct sxe2_adapter *adapter) +{ + s32 ret; + +#ifdef HAVE_MACSEC_SUPPORT + ret = sxe2_macsec_init(adapter); + if (ret) { + LOG_DEV_ERR("macsec initial failed.\n"); + goto l_macsec_init_failed; + } +#endif + + ret = sxe2_ipsec_init(adapter); + if (ret) { + LOG_DEV_ERR("ipsec initial failed.\n"); + goto l_ipsec_init_failed; + } + + ret = sxe2_lag_init(adapter); + if (ret) { + LOG_DEV_ERR("lag init failed.\n"); + goto l_lag_init_failed; + } + + ret 
= sxe2_cli_cdev_create(adapter); + if (ret) { + LOG_DEV_ERR("cli char dev create failed, ret=%d\n", ret); + goto l_cdev_create_failed; + } + +#ifdef HAVE_TC_INDIR_BLOCK + ret = sxe2_tc_indir_block_register(adapter->vsi_ctxt.main_vsi); + if (ret) { + LOG_DEV_ERR("register netdev notifier failed, ret=%d\n", ret); + goto l_tc_block_register_failed; + } +#endif + return 0; + +#ifdef HAVE_TC_INDIR_BLOCK +l_tc_block_register_failed: + sxe2_cli_cdev_delete(adapter); +#endif +l_cdev_create_failed: + sxe2_lag_deinit(adapter); +l_lag_init_failed: + sxe2_ipsec_deinit(adapter); +l_ipsec_init_failed: +#ifdef HAVE_MACSEC_SUPPORT + sxe2_macsec_deinit(adapter); +l_macsec_init_failed: +#endif + return ret; +} + +STATIC void sxe2_deinit_feature_with_netdev(struct sxe2_adapter *adapter) +{ +#ifdef HAVE_TC_INDIR_BLOCK + sxe2_tc_indir_block_unregister(adapter->vsi_ctxt.main_vsi); +#endif + sxe2_cli_cdev_delete(adapter); + + sxe2_lag_deinit(adapter); + + sxe2_ipsec_deinit(adapter); + +#ifdef HAVE_MACSEC_SUPPORT + sxe2_macsec_deinit(adapter); +#endif +} + +STATIC s32 sxe2_init_feature_without_netdev(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + ret = sxe2_pf_eth_fnav_init(adapter); + if (ret) { + LOG_DEV_ERR("ctrl vsi init failed, ret=%d\n", ret); + goto l_fnav_init_failed; + } + + ret = sxe2_ptp_init(adapter); + if (ret) { + LOG_DEV_ERR("ptp init failed, ret=%d\n", ret); + goto l_ptp_init_failed; + } + clear_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags); + + ret = sxe2_dcb_init(adapter, false); + if (ret) { + LOG_DEV_ERR("dcb init failed\n"); + goto l_dcb_init_failed; + } + return ret; + +l_dcb_init_failed: + sxe2_ptp_deinit(adapter); +l_ptp_init_failed: + sxe2_pf_eth_fnav_deinit(adapter); +l_fnav_init_failed: + return ret; +} + +STATIC void sxe2_deinit_feature_without_netdev(struct sxe2_adapter *adapter) +{ + sxe2_dcb_deinit(adapter, false); + + sxe2_ptp_deinit(adapter); + + sxe2_pf_eth_fnav_deinit(adapter); +} + +STATIC void sxe2_init_work_tasks(struct sxe2_adapter *adapter) +{ + sxe2_monitor_init(adapter); + sxe2_dev_ctrl_init(adapter); +} + +STATIC void sxe2_deinit_work_tasks(struct sxe2_adapter *adapter) +{ + sxe2_dev_ctrl_deinit(adapter); + + sxe2_monitor_stop(adapter); +} + +STATIC void sxe2_start_work_tasks(struct sxe2_adapter *adapter) +{ + sxe2_monitor_start(adapter); + + sxe2_dev_state_set(adapter, SXE2_DEVSTATE_RUNNING, 0); + + sxe2_dev_ctrl_work_start(adapter); +} + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +static struct udp_tunnel_nic_info sxe2_udp_tunnels = { + .set_port = sxe2_udp_tunnel_set_port, + .unset_port = sxe2_udp_tunnel_unset_port, +#ifdef HAVE_UDP_TUNNEL_NIC_INFO_MAY_SLEEP + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, +#endif + .tables = { + { + .n_entries = 1, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, + }, + { + .n_entries = 1, + .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, + }, + { + .n_entries = 1, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, + }, + }, +}; +#endif + +static void sxe2_udp_tunnel_init(struct sxe2_adapter *adapter) +{ + struct sxe2_udp_tunnel_context *udp_tunnel_ctxt = &adapter->udp_tunnel_ctxt; + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + adapter->udp_tunnel_nic = &sxe2_udp_tunnels; +#ifdef HAVE_UDP_TUNNEL_NIC_SHARED + adapter->udp_tunnel_nic->shared = &adapter->udp_tunnel_shared; +#endif +#endif + + memset(udp_tunnel_ctxt, 0, sizeof(struct sxe2_udp_tunnel_context)); + mutex_init(&udp_tunnel_ctxt->lock); + bitmap_zero(udp_tunnel_ctxt->vsi_map, SXE2_MAX_VSI_NUM); +} + +STATIC s32 sxe2_sw_base_init(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_vsi *pf_vsi; + + ret = 
sxe2_sw_init_once(adapter); + if (ret) + return ret; + + sxe2_init_work_tasks(adapter); + + ret = sxe2_init_eth(adapter); + if (ret) { + LOG_DEV_ERR("init eth device failed, ret=%d\n", ret); + goto l_eth_init_failed; + } + + ret = sxe2_log_export_init(adapter); + if (ret) { + LOG_DEV_ERR("log export init failed, ret=%d\n", ret); + goto l_dump_init_failed; + } + + if (sxe2_is_safe_mode(adapter)) + goto l_probe_dev_regist; + + ret = sxe2_init_feature_without_netdev(adapter); + if (ret) + goto l_init_feature_without_netdev_failed; + +l_probe_dev_regist: + pf_vsi = adapter->vsi_ctxt.main_vsi; + ret = sxe2_netdev_register(pf_vsi); + if (ret) { + LOG_DEV_ERR("netdev register failed, ret=%d\n", ret); + goto l_netdev_reg_failed; + } + + ret = sxe2_init_feature_with_netdev(adapter); + if (ret) + goto l_init_feature_with_netdev_failed; + + ret = sxe2_init_aux(adapter); + if (ret) + goto l_aux_init_failed; + + if (!sxe2_is_safe_mode(adapter)) { + ret = sxe2_com_init(&adapter->com_ctxt, adapter, &g_com_ops); + if (ret) + goto l_com_init_failed; + } + + sxe2_debugfs_pf_init(adapter); + + sxe2_start_work_tasks(adapter); + + sxe2_udp_tunnel_init(adapter); + + return ret; + +l_com_init_failed: + sxe2_rdma_aux_deinit(adapter); +l_aux_init_failed: + sxe2_deinit_feature_with_netdev(adapter); +l_init_feature_with_netdev_failed: + unregister_netdev(pf_vsi->netdev); +l_netdev_reg_failed: + sxe2_deinit_feature_without_netdev(adapter); +l_init_feature_without_netdev_failed: + sxe2_log_export_deinit(adapter); +l_dump_init_failed: + sxe2_deinit_eth(adapter); +l_eth_init_failed: + sxe2_deinit_work_tasks(adapter); + sxe2_sw_deinit_once(adapter); + + return ret; +} + +STATIC void sxe2_sw_base_deinit(struct sxe2_adapter *adapter) +{ + if (test_bit(SXE2_PF_STOPPED, &adapter->dev_ctrl_ctxt.flag)) { + if (sxe2_cmd_channels_enable(adapter)) + LOG_DEV_WARN("cmd channel enable failed.\n"); + sxe2_event_irq_enable(adapter); + } + + sxe2_lldp_agent_event_deinit(adapter); + + sxe2_stop_lfc(adapter); + sxe2_dcb_deinit(adapter, false); + + sxe2_monitor_stop(adapter); + + sxe2_dev_ctrl_deinit(adapter); + + sxe2_com_deinit(&adapter->com_ctxt); + + sxe2_vf_deinit(adapter); + + sxe2_cli_cdev_delete(adapter); + + sxe2_rdma_aux_deinit(adapter); + +#ifdef HAVE_TC_INDIR_BLOCK + sxe2_tc_indir_block_unregister(adapter->vsi_ctxt.main_vsi); +#endif + sxe2_ptp_deinit(adapter); + + sxe2_lag_deinit(adapter); + + unregister_netdev(adapter->vsi_ctxt.main_vsi->netdev); + + sxe2_netdev_deinit(adapter->vsi_ctxt.main_vsi); + + sxe2_ctrl_vsi_deinit(adapter); + +#ifdef HAVE_MACSEC_SUPPORT + sxe2_macsec_deinit(adapter); +#endif + + sxe2_ipsec_deinit(adapter); + + sxe2_vsi_destroy_all(adapter); + + sxe2_switch_fltr_restore_clean(adapter); + + sxe2_switch_context_deinit(adapter); + + sxe2_txsched_deinit(adapter); + + sxe2_irq_deinit(adapter); + + sxe2_sw_deinit_once(adapter); + + sxe2_debugfs_pf_exit(adapter); +} + +#ifdef CONFIG_PM +static int __maybe_unused sxe2_pm_suspend(struct device *dev) +{ + s32 ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + + LOG_DEBUG_BDF("suspend was called\n"); + + if (test_and_set_bit(SXE2_FLAG_SUSPEND, adapter->flags)) + goto out; + + sxe2_dev_ctrl_work_stop(adapter); + + sxe2_pf_stop(adapter, SXE2_PF_STOP_NORMAL); + + sxe2_rdma_aux_delete(&adapter->aux_ctxt.cdev_info); + + sxe2_irq_deinit(adapter); + + ret = pci_save_state(pdev); + if (ret) { + LOG_DEV_ERR("pci_save_state failed with error code:%d\n", ret); + goto out; + } + + ret = pci_set_power_state(pdev, 
PCI_D3hot);
+ if (ret)
+ LOG_DEV_ERR("pci_set_power_state failed with error code:%d\n", ret);
+
+out:
+ LOG_DEV_DEBUG("suspend end, ret=%d\n", ret);
+ return ret;
+}
+
+static int __maybe_unused sxe2_pm_resume(struct device *dev)
+{
+ s32 ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sxe2_adapter *adapter = pci_get_drvdata(pdev);
+
+ LOG_DEBUG_BDF("resume was called\n");
+
+ if (!test_bit(SXE2_FLAG_SUSPEND, adapter->flags))
+ goto out;
+
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret) {
+ LOG_DEV_ERR("pci_set_power_state failed with error code:%d\n", ret);
+ goto out;
+ }
+ pci_restore_state(pdev);
+ ret = pci_save_state(pdev);
+ if (ret) {
+ LOG_DEV_ERR("pci_save_state failed with error code:%d\n", ret);
+ goto out;
+ }
+
+ if (!pci_device_is_present(pdev)) {
+ LOG_DEV_ERR("pci device has been lost\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret) {
+ LOG_DEV_ERR("cannot enable device after resume\n");
+ goto out;
+ }
+
+ ret = sxe2_irq_resume(adapter);
+ if (ret) {
+ LOG_DEV_ERR("irq resume err, ret=%d\n", ret);
+ goto out;
+ }
+
+ ret = sxe2_reset_async(adapter, SXE2_RESET_PFR);
+ if (ret) {
+ LOG_DEV_ERR("PFR failed during resume, ret=%d\n", ret);
+ goto out;
+ }
+ sxe2_dev_ctrl_work_start(adapter);
+
+ clear_bit(SXE2_FLAG_SUSPEND, adapter->flags);
+
+out:
+ LOG_DEV_DEBUG("resume end, ret=%d\n", ret);
+ return ret;
+}
+#endif
+
+STATIC void sxe2_msg_level_init(struct sxe2_adapter *adapter)
+{
+ adapter->msglvl_ctxt.msg_enable = netif_msg_init(debug, SXE2_DFLT_NETIF_M);
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ adapter->msglvl_ctxt.debug_mask = (u64)debug;
+#endif
+}
+
+STATIC int sxe2_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ int ret;
+ struct sxe2_adapter *adapter;
+
+ adapter = sxe2_adapter_create(pdev);
+ if (!adapter) {
+ ret = -ENOMEM;
+ LOG_ERROR("adapter create failed, can't probe device\n");
+ goto l_end;
+ }
+
+ sxe2_msg_level_init(adapter);
+
+ sxe2_devlink_register(adapter);
+ ret = sxe2_pci_init(adapter);
+ if (ret) {
+ LOG_DEV_ERR("pci init failed, ret=%d\n", ret);
+ goto l_create;
+ }
+
+ ret = sxe2_hw_base_init(adapter);
+ if (ret) {
+ LOG_DEV_ERR("hardware base init failed.(ret:%d)\n", ret);
+ goto l_pci_deinit;
+ }
+
+ ret = sxe2_sw_base_init(adapter);
+ if (ret) {
+ LOG_DEV_ERR("software base init failed.(ret:%d)\n", ret);
+ goto l_sw_base_init_failed;
+ }
+
+ return ret;
+
+l_sw_base_init_failed:
+ sxe2_hw_base_deinit(adapter);
+l_pci_deinit:
+ sxe2_pci_deinit(adapter);
+l_create:
+ sxe2_devlink_unregister(adapter);
+l_end:
+ return ret;
+}
+
+STATIC void sxe2_remove(struct pci_dev *pdev)
+{
+ struct sxe2_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter) {
+ LOG_WARN("adapter NULL, skip remove oper.\n");
+ return;
+ }
+
+ LOG_DEBUG_BDF("sxe2 driver remove start.\n");
+
+ sxe2_dev_ctrl_deinit(adapter);
+
+ (void)sxe2_fnav_switch(adapter, false);
+
+ if (adapter->vf_ctxt.num_vfs)
+ (void)sxe2_vfs_disable(adapter, true);
+
+ (void)sxe2_pf_stop(adapter, SXE2_PF_STOP_RESET_NOTICE_RDMA);
+ sxe2_sw_base_deinit(adapter);
+ sxe2_hw_base_deinit(adapter);
+
+ LOG_DEBUG_BDF("sxe2 driver remove end.\n");
+
+ (void)pci_wait_for_pending_transaction(pdev);
+#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING
+ pci_disable_pcie_error_reporting(adapter->pdev);
+#endif
+
+ sxe2_pci_deinit(adapter);
+
+ sxe2_devlink_unregister(adapter);
+}
+
+STATIC void sxe2_shutdown(struct pci_dev *pdev)
+{
+ sxe2_remove(pdev);
+}
+
+STATIC void __sxe2_pci_err_reset_prepare(struct sxe2_adapter *adapter)
+{
+ if
(!test_bit(SXE2_FLAG_SUSPEND, adapter->flags)) { + sxe2_dev_ctrl_work_stop(adapter); + sxe2_pf_stop(adapter, SXE2_PF_STOP_RESET_NOTICE_RDMA); + } +} + +STATIC void sxe2_pci_err_reset_prepare(struct pci_dev *pdev) +{ + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + + LOG_DEBUG_BDF("pflr trigger.\n"); + + __sxe2_pci_err_reset_prepare(adapter); +} + +STATIC void sxe2_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + u16 vf_id; + int pos; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + struct pci_dev *vfdev; + + (void)pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == pdev) + pci_restore_msi_state(vfdev); + vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + } + } +} + +STATIC pci_ers_result_t sxe2_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t error) +{ + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t ret; + + LOG_DEV_WARN("pci err:%u detected.\n", error); + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable pci err:0x%x\n", + __func__, error); + ret = PCI_ERS_RESULT_DISCONNECT; + goto l_out; + } + + __sxe2_pci_err_reset_prepare(adapter); + + pci_disable_device(pdev); + ret = error == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT + : PCI_ERS_RESULT_NEED_RESET; + +l_out: + LOG_DEV_WARN("pci err:%u detected done ret:%d.\n", error, ret); + + return ret; +} + +STATIC pci_ers_result_t sxe2_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; + s32 ret; + + LOG_DEV_WARN("pci err slot reset\n"); + + ret = pci_enable_device_mem(pdev); + if (ret) { + LOG_DEV_ERR("Cannot re-enable PCI device after reset, error %d\n", + ret); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + (void)pci_save_state(pdev); + (void)pci_wake_from_d3(pdev, false); + + ret = sxe2_wait_reset_done(adapter, SXE2_RESET_PFR); + if (!ret) + result = PCI_ERS_RESULT_RECOVERED; + else + result = PCI_ERS_RESULT_DISCONNECT; + } + + ret = pci_aer_clear_nonfatal_status(pdev); + if (ret) + LOG_DEV_ERR("pci_aer_clear_nonfatal_status failed, error %d\n", ret); + + LOG_DEV_WARN("pci err slot reset done %d.\n", ret); + + return result; +} + +STATIC void sxe2_pci_err_resume(struct pci_dev *pdev) +{ + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + + LOG_DEV_WARN("pci err resume\n"); + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable\n", __func__); + return; + } + + if (test_bit(SXE2_FLAG_SUSPEND, adapter->flags)) { + LOG_DEV_ERR("%s failed to resume normal operations!\n", __func__); + return; + } + + sxe2_restore_all_vfs_msi_state(pdev); + + if (sxe2_reset_sync(adapter, SXE2_RESET_PFR)) + LOG_ERROR_BDF("PFR failed.\n"); + + sxe2_rdma_aux_delete(&adapter->aux_ctxt.cdev_info); + + if (sxe2_pf_rebuild(adapter)) + LOG_DEV_ERR("rebuild pf failed.\n"); + + sxe2_dev_ctrl_work_start(adapter); + LOG_DEV_WARN("pci err resume done\n"); +} + +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +STATIC void sxe2_pci_err_reset_done(struct pci_dev *pdev) +{ + struct sxe2_adapter *adapter = pci_get_drvdata(pdev); + s32 ret; + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable\n", __func__); + return; + } + + if (test_bit(SXE2_FLAG_SUSPEND, adapter->flags)) { + LOG_DEV_ERR("%s failed to resume normal operations!\n", __func__); + return; + 
} + + sxe2_restore_all_vfs_msi_state(pdev); + + ret = sxe2_wait_reset_done(adapter, SXE2_RESET_PFR); + if (ret) + LOG_DEV_ERR("wait pflr done failed: %d.\n", ret); + + sxe2_rdma_aux_delete(&adapter->aux_ctxt.cdev_info); + + ret = sxe2_pf_rebuild(adapter); + if (ret) { + LOG_DEV_ERR("rebuild pf failed: %d.\n", ret); + goto work_start; + } + + ret = sxe2_reset_all_vfs(adapter); + if (ret) + LOG_DEV_ERR("reset all vfs failed %d.\n", ret); + +work_start: + sxe2_dev_ctrl_work_start(adapter); +} +#endif + +static int __maybe_unused sxe2_pm_resume(struct device *dev); +static int __maybe_unused sxe2_pm_suspend(struct device *dev); + +static __maybe_unused SIMPLE_DEV_PM_OPS(sxe2_pm_ops, sxe2_pm_suspend, + sxe2_pm_resume); + +STATIC const struct pci_error_handlers sxe2_pci_err_handler = { +#ifdef HAVE_PCI_ERROR_HANDLER_RESET_PREPARE + .reset_prepare = sxe2_pci_err_reset_prepare, + .reset_done = sxe2_pci_err_reset_done, +#endif + .error_detected = sxe2_pci_err_detected, + .slot_reset = sxe2_pci_err_slot_reset, + .resume = sxe2_pci_err_resume, +}; + +STATIC struct pci_driver sxe2_pci_driver = { + .name = SXE2_DRV_NAME, + .id_table = sxe2_pci_tbl, + .probe = sxe2_probe, + .remove = sxe2_remove, +#ifdef CONFIG_PM + .driver.pm = &sxe2_pm_ops, +#endif + .shutdown = sxe2_shutdown, + .sriov_configure = sxe2_sriov_configure, + .err_handler = &sxe2_pci_err_handler, +}; + +STATIC int __init sxe2_init(void) +{ + int ret; + + LOG_PR_INFO("%s init start, version[%s], commit_id[%s], branch[%s], build_time[%s]\n", + SXE2_DRV_DESCRIPTION, SXE2_VERSION, SXE2_COMMIT_ID, SXE2_BRANCH, + SXE2_BUILD_TIME); + +#ifndef SXE2_CFG_RELEASE + ret = sxe2_log_init(false); + if (ret < 0) { + LOG_PR_ERR("sxe2 log init fail.(err:%d)\n", ret); + goto l_end; + } +#endif + + ret = sxe2_monitor_create(); + if (ret) + goto l_log_init_rollback; + + ret = sxe2_cmd_work_create(); + if (ret) + goto l_workqueue_rollback; + + ret = sxe2_dev_ctrl_work_create(); + if (ret) + goto l_cmd_workqueue_rollback; + + ret = sxe2_cli_cdev_register(); + if (ret) { + LOG_ERROR("register cli char dev failed\n"); + goto l_dev_ctrl_workqueue_rollback; + } + + ret = sxe2_com_adapter_register(SXE2_PF); + if (ret) { + LOG_ERROR("register dpdk char dev failed\n"); + goto l_cdev_create_rollback; + } + + sxe2_lag_init_once(); + + sxe2_ptp_owner_init_once(); + + sxe2_debugfs_init(); + + ret = pci_register_driver(&sxe2_pci_driver); + if (ret) { + LOG_PR_ERR("register pci driver failed\n"); + goto l_com_register_rollback; + } + + return 0; + +l_com_register_rollback: + sxe2_debugfs_exit(); + sxe2_ptp_owner_deinit_once(); + sxe2_lag_deinit_once(); + sxe2_com_adapter_unregister(); +l_cdev_create_rollback: + sxe2_cli_cdev_unregister(); +l_dev_ctrl_workqueue_rollback: + sxe2_dev_ctrl_work_destroy(); +l_cmd_workqueue_rollback: + sxe2_cmd_work_destroy(); +l_workqueue_rollback: + sxe2_monitor_destroy(); +l_log_init_rollback: +#ifndef SXE2_CFG_RELEASE + sxe2_log_exit(); +l_end: +#endif + + return ret; +} + +STATIC void __exit sxe2_exit(void) +{ + pci_unregister_driver(&sxe2_pci_driver); + + sxe2_debugfs_exit(); + + sxe2_lag_deinit_once(); + + sxe2_com_adapter_unregister(); + + sxe2_cli_cdev_unregister(); + + sxe2_cmd_work_destroy(); + + sxe2_monitor_destroy(); + + sxe2_dev_ctrl_work_destroy(); + + sxe2_ptp_owner_deinit_once(); + +#ifndef SXE2_CFG_RELEASE + sxe2_log_exit(); +#endif +} + +MODULE_DEVICE_TABLE(pci, sxe2_pci_tbl); +MODULE_INFO(build_time, SXE2_BUILD_TIME); +MODULE_INFO(branch, SXE2_BRANCH); +MODULE_INFO(commit_id, SXE2_COMMIT_ID); 
+MODULE_DESCRIPTION(SXE2_DRV_DESCRIPTION); +MODULE_AUTHOR(SXE2_DRV_AUTHOR); +MODULE_VERSION(SXE2_VERSION); +MODULE_LICENSE(SXE2_DRV_LICENSE); +MODULE_ALIAS(SXE2_DRV_NAME); + +module_init(sxe2_init); +module_exit(sxe2_exit); diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.c new file mode 100644 index 0000000000000000000000000000000000000000..48d99517cbd0f3884d2f3dbc529f7baf978518e6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_mbx_channel.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_log.h" +#include "sxe2_mbx_channel.h" + +s32 sxe2_mbx_msg_send(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params) +{ + s32 ret = 0; + + ret = sxe2_cmd_mbx_exec(adapter, cmd_params); + if (ret) + LOG_ERROR_BDF("pf send msg to vf:%u opcode:0x%x req_len:%u \t" + "fail.(err:%d)\n", + cmd_params->vf_idx, cmd_params->opcode, + cmd_params->req_len, ret); + + return ret; +} + +s32 sxe2_mbx_msg_reply(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params) +{ + s32 ret; + + ret = sxe2_cmd_mbx_reply(adapter, cmd_params); + if (unlikely(ret)) + LOG_ERROR_BDF("pf send msg to vf:%u opcode:0x%x session_id:0x%llx \t" + "req_len:%u fail.(err:%d)\n", + cmd_params->vf_idx, cmd_params->opcode, + cmd_params->session_id, cmd_params->req_len, ret); + + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..6944c0809a7a2238092005690df9f1c5ee2cf607 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_channel.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_mbx_channel.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MBX_CHANNEL_H__ +#define __SXE2_MBX_CHANNEL_H__ + +s32 sxe2_mbx_msg_send(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params); + +s32 sxe2_mbx_msg_reply(struct sxe2_adapter *adapter, + struct sxe2_cmd_params *cmd_params); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.c new file mode 100644 index 0000000000000000000000000000000000000000..ebe9734df853064f706e4bfd40aaba2e11fdfba4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.c @@ -0,0 +1,3777 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_mbx_msg.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#include "sxe2_mbx_msg.h"
+#include "sxe2_cmd_channel.h"
+#include "sxe2_mbx_channel.h"
+#include "sxe2_mbx_public.h"
+#include "sxe2_log.h"
+#include "sxe2_rx.h"
+#include "sxe2_vsi.h"
+#include "sxe2_sriov.h"
+#include "sxe2_switch.h"
+#include "sxe2_tx.h"
+#include "sxe2_hw.h"
+#include "sxe2_dev_ctrl.h"
+#include "sxe2_ethtool.h"
+#include "sxe2_rss.h"
+#include "sxe2_ptp.h"
+#include "sxe2_monitor.h"
+#include "sxe2_txsched.h"
+#include "sxe2_tc.h"
+#include "sxe2_com_ioctl.h"
+
+#define SXE2_CALC_RESP_LEN(data_obj, max_tran_len) \
+ min_t(u32, sizeof(data_obj), (max_tran_len))
+
+static bool sxe2_mbx_msg_vsi_id_is_valid(struct sxe2_vf_node *vf, u16 vsi_id)
+{
+ return (vf->vsi_id[SXE2_VF_TYPE_ETH] == vsi_id) ||
+ (vf->vsi_id[SXE2_VF_TYPE_DPDK] == vsi_id);
+}
+
+void sxe2_mbx_msg_params_fill(struct sxe2_cmd_params *cmd, u32 opc, void *req_data,
+ u32 req_len, u16 vf_idx, bool no_resp)
+{
+ cmd->opcode = opc;
+ cmd->req_data = req_data;
+ cmd->req_len = (u16)req_len;
+ cmd->vf_idx = vf_idx;
+ cmd->no_resp = no_resp;
+
+ sxe2_trace_id_alloc(&cmd->trace_id);
+}
+
+STATIC void sxe2_mbx_msg_reply_params_fill(struct sxe2_cmd_params *cmd, u32 opc,
+ void *req_data, u32 req_len, u16 vf_idx,
+ u64 session_id, s32 err_code)
+{
+ cmd->opcode = opc;
+ cmd->req_data = req_data;
+ cmd->req_len = (u16)req_len;
+ cmd->vf_idx = vf_idx;
+ cmd->session_id = session_id;
+ cmd->err_code = err_code;
+ cmd->no_resp = true;
+
+ sxe2_trace_id_alloc(&cmd->trace_id);
+
+ LOG_DEBUG("vf_id:%d opcode:0x%x req_len:%u session_id:0x%llx err_code:%d.\n",
+ vf_idx, opc, req_len, session_id, err_code);
+}
+
+STATIC s32 sxe2_ver_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_vf_ver_msg pf_ver = {0};
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ struct sxe2_vf_ver_msg *ver_msg =
+ (struct sxe2_vf_ver_msg *)(msg_info->buf +
+ SXE2VF_MBX_DATA_OFFSET(msg_info->buf));
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ s32 ret;
+
+ vf->vf_ver.major = le16_to_cpu(ver_msg->major);
+ vf->vf_ver.minor = le16_to_cpu(ver_msg->minor);
+
+ pf_ver.major = cpu_to_le16(SXE2_VF_VERSION_MAJOR);
+ pf_ver.minor = cpu_to_le16(SXE2_VF_VERSION_MINOR);
+
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, &pf_ver,
+ SXE2_CALC_RESP_LEN(pf_ver, cmd_hdr->tran_out_len),
+ vf->vf_idx, session_id, 0);
+ ret = sxe2_mbx_msg_reply(adapter, &params);
+
+ LOG_INFO_BDF("rcv vf:%u drv version:%d.%d reply pf version:%d.%d.(ret:%d)\n",
+ vf->vf_idx, le16_to_cpu(ver_msg->major),
+ le16_to_cpu(ver_msg->minor), SXE2_VF_VERSION_MAJOR,
+ SXE2_VF_VERSION_MINOR, ret);
+
+ return ret;
+}
+
+static s32 sxe2_reset_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 ret;
+ struct sxe2_adapter *adapter = vf->adapter;
+
+ ret = sxe2_reset_vf(adapter, vf->vf_idx, 0);
+
+ LOG_INFO_BDF("vf:%u reset request handle ret:%d\n", vf->vf_idx, ret);
+
+ return ret;
+}
+
+static u32 sxe2_speed_get(struct sxe2_vf_node *vf)
+{
+#if defined(SXE2_HARDWARE_ASIC)
+ return sxe2_get_link_speed(vf->adapter);
+#else
+ return SXE2_LINK_SPEED_UNKNOWN;
+#endif
+}
+
+static bool sxe2_vf_is_link_up(struct sxe2_vf_node *vf)
+{
+ if (vf->prop.link_forced)
+ return vf->prop.link_up;
+ else
+ return sxe2_get_pf_link_status(vf->adapter);
+}
+
+void sxe2_notify_vf_link_state(struct sxe2_vf_node *vf)
+{
+ struct sxe2_cmd_params params = {0};
+ struct
sxe2_vf_link_msg link_msg = {0}; + struct sxe2_adapter *adapter = vf->adapter; + u32 len = sizeof(struct sxe2_vf_link_msg); + + if (sxe2_vf_is_link_up(vf)) { + LOG_INFO("change link up.\n"); + link_msg.status = 1; + link_msg.speed = sxe2_speed_get(vf); + } else { + LOG_INFO("change link down.\n"); + link_msg.status = 0; + link_msg.speed = SXE2_LINK_SPEED_UNKNOWN; + } + + sxe2_mbx_msg_params_fill(¶ms, SXE2_VF_LINK_UPDATE_NOTIFY, &link_msg, len, + vf->vf_idx, true); + (void)sxe2_mbx_msg_send(adapter, ¶ms); +} + +static s32 sxe2_rxq_cfg_ena_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret; + u16 i = 0; + struct sxe2_vsi *vsi; + struct sxe2_vf_rxq_msg *rxq_msg = + (struct sxe2_vf_rxq_msg *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET( + msg_info->buf)); + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_vf_rxq_ctxt *ctxt; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_rxq_cfg_params *rxq_params; + u32 len; + + vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(rxq_msg->vsi_id)); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(rxq_msg->vsi_id)); + ret = -SXE2_VF_ERR_PARAM; + goto l_reply; + } + + len = sizeof(*rxq_params) + + sizeof(struct sxe2_ctxt_elem) * le16_to_cpu(rxq_msg->q_cnt); + + rxq_params = kzalloc(len, GFP_KERNEL); + if (!rxq_params) { + ret = -SXE2_VF_ERR_NO_MEMORY; + LOG_ERROR_BDF("rxq msg mem %uB alloc failed.\n", len); + goto l_reply; + } + + rxq_params->vsi_id = le16_to_cpu(rxq_msg->vsi_id); + rxq_params->q_cnt = le16_to_cpu(rxq_msg->q_cnt); + rxq_params->max_frame_size = le16_to_cpu(rxq_msg->max_frame_size); + for (i = 0; i < rxq_params->q_cnt; i++) { + ctxt = &rxq_msg->ctxt[i]; + rxq_params->cfg[i].queue_id = le16_to_cpu(ctxt->queue_id); + rxq_params->cfg[i].depth = le16_to_cpu(ctxt->depth); + rxq_params->cfg[i].buf_len = le16_to_cpu(ctxt->buf_len); + rxq_params->cfg[i].dma_addr = le64_to_cpu(ctxt->dma_addr); + rxq_params->cfg[i].keep_crc_en = ctxt->keep_crc_en; + rxq_params->cfg[i].lro_en = ctxt->lro_status; + } + + ret = sxe2_rxq_cfg_ena_common_handle(adapter, rxq_params); + + kfree(rxq_params); + +l_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +static s32 sxe2_mbx_txq_cfg_reply(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret = 0; + u32 i, len; + struct sxe2_vsi *vsi; + struct sxe2_vf_txq_ctxt *ctxt; + struct sxe2_cmd_params params = {0}; + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + struct sxe2_txq_ucmd_en_params *txq_params; + struct sxe2_vf_txq_ctxt_msg *req = + (struct sxe2_vf_txq_ctxt_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(req->vsi_id)); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(req->vsi_id)); + ret = -SXE2_VF_ERR_PARAM; + goto l_reply; + } + + len = sizeof(*txq_params) + + sizeof(struct sxe2_vf_txq_ctxt) * le16_to_cpu(req->q_cnt); + + txq_params = kzalloc(len, GFP_KERNEL); + if (!txq_params) { + LOG_ERROR_BDF("txq msg mem %uB alloc failed.\n", len); + ret = -SXE2_VF_ERR_NO_MEMORY; + goto l_reply; + } + + txq_params->q_cnt = le16_to_cpu(req->q_cnt); + txq_params->vsi_idx = le16_to_cpu(req->vsi_id); + for (i = 0; i < txq_params->q_cnt; i++) { + ctxt = &req->ctxs[i]; + txq_params->ctxts[i].depth = 
le16_to_cpu(ctxt->depth); + txq_params->ctxts[i].dma_addr = le64_to_cpu(ctxt->dma_addr); + txq_params->ctxts[i].queue_id = le16_to_cpu(ctxt->queue_id); + txq_params->ctxts[i].sched_mode = le32_to_cpu(ctxt->sched_mode); + } + ret = sxe2_txq_cfg_ena_common_handle(adapter, txq_params); + if (ret) { + LOG_ERROR_BDF("usr vsi[%d] txq[%d] num[%u] cfg failed\n", + txq_params->vsi_idx, txq_params->ctxts[0].queue_id, + txq_params->q_cnt); + } else { + LOG_INFO_BDF("usr vsi[%d] txq[%d] num[%u] cfg success\n", + txq_params->vsi_idx, txq_params->ctxts[0].queue_id, + txq_params->q_cnt); + } + + kfree(txq_params); + +l_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, le64_to_cpu(cmd_hdr->session_id), + ret); + (void)sxe2_mbx_msg_reply(adapter, ¶ms); + return 0; +} + +STATIC s32 sxe2_res_get_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_fw_ver_msg *fw_ver = &adapter->hw.fw_ver; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vf_vfres_msg *vfres = NULL; + struct sxe2_vf_vsi_res *vsi_res = NULL; + struct sxe2_vf_vfres_msg_req *vfreq = + (struct sxe2_vf_vfres_msg_req + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u8 i; + + vfres = kzalloc(sizeof(*vfres), GFP_KERNEL); + if (!vfres) { + ret = -SXE2_VF_ERR_NO_MEMORY; + goto err; + } + + vsi_res = vfres->vsi_res; + + if (vfreq->support_sw_stats) + set_bit(SXE2_FLAG_VFSWSTATS_ENABLE, adapter->flags); + else + clear_bit(SXE2_FLAG_VFSWSTATS_ENABLE, adapter->flags); + + vfres->num_vsis = cpu_to_le16(SXE2_VF_VSI_CNT_USED); + + for (i = 0; i < vfres->num_vsis; i++) + vsi_res[i].vsi_id = cpu_to_le16(vf->vsi_id[i]); + + vfres->rxft_cap.fnav_space_bsize = + cpu_to_le16(adapter->caps_ctxt.fnav_space_bsize); + vfres->rxft_cap.fnav_space_gsize = 0; + vfres->rxft_cap.rss_key_size = cpu_to_le16(SXE2_RSS_HASH_KEY_SIZE); + vfres->rxft_cap.rss_lut_size = cpu_to_le16(SXE2_RSS_LUT_SIZE_64); + vfres->rxft_cap.rss_lut_type = cpu_to_le16(SXE2_RSS_VSI_LUT); + + vfres->q_cnt = cpu_to_le16(adapter->vf_ctxt.q_cnt); + vfres->max_vectors = cpu_to_le16(adapter->vf_ctxt.irq_cnt); + vfres->itr_gran = cpu_to_le16(adapter->hw.hw_cfg.itr_gran); + ether_addr_copy(vfres->addr, vf->mac_addr.addr); + vfres->port_vlan_exsit = (u8)sxe2_port_vlan_is_exist(vf); + vfres->is_switchdev = + (u8)test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags); + vfres->max_vlan_cnt = cpu_to_le16(vf->prop.trusted ? 
VLAN_N_VID + : SXE2_VF_VLAN_CNT_MAX); + + vfres->pf_cnt = adapter->aux_ctxt.cdev_info.pf_cnt; + vfres->fw_ver.main_version_id = fw_ver->main_version_id; + vfres->fw_ver.sub_version_id = fw_ver->sub_version_id; + vfres->fw_ver.fix_version_id = fw_ver->fix_version_id; + vfres->fw_ver.build_id = fw_ver->build_id; + vfres->tm_layers = 3; + vfres->cap_flags = SXE2_VF_OFFLOAD_L2 | SXE2_VF_OFFLOAD_VLAN | + SXE2_VF_OFFLOAD_IPSEC | SXE2_VF_OFFLOAD_PTP | + SXE2_VF_OFFLOAD_TM; + + vfres->cap_flags |= SXE2_VF_OFFLOAD_RSS; + + vfres->cap_flags |= SXE2_VF_OFFLOAD_FNAV; + vfres->parent_pfid = adapter->pf_idx; + vfres->parent_portid = adapter->port_idx; + vfres->vf_id_in_dev = cpu_to_le16(vf->vf_idx + adapter->vf_ctxt.vfid_base); + + if (sxe2_txsch_is_vf_vsi_agg_mode(adapter)) { + vfres->vf_txsch_cap.layer_cap = 3; + vfres->vf_txsch_cap.prio_num = 8; + vfres->vf_txsch_cap.tm_mid_node_num = 8; + } else { + vfres->vf_txsch_cap.layer_cap = 3; + vfres->vf_txsch_cap.prio_num = 4; + vfres->vf_txsch_cap.tm_mid_node_num = 4; + } + + set_bit(SXE2_VF_STATE_ACTIVE, vf->states); + + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, (void *)vfres, + SXE2_CALC_RESP_LEN(*vfres, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, 0); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + kfree(vfres); + goto l_end; + +err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + +l_end: + return ret; +} + +STATIC void sxe2_vsi_stats_to_le(struct sxe2_vsi *vsi, + struct sxe2_vf_vsi_hw_stats *stats) +{ + struct sxe2_vsi_hw_stats *new_stats = &vsi->vsi_stats.vsi_hw_stats; + + stats->rx_vsi_unicast_packets = + cpu_to_le64(new_stats->rx_vsi_unicast_packets); + stats->rx_vsi_bytes = cpu_to_le64(new_stats->rx_vsi_bytes); + stats->tx_vsi_unicast_packets = + cpu_to_le64(new_stats->tx_vsi_unicast_packets); + stats->tx_vsi_bytes = cpu_to_le64(new_stats->tx_vsi_bytes); + stats->rx_vsi_multicast_packets = + cpu_to_le64(new_stats->rx_vsi_multicast_packets); + stats->tx_vsi_multicast_packets = + cpu_to_le64(new_stats->tx_vsi_multicast_packets); + stats->rx_vsi_broadcast_packets = + cpu_to_le64(new_stats->rx_vsi_broadcast_packets); + stats->tx_vsi_broadcast_packets = + cpu_to_le64(new_stats->tx_vsi_broadcast_packets); +} + +STATIC void sxe2_hw_vsi_stats_clear(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_hw_stats *stats = &vsi->vsi_stats.vsi_hw_stats; + + stats->rx_vsi_unicast_packets = 0; + stats->rx_vsi_bytes = 0; + stats->tx_vsi_unicast_packets = 0; + stats->tx_vsi_bytes = 0; + stats->rx_vsi_multicast_packets = 0; + stats->tx_vsi_multicast_packets = 0; + stats->rx_vsi_broadcast_packets = 0; + stats->tx_vsi_broadcast_packets = 0; +} + +STATIC s32 sxe2_stats_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vsi *vsi; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_vsi_res *res_msg = + (struct sxe2_vf_vsi_res *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(res_msg->vsi_id))) { + ret = -SXE2_VF_ERR_PARAM; + LOG_DEV_ERR("vsi id:%d is invalid.\n", le16_to_cpu(res_msg->vsi_id)); + goto err; + } + + vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(res_msg->vsi_id)); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(res_msg->vsi_id)); + ret = 
-SXE2_VF_ERR_PARAM; + goto err; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_hw_vsi_stats_clear(vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); +err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + LOG_DEV_INFO("vf stats clear. vsi id:%d ret:%d\n", + le16_to_cpu(res_msg->vsi_id), ret); + return ret; +} + +STATIC s32 sxe2_stats_get_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vsi *vsi; + struct sxe2_vf_vsi_sw_stats *vf_sw_stats; + struct sxe2_vsi_sw_stats *vsi_sw_stats; + struct sxe2_cmd_params params = {0}; + struct sxe2_vf_hw_stats_rsp vf_stats_rsp = {{0}, 0}; + struct sxe2_vf_vsi_hw_stats *vf_hw_stats = &vf_stats_rsp.hw_stats; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_sw_stats *res_msg = + (struct sxe2_vf_sw_stats *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(res_msg->vsi_id))) { + ret = -SXE2_VF_ERR_PARAM; + LOG_DEV_ERR("vsi id:%d is invalid.\n", le16_to_cpu(res_msg->vsi_id)); + goto err; + } + + vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(res_msg->vsi_id)); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(res_msg->vsi_id)); + ret = -SXE2_VF_ERR_PARAM; + goto err; + } + + vf_sw_stats = &res_msg->sw_stats; + vsi_sw_stats = &vsi->vsi_stats.vsi_sw_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi_sw_stats->rx_bytes = le64_to_cpu(vf_sw_stats->rx_bytes); + vsi_sw_stats->rx_packets = le64_to_cpu(vf_sw_stats->rx_packets); + vsi_sw_stats->tx_bytes = le64_to_cpu(vf_sw_stats->tx_bytes); + vsi_sw_stats->tx_packets = le64_to_cpu(vf_sw_stats->tx_packets); + + sxe2_hw_vsi_stats_update(vsi); + + sxe2_vsi_stats_to_le(vsi, vf_hw_stats); + + mutex_unlock(&adapter->vsi_ctxt.lock); + + sxe2_fnav_match_stats_get(adapter, le16_to_cpu(res_msg->fnav_stats_idx), + vsi->id_in_pf); + vf_stats_rsp.fnav_match = + cpu_to_le64(adapter->fnav_ctxt.fnav_stat_ctxt + .vsi_fnav_match[vsi->id_in_pf]); + +err: + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &vf_stats_rsp, + SXE2_CALC_RESP_LEN(vf_stats_rsp, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + return ret; +} + +STATIC s32 sxe2_stats_push_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vsi *vsi; + struct sxe2_vf_vsi_sw_stats *vf_sw_stats; + struct sxe2_vsi_sw_stats *vsi_sw_stats; + struct sxe2_cmd_params params = {0}; + struct sxe2_vf_hw_stats_rsp vf_stats_rsp = {{0}, 0}; + struct sxe2_vf_vsi_hw_stats *vf_hw_stats = &vf_stats_rsp.hw_stats; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_sw_stats *res_msg = + (struct sxe2_vf_sw_stats *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(res_msg->vsi_id))) { + ret = -SXE2_VF_ERR_PARAM; + LOG_DEV_ERR("vsi id:%d is invalid.\n", le16_to_cpu(res_msg->vsi_id)); + goto err; + } + + vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(res_msg->vsi_id)); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(res_msg->vsi_id)); + ret = -SXE2_VF_ERR_PARAM; + goto err; + } + + vf_sw_stats = &res_msg->sw_stats; + 
vsi_sw_stats = &vsi->vsi_stats.vsi_sw_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi_sw_stats->rx_bytes = le64_to_cpu(vf_sw_stats->rx_bytes); + vsi_sw_stats->rx_packets = le64_to_cpu(vf_sw_stats->rx_packets); + vsi_sw_stats->tx_bytes = le64_to_cpu(vf_sw_stats->tx_bytes); + vsi_sw_stats->tx_packets = le64_to_cpu(vf_sw_stats->tx_packets); + + sxe2_vsi_stats_to_le(vsi, vf_hw_stats); + + mutex_unlock(&adapter->vsi_ctxt.lock); + +err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, &vf_stats_rsp, + sizeof(vf_stats_rsp), vf->vf_idx, session_id, + ret); + + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + return ret; +} + +STATIC bool sxe2_vsi_txq_idx_is_valid(struct sxe2_vsi *vsi, u16 queue_idx) +{ + return (queue_idx < vsi->txqs.q_cnt); +} + +STATIC bool sxe2_vsi_rxq_idx_is_valid(struct sxe2_vsi *vsi, u16 queue_idx) +{ + return (queue_idx < vsi->rxqs.q_cnt); +} + +STATIC s32 sxe2_vsi_queue_idx_is_valid(struct sxe2_vsi *vsi, + struct sxe2_irq_data *irq_data, + struct sxe2_vf_irq_map *irq_map) +{ + struct sxe2_adapter *adapter = vsi->adapter; + u64 vsi_q_id, vsi_q_id_idx; + unsigned long qmap; + u16 queue_cnt = 0; + s32 ret = SXE2_VF_ERR_SUCCESS; + + qmap = le16_to_cpu(irq_map->txq_map); + queue_cnt = vsi->txqs.q_cnt; + for_each_set_bit(vsi_q_id_idx, &qmap, queue_cnt) { + vsi_q_id = vsi_q_id_idx; + + if (!sxe2_vsi_txq_idx_is_valid(vsi, (u16)vsi_q_id)) { + LOG_DEV_ERR("irq map tx queue:%llu failed.\n", vsi_q_id); + ret = -SXE2_VF_ERR_PARAM; + goto l_out; + } + } + + qmap = le16_to_cpu(irq_map->rxq_map); + queue_cnt = vsi->rxqs.q_cnt; + for_each_set_bit(vsi_q_id_idx, &qmap, queue_cnt) { + vsi_q_id = vsi_q_id_idx; + + if (!sxe2_vsi_rxq_idx_is_valid(vsi, (u16)vsi_q_id)) { + LOG_DEV_ERR("irq map rx queue:%llu failed.\n", vsi_q_id); + ret = -SXE2_VF_ERR_PARAM; + goto l_out; + } + } + +l_out: + return ret; +} + +STATIC void sxe2_irq_queue_cfg(struct sxe2_irq_data *irq_data, + struct sxe2_q_container *q_container, + struct sxe2_queue *queue, __le16 itr_idx) +{ + SXE2_BUG_ON((q_container->list.cnt == 0) && + (q_container->list.next)); + SXE2_BUG_ON((q_container->list.cnt != 0) && + (!q_container->list.next)); + + q_container->itr_idx = le16_to_cpu(itr_idx); + queue->irq_data = irq_data; + sxe2_queue_add(queue, &q_container->list); +} + +STATIC s32 sxe2_irq_map(struct sxe2_vsi *vsi, struct sxe2_irq_data *irq_data, + struct sxe2_vf_irq_map *irq_map) +{ + u64 vsi_q_id, vsi_q_id_idx; + unsigned long qmap; + u16 queue_cnt = 0; + s32 ret = SXE2_VF_ERR_SUCCESS; + + ret = sxe2_vsi_queue_idx_is_valid(vsi, irq_data, irq_map); + if (ret) + goto l_out; + + qmap = le16_to_cpu(irq_map->txq_map); + queue_cnt = vsi->txqs.q_cnt; + for_each_set_bit(vsi_q_id_idx, &qmap, queue_cnt) { + vsi_q_id = vsi_q_id_idx; + sxe2_irq_queue_cfg(irq_data, &irq_data->tx, vsi->txqs.q[vsi_q_id], + irq_map->txitr_idx); + } + + qmap = le16_to_cpu(irq_map->rxq_map); + queue_cnt = vsi->rxqs.q_cnt; + for_each_set_bit(vsi_q_id_idx, &qmap, queue_cnt) { + vsi_q_id = vsi_q_id_idx; + sxe2_irq_queue_cfg(irq_data, &irq_data->rx, vsi->rxqs.q[vsi_q_id], + irq_map->rxitr_idx); + } + +l_out: + return ret; +} + +STATIC s32 sxe2_irq_map_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = NULL; + struct sxe2_vf_irq_map *irq_map; + u16 i, num_irqs, irq_id; + struct sxe2_vf_irq_map_msg *irq_map_msg = + (struct sxe2_vf_irq_map_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); 
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u16 vsi_id = le16_to_cpu(irq_map_msg->vsi_id); + + num_irqs = le16_to_cpu(irq_map_msg->num_irqs); + + if (adapter->vf_ctxt.irq_cnt < num_irqs || !num_irqs) { + ret = -SXE2_VF_ERR_PARAM; + LOG_INFO_BDF("num_irqs:%d invalid.\n", num_irqs); + goto err; + } + + vsi = sxe2_vf_vsi_get(vf, vsi_id); + if (!vsi) { + ret = -SXE2_VF_ERR_PARAM; + LOG_INFO_BDF("vsi_id:%d vsi null.\n", vsi_id); + goto err; + } + + sxe2_vsi_queues_irqs_unmap(vsi); + + for (i = 0; i < num_irqs; i++) { + struct sxe2_irq_data *irq_data; + + irq_map = &irq_map_msg->irq_maps[i]; + + irq_id = le16_to_cpu(irq_map->irq_id); + if (irq_id >= vsi->irqs.cnt) { + ret = -SXE2_VF_ERR_PARAM; + LOG_INFO_BDF("irq_id:%d exceed vsi:%d irq cnt:%d.\n", irq_id, + vsi_id, vsi->irqs.cnt); + goto err; + } + + irq_data = vsi->irqs.irq_data[i]; + if (!irq_data) { + ret = -SXE2_VF_ERR_PARAM; + goto err; + } + + ret = sxe2_irq_map(vsi, irq_data, irq_map); + if (ret) { + LOG_DEV_ERR("vsi irq map failed.ret:%u\n", ret); + goto err; + } + sxe2_vsi_irqs_setup(vsi); + } +err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + return ret; +} + +STATIC s32 sxe2_irq_unmap_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi; + struct sxe2_vf_irq_unmap_msg *irq_unmap_msg = + (struct sxe2_vf_irq_unmap_msg *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u16 vsi_id = le16_to_cpu(irq_unmap_msg->vsi_id); + + vsi = sxe2_vf_vsi_get(vf, vsi_id); + if (!vsi) { + ret = -SXE2_VF_ERR_PARAM; + LOG_INFO_BDF("vsi_id:%d vsi null.\n", vsi_id); + goto err; + } + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, vsi_id)) { + ret = -SXE2_VF_ERR_PARAM; + LOG_DEV_ERR("vsi id:%d is invalid.\n", vsi_id); + goto err; + } + + sxe2_vsi_queues_irqs_unmap(vsi); + + sxe2_vsi_irqs_release(vsi); + +err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + return ret; +} + +static bool sxe2_addr_msg_is_valid(struct sxe2_vf_node *vf, + struct sxe2_vf_addr_msg *addr_msg, bool add) +{ + int addr_msg_has_pf_mac = 0; + u32 i = 0; + struct sxe2_adapter *adapter = vf->adapter; + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(addr_msg->vsi_id))) { + LOG_ERROR("addr msg invalid vsi_id:%u correct vsi_id_eth:%u\t" + "vsi_id_dpdk:%u.\n", + le16_to_cpu(addr_msg->vsi_id), + vf->vsi_id[SXE2_VF_TYPE_ETH], + vf->vsi_id[SXE2_VF_TYPE_DPDK]); + return false; + } + + if (add) { + if (!is_zero_ether_addr(vf->mac_addr.addr)) { + for (i = 0; i < addr_msg->addr_cnt; i++) { + if (ether_addr_equal(addr_msg->elem[i].addr, + vf->mac_addr.addr)) + addr_msg_has_pf_mac++; + } + } + + if (!sxe2_vf_is_trusted(vf) && + ((addr_msg->addr_cnt + vf->mac_cnt - addr_msg_has_pf_mac) > + SXE2_VF_MACADDR_CNT_MAX)) { + LOG_DEV_ERR("vf:%u mac addr most add:%u, set vf to trusted\t" + "mode to add more\n", + vf->vf_idx, + (SXE2_VF_MACADDR_CNT_MAX - vf->mac_cnt)); + return false; + } + } + + return true; +} + +static s32 sxe2_vf_addr_add(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi, + struct sxe2_vf_addr *addr) +{ + struct sxe2_adapter *adapter = vf->adapter; + 
u8 *mac_addr = addr->addr;
+ s32 ret;
+
+ if (is_unicast_ether_addr(mac_addr) &&
+ !ether_addr_equal(mac_addr, vf->mac_addr.addr) &&
+ !sxe2_vf_set_mac_is_allow(vf)) {
+ LOG_DEV_ERR("untrusted vf modifying mac addr from pf is not permitted\n");
+ return -EPERM;
+ }
+ if (!vsi) {
+ LOG_DEV_ERR("vsi is NULL\n");
+ return -EINVAL;
+ }
+
+ ret = sxe2_mac_rule_add(vsi, mac_addr);
+ if (ret == -EEXIST) {
+ LOG_WARN_BDF("MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_idx);
+ ret = SXE2_VF_ERR_SUCCESS;
+ } else if (ret) {
+ LOG_DEV_ERR("Failed to add MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_idx, ret);
+ goto l_out;
+ } else {
+ if (is_unicast_ether_addr(mac_addr)) {
+ ret = sxe2_mac_spoofchk_ext_rule_add(adapter,
+ vsi->idx_in_dev,
+ mac_addr);
+ if (ret) {
+ LOG_DEV_ERR("Failed to add mac spoof ext rule %pM \t"
+ "for VF %d vsi %u, error %d\n",
+ mac_addr, vf->vf_idx, vsi->idx_in_dev,
+ ret);
+ (void)sxe2_mac_rule_del(adapter, vsi->idx_in_dev,
+ mac_addr);
+ return ret;
+ }
+ }
+ vf->mac_cnt++;
+ }
+
+ LOG_INFO_BDF("vf:%u mac:%pM added mac_cnt:%u\n", vf->vf_idx, mac_addr,
+ vf->mac_cnt);
+
+ if (vsi->type == SXE2_VSI_T_VF && addr->type == SXE2_VF_MAC_TYPE_P)
+ ether_addr_copy(vf->mac_addr.addr, mac_addr);
+l_out:
+ return ret;
+}
+
+static s32 sxe2_vf_addr_del(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi,
+ struct sxe2_vf_addr *addr)
+{
+ struct sxe2_adapter *adapter = vf->adapter;
+ u8 *mac_addr = addr->addr;
+ s32 ret;
+
+ if (!sxe2_vf_set_mac_is_allow(vf) &&
+ ether_addr_equal(mac_addr, vf->mac_addr.addr))
+ return 0;
+
+ if (!vsi) {
+ LOG_DEV_INFO("Ignore invalid vsi, vsi is NULL\n");
+ return 0;
+ }
+
+ ret = sxe2_mac_rule_del(adapter, vsi->idx_in_dev, mac_addr);
+ if (ret == -ENOENT) {
+ LOG_WARN_BDF("MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_idx);
+ return -ENOENT;
+ } else if (ret) {
+ LOG_DEV_ERR("Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_idx, ret);
+ return -EIO;
+ }
+
+ if (is_unicast_ether_addr(mac_addr)) {
+ ret = sxe2_mac_spoofchk_ext_rule_del(adapter, vsi->idx_in_dev,
+ mac_addr);
+ if (ret) {
+ LOG_DEV_ERR("Failed to del MAC spoof rule %pM for VF %d,\t"
+ "error %d\n",
+ mac_addr, vf->vf_idx, ret);
+ ret = sxe2_mac_rule_add(vsi, mac_addr);
+ if (ret)
+ LOG_DEV_ERR("Failed to add MAC %pM for VF %d, error\t"
+ "%d\n",
+ mac_addr, vf->vf_idx, ret);
+ }
+ }
+
+ vf->mac_cnt--;
+ LOG_INFO_BDF("vf:%u mac:%pM del mac_cnt:%u\n", vf->vf_idx, mac_addr,
+ vf->mac_cnt);
+
+ return ret;
+}
+
+static s32 sxe2_addr_msg_handle(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info, bool add)
+{
+ s32 (*sxe2_vf_addr_action)(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi,
+ struct sxe2_vf_addr *addr);
+ s32 h_ret = SXE2_VF_ERR_SUCCESS;
+ struct sxe2_vf_addr_msg *msg =
+ (struct sxe2_vf_addr_msg *)(msg_info->buf +
+ SXE2VF_MBX_FULL_HDR_SIZE);
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ struct sxe2_vsi *vsi;
+ u32 i = 0;
+ s32 ret;
+
+ if (add)
+ sxe2_vf_addr_action = sxe2_vf_addr_add;
+ else
+ sxe2_vf_addr_action = sxe2_vf_addr_del;
+
+ if (!sxe2_addr_msg_is_valid(vf, msg, add)) {
+ h_ret = -EPERM;
+ goto l_msg_reply;
+ }
+
+ if (msg->is_user) {
+ for (i = 0; i < msg->addr_cnt; i++) {
+ u8 *mac_addr = msg->elem[i].addr;
+
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+
+ ret = sxe2_vf_addr_action(vf, vf->dpdk_vf_vsi,
+ &msg->elem[i]);
+ if (ret == -EEXIST || ret == -ENOENT)
{ + continue; + } else if (ret) { + h_ret = ret; + break; + } + } + } else { + vsi = sxe2_vf_vsi_get(vf, vf->vsi_id[SXE2_VF_TYPE_ETH]); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", + vf->vsi_id[SXE2_VF_TYPE_ETH]); + h_ret = -SXE2_VF_ERR_PARAM; + goto l_msg_reply; + } + for (i = 0; i < msg->addr_cnt; i++) { + u8 *mac_addr = msg->elem[i].addr; + + if (is_broadcast_ether_addr(mac_addr) || + is_zero_ether_addr(mac_addr)) + continue; + + ret = sxe2_vf_addr_action(vf, vsi, &msg->elem[i]); + if (ret == -EEXIST || ret == -ENOENT) { + continue; + } else if (ret) { + h_ret = ret; + break; + } + } + } + +l_msg_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, h_ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("add:%u mac addr cnt:%u complete cnt:%u ret:%d.\n", + add, msg->addr_cnt, i, ret); + return ret; +} + +static s32 sxe2_addr_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + return sxe2_addr_msg_handle(vf, msg_info, true); +} + +static s32 sxe2_addr_del_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + return sxe2_addr_msg_handle(vf, msg_info, false); +} + +static s32 sxe2_addr_update_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_addr_update_msg *update_msg; + s32 ret; + u16 old_vsi, new_vsi; + u8 *mac_addr; + struct sxe2_vsi *vsi; + + update_msg = (struct sxe2_vf_addr_update_msg *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + + if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(update_msg->vsi_id))) { + LOG_ERROR("addr msg invalid vsi_id:%u correct vsi_id:%u.\n", + le16_to_cpu(update_msg->vsi_id), + vf->vsi_id[SXE2_VF_TYPE_ETH]); + ret = -EPERM; + goto l_msg_reply; + } + + if (update_msg->to_user) { + old_vsi = vf->vsi_id[SXE2_VF_TYPE_ETH]; + new_vsi = vf->vsi_id[SXE2_VF_TYPE_DPDK]; + } else { + old_vsi = vf->vsi_id[SXE2_VF_TYPE_DPDK]; + new_vsi = vf->vsi_id[SXE2_VF_TYPE_ETH]; + } + + mac_addr = update_msg->addr; + ret = sxe2_mac_rule_update(adapter, mac_addr, old_vsi, new_vsi); + if (ret) { + LOG_ERROR_BDF("update mac rule %pM, failed, to_user:%u, \t" + "old_vsi:%u,\t" + "new_vsi:%u, ret:%d.\n", + mac_addr, update_msg->to_user, old_vsi, new_vsi, ret); + goto l_msg_reply; + } + + if (is_unicast_ether_addr(mac_addr)) { + vsi = sxe2_vf_vsi_get(vf, vf->vsi_id[SXE2_VF_TYPE_ETH]); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", + vf->vsi_id[SXE2_VF_TYPE_ETH]); + ret = -SXE2_VF_ERR_PARAM; + goto l_msg_reply; + } + + ret = sxe2_mac_spoof_rule_update(vsi, vf->dpdk_vf_vsi, mac_addr, + update_msg->to_user); + if (ret) { + LOG_ERROR_BDF("update spoofchk rule %pM, failed, \t" + "to_user:%u, old_vsi:%u,\t" + "new_vsi:%u, ret:%d.\n", + mac_addr, update_msg->to_user, old_vsi, + new_vsi, ret); + (void)sxe2_mac_rule_update(adapter, mac_addr, new_vsi, + old_vsi); + } + } + +l_msg_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("update mac rule mbx msg reply failed ret:%d.\n", ret); + return ret; +} + +static s32 sxe2_promisc_update_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + 
struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ struct sxe2_vf_promisc_update_msg *update_msg;
+ s32 ret;
+ u16 old_vsi, new_vsi;
+
+ update_msg = (struct sxe2_vf_promisc_update_msg *)(msg_info->buf +
+ SXE2VF_MBX_FULL_HDR_SIZE);
+
+ if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(update_msg->vsi_id))) {
+ LOG_ERROR("addr msg invalid vsi_id:%u correct vsi_id:%u.\n",
+ le16_to_cpu(update_msg->vsi_id),
+ vf->vsi_id[SXE2_VF_TYPE_ETH]);
+ ret = -EPERM;
+ goto l_msg_reply;
+ }
+
+ if (update_msg->to_user) {
+ old_vsi = vf->vsi_id[SXE2_VF_TYPE_ETH];
+ new_vsi = vf->vsi_id[SXE2_VF_TYPE_DPDK];
+ } else {
+ old_vsi = vf->vsi_id[SXE2_VF_TYPE_DPDK];
+ new_vsi = vf->vsi_id[SXE2_VF_TYPE_ETH];
+ }
+
+ if (update_msg->is_promisc) {
+ ret = sxe2_promisc_rule_update(adapter, old_vsi, new_vsi);
+ if (ret) {
+ LOG_ERROR_BDF("update promisc rule failed, to_user:%u, \t"
+ "old_vsi:%u, \t"
+ "new_vsi:%u, ret:%d.\n",
+ update_msg->to_user, old_vsi, new_vsi, ret);
+ }
+ } else {
+ ret = sxe2_allmulti_rule_update(adapter, old_vsi, new_vsi);
+ if (ret) {
+ LOG_ERROR_BDF("update allmulti rule failed, to_user:%u, \t"
+ "old_vsi:%u, \t"
+ "new_vsi:%u, ret:%d.\n",
+ update_msg->to_user, old_vsi, new_vsi, ret);
+ }
+ }
+
+l_msg_reply:
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, ret);
+ ret = sxe2_mbx_msg_reply(adapter, &params);
+ if (ret)
+ LOG_ERROR_BDF("update promisc or allmulti rule mbx msg reply \t"
+ "failed ret:%d.\n",
+ ret);
+ return ret;
+}
+
+static s32 sxe2_vf_vlan_add(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi,
+ struct sxe2_vf_vlan *vlan_info)
+{
+ struct sxe2_adapter *adapter = vf->adapter;
+ s32 ret;
+ struct sxe2_vlan vlan = {0};
+
+ vlan.tpid = le16_to_cpu(vlan_info->tpid);
+ vlan.vid = le16_to_cpu(vlan_info->vid);
+
+ if (vf->vlan_info.vlan_cnt >=
+ (vf->prop.trusted ?
VLAN_N_VID
+ : SXE2_VF_VLAN_CNT_MAX))
+ return -EIO;
+ ret = sxe2_vlan_rule_add(vsi, &vlan);
+ if (ret == -EEXIST) {
+ ret = 0;
+ } else if (ret) {
+ LOG_DEV_ERR("Failed to add vlan tpid:0x%x vid:%u for VF %d, error \t"
+ "%d\n",
+ vlan.tpid, vlan.vid, vf->vf_idx, ret);
+ goto l_out;
+ } else {
+ if (vsi->type != SXE2_VSI_T_DPDK_VF || vlan.vid != 0)
+ vf->vlan_info.vlan_cnt++;
+ }
+
+ LOG_INFO_BDF("vf:%u vlan tpid:0x%x vid:0x%x added vlan_cnt:%u.\n",
+ vf->vf_idx, vlan.tpid, vlan.vid, vf->vlan_info.vlan_cnt);
+l_out:
+ return ret;
+}
+
+static s32 sxe2_vf_vlan_del(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi,
+ struct sxe2_vf_vlan *vlan_info)
+{
+ struct sxe2_adapter *adapter = vf->adapter;
+ s32 ret;
+ struct sxe2_vlan vlan = {0};
+
+ vlan.tpid = le16_to_cpu(vlan_info->tpid);
+ vlan.vid = le16_to_cpu(vlan_info->vid);
+
+ ret = sxe2_vlan_rule_del(adapter, vsi->idx_in_dev, &vlan);
+ if (ret == -ENOENT) {
+ ret = 0;
+ } else if (ret) {
+ LOG_DEV_ERR("Failed to del vlan tpid:0x%x vid:%u for VF %d, error \t"
+ "%d\n",
+ vlan.tpid, vlan.vid, vf->vf_idx, ret);
+ goto l_out;
+ } else {
+ if (vsi->type != SXE2_VSI_T_DPDK_VF || vlan.vid != 0)
+ vf->vlan_info.vlan_cnt--;
+ }
+
+ LOG_INFO_BDF("vf:%u vlan tpid:0x%x vid:0x%x deleted vlan_cnt:%u.\n",
+ vf->vf_idx, vlan.tpid, vlan.vid, vf->vlan_info.vlan_cnt);
+
+l_out:
+ return ret;
+}
+
+static s32 sxe2_user_vlan_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 (*sxe2_vf_vlan_action)(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi,
+ struct sxe2_vf_vlan *vlan);
+ s32 h_ret = SXE2_VF_ERR_SUCCESS;
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ struct sxe2_vf_user_vlan_msg *msg =
+ (struct sxe2_vf_user_vlan_msg *)(msg_info->buf +
+ SXE2VF_MBX_FULL_HDR_SIZE);
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ s32 ret;
+
+ if (msg->is_add && sxe2_port_vlan_is_exist(vf)) {
+ LOG_WARN_BDF("vf %u has set port vlan, adding vlan rules is not supported.\n",
+ vf->vf_idx);
+ h_ret = SXE2_VF_ERR_SUCCESS;
+ goto l_msg_reply;
+ }
+
+ if (msg->is_add)
+ sxe2_vf_vlan_action = sxe2_vf_vlan_add;
+ else
+ sxe2_vf_vlan_action = sxe2_vf_vlan_del;
+
+ ret = sxe2_vf_vlan_action(vf, vf->dpdk_vf_vsi, &msg->vlan);
+ if (ret != -EEXIST && ret != -ENOENT)
+ h_ret = ret;
+
+l_msg_reply:
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, h_ret);
+ ret = sxe2_mbx_msg_reply(adapter, &params);
+ if (ret)
+ LOG_ERROR_BDF("add:%u, vid:%u, tpid:%u, ret:%d.\n", msg->is_add,
+ msg->vlan.vid, msg->vlan.tpid, ret);
+ return ret;
+}
+
+static s32 sxe2_repr_addr_update_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 h_ret = -EPERM;
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ s32 ret;
+
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, h_ret);
+ ret = sxe2_mbx_msg_reply(adapter, &params);
+ if (ret)
+ LOG_ERROR_BDF("repr update mac rule mbx msg \t"
+ "reply failed ret:%d.\n",
+ ret);
+ return ret;
+}
+
+static s32 sxe2_repr_promisc_update_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 h_ret = -EPERM;
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64
session_id = le64_to_cpu(cmd_hdr->session_id);
+ s32 ret;
+
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, h_ret);
+ ret = sxe2_mbx_msg_reply(adapter, &params);
+ if (ret)
+ LOG_ERROR_BDF("repr update promisc rule mbx msg \t"
+ "reply failed ret:%d.\n",
+ ret);
+ return ret;
+}
+
+static bool sxe2_qps_dis_msg_is_valid(struct sxe2_vf_node *vf,
+ struct sxe2_vf_qps_dis_msg *dis_msg)
+{
+ struct sxe2_vsi *vsi;
+ struct sxe2_adapter *adapter = vf->adapter;
+
+ vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(dis_msg->vsi_id));
+ if (!vsi) {
+ LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(dis_msg->vsi_id));
+ return false;
+ }
+
+ if ((le16_to_cpu(dis_msg->qps_cnt) > vsi->rxqs.q_cnt) || !dis_msg->qps_cnt) {
+ LOG_ERROR_BDF("msg qps cnt:%u invalid max:%u.\n", dis_msg->qps_cnt,
+ vsi->rxqs.q_cnt);
+ return false;
+ }
+
+ if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(dis_msg->vsi_id))) {
+ LOG_ERROR_BDF("msg vsi_id:%u invalid vf[%u] extract vsi_id:%u.\n",
+ le16_to_cpu(dis_msg->vsi_id), vf->vf_idx,
+ vsi->idx_in_dev);
+ return false;
+ }
+
+ return true;
+}
+
+STATIC s32 sxe2_queues_dis_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 ret = 0;
+ struct sxe2_vsi *vsi = NULL;
+ struct sxe2_vf_qps_dis_msg *dis_msg =
+ (struct sxe2_vf_qps_dis_msg
+ *)(msg_info->buf +
+ SXE2VF_MBX_DATA_OFFSET(msg_info->buf));
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+
+ if (!sxe2_qps_dis_msg_is_valid(vf, dis_msg)) {
+ ret = -SXE2_VF_ERR_PARAM;
+ goto l_err_reply;
+ }
+
+ vsi = sxe2_vf_vsi_get(vf, le16_to_cpu(dis_msg->vsi_id));
+ if (!vsi) {
+ LOG_ERROR_BDF("invalid vsi id:%d.\n", le16_to_cpu(dis_msg->vsi_id));
+ ret = -SXE2_VF_ERR_PARAM;
+ goto l_err_reply;
+ }
+
+ if (sxe2_txqs_stop(vsi)) {
+ ret = -SXE2_VF_ERR_PARAM;
+ LOG_ERROR_BDF("vsi:%u txqs disable failed.\n", vsi->idx_in_dev);
+ }
+
+ if (sxe2_rxqs_stop(vsi)) {
+ ret = -SXE2_VF_ERR_PARAM;
+ LOG_ERROR_BDF("vsi:%u rxqs disable failed.\n", vsi->idx_in_dev);
+ }
+
+l_err_reply:
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, ret);
+ (void)sxe2_mbx_msg_reply(adapter, &params);
+ return ret;
+}
+
+STATIC s32 sxe2_txq_dis_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 ret = 0;
+ struct sxe2_vf_q_stop_msg *req =
+ (struct sxe2_vf_q_stop_msg
+ *)(msg_info->buf +
+ SXE2VF_MBX_DATA_OFFSET(msg_info->buf));
+ struct sxe2_adapter *adapter = vf->adapter;
+ struct sxe2_cmd_params params = {0};
+ struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+ u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+ struct sxe2_txq_ucmd_dis_params txq_params;
+
+ txq_params.q_idx = le16_to_cpu(req->q_idx);
+ txq_params.vsi_id = le16_to_cpu(req->vsi_id);
+ txq_params.sched_mode = SXE2_UCMD_TXQ_MODE_DEFAULT;
+ ret = sxe2_txq_dis_common_handle(adapter, &txq_params);
+ if (ret)
+ LOG_ERROR_BDF("usr vsi[%d] txq[%d] dis failed\n", txq_params.vsi_id,
+ txq_params.q_idx);
+ else
+ LOG_INFO_BDF("usr vsi[%d] txq[%d] dis success\n", txq_params.vsi_id,
+ txq_params.q_idx);
+
+ sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+ vf->vf_idx, session_id, ret);
+ (void)sxe2_mbx_msg_reply(adapter, &params);
+ return ret;
+}
+
+STATIC s32 sxe2_rxq_dis_msg_func(struct sxe2_vf_node *vf,
+ struct sxe2_mbx_msg_info *msg_info)
+{
+ s32 ret = 0;
+ struct sxe2_vf_q_stop_msg *dis_msg =
(struct sxe2_vf_q_stop_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_rxq_dis_params dis_params = {}; + + dis_params.q_idx = le16_to_cpu(dis_msg->q_idx); + dis_params.vsi_id = le16_to_cpu(dis_msg->vsi_id); + + ret = sxe2_rxq_disable_common_handle(adapter, &dis_params); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + + return ret; +} + +STATIC s32 sxe2_ptp_get_time_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_ptp_clock_res res = {0}; + struct timespec64 hwts; + + if (!sxe2_ptp_primary_timer_read(adapter, &hwts)) { + LOG_ERROR_BDF("failed to read 1588 timer.\n"); + ret = -EIO; + goto l_end; + } + res.clock_ns = cpu_to_le32((u32)hwts.tv_nsec); + res.clock_s = cpu_to_le64((u64)hwts.tv_sec); +l_end: + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &res, + SXE2_CALC_RESP_LEN(res, cmd_hdr->tran_out_len), vf->vf_idx, + session_id, ret); + (void)sxe2_mbx_msg_reply(adapter, ¶ms); + return 0; +} + +static s32 sxe2_aux_rdma_msg_handler(struct sxe2_adapter *adapter, u16 vf_id, + u8 *msg, u16 len, u64 session_id) +{ + struct aux_core_dev_info *cdev_info = &adapter->aux_ctxt.cdev_info; + struct sxe2_auxiliary_drv *iadrv; + s32 ret = -ENODEV; + struct sxe2_cmd_params params = {0}; + + mutex_lock(&adapter->aux_ctxt.adev_mutex); + if (!cdev_info->adev) { + mutex_unlock(&adapter->aux_ctxt.adev_mutex); + LOG_WARN_BDF("adev null, vf:%u sid:0x%llx no need handler.\n", vf_id, + session_id); + return 0; + } + device_lock(&cdev_info->adev->dev); + iadrv = sxe2_rdma_aux_drv_get(cdev_info); + if (iadrv && iadrv->aux_ops.vc_receive) + ret = iadrv->aux_ops.vc_receive(cdev_info, vf_id, msg, len, + session_id); + device_unlock(&cdev_info->adev->dev); + + mutex_unlock(&adapter->aux_ctxt.adev_mutex); + if (ret) { + LOG_ERROR_BDF("failed to send message to rdma pf.\n"); + sxe2_mbx_msg_reply_params_fill(¶ms, SXE2_VF_RDMA, NULL, 0, vf_id, + session_id, ret); + return sxe2_mbx_msg_reply(adapter, ¶ms); + } + return ret; +} + +static s32 sxe2_rdma_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret; + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + ret = sxe2_aux_rdma_msg_handler(adapter, vf->vf_idx, msg_info->buf, + msg_info->msg_len, session_id); + + return ret; +} + +s32 sxe2_aux_reply_rdma_msg_to_vf(struct sxe2_adapter *adapter, u16 vf_id, u8 *msg, + u16 len, u64 session_id) +{ + struct sxe2_cmd_params params = {0}; + + sxe2_mbx_msg_reply_params_fill(¶ms, SXE2_VF_RDMA, msg, len, vf_id, + session_id, 0); + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +static s32 sxe2_qv_map_unmap_params_chk(const struct sxe2_vf_node *vf, + const struct aux_qvlist_info *qvlist) +{ + u32 i; + s32 ret = 0; + struct sxe2_adapter *adapter = vf->adapter; + u32 num_msix_per_vf = adapter->vf_ctxt.irq_cnt; + const struct aux_qv_info *qv_info; + + for (i = 0; i < qvlist->num_vectors; i++) 
{ + qv_info = &qvlist->qv_info[i]; + if (qv_info->v_idx >= num_msix_per_vf) + goto err; + + if (qv_info->ceq_idx == SXE2_RDMA_VCHNL_Q_INVALID_IDX && + qv_info->aeq_idx == SXE2_RDMA_VCHNL_Q_INVALID_IDX) + goto err; + if (qv_info->aeq_idx != SXE2_RDMA_VCHNL_Q_INVALID_IDX && + qv_info->aeq_idx >= num_msix_per_vf) + goto err; + } + goto end; +err: + LOG_INFO_BDF("rdma map params invalid.(vf:%d num_vectors:%d irq cnt:%d \t" + "v_idx:%d aeq_idx:%d ceq_idx:%d)\n", + vf->vf_idx, qvlist->num_vectors, num_msix_per_vf, + qv_info->v_idx, qv_info->aeq_idx, qv_info->ceq_idx); + ret = -SXE2_VF_ERR_PARAM; +end: + + return ret; +} + +STATIC s32 sxe2_qv_map_unmap_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + const struct sxe2_vf_ops *ops = vf->vf_ops; + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u32 i; + s32 ret = SXE2_VF_ERR_SUCCESS; + + struct aux_qvlist_info *qv_map_msg = + (struct aux_qvlist_info *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + ret = sxe2_qv_map_unmap_params_chk(vf, qv_map_msg); + if (ret) + goto reply; + + for (i = 0; i < qv_map_msg->num_vectors; i++) { + struct aux_qv_info *qv_info = &qv_map_msg->qv_info[i]; + + if (msg_info->opcode == SXE2_VF_QV_MAP) + ops->cfg_rdma_irq_map(vf, qv_info); + else + ops->clear_rdma_irq_map(vf, qv_info); + } + +reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +static s32 sxe2_vf_rdma_mgr_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vf_rdma_mgr_cmd_msg *auxmgr_msg = + (struct sxe2_vf_rdma_mgr_cmd_msg + *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + u8 *recv_msg; + u16 recv_len = (u16)auxmgr_msg->resv_len; + u32 opcode; + + recv_msg = kzalloc(recv_len, GFP_KERNEL); + if (!recv_msg) { + LOG_INFO_BDF("memory not enough! 
buffer is nullptr.\n"); + return -ENOMEM; + } + + opcode = auxmgr_msg->opcode; + + ret = sxe2_rdma_msg_send(adapter, opcode, auxmgr_msg->msg, + (u16)auxmgr_msg->msg_len, recv_msg, recv_len); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, (void *)recv_msg, + recv_len, vf->vf_idx, session_id, ret); + + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + kfree(recv_msg); + return ret; +} + +STATIC s32 sxe2_promisc_cfg_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_vsi *vsi; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_vf_promisc_msg *promisc_msg = + (struct sxe2_vf_promisc_msg *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + u32 promisc_flags = le32_to_cpu(promisc_msg->flags); + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (promisc_msg->is_user) + vsi = vf->dpdk_vf_vsi; + else + vsi = vf->vsi; + + if (!vsi) { + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + if (!sxe2_vf_is_trusted(vf)) { + LOG_INFO_BDF("Untrusted vf %d is attempting to configure \t" + "promiscuous mode\n", + vf->vf_idx); + goto l_end; + } + + if (promisc_flags & SXE2_VF_PROMISC_MULTICAST) { + ret = sxe2_allmulti_rule_add(vsi); + if (ret && ret != -EEXIST) + LOG_ERROR_BDF("add allmulti filter failed, ret %d\n", ret); + } else { + ret = sxe2_allmulti_rule_del(adapter, vsi->idx_in_dev); + if (ret && ret != -ENOENT) + LOG_ERROR_BDF("delete allmulti filter failed, ret %d\n", + ret); + } + + if (promisc_flags & SXE2_VF_PROMISC) { + ret = sxe2_promisc_rule_add(vsi); + if (ret && ret != -EEXIST) { + LOG_ERROR_BDF("add promisc filter failed, ret %d\n", ret); + goto l_end; + } + if (!sxe2_port_vlan_is_exist(vf)) + (void)sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, + false); + + } else { + ret = sxe2_promisc_rule_del(adapter, vsi->idx_in_dev); + if (ret && ret != -ENOENT) { + LOG_ERROR_BDF("delete promisc filter failed, ret %d\n", ret); + goto l_end; + } + if (!sxe2_port_vlan_is_exist(vf) && + (promisc_flags & SXE2_VF_VLAN_FILTER)) + (void)sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, + true); + } +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, 0); + + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +STATIC s32 sxe2_vlan_offload_cfg_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_vf_vlan_offload_cfg *vlan_cfg = + (struct sxe2_vf_vlan_offload_cfg + *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_hw *hw = &vf->adapter->hw; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + s32 ret = 0; + bool port_vlan_exist = sxe2_port_vlan_is_exist(vf); + + if ((vlan_cfg->ctag_strip_enable != SXE2_VF_VLAN_STATUS_INVALID && + sxe2_hw_desc_vlan_param_check(port_vlan_exist, true, ETH_P_8021Q)) || + (vlan_cfg->stag_strip_enable != SXE2_VF_VLAN_STATUS_INVALID && + sxe2_hw_desc_vlan_param_check(port_vlan_exist, true, ETH_P_8021AD)) || + (vlan_cfg->ctag_insert_enable != SXE2_VF_VLAN_STATUS_INVALID && + sxe2_hw_desc_vlan_param_check(port_vlan_exist, false, ETH_P_8021Q)) || + (vlan_cfg->stag_insert_enable != SXE2_VF_VLAN_STATUS_INVALID && + sxe2_hw_desc_vlan_param_check(port_vlan_exist, false, ETH_P_8021AD))) { + ret = -SXE2_VF_ERR_HANDLE_ERROR; + goto l_reply; + } + + if (vlan_cfg->ctag_strip_enable != 
SXE2_VF_VLAN_STATUS_INVALID) { + (void)sxe2_hw_desc_vlan_strip_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021Q, + port_vlan_exist, + vlan_cfg->ctag_strip_enable); + } + + if (vlan_cfg->stag_strip_enable != SXE2_VF_VLAN_STATUS_INVALID) { + (void)sxe2_hw_desc_vlan_strip_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021AD, + port_vlan_exist, + vlan_cfg->stag_strip_enable); + } + + if ((vlan_cfg->ctag_insert_enable != SXE2_VF_VLAN_STATUS_INVALID) || + (vlan_cfg->stag_insert_enable != SXE2_VF_VLAN_STATUS_INVALID)) { + if (!vlan_cfg->ctag_insert_enable) { + (void)sxe2_hw_desc_vlan_insert_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021Q, port_vlan_exist, + vlan_cfg->ctag_insert_enable); + } else if (!vlan_cfg->stag_insert_enable) { + (void)sxe2_hw_desc_vlan_insert_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021AD, port_vlan_exist, + vlan_cfg->stag_insert_enable); + } + + if (vlan_cfg->ctag_insert_enable) { + (void)sxe2_hw_desc_vlan_insert_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021Q, port_vlan_exist, + vlan_cfg->ctag_insert_enable); + } else if (vlan_cfg->stag_insert_enable) { + (void)sxe2_hw_desc_vlan_insert_switch(hw, + vf->vsi_id[SXE2_VF_TYPE_ETH], + ETH_P_8021AD, port_vlan_exist, + vlan_cfg->stag_insert_enable); + } + } +l_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, 0); + + ret = sxe2_mbx_msg_reply(vf->adapter, ¶ms); + + return ret; +} + +STATIC s32 sxe2_vlan_filter_cfg_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_vf_vlan_filter_cfg *filter_cfg = + (struct sxe2_vf_vlan_filter_cfg *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_cmd_params params = {0}; + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + s32 ret = 0; + u16 vsi_id_indev; + + if (filter_cfg->is_user) + vsi_id_indev = vf->vsi_id[SXE2_VF_TYPE_DPDK]; + else + vsi_id_indev = vf->vsi_id[SXE2_VF_TYPE_ETH]; + + ret = sxe2_vlan_filter_control(adapter, vsi_id_indev, + filter_cfg->ctag_filter_enable); + if (ret) + LOG_ERROR_BDF("vf:%u vlan filter cfg to %u fail.\n", vf->vf_idx, + filter_cfg->ctag_filter_enable); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, 0); + + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + + return ret; +} + +static bool sxe2_vlan_msg_is_valid(struct sxe2_vf_node *vf, + struct sxe2_vf_vlan_filter_msg *msg, bool add) +{ + return true; +} + +static s32 sxe2_vlan_msg_handle(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info, bool add) +{ + s32 (*sxe2_vf_vlan_action)(struct sxe2_vf_node *vf, struct sxe2_vsi *vsi, + struct sxe2_vf_vlan *vlan); + s32 h_ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + struct sxe2_vf_vlan_filter_msg *msg = + (struct sxe2_vf_vlan_filter_msg *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_vlan vlan = {0}; + u64 sesstion_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vsi *vsi; + u32 i = 0; + s32 ret; + + if (add && sxe2_port_vlan_is_exist(vf)) { + LOG_WARN_BDF("vf %u has set port vlan, not support add vlan rule.\n", + vf->vf_idx); + h_ret = SXE2_VF_ERR_SUCCESS; + goto l_msg_reply; + } + + if (add) + sxe2_vf_vlan_action = sxe2_vf_vlan_add; + else + sxe2_vf_vlan_action = sxe2_vf_vlan_del; + + if 
(!sxe2_vlan_msg_is_valid(vf, msg, add)) { + h_ret = -EINVAL; + goto l_msg_reply; + } + + vsi = sxe2_vf_vsi_get(vf, vf->vsi_id[SXE2_VF_TYPE_ETH]); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", vf->vsi_id[SXE2_VF_TYPE_ETH]); + h_ret = -SXE2_VF_ERR_PARAM; + goto l_msg_reply; + } + + for (i = 0; i < le16_to_cpu(msg->vlan_cnt); i++) { + vlan.tpid = le16_to_cpu(msg->elem[i].tpid); + vlan.vid = le16_to_cpu(msg->elem[i].vid); + if (vlan.vid == 0 && vlan.tpid == ETH_P_8021Q) + continue; + ret = sxe2_vf_vlan_action(vf, vsi, &msg->elem[i]); + if (ret == -EEXIST || ret == -ENOENT) { + continue; + } else if (ret) { + h_ret = ret; + break; + } + } + +l_msg_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, sesstion_id, h_ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("add:%u vlan addr cnt:%u complete cnt:%u ret:%d.\n", + add, msg->vlan_cnt, i, ret); + return ret; +} + +STATIC s32 sxe2_vlan_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + return sxe2_vlan_msg_handle(vf, msg_info, true); +} + +STATIC s32 sxe2_vlan_del_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + return sxe2_vlan_msg_handle(vf, msg_info, false); +} + +static s32 sxe2_link_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_link_msg link_msg; + s32 ret; + + link_msg.status = (u8)sxe2_vf_is_link_up(vf); + if (link_msg.status) + link_msg.speed = sxe2_speed_get(vf); + else + link_msg.speed = SXE2_LINK_SPEED_UNKNOWN; + + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &link_msg, + SXE2_CALC_RESP_LEN(link_msg, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, 0); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + + LOG_INFO_BDF("vf:%u link speed:%u link_up:%u.(ret:%d).\n", vf->vf_idx, + link_msg.speed, link_msg.status, ret); + + return ret; +} + +static s32 sxe2_vf_ethtool_info_get_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_msg_ethtool_info ethtool_info; + s32 ret; + struct flm_link_info_pasist pasist_info; + + (void)memset(ðtool_info, 0, sizeof(struct sxe2_msg_ethtool_info)); + + ret = sxe2_get_link_configure(adapter, ðtool_info.cfg); + if (ret) { + ethtool_info.cfg.optical_module.current_connection = + SXE2_FW_CONNECT_MDDE_UNKNOW; + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + } + + ret = sxe2_get_support_speed_ability(adapter, ðtool_info.ability); + if (ret) + LOG_ERROR_BDF("failed to get speed_ability, ret=%d\n", ret); + + pasist_info.speed = FLM_FW_SPEED_AUTO; + ret = sxe2_link_get_pasist_info(adapter, &pasist_info); + if (ret) + LOG_ERROR_BDF("failed to get speed_ability, ret=%d\n", ret); + ethtool_info.usr_link_speed = pasist_info.speed; + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, ðtool_info, + sizeof(struct sxe2_msg_ethtool_info), + vf->vf_idx, session_id, 0); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + + LOG_INFO_BDF("vf:%u get ethtool info.(ret:%d).\n", vf->vf_idx, ret); + + return ret; +} + +static s32 sxe2_vf_vsi_cfg_msg_check(struct sxe2_vf_node *vf_node, + struct 
sxe2_vsi_cfg_params *params) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vf_node->adapter; + + if (params->irq_base_idx >= adapter->irq_ctxt.irq_layout.sriov || + params->irq_cnt > adapter->irq_ctxt.irq_layout.sriov || + params->txq_base_idx >= adapter->q_ctxt.txq_layout.sriov || + params->txq_cnt > adapter->q_ctxt.txq_layout.sriov || + params->rxq_base_idx >= adapter->q_ctxt.txq_layout.sriov || + params->rxq_cnt > adapter->q_ctxt.rxq_layout.sriov || + params->vsi_id >= + (adapter->vsi_ctxt.max_cnt + adapter->vsi_ctxt.base_idx_in_dev) || + params->vsi_id < adapter->vsi_ctxt.base_idx_in_dev) { + ret = -EINVAL; + LOG_ERROR_BDF("irq_base:%u irq_cnt:%u txq_base:%u txq_cnt:%u\t" + "rxq_base:%u rxq_cnt:%u vsi id:%u max:%u base:%u\t" + "sriov irq cnt:%u txq cnt:%u rxq cnt:%u ret:%d.\n", + params->irq_base_idx, params->irq_cnt, + params->txq_base_idx, params->txq_cnt, + params->rxq_base_idx, params->rxq_cnt, params->vsi_id, + adapter->vsi_ctxt.max_cnt, + adapter->vsi_ctxt.base_idx_in_dev, + adapter->irq_ctxt.irq_layout.sriov, + adapter->q_ctxt.txq_layout.sriov, + adapter->q_ctxt.rxq_layout.sriov, ret); + } + + return ret; +} + +STATIC s32 sxe2_vf_vsi_rule_cfg(struct sxe2_adapter *adapter, + struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + sxe2_vf_trust_cfg_restore(vf_node); + + if (!sxe2_eswitch_is_offload(adapter) && vsi->type == SXE2_VSI_T_VF) { + ret = sxe2_vf_base_l2_filter_setup(vf_node, vsi); + if (ret) { + LOG_ERROR("vf:%u vsi %u base l2 filter setup fail %d.\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + return ret; + } + } else if (!sxe2_eswitch_is_offload(adapter) && + vsi->type == SXE2_VSI_T_DPDK_VF && + sxe2_port_vlan_is_exist(vf_node)) { + ret = sxe2_vf_vsi_port_vlan_cfg(vf_node, vsi); + if (ret) { + LOG_ERROR("vf:%u vsi %u port vlan cfg fail %d.\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + return ret; + } + } else if (sxe2_eswitch_is_offload(adapter) && + vsi->type == SXE2_VSI_T_DPDK_VF) { + ret = sxe2_vf_sp_rule_add(vf_node, true); + if (ret) { + LOG_ERROR("vf:%u vsi %u sp rule add fail %d.\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + return ret; + } + } + return ret; +} + +STATIC s32 __sxe2_vf_vsi_cfg(struct sxe2_vf_node *vf_node, + struct sxe2_vf_vsi_cfg *cfg_info) +{ + struct sxe2_vsi_cfg_params params = {}; + struct sxe2_adapter *adapter = vf_node->adapter; + struct sxe2_vsi *vsi; + s32 ret; + + params.vsi_id = le16_to_cpu(cfg_info->vsi_id); + params.irq_base_idx = le16_to_cpu(cfg_info->irq_base_idx); + params.irq_cnt = le16_to_cpu(cfg_info->irq_cnt); + params.txq_base_idx = le16_to_cpu(cfg_info->txq_base_idx); + params.txq_cnt = le16_to_cpu(cfg_info->txq_cnt); + params.rxq_base_idx = le16_to_cpu(cfg_info->rxq_base_idx); + params.rxq_cnt = le16_to_cpu(cfg_info->rxq_cnt); + + ret = sxe2_vf_vsi_cfg_msg_check(vf_node, ¶ms); + if (ret) { + LOG_ERROR_BDF("vf_idx:%u vsi create param check fail.\n", + vf_node->vf_idx); + return ret; + } + + ret = sxe2_vf_vsi_type_get(vf_node, params.vsi_id, ¶ms.type); + if (ret) { + LOG_ERROR_BDF("vf_idx:%u vsi type get fail.\n", vf_node->vf_idx); + return ret; + } + + sxe2_vf_vsi_destroy_by_id(vf_node, params.vsi_id); + + params.vf = vf_node; + vsi = sxe2_vsi_create(adapter, ¶ms); + if (!vsi) { + LOG_ERROR_BDF("vf_idx:%u vsi create fail.\n", vf_node->vf_idx); + return -ENOMEM; + } + + LOG_INFO_BDF("vf[%u] hw vsi_id:%u type:%d vsi irq cnt:%u vsi queue \t" + "cnt:%u.\n", + vf_node->vf_idx, vsi->idx_in_dev, vsi->type, vsi->irqs.cnt, + vsi->rxqs.q_cnt); + + if (params.type == SXE2_VSI_T_VF) { + vf_node->vsi = vsi; + 
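/* Added note (not from the original driver): the freshly created eth-type VSI is cached on vf_node and, just below, mirrored into the port representor (src_vsi/vf_idx), so eswitch paths can resolve a VF index straight to its source VSI; the SXE2_VSI_T_DPDK_VF branch mirrors dpdk_vf_vsi the same way. */ + 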
if (vf_node->repr) { + vf_node->repr->src_vsi = vsi; + vf_node->repr->vf_idx = vf_node->vf_idx; + } + + } else if (params.type == SXE2_VSI_T_DPDK_VF) { + vf_node->dpdk_vf_vsi = vsi; + if (vf_node->repr) { + vf_node->repr->dpdk_vf_vsi = vsi; + vf_node->repr->vf_idx = vf_node->vf_idx; + LOG_ERROR_BDF("dpdk_vf_vsi vsi id:%u.\n", vsi->idx_in_dev); + } + } + ret = sxe2_vf_vsi_rule_cfg(adapter, vf_node, vsi); + if (ret) { + LOG_ERROR_BDF("vf[%u] hw vsi_id:%u type:%d rule cfg failed.\n", + vf_node->vf_idx, vsi->idx_in_dev, vsi->type); + sxe2_vf_vsi_destroy_by_id(vf_node, params.vsi_id); + goto l_end; + } + +l_end: + + return ret; +} + +STATIC s32 __sxe2_vf_vsi_decfg(struct sxe2_vf_node *vf_node, + struct sxe2_vf_vsi_cfg *cfg_info) +{ + struct sxe2_adapter *adapter = vf_node->adapter; + u16 vsi_id = le16_to_cpu(cfg_info->vsi_id); + s32 ret = 0; + + sxe2_vf_vsi_destroy_by_id(vf_node, vsi_id); + + LOG_INFO_BDF("vf:%d vsi_id:%d vsi destroyed.\n", vf_node->vf_idx, vsi_id); + + return ret; +} + +static s32 sxe2_vf_vsi_cfg_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_vsi_cfg *vsi_cfg = + (struct sxe2_vf_vsi_cfg *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + s32 ret; + + if (vsi_cfg->is_clear) + ret = __sxe2_vf_vsi_decfg(vf, vsi_cfg); + else + ret = __sxe2_vf_vsi_cfg(vf, vsi_cfg); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +static s32 sxe2_vf_user_driver_vsi_release(struct sxe2_vf_node *vf) +{ + struct sxe2_vsi *vsi = NULL; + s32 ret = 0; + struct sxe2_adapter *adapter = vf->adapter; + + vsi = sxe2_vf_vsi_get(vf, vf->vsi_id[SXE2_VF_TYPE_DPDK]); + if (!vsi) { + ret = -SXE2_VF_ERR_PARAM; + LOG_ERROR_BDF("invalid vsi id:%d.\n", vf->vsi_id[SXE2_VF_TYPE_DPDK]); + return ret; + } + + LOG_INFO_BDF("dpdk release vsi:%u txqs disable start.\n", vsi->idx_in_dev); + + if (sxe2_txqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u txqs disable failed.\n", vsi->idx_in_dev); + + if (sxe2_rxqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u rxqs disable failed.\n", vsi->idx_in_dev); + + sxe2_vf_dpdk_cfg_clear(vf, true); + + return ret; +} + +static s32 sxe2_vf_user_driver_release_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_user_driver_release *release = + (struct sxe2_vf_user_driver_release + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + s32 ret = 0; + struct sxe2_obj obj = {0}; + + (void)sxe2_vf_user_driver_vsi_release(vf); + + obj.func_type = SXE2_VF; + obj.vf_id = release->func_id; + obj.drv_type = SXE2_DPDK_DRV; + obj.drv_id = release->drv_id; + if (sxe2_dpdk_ipsec_resource_release(adapter, &obj)) + LOG_DEV_ERR("func[%u] drv[%u] ipsec clear failed.\n", obj.vf_id, + (obj.drv_type << 6 | obj.drv_id)); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +STATIC s32 sxe2_rss_key_get_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = 
SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + u8 *key_msg = NULL; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_err; + } + + key_msg = kzalloc(SXE2_RSS_HASH_KEY_SIZE, GFP_KERNEL); + if (!key_msg) { + LOG_ERROR_BDF("sxe2 vf rss get key no memory.\n"); + ret = -SXE2_VF_ERR_NO_MEMORY; + goto l_err; + } + + ret = sxe2_fwc_rss_hkey_get(vsi, key_msg); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss get key failed, ret: %d, vsi id: %u.\n", + ret, vsi->id_in_pf); + goto l_err; + } + + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, key_msg, + SXE2_CALC_RESP_LEN(*key_msg, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + goto l_end; + +l_err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + +l_end: + kfree(key_msg); + return ret; +} + +STATIC s32 sxe2_rss_lut_get_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + u8 *lut_msg = NULL; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_err; + } + + lut_msg = kzalloc(vsi->rss_ctxt.lut_size, GFP_KERNEL); + if (!lut_msg) { + LOG_ERROR_BDF("sxe2 vf rss get lut no memory.\n"); + ret = -SXE2_VF_ERR_NO_MEMORY; + goto l_err; + } + + ret = sxe2_fwc_rss_lut_get(vsi, lut_msg, vsi->rss_ctxt.lut_size); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss get lut failed, ret: %d, vsi id: %u.\n", + ret, vsi->id_in_pf); + goto l_err; + } + + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, lut_msg, + SXE2_CALC_RESP_LEN(*lut_msg, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + goto l_end; + +l_err: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); +l_end: + kfree(lut_msg); + return ret; +} + +STATIC s32 sxe2_rss_key_set_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u8 *key = (u8 *)(msg_info->buf + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + ret = sxe2_fwc_rss_hkey_set(vsi, key); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set key fwc failed, ret: %d, vsi id: \t" + "%u.\n", + ret, vsi->id_in_pf); + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set key reply failed, ret: %d, vf id: \t" + "%u.\n", + ret, vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_lut_set_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + 
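/* Added note (not from the original driver): the new LUT bytes sit directly after the mailbox header and are located via SXE2VF_MBX_DATA_OFFSET(); the length pushed to firmware is bounded by the PF-owned vsi->rss_ctxt.lut_size rather than any VF-supplied count. */ + 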
struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + u8 *lut = (u8 *)(msg_info->buf + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + ret = sxe2_fwc_rss_lut_set(vsi, lut, vsi->rss_ctxt.lut_size); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set lut fwc failed, ret: %d, vsi id: \t" + "%u.\n", + ret, vsi->id_in_pf); + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set key reply failed, ret: %d, vf id: \t" + "%u.\n", + ret, vf->vf_idx); + + return ret; +} + +STATIC void +sxe2_rss_hash_msg_convert_hash_cfg(struct sxe2_rss_hash_cfg *hash_cfg, + struct sxe2_vf_rss_hash_msg *rss_hash_msg) +{ + u32 tmp_headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + u32 i = 0; + + (void)memset(hash_cfg, 0, sizeof(struct sxe2_rss_hash_cfg)); + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_HDR_MAX); i++) + tmp_headers[i] = le32_to_cpu(rss_hash_msg->headers[i]); + + bitmap_from_arr32(hash_cfg->headers, tmp_headers, SXE2_FLOW_HDR_MAX); + + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++) + tmp_flds[i] = le32_to_cpu(rss_hash_msg->hash_flds[i]); + + bitmap_from_arr32(hash_cfg->hash_flds, tmp_flds, SXE2_FLOW_FLD_ID_MAX); + + hash_cfg->hdr_type = le32_to_cpu(rss_hash_msg->hdr_type); + hash_cfg->symm = rss_hash_msg->symm == 1 ? true : false; +} + +STATIC s32 sxe2_rss_cfg_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_rss_hash_msg *cfg_msg = + (struct sxe2_vf_rss_hash_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_rss_hash_cfg cfg; + + if (sxe2_is_safe_mode(adapter) || !vsi) { + LOG_ERROR_BDF("sxe2 vf rss is in safe mode, not support.\n"); + ret = -EINVAL; + goto l_out; + } + + sxe2_rss_hash_msg_convert_hash_cfg(&cfg, cfg_msg); + + if (bitmap_empty(cfg.headers, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + ret = -EINVAL; + goto l_out; + } + + if (bitmap_empty(cfg.hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + ret = -EINVAL; + goto l_out; + } + + ret = sxe2_add_rss_flow(&adapter->rss_flow_ctxt, vsi->id_in_pf, &cfg); + if (ret != 0) + LOG_ERROR_BDF("invalid field type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + +l_out: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reply vf failed! 
vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_cfg_del_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_rss_hash_msg *cfg_msg = + (struct sxe2_vf_rss_hash_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_rss_hash_cfg cfg; + + if (sxe2_is_safe_mode(adapter) || !vsi) { + LOG_ERROR_BDF("sxe2 vf rss is in safe mode, not support.\n"); + ret = -EINVAL; + goto l_out; + } + + sxe2_rss_hash_msg_convert_hash_cfg(&cfg, cfg_msg); + + if (bitmap_empty(cfg.headers, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + ret = -EINVAL; + goto l_out; + } + + if (bitmap_empty(cfg.hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR_BDF("invalid header type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + ret = -EINVAL; + goto l_out; + } + + ret = sxe2_rss_rem_cfg(&adapter->rss_flow_ctxt, vsi->id_in_pf, &cfg); + if (ret != 0) + LOG_ERROR_BDF("invalid field type! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + +l_out: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reply vf failed! vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_cfg_replay_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (sxe2_is_safe_mode(adapter) || !vsi) { + LOG_ERROR_BDF("sxe2 vf rss is in safe mode, not support.\n"); + ret = -EINVAL; + goto l_out; + } + + ret = sxe2_rss_replay_hash_cfg(&adapter->rss_flow_ctxt, vsi->id_in_pf); + if (ret) { + sxe2_rss_vsi_flow_clean(vsi); + LOG_ERROR_BDF("sxe2 vf rss default flow set failed, ret=%d.\n", ret); + } + +l_out: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf add default rss cfg failed! 
vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_add_default_cfg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (sxe2_is_safe_mode(adapter) || !vsi) { + LOG_ERROR_BDF("sxe2 vf rss is in safe mode, not support.\n"); + ret = -EINVAL; + goto l_out; + } + + sxe2_rss_delete_vsi_cfg_list(&adapter->rss_flow_ctxt, vsi->id_in_pf); + + ret = sxe2_rss_default_flow_set(vsi); + if (ret) { + sxe2_rss_vsi_flow_clean(vsi); + LOG_ERROR_BDF("sxe2 vf rss default flow set failed, ret=%d.\n", ret); + } + +l_out: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reply vf failed! vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_cfg_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + sxe2_rss_vsi_flow_clean(vsi); + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reply vf failed! vf_id %u\n", vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_rss_hash_ctrl_set_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_rss_hash_ctrl *hash_ctrl = + (struct sxe2_vf_rss_hash_ctrl + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_cmd_params cmd = {0}; + struct sxe2_rss_vsi_hctrl hctrl = {0}; + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + hctrl.vsi_hw_id = cpu_to_le16(vsi->idx_in_dev); + hctrl.hash_type = hash_ctrl->hash_func; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_VSI_HCTRL_SET, &hctrl, + sizeof(hctrl), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss set hash ctrl fw failed! idx: %u\n", + vsi->id_in_pf); + ret = -EIO; + } + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set hash ctrl replay failed! 
vf_id: " + "%u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_fnav_filter_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_fnav_add_filter_resp filter_resp; + struct sxe2_fnav_comm_full_msg *filter_msg = + (struct sxe2_fnav_comm_full_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + u32 flow_id = 0; + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + ret = sxe2_comm_add_fnav_filter(adapter, vsi->id_in_pf, vsi->id_in_pf, + vsi->id_in_pf, filter_msg, &flow_id); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav parse pattern fail ret: %d ! idx: %u\n", + ret, vsi->id_in_pf); + + filter_resp.flow_id = cpu_to_le32(flow_id); + +l_end: + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &filter_resp, + SXE2_CALC_RESP_LEN(filter_resp, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_fnav_filter_del_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_fnav_filter_del_msg *del_msg = + (struct sxe2_vf_fnav_filter_del_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + u32 flow_id; + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + flow_id = le32_to_cpu(del_msg->flow_id); + ret = sxe2_fnav_del_filter_by_flow_id(adapter, vsi->id_in_pf, flow_id); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav param check fail ret: %d ! vsi type: \t" + "%u, idx: %u\n", + ret, vsi->type, vsi->id_in_pf); + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! vf_id %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_fnav_filter_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + ret = sxe2_fnav_del_filter_by_vsi(vsi); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav param check fail ret: %d ! vsi type: \t" + "%u, idx: %u\n", + ret, vsi->type, vsi->id_in_pf); + } + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! 
vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_fnav_stat_alloc_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + struct sxe2_vf_fnav_stat_alloc_req_msg *stat_req = + (struct sxe2_vf_fnav_stat_alloc_req_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_fnav_stat_msg stat_msg = {0}; + u16 stat_index = 0; + bool need_update = false; + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + need_update = stat_req->need_update; + ret = sxe2_fnav_stat_idx_alloc_with_lock(adapter, vsi->id_in_pf, &stat_index, + need_update); + if (ret) + LOG_ERROR_BDF("sxe2 fnav now has valid stat index! vsi type: %u, \t" + "idx: %u\n", + vsi->type, vsi->id_in_pf); + else + stat_msg.stat_index = cpu_to_le16(stat_index); + +l_end: + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &stat_msg, + SXE2_CALC_RESP_LEN(stat_msg, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! vf_id: %u\n", + vf->vf_idx); + + return ret; +} + +STATIC s32 sxe2_fnav_stat_free_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_fnav_stat_msg *stat_msg = + (struct sxe2_vf_fnav_stat_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + u16 stat_index = 0; + + stat_index = le16_to_cpu(stat_msg->stat_index); + ret = sxe2_fnav_stat_idx_free_with_lock(adapter, stat_index); + if (ret) + LOG_ERROR_BDF("sxe2 fnav now has valid stat index! vf_id: %u, idx: \t" + "%u\n", + vf->vf_idx, stat_index); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! vf_id: %u, idx: %u\n", + vf->vf_idx, stat_index); + + return ret; +} + +STATIC s32 sxe2_fnav_stat_query_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_fnav_stat_query_req_msg *stat_msg = + (struct sxe2_vf_fnav_stat_query_req_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_vf_fnav_stat_query_resp_msg stat_resp; + u16 stat_index = 0; + u32 is_clear = 0; + enum sxe2_fnav_stat_ctrl_type stat_type; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_fnav_stats_req req = {}; + struct sxe2_fwc_fnav_stats_resp resp = {}; + u64 hits = 0; + u64 bytes = 0; + + stat_index = le16_to_cpu(stat_msg->stat_index); + is_clear = le32_to_cpu(stat_msg->is_clear); + stat_type = le32_to_cpu(stat_msg->stat_ctrl); + + req.is_clear = is_clear ? 
true : false; + req.counter_idx = cpu_to_le16(stat_index); + req.bank_type = SXE2_FNAV_COUNTER_BANK_ALL; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FNAV_STATS_GET, &req, sizeof(req), + &resp, sizeof(resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav get state failed, stat_index=%u, ret=%d", + stat_index, ret); + } else { + switch (stat_type) { + case SXE2_FNAV_STAT_ENA_PKTS: + hits += le64_to_cpu(resp.stats[0]); + break; + case SXE2_FNAV_STAT_ENA_BYTES: + bytes += le64_to_cpu(resp.stats[0]); + break; + case SXE2_FNAV_STAT_ENA_ALL: + hits += le64_to_cpu(resp.stats[0]); + bytes += le64_to_cpu(resp.stats[1]); + break; + default: + break; + } + } + + stat_resp.stat_index = stat_msg->stat_index; + stat_resp.stat_hits = cpu_to_le64(hits); + stat_resp.stat_bytes = cpu_to_le64(bytes); + + sxe2_mbx_msg_reply_params_fill( + ¶ms, msg_info->opcode, &stat_resp, + SXE2_CALC_RESP_LEN(stat_resp, cmd_hdr->tran_out_len), + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav reply vf failed! vf_id: %u, idx: %u\n", + vf->vf_idx, stat_index); + } + + return ret; +} + +STATIC s32 sxe2_fnav_match_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + if (!vsi) { + LOG_ERROR_BDF("vsi is null!\n"); + ret = -SXE2_VF_ERR_PARAM; + goto l_end; + } + + adapter->fnav_ctxt.fnav_stat_ctxt.vsi_fnav_match[vsi->id_in_pf] = 0; + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + return ret; +} + +#ifdef SXE2_SUPPORT_ACL +STATIC s32 sxe2_vf_acl_filter_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_vsi *vsi = vf->vsi; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_cmd_params params = {0}; + s32 ret = 0; + + ret = sxe2_acl_add_rule_ethtool(vsi, fsp); + if (ret) + LOG_ERROR_BDF("add vf filter failed, ret:%d\n", ret); + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf acl reply vf failed! 
vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + } + + return ret; +} + +STATIC s32 sxe2_vf_acl_filter_del_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2vf_acl_filter_del_req *req = + (struct sxe2vf_acl_filter_del_req + *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_cmd_params params = {0}; + s32 ret = 0; + + ret = sxe2_acl_del_filter_by_id(vsi, + SXE2_GEN_FILTER_ID(vsi->idx_in_dev, req->filter_id)); + if (ret) + LOG_ERROR_BDF("delete filter failed, ret:%d\n", ret); + + sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, &params); + if (ret) + LOG_ERROR_BDF("sxe2 vf acl reply vf failed! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + + return ret; +} + +STATIC s32 sxe2_vf_acl_filter_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_vsi *vsi = vf->vsi; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_cmd_params params = {0}; + s32 ret = 0; + + ret = sxe2_acl_del_filter_by_vsi(vsi); + if (ret) + LOG_ERROR_BDF("delete filter failed, ret:%d\n", ret); + + sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, &params); + if (ret) { + LOG_ERROR_BDF("sxe2 vf acl reply vf failed! vsi type: %u, idx: %u\n", + vsi->type, vsi->id_in_pf); + } + + return ret; +} +#endif + +static s32 sxe2_vf_ipsec_sa_add_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_ipsec_sa_add_msg *req = + (struct sxe2_vf_ipsec_sa_add_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_vf_ipsec_sa_add_resp resp = {0}; + u32 hw_sa_index = SXE2_IPSEC_HW_INDEX_INVALID; + + if (!sxe2_vf_is_trusted(vf)) { + ret = -SXE2_VF_ERR_PARAM; + goto out; + } + + ret = sxe2_ipsec_vf_sa_add(adapter, vf->vf_idx, req, &hw_sa_index, false); + if (ret) { + LOG_ERROR_BDF("failed to add vf sa, ret: %d ! vf idx: %u\n", ret, + vf->vf_idx); + goto out; + } + + resp.sa_idx = hw_sa_index; + +out: + sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, &resp, + SXE2_CALC_RESP_LEN(resp, cmd_hdr->tran_out_len), + vf->vf_idx, + session_id, ret); + + ret = sxe2_mbx_msg_reply(adapter, &params); + if (ret) + LOG_ERROR_BDF("failed to add vf sa! vf idx: %u\n", vf->vf_idx); + + return ret; +} + +static s32 sxe2_vf_ipsec_sa_clear_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_ipsec_sa_del_msg *req = + (struct sxe2_vf_ipsec_sa_del_msg + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + + ret = sxe2_ipsec_vf_sa_free(adapter, vf->vf_idx, req); + if (ret) { + LOG_ERROR_BDF("failed to free vf sa, ret: %d ! vf idx: %u\n", ret, + vf->vf_idx); + } + + sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, &params); + if (ret) + LOG_ERROR_BDF("failed to free vf sa! vf idx: %u\n", vf->vf_idx); + + return ret; +} + +static s32 sxe2_vf_ipsec_get_capa_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + s32 ret = SXE2_VF_ERR_SUCCESS; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2vf_get_capa_response resp; + + if (adapter->ipsec_ctxt.status != SXE2_IPSEC_READY && + adapter->ipsec_ctxt.status != SXE2_IPSEC_RESETTING) { + ret = 0; + resp.rx_sa_cnt = 0; + resp.tx_sa_cnt = 0; + } else { + resp.rx_sa_cnt = (u16)adapter->ipsec_ctxt.max_rx_sa_cnt; + resp.tx_sa_cnt = (u16)adapter->ipsec_ctxt.max_tx_sa_cnt; + } + + sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, &resp, + SXE2_CALC_RESP_LEN(resp, cmd_hdr->tran_out_len), + vf->vf_idx, + session_id, ret); + + ret = sxe2_mbx_msg_reply(adapter, &params); + if (ret) + LOG_ERROR_BDF("failed to get capa! vf idx: %u\n", vf->vf_idx); + + return ret; +} + +static s32 sxe2_vf_rdma_dump_pcap_msg_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + struct sxe2vf_rdma_dump_pcap_msg *msg = + (struct sxe2vf_rdma_dump_pcap_msg + *)(msg_info->buf + + SXE2VF_MBX_FULL_HDR_SIZE); + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + + vsi = sxe2_vf_vsi_get(vf, vf->vsi_id[SXE2_VF_TYPE_ETH]); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", vf->vsi_id[SXE2_VF_TYPE_ETH]); + ret = -SXE2_VF_ERR_PARAM; + goto l_msg_reply; + } + + ret = sxe2_rdma_dump_pcap_setup(vsi, msg->mac, msg->is_add); + if (ret) { + LOG_ERROR_BDF("failed to setup vf rdma dump pcap! 
vf idx: %u\n", + vf->vf_idx); + } + +l_msg_reply: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + + return sxe2_mbx_msg_reply(adapter, ¶ms); +} + +static s32 +sxe2_vf_passthrough_user_driver_data_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + s32 ret = 0; + struct sxe2_com_user_data_passthrough_req *req = + (struct sxe2_com_user_data_passthrough_req + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + struct sxe2_com_user_data_passthrough_resp *resp = NULL; + struct sxe2_drv_cmd_params *cmd_param = NULL; + struct sxe2_obj obj = {0}; + u32 total_resp_len = 0; + + cmd_param = kzalloc(sizeof(*cmd_param), GFP_KERNEL); + if (!cmd_param) { + LOG_ERROR_BDF("Mem alloc failed ret:%d", ret); + ret = -SXE2_VF_ERR_NO_MEMORY; + goto l_end; + } + + memcpy(&obj, &req->obj, sizeof(struct sxe2_mbx_obj)); + cmd_param->opcode = req->opcode; + cmd_param->vsi_id = req->vsi_id; + cmd_param->req_len = req->req_len; + cmd_param->resp_len = req->resp_len; + + if (cmd_param->req_len > 0) { + cmd_param->req_data = kzalloc(cmd_param->req_len, GFP_KERNEL); + if (!cmd_param->req_data) { + LOG_ERROR_BDF("Mem alloc failed ret:%d", ret); + ret = -SXE2_VF_ERR_NO_MEMORY; + goto l_end; + } + (void)memcpy(cmd_param->req_data, req->cmd_buff, cmd_param->req_len); + } + + if (cmd_param->resp_len > 0) { + total_resp_len = sizeof(*resp) + cmd_param->resp_len; + resp = kzalloc(total_resp_len, GFP_KERNEL); + if (!resp) { + LOG_ERROR_BDF("Mem alloc failed ret:%d", ret); + ret = -SXE2_VF_ERR_NO_MEMORY; + total_resp_len = 0; + goto l_end; + } + cmd_param->resp_data = resp->cmd_buff; + } + + ret = sxe2_com_cmd_send(adapter, &obj, cmd_param); + if (ret) { + LOG_ERROR_BDF("dpdk_passthrough_vf_data failed ret:%d\n", ret); + goto l_end; + } + +l_end: + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, resp, + total_resp_len, vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + + if (cmd_param) { + kfree(cmd_param->req_data); + kfree(cmd_param); + } + + kfree(resp); + + return ret; +} + +static s32 sxe2_vf_drv_mode_set_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + struct sxe2_vf_drv_mode_req *vf_msg = + (struct sxe2_vf_drv_mode_req + *)(msg_info->buf + + SXE2VF_MBX_DATA_OFFSET(msg_info->buf)); + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + s32 ret = 0; + + vf->mode = vf_msg->drv_mode; + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, NULL, 0, + vf->vf_idx, session_id, ret); + ret = sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("failed to set drv mode ret:%d vf idx: %u\n", ret, + vf->vf_idx); + + return ret; +} + +static s32 sxe2_vf_drv_mode_get_func(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_cmd_params params = {0}; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf; + u64 session_id = le64_to_cpu(cmd_hdr->session_id); + struct sxe2_vf_drv_mode_resp resp; + s32 ret = 0; + + resp.drv_mode = vf->mode; + + sxe2_mbx_msg_reply_params_fill(¶ms, msg_info->opcode, &resp, + sizeof(resp), vf->vf_idx, session_id, ret); + ret 
= sxe2_mbx_msg_reply(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("failed to set drv mode ret:%d vf idx: %u\n", ret, + vf->vf_idx); + + return ret; +} + +struct sxe2_mbx_msg_table vf_msg_table[SXE2_VF_OPCODE_NR] = { + [SXE2_VF_VERSION_MATCH] = {SXE2_VF_VERSION_MATCH, sxe2_ver_msg_func}, + [SXE2_VF_RESET_REQUEST] = {SXE2_VF_RESET_REQUEST, + sxe2_reset_msg_func}, + [SXE2_VF_HW_RES_GET] = {SXE2_VF_HW_RES_GET, sxe2_res_get_msg_func}, + [SXE2_VF_STATS_GET] = {SXE2_VF_STATS_GET, sxe2_stats_get_msg_func}, + [SXE2_VF_STATS_PUSH] = {SXE2_VF_STATS_PUSH, + sxe2_stats_push_msg_func}, + [SXE2_VF_RXQ_CFG_AND_ENABLE] = {SXE2_VF_RXQ_CFG_AND_ENABLE, + sxe2_rxq_cfg_ena_msg_func}, + + [SXE2_VF_MAC_ADDR_ADD] = {SXE2_VF_MAC_ADDR_ADD, + sxe2_addr_add_msg_func}, + [SXE2_VF_MAC_ADDR_DEL] = {SXE2_VF_MAC_ADDR_DEL, + sxe2_addr_del_msg_func}, + [SXE2_VF_MAC_ADDR_UPDATE] = {SXE2_VF_MAC_ADDR_UPDATE, + sxe2_addr_update_msg_func}, + [SXE2_VF_PROMISC_UPDATE] = {SXE2_VF_PROMISC_UPDATE, + sxe2_promisc_update_msg_func}, + [SXE2_VF_USER_VLAN_PROCESS] = {SXE2_VF_USER_VLAN_PROCESS, + sxe2_user_vlan_msg_func}, + + [SXE2_VF_TXQ_CFG_AND_ENABLE] = {SXE2_VF_TXQ_CFG_AND_ENABLE, + sxe2_mbx_txq_cfg_reply}, + [SXE2_VF_IRQ_MAP] = {SXE2_VF_IRQ_MAP, sxe2_irq_map_msg_func}, + [SXE2_VF_IRQ_UNMAP] = {SXE2_VF_IRQ_UNMAP, sxe2_irq_unmap_msg_func}, + [SXE2_VF_QUEUES_DISABLE] = {SXE2_VF_QUEUES_DISABLE, + sxe2_queues_dis_msg_func}, + [SXE2_VF_PROMISC_CFG] = {SXE2_VF_PROMISC_CFG, + sxe2_promisc_cfg_msg_func}, + [SXE2_VF_VLAN_OFFLOAD_CFG] = {SXE2_VF_VLAN_OFFLOAD_CFG, + sxe2_vlan_offload_cfg_msg_func}, + [SXE2_VF_VLAN_FILTER_CFG] = {SXE2_VF_VLAN_FILTER_CFG, + sxe2_vlan_filter_cfg_msg_func}, + [SXE2_VF_VLAN_ADD] = {SXE2_VF_VLAN_ADD, sxe2_vlan_add_msg_func}, + [SXE2_VF_VLAN_DEL] = {SXE2_VF_VLAN_DEL, sxe2_vlan_del_msg_func}, + + [SXE2_VF_LINK_STATUS_GET] = {SXE2_VF_LINK_STATUS_GET, + sxe2_link_msg_func}, + + [SXE2_VF_RDMA] = {SXE2_VF_RDMA, sxe2_rdma_msg_func}, + [SXE2_VF_QV_MAP] = {SXE2_VF_QV_MAP, sxe2_qv_map_unmap_msg_func}, + [SXE2_VF_QV_UNMAP] = {SXE2_VF_QV_UNMAP, sxe2_qv_map_unmap_msg_func}, + [SXE2_VF_RDMA_MGR_CMD] = {SXE2_VF_RDMA_MGR_CMD, + sxe2_vf_rdma_mgr_msg_func}, + + [SXE2_VF_GET_RSS_KEY] = {SXE2_VF_GET_RSS_KEY, + sxe2_rss_key_get_msg_func}, + [SXE2_VF_GET_RSS_LUT] = {SXE2_VF_GET_RSS_LUT, + sxe2_rss_lut_get_msg_func}, + [SXE2_VF_SET_RSS_KEY] = {SXE2_VF_SET_RSS_KEY, + sxe2_rss_key_set_msg_func}, + [SXE2_VF_SET_RSS_LUT] = {SXE2_VF_SET_RSS_LUT, + sxe2_rss_lut_set_msg_func}, + [SXE2_VF_ADD_RSS_CFG] = {SXE2_VF_ADD_RSS_CFG, + sxe2_rss_cfg_add_msg_func}, + [SXE2_VF_CLEAR_RSS_CFG] = {SXE2_VF_CLEAR_RSS_CFG, + sxe2_rss_cfg_clear_msg_func}, + [SXE2_VF_SET_RSS_HASH_CTRL] = {SXE2_VF_SET_RSS_HASH_CTRL, + sxe2_rss_hash_ctrl_set_msg_func}, + [SXE2_VF_DEL_RSS_CFG] = {SXE2_VF_DEL_RSS_CFG, + sxe2_rss_cfg_del_msg_func}, + [SXE2_VF_ADD_DEFAULT_RSS_CFG] = {SXE2_VF_ADD_DEFAULT_RSS_CFG, + sxe2_rss_add_default_cfg_func}, + [SXE2_VF_REPLAY_RSS_CFG] = {SXE2_VF_REPLAY_RSS_CFG, + sxe2_rss_cfg_replay_func}, + [SXE2_VF_FNAV_FILTER_ADD] = {SXE2_VF_FNAV_FILTER_ADD, + sxe2_fnav_filter_add_msg_func}, + [SXE2_VF_FNAV_FILTER_DEL] = {SXE2_VF_FNAV_FILTER_DEL, + sxe2_fnav_filter_del_msg_func}, + [SXE2_VF_FNAV_FILTER_CLEAR] = {SXE2_VF_FNAV_FILTER_CLEAR, + sxe2_fnav_filter_clear_msg_func}, + [SXE2_VF_FNAV_ALLOC_STAT] = {SXE2_VF_FNAV_ALLOC_STAT, + sxe2_fnav_stat_alloc_msg_func}, + [SXE2_VF_FNAV_FREE_STAT] = {SXE2_VF_FNAV_FREE_STAT, + sxe2_fnav_stat_free_msg_func}, + [SXE2_VF_FNAV_QUERY_STAT] = {SXE2_VF_FNAV_QUERY_STAT, + sxe2_fnav_stat_query_msg_func}, + [SXE2_VF_FNAV_MATCH_CLEAR] = 
+	[SXE2_VF_STATS_CLEAR] = {SXE2_VF_STATS_CLEAR,
+				 sxe2_stats_clear_msg_func},
+	[SXE2_VF_RXQ_DISABLE] = {SXE2_VF_RXQ_DISABLE, sxe2_rxq_dis_msg_func},
+	[SXE2_VF_TXQ_DISABLE] = {SXE2_VF_TXQ_DISABLE, sxe2_txq_dis_msg_func},
+
+	[SXE2_VF_GET_PTP_CLOCK] = {SXE2_VF_GET_PTP_CLOCK,
+				   sxe2_ptp_get_time_msg_func},
+
+	[SXE2_VF_IPSEC_SA_ADD] = {SXE2_VF_IPSEC_SA_ADD,
+				  sxe2_vf_ipsec_sa_add_msg_func},
+	[SXE2_VF_IPSEC_SA_CLEAR] = {SXE2_VF_IPSEC_SA_CLEAR,
+				    sxe2_vf_ipsec_sa_clear_msg_func},
+	[SXE2_VF_IPSEC_GET_CAPA] = {SXE2_VF_IPSEC_GET_CAPA,
+				    sxe2_vf_ipsec_get_capa_msg_func},
+
+	[SXE2_VF_RDMA_DUMP_PCAP] = {SXE2_VF_RDMA_DUMP_PCAP,
+				    sxe2_vf_rdma_dump_pcap_msg_func},
+	[SXE2_VF_GET_ETHTOOL_INFO] = {SXE2_VF_GET_ETHTOOL_INFO,
+				      sxe2_vf_ethtool_info_get_msg_func},
+	[SXE2_VF_VSI_CFG] = {SXE2_VF_VSI_CFG, sxe2_vf_vsi_cfg_msg_func},
+#ifdef SXE2_SUPPORT_ACL
+	[SXE2_VF_ACL_FILTER_ADD] = {SXE2_VF_ACL_FILTER_ADD,
+				    sxe2_vf_acl_filter_add_msg_func},
+	[SXE2_VF_ACL_FILTER_DEL] = {SXE2_VF_ACL_FILTER_DEL,
+				    sxe2_vf_acl_filter_del_msg_func},
+	[SXE2_VF_ACL_FILTER_CLEAR] = {SXE2_VF_ACL_FILTER_CLEAR,
+				      sxe2_vf_acl_filter_clear_msg_func},
+#endif
+	[SXE2_VF_USER_DRIVER_RELEASE] = {SXE2_VF_USER_DRIVER_RELEASE,
+					 sxe2_vf_user_driver_release_msg_func},
+	[SXE2_VF_PASSTHROUGH_USER_VF_DATA] = {SXE2_VF_PASSTHROUGH_USER_VF_DATA,
+					      sxe2_vf_passthrough_user_driver_data_func},
+	[SXE2_VF_DRV_MODE_SET] = {SXE2_VF_DRV_MODE_SET,
+				  sxe2_vf_drv_mode_set_func},
+	[SXE2_VF_DRV_MODE_GET] = {SXE2_VF_DRV_MODE_GET,
+				  sxe2_vf_drv_mode_get_func},
+
+	[0] = {0, NULL}};
+
+struct sxe2_mbx_msg_table *sxe2_mbx_msg_table_get(void)
+{
+	return &vf_msg_table[0];
+}
+
+void sxe2_mbx_msg_table_set(struct sxe2_vf_node *vf)
+{
+	vf->msg_table = &vf_msg_table[0];
+}
+
+static s32 sxe2_repr_addr_add_msg_handle(struct sxe2_vf_node *vf,
+					 struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = SXE2_VF_ERR_SUCCESS;
+	struct sxe2_vf_addr_msg *msg =
+		(struct sxe2_vf_addr_msg *)(msg_info->buf +
+					    SXE2VF_MBX_FULL_HDR_SIZE);
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	u32 i = 0;
+	s32 ret;
+
+	if (!sxe2_mbx_msg_vsi_id_is_valid(vf, le16_to_cpu(msg->vsi_id))) {
+		h_ret = -EINVAL;
+		goto l_msg_reply;
+	}
+
+	for (i = 0; i < msg->addr_cnt; i++) {
+		u8 *mac_addr = msg->elem[i].addr;
+
+		if (!is_unicast_ether_addr(mac_addr) ||
+		    ether_addr_equal(mac_addr, vf->mac_addr.addr))
+			continue;
+
+		if (vf->prop.mac_from_pf) {
+			LOG_DEV_ERR("VF attempting to override administratively set "
+				    "MAC address\n");
+			h_ret = -EINVAL;
+			goto l_msg_reply;
+		}
+		if (msg->elem[i].type == SXE2_VF_MAC_TYPE_P)
+			ether_addr_copy(vf->mac_addr.addr, mac_addr);
+
+		break;
+	}
+
+l_msg_reply:
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("add mac addr cnt:%u complete cnt:%u ret:%d.\n",
+			      msg->addr_cnt, i, ret);
+	return ret;
+}
+
+static s32 sxe2_repr_addr_del_msg_handle(struct sxe2_vf_node *vf,
+					 struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = SXE2_VF_ERR_SUCCESS;
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	s32 ret;
+
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("repr mac addr del msg reply failed, ret:%d.\n", ret);
+	return ret;
+}
+
+static s32 sxe2_repr_vlan_add_msg_func(struct sxe2_vf_node *vf,
+				       struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = -EPERM;
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	s32 ret;
+
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("repr vlan add msg reply failed, ret:%d.\n", ret);
+	return ret;
+}
+
+static s32 sxe2_repr_user_vlan_add_msg_func(struct sxe2_vf_node *vf,
+					    struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = -EPERM;
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	s32 ret;
+
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("repr user vlan add msg reply failed, ret:%d.\n", ret);
+	return ret;
+}
+
+static s32 sxe2_repr_promisc_add_msg_func(struct sxe2_vf_node *vf,
+					  struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = -EPERM;
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	s32 ret;
+
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("repr promisc cfg msg reply failed, ret:%d.\n", ret);
+	return ret;
+}
+
+static s32 sxe2_repr_vlan_del_msg_func(struct sxe2_vf_node *vf,
+				       struct sxe2_mbx_msg_info *msg_info)
+{
+	s32 h_ret = -EPERM;
+	struct sxe2_adapter *adapter = vf->adapter;
+	struct sxe2_cmd_params params = {0};
+	struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg_info->buf;
+	u64 session_id = le64_to_cpu(cmd_hdr->session_id);
+	s32 ret;
+
+	sxe2_mbx_msg_reply_params_fill(&params, msg_info->opcode, NULL, 0,
+				       vf->vf_idx, session_id, h_ret);
+	ret = sxe2_mbx_msg_reply(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("repr vlan del msg reply failed, ret:%d.\n", ret);
+	return ret;
+}
+
+/* dispatch table used while eswitch/representor mode is active: VF MAC,
+ * VLAN and promisc requests are refused (-EPERM) or handled on the PF side
+ */
+struct sxe2_mbx_msg_table eswitch_vf_msg_table[SXE2_VF_OPCODE_NR] = {
+	[SXE2_VF_VERSION_MATCH] = {SXE2_VF_VERSION_MATCH, sxe2_ver_msg_func},
+	[SXE2_VF_RESET_REQUEST] = {SXE2_VF_RESET_REQUEST,
+				   sxe2_reset_msg_func},
+	[SXE2_VF_HW_RES_GET] = {SXE2_VF_HW_RES_GET, sxe2_res_get_msg_func},
+	[SXE2_VF_STATS_GET] = {SXE2_VF_STATS_GET, sxe2_stats_get_msg_func},
+	[SXE2_VF_STATS_PUSH] = {SXE2_VF_STATS_PUSH,
+				sxe2_stats_push_msg_func},
+	[SXE2_VF_RXQ_CFG_AND_ENABLE] = {SXE2_VF_RXQ_CFG_AND_ENABLE,
+					sxe2_rxq_cfg_ena_msg_func},
+
+	[SXE2_VF_MAC_ADDR_ADD] = {SXE2_VF_MAC_ADDR_ADD,
+				  sxe2_repr_addr_add_msg_handle},
+	[SXE2_VF_MAC_ADDR_DEL] = {SXE2_VF_MAC_ADDR_DEL,
+				  sxe2_repr_addr_del_msg_handle},
+	[SXE2_VF_MAC_ADDR_UPDATE] = {SXE2_VF_MAC_ADDR_UPDATE,
+				     sxe2_repr_addr_update_msg_func},
+	[SXE2_VF_PROMISC_UPDATE] = {SXE2_VF_PROMISC_UPDATE,
+				    sxe2_repr_promisc_update_msg_func},
+
+	[SXE2_VF_TXQ_CFG_AND_ENABLE] = {SXE2_VF_TXQ_CFG_AND_ENABLE,
+					sxe2_mbx_txq_cfg_reply},
+	[SXE2_VF_IRQ_MAP] = {SXE2_VF_IRQ_MAP,
sxe2_irq_map_msg_func}, + [SXE2_VF_IRQ_UNMAP] = {SXE2_VF_IRQ_UNMAP, sxe2_irq_unmap_msg_func}, + [SXE2_VF_QUEUES_DISABLE] = {SXE2_VF_QUEUES_DISABLE, + sxe2_queues_dis_msg_func}, + [SXE2_VF_PROMISC_CFG] = {SXE2_VF_PROMISC_CFG, + sxe2_repr_promisc_add_msg_func}, + [SXE2_VF_VLAN_OFFLOAD_CFG] = {SXE2_VF_VLAN_OFFLOAD_CFG, + sxe2_vlan_offload_cfg_msg_func}, + [SXE2_VF_VLAN_FILTER_CFG] = {SXE2_VF_VLAN_FILTER_CFG, + sxe2_vlan_filter_cfg_msg_func}, + [SXE2_VF_VLAN_ADD] = {SXE2_VF_VLAN_ADD, sxe2_repr_vlan_add_msg_func}, + [SXE2_VF_VLAN_DEL] = {SXE2_VF_VLAN_DEL, sxe2_repr_vlan_del_msg_func}, + [SXE2_VF_USER_VLAN_PROCESS] = {SXE2_VF_USER_VLAN_PROCESS, + sxe2_repr_user_vlan_add_msg_func}, + [SXE2_VF_LINK_STATUS_GET] = {SXE2_VF_LINK_STATUS_GET, + sxe2_link_msg_func}, + [SXE2_VF_STATS_CLEAR] = {SXE2_VF_STATS_CLEAR, + sxe2_stats_clear_msg_func}, + + [SXE2_VF_RDMA] = {SXE2_VF_RDMA, sxe2_rdma_msg_func}, + [SXE2_VF_QV_MAP] = {SXE2_VF_QV_MAP, sxe2_qv_map_unmap_msg_func}, + [SXE2_VF_QV_UNMAP] = {SXE2_VF_QV_UNMAP, sxe2_qv_map_unmap_msg_func}, + [SXE2_VF_RDMA_MGR_CMD] = {SXE2_VF_RDMA_MGR_CMD, + sxe2_vf_rdma_mgr_msg_func}, + + [SXE2_VF_GET_RSS_KEY] = {SXE2_VF_GET_RSS_KEY, + sxe2_rss_key_get_msg_func}, + [SXE2_VF_GET_RSS_LUT] = {SXE2_VF_GET_RSS_LUT, + sxe2_rss_lut_get_msg_func}, + [SXE2_VF_SET_RSS_KEY] = {SXE2_VF_SET_RSS_KEY, + sxe2_rss_key_set_msg_func}, + [SXE2_VF_SET_RSS_LUT] = {SXE2_VF_SET_RSS_LUT, + sxe2_rss_lut_set_msg_func}, + [SXE2_VF_ADD_RSS_CFG] = {SXE2_VF_ADD_RSS_CFG, + sxe2_rss_cfg_add_msg_func}, + [SXE2_VF_CLEAR_RSS_CFG] = {SXE2_VF_CLEAR_RSS_CFG, + sxe2_rss_cfg_clear_msg_func}, + [SXE2_VF_SET_RSS_HASH_CTRL] = {SXE2_VF_SET_RSS_HASH_CTRL, + sxe2_rss_hash_ctrl_set_msg_func}, + [SXE2_VF_DEL_RSS_CFG] = {SXE2_VF_DEL_RSS_CFG, + sxe2_rss_cfg_del_msg_func}, + [SXE2_VF_ADD_DEFAULT_RSS_CFG] = {SXE2_VF_ADD_DEFAULT_RSS_CFG, + sxe2_rss_add_default_cfg_func}, + [SXE2_VF_REPLAY_RSS_CFG] = {SXE2_VF_REPLAY_RSS_CFG, + sxe2_rss_cfg_replay_func}, + + [SXE2_VF_FNAV_FILTER_ADD] = {SXE2_VF_FNAV_FILTER_ADD, + sxe2_fnav_filter_add_msg_func}, + [SXE2_VF_FNAV_FILTER_DEL] = {SXE2_VF_FNAV_FILTER_DEL, + sxe2_fnav_filter_del_msg_func}, + [SXE2_VF_FNAV_FILTER_CLEAR] = {SXE2_VF_FNAV_FILTER_CLEAR, + sxe2_fnav_filter_clear_msg_func}, + [SXE2_VF_FNAV_ALLOC_STAT] = {SXE2_VF_FNAV_ALLOC_STAT, + sxe2_fnav_stat_alloc_msg_func}, + [SXE2_VF_FNAV_FREE_STAT] = {SXE2_VF_FNAV_FREE_STAT, + sxe2_fnav_stat_free_msg_func}, + [SXE2_VF_FNAV_QUERY_STAT] = {SXE2_VF_FNAV_QUERY_STAT, + sxe2_fnav_stat_query_msg_func}, + [SXE2_VF_FNAV_MATCH_CLEAR] = {SXE2_VF_FNAV_MATCH_CLEAR, + sxe2_fnav_match_clear_msg_func}, + + [SXE2_VF_RXQ_DISABLE] = {SXE2_VF_RXQ_DISABLE, sxe2_rxq_dis_msg_func}, + [SXE2_VF_TXQ_DISABLE] = {SXE2_VF_TXQ_DISABLE, sxe2_txq_dis_msg_func}, + [SXE2_VF_GET_PTP_CLOCK] = {SXE2_VF_GET_PTP_CLOCK, + sxe2_ptp_get_time_msg_func}, + [SXE2_VF_RDMA_DUMP_PCAP] = {SXE2_VF_RDMA_DUMP_PCAP, + sxe2_vf_rdma_dump_pcap_msg_func}, + + [SXE2_VF_IPSEC_SA_ADD] = {SXE2_VF_IPSEC_SA_ADD, + sxe2_vf_ipsec_sa_add_msg_func}, + [SXE2_VF_IPSEC_SA_CLEAR] = {SXE2_VF_IPSEC_SA_CLEAR, + sxe2_vf_ipsec_sa_clear_msg_func}, + [SXE2_VF_IPSEC_GET_CAPA] = {SXE2_VF_IPSEC_GET_CAPA, + sxe2_vf_ipsec_get_capa_msg_func}, + + [SXE2_VF_GET_ETHTOOL_INFO] = {SXE2_VF_GET_ETHTOOL_INFO, + sxe2_vf_ethtool_info_get_msg_func}, +#ifdef SXE2_SUPPORT_ACL + [SXE2_VF_ACL_FILTER_ADD] = {SXE2_VF_ACL_FILTER_ADD, + sxe2_vf_acl_filter_add_msg_func}, + [SXE2_VF_ACL_FILTER_DEL] = {SXE2_VF_ACL_FILTER_DEL, + sxe2_vf_acl_filter_del_msg_func}, + [SXE2_VF_ACL_FILTER_CLEAR] = {SXE2_VF_ACL_FILTER_CLEAR, + 
sxe2_vf_acl_filter_clear_msg_func}, +#endif + [SXE2_VF_VSI_CFG] = {SXE2_VF_VSI_CFG, sxe2_vf_vsi_cfg_msg_func}, + [SXE2_VF_USER_DRIVER_RELEASE] = {SXE2_VF_USER_DRIVER_RELEASE, + sxe2_vf_user_driver_release_msg_func}, + [SXE2_VF_PASSTHROUGH_USER_VF_DATA] = {SXE2_VF_PASSTHROUGH_USER_VF_DATA, + sxe2_vf_passthrough_user_driver_data_func}, + [SXE2_VF_DRV_MODE_SET] = {SXE2_VF_DRV_MODE_SET, + sxe2_vf_drv_mode_set_func}, + [SXE2_VF_DRV_MODE_GET] = {SXE2_VF_DRV_MODE_GET, + sxe2_vf_drv_mode_get_func}, + + [0] = {0, NULL}}; + +struct sxe2_mbx_msg_table *sxe2_esw_mbx_msg_table_get(void) +{ + return &eswitch_vf_msg_table[0]; +} + +static s32 sxe2_msg_len_check(struct sxe2_mbx_msg_info *msg_info) +{ + return 0; +} + +static bool sxe2_opcode_is_support(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + return true; +} + +static bool sxe2_is_vf_init_opcode(u32 opcode) +{ + bool ret; + + switch (opcode) { + case SXE2_VF_RESET_REQUEST: + case SXE2_VF_VERSION_MATCH: + case SXE2_VF_DRV_MODE_GET: + case SXE2_VF_HW_RES_GET: + ret = true; + break; + default: + ret = false; + break; + } + + return ret; +} + +static s32 sxe2_vf_status_check(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vf->adapter; + + if (test_bit(SXE2_VF_STATE_DIS, vf->states)) { + ret = -EBUSY; + LOG_ERROR_BDF("vf:%u disabled vf states:0x%lx.\n", vf->vf_idx, + *vf->states); + return ret; + } + + if (!test_bit(SXE2_VF_STATE_ACTIVE, vf->states) && + !sxe2_is_vf_init_opcode(msg_info->opcode)) { + ret = -EINVAL; + LOG_ERROR_BDF("vf not active opcode:0x%x forbidden.\n", + msg_info->opcode); + } + + return ret; +} + +static s32 sxe2_vf_msg_check(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info) +{ + s32 ret = 0; + + (void)sxe2_msg_len_check(msg_info); + (void)sxe2_opcode_is_support(vf, msg_info); + + ret = sxe2_vf_status_check(vf, msg_info); + + return ret; +} + +static s32 sxe2_vf_msg_parse(struct sxe2_adapter *adapter, struct sxe2_recv_msg *msg, + struct sxe2_mbx_msg_info *msg_info) +{ + struct sxe2_drv_msg_hdr msg_hdr; + struct sxe2_cmd_hdr *cmd_hdr = (struct sxe2_cmd_hdr *)msg->buf; + u32 data_offset = 0; + u32 raw_len = 0; +#ifndef SXE2_CFG_RELEASE + u32 vf_id = le32_to_cpu(msg->desc.custom2); +#endif + (void)memcpy((u8 *)&msg_hdr, msg->buf + cmd_hdr->hdr_len, sizeof(msg_hdr)); + msg_info->opcode = le32_to_cpu(msg_hdr.op_code); + msg_info->msg_len = msg->buf_len; + + data_offset = cmd_hdr->hdr_len + msg_hdr.data_offset; + raw_len = msg->buf_len - data_offset; + if (le32_to_cpu(msg_hdr.data_len) != raw_len || + (msg_info->opcode >= SXE2_VF_OPCODE_NR)) { + LOG_ERROR("vf:%u desc msg len:%u and hdr msg len:%u mismatch.\n", + SXE2_VF_IDX(vf_id), raw_len, + le32_to_cpu(msg_hdr.data_len)); + return -SXE2_VF_ERR_PARAM; + } + + if (msg_info->msg_len) { + msg_info->buf = kzalloc(msg_info->msg_len, GFP_KERNEL); + if (!msg_info->buf) { + LOG_ERROR("vf:%u msg len:%u alloc failed.\n", + SXE2_VF_IDX(vf_id), raw_len); + return -SXE2_VF_ERR_NO_MEMORY; + } + + (void)memcpy(msg_info->buf, msg->buf, msg_info->msg_len); + } + LOG_INFO_BDF("vf:%u msg request opcode:0x%x raw_len:%u sid:0x%llx \t" + "trace_id:0x%llx.\n", + SXE2_VF_IDX(vf_id), msg_info->opcode, msg_info->msg_len, + le64_to_cpu(cmd_hdr->session_id), + le64_to_cpu(cmd_hdr->trace_id)); + + return 0; +} + +static s32 sxe2_vf_id_valid(struct sxe2_adapter *adapter, u32 vf_id) +{ + u8 src_type = (vf_id >> SXE2_MBX_DESC_SRC_TYPE_SHIFT) & + SXE2_MBX_DESC_SRC_TYPE_MASK; + u8 vf_idx = vf_id & SXE2_VF_IDX_MASK; + s32 
ret = 0;
+
+	if (src_type != SXE2_MBX_MSG_SRC_TYPE_VF ||
+	    vf_idx >= adapter->vf_ctxt.num_vfs) {
+		LOG_ERROR_BDF("invalid vf_id:%u num_vfs:%u maybe src_type \t"
+			      "invalid.\n",
+			      vf_id, adapter->vf_ctxt.num_vfs);
+		ret = -SXE2_VF_ERR_INVALID_VF_ID;
+	}
+
+	return ret;
+}
+
+void sxe2_cmd_vf_msg_handler(struct sxe2_adapter *adapter, struct sxe2_recv_msg *msg)
+{
+	u32 vf_id = le32_to_cpu(msg->desc.custom2);
+	struct sxe2_vf_node *vf;
+	struct sxe2_mbx_msg_info msg_info = {0};
+	struct sxe2_cmd_params param = {0};
+	s32 ret;
+	struct sxe2_cmd_hdr *cmd_hdr;
+	u64 session_id;
+
+	if (sxe2_vf_msg_parse(adapter, msg, &msg_info))
+		return;
+
+	ret = sxe2_vf_id_valid(adapter, vf_id);
+	if (ret)
+		goto l_free_msg_data;
+
+	vf_id = SXE2_VF_IDX(vf_id);
+
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_id));
+	vf = sxe2_vf_node_get(adapter, (u16)vf_id);
+	if (!vf) {
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	ret = sxe2_vf_msg_check(vf, &msg_info);
+	if (ret)
+		goto l_unlock;
+
+	if (vf->msg_table[msg_info.opcode].func) {
+		vf->msg_table[msg_info.opcode].func(vf, &msg_info);
+	} else {
+		LOG_DEV_ERR("vf:%u mbx msg opcode:0x%x invalid func NULL.\n",
+			    vf->vf_idx, msg_info.opcode);
+		ret = -SXE2_VF_ERR_PARAM;
+	}
+
+l_unlock:
+	mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_id));
+
+l_free_msg_data:
+	if (ret) {
+		cmd_hdr = (struct sxe2_cmd_hdr *)msg_info.buf;
+		/* buf may be NULL when the request carried no payload */
+		session_id = cmd_hdr ? le64_to_cpu(cmd_hdr->session_id) : 0;
+
+		LOG_ERROR_BDF("vf:%u mbx msg opcode:0x%x sid:0x%llx fail.\n", vf_id,
+			      msg_info.opcode, session_id);
+		sxe2_mbx_msg_reply_params_fill(&param, msg_info.opcode, NULL, 0,
+					       (u16)vf_id, session_id, ret);
+		(void)sxe2_mbx_msg_reply(adapter, &param);
+	}
+	kfree(msg_info.buf);
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.h
new file mode 100644
index 0000000000000000000000000000000000000000..e863b604905a30499336aacf90c204a855711aa9
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_mbx_msg.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_mbx_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MBX_MSG_H__ +#define __SXE2_MBX_MSG_H__ + +#include "sxe2.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_fnav.h" + +enum { + SXE2_MBX_MSG_SRC_TYPE_PF = 0, + SXE2_MBX_MSG_SRC_TYPE_VF, + SXE2_MBX_MSG_SRC_TYPE_VSI, +}; + +#define SXE2_MBX_DESC_SRC_TYPE_SHIFT 10 +#define SXE2_MBX_DESC_SRC_TYPE_MASK 0x3 + +struct sxe2_mbx_msg_info { + u32 opcode; + u16 msg_len; + u8 *buf; +}; + +struct sxe2_mbx_msg_table { + u32 opcode; + s32 (*func)(struct sxe2_vf_node *vf, + struct sxe2_mbx_msg_info *msg_info); +}; + +void sxe2_cmd_vf_msg_handler(struct sxe2_adapter *adapter, + struct sxe2_recv_msg *msg); + +struct sxe2_mbx_msg_table *sxe2_mbx_msg_table_get(void); + +void sxe2_notify_vf_link_state(struct sxe2_vf_node *vf); + +void sxe2_mbx_msg_params_fill(struct sxe2_cmd_params *cmd, u32 opc, + void *req_data, u32 req_len, u16 vf_idx, + bool no_resp); +struct sxe2_mbx_msg_table *sxe2_esw_mbx_msg_table_get(void); + +void sxe2_mbx_msg_table_set(struct sxe2_vf_node *vf); + +s32 sxe2_aux_reply_rdma_msg_to_vf(struct sxe2_adapter *adapter, u16 vf_id, + u8 *msg, u16 len, u64 session_id); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.c new file mode 100644 index 0000000000000000000000000000000000000000..a183799ec05be15c733fc2298385a58100e12757 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_monitor.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_tx.h" +#include "sxe2_monitor.h" +#include "sxe2_ethtool.h" +#include "sxe2_mbx_channel.h" +#include "sxe2_aux_driver.h" +#include "sxe2_ipsec.h" +#include "sxe2_arfs.h" +#include "sxe2_mbx_msg.h" +#include "sxe2_lag.h" +#include "sxe2_cmd.h" + +#define SXE2_MAX_MDD_REQ_BUF \ + (sizeof(struct sxe2_fwc_mdd_req) + \ + SXE2_VF_NUM * sizeof(struct sxe2_mdd_vf_req)) + +#define SXE2_MAX_MDD_RESP_BUF \ + (sizeof(struct sxe2_fwc_mdd_resp) + \ + SXE2_VF_NUM * sizeof(struct sxe2_mdd_vf_resp)) + +struct workqueue_struct *sxe2_workqueue; + +#ifdef SXE2_CFG_DEBUG +extern int g_pf_switch_stats; +#endif + +static void sxe2_monitor_work_complete(struct sxe2_adapter *adapter) +{ + BUG_ON(!test_bit(SXE2_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state)); + /* in order to force CPU ordering */ + smp_mb__before_atomic(); + clear_bit(SXE2_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); +} + +int sxe2_sync_mac_add(struct net_device *netdev, const u8 *addr) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_mac_sync_entry *mac_entry; + + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); + if (!mac_entry) + return -ENOMEM; + + ether_addr_copy(mac_entry->mac_addr, addr); + list_add(&mac_entry->list_entry, &vsi->mac_filter.tmp_sync_list); + + return 0; +} + +int sxe2_unsync_mac_add(struct net_device *netdev, const u8 *addr) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_mac_sync_entry *mac_entry; + + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); + if (!mac_entry) + return -ENOMEM; + + ether_addr_copy(mac_entry->mac_addr, addr); + list_add(&mac_entry->list_entry, &vsi->mac_filter.tmp_unsync_list); + + return 0; +} + +STATIC s32 
sxe2_vsi_promisc_sync_legacy(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct net_device *netdev = vsi->netdev; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + + if (!(netdev->flags & IFF_ALLMULTI) && !(netdev->flags & IFF_PROMISC)) { + if (sxe2_allmulti_rule_in_use(vsi)) { + ret = sxe2_allmulti_rule_del(adapter, vsi->idx_in_dev); + if (ret) { + LOG_DEV_ERR("delete allmulti filter failed, ret %d\n", + ret); + goto l_end; + } + } + + if (sxe2_promisc_rule_in_use(vsi)) { + ret = sxe2_promisc_rule_del(adapter, vsi->idx_in_dev); + if (ret) { + LOG_DEV_ERR("delete promisc filter failed, ret %d\n", + ret); + goto l_end; + } + } + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, + true); + if (ret) { + LOG_DEV_ERR("turn on vlan filter failed, ret %d\n", + ret); + goto l_end; + } + } + } else if ((netdev->flags & IFF_ALLMULTI) && + !(netdev->flags & IFF_PROMISC)) { + if (!sxe2_allmulti_rule_in_use(vsi) && + !adapter->user_pf_ctxt.is_allmulti_set) { + ret = sxe2_allmulti_rule_add(vsi); + if (ret) { + LOG_DEV_ERR("add allmulti filter failed, ret %d\n", + ret); + goto l_end; + } + } + + if (sxe2_promisc_rule_in_use(vsi)) { + ret = sxe2_promisc_rule_del(adapter, vsi->idx_in_dev); + if (ret) { + LOG_DEV_ERR("delete promisc filter failed, ret %d\n", + ret); + goto l_end; + } + } + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, + true); + if (ret) { + LOG_DEV_ERR("turn on vlan filter failed, ret %d\n", + ret); + goto l_end; + } + } + } else { + if (!sxe2_allmulti_rule_in_use(vsi) && + !adapter->user_pf_ctxt.is_allmulti_set) { + ret = sxe2_allmulti_rule_add(vsi); + if (ret) { + LOG_DEV_ERR("add allmulti filter failed, ret %d\n", + ret); + goto l_end; + } + } + + if (!sxe2_promisc_rule_in_use(vsi) && + !adapter->user_pf_ctxt.is_promisc_set) { + ret = sxe2_promisc_rule_add(vsi); + if (ret) { + LOG_DEV_ERR("add promisc filter failed, ret %d\n", + ret); + goto l_end; + } + } + + ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, false); + if (ret) { + LOG_DEV_ERR("turn off vlan filter failed, ret %d\n", ret); + goto l_end; + } + } + +l_end: + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); + + return ret; +} + +STATIC s32 sxe2_vsi_l2_fltr_sync(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct net_device *netdev = vsi->netdev; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_mac_sync_entry *list_itr = NULL; + struct sxe2_mac_sync_entry *tmp = NULL; + struct sxe2_mac_filter *mac_filter = &vsi->mac_filter; + + if (!vsi->netdev) + return 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + mutex_lock(&mac_filter->sync_lock); + + INIT_LIST_HEAD(&mac_filter->tmp_sync_list); + INIT_LIST_HEAD(&mac_filter->tmp_unsync_list); + + if (test_bit(SXE2_VSI_S_MAC_FLTR_CHANGED, vsi->state)) { + clear_bit(SXE2_VSI_S_MAC_FLTR_CHANGED, vsi->state); + netif_addr_lock_bh(netdev); + (void)__dev_uc_sync(netdev, sxe2_sync_mac_add, sxe2_unsync_mac_add); + (void)__dev_mc_sync(netdev, sxe2_sync_mac_add, sxe2_unsync_mac_add); + netif_addr_unlock_bh(netdev); + } + + list_for_each_entry_safe(list_itr, tmp, &mac_filter->tmp_sync_list, + list_entry) { + ret = sxe2_mac_addr_add(vsi, list_itr->mac_addr, + SXE2_MAC_OWNER_UC_MC); + if (ret) + LOG_DEV_ERR("add mac filter failed, addr %pM, ret %d\n", + list_itr->mac_addr, ret); + list_del(&list_itr->list_entry); + kfree(list_itr); + } + + list_for_each_entry_safe(list_itr, tmp, &mac_filter->tmp_unsync_list, 
+				 list_entry) {
+		ret = sxe2_mac_addr_del(vsi, list_itr->mac_addr,
+					SXE2_MAC_OWNER_UC_MC);
+		if (ret)
+			LOG_DEV_ERR("delete mac filter failed, addr %pM, ret %d\n",
+				    list_itr->mac_addr, ret);
+		list_del(&list_itr->list_entry);
+		kfree(list_itr);
+	}
+	mutex_unlock(&mac_filter->sync_lock);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	ret = 0;
+	if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) {
+		rtnl_lock();
+		ret = sxe2_vsi_promisc_sync_legacy(vsi);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+STATIC void sxe2_l2_fltr_sync(struct sxe2_adapter *adapter)
+{
+	struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi;
+
+	if (!test_bit(SXE2_FLAG_FLTR_SYNC, adapter->flags))
+		return;
+
+	if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) {
+		LOG_ERROR("can't sync l2 fltr. device not ready\n");
+		return;
+	}
+
+	clear_bit(SXE2_FLAG_FLTR_SYNC, adapter->flags);
+
+	if (test_bit(SXE2_VSI_S_MAC_FLTR_CHANGED, vsi->state))
+		(void)sxe2_vsi_l2_fltr_sync(vsi);
+}
+
+static void sxe2_stats_update_work(struct sxe2_adapter *adapter)
+{
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_stats_update(adapter);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	sxe2_repr_vf_vsis_stats_acculate_update(adapter);
+}
+
+STATIC void sxe2_mtu_changed_handler(struct sxe2_adapter *adapter)
+{
+	if (test_and_clear_bit(SXE2_FLAG_MTU_CHANGED, adapter->flags))
+		(void)sxe2_rdma_aux_send_mtu_changed_event(adapter);
+}
+
+static void sxe2_mdd_req_fill(struct sxe2_adapter *adapter,
+			      struct sxe2_fwc_mdd_req *req)
+{
+	struct sxe2_vf_node *vf_node;
+	u16 idx;
+	u16 i = 0;
+	struct sxe2_mdd_vf_req *vf = req->vfs;
+
+	sxe2_for_each_vf(adapter, idx)
+	{
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx));
+		vf_node = sxe2_vf_node_get(adapter, idx);
+		if (!vf_node) {
+			mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx));
+			continue;
+		}
+
+		vf->vf_idx = cpu_to_le16(vf_node->vf_idx);
+
+		vf++;
+		i++;
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx));
+	}
+
+	req->vf_cnt = cpu_to_le16(i);
+}
+
+STATIC s32 sxe2_fwc_vf_mdd_check(struct sxe2_adapter *adapter,
+				 struct sxe2_fwc_mdd_req *req, u32 req_len,
+				 struct sxe2_fwc_mdd_resp *resp, u32 resp_len)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_MDD_CHECK, req, req_len, resp,
+				  resp_len);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("vsi mdd check failed, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static void sxe2_vf_mdd_reply_process(struct sxe2_adapter *adapter,
+				      struct sxe2_fwc_mdd_resp *resp)
+{
+	u32 i;
+	u16 vf_idx;
+	u16 pf_idx = adapter->pf_idx;
+	u32 mdd_tx_event;
+	u8 mdd_rx_event;
+	struct sxe2_mdd_vf_resp *mdd_vfs = resp->mdd_vfs;
+	struct sxe2_vf_node *vf_node;
+
+	mdd_tx_event = le32_to_cpu(resp->pf_mdd_tx_event);
+	mdd_rx_event = resp->pf_mdd_rx_event;
+	if (mdd_tx_event) {
+		LOG_DEV_WARN("Malicious Driver Detection Tx event on PF# %d.\n",
+			     pf_idx);
+		LOG_DEV_WARN("pf mdd event 0x%04x detected.\n", mdd_tx_event);
+	}
+
+	if (mdd_rx_event) {
+		LOG_DEV_WARN("Malicious Driver Detection Rx event on PF# %d.\n",
+			     pf_idx);
+		LOG_DEV_WARN("pf mdd rx event 0x%04x detected.\n", mdd_rx_event);
+	}
+
+	mdd_tx_event = le32_to_cpu(resp->vf_mdd_tx_event);
+	mdd_rx_event = resp->vf_mdd_rx_event;
+	if (mdd_tx_event || mdd_rx_event) {
+		LOG_DEV_WARN("vf mdd tx event 0x%04x rx event 0x%04x detected.\n",
+			     mdd_tx_event, mdd_rx_event);
+
+		for (i = 0; i < resp->mdd_vf_cnt; i++) {
+			vf_idx = le16_to_cpu(mdd_vfs[i].vf_idx);
+
+			if (sxe2_vf_id_check(adapter, vf_idx)) {
+				LOG_ERROR_BDF("invalid mdd vf_idx:%u.\n", vf_idx);
+				continue;
+			}
+
+			mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+			vf_node =
sxe2_vf_node_get(adapter, vf_idx); + if (!vf_node) { + LOG_ERROR_BDF("vf %d node is null.\n", vf_idx); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + continue; + } + + if (sxe2_check_vf_ready_for_cfg(vf_node)) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + continue; + } + + LOG_DEV_WARN("vf %d mdd tx event 0x%04x rx event 0x%04x mdd\t" + "%d detected.\n", + vf_idx, mdd_tx_event, mdd_rx_event, + mdd_vfs[i].mdd); + if (mdd_vfs[i].mdd & SXE2_MDD_TYPE_TX) { + LOG_DEV_WARN("Malicious Driver Detection Tx event\t" + "on PF# %d VF# %d Mac %pM.\t" + "mdd-auto-reset-vfs=%s\n", + pf_idx, vf_idx, vf_node->mac_addr.addr, + test_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, + adapter->flags) + ? "on" + : "off"); + } + + if (mdd_vfs[i].mdd & SXE2_MDD_TYPE_RX) { + LOG_DEV_WARN("Malicious Driver Detection Rx event " + "on PF# %d VF# %d Mac %pM. " + "mdd-auto-reset-vfs=%s\n", + pf_idx, vf_idx, vf_node->mac_addr.addr, + test_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, + adapter->flags) + ? "on" + : "off"); + } + + if (test_bit(SXE2_FLAG_MDD_AUTO_RESET_VF, adapter->flags)) { + LOG_DEV_WARN("vf #%d detected mdd event reset vf now.\n", + vf_idx); + (void)sxe2_reset_vf(adapter, vf_idx, 0); + } + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + } +} + +bool sxe2_get_pf_link_status(struct sxe2_adapter *adapter) +{ + bool link_status; + + u32 val = SXE2_REG_READ(&adapter->hw, SXE2_LINK_STATUS_BASE); + + if (val == SXE2_REG_INVALID_VALUE) { + link_status = SXE2_LINK_DOWN; + goto end; + } + + switch (adapter->port_idx) { + case SXE2_PORT0_CNT: + if ((val >> SXE2_LINK_STATUS_PORT0_POS) & SXE2_LINK_STATUS_MASK) + link_status = SXE2_LINK_UP; + else + link_status = SXE2_LINK_DOWN; + break; + case SXE2_PORT1_CNT: + if ((val >> SXE2_LINK_STATUS_PORT1_POS) & SXE2_LINK_STATUS_MASK) + link_status = SXE2_LINK_UP; + else + link_status = SXE2_LINK_DOWN; + break; + case SXE2_PORT2_CNT: + if ((val >> SXE2_LINK_STATUS_PORT2_POS) & SXE2_LINK_STATUS_MASK) + link_status = SXE2_LINK_UP; + else + link_status = SXE2_LINK_DOWN; + break; + case SXE2_PORT3_CNT: + if ((val >> SXE2_LINK_STATUS_PORT3_POS) & SXE2_LINK_STATUS_MASK) + link_status = SXE2_LINK_UP; + else + link_status = SXE2_LINK_DOWN; + break; + default: + link_status = SXE2_LINK_DOWN; + goto end; + } + +end: + return link_status; +} + +u32 sxe2_get_link_speed(struct sxe2_adapter *adapter) +{ + u32 link_speed, val, reg; + + reg = SXE2_REG_READ(&adapter->hw, SXE2_LINK_SPEED_BASE); + if (reg == SXE2_REG_INVALID_VALUE) { + link_speed = SXE2_LINK_SPEED_UNKNOWN; + goto end; + } + switch (adapter->port_idx) { + case SXE2_PORT0_CNT: + val = (reg >> SXE2_LINK_SPEED_PORT0_POS) & SXE2_LINK_SPEED_MASK; + if (val == SXE2_LINK_REG_GET_10G_VALUE) + link_speed = SXE2_LINK_SPEED_10G; + else if (val == SXE2_LINK_REG_GET_25G_VALUE) + link_speed = SXE2_LINK_SPEED_25G; + else if (val == SXE2_LINK_REG_GET_50G_VALUE) + link_speed = SXE2_LINK_SPEED_50G; + else if (val == SXE2_LINK_REG_GET_100G_VALUE) + link_speed = SXE2_LINK_SPEED_100G; + else + link_speed = SXE2_LINK_SPEED_UNKNOWN; + + break; + case SXE2_PORT1_CNT: + val = (reg >> SXE2_LINK_SPEED_PORT1_POS) & SXE2_LINK_SPEED_MASK; + if (val == SXE2_LINK_REG_GET_10G_VALUE) + link_speed = SXE2_LINK_SPEED_10G; + else if (val == SXE2_LINK_REG_GET_25G_VALUE) + link_speed = SXE2_LINK_SPEED_25G; + else + link_speed = SXE2_LINK_SPEED_UNKNOWN; + + break; + case SXE2_PORT2_CNT: + val = (reg >> SXE2_LINK_SPEED_PORT2_POS) & SXE2_LINK_SPEED_MASK; + if (val == SXE2_LINK_REG_GET_10G_VALUE) + link_speed = SXE2_LINK_SPEED_10G; + else if (val == 
SXE2_LINK_REG_GET_25G_VALUE) + link_speed = SXE2_LINK_SPEED_25G; + else if (val == SXE2_LINK_REG_GET_50G_VALUE) + link_speed = SXE2_LINK_SPEED_50G; + else + link_speed = SXE2_LINK_SPEED_UNKNOWN; + + break; + case SXE2_PORT3_CNT: + val = (reg >> SXE2_LINK_SPEED_PORT3_POS) & SXE2_LINK_SPEED_MASK; + if (val == SXE2_LINK_REG_GET_10G_VALUE) + link_speed = SXE2_LINK_SPEED_10G; + else if (val == SXE2_LINK_REG_GET_25G_VALUE) + link_speed = SXE2_LINK_SPEED_25G; + else + link_speed = SXE2_LINK_SPEED_UNKNOWN; + + break; + default: + link_speed = SXE2_LINK_SPEED_UNKNOWN; + goto end; + } +end: + return link_speed; +} + +static bool sxe2_pf_carrier(struct sxe2_adapter *adapter) +{ + bool link_status; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct net_device *netdev = NULL; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (!vsi || !vsi->netdev) { + link_status = false; + goto l_end; + } + + netdev = vsi->netdev; + mutex_lock(&adapter->link_ctxt.link_status_lock); + link_status = sxe2_get_pf_link_status(adapter); + if (link_status) { + if (netif_running(netdev) && !netif_carrier_ok(netdev) && + !test_bit(SXE2_VSI_S_DOWN, vsi->state)) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + LOG_DEV_INFO("nic link is up\n"); + } + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + LOG_DEV_INFO("nic link is down\n"); + } + mutex_unlock(&adapter->link_ctxt.link_status_lock); + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return link_status; +} + +STATIC void sxe2_linkchk_notify_vfs(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf; + u32 bkt; + + sxe2_for_each_vf(adapter, bkt) + { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, bkt)); + vf = sxe2_vf_node_get(adapter, (u16)bkt); + if (!vf) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, bkt)); + continue; + } + + if (sxe2_check_vf_ready_for_cfg(vf)) { + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, bkt)); + continue; + } + + sxe2_notify_vf_link_state(vf); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, bkt)); + } +} + +STATIC void sxe2_linkchk_handler(struct sxe2_adapter *adapter) +{ + if (test_and_clear_bit(SXE2_FLAG_LINK_CHECK, adapter->flags)) { + LOG_INFO_BDF("process link check task.\n"); + (void)sxe2_pf_carrier(adapter); + (void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt, + SXE2_COM_EC_LINK_CHG); + sxe2_linkchk_notify_vfs(adapter); + } +} + +STATIC void sxe2_mdd_work(struct sxe2_adapter *adapter) +{ + u8 *req_buf; + u8 *resp_buf; + struct sxe2_fwc_mdd_req *req; + struct sxe2_fwc_mdd_resp *resp; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 req_len, resp_len; + + req_buf = devm_kzalloc(dev, SXE2_MAX_MDD_REQ_BUF, GFP_KERNEL); + if (!req_buf) + return; + + resp_buf = devm_kzalloc(dev, SXE2_MAX_MDD_RESP_BUF, GFP_KERNEL); + if (!resp_buf) { + devm_kfree(dev, req_buf); + return; + } + + mutex_lock(&adapter->vf_ctxt.vfs_lock); + req = (struct sxe2_fwc_mdd_req *)req_buf; + resp = (struct sxe2_fwc_mdd_resp *)resp_buf; + + if (test_and_clear_bit(SXE2_FLAG_MDD_TX_PENDING, adapter->flags)) + req->mdd_check = SXE2_MDD_TYPE_TX; + + if (test_and_clear_bit(SXE2_FLAG_MDD_RX_PENDING, adapter->flags)) + req->mdd_check = SXE2_MDD_TYPE_RX; + + if (req->mdd_check == 0) + goto l_free; + + sxe2_mdd_req_fill(adapter, req); + req_len = (u32)(sizeof(struct sxe2_fwc_mdd_req) + + pci_num_vf(adapter->pdev) * sizeof(struct sxe2_mdd_vf_req)); + resp_len = SXE2_MAX_MDD_RESP_BUF; + + if (sxe2_fwc_vf_mdd_check(adapter, req, req_len, resp, resp_len)) { + LOG_ERROR_BDF("vsi mdd check failed.\n"); + goto l_free; + } + + 
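/* log the firmware-reported MDD events and, when mdd-auto-reset-vfs is on, reset the flagged VFs */ +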
sxe2_vf_mdd_reply_process(adapter, resp); + +l_free: + mutex_unlock(&adapter->vf_ctxt.vfs_lock); + devm_kfree(dev, req_buf); + devm_kfree(dev, resp_buf); +} + +STATIC void sxe2_dcbx_agent_status_proc(struct sxe2_adapter *adapter) +{ + if (test_bit(SXE2_FLAG_FW_DCBX_DIS_PENDING, adapter->flags) && + test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) { + rtnl_lock(); + (void)sxe2_dcbx_agent_disable(adapter); + rtnl_unlock(); + clear_bit(SXE2_FLAG_FW_DCBX_DIS_PENDING, adapter->flags); + } +} + +void sxe2_work_cb(struct work_struct *work) +{ + struct sxe2_monitor_context *monitor = + container_of(work, struct sxe2_monitor_context, work); + struct sxe2_adapter *adapter = + container_of(monitor, struct sxe2_adapter, monitor_ctxt); + unsigned long start_time = jiffies; + + sxe2_fwd_del_macvlan_deay(adapter); + +#ifdef SXE2_CFG_DEBUG + if (g_pf_switch_stats) + sxe2_stats_update_work(adapter); +#else + sxe2_stats_update_work(adapter); +#endif + + sxe2_l2_fltr_sync(adapter); + + sxe2_mtu_changed_handler(adapter); + + sxe2_aux_aeq_overflow_handler(adapter); + + sxe2_mdd_work(adapter); + + sxe2_linkchk_handler(adapter); + + sxe2_tx_hang_check_subtask(adapter); + + sxe2_lag_proc(adapter); + + if (sxe2_is_safe_mode(adapter)) + goto l_complete; + + sxe2_arfs_filters_sync(adapter); + + sxe2_dcbx_agent_status_proc(adapter); + +l_complete: + sxe2_monitor_work_complete(adapter); + + if (time_after(jiffies, (start_time + adapter->monitor_ctxt.period))) + mod_timer(&adapter->monitor_ctxt.timer, jiffies); +} + +void sxe2_monitor_work_schedule(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->monitor_ctxt.lock, flags); + if (!test_bit(SXE2_MONITOR_WORK_DISABLED, &adapter->monitor_ctxt.state) && + !test_and_set_bit(SXE2_MONITOR_WORK_SCHED, + &adapter->monitor_ctxt.state)) { + sxe2_queue_work(adapter, sxe2_workqueue, + &adapter->monitor_ctxt.work); + } + spin_unlock_irqrestore(&adapter->monitor_ctxt.lock, flags); +} + +STATIC void sxe2_timer_cb(struct timer_list *timer) +{ + struct sxe2_monitor_context *monitor = + container_of(timer, struct sxe2_monitor_context, timer); + struct sxe2_adapter *adapter = + container_of(monitor, struct sxe2_adapter, monitor_ctxt); + + mod_timer(&adapter->monitor_ctxt.timer, + adapter->monitor_ctxt.period + jiffies); + + sxe2_monitor_work_schedule(adapter); + (void)monitor; +} + +void sxe2_monitor_init(struct sxe2_adapter *adapter) +{ + + adapter->monitor_ctxt.period = SXE2_NORMAL_TIMER_PERIOD; + + set_bit(SXE2_MONITOR_WORK_DISABLED, &adapter->monitor_ctxt.state); + + INIT_WORK(&adapter->monitor_ctxt.work, sxe2_work_cb); + + timer_setup(&adapter->monitor_ctxt.timer, sxe2_timer_cb, 0); +} + +void sxe2_monitor_stop(struct sxe2_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->monitor_ctxt.lock, flags); + set_bit(SXE2_MONITOR_WORK_DISABLED, &adapter->monitor_ctxt.state); + spin_unlock_irqrestore(&adapter->monitor_ctxt.lock, flags); + + if (adapter->monitor_ctxt.timer.function) + del_timer_sync(&adapter->monitor_ctxt.timer); + if (adapter->monitor_ctxt.work.func) + cancel_work_sync(&adapter->monitor_ctxt.work); + + clear_bit(SXE2_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); +} + +void sxe2_monitor_start(struct sxe2_adapter *adapter) +{ + set_bit(SXE2_FLAG_LINK_CHECK, adapter->flags); + + clear_bit(SXE2_MONITOR_WORK_DISABLED, &adapter->monitor_ctxt.state); + sxe2_monitor_work_schedule(adapter); + mod_timer(&adapter->monitor_ctxt.timer, + adapter->monitor_ctxt.period + jiffies); +} + +s32 sxe2_monitor_create(void) +{ + 
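/* a single module-wide workqueue serves the monitor work of all adapters */ +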
s32 ret = 0; + + sxe2_workqueue = alloc_workqueue("%s", 0, 0, SXE2_DRV_NAME); + if (!sxe2_workqueue) { + LOG_PR_ERR("failed to create workqueue\n"); + ret = -ENOMEM; + } + + return ret; +} + +void sxe2_monitor_destroy(void) +{ + destroy_workqueue(sxe2_workqueue); + sxe2_workqueue = NULL; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.h new file mode 100644 index 0000000000000000000000000000000000000000..5636d5ef1e311e337cdde2a9100acad56a9f38b9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_monitor.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_monitor.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MONITOR_H__ +#define __SXE2_MONITOR_H__ + +#include + +#define SXE2_NORMAL_TIMER_PERIOD (2 * HZ) + +#ifdef SXE2_TEST +#define STATIC +#else +#define STATIC static +#endif + +enum sxe2_monitor_task_state { + SXE2_MONITOR_WORK_INITED, + + SXE2_MONITOR_WORK_SCHED, + SXE2_MONITOR_WORK_DISABLED, +}; + +struct sxe2_monitor_context { + struct timer_list timer; + unsigned long period; + unsigned long state; + struct work_struct work; + spinlock_t lock; +}; + +enum sxe2_link_get_speed { + SXE2_LINK_SPEED_UNKNOWN = 0, + SXE2_LINK_SPEED_10G = 10000, + SXE2_LINK_SPEED_25G = 25000, + SXE2_LINK_SPEED_50G = 50000, + SXE2_LINK_SPEED_100G = 100000, + SXE2_LINK_SPEED_AUTO = 200000, +}; + +enum sxe2_speed_cap { + LINK_SPEED_UNKNOWN, + LINK_SPEED_1G = 0x1, + LINK_SPEED_10G = 0x2, + LINK_SPEED_25G = 0x4, + LINK_SPEED_50G = 0x8, + LINK_SPEED_100G = 0x10, +}; + +enum sxe2_link_get_status { + SXE2_LINK_DOWN = 0, + SXE2_LINK_UP = 1, + SXE2_LINK_ERROR = 15, +}; + +struct sxe2_mac_sync_entry { + struct list_head list_entry; + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_cmd_link_context { + u32 fec; + u32 current_link_speed; + bool tx_fc; + bool rx_fc; + struct mutex link_status_lock; +}; + +void sxe2_monitor_init(struct sxe2_adapter *adapter); + +void sxe2_monitor_work_schedule(struct sxe2_adapter *adapter); + +void sxe2_work_cb(struct work_struct *work); + +void sxe2_monitor_stop(struct sxe2_adapter *adapter); + +void sxe2_monitor_start(struct sxe2_adapter *adapter); + +s32 sxe2_monitor_create(void); + +void sxe2_monitor_destroy(void); + +int sxe2_sync_mac_add(struct net_device *netdev, const u8 *addr); + +int sxe2_unsync_mac_add(struct net_device *netdev, const u8 *addr); + +#if defined(SXE2_HARDWARE_ASIC) +u32 sxe2_get_link_speed(struct sxe2_adapter *adapter); + +bool sxe2_get_pf_link_status(struct sxe2_adapter *adapter); +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_msglevel.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_msglevel.h new file mode 100644 index 0000000000000000000000000000000000000000..1044b12ef37e7d02dd9cf25df63d26190ef6674d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_msglevel.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_msglevel.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MSGLEVEL_H__ +#define __SXE2_MSGLEVEL_H__ + +#define SXE2_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define SXE2_DBG_USER BIT_ULL(31) + +struct sxe2_msglevel_context { + u32 msg_enable; + u64 debug_mask; +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.c new file mode 100644 index 0000000000000000000000000000000000000000..12528e879b73ebbd0fa4eaa3a63dff0498adf5c6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.c @@ -0,0 +1,2930 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_netdev.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_tx.h" +#include "sxe2_rx.h" +#include "sxe2_log.h" +#include "sxe2_netdev.h" +#include "sxe2_common.h" +#include "sxe2_dcb_nl.h" +#include "sxe2_ethtool.h" +#include "sxe2_txsched.h" +#include "sxe2_macvlan.h" +#include "sxe2_tc.h" +#include "sxe2_sriov.h" +#include "sxe2_mbx_msg.h" +#include "sxe2_ipsec.h" +#include "sxe2_fnav.h" +#include "sxe2_dev_ctrl.h" +#include "sxe2_xsk.h" +#include "sxe2_monitor.h" +#include "sxe2_linkchg.h" +#include "sxe2_cmd.h" + +#define SXE2_SET_FEATURE(features, feature, enable) \ + do { \ + typeof(feature) _feature = (feature); \ + if (enable) \ + *features |= _feature; \ + else \ + *features &= ~_feature; \ + } while (0) + +static inline int sxe2_conflict_features_chk(u64 changed_features, u64 features, + u64 con1, u64 con2) +{ + if ((changed_features & con1 && features & con1) && + (changed_features & con2 && features & con2)) + return -EINVAL; + + return 0; +} + +static int sxe2_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct sxe2_netdev_priv *netpriv = netdev_priv(dev); + struct sxe2_adapter *adapter = netpriv->vsi->adapter; + + switch (cmd) { + case SIOCSHWTSTAMP: + return sxe2_ptp_hwts_set(adapter, ifr); + case SIOCGHWTSTAMP: + return sxe2_ptp_hwts_get(adapter, ifr); + default: + break; + } + return -EOPNOTSUPP; +} + +void sxe2_fetch_u64_data_per_ring(struct u64_stats_sync *syncp, + struct sxe2_queue_stats *stats, u64 *pkts, + u64 *bytes) +{ + u32 start; + + do { + start = u64_stats_fetch_begin(syncp); + *pkts = stats->packets; + *bytes = stats->bytes; + } while (u64_stats_fetch_retry(syncp, start)); +} + +#ifdef HAVE_RTNL_LINK_NDO_GET_STATS64 +STATIC struct rtnl_link_stats64 *sxe2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +STATIC void sxe2_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_sw_vsi_stats_update(vsi); + + stats->tx_packets = vsi->vsi_stats.vsi_sw_stats.tx_packets; + stats->rx_packets = vsi->vsi_stats.vsi_sw_stats.rx_packets; + stats->tx_bytes = vsi->vsi_stats.vsi_sw_stats.tx_bytes; + stats->rx_bytes = vsi->vsi_stats.vsi_sw_stats.rx_bytes; + + stats->multicast = vsi->vsi_stats.vsi_hw_stats.rx_vsi_multicast_packets; + stats->rx_crc_errors = adapter->pf_stats.pf_hw_stats.rx_crc_errors; + + if (vsi->type == SXE2_VSI_T_PF) { + stats->rx_errors = adapter->pf_stats.pf_hw_stats.rx_crc_errors + + 
adapter->pf_stats.pf_hw_stats.rx_illegal_bytes +
+			adapter->pf_stats.pf_hw_stats.rx_len_errors +
+			adapter->pf_stats.pf_hw_stats.rx_undersize_good +
+			vsi->vsi_stats.vsi_sw_stats.rx_csum_err +
+			adapter->pf_stats.pf_hw_stats.rx_oversize_good +
+			vsi->vsi_stats.vsi_sw_stats.rx_pkts_sw_drop;
+		stats->rx_length_errors =
+			adapter->pf_stats.pf_hw_stats.rx_len_errors;
+		stats->rx_missed_errors =
+			adapter->pf_stats.pf_hw_stats.rx_out_of_buffer;
+	}
+
+#ifdef HAVE_RTNL_LINK_NDO_GET_STATS64
+	return stats;
+#endif
+}
+
+#ifdef HAVE_XDP_SUPPORT
+STATIC s32 sxe2_max_xdp_frame_size(struct sxe2_vsi *vsi)
+{
+	if (PAGE_SIZE >= 8192 ||
+	    test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, vsi->adapter->flags))
+		return SXE2_RXBUF_2048 - XDP_PACKET_HEADROOM;
+	else
+		return SXE2_RXBUF_3072;
+}
+#endif
+
+s32 sxe2_set_mtu_cfg(struct sxe2_adapter *adapter, u32 set_mtu)
+{
+	s32 ret = 0;
+	struct sxe2_cmd_params cmd = {0};
+	struct sxe2_fw_mtu_info mtu = {0};
+
+	mtu.is_set_hw = false;
+	mtu.mtu = set_mtu;
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MAC_MTU_SET, &mtu,
+				  sizeof(struct sxe2_fw_mtu_info), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_DEV_ERR("failed to set mtu, ret=%d\n", ret);
+		ret = -EIO;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static int sxe2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	s32 ret = 0;
+	struct sxe2_netdev_priv *priv = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = priv->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	u32 old_mtu = netdev->mtu;
+
+	if (new_mtu < ETH_MIN_MTU || new_mtu > SXE2_MAX_MTU) {
+		LOG_NETDEV_ERR("new MTU invalid. mtu range is %d-%d", ETH_MIN_MTU,
+			       SXE2_MAX_MTU);
+		return -EINVAL;
+	}
+
+	if (new_mtu == (int)netdev->mtu) {
+		LOG_NETDEV_WARN("MTU is already %u\n", netdev->mtu);
+		return 0;
+	}
+
+#ifdef HAVE_XDP_SUPPORT
+	if (sxe2_xdp_is_enable(vsi)) {
+		int frame_size = sxe2_max_xdp_frame_size(vsi);
+
+		if (new_mtu + SXE2_PACKET_HDR_PAD > frame_size) {
+			LOG_NETDEV_ERR("max MTU for XDP usage is %d\n",
+				       frame_size - SXE2_PACKET_HDR_PAD);
+			return -EINVAL;
+		}
+	}
+#endif
+
+	netdev->mtu = (unsigned int)new_mtu;
+	ret = sxe2_set_mtu_cfg(adapter, netdev->mtu);
+	if (ret) {
+		LOG_NETDEV_ERR("failed to set mtu:%d, ret: %d\n", netdev->mtu,
+			       ret);
+	}
+	ret = sxe2_vsi_down_up(vsi);
+	if (ret) {
+		netdev->mtu = old_mtu;
+		LOG_NETDEV_ERR("changing MTU from %u to %d failed.\n", old_mtu,
+			       new_mtu);
+		goto l_end;
+	}
+
+	LOG_NETDEV_DEBUG("changed MTU to %d successfully\n", new_mtu);
+
+	if (new_mtu > SXE2_IPSEC_PAYLOAD_LIMIT &&
+	    sxe2_is_ipsec_offload_enable(netdev)) {
+		LOG_NETDEV_WARN("SXE2: the maximum encryption length of IPsec is "
+				"2k.\n"
+				"If the packet length is greater than 2k, "
+				"the hardware ipsec offloading may fail.\n");
+	}
+
+	set_bit(SXE2_FLAG_MTU_CHANGED, adapter->flags);
+
+l_end:
+	return ret;
+}
+
+void sxe2_set_vlan_offload_features(struct sxe2_vsi *vsi,
+				    netdev_features_t current_features,
+				    netdev_features_t requested_features)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	netdev_features_t current_stag_strip;
+	netdev_features_t requested_stag_strip;
+	netdev_features_t current_ctag_strip;
+	netdev_features_t requested_ctag_strip;
+	netdev_features_t current_stag_insert;
+	netdev_features_t requested_stag_insert;
+	netdev_features_t current_ctag_insert;
+	netdev_features_t requested_ctag_insert;
+
+	current_stag_strip = current_features & NETIF_F_HW_VLAN_STAG_RX;
+	requested_stag_strip = requested_features & NETIF_F_HW_VLAN_STAG_RX;
+	current_ctag_strip =
current_features & NETIF_F_HW_VLAN_CTAG_RX; + requested_ctag_strip = requested_features & NETIF_F_HW_VLAN_CTAG_RX; + current_stag_insert = current_features & NETIF_F_HW_VLAN_STAG_TX; + requested_stag_insert = requested_features & NETIF_F_HW_VLAN_STAG_TX; + current_ctag_insert = current_features & NETIF_F_HW_VLAN_CTAG_TX; + requested_ctag_insert = requested_features & NETIF_F_HW_VLAN_CTAG_TX; + + if (current_ctag_strip != requested_ctag_strip && !requested_ctag_strip) + (void)sxe2_hw_desc_vlan_strip_switch(hw, vsi->idx_in_dev, + ETH_P_8021Q, false, false); + else if ((current_stag_strip != requested_stag_strip) && + !requested_stag_strip) + (void)sxe2_hw_desc_vlan_strip_switch(hw, vsi->idx_in_dev, + ETH_P_8021AD, false, false); + + if (current_ctag_strip != requested_ctag_strip && requested_ctag_strip) + (void)sxe2_hw_desc_vlan_strip_switch(hw, vsi->idx_in_dev, + ETH_P_8021Q, false, true); + else if ((current_stag_strip != requested_stag_strip) && + requested_stag_strip) + (void)sxe2_hw_desc_vlan_strip_switch(hw, vsi->idx_in_dev, + ETH_P_8021AD, false, true); + + if (current_ctag_insert != requested_ctag_insert && !requested_ctag_insert) + (void)sxe2_hw_desc_vlan_insert_switch(hw, vsi->idx_in_dev, + ETH_P_8021Q, false, false); + else if ((current_stag_insert != requested_stag_insert) && + !requested_stag_insert) + (void)sxe2_hw_desc_vlan_insert_switch(hw, vsi->idx_in_dev, + ETH_P_8021AD, false, false); + + if (current_ctag_insert != requested_ctag_insert && requested_ctag_insert) + (void)sxe2_hw_desc_vlan_insert_switch(hw, vsi->idx_in_dev, + ETH_P_8021Q, false, true); + else if ((current_stag_insert != requested_stag_insert) && + requested_stag_insert) + (void)sxe2_hw_desc_vlan_insert_switch(hw, vsi->idx_in_dev, + ETH_P_8021AD, false, true); +} + +s32 sxe2_set_vlan_filter_features(struct sxe2_vsi *vsi, netdev_features_t features) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + return ret; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) { + if (vsi->netdev && !(vsi->netdev->flags & IFF_PROMISC)) + ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, + true); + } else { + ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, false); + } + return ret; +} + +static s32 sxe2_set_vlan_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + netdev_features_t current_features, requested_features; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = np->vsi->adapter; + s32 ret = 0; + + current_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; + requested_features = features & NETIF_VLAN_OFFLOAD_FEATURES; + if (current_features ^ requested_features) { + sxe2_set_vlan_offload_features(vsi, current_features, + requested_features); + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_CTAG_RX, + (features & NETIF_F_HW_VLAN_CTAG_RX)); + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_CTAG_TX, + (features & NETIF_F_HW_VLAN_CTAG_TX)); + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_STAG_RX, + (features & NETIF_F_HW_VLAN_STAG_RX)); + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_STAG_TX, + (features & NETIF_F_HW_VLAN_STAG_TX)); + } + + current_features = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + requested_features = features & NETIF_VLAN_FILTERING_FEATURES; + if (current_features ^ requested_features) { + ret = sxe2_set_vlan_filter_features(vsi, 
features); + if (!ret) { + SXE2_SET_FEATURE(oper_features, + NETIF_VLAN_FILTERING_FEATURES, + (features & NETIF_VLAN_FILTERING_FEATURES)); + } + } + + LOG_DEBUG_BDF("current features %llx, request features %llx\n", + netdev->features, features); + return ret; +} + +static s32 sxe2_set_lro_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + bool need_reinit = false; + s32 ret = 0; + bool lro_ena = !!(features & NETIF_F_LRO); + bool old_lro_feature = (bool)test_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags); + + if (!(features & NETIF_F_LRO)) { + if (test_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags)) { + need_reinit = true; + clear_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags); + LOG_DEBUG_BDF("lro disabled and need reinit\n"); + } + } else { + if (!(features & NETIF_F_RXCSUM)) { + LOG_NETDEV_ERR("Cannot simultaneously enable lro and " + "disable rx csum.\n"); + return -EOPNOTSUPP; + } + + if (!(test_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags))) { + need_reinit = true; + set_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags); + LOG_DEBUG_BDF("lro enabled and need reinit\n"); + } + } + + if (need_reinit) { + if (!test_and_set_bit(SXE2_VSI_S_DOWN, vsi->state)) { + ret = sxe2_vsi_down(vsi); + if (ret) { + LOG_NETDEV_ERR("set_features if_down err %d\n", ret); + goto l_end; + } + + ret = sxe2_vsi_up(vsi); + if (ret) { + LOG_NETDEV_ERR("set_features if_up err %d\n", ret); + } else { + SXE2_SET_FEATURE(oper_features, NETIF_F_LRO, + lro_ena); + } + } + } + +l_end: + if (ret) { + if (old_lro_feature) + set_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags); + else + clear_bit(SXE2_VSI_FLAG_LRO_ENABLE, vsi->flags); + } + + return ret; +} + +static s32 sxe2_set_macvlan_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + + if ((features & NETIF_F_HW_L2FW_DOFFLOAD) && + !(netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)) { + ret = sxe2_macvlan_init(vsi, true); + if (!ret) { + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_L2FW_DOFFLOAD, + (features & NETIF_F_HW_L2FW_DOFFLOAD)); + } + } else if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && + (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)) { + ret = sxe2_macvlan_deinit(vsi, true); + if (!ret) { + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_L2FW_DOFFLOAD, + (features & NETIF_F_HW_L2FW_DOFFLOAD)); + } + } + + return ret; +} + +static s32 sxe2_set_fnav_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + netdev_features_t changed_features = netdev->features ^ features; + bool fnav_ena = !!(features & NETIF_F_NTUPLE); + + if (changed_features & NETIF_F_NTUPLE) { + ret = sxe2_fnav_switch(adapter, fnav_ena); + if (!ret) { + SXE2_SET_FEATURE(oper_features, NETIF_F_NTUPLE, fnav_ena); + } else { + LOG_DEV_ERR("%s feature %llx failed, ret %d\n", + fnav_ena ? 
"enable" : "disable", + (u64)NETIF_F_NTUPLE, ret); + } + } + + return ret; +} + +static s32 sxe2_set_rxcsum_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + bool rxcsum_ena = !!(features & NETIF_F_RXCSUM); + + SXE2_SET_FEATURE(oper_features, NETIF_F_RXCSUM, rxcsum_ena); + + return ret; +} + +static s32 sxe2_set_rxfcs_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + bool need_reinit = false; + s32 ret = 0; + bool rxfcs_ena = !!(features & NETIF_F_RXFCS); + bool old_rxfcs_feature = + (bool)test_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags); + + if (!(features & NETIF_F_RXFCS)) { + if (test_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags)) { + need_reinit = true; + clear_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags); + LOG_DEBUG_BDF("rxfcs disabled and need reinit\n"); + } + } else { + if (!(test_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags))) { + need_reinit = true; + set_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags); + LOG_DEBUG_BDF("rxfcs enabled and need reinit\n"); + } + } + + if (need_reinit) { + if (!test_and_set_bit(SXE2_VSI_S_DOWN, vsi->state)) { + ret = sxe2_vsi_down(vsi); + if (ret) { + LOG_NETDEV_ERR("set_features if_down err %d\n", ret); + goto l_end; + } + + ret = sxe2_vsi_up(vsi); + if (ret) { + LOG_NETDEV_ERR("set_features if_up err %d\n", ret); + } else { + SXE2_SET_FEATURE(oper_features, NETIF_F_RXFCS, + rxfcs_ena); + } + } + } + +l_end: + if (ret) { + if (old_rxfcs_feature) + set_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags); + else + clear_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, vsi->flags); + } + + return ret; +} + +#ifdef HAVE_MACSEC_SUPPORT +static s32 sxe2_set_macsec_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + netdev_features_t changed_features = netdev->features ^ features; + bool macsec_ena = !!(features & NETIF_F_HW_MACSEC); + + if (changed_features & NETIF_F_HW_MACSEC) { + if (macsec_ena) { + LOG_DEBUG_BDF("enable macsec offload(off to on).\n"); + if (sxe2_macsec_conflict_features_check(netdev)) { + LOG_DEV_ERR("failed to enable macsec offload,\t" + "please disable ipsec offload " + "feature.\n"); + ret = -EINVAL; + } + } + { + LOG_DEBUG_BDF("disable macsec offload switch(on to off).\n"); + if (sxe2_is_macsec_can_not_disable(adapter)) { + LOG_DEV_ERR("can not disable macsec offload,\t" + "please delete the secy first.\n"); + ret = -EINVAL; + } + } + if (!ret) { + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_MACSEC, + macsec_ena); + } + } + + return ret; +} +#endif + +STATIC s32 sxe2_set_ipsec_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + netdev_features_t changed_features = netdev->features ^ features; + bool ipsec_ena = !!(features & NETIF_F_HW_ESP); + + if (changed_features & NETIF_F_HW_ESP) { + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (ipsec_ena) { + LOG_DEBUG_BDF("enable ipsec offload(off to on).\n"); + if (sxe2_ipsec_conflict_features_check(adapter, netdev)) { + LOG_DEV_ERR("failed to enable 
ipsec offload, please " + "disable tx segmentation offload " + "features,tx vlan offload feature and " + "LRO offload feature.\n"); + ret = -EINVAL; + } + } else { + LOG_DEBUG_BDF("disable ipsec offload switch(on to off).\n"); + if (sxe2_is_ipsec_can_not_disable(adapter)) { + LOG_DEV_ERR("can not disable ipsec offload, please " + "delete all xfrm state before disable " + "ipsec offload\n"); + ret = -EINVAL; + } + } + if (!ret) { + SXE2_SET_FEATURE(oper_features, NETIF_F_HW_ESP, ipsec_ena); + } + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + } + + return ret; +} + +static s32 sxe2_conflict_features_check(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed_features = netdev->features ^ features; + netdev_features_t conflict1, conflict2; + + (void)changed_features; + (void)conflict1; + (void)conflict2; + + conflict1 = NETIF_F_HW_ESP; + conflict2 = 0 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL +#ifdef NETIF_F_GSO_UDP_L4 + | NETIF_F_GSO_UDP_L4 +#endif + | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | NETIF_F_LRO | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM +#ifdef HAVE_MACSEC_SUPPORT + | NETIF_F_HW_MACSEC +#endif + ; + + (void)sxe2_conflict_features_chk(changed_features, features, conflict1, + conflict2); + +#ifdef HAVE_MACSEC_SUPPORT + conflict1 = NETIF_F_HW_MACSEC; + conflict2 = NETIF_F_HW_ESP; + (void)sxe2_conflict_features_chk(changed_features, features, conflict1, + conflict2); +#endif + + return 0; +} + +static int sxe2_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + netdev_features_t oper_features; + bool part_failed = false; + + s32 ret = 0; + + if (sxe2_is_safe_mode(adapter)) { + LOG_DEV_ERR("device is in safe mode - not enabling advanced netdev " + "features"); + return 0; + } + + ret = sxe2_conflict_features_check(netdev, features); + if (ret) { + LOG_DEV_ERR("some features are conflict\n"); + return ret; + } + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; + } + + oper_features = netdev->features; + + ret = sxe2_set_rxfcs_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = sxe2_set_lro_features(netdev, features, &oper_features); + if (ret) { + part_failed = true; + goto skip_rxcsum; + } + + ret = sxe2_set_rxcsum_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + +skip_rxcsum: +#ifdef HAVE_MACSEC_SUPPORT + ret = sxe2_set_macsec_features(netdev, features, &oper_features); + if (ret) + part_failed = true; +#endif + ret = sxe2_set_ipsec_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = sxe2_set_vlan_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = sxe2_set_fnav_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + + ret = sxe2_set_macvlan_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + if (part_failed) { + netdev->features = oper_features; + ret = -EINVAL; + } + + return ret; +} + +static netdev_features_t +sxe2_features_check(struct sk_buff 
*skb, struct net_device *netdev,
+		    netdev_features_t features)
+{
+	size_t len;
+	bool gso = skb_is_gso(skb);
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = np->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return features;
+
+	if (gso && (skb_shinfo(skb)->gso_size < SXE2_TXCD_QW1_MSS_MIN)) {
+		LOG_WARN_BDF("gso size < 88, not supported\n");
+		features &= ~NETIF_F_GSO_MASK;
+	}
+
+	if (skb_network_offset(skb) < 0) {
+		goto out_rm_features;
+	} else {
+		len = (size_t)skb_network_offset(skb);
+		if (len > SXE2_TXDD_MACLEN_MAX || len & 0x1) {
+			LOG_WARN_BDF("The mac header exceeds the max length, "
+				     "tso/csum not supported, maclen = %zu\n",
+				     len);
+			goto out_rm_features;
+		}
+	}
+
+	len = skb_network_header_len(skb);
+	if (len > SXE2_TXDD_IPLEN_MAX || len & 0x1) {
+		LOG_WARN_BDF("The ip header exceeds the max length, "
+			     "tso/csum not supported, iplen = %zu\n",
+			     len);
+		goto out_rm_features;
+	}
+
+	if (skb->encapsulation) {
+		if (gso && (skb_shinfo(skb)->gso_type &
+			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+			len = (size_t)skb_inner_network_header(skb) -
+			      (size_t)skb_transport_header(skb);
+			if (len > SXE2_TXDD_L4LEN_MAX || len & 0x1) {
+				LOG_WARN_BDF("tunnel: The inner L4 header exceeds "
+					     "the max length, tso/csum not supported, "
+					     "l4 len = %zu\n",
+					     len);
+				goto out_rm_features;
+			}
+		}
+
+		len = skb_inner_network_header_len(skb);
+		if (len > SXE2_TXDD_IPLEN_MAX || len & 0x1) {
+			LOG_WARN_BDF("tunnel: The inner ip header exceeds the max "
+				     "length, tso/csum not supported, ip len = %zu\n",
+				     len);
+			goto out_rm_features;
+		}
+	}
+
+	return features;
+
+out_rm_features:
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+static netdev_features_t sxe2_fix_ipsec_features(struct sxe2_adapter *adapter,
+						 struct net_device *netdev,
+						 netdev_features_t features)
+{
+	netdev_features_t tso_features;
+
+	if (netdev->features & NETIF_F_HW_ESP) {
+		tso_features = NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+			       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+			       NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+			       NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 |
+#ifdef NETIF_F_GSO_UDP_L4
+			       NETIF_F_GSO_UDP_L4 |
+#endif
+			       NETIF_F_GSO_IPXIP6;
+		if (features & (tso_features)) {
+			LOG_DEV_WARN("ipsec conflicts with tx segmentation "
+				     "offload.\n");
+			features &= ~(tso_features);
+		}
+		if (features &
+		    (NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM)) {
+			LOG_DEV_WARN("ipsec conflicts with tx checksum "
+				     "offload.\n");
+			features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC |
+				      NETIF_F_IPV6_CSUM);
+		}
+		if (features & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)) {
+			LOG_DEV_WARN("ipsec conflicts with tx VLAN offload.\n");
+			features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+				      NETIF_F_HW_VLAN_STAG_TX);
+		}
+		if (features & NETIF_F_LRO) {
+			LOG_DEV_WARN("ipsec conflicts with LRO.\n");
+			features &= ~(NETIF_F_LRO);
+		}
+#ifdef HAVE_MACSEC_SUPPORT
+		if (features & NETIF_F_HW_MACSEC) {
+			LOG_DEV_WARN("ipsec conflicts with macsec offload.\n");
+			features &= ~(NETIF_F_HW_MACSEC);
+		}
+#endif
+	}
+	return features;
+}
+
+static netdev_features_t sxe2_fix_features(struct net_device *netdev,
+					   netdev_features_t features)
+{
+	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
+	bool cur_ctag, cur_stag, req_ctag, req_stag;
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_adapter *adapter = np->vsi->adapter;
+	netdev_features_t request_features;
+
+	request_features = features;
+
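+	/* keep the caller's requested mask so the debug log at the end of
+	 * this function can show both the requested and the fixed-up bits
+	 */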
LOG_DEBUG_BDF("fix features:0x%llx netdev features:0x%llx.\n", features, + netdev->features); + + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + if (req_vlan_fltr != cur_vlan_fltr) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else { + LOG_DEV_WARN("802.1Q and 802.1ad VLAN filtering must be " + "either both on or both off.\n" + "VLAN filtering has been enabled for both " + "types.\n"); + if (!cur_ctag && !cur_stag) + features |= NETIF_VLAN_FILTERING_FEATURES; + else + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } + } + + if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && + (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + LOG_DEV_WARN("cannot support CTAG and STAG VLAN stripping and/or " + "insertion simultaneously since CTAG and STAG offloads " + "are mutually exclusive, clearing STAG offload " + "settings\n"); + features &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); + } + features = sxe2_fix_ipsec_features(adapter, netdev, features); + +#ifdef HAVE_MACSEC_SUPPORT + if (netdev->features & NETIF_F_HW_MACSEC) { + if (features & NETIF_F_HW_ESP) { + features &= ~NETIF_F_HW_ESP; + LOG_DEV_WARN("cannot turn on ipsec offload when macsec " + "offload is on.\n"); + } + } +#endif + + LOG_DEV_DEBUG("request features %llx, fix features %llx\n", request_features, + features); + + return features; +} + +s32 sxe2_open(struct net_device *netdev) +{ + u16 old_txq_cnt = (u16)netdev->real_num_tx_queues; + u16 old_rxq_cnt = (u16)netdev->real_num_rx_queues; + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + bool link_status = true; + +#ifdef CONFIG_LOCKDEP + WARN_ON(lockdep_is_held(&adapter->vsi_ctxt.lock) == LOCK_STATE_HELD); +#endif + ret = sxe2_netdev_q_cnt_set(netdev, vsi->txqs.q_cnt, vsi->rxqs.q_cnt, true); + if (ret) + goto l_end; + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + LOG_NETDEV_ERR("can't open net device while vsi is disabled"); + ret = -EBUSY; + goto unlock; + } + + netif_carrier_off(netdev); + + if (vsi->type == SXE2_VSI_T_PF) { + mutex_lock(&adapter->link_ctxt.link_status_lock); + link_status = sxe2_get_pf_link_status(adapter); + mutex_unlock(&adapter->link_ctxt.link_status_lock); + if (!link_status) + (void)sxe2_link_up(adapter); + } + + ret = sxe2_vsi_open(vsi); + +unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + if (ret) + (void)sxe2_netdev_q_cnt_set(netdev, old_txq_cnt, old_rxq_cnt, true); + + return ret; +} + +s32 sxe2_net_link_down(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + enum flm_link_status link_status = FLM_PORT_DOWN; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FLM_LINK_UP_DOWN_SET, &link_status, + sizeof(enum flm_link_status), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + ret = -EIO; + goto out; + } + +out: + return ret; +} + +s32 sxe2_link_up(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = 
{0}; + struct flm_link_info req = {0}; + struct flm_link_info_pasist persist_cfg = {0}; + + ret = sxe2_link_get_pasist_info(adapter, &persist_cfg); + if (ret) { + LOG_ERROR_BDF("Failed to get persist cfg, ret=%d\n", ret); + goto out; + } + + req.is_link_up = persist_cfg.link_status; + req.fec = persist_cfg.fec_mode; + req.fc_mode = persist_cfg.fc_mode; + req.speed = persist_cfg.speed; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FLM_LINK_UP, &req, + sizeof(struct flm_link_info), NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("failed to link cfg, ret=%d\n", ret); + ret = -EIO; + } + +out: + return ret; +} + +s32 sxe2_stop(struct net_device *netdev) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_mac_filter *mac_filter = &vsi->mac_filter; + struct sxe2_mac_sync_entry *list_itr = NULL; + struct sxe2_mac_sync_entry *tmp = NULL; + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + goto unlock; + + if (test_bit(SXE2_FLAG_LINK_DOWN_ON_CLOSE, adapter->flags)) { + ret = sxe2_net_link_down(adapter); + if (ret) + goto unlock; + } + + ret = sxe2_vsi_close(vsi); + + (void)mutex_lock(&mac_filter->sync_lock); + INIT_LIST_HEAD(&mac_filter->tmp_unsync_list); + + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, sxe2_unsync_mac_add); + __dev_mc_unsync(netdev, sxe2_unsync_mac_add); + netif_addr_unlock_bh(netdev); + + list_for_each_entry_safe(list_itr, tmp, &mac_filter->tmp_unsync_list, + list_entry) + { + (void)sxe2_mac_addr_del(vsi, list_itr->mac_addr, + SXE2_MAC_OWNER_UC_MC); + list_del(&list_itr->list_entry); + kfree(list_itr); + } + (void)mutex_unlock(&mac_filter->sync_lock); + +unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2_set_mac_address(struct net_device *netdev, void *pi) +{ + int ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sockaddr *addr = pi; + u8 *mac; + u8 old_mac[ETH_ALEN]; + + mac = (u8 *)addr->sa_data; + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, mac)) + return 0; + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + LOG_ERROR("can't set mac %pM. 
device not ready\n", mac); + ret = -EBUSY; + goto l_unlock; + } + + netif_addr_lock_bh(netdev); + ether_addr_copy(old_mac, netdev->dev_addr); + netif_addr_unlock_bh(netdev); + + ret = sxe2_cur_mac_addr_set(vsi, mac); + if (ret) { + LOG_DEV_ERR("set mac addr failed, mac %pM, ret %d\n", mac, ret); + ret = -EADDRNOTAVAIL; + goto l_unlock; + } + + ret = sxe2_mac_addr_del(vsi, old_mac, SXE2_MAC_OWNER_NETDEV); + if (ret) { + LOG_DEV_ERR("delete mac filter failed, mac %pM, ret %d\n", old_mac, + ret); + } + + ret = sxe2_mac_addr_add(vsi, mac, SXE2_MAC_OWNER_NETDEV); + if (ret) { + LOG_DEV_ERR("add mac filter failed, mac %pM, ret %d\n", mac, ret); + goto l_add_new_mac_fail; + } + + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, mac); + netif_addr_unlock_bh(netdev); + + goto l_unlock; + +l_add_new_mac_fail: + ret = sxe2_mac_addr_add(vsi, old_mac, SXE2_MAC_OWNER_NETDEV); + if (ret) { + LOG_DEV_ERR("add pre mac filter failed, mac %pM, ret %d\n", old_mac, + ret); + } + + ret = sxe2_cur_mac_addr_set(vsi, old_mac); + if (ret) { + LOG_DEV_ERR("set mac addr failed, mac %pM, ret %d\n", old_mac, ret); + } + ret = -EADDRNOTAVAIL; + +l_unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static struct net_device *sxe2_netdev_alloc(struct sxe2_vsi *vsi) +{ + struct sxe2_netdev_priv *priv; + struct net_device *netdev; + struct sxe2_adapter *adapter = vsi->adapter; + u16 txq_cnt = adapter->irq_ctxt.irq_layout.lan; + u16 rxq_cnt = adapter->irq_ctxt.irq_layout.lan; + + if (test_bit(SXE2_FLAG_VMDQ_CAPABLE, vsi->adapter->flags)) { + txq_cnt += SXE2_MAX_MACVLANS; + rxq_cnt += SXE2_MAX_MACVLANS; + } + netdev = alloc_etherdev_mqs(sizeof(*priv), txq_cnt, rxq_cnt); + if (!netdev) { + LOG_DEV_ERR("alloc netdev failed, priv size %zu, txqs %u, rxqs %u\n", + sizeof(*priv), txq_cnt, rxq_cnt); + goto l_end; + } + + vsi->netdev = netdev; + priv = netdev_priv(netdev); + priv->vsi = vsi; + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + LOG_INFO_BDF("vsi[%u][%u] type:%u netdev:%pK vsi:%pK.\n", vsi->id_in_pf, + vsi->idx_in_dev, vsi->type, netdev, vsi); + +l_end: + return netdev; +} + +static void sxe2_netdev_lro_feature_init(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + + if (netdev->features & NETIF_F_LRO) + set_bit(SXE2_VSI_FLAG_LRO_ENABLE, priv->vsi->flags); + else + clear_bit(SXE2_VSI_FLAG_LRO_ENABLE, priv->vsi->flags); +} + +static void sxe2_netdev_rxfcs_feature_init(struct net_device *netdev) +{ + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + + if (netdev->features & NETIF_F_RXFCS) + set_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, priv->vsi->flags); + else + clear_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, priv->vsi->flags); +} + +void sxe2_netdev_feature_init(struct net_device *netdev) +{ + netdev_features_t defaults; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + netdev_features_t csum_features; + netdev_features_t tso_features; + netdev_features_t vlan_features; + + if (sxe2_is_safe_mode(adapter)) { + netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; + netdev->hw_features = netdev->features; + return; + } + + defaults = NETIF_F_SG | NETIF_F_RXHASH | + NETIF_F_NTUPLE | + NETIF_F_HIGHDMA; + + csum_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlan_features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER; + + tso_features = NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | 
NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+		       NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 |
+#ifdef NETIF_F_GSO_UDP_L4
+		       NETIF_F_GSO_UDP_L4 |
+#endif
+		       NETIF_F_GSO_IPXIP6;
+
+	netdev->gso_partial_features |=
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+	netdev->hw_features =
+		defaults | csum_features | tso_features | vlan_features;
+	netdev->features = netdev->hw_features;
+
+	netdev->hw_features |= NETIF_F_LRO | NETIF_F_RXFCS;
+
+	netdev->mpls_features = NETIF_F_HW_CSUM;
+
+	netdev->hw_enc_features |= defaults | csum_features | tso_features;
+
+	netdev->vlan_features |=
+		defaults | csum_features | tso_features | NETIF_F_LRO;
+
+	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+
+	netdev->hw_features |= NETIF_F_HW_TC;
+
+#ifdef HAVE_MACSEC_SUPPORT
+	netdev->hw_features |= NETIF_F_HW_MACSEC;
+#endif
+	netdev->hw_features |= NETIF_F_HW_ESP;
+	netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+	sxe2_netdev_lro_feature_init(netdev);
+
+	sxe2_netdev_rxfcs_feature_init(netdev);
+
+	if (test_bit(SXE2_FLAG_VMDQ_CAPABLE, adapter->flags))
+		netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+
+	LOG_DEBUG_BDF("netdev init features:0x%llx hw_features:0x%llx.\n",
+		      netdev->features, netdev->hw_features);
+}
+
+static int sxe2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+			       struct net_device *dev, u32 filter_mask, int nlflags)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(dev);
+	struct sxe2_vsi *vsi = np->vsi;
+	u16 bmode = vsi->adapter->switch_ctxt.evb_mode;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
+				       filter_mask, NULL);
+}
+
+#ifdef NETDEV_NO_NEED_EXTACK_PRAM
+static int sxe2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+			       u16 __always_unused flags)
+#else
+static int sxe2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+			       u16 __always_unused flags,
+			       struct netlink_ext_ack __always_unused *extack)
+#endif
+{
+	struct sxe2_netdev_priv *np = netdev_priv(dev);
+	struct sxe2_adapter *adapter = np->vsi->adapter;
+	struct sxe2_switch_context *switch_ctxt;
+	struct nlattr *attr, *br_spec;
+	int rem = 0;
+	struct sxe2_vsi *vsi;
+	u16 i;
+	__u16 mode;
+	u16 old_mode;
+	int ret = 0;
+
+	switch_ctxt = &adapter->switch_ctxt;
+	(void)mutex_lock(&adapter->vsi_ctxt.lock);
+	(void)mutex_lock(&switch_ctxt->evb_mode_lock);
+
+	if (test_bit(SXE2_VSI_S_DISABLE, np->vsi->state)) {
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec) {
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	nla_for_each_nested(attr, br_spec, rem)
+	{
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+		mode = nla_get_u16(attr);
+		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) {
+			ret = -EINVAL;
+			goto l_unlock;
+		}
+
+		if (switch_ctxt->evb_mode == mode)
+			continue;
+		old_mode = switch_ctxt->evb_mode;
+		switch_ctxt->evb_mode = mode;
+
+		sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+		{
+			vsi = adapter->vsi_ctxt.vsi[i];
+			if (!vsi)
+				continue;
+			ret = sxe2_vsi_loopback_control(
+				adapter, vsi->idx_in_dev,
+				(mode == BRIDGE_MODE_VEB ? 
true : false)); + if (ret) { + switch_ctxt->evb_mode = old_mode; + goto l_unlock; + } + } + + ret = sxe2_rule_bridge_mode_update(adapter); + if (ret) { + switch_ctxt->evb_mode = old_mode; + goto l_unlock; + } + } + +l_unlock: + (void)mutex_unlock(&switch_ctxt->evb_mode_lock); + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef NETDEV_NO_NEED_EXTACK_PRAM +static int sxe2_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, u16 vid, + u16 flags) +{ + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} +#else +static int sxe2_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, u16 vid, + u16 flags, struct netlink_ext_ack __always_unused *extack) +{ + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} +#endif + +static int sxe2_set_tx_maxrate(struct net_device *netdev, int queue_index, + u32 maxrate) +{ + int ret; + struct sxe2_queue *txq; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + if (maxrate && (maxrate > (SXE2_TXSCHED_MAX_BW / 1000))) { + LOG_NETDEV_ERR("invalid max rate %u specified for the queue %d\n", + maxrate, queue_index); + return -EINVAL; + } + + if (maxrate && (maxrate > adapter->link_ctxt.current_link_speed)) { + LOG_NETDEV_ERR("invalid max rate %u specified for the queue %d in " + "port %d\n", + maxrate, queue_index, adapter->port_idx); + return -EINVAL; + } + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, np->vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + txq = vsi->txqs.q[queue_index]; + + if (!maxrate) + ret = sxe2_txsched_q_bw_lmt_cfg(vsi, txq, SXE2_NODE_RL_TYPE_EIR, + SXE2_TXSCHED_DFLT_BW); + else + ret = sxe2_txsched_q_bw_lmt_cfg(vsi, txq, SXE2_NODE_RL_TYPE_EIR, + maxrate * 1000); + if (ret) { + LOG_NETDEV_ERR("unable to set tx max rate, ret=%d, txq_idx=%u, " + "maxrate=%u\n", + ret, queue_index, maxrate); + } + +l_unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef NDO_FDB_DEL_API_NEED_5_PARAMS +static int sxe2_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, u16 vid) +#else +static int sxe2_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, u16 vid, + struct netlink_ext_ack *xtack) +#endif +{ + return ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); +} + +static int sxe2_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vlan vlan; + u16 proto_u16; + int ret; + + if (!vid && be16_to_cpu(proto) == ETH_P_8021Q) + return 0; + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + proto_u16 = be16_to_cpu(proto); + vlan = SXE2_VLAN(proto_u16, vid, 0); + ret = sxe2_vlan_rule_add(vsi, &vlan); + if (ret == -EEXIST) + ret = 0; + +l_unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vlan vlan; + u16 proto_u16; + int ret; + + if (!vid && be16_to_cpu(proto) == ETH_P_8021Q) + return 0; + + 
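+	/* VID 0 on 802.1Q is accepted but never programmed as a filter rule
+	 * (see sxe2_vlan_rx_add_vid above), so there is nothing to delete here
+	 */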
(void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + proto_u16 = be16_to_cpu(proto); + vlan = SXE2_VLAN(proto_u16, vid, 0); + ret = sxe2_vlan_rule_del(adapter, vsi->idx_in_dev, &vlan); + if (ret == -ENOENT) + ret = 0; + +l_unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static LIST_HEAD(sxe2_block_cb_list); + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static s32 sxe2_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + + if (type == TC_SETUP_BLOCK) + return flow_block_cb_setup_simple(type_data, &sxe2_block_cb_list, + sxe2_setup_tc_block_cb, np, np, + true); + return -EOPNOTSUPP; +} +#else +static s32 sxe2_setup_tc(struct net_device *netdev, u32 __always_unused handle, + __be16 __always_unused proto, struct tc_to_netdev *tc) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + + if (tc->type == TC_SETUP_CLSFLOWER) + return sxe2_setup_tc_cls_flower(np, np->vsi->netdev, tc->cls_flower); + return -EOPNOTSUPP; +} +#endif + +static u8 sxe2_get_dscp_up(struct sxe2_dcbx_cfg *dcbcfg, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return dcbcfg->dscp_map[dscp]; +} + +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV +#ifdef NDO_SELECT_QUEUE_NEED_4_PARAMS +STATIC u16 sxe2_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +STATIC u16 sxe2_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#endif +#else +#ifdef NDO_SELECT_QUEUE_NEED_4_PARAMS +STATIC u16 sxe2_select_queue(struct net_device *netdev, struct sk_buff *skb, + void __always_unused *accel_priv, + select_queue_fallback_t fallback) +#else +STATIC u16 sxe2_select_queue(struct net_device *netdev, struct sk_buff *skb, + void __always_unused *accel_priv) +#endif +#endif + +{ + struct sxe2_dcbx_cfg *dcbcfg; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + dcbcfg = &adapter->dcb_ctxt.local_dcbx_cfg; + if (dcbcfg->qos_mode == SXE2_QOS_MODE_DSCP) + skb->priority = sxe2_get_dscp_up(dcbcfg, skb); + +#ifdef NDO_SELECT_QUEUE_NEED_4_PARAMS +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + return fallback(netdev, skb, sb_dev); +#else + return fallback(netdev, skb); +#endif +#else + return netdev_pick_tx(netdev, skb, sb_dev); +#endif +} + +s32 sxe2_check_vf_ready_for_cfg(struct sxe2_vf_node *vf) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vf->adapter; + + if (test_bit(SXE2_VF_STATE_DIS, vf->states)) { + ret = -EBUSY; + LOG_ERROR_BDF("vf:%u pf status:0x%lx vf states:0x%lx.\n", vf->vf_idx, + *adapter->flags, *vf->states); + } + + return ret; +} + +STATIC s32 sxe2_set_vf_spoofchk(struct net_device *netdev, s32 vf_idx, bool ena) +{ + s32 ret; + struct sxe2_vf_node *vf_node; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (sxe2_eswitch_is_offload(adapter)) { + LOG_DEV_INFO("switchdev mode not support change spoofchk status.\n"); + return -EOPNOTSUPP; + } + + if (sxe2_vf_id_check(adapter, (u16)vf_idx)) + return -EINVAL; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + vf_node = sxe2_vf_node_get(adapter, 
(u16)vf_idx);
+	if (!vf_node) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2_check_vf_ready_for_cfg(vf_node);
+	if (ret)
+		goto l_end;
+
+	if (ena == vf_node->prop.spoofchk) {
+		LOG_DEV_DEBUG("vf spoofchk already %s\n", ena ? "on" : "off");
+		ret = 0;
+		goto l_end;
+	}
+
+	eth_vsi = vf_node->vsi;
+	user_vsi = vf_node->dpdk_vf_vsi;
+	if (!eth_vsi && !user_vsi) {
+		vf_node->prop.spoofchk = (u8)ena;
+		goto l_end;
+	} else if (!eth_vsi && user_vsi) {
+		ret = sxe2_vsi_spoofchk_control(adapter, user_vsi->idx_in_dev, ena);
+		if (ret) {
+			LOG_DEV_ERR("failed to set spoofchk %s for vf %d vsi %d, "
+				    "error %d\n",
+				    ena ? "on" : "off", vf_idx, user_vsi->idx_in_dev,
+				    ret);
+		}
+	} else if (eth_vsi && !user_vsi) {
+		ret = sxe2_vsi_spoofchk_control(adapter, eth_vsi->idx_in_dev, ena);
+		if (ret) {
+			LOG_DEV_ERR("failed to set spoofchk %s for vf %d vsi %d, "
+				    "error %d\n",
+				    ena ? "on" : "off", vf_idx, eth_vsi->idx_in_dev,
+				    ret);
+		}
+	} else {
+		ret = sxe2_vsi_spoofchk_control(adapter, eth_vsi->idx_in_dev, ena);
+		if (ret) {
+			LOG_DEV_ERR("failed to set spoofchk %s for vf %d vsi %d, "
+				    "error %d\n",
+				    ena ? "on" : "off", vf_idx, eth_vsi->idx_in_dev,
+				    ret);
+			goto l_end;
+		}
+		ret = sxe2_vsi_spoofchk_control(adapter, user_vsi->idx_in_dev, ena);
+		if (ret) {
+			LOG_DEV_ERR("failed to set spoofchk %s for vf %d vsi %d, "
+				    "error %d\n",
+				    ena ? "on" : "off", vf_idx, user_vsi->idx_in_dev,
+				    ret);
+			(void)sxe2_vsi_spoofchk_control(adapter, eth_vsi->idx_in_dev,
+							!ena);
+		}
+	}
+
+	if (!ret)
+		vf_node->prop.spoofchk = (u8)ena;
+
+l_end:
+	mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx));
+	return ret;
+}
+
+STATIC s32 sxe2_set_vf_mac(struct net_device *netdev, s32 vf_idx, u8 *mac_addr)
+{
+	s32 ret = 0;
+	struct sxe2_vf_node *vf_node;
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_adapter *adapter = np->vsi->adapter;
+
+	if (is_multicast_ether_addr(mac_addr)) {
+		LOG_NETDEV_ERR("%pM not a valid unicast address\n", mac_addr);
+		return -EINVAL;
+	}
+
+	if (sxe2_vf_id_check(adapter, (u16)vf_idx))
+		return -EINVAL;
+
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx));
+
+	vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx);
+	if (!vf_node) {
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	if (ether_addr_equal(vf_node->mac_addr.addr, mac_addr)) {
+		LOG_INFO_BDF("vf:%u mac addr:%pM unchanged, no need to set.\n",
+			     vf_idx, mac_addr);
+		goto l_unlock;
+	}
+
+	ret = sxe2_check_vf_ready_for_cfg(vf_node);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%u pf flags:0x%lx vf states:0x%lx not ready.\n",
+			      vf_idx, *adapter->flags, *vf_node->states);
+		goto l_unlock;
+	}
+
+	ether_addr_copy(vf_node->mac_addr.addr, mac_addr);
+	if (is_zero_ether_addr(mac_addr)) {
+		vf_node->prop.mac_from_pf = false;
+		LOG_NETDEV_INFO("removing mac on vf %d. vf driver will be "
+				"reinitialized\n", vf_idx);
+	} else {
+		vf_node->prop.mac_from_pf = true;
+		LOG_NETDEV_INFO("setting mac %pM on vf %d. 
vf driver will be " + "reinitialized\n", + mac_addr, vf_idx); + } + ret = sxe2_reset_vf(adapter, (u16)vf_idx, SXE2_VF_RESET_FLAG_NOTIFY); + if (ret) + LOG_ERROR_BDF("vf:%u set mac:%pM failed.\n", vf_idx, mac_addr); + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + + return ret; +} + +STATIC s32 sxe2_get_vf_cfg(struct net_device *netdev, s32 vf_idx, + struct ifla_vf_info *info) +{ + s32 ret; + struct sxe2_vf_node *vf_node; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (sxe2_vf_id_check(adapter, (u16)vf_idx)) + return -EINVAL; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx); + if (!vf_node) { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_check_vf_ready_for_cfg(vf_node); + if (ret) + goto l_end; + + info->vf = (u32)vf_idx; + ether_addr_copy(info->mac, vf_node->mac_addr.addr); + + info->vlan = sxe2_vf_port_vid_get(vf_node); + info->qos = sxe2_vf_port_vprio_get(vf_node); + + info->trusted = vf_node->prop.trusted; + info->spoofchk = vf_node->prop.spoofchk; + if (!vf_node->prop.link_forced) + info->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf_node->prop.link_up) + info->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + info->linkstate = IFLA_VF_LINK_STATE_DISABLE; + info->max_tx_rate = vf_node->prop.max_tx_rate; + info->min_tx_rate = vf_node->prop.min_tx_rate; + +l_end: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + return ret; +} + +STATIC s32 sxe2_set_vf_trust(struct net_device *netdev, s32 vf_idx, bool status) +{ + s32 ret = 0; + struct sxe2_vf_node *vf_node; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (sxe2_eswitch_is_offload(adapter)) { + LOG_DEV_INFO("switchdev mode not support change vf trust status.\n"); + return -EOPNOTSUPP; + } + + if (sxe2_vf_id_check(adapter, (u16)vf_idx)) + return -EINVAL; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx); + if (!vf_node) { + ret = -EINVAL; + goto l_unlock; + } + + if (status == vf_node->prop.trusted) + goto l_unlock; + + ret = sxe2_check_vf_ready_for_cfg(vf_node); + if (ret) + goto l_unlock; + + vf_node->prop.trusted = (u8)status; + LOG_DEV_INFO("vf %u is now %strusted\n", vf_idx, status ? 
"" : "un"); + + ret = sxe2_reset_vf(adapter, (u16)vf_idx, SXE2_VF_RESET_FLAG_NOTIFY); + if (ret) + LOG_ERROR_BDF("vf:%u set trust failed.\n", vf_idx); + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + return ret; +} + +STATIC s32 sxe2_set_vf_link_state(struct net_device *netdev, s32 vf_idx, s32 state) +{ + s32 ret; + struct sxe2_vf_node *vf_node; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (sxe2_vf_id_check(adapter, (u16)vf_idx)) + return -EINVAL; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx); + if (!vf_node) { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_check_vf_ready_for_cfg(vf_node); + if (ret) + goto l_end; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + vf_node->prop.link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf_node->prop.link_forced = true; + vf_node->prop.link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf_node->prop.link_forced = true; + vf_node->prop.link_up = false; + break; + default: + ret = -EINVAL; + goto l_end; + } + + sxe2_notify_vf_link_state(vf_node); + +l_end: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + return ret; +} + +#ifdef HAVE_XDP_SUPPORT +STATIC void sxe2_vsi_assign_bpf_prog(struct sxe2_vsi *vsi, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + int i; + + old_prog = xchg(&vsi->xdp_prog, prog); + + sxe2_for_each_vsi_rxq(vsi, i) + WRITE_ONCE(vsi->rxqs.q[i]->xdp_prog, vsi->xdp_prog); + + if (old_prog && old_prog != prog) + bpf_prog_put(old_prog); +} + +STATIC s32 sxe2_xdp_alloc_and_setup_rings(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + s32 i; + + for (i = 0; i < (s32)vsi->num_xdp_txq; i++) { + u16 xdp_q_idx = vsi->txqs.q_cnt + (u16)i; + struct sxe2_queue *xdp_ring; + + xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); + + if (!xdp_ring) + goto free_xdp_rings; + + xdp_ring->idx_in_pf = SXE2_Q_IDX_INVAL; + xdp_ring->idx_in_vsi = xdp_q_idx; + xdp_ring->vsi = vsi; + xdp_ring->dev = dev; + xdp_ring->depth = vsi->txqs.depth; + WRITE_ONCE(vsi->xdp_rings.q[i], xdp_ring); + if (sxe2_tx_ring_alloc(vsi->xdp_rings.q[i], vsi)) + goto free_xdp_rings; + sxe2_set_ring_xdp(xdp_ring); + xdp_ring->netdev = NULL; +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_AF_XDP_ZC_SUPPORT + xdp_ring->xsk_pool = sxe2_xsk_pool(xdp_ring); +#endif +#endif + } + vsi->xdp_rings.q_cnt = (u16)vsi->num_xdp_txq; + vsi->xdp_rings.q_alloc = (u16)vsi->num_xdp_txq; + + return 0; + +free_xdp_rings: + for (; i >= 0; i--) + if (vsi->xdp_rings.q[i]) + sxe2_tx_ring_free(vsi->xdp_rings.q[i]); + return -ENOMEM; +} + +void sxe2_vsi_xdp_qs_stats_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_qs_stats *vsi_qs_stat = &vsi->vsi_qs_stats; + u16 i; + + if (vsi_qs_stat->xdp_stats) { + for (i = 0; i < vsi->num_xdp_txq; i++) { + kfree(vsi_qs_stat->xdp_stats[i]); + WRITE_ONCE(vsi_qs_stat->xdp_stats[i], NULL); + } + kfree(vsi_qs_stat->xdp_stats); + vsi_qs_stat->xdp_stats = NULL; + } +} + +STATIC s32 sxe2_vsi_xdp_qs_stats_init(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_qs_stats *vsi_qs_stats; + struct sxe2_adapter *adapter = vsi->adapter; + u16 i; + + vsi_qs_stats = &vsi->vsi_qs_stats; + + if (!vsi_qs_stats->xdp_stats) { + vsi_qs_stats->xdp_stats = kcalloc(vsi->num_xdp_txq, + sizeof(*vsi_qs_stats->xdp_stats), + GFP_KERNEL); + if (!vsi_qs_stats->xdp_stats) { + LOG_ERROR_BDF("alloc txqs stats failed, count: %d, size: " + "%zu.\n", + 
vsi->xdp_rings.q_cnt, + sizeof(*vsi_qs_stats->xdp_stats)); + goto err_out; + } + } + + for (i = 0; i < vsi->num_xdp_txq; i++) { + struct sxe2_queue_stats *txq_stats; + struct sxe2_queue *txq = vsi->xdp_rings.q[i]; + + txq_stats = vsi_qs_stats->xdp_stats[i]; + if (!txq_stats) { + txq_stats = kzalloc(sizeof(*txq_stats), GFP_KERNEL); + if (!txq_stats) + goto err_out; + + WRITE_ONCE(vsi_qs_stats->xdp_stats[i], txq_stats); + } + + txq->stats = txq_stats; + } + + return 0; + +err_out: + sxe2_vsi_xdp_qs_stats_deinit(vsi); + return -ENOMEM; +} + +s32 sxe2_prepare_xdp_rings(struct sxe2_vsi *vsi, struct bpf_prog *prog) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_irq_data *irq_data; + s32 xdp_rings_rem = (s32)vsi->num_xdp_txq; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 i; + s32 ret; + s32 xdp_rings_per_v, q_id, q_base; + + vsi->xdp_rings.q = devm_kcalloc(dev, vsi->num_xdp_txq, + sizeof(*vsi->xdp_rings.q), GFP_KERNEL); + if (!vsi->xdp_rings.q) + return -ENOMEM; + + if (sxe2_xdp_alloc_and_setup_rings(vsi)) + goto clear_xdp_rings; + + ret = sxe2_vsi_queues_get(vsi, SXE2_DATA_XDP_TQ); + if (ret) { + LOG_DEV_ERR("get txqs %d failed(%d).\n", vsi->xdp_rings.q_cnt, ret); + goto err_map_xdp; + } + adapter->q_ctxt.txq_layout.xdp += vsi->xdp_rings.q_cnt; + + sxe2_for_each_vsi_irq(vsi, i) + { + irq_data = vsi->irqs.irq_data[i]; + xdp_rings_per_v = + (s32)DIV_ROUND_UP(xdp_rings_rem, vsi->irqs.cnt - i); + q_base = (s32)(vsi->num_xdp_txq) - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct sxe2_queue *xdp_ring = vsi->xdp_rings.q[q_id]; + + xdp_ring->irq_data = irq_data; + sxe2_queue_add(xdp_ring, &irq_data->tx.list); + } + xdp_rings_rem -= xdp_rings_per_v; + } + + ret = sxe2_vsi_xdp_qs_stats_init(vsi); + if (ret) { + LOG_DEV_ERR("failed qs stats config for xdp, error(%d)\n", ret); + goto err_map_xdp; + } + + ret = sxe2_txsched_lan_vsi_cfg(vsi); + if (ret) { + LOG_DEV_ERR("failed vsi lan queue config for xdp, error(%d)\n", ret); + goto clear_xdp_stats; + } + + sxe2_vsi_assign_bpf_prog(vsi, prog); + + return 0; + +clear_xdp_stats: + sxe2_vsi_xdp_qs_stats_deinit(vsi); + +err_map_xdp: + (void)mutex_lock(&adapter->q_ctxt.lock); + for (i = 0; i < (s32)vsi->num_xdp_txq; i++) { + if (vsi->xdp_rings.q[i]->idx_in_pf != SXE2_Q_IDX_INVAL) { + clear_bit(vsi->xdp_rings.q[i]->idx_in_pf, + adapter->q_ctxt.txq_layout.txq_map); + vsi->xdp_rings.q[i]->idx_in_pf = SXE2_Q_IDX_INVAL; + } + } + (void)mutex_unlock(&adapter->q_ctxt.lock); + +clear_xdp_rings: + for (i = 0; i < (s32)vsi->num_xdp_txq; i++) + if (vsi->xdp_rings.q[i]) { + kfree_rcu(vsi->xdp_rings.q[i], rcu); + vsi->xdp_rings.q[i] = NULL; + } + devm_kfree(dev, vsi->xdp_rings.q); + vsi->xdp_rings.q = NULL; + + return -ENOMEM; +} + +s32 sxe2_destroy_xdp_rings(struct sxe2_vsi *vsi, bool is_rebuild) +{ + s32 ret; + u32 i; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_irq_data *irq_data; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_queue *queue; + + if (!vsi->irqs.irq_data || !vsi->irqs.irq_data[0]) + goto free_qmap; + + sxe2_for_each_vsi_irq(vsi, i) + { + struct sxe2_list list; + + irq_data = vsi->irqs.irq_data[i]; + list.next = NULL; + list.cnt = 0; + sxe2_for_each_queue(queue, irq_data->tx.list) + { + if (!sxe2_queue_is_xdp(queue)) + sxe2_queue_add(queue, &list); + } + irq_data->tx.list.cnt = list.cnt; + irq_data->tx.list.next = list.next; + } + +free_qmap: + (void)mutex_lock(&adapter->q_ctxt.lock); + for (i = 0; i < vsi->num_xdp_txq; i++) { + 
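+		/* hand the PF-level queue index back to the txq bitmap before
+		 * the XDP ring structures are torn down below
+		 */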
clear_bit(vsi->xdp_rings.q[i]->idx_in_pf, + adapter->q_ctxt.txq_layout.txq_map); + vsi->xdp_rings.q[i]->idx_in_pf = SXE2_Q_IDX_INVAL; + } + adapter->q_ctxt.txq_layout.xdp = 0; + (void)mutex_unlock(&adapter->q_ctxt.lock); + + for (i = 0; i < vsi->num_xdp_txq; i++) + if (vsi->xdp_rings.q[i]) { + if (vsi->xdp_rings.q[i]->desc.base_addr) { + synchronize_rcu(); + sxe2_tx_ring_free(vsi->xdp_rings.q[i]); + } + kfree_rcu(vsi->xdp_rings.q[i], rcu); + WRITE_ONCE(vsi->xdp_rings.q[i], NULL); + } + + devm_kfree(dev, vsi->xdp_rings.q); + vsi->xdp_rings.q = NULL; + + if (is_rebuild) + return 0; + + sxe2_vsi_assign_bpf_prog(vsi, NULL); + + ret = sxe2_txsched_lan_vsi_cfg(vsi); + if (ret) + LOG_DEV_ERR("Failed VSI LAN queue config for XDP, error(%d)\n", ret); + + sxe2_vsi_xdp_qs_stats_deinit(vsi); + return ret; +} +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +STATIC void sxe2_vsi_rx_napi_schedule(struct sxe2_vsi *vsi) +{ + s32 i; + + sxe2_for_each_vsi_rxq(vsi, i) + { + struct sxe2_queue *rx_ring = vsi->rxqs.q[i]; + + if (rx_ring->xsk_pool) + napi_schedule(&rx_ring->irq_data->napi); + } +} +#endif + +#ifdef HAVE_XDP_SUPPORT +STATIC void sxe2_clear_xdp_stats(struct sxe2_vsi *vsi) +{ + s32 i; + struct sxe2_queue *rx_ring; + + sxe2_for_each_vsi_rxq(vsi, i) + { + rx_ring = READ_ONCE(vsi->rxqs.q[i]); + if (rx_ring) + memset(&rx_ring->stats->rx_stats.xdp_stats, 0, + sizeof(rx_ring->stats->rx_stats.xdp_stats)); + } +} + +STATIC s32 sxe2_xdp_setup_prog(struct sxe2_vsi *vsi, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + u32 frame_size = vsi->netdev->mtu + SXE2_PACKET_HDR_PAD; + bool if_running = netif_running(vsi->netdev); + s32 ret = 0, xdp_ring_err = 0; + + if (frame_size > vsi->rxqs.rx_buf_len) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); + return -EOPNOTSUPP; + } + + if (sxe2_xdp_is_enable(vsi) == !!prog) { + sxe2_vsi_assign_bpf_prog(vsi, prog); + return 0; + } + + if (if_running && !test_and_set_bit(SXE2_VSI_S_DOWN, vsi->state)) { + ret = sxe2_vsi_down(vsi); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Preparing device for XDP attach failed"); + return ret; + } + } + +#ifdef HAVE_XDP_SUPPORT + if (!sxe2_xdp_is_enable(vsi) && prog) { + sxe2_xdp_queue_cnt_set(vsi, vsi->rxqs.q_cnt); + + xdp_ring_err = sxe2_prepare_xdp_rings(vsi, prog); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, + "Setting up XDP Tx resources failed"); + sxe2_clear_xdp_stats(vsi); + } else if (sxe2_xdp_is_enable(vsi) && !prog) { + xdp_ring_err = sxe2_destroy_xdp_rings(vsi, false); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, + "Freeing XDP Tx resources failed"); + } +#endif + + if (if_running) + ret = sxe2_vsi_up(vsi); + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_AF_XDP_NETDEV_UMEM + if (!ret && prog) + sxe2_vsi_rx_napi_schedule(vsi); +#else + if (!ret && prog && vsi->xsk_umems) + sxe2_vsi_rx_napi_schedule(vsi); +#endif +#endif + + return (ret || xdp_ring_err) ? 
-ENOMEM : 0; +} + +STATIC s32 sxe2_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct sxe2_netdev_priv *priv = netdev_priv(dev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = priv->vsi->adapter; + s32 ret = 0; + + if (sxe2_is_safe_mode(adapter)) { +#ifdef HAVE_XDP_QUERY_PROG + if (xdp->command == XDP_QUERY_PROG) { + xdp->prog_id = 0; + return 0; + } +#endif + LOG_DEV_ERR("safe mode not support xdp config.\n"); + return -EOPNOTSUPP; + } + + if (vsi->type != SXE2_VSI_T_PF) { + LOG_DEV_ERR("device type(%d) not support xdp setting.\n", vsi->type); + return -EINVAL; + } + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + +#ifdef HAVE_XDP_QUERY_PROG + if (xdp->command == XDP_QUERY_PROG) { + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; + ret = 0; + goto unlock; + } +#endif + + if (test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags)) { + LOG_DEV_ERR("MACVLAN is enabled, can not set xdp.\n"); + ret = -EPERM; + goto unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + if (!!xdp->prog) { + ret = -EBUSY; + goto unlock; + } + } + + switch (xdp->command) { + case XDP_SETUP_PROG: + ret = sxe2_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + break; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + case XDP_SETUP_XSK_POOL: +#ifdef HAVE_NETDEV_BPF_XSK_POOL + ret = sxe2_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); +#else + ret = sxe2_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); +#endif + break; +#endif + default: + ret = -EINVAL; + break; + } + +unlock: + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} +#endif + +#ifdef HAVE_XDP_SUPPORT +STATIC s32 sxe2_xdp_xmit(struct net_device *dev, s32 frame_cnt, + struct xdp_frame **frames, u32 flags) +{ + struct sxe2_netdev_priv *np = netdev_priv(dev); + struct sxe2_vsi *vsi = np->vsi; + u32 queue_index; + struct sxe2_queue *xdp_ring; + s32 nxmit = 0, i; + + preempt_disable(); + queue_index = smp_processor_id(); + preempt_enable(); + queue_index = (queue_index % vsi->num_xdp_txq); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (test_bit(SXE2_VSI_S_DOWN, vsi->state)) { + nxmit = -ENETDOWN; + goto l_end; + } + + if (!sxe2_xdp_is_enable(vsi) || queue_index >= vsi->num_xdp_txq) { + nxmit = -ENXIO; + goto l_end; + } + + xdp_ring = vsi->xdp_rings.q[queue_index]; + for (i = 0; i < frame_cnt; i++) { + struct xdp_frame *xdpf = frames[i]; + s32 err; + + err = sxe2_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); + if (err != SXE2_XDP_TX) + break; + nxmit++; + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + sxe2_xdp_ring_update_tail(xdp_ring); + +l_end: + return nxmit; +} +#endif + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_NDO_XSK_WAKEUP +s32 sxe2_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags) +#else +s32 sxe2_xsk_async_xmit(struct net_device *netdev, u32 queue_id) +#endif +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_irq_data *q_vector; + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_queue *ring; + s32 ret = 0; + + if (test_bit(SXE2_VSI_S_DOWN, vsi->state)) { + ret = -ENETDOWN; + goto l_end; + } + + if (!sxe2_xdp_is_enable(vsi)) { + ret = -ENXIO; + goto l_end; + } + + if (queue_id >= vsi->num_xdp_txq) { + ret = -ENXIO; + goto l_end; + } + + if (!vsi->xdp_rings.q[queue_id]->xsk_pool) { + ret = -ENXIO; + goto l_end; + } + + ring = vsi->xdp_rings.q[queue_id]; + + q_vector = ring->irq_data; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + sxe2_trigger_soft_intr(&vsi->adapter->hw, q_vector); + +l_end: + return ret; +} +#endif + +STATIC 
s32 sxe2_set_vf_bw_check_param(struct net_device *netdev, s32 vf_idx, + s32 min_tx_rate, s32 max_tx_rate) +{ + s32 ret = 0; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (sxe2_min_tx_rate_oversubscribed(adapter, vf_idx, min_tx_rate)) { + ret = -EINVAL; + goto l_end; + } + + if (min_tx_rate && ((u32)min_tx_rate > (SXE2_TXSCHED_MAX_BW / 1000))) { + LOG_NETDEV_ERR("invalid min rate %u specified for the vf %d\n", + (u32)min_tx_rate, vf_idx); + ret = -EINVAL; + goto l_end; + } + + if (max_tx_rate && ((u32)max_tx_rate > (SXE2_TXSCHED_MAX_BW / 1000))) { + LOG_NETDEV_ERR("invalid max rate %u specified for the vf %d\n", + (u32)max_tx_rate, vf_idx); + ret = -EINVAL; + goto l_end; + } + + if (max_tx_rate && + ((u32)max_tx_rate > adapter->link_ctxt.current_link_speed)) { + LOG_NETDEV_ERR("invalid max rate %u specified for the vf %d in port " + "%d\n", + (u32)max_tx_rate, vf_idx, adapter->port_idx); + ret = -EINVAL; + goto l_end; + } + + if (min_tx_rate && max_tx_rate && min_tx_rate > max_tx_rate) { + LOG_NETDEV_ERR("invalid vf[%d] min rate %u > max rate %u\n", vf_idx, + min_tx_rate, max_tx_rate); + ret = -EINVAL; + goto l_end; + } +l_end: + return ret; +} + +STATIC s32 sxe2_set_vf_bw_check(struct net_device *netdev, s32 vf_idx, + s32 min_tx_rate, s32 max_tx_rate) +{ + s32 ret; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + if (test_bit(SXE2_VSI_S_DISABLE, np->vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (sxe2_vf_id_check(adapter, (u16)vf_idx)) { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_set_vf_bw_check_param(netdev, vf_idx, min_tx_rate, max_tx_rate); + +l_end: + return ret; +} + +s32 sxe2_cfg_vf_bw(struct sxe2_adapter *adapter, s32 vf_idx, s32 min_tx_rate, + s32 max_tx_rate) +{ + s32 ret; + struct sxe2_vf_node *vf_node; + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + + vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx); + if (!vf_node) { + ret = -EINVAL; + goto l_unlock; + } + + ret = sxe2_check_vf_ready_for_cfg(vf_node); + if (ret) + goto l_unlock; + + if (vf_node->prop.min_tx_rate != (u32)min_tx_rate) { + if (!min_tx_rate) + ret = sxe2_txsched_vf_bw_lmt_cfg(adapter, vf_node, + SXE2_NODE_RL_TYPE_CIR, + SXE2_TXSCHED_DFLT_BW); + else + ret = sxe2_txsched_vf_bw_lmt_cfg(adapter, vf_node, + SXE2_NODE_RL_TYPE_CIR, + (u32)min_tx_rate * 1000); + if (ret) { + LOG_ERROR_BDF("unable to set vf min rate, ret=%d, " + "vf_idx=%u, minrate=%u\n", + ret, vf_idx, min_tx_rate); + goto l_unlock; + } + vf_node->prop.min_tx_rate = (u32)min_tx_rate; + } + + if (vf_node->prop.max_tx_rate != (u32)max_tx_rate) { + if (!max_tx_rate) + ret = sxe2_txsched_vf_bw_lmt_cfg(adapter, vf_node, + SXE2_NODE_RL_TYPE_EIR, + SXE2_TXSCHED_DFLT_BW); + else + ret = sxe2_txsched_vf_bw_lmt_cfg(adapter, vf_node, + SXE2_NODE_RL_TYPE_EIR, + (u32)max_tx_rate * 1000); + if (ret) { + LOG_ERROR_BDF("unable to set vf max rate, ret=%d, " + "vf_idx=%u, maxrate=%u\n", + ret, vf_idx, max_tx_rate); + goto l_unlock; + } + vf_node->prop.max_tx_rate = (u32)max_tx_rate; + } + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, (u16)vf_idx)); + + return ret; +} + +STATIC s32 sxe2_set_vf_bw(struct net_device *netdev, s32 vf_idx, s32 min_tx_rate, + s32 max_tx_rate) +{ + s32 ret; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + ret = sxe2_set_vf_bw_check(netdev, vf_idx, min_tx_rate, max_tx_rate); + if (ret) { + LOG_NETDEV_ERR("vf bw check failed\n"); + goto 
l_end; + } + + ret = sxe2_cfg_vf_bw(adapter, vf_idx, min_tx_rate, max_tx_rate); + if (ret) + LOG_NETDEV_ERR("cfg vf bw failed\n"); + +l_end: + return ret; +} + +STATIC const struct net_device_ops sxe2_netdev_ops = { + .ndo_open = sxe2_open, + .ndo_stop = sxe2_stop, + + .ndo_select_queue = sxe2_select_queue, + .ndo_start_xmit = sxe2_xmit, + .ndo_get_stats64 = sxe2_get_stats64, + .ndo_change_mtu = sxe2_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_features = sxe2_set_features, + .ndo_features_check = sxe2_features_check, + .ndo_fix_features = sxe2_fix_features, + .ndo_set_mac_address = sxe2_set_mac_address, +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = sxe2_eth_ioctl, +#else + .ndo_do_ioctl = sxe2_eth_ioctl, +#endif + + .ndo_bridge_getlink = sxe2_bridge_getlink, + .ndo_bridge_setlink = sxe2_bridge_setlink, + .ndo_dfwd_add_station = sxe2_fwd_add_macvlan, + .ndo_dfwd_del_station = sxe2_fwd_del_macvlan, + .ndo_fdb_add = sxe2_fdb_add, + .ndo_fdb_del = sxe2_fdb_del, + .ndo_set_rx_mode = sxe2_set_rx_mode, + + .ndo_vlan_rx_add_vid = sxe2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sxe2_vlan_rx_kill_vid, + + .ndo_setup_tc = sxe2_setup_tc, + .ndo_set_tx_maxrate = sxe2_set_tx_maxrate, + .ndo_set_vf_vlan = sxe2_set_vf_port_vlan, + .ndo_set_vf_spoofchk = sxe2_set_vf_spoofchk, + .ndo_set_vf_mac = sxe2_set_vf_mac, + .ndo_get_vf_config = sxe2_get_vf_cfg, + .ndo_set_vf_trust = sxe2_set_vf_trust, + .ndo_set_vf_link_state = sxe2_set_vf_link_state, + .ndo_set_vf_rate = sxe2_set_vf_bw, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = sxe2_rx_flow_steer, +#endif + +#ifdef HAVE_XDP_SUPPORT + .ndo_bpf = sxe2_xdp, + .ndo_xdp_xmit = sxe2_xdp_xmit, +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_NDO_XSK_WAKEUP + .ndo_xsk_wakeup = sxe2_xsk_wakeup, +#else + .ndo_xsk_async_xmit = sxe2_xsk_async_xmit, +#endif + +#endif +}; + +STATIC const struct net_device_ops sxe2_netdev_ops_for_safe_mode = { + .ndo_open = sxe2_open, + .ndo_stop = sxe2_stop, + .ndo_start_xmit = sxe2_xmit, + .ndo_get_stats64 = sxe2_get_stats64, + .ndo_change_mtu = sxe2_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sxe2_set_mac_address, +#ifdef HAVE_XDP_SUPPORT + .ndo_bpf = sxe2_xdp, +#endif +}; + +STATIC void sxe2_netdev_ops_init(struct net_device *netdev) +{ + netdev->netdev_ops = &sxe2_netdev_ops; +} + +STATIC void sxe2_netdev_ops_init_for_safe_mode(struct net_device *netdev) +{ + netdev->netdev_ops = &sxe2_netdev_ops_for_safe_mode; +} + +static void sxe2_netdev_priv_flags_init(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; +} + +#ifdef HAVE_NETDEV_MIN_MAX_MTU +void sxe2_netdev_mtu_init(struct net_device *netdev) +{ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = SXE2_MAX_MTU; +} +#endif + +s32 sxe2_netdev_init(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + struct net_device *netdev; + + netdev = sxe2_netdev_alloc(vsi); + if (!netdev) { + ret = -ENOMEM; + goto l_end; + } + + sxe2_netdev_feature_init(netdev); + + if (!sxe2_is_safe_mode(adapter)) + sxe2_netdev_ops_init(netdev); + else + sxe2_netdev_ops_init_for_safe_mode(netdev); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + netdev->udp_tunnel_nic_info = adapter->udp_tunnel_nic; +#endif + sxe2_netdev_priv_flags_init(netdev); + +#ifdef HAVE_NETDEV_MIN_MAX_MTU + sxe2_netdev_mtu_init(netdev); +#endif + if (!sxe2_is_safe_mode(adapter)) + sxe2_ethtool_ops_set(netdev); + else + sxe2_ethtool_ops_set_for_safe_mode(netdev); + + sxe2_dcbnl_setup(vsi); + +l_end: + return ret; +} + +void 
sxe2_netdev_deinit(struct sxe2_vsi *vsi) +{ + free_netdev(vsi->netdev); + vsi->netdev = NULL; +} + +s32 sxe2_netdev_register(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + netif_tx_stop_all_queues(vsi->netdev); + netif_carrier_off(vsi->netdev); + LOG_INFO_BDF("net dev carrier off link down.\n"); + + ret = register_netdev(vsi->netdev); + if (ret) { + LOG_DEV_ERR("netdev register failed, ret=%d.\n", ret); + goto l_end; + } + + ret = sxe2_hw_mtu_init(adapter, vsi->netdev->mtu, false); + if (ret) { + unregister_netdev(vsi->netdev); + LOG_DEV_ERR("net dev init mtu set failed, ret=%d.\n", ret); + } + +l_end: + return ret; +} + +bool netif_is_sxe2(struct net_device *netdev) +{ + return netdev && (netdev->netdev_ops == &sxe2_netdev_ops); +} + +#ifdef HAVE_FLOW_BLOCK_API +#ifdef HAVE_TC_INDIR_BLOCK +static void sxe2_rep_indr_tc_block_unbind(void *cb_priv) +{ + struct sxe2_indr_block_priv *indr_priv = cb_priv; + + list_del(&indr_priv->list); + kfree(indr_priv); +} +#endif +#endif + +#ifdef HAVE_TC_INDIR_BLOCK +static struct sxe2_indr_block_priv * +sxe2_indr_block_priv_find(struct sxe2_netdev_priv *np, struct net_device *netdev) +{ + struct sxe2_indr_block_priv *cb_priv; + + list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) + { + if (!cb_priv->netdev) + return NULL; + if (cb_priv->netdev == netdev) + return cb_priv; + } + return NULL; +} + +#ifdef SXE2_INDR_SETUP_TC_BLOCK_NEED_3_PARAMS +STATIC s32 sxe2_indr_setup_tc_block(struct net_device *netdev, + struct sxe2_netdev_priv *np, + struct flow_block_offload *f) +#else +STATIC s32 sxe2_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, + struct sxe2_netdev_priv *np, + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +#endif +{ +#ifndef HAVE_FLOW_BLOCK_API + s32 ret; +#endif + struct sxe2_indr_block_priv *indr_priv; +#ifdef HAVE_FLOW_BLOCK_API + struct flow_block_cb *block_cb; +#endif + + if ((sxe2_tc_tun_type_get(netdev) == SXE2_TNL_NONE) && + !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == np->vsi->netdev)) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + if (f->command == FLOW_BLOCK_BIND) { + indr_priv = sxe2_indr_block_priv_find(np, netdev); + if (indr_priv) + return -EEXIST; + + indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); + if (!indr_priv) + return -ENOMEM; + + indr_priv->netdev = netdev; + indr_priv->np = np; + + list_add(&indr_priv->list, &np->tc_indr_block_priv_list); + +#ifdef HAVE_FLOW_BLOCK_API +#ifdef HAVE_FLOW_INDR_BLOCK_API + block_cb = flow_indr_block_cb_alloc(sxe2_indr_setup_block_cb, + indr_priv, indr_priv, + sxe2_rep_indr_tc_block_unbind, f, + netdev, sch, data, np, cleanup); +#else + block_cb = flow_block_cb_alloc(sxe2_indr_setup_block_cb, indr_priv, + indr_priv, + sxe2_rep_indr_tc_block_unbind); +#endif + if (IS_ERR(block_cb)) { + list_del(&indr_priv->list); + kfree(indr_priv); + return (s32)PTR_ERR(block_cb); + } + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &sxe2_block_cb_list); +#else + ret = tcf_block_cb_register(f->block, sxe2_indr_setup_block_cb, + indr_priv, indr_priv, f->extack); + if (ret) { + list_del(&indr_priv->list); + kfree(indr_priv); + } + return ret; +#endif + } else if (f->command == FLOW_BLOCK_UNBIND) { + indr_priv = sxe2_indr_block_priv_find(np, netdev); + if (!indr_priv) + return -ENOENT; + +#ifdef HAVE_FLOW_BLOCK_API + block_cb = flow_block_cb_lookup(f->block, sxe2_indr_setup_block_cb, + 
indr_priv);
+		if (!block_cb)
+			return -ENOENT;
+
+#ifdef HAVE_FLOW_INDR_BLOCK_API
+		flow_indr_block_cb_remove(block_cb, f);
+#else
+		flow_block_cb_remove(block_cb, f);
+#endif
+		list_del(&block_cb->driver_list);
+#else
+		tcf_block_cb_unregister(f->block, sxe2_indr_setup_block_cb,
+					indr_priv);
+		list_del(&indr_priv->list);
+		kfree(indr_priv);
+#endif
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+#ifdef SXE2_INDR_SETUP_TC_BLOCK_CB_NEED_4_PARAMS
+STATIC s32 sxe2_indr_setup_tc_block_cb(struct net_device *netdev, void *cb_priv,
+				       enum tc_setup_type type, void *type_data)
+{
+	if (type == TC_SETUP_BLOCK)
+		return sxe2_indr_setup_tc_block(netdev, cb_priv, type_data);
+	else
+		return -EOPNOTSUPP;
+}
+#else
+STATIC s32 sxe2_indr_setup_tc_block_cb(
+	struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+	enum tc_setup_type type, void *type_data, void *data,
+	void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	if (type == TC_SETUP_BLOCK)
+		return sxe2_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
+						data, cleanup);
+	else
+		return -EOPNOTSUPP;
+}
+#endif
+
+#ifndef HAVE_TC_FLOW_INDIR_DEV
+static int sxe2_indr_register_block(struct sxe2_netdev_priv *np,
+				    struct net_device *netdev)
+{
+	struct sxe2_vsi *vsi = np->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	int err;
+
+	err = __flow_indr_block_cb_register(netdev, np, sxe2_indr_setup_tc_block_cb,
+					    np);
+	if (err) {
+		LOG_NETDEV_ERR("Failed to register remote block notifier for %s, "
+			       "err %d\n",
+			       netdev_name(netdev), err);
+	}
+	return err;
+}
+
+static void sxe2_indr_unregister_block(struct sxe2_netdev_priv *np,
+				       struct net_device *netdev)
+{
+	__flow_indr_block_cb_unregister(netdev, sxe2_indr_setup_tc_block_cb, np);
+}
+
+static void sxe2_indr_clean_block_privs(struct sxe2_netdev_priv *np)
+{
+	struct sxe2_indr_block_priv *cb_priv, *temp;
+	struct list_head *head = &np->tc_indr_block_priv_list;
+
+	list_for_each_entry_safe(cb_priv, temp, head, list)
+	{
+		sxe2_indr_unregister_block(np, cb_priv->netdev);
+		list_del(&cb_priv->list);
+		kfree(cb_priv);
+	}
+}
+
+static int sxe2_netdevice_event(struct notifier_block *nb, unsigned long event,
+				void *ptr)
+{
+	struct sxe2_netdev_priv *np =
+		container_of(nb, struct sxe2_netdev_priv, netdevice_nb);
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	int tunnel_type = sxe2_tc_tun_type_get(netdev);
+
+	if (tunnel_type != SXE2_TNL_VXLAN && tunnel_type != SXE2_TNL_GENEVE &&
+	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == np->vsi->netdev))
+		return NOTIFY_OK;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		sxe2_indr_register_block(np, netdev);
+		break;
+	case NETDEV_UNREGISTER:
+		sxe2_indr_unregister_block(np, netdev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif
+#endif
+
+#ifdef HAVE_TC_INDIR_BLOCK
+s32 sxe2_tc_indir_block_register(struct sxe2_vsi *vsi)
+{
+	struct sxe2_netdev_priv *np;
+
+	if (!vsi || !vsi->netdev)
+		return -EINVAL;
+
+	np = netdev_priv(vsi->netdev);
+
+	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
+#ifdef HAVE_TC_FLOW_INDIR_DEV
+	return flow_indr_dev_register(sxe2_indr_setup_tc_block_cb, np);
+#else
+	np->netdevice_nb.notifier_call = sxe2_netdevice_event;
+	return register_netdevice_notifier(&np->netdevice_nb);
+#endif
+}
+
+void sxe2_tc_indir_block_unregister(struct sxe2_vsi *vsi)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(vsi->netdev);
+
+#ifdef HAVE_TC_FLOW_INDIR_DEV
+#ifdef UNREGISTER_NEED_SETUP_BLOCK
+	flow_indr_dev_unregister(sxe2_indr_setup_tc_block_cb, np,
+				 sxe2_indr_setup_block_cb);
+#else
flow_indr_dev_unregister(sxe2_indr_setup_tc_block_cb, np, + sxe2_rep_indr_tc_block_unbind); +#endif +#else + unregister_netdevice_notifier(&np->netdevice_nb); + sxe2_indr_clean_block_privs(np); +#endif +} +#endif + +bool sxe2_netdev_is(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &sxe2_netdev_ops || + dev->netdev_ops == &sxe2_netdev_ops_for_safe_mode); +} + +s32 sxe2_netdev_q_cnt_set(struct net_device *netdev, u16 txq_cnt, u16 rxq_cnt, + bool is_locked) +{ + s32 ret; + struct sxe2_netdev_priv *np; + struct sxe2_adapter *adapter; + u16 old_txq_cnt; + u16 total_txq_cnt; + + if (!is_locked) + rtnl_lock(); + + old_txq_cnt = (u16)netdev->real_num_tx_queues; + total_txq_cnt = txq_cnt; + + if (!netif_is_macvlan(netdev)) { + np = netdev_priv(netdev); + adapter = np->vsi->adapter; + if (test_bit(SXE2_FLAG_MACVLAN_ENABLE, adapter->flags)) + total_txq_cnt += adapter->macvlan_ctxt.max_num_macvlan; + } + + ret = netif_set_real_num_tx_queues(netdev, total_txq_cnt); + if (ret) { + LOG_ERROR("set real txq cnt from %u to %u failed %d.\n", old_txq_cnt, + total_txq_cnt, ret); + goto l_out; + } + + ret = netif_set_real_num_rx_queues(netdev, rxq_cnt); + if (ret) { + LOG_ERROR("set real rxq cnt to %u failed.\n", rxq_cnt); + (void)netif_set_real_num_tx_queues(netdev, old_txq_cnt); + } + +l_out: + if (!is_locked) + rtnl_unlock(); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.h new file mode 100644 index 0000000000000000000000000000000000000000..61987f147598e55e14fcb267dea2b6bcaa47a32f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_netdev.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_netdev.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_NETDEV_H__ +#define __SXE2_NETDEV_H__ + +#include "sxe2_compat.h" +#include "sxe2_vsi.h" +#include "sxe2_mbx_public.h" + +#define NETIF_VLAN_FILTERING_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_VLAN_STRIPPING_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) + +#define NETIF_VLAN_OFFLOAD_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX) + +#define SXE2_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +#define SXE2_XDP_PASS (0) +#define SXE2_XDP_CONSUMED BIT(0) +#define SXE2_XDP_TX BIT(1) +#define SXE2_XDP_REDIR BIT(2) + +struct sxe2_indr_block_priv { + struct net_device *netdev; + struct sxe2_netdev_priv *np; + struct list_head list; +}; + +struct sxe2_netdev_priv { + struct sxe2_vsi *vsi; + struct sxe2_vf_repr *repr; + struct list_head tc_indr_block_priv_list; +#ifndef HAVE_TC_FLOW_INDIR_DEV + struct notifier_block netdevice_nb; +#endif +}; + +s32 sxe2_open(struct net_device *netdev); + +s32 sxe2_stop(struct net_device *netdev); + +s32 sxe2_netdev_init(struct sxe2_vsi *vsi); + +void sxe2_netdev_deinit(struct sxe2_vsi *vsi); + +s32 sxe2_netdev_register(struct sxe2_vsi *vsi); + +void sxe2_fetch_u64_data_per_ring(struct u64_stats_sync *syncp, + struct sxe2_queue_stats *stats, u64 *pkts, + u64 *bytes); + +void sxe2_set_vlan_offload_features(struct sxe2_vsi *vsi, + netdev_features_t current_features, + netdev_features_t requested_features); + +s32 sxe2_set_vlan_filter_features(struct sxe2_vsi *vsi, + netdev_features_t features); + +bool netif_is_sxe2(struct net_device *netdev); + +#ifdef HAVE_NETDEV_MIN_MAX_MTU +void sxe2_netdev_mtu_init(struct net_device *netdev); +#endif + +void sxe2_netdev_feature_init(struct net_device *netdev); + +#ifdef HAVE_TC_INDIR_BLOCK +s32 sxe2_tc_indir_block_register(struct sxe2_vsi *vsi); + +void sxe2_tc_indir_block_unregister(struct sxe2_vsi *vsi); +#endif + +bool sxe2_netdev_is(struct net_device *dev); + +s32 sxe2_check_vf_ready_for_cfg(struct sxe2_vf_node *vf); + +#ifdef HAVE_XDP_SUPPORT +s32 sxe2_xmit_xdp_buff(struct xdp_buff *xdp, struct sxe2_queue *xdp_ring); + +s32 sxe2_xmit_xdp_ring(void *data, u16 size, struct sxe2_queue *xdp_ring); + +s32 sxe2_destroy_xdp_rings(struct sxe2_vsi *vsi, bool is_rebuild); + +s32 sxe2_prepare_xdp_rings(struct sxe2_vsi *vsi, struct bpf_prog *prog); + +void sxe2_vsi_xdp_qs_stats_deinit(struct sxe2_vsi *vsi); +#endif + +s32 sxe2_netdev_q_cnt_set(struct net_device *netdev, u16 txq_cnt, u16 rxq_cnt, + bool is_locked); + +s32 sxe2_set_mtu_cfg(struct sxe2_adapter *adapter, u32 set_mtu); + +s32 sxe2_cfg_vf_bw(struct sxe2_adapter *adapter, s32 vf_idx, + s32 min_tx_rate, s32 max_tx_rate); + +s32 sxe2_net_link_down(struct sxe2_adapter *adapter); + +s32 sxe2_link_up(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..68a776986ae9df03ee93599f9333b411559adb5a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.c @@ -0,0 +1,1240 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_ptp.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_host_regs.h" +#include "sxe2.h" + +#include "sxe2_ptp.h" + +#include "sxe2_common.h" +#include "sxe2_log.h" +#include "sxe2_rx.h" + +#define SXE2_PTP_MAX_ADJ (100000000) +#define SXE2_MAX_CLKO_VALUE (0x3B9AC9FF) +#define SXE2_PTP_BUSY_MAX_RETRY (10) +#define SXE2_PTP_SLEEP_MAX (10000) +#define SXE2_PTP_SLEEP_MIN (5000) +#define SXE2_PTP_PERIOD_NORMAL (500) +#define SXE2_PTP_PERIOD_UNNORMAL (50) +#define PTP_TIME_OFFSET (13) +#define PTP_TIME_DIV (125) + +#define SXE2_NS_PER_SEC (1000000000ULL) + +#define SXE2_PTP_INGRESS_CORR_NANO (0x47) +#define SXE2_PTP_INGRESS_CORR_SUBNANO (0x0700) +#define SXE2_PTP_INGRESS_CORR (0x80001f07) +#define SXE2_PTP_EGRESS_CORR_NANO (0x11c) +#define SXE2_PTP_EGRESS_CORR_SUBNANO (0x1f00) +#define SXE2_PTP_EGRESS_CORR (0x80001f07) +#define SXE2_PTP_SAMPLE_TYPE_ALL (0xc) +#define SXE2_PTP_SAMPLE_TYPE_ALL_EVENT (0x1) +#define SXE2_PTP_FILTER_TYPE_ALL (3) + +#define SXE2_PTP_WAIT_RETRY_CNT (10) +static void sxe2_ptp_schedule_periodic_work(struct sxe2_ptp_context *ptp, + unsigned long delay); +static void sxe2_ptp_all_perout_restore(struct sxe2_adapter *adapter); +static void sxe2_ptp_all_perout_disable(struct sxe2_adapter *adapter); +static void sxe2_ptp_cancel_periodic_work(struct sxe2_ptp_context *ptp); + +static struct mutex sxe2_ptp_owner_mtx; +static struct sxe2_ptp_owner_list sxe2_ptp_owner_head; + +void sxe2_ptp_owner_init_once(void) +{ + mutex_init(&sxe2_ptp_owner_mtx); + INIT_LIST_HEAD(&sxe2_ptp_owner_head.node); +} + +void sxe2_ptp_owner_deinit_once(void) +{ + mutex_destroy(&sxe2_ptp_owner_mtx); +} + +s32 sxe2_ptp_clock_idx_get(struct sxe2_adapter *adapter) +{ + s32 ret = -1; + struct list_head *tmp; + struct list_head *n; + struct sxe2_ptp_owner_list *entry = NULL; + + mutex_lock(&sxe2_ptp_owner_mtx); + list_for_each_safe(tmp, n, &sxe2_ptp_owner_head.node) { + entry = list_entry(tmp, struct sxe2_ptp_owner_list, node); + if (!entry->owner_adapter) + continue; + + if (memcmp(adapter->serial_num, + entry->owner_adapter->serial_num, SXE2_SERIAL_NUM_LEN) == 0) { + if (entry->owner_adapter->ptp_ctxt.clock) + ret = ptp_clock_index(entry->owner_adapter->ptp_ctxt.clock); + + break; + } + } + mutex_unlock(&sxe2_ptp_owner_mtx); + return ret; +} + +STATIC s32 sxe2_ptp_owner_adapter_add(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_ptp_owner_list *entry; + + mutex_lock(&sxe2_ptp_owner_mtx); + entry = kzalloc(sizeof(struct sxe2_ptp_owner_list), GFP_KERNEL); + if (!entry) { + LOG_ERROR_BDF("alloc lag list node failed.\n"); + ret = -ENOMEM; + goto out; + } + entry->owner_adapter = adapter; + list_add(&entry->node, &sxe2_ptp_owner_head.node); + +out: + mutex_unlock(&sxe2_ptp_owner_mtx); + return ret; +} + +STATIC void sxe2_ptp_owner_adapter_delete(struct sxe2_adapter *adapter) +{ + struct list_head *tmp; + struct list_head *n; + struct sxe2_ptp_owner_list *entry = NULL; + + mutex_lock(&sxe2_ptp_owner_mtx); + + list_for_each_safe(tmp, n, &sxe2_ptp_owner_head.node) { + entry = list_entry(tmp, struct sxe2_ptp_owner_list, node); + if (entry->owner_adapter == adapter) { + list_del(&entry->node); + break; + } + + entry = NULL; + } + + kfree(entry); + + mutex_unlock(&sxe2_ptp_owner_mtx); +} + +static inline struct sxe2_vsi *sxe2_get_main_vsi(struct sxe2_adapter *adapter) +{ + return adapter->vsi_ctxt.main_vsi; +} + +static inline struct sxe2_adapter * +sxe2_ptp_to_adapter(struct ptp_clock_info *clock_info) +{ + struct 
sxe2_ptp_context *ptpinfo; + + ptpinfo = container_of(clock_info, struct sxe2_ptp_context, info); + (void)ptpinfo; + + return container_of(ptpinfo, struct sxe2_adapter, ptp_ctxt); +} + +#ifdef SXE2_CFG_DEBUG +static void timespec64_to_localtime(const struct timespec64 *time, + struct rtc_time *tm) +{ + time64_t local_time; + + local_time = time->tv_sec - (sys_tz.tz_minuteswest * 60); + rtc_time64_to_tm(local_time, tm); + tm->tm_mon += 1; + tm->tm_year += 1900; +} +#endif + +static void dump_timespec64(struct sxe2_adapter *adapter, + const struct timespec64 *ts) +{ +#ifdef SXE2_CFG_DEBUG + struct rtc_time tm; + time64_t sec = ts->tv_sec; + time64_t nsec = ts->tv_nsec; + + timespec64_to_localtime(ts, &tm); + LOG_DEBUG_BDF("Time: %lld:%06lld\n", sec, nsec); + LOG_DEBUG_BDF("Readable Time:(%04d-%02d-%02d %02d:%02d:%02d.%ld)\n", + tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, + tm.tm_sec, ts->tv_nsec); +#endif +} + +static bool sxe2_ptp_sem_acquire(struct sxe2_adapter *adapter) +{ + int cnt = 0; + bool value = false; + + for (cnt = 0; cnt < SXE2_PTP_BUSY_MAX_RETRY; cnt++) { + value = sxe2_hw_ptp_acquire_1588_lock(&adapter->hw); + if (value) + break; + + (void)usleep_range(SXE2_PTP_SLEEP_MIN, SXE2_PTP_SLEEP_MAX); + } + + return value; +} + +static void sxe2_ptp_sem_release(struct sxe2_adapter *adapter) +{ + sxe2_hw_ptp_release_1588_lock(&adapter->hw); +} + +static int sxe2_ptp_primary_timer_set(struct sxe2_adapter *adapter, + struct timespec64 tstamp) +{ + if (!sxe2_ptp_sem_acquire(adapter)) + return -EBUSY; + + sxe2_hw_ptp_1588_timestamp_write(&adapter->hw, (u64)tstamp.tv_sec, + (u32)tstamp.tv_nsec); + sxe2_ptp_sem_release(adapter); + + return 0; +} + +static int sxe2_ptp_primary_timer_adjust(struct sxe2_adapter *adapter, s64 adj) +{ + u32 adj_nano; + bool neg = false; + + if (adj < 0) { + neg = true; + adj_nano = (u32)(-adj); + } else { + adj_nano = (u32)(adj); + } + + if (!sxe2_ptp_sem_acquire(adapter)) + return -EBUSY; + + sxe2_hw_ptp_1588_timestamp_adjust(&adapter->hw, adj_nano, neg); + sxe2_ptp_sem_release(adapter); + + return 0; +} + +bool sxe2_ptp_primary_timer_read(struct sxe2_adapter *adapter, + struct timespec64 *hwts) +{ + u64 ns; + u64 second; + + if (!sxe2_ptp_sem_acquire(adapter)) { + LOG_ERROR_BDF("Failed to get ptp sem.\n"); + return false; + } + + sxe2_hw_ptp_1588_timestamp_read(&adapter->hw, &second, &ns); + sxe2_ptp_sem_release(adapter); + hwts->tv_nsec = (time64_t)ns; + hwts->tv_sec = (long)second; + + return true; +} + +bool sxe2_ptp_owned(struct sxe2_adapter *adapter) +{ + return adapter->ptp_ctxt.ptp_owned; +} + +STATIC void sxe2_ptp_txts_enable(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + vsi = sxe2_get_main_vsi(adapter); + if (!vsi) + return; + + adapter->ptp_ctxt.ptp_tx_enable = true; + sxe2_hw_ptp_tsyn_switch(&adapter->hw, true); +} + +STATIC void sxe2_ptp_txts_disable(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + vsi = sxe2_get_main_vsi(adapter); + if (!vsi) + return; + + adapter->ptp_ctxt.ptp_tx_enable = false; + + sxe2_hw_ptp_tsyn_switch(&adapter->hw, false); +} + +STATIC void sxe2_ptp_rxts_enable(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + vsi = sxe2_get_main_vsi(adapter); + if (!vsi) + return; + + adapter->ptp_ctxt.ptp_rx_enable = true; +} + +STATIC void sxe2_ptp_rxts_disable(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + + vsi = sxe2_get_main_vsi(adapter); + if (!vsi) + return; + + adapter->ptp_ctxt.ptp_rx_enable = false; +} + +STATIC void sxe2_ptp_rxts_restore(struct sxe2_adapter *adapter) 
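+/* annotation (not in the original patch): the *_restore helpers below
+ * re-apply whatever hwtstamp_config userspace last requested; they are
+ * called from sxe2_ptp_rebuild() so rx/tx timestamping survives a device
+ * reset without another SIOCSHWTSTAMP
+ */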
+{ + if (adapter->ptp_ctxt.tstamp_config.rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) + sxe2_ptp_rxts_enable(adapter); + else + sxe2_ptp_rxts_disable(adapter); +} + +STATIC void sxe2_ptp_txts_restore(struct sxe2_adapter *adapter) +{ + if (adapter->ptp_ctxt.tstamp_config.tx_type == HWTSTAMP_TX_ON) + sxe2_ptp_txts_enable(adapter); + else + sxe2_ptp_txts_disable(adapter); +} + +int sxe2_ptp_hwts_get(struct sxe2_adapter *adapter, struct ifreq *ifr) +{ + if (adapter->ptp_ctxt.status != PTP_READY) + return -EIO; + + return copy_to_user(ifr->ifr_data, &adapter->ptp_ctxt.tstamp_config, + sizeof(struct hwtstamp_config)) ? -EFAULT : 0; +} + +static int sxe2_ptp_ts_mode_set(struct sxe2_adapter *adapter, + struct hwtstamp_config *config) +{ + if (!config) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_OFF; + sxe2_ptp_txts_disable(adapter); + break; + case HWTSTAMP_TX_ON: + adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_ON; + sxe2_ptp_txts_enable(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + sxe2_ptp_rxts_disable(adapter); + adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: +#ifdef HWTSTAMP_FILTER_NTP_ALL + case HWTSTAMP_FILTER_NTP_ALL: +#endif + case HWTSTAMP_FILTER_ALL: + return -ERANGE; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + sxe2_ptp_rxts_enable(adapter); + adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; + } + + return 0; +} + +int sxe2_ptp_hwts_set(struct sxe2_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (adapter->ptp_ctxt.status != PTP_READY) + return -EAGAIN; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = sxe2_ptp_ts_mode_set(adapter, &config); + if (err) + return err; + + config = adapter->ptp_ctxt.tstamp_config; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0;
+}
+
+static int sxe2_extts_configure(struct ptp_clock_info *ptp,
+				struct ptp_clock_request *rq, int on)
+{
+	struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp);
+	u32 aux_in_value = 0;
+
+	if (rq->extts.index >= SXE2_EXTTS_COUNT)
+		return -EINVAL;
+
+	if (on) {
+		sxe2_hw_ptp_tsyn_event_switch(&adapter->hw, true);
+		aux_in_value = GLTSYN_AUXIN_ENABLE;
+		if (rq->extts.flags & PTP_RISING_EDGE)
+			aux_in_value |= GLTSYN_AUXIN_RISING_EDGE;
+
+		if (rq->extts.flags & PTP_FALLING_EDGE)
+			aux_in_value |= GLTSYN_AUXIN_FALLING_EDGE;
+
+		sxe2_hw_ptp_aux_in_set(&adapter->hw, rq->extts.index,
+				       aux_in_value);
+		set_bit((int)(rq->extts.index), adapter->ptp_ctxt.extts.chan);
+	} else {
+		sxe2_hw_ptp_aux_in_set(&adapter->hw, rq->extts.index, aux_in_value);
+		clear_bit((int)(rq->extts.index), adapter->ptp_ctxt.extts.chan);
+		if (bitmap_empty(adapter->ptp_ctxt.extts.chan, SXE2_EXTTS_COUNT))
+			sxe2_hw_ptp_tsyn_event_switch(&adapter->hw, false);
+	}
+
+	return 0;
+}
+
+static void sxe2_ptp_all_perout_restore(struct sxe2_adapter *adapter)
+{
+	u32 index;
+	u64 period = 0;
+	u32 value;
+
+	for (index = 0; index < SXE2_PEROUT_COUNT; index++) {
+		if (adapter->ptp_ctxt.perout[index].on) {
+			value = sxe2_hw_ptp_auxout_get(&adapter->hw, index);
+			value |= GLTSYN_AUXOUT_OUT_ENA;
+			value |= GLTSYN_AUXOUT_INT_ENA;
+			sxe2_hw_ptp_auxout_set(&adapter->hw, index, value);
+			/* recompute the half-period from the cached period;
+			 * the start time is passed separately below
+			 */
+			period = (u64)((adapter->ptp_ctxt.perout[index].period.tv_sec *
+					NSEC_PER_SEC) +
+				       adapter->ptp_ctxt.perout[index].period.tv_nsec);
+			period >>= 1;
+
+			sxe2_hw_ptp_1588_clockout_write(
+				&adapter->hw, index, period,
+				(u64)adapter->ptp_ctxt.perout[index].start.tv_sec,
+				(u64)adapter->ptp_ctxt.perout[index].start.tv_nsec);
+		} else {
+			value = sxe2_hw_ptp_auxout_get(&adapter->hw, index);
+			value &= ~GLTSYN_AUXOUT_OUT_ENA;
+			value &= ~GLTSYN_AUXOUT_INT_ENA;
+			sxe2_hw_ptp_auxout_set(&adapter->hw, index, value);
+			sxe2_hw_ptp_1588_clockout_write(&adapter->hw, index, 0, 0, 0);
+		}
+	}
+}
+
+static void sxe2_ptp_all_perout_disable(struct sxe2_adapter *adapter)
+{
+	u32 index;
+	u32 value;
+
+	for (index = 0; index < SXE2_PEROUT_COUNT; index++) {
+		value = sxe2_hw_ptp_auxout_get(&adapter->hw, index);
+		value &= ~GLTSYN_AUXOUT_OUT_ENA;
+		value &= ~GLTSYN_AUXOUT_INT_ENA;
+		sxe2_hw_ptp_auxout_set(&adapter->hw, index, value);
+		sxe2_hw_ptp_1588_clockout_write(&adapter->hw, index, 0, 0, 0);
+	}
+}
+
+static int sxe2_perout_configure(struct ptp_clock_info *ptp,
+				 struct ptp_clock_request *rq, int on)
+{
+	u32 value;
+	u64 period = 0;
+	struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp);
+	u32 index = rq->perout.index;
+
+	if (index >= SXE2_PEROUT_COUNT)
+		return -EINVAL;
+
+	/* the clock-out register is programmed with the half-period (it
+	 * is halved below), so odd-nanosecond periods cannot be
+	 * represented exactly and are rejected
+	 */
+	period = (u64)((rq->perout.period.sec * NSEC_PER_SEC) +
+		       rq->perout.period.nsec);
+	if (period & 0x1) {
+		LOG_DEV_ERR("CLKO period must be an even number of ns\n");
+		return -EIO;
+	}
+
+	period >>= 1;
+	if (period > SXE2_MAX_CLKO_VALUE) {
+		LOG_DEV_ERR("CLKO period is too large\n");
+		return -EIO;
+	}
+
+	value = sxe2_hw_ptp_auxout_get(&adapter->hw, index);
+	if (on && period) {
+		adapter->ptp_ctxt.perout[index].period.tv_sec =
+			rq->perout.period.sec;
+		adapter->ptp_ctxt.perout[index].period.tv_nsec =
+			rq->perout.period.nsec;
+		adapter->ptp_ctxt.perout[index].start.tv_sec =
+			rq->perout.start.sec;
+		adapter->ptp_ctxt.perout[index].start.tv_nsec =
+			rq->perout.start.nsec;
+		adapter->ptp_ctxt.perout[index].on = true;
+		value |= GLTSYN_AUXOUT_OUT_ENA;
+		value |= GLTSYN_AUXOUT_INT_ENA;
+		sxe2_hw_ptp_auxout_set(&adapter->hw, index, value);
+	} else {
+		adapter->ptp_ctxt.perout[index].period.tv_sec = 0;
+
adapter->ptp_ctxt.perout[index].period.tv_nsec = 0; + adapter->ptp_ctxt.perout[index].start.tv_sec = 0; + adapter->ptp_ctxt.perout[index].start.tv_nsec = 0; + adapter->ptp_ctxt.perout[index].on = false; + value &= ~GLTSYN_AUXOUT_OUT_ENA; + value &= ~GLTSYN_AUXOUT_INT_ENA; + sxe2_hw_ptp_auxout_set(&adapter->hw, index, value); + } + + sxe2_hw_ptp_1588_clockout_write(&adapter->hw, index, period, + (u64)adapter->ptp_ctxt.perout[index].start.tv_sec, + (u64)adapter->ptp_ctxt.perout[index].start.tv_nsec); + + return 0; +} + +int sxe2_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on) +{ + int ret; + + switch (request->type) { + case PTP_CLK_REQ_EXTTS: + ret = sxe2_extts_configure(ptp, request, !!on); + break; + case PTP_CLK_REQ_PEROUT: + ret = sxe2_perout_configure(ptp, request, !!on); + break; + case PTP_CLK_REQ_PPS: + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +STATIC void sxe2_ptp_update_timespec(struct sxe2_adapter *adapter, + const struct timespec64 *ts) +{ + unsigned long flags = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) { + LOG_ERROR_BDF("vsi is disable, do not use queues' context.\n"); + goto l_unlock; + } + + spin_lock_irqsave(&adapter->ptp_ctxt.cached_ts_lock, flags); + adapter->ptp_ctxt.cached_phc_time = *ts; + spin_unlock_irqrestore(&adapter->ptp_ctxt.cached_ts_lock, flags); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +int sxe2_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + int ret = 0; + struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp); + ptp_read_system_prets(sts); + + if (!sxe2_ptp_primary_timer_read(adapter, ts)) { + LOG_ERROR_BDF("failed to read 1588 timer.\n"); + ret = -EIO; + } + + ptp_read_system_postts(sts); + return ret; +} + +static int sxe2_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + return sxe2_ptp_gettimex64(info, ts, NULL); +} + +int sxe2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp); + struct timespec64 now, then; + int err; + + if (delta > SXE2_MAX_CLKO_VALUE || delta < S32_MIN) { + then = ns_to_timespec64(delta); + err = sxe2_ptp_gettimex64(ptp, &now, NULL); + if (err) { + LOG_ERROR_BDF("Failed to get current phc time.\n"); + return err; + } + + now = timespec64_add(now, then); + return sxe2_ptp_settime(ptp, (const struct timespec64 *)&now); + } + + sxe2_ptp_all_perout_disable(adapter); + (void)sxe2_ptp_primary_timer_adjust(adapter, delta); + sxe2_ptp_all_perout_restore(adapter); + + (void)sxe2_ptp_gettimex64(ptp, &now, NULL); + sxe2_ptp_update_timespec(adapter, &now); + + return 0; +} + +int sxe2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp); + u64 incval = SXE2_PTP_NOMINAL_INCVAL_TUCANA; + bool neg_adj = false; + u64 diff; + + if (scaled_ppm < 0) { + neg_adj = true; + scaled_ppm = -scaled_ppm; + } + + diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm, 1000000ULL << 16); + if (neg_adj) + incval -= diff; + else + incval += diff; + + LOG_DEBUG_BDF("calculate incval is %llu[0x%llx]\n", incval, incval); + if (!sxe2_ptp_sem_acquire(adapter)) + return -EBUSY; + + sxe2_hw_ptp_init_incval(&adapter->hw, incval); + sxe2_ptp_sem_release(adapter); + + return 0; +} + +#ifndef HAVE_PTP_CLOCK_INFO_ADJFINE +static int sxe2_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) +{ + long scaled_ppm; + + scaled_ppm = 
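+	/* derivation (not in the original patch): scaled_ppm carries 16
+	 * fractional bits, so
+	 *   scaled_ppm = ppb * 2^16 / 1000 = (ppb << 13) / 125,
+	 * which is exactly what PTP_TIME_OFFSET (13) and PTP_TIME_DIV (125)
+	 * encode; e.g. ppb = 1000 maps to scaled_ppm = 65536 (1.0 ppm)
+	 */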
((long)ppb << PTP_TIME_OFFSET) / PTP_TIME_DIV; + return sxe2_ptp_adjfine(info, scaled_ppm); +} +#endif + +int sxe2_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) +{ + int err; + struct sxe2_adapter *adapter = sxe2_ptp_to_adapter(ptp); + + sxe2_ptp_all_perout_disable(adapter); + + err = sxe2_ptp_primary_timer_set(adapter, *ts); + if (!err) { + dump_timespec64(adapter, ts); + sxe2_ptp_update_timespec(adapter, ts); + } else { + LOG_ERROR_BDF("failed to set time.\n"); + } + + sxe2_ptp_all_perout_restore(adapter); + + return 0; +} + +static u64 sxe2_ptp_32bit_to_realtime(u64 cached_time, u64 rawtstamp) +{ + u64 realtime; + u32 tstamp_ns = (u32)(rawtstamp); + u32 cached_ns = cached_time % SXE2_NS_PER_SEC; + + if (tstamp_ns > cached_ns) { + realtime = cached_time / SXE2_NS_PER_SEC * SXE2_NS_PER_SEC + tstamp_ns; + } else { + realtime = cached_time / SXE2_NS_PER_SEC * SXE2_NS_PER_SEC + SXE2_NS_PER_SEC + + tstamp_ns; + } + + return realtime; +} + +#define SXE2_RX_WB_RXDID_MASK (0x7) +#define SXE2_RX_DESC_TYPE_1588 (0x2) +void sxe2_ptp_rxts_request(struct sxe2_queue *rxq, + union sxe2_rx_desc_1588 *desc, struct sk_buff *skb) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + struct skb_shared_hwtstamps *hwtstamps; + u64 ts; + u64 raw_cached_ns; + u32 rx_ns; + u32 rx_subns; + u8 rxdid; + u32 valid; + struct timespec64 cached_timespec64; + unsigned long flags = 0; + + if (!adapter->ptp_ctxt.ptp_rx_enable) + return; + + rxdid = desc->wb.rxdid_src_fd_eudpe & SXE2_RX_WB_RXDID_MASK; + if (rxdid != SXE2_RX_DESC_TYPE_1588) + return; + + rx_ns = le32_to_cpu(desc->wb.ts_h); + rx_subns = le32_to_cpu(desc->wb.ts_l); + valid = rx_subns & BIT(0); + spin_lock_irqsave(&adapter->ptp_ctxt.cached_ts_lock, flags); + cached_timespec64 = adapter->ptp_ctxt.cached_phc_time; + spin_unlock_irqrestore(&adapter->ptp_ctxt.cached_ts_lock, flags); + raw_cached_ns = (u64)timespec64_to_ns(&cached_timespec64); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + dump_timespec64(adapter, &cached_timespec64); + LOG_DEBUG_BDF("Rx timestamp is 0x%x.0x%x,valid:%d,Cached time:%llu\n", + rx_ns, rx_subns, valid, raw_cached_ns); + } +#endif + + ts = sxe2_ptp_32bit_to_realtime(raw_cached_ns, rx_ns); + if (!ts) { + LOG_ERROR_BDF("failed to get rx timestamp.\n"); + return; + } + + hwtstamps = skb_hwtstamps(skb); + (void)memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ts); +} + +s32 sxe2_ptp_txts_request(struct sxe2_ptp_tx *tx, struct sk_buff *skb) +{ + u32 index; + unsigned long flags; + + spin_lock_irqsave(&tx->ptp_lock, flags); + + index = (u32)find_first_zero_bit(tx->in_use, SXE2_INDEX_PER_PORT); + if (index < SXE2_INDEX_PER_PORT) { + set_bit((int)index, tx->in_use); + tx->descs[index].start = jiffies; + tx->descs[index].skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } else { + spin_unlock_irqrestore(&tx->ptp_lock, flags); + return -EINVAL; + } + + spin_unlock_irqrestore(&tx->ptp_lock, flags); + + return (s32)index; +} + +static s32 sxe2_ptp_tx_timestamp_get(struct sxe2_adapter *adapter, u32 index, + u64 *ns) +{ + u64 tx_mem_ns; + u64 cached_ns; + bool ret; + struct timespec64 cached_timespec64; + unsigned long flags = 0; + + ret = sxe2_hw_ptp_tx_tstamp_read(&adapter->hw, adapter->port_idx, index, &tx_mem_ns); + if (!ret) { + LOG_ERROR_BDF("Failed to read tx timestamp.\n"); + return -EINVAL; + } + + spin_lock_irqsave(&adapter->ptp_ctxt.cached_ts_lock, flags); + cached_timespec64 = adapter->ptp_ctxt.cached_phc_time; + 
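+	/* annotation (not in the original patch): the raw tx timestamp only
+	 * carries the sub-second nanoseconds; the seconds come from
+	 * cached_phc_time, refreshed by the periodic kworker.
+	 * sxe2_ptp_32bit_to_realtime() adds one second when the raw ns is
+	 * not ahead of the cached ns, i.e. the PHC rolled over between the
+	 * cache refresh and the packet.
+	 * Example: cached = 5 s + 300 ns, raw ns = 100 -> 6 s + 100 ns.
+	 */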
spin_unlock_irqrestore(&adapter->ptp_ctxt.cached_ts_lock, flags); + + cached_ns = (u64)timespec64_to_ns(&cached_timespec64); + *ns = sxe2_ptp_32bit_to_realtime(cached_ns, tx_mem_ns); +#ifdef SXE2_CFG_DEBUG + LOG_DEBUG_BDF("Tx Get ts:cached:%lld.%ld ,total ns:%lld,read:%lld\n", + cached_timespec64.tv_sec, + cached_timespec64.tv_nsec, cached_ns, + tx_mem_ns); +#endif + + return 0; +} + +static struct sk_buff *sxe2_ptp_tstamp_skb_get(struct sxe2_adapter *adapter, u64 index) +{ + struct sk_buff *skb = NULL; + struct sxe2_ptp_tx *ptp_tx = &adapter->ptp_ctxt.tx; + unsigned long flags; + + spin_lock_irqsave(&ptp_tx->ptp_lock, flags); + clear_bit((int)index, ptp_tx->in_use); + skb = ptp_tx->descs[index].skb; + ptp_tx->descs[index].skb = NULL; + sxe2_hw_ptp_tx_tstamp_discard(&adapter->hw, adapter->port_idx, (u32)index); + spin_unlock_irqrestore(&ptp_tx->ptp_lock, flags); + + return skb; +} + +static void sxe2_ptp_tx_tstamp_clean(struct sxe2_adapter *adapter) +{ + u64 index; + u64 ns; + s32 ret; + struct skb_shared_hwtstamps shhwtstamps; + struct sxe2_ptp_tx *ptp_tx = &adapter->ptp_ctxt.tx; + struct sk_buff *skb; + + for_each_set_bit(index, ptp_tx->in_use, SXE2_INDEX_PER_PORT) { + if (time_is_before_jiffies((unsigned long)ptp_tx->descs[index].start + 2 * HZ)) { + LOG_ERROR_BDF("tx timestamp request timeout\n"); + skb = sxe2_ptp_tstamp_skb_get(adapter, index); + if (skb) + dev_kfree_skb_any(skb); + + continue; + } + + ret = sxe2_ptp_tx_timestamp_get(adapter, (u32)index, &ns); + if (ret) { + LOG_ERROR_BDF("failed to get tx timestamp\n"); + continue; + } + + skb = sxe2_ptp_tstamp_skb_get(adapter, index); + if (!skb) { + LOG_DEBUG_BDF("NULL skb in tx work handler:%llu\n", index); + continue; + } + + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); + } +} + +void sxe2_ptp_txts_process(struct sxe2_adapter *adapter) +{ + if (adapter->ptp_ctxt.status != PTP_READY) + return; + + sxe2_ptp_tx_tstamp_clean(adapter); +} + +static void sxe2_ptp_schedule_periodic_work(struct sxe2_ptp_context *ptp, + unsigned long delay) +{ + (void)kthread_queue_delayed_work(ptp->kworker, &ptp->period_work, delay); +} + +static void sxe2_ptp_cancel_periodic_work(struct sxe2_ptp_context *ptp) +{ + (void)kthread_cancel_delayed_work_sync(&ptp->period_work); +} + +void sxe2_ptp_extts_intr(struct sxe2_adapter *adapter) +{ + struct sxe2_ptp_extts_context *extts = &adapter->ptp_ctxt.extts; + u32 index; + struct ptp_clock_event event; + struct timespec64 ts; + + for (index = 0; index < SXE2_EXTTS_COUNT; index++) { + if (test_bit((int)index, extts->chan)) { + if (!test_bit((int)index, extts->irq)) + continue; + + ts.tv_sec = (time64_t)sxe2_hw_ptp_get_event_second(&adapter->hw, index); + ts.tv_nsec = + (long)sxe2_hw_ptp_get_event_nanosecond(&adapter->hw, index); + event.timestamp = (u64)timespec64_to_ns(&ts); + event.type = PTP_CLOCK_EXTTS; + event.index = (int)index; + ptp_clock_event(adapter->ptp_ctxt.clock, &event); + clear_bit((int)index, extts->irq); + } + } +} + +static bool sxe2_ptp_enabled(struct sxe2_adapter *adapter) +{ + bool ret = false; + + if (!sxe2_ptp_sem_acquire(adapter)) + goto error; + + ret = sxe2_hw_ptp_main_is_enabled(&adapter->hw); + sxe2_ptp_sem_release(adapter); + +error: + return ret; +} +static void sxe2_ptp_period_work(struct kthread_work *work) +{ + struct sxe2_adapter *adapter = container_of(work, struct sxe2_adapter, + ptp_ctxt.period_work.work); + u32 period; + struct timespec64 ts; + + if (sxe2_ptp_enabled(adapter) == false) { + period = 
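+		/* main 1588 timer not running yet: nothing to cache, just
+		 * re-poll at the normal rate (annotation, not in the
+		 * original patch)
+		 */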
SXE2_PTP_PERIOD_NORMAL; + goto out; + } + + if (sxe2_ptp_primary_timer_read(adapter, &ts)) { + period = SXE2_PTP_PERIOD_NORMAL; + sxe2_ptp_update_timespec(adapter, &ts); + + adapter->ptp_ctxt.last_is_failed = false; + } else { + if (!adapter->ptp_ctxt.last_is_failed) + LOG_ERROR_BDF("failed to read 1588 timer.\n"); + + adapter->ptp_ctxt.last_is_failed = true; + + period = SXE2_PTP_PERIOD_UNNORMAL; + } + +out: + sxe2_ptp_schedule_periodic_work(&adapter->ptp_ctxt, + msecs_to_jiffies(period)); +} + +static s32 sxe2_fwc_ptp_mac_init(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_ptp_init_req req = { 0 }; + + req.corr.ingress_corr_nanosec = SXE2_PTP_INGRESS_CORR_NANO; + req.corr.ingress_corr_subnanosec = SXE2_PTP_INGRESS_CORR_SUBNANO; + req.corr.ingress_sync_corr = SXE2_PTP_INGRESS_CORR; + req.corr.egress_corr_nanosec = SXE2_PTP_EGRESS_CORR_NANO; + req.corr.egress_corr_subnanosec = SXE2_PTP_EGRESS_CORR_SUBNANO; + req.corr.egress_sync_corr = SXE2_PTP_EGRESS_CORR; + req.filter_addr.filter_type = SXE2_PTP_FILTER_TYPE_ALL; + req.sample_type = SXE2_PTP_SAMPLE_TYPE_ALL; + req.threshold = 0; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_PTP_INIT, &req, sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("ptp fw init fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 sxe2_ptp_clock_create(struct sxe2_adapter *adapter) +{ + struct ptp_clock_info *info = &adapter->ptp_ctxt.info; + struct device *dev = (&((adapter)->pdev->dev)); + if (adapter->ptp_ctxt.clock) + return 0; + + (void)snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", + dev_driver_string(dev), dev_name(dev)); + info->owner = THIS_MODULE; + info->max_adj = SXE2_PTP_MAX_ADJ; + info->adjtime = sxe2_ptp_adjtime; +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE + info->adjfine = sxe2_ptp_adjfine; +#else + info->adjfreq = sxe2_ptp_adjfreq; +#endif +#ifdef HAVE_PTP_CLOCK_INFO_GETTIMEX64 + info->gettimex64 = sxe2_ptp_gettimex64; +#endif + info->gettime64 = sxe2_ptp_gettime; + + info->settime64 = sxe2_ptp_settime; + info->enable = sxe2_ptp_enable; + info->n_per_out = SXE2_PEROUT_COUNT; + info->n_ext_ts = SXE2_EXTTS_COUNT; + + adapter->ptp_ctxt.clock = ptp_clock_register(info, dev); + if (IS_ERR(adapter->ptp_ctxt.clock)) { + LOG_ERROR_BDF("failed to create ptp clock,err:%ld.\n", + PTR_ERR(adapter->ptp_ctxt.clock)); + return (s32)PTR_ERR(adapter->ptp_ctxt.clock); + } + return 0; +} + +STATIC s32 sxe2_fwc_ptp_sem_clean(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_PTP_SEM_CLEAN, NULL, 0, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("ptp sem clean fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static int sxe2_ptp_1588_init(struct sxe2_adapter *adapter) +{ + int err = 0; + struct timespec64 ts; + + err = sxe2_fwc_ptp_sem_clean(adapter); + if (err) { + LOG_ERROR_BDF("failed to release ptp lock force, err:%d.\n", err); + goto error; + }; + + if (!sxe2_ptp_sem_acquire(adapter)) { + err = -EBUSY; + goto error; + } + + sxe2_hw_ptp_main_enable(&adapter->hw); + + ts = ktime_to_timespec64(ktime_get_real()); + sxe2_hw_ptp_init_incval(&adapter->hw, SXE2_PTP_NOMINAL_INCVAL_TUCANA); + sxe2_hw_ptp_1588_timestamp_write(&adapter->hw, (u64)ts.tv_sec, + (u32)ts.tv_nsec); + sxe2_ptp_sem_release(adapter); + +error: + return err; +} + +int sxe2_ptp_init(struct sxe2_adapter *adapter) +{ + struct kthread_worker *kworker; + int ret = 0; 
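+	/* annotation (not in the original patch): init order as implemented
+	 * below -- only the PF that owns the shared 1588 timer programs it,
+	 * registers the POSIX clock and joins the owner list; every PF then
+	 * runs the firmware-side MAC init and starts the kworker that keeps
+	 * cached_phc_time fresh for rx/tx timestamp reconstruction
+	 */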
+ struct device *dev = &((adapter)->pdev->dev); + + adapter->ptp_ctxt.status = PTP_UNINITIALIZED; + + if (sxe2_ptp_owned(adapter)) { + LOG_DEBUG_BDF("current pf[%d] owns the 1588 timer\n", + adapter->pf_idx); + ret = sxe2_ptp_1588_init(adapter); + if (ret) { + LOG_ERROR_BDF("failed to create ptp clock\n"); + goto clock_err; + } + + ret = sxe2_ptp_clock_create(adapter); + if (ret) { + LOG_ERROR_BDF("failed to create ptp clock\n"); + ret = -ENOMEM; + goto clock_err; + } + + ret = sxe2_ptp_owner_adapter_add(adapter); + if (ret) { + LOG_ERROR_BDF("failed to add ptp owner adapter\n"); + goto set_owner_err; + } + } + + ret = sxe2_fwc_ptp_mac_init(adapter); + if (ret) { + LOG_DEV_ERR("failed to initial ptp.%d\n", ret); + goto set_owner_err; + } + + bitmap_zero(adapter->ptp_ctxt.tx.in_use, SXE2_INDEX_PER_PORT); + spin_lock_init(&adapter->ptp_ctxt.tx.ptp_lock); + spin_lock_init(&adapter->ptp_ctxt.cached_ts_lock); + kthread_init_delayed_work(&adapter->ptp_ctxt.period_work, + sxe2_ptp_period_work); + + kworker = kthread_create_worker(0, "sxe2-ptp%d-%s", + adapter->pf_idx, dev_name(dev)); + if (IS_ERR(kworker)) { + LOG_DEV_ERR("failed to create ptp worker\n"); + ret = (int)PTR_ERR(kworker); + goto init_mac_err; + } + + adapter->ptp_ctxt.kworker = kworker; + sxe2_ptp_schedule_periodic_work(&adapter->ptp_ctxt, 0); + + sxe2_ptp_txts_disable(adapter); + sxe2_ptp_rxts_disable(adapter); + adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_OFF; + adapter->ptp_ctxt.last_is_failed = false; + adapter->ptp_ctxt.status = PTP_READY; + + LOG_DEBUG_BDF("Ptp init success!,pfid:%d\n", adapter->pf_idx); + + return 0; + +init_mac_err: + if (sxe2_ptp_owned(adapter)) + sxe2_ptp_owner_adapter_delete(adapter); + +set_owner_err: + if (adapter->ptp_ctxt.clock) { + (void)ptp_clock_unregister(adapter->ptp_ctxt.clock); + adapter->ptp_ctxt.clock = NULL; + } + +clock_err: + adapter->ptp_ctxt.status = PTP_ERROR; + + return ret; +} + +void sxe2_ptp_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_ptp_tx *tx = &adapter->ptp_ctxt.tx; + struct sk_buff *skb; + u64 index; + + if (!adapter->ptp_ctxt.kworker) { + LOG_ERROR_BDF("pf %d kworker is null\n", adapter->pf_idx); + return; + } + + adapter->ptp_ctxt.status = PTP_UNINITIALIZED; + + if (sxe2_ptp_owned(adapter)) { + LOG_DEBUG_BDF("current pf[%d] owns the 1588 timer\n", + adapter->pf_idx); + sxe2_ptp_owner_adapter_delete(adapter); + if (adapter->ptp_ctxt.clock) { + (void)ptp_clock_unregister(adapter->ptp_ctxt.clock); + adapter->ptp_ctxt.clock = NULL; + } + } + + for_each_set_bit(index, tx->in_use, SXE2_INDEX_PER_PORT) { + skb = sxe2_ptp_tstamp_skb_get(adapter, index); + if (skb) { + dev_kfree_skb_any(skb); + skb = NULL; + } + } + + sxe2_ptp_cancel_periodic_work(&adapter->ptp_ctxt); + + kthread_destroy_worker(adapter->ptp_ctxt.kworker); + + sxe2_ptp_txts_disable(adapter); + sxe2_ptp_rxts_disable(adapter); + adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_OFF; +} + +void sxe2_ptp_stop(struct sxe2_adapter *adapter) +{ + u64 index; + struct sxe2_ptp_tx *tx = &adapter->ptp_ctxt.tx; + struct sxe2_ptp_context *ptp = &adapter->ptp_ctxt; + struct sk_buff *skb; + + if (ptp->status != PTP_READY) + return; + + ptp->status = PTP_RESETTING; + + for_each_set_bit(index, tx->in_use, SXE2_INDEX_PER_PORT) { + skb = sxe2_ptp_tstamp_skb_get(adapter, index); + if (skb) { + dev_kfree_skb_any(skb); + skb = NULL; + } + } + + sxe2_ptp_txts_disable(adapter); + 
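+	/* annotation (not in the original patch): stop() is the reset-path
+	 * half of the stop()/rebuild() pair -- pending tx timestamp skbs
+	 * were dropped above and the periodic worker is cancelled below,
+	 * but unlike deinit() the PTP clock stays registered so rebuild()
+	 * can pick it back up
+	 */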
sxe2_ptp_rxts_disable(adapter); + sxe2_ptp_all_perout_disable(adapter); + + sxe2_ptp_cancel_periodic_work(ptp); +} + +int sxe2_ptp_rebuild(struct sxe2_adapter *adapter) +{ + s32 err = 0; + struct timespec64 ts; + struct sxe2_ptp_context *ptp = &adapter->ptp_ctxt; + + if (!adapter->ptp_ctxt.kworker) { + LOG_ERROR_BDF("pf %d kworker is null\n", + adapter->pf_idx); + return -EINVAL; + } + + if (sxe2_ptp_owned(adapter)) { + if (!sxe2_ptp_sem_acquire(adapter)) { + LOG_ERROR_BDF("failed to get ptp lock\n"); + err = -EINVAL; + goto error; + } + sxe2_hw_ptp_main_enable(&adapter->hw); + sxe2_hw_ptp_init_incval(&adapter->hw, + SXE2_PTP_NOMINAL_INCVAL_TUCANA); + sxe2_ptp_sem_release(adapter); + ts = ktime_to_timespec64(ktime_get_real()); + if (sxe2_ptp_primary_timer_set(adapter, ts)) { + LOG_ERROR_BDF("failed to re init 1588 time\n"); + err = -EINVAL; + goto error; + } + } + + sxe2_ptp_all_perout_restore(adapter); + sxe2_ptp_schedule_periodic_work(ptp, 0); + sxe2_ptp_txts_restore(adapter); + sxe2_ptp_rxts_restore(adapter); + + ptp->status = PTP_READY; + LOG_DEBUG_BDF("rebuild ptp ok\n"); + return 0; + +error: + ptp->status = PTP_ERROR; + return err; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.h new file mode 100644 index 0000000000000000000000000000000000000000..c3ff98ef4b626092d5c93669c9359219febcd20a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_ptp.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ptp.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_PTP_H__ +#define __SXE2_PTP_H__ + +#include +#include +#include +#include +#include + +#define SXE2_EXTTS_COUNT 3 +#define SXE2_PEROUT_COUNT 4 +#define SXE2_INDEX_PER_PORT 64 +#define SXE2_PTP_NOMINAL_INCVAL_TUCANA (0x1B4E89B4EULL) + +struct sxe2_adapter; +struct sxe2_queue; +union sxe2_rx_desc_1588; + +enum sxe2_ptp_status { + PTP_UNINITIALIZED = 0, + PTP_ERROR, + PTP_READY, + PTP_RESETTING +}; + +struct sxe2_ptp_perout_context { + bool on; + int gpio; + struct timespec64 period; + struct timespec64 start; +}; + +struct sxe2_ptp_extts_context { + DECLARE_BITMAP(chan, SXE2_EXTTS_COUNT); + DECLARE_BITMAP(irq, SXE2_EXTTS_COUNT); +}; + +struct sxe2_ptp_descriptor { + struct sk_buff *skb; + u64 start; +}; + +struct sxe2_ptp_tx { + spinlock_t ptp_lock; + struct sxe2_ptp_descriptor descs[SXE2_INDEX_PER_PORT]; + DECLARE_BITMAP(in_use, SXE2_INDEX_PER_PORT); +}; + +struct sxe2_ptp_context { + enum sxe2_ptp_status status; + bool ptp_owned; + bool ptp_tx_enable; + bool ptp_rx_enable; + + struct kthread_delayed_work period_work; + struct kthread_worker *kworker; + spinlock_t cached_ts_lock; + struct timespec64 cached_phc_time; + bool last_is_failed; + + struct sxe2_ptp_tx tx; + + struct sxe2_ptp_extts_context extts; + + struct sxe2_ptp_perout_context perout[SXE2_PEROUT_COUNT]; + + struct ptp_clock_info info; + struct ptp_clock *clock; + struct hwtstamp_config tstamp_config; +}; + +struct sxe2_ptp_owner_list { + struct list_head node; + struct sxe2_adapter *owner_adapter; +}; + +int sxe2_ptp_init(struct sxe2_adapter *adapter); + +void sxe2_ptp_deinit(struct sxe2_adapter *adapter); + +void sxe2_ptp_rxts_request(struct sxe2_queue *rxq, + union sxe2_rx_desc_1588 *desc, struct sk_buff *skb); + +s32 sxe2_ptp_txts_request(struct sxe2_ptp_tx *tx, struct sk_buff *skb); + +void sxe2_ptp_txts_process(struct sxe2_adapter *adapter); + +int 
sxe2_ptp_hwts_get(struct sxe2_adapter *adapter, struct ifreq *ifr); + +int sxe2_ptp_hwts_set(struct sxe2_adapter *adapter, struct ifreq *ifr); + +void sxe2_ptp_stop(struct sxe2_adapter *adapter); + +int sxe2_ptp_rebuild(struct sxe2_adapter *adapter); + +int sxe2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta); + +int sxe2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); + +int sxe2_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts); + +int sxe2_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts); + +int sxe2_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); +bool sxe2_ptp_owned(struct sxe2_adapter *adapter); + +bool sxe2_ptp_primary_timer_read(struct sxe2_adapter *adapter, + struct timespec64 *hwts); +void sxe2_ptp_owner_init_once(void); + +void sxe2_ptp_owner_deinit_once(void); + +s32 sxe2_ptp_clock_idx_get(struct sxe2_adapter *adapter); + +void sxe2_ptp_extts_intr(struct sxe2_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.c new file mode 100644 index 0000000000000000000000000000000000000000..c5e0e586e85aa66537b9745f576fab8839008cdd --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_queue.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_common.h" + +STATIC u16 sxe2_rxq_preallocate_num_cal(struct sxe2_adapter *adapter) +{ + u16 rxq_cnt = 0; + struct sxe2_queue_layout *rxq_layout = &adapter->q_ctxt.rxq_layout; + + rxq_cnt += rxq_layout->lb + rxq_layout->ctrl + rxq_layout->lan + + rxq_layout->dpdk; + + return rxq_cnt; +} + +STATIC u16 sxe2_txq_preallocate_num_cal(struct sxe2_adapter *adapter) +{ + u16 txq_cnt = 0; + struct sxe2_queue_layout *txq_layout = &adapter->q_ctxt.txq_layout; + + txq_cnt += txq_layout->lb + txq_layout->ctrl + txq_layout->lan + + txq_layout->dpdk; + + return txq_cnt; +} + +u16 sxe2_usable_txqs_cnt_get(struct sxe2_adapter *adapter) +{ + struct sxe2_queue_layout *q_layout = &adapter->q_ctxt.txq_layout; + u16 size = adapter->q_ctxt.max_txq_cnt; + unsigned long *map = q_layout->txq_map; + struct mutex *lock = &adapter->q_ctxt.lock; + u16 cnt = 0; + unsigned long start_idx = sxe2_txq_preallocate_num_cal(adapter); + unsigned long bit; + + mutex_lock(lock); + for_each_clear_bit_from_base(bit, map, size, start_idx) { + cnt++; + } + mutex_unlock(lock); + + return cnt; +} + +u16 sxe2_usable_rxqs_cnt_get(struct sxe2_adapter *adapter) +{ + struct sxe2_queue_layout *q_layout = &adapter->q_ctxt.rxq_layout; + u16 size = adapter->q_ctxt.max_rxq_cnt; + unsigned long *map = q_layout->rxq_map; + struct mutex *lock = &adapter->q_ctxt.lock; + u16 cnt = 0; + unsigned long start_idx = sxe2_rxq_preallocate_num_cal(adapter); + unsigned long bit; + + mutex_lock(lock); + for_each_clear_bit_from_base(bit, map, size, start_idx) { + cnt++; + } + mutex_unlock(lock); + + return cnt; +} + +STATIC s32 sxe2_qs_find_from_shared(struct sxe2_adapter *adapter, unsigned long *map, + u16 size, u16 start_idx, u16 cnt) +{ + s32 offset; + + offset = (s32)bitmap_find_next_zero_area(map, (s32)size, (s32)start_idx, + (s32)cnt, 0); + if (offset >= (s32)size) { + offset = -ENOMEM; + LOG_ERROR_BDF("get %d qs from map(size %d) failed, ret=%d\n", cnt, + size, 
offset); + goto l_end; + } + bitmap_set(map, (unsigned int)offset, (unsigned int)cnt); +l_end: + return offset; +} + +STATIC s32 sxe2_vsi_qs_offset_get(struct sxe2_vsi *vsi, + struct sxe2_queue_layout *q_layout, + unsigned long *map, u16 size, u16 cnt, u8 q_type) +{ + s32 offset; + u8 vsi_type = vsi->type; + struct sxe2_adapter *adapter = vsi->adapter; + struct mutex *lock = &adapter->q_ctxt.lock; + u16 start_idx; + u16 base_idx; + + if (q_type == SXE2_DATA_RQ) + start_idx = sxe2_rxq_preallocate_num_cal(adapter); + else + start_idx = sxe2_txq_preallocate_num_cal(adapter); + + if (q_type == SXE2_DATA_XDP_TQ) { + mutex_lock(lock); + offset = sxe2_qs_find_from_shared(adapter, map, size, start_idx, + cnt); + mutex_unlock(lock); + goto l_end; + } + + switch (vsi_type) { + case SXE2_VSI_T_LB: + offset = q_layout->lb_offset; + break; + case SXE2_VSI_T_PF: + offset = q_layout->lan_offset; + break; + case SXE2_VSI_T_CTRL: + offset = q_layout->ctrl_offset; + break; + case SXE2_VSI_T_ESW: + offset = q_layout->esw_offset; + break; + case SXE2_VSI_T_MACVLAN: + mutex_lock(lock); + offset = (s32)sxe2_qs_find_from_shared(adapter, map, size, start_idx, + cnt); + if (offset >= 0) + q_layout->macvlan++; + mutex_unlock(lock); + break; + case SXE2_VSI_T_VF: + case SXE2_VSI_T_DPDK_VF: + base_idx = (q_type == SXE2_DATA_TQ) ? vsi->txqs.base_idx_in_feature + : vsi->rxqs.base_idx_in_feature; + offset = q_layout->sriov_offset + + adapter->vf_ctxt.q_cnt * (vsi->vf_node->vf_idx) + base_idx; + break; + case SXE2_VSI_T_DPDK_PF: + offset = q_layout->dpdk_offset; + break; + case SXE2_VSI_T_DPDK_ESW: + offset = q_layout->dpdk_esw_offset; + break; + default: + offset = -1; + break; + } + +l_end: + return offset; +} + +s32 sxe2_vsi_queues_get(struct sxe2_vsi *vsi, u8 q_type) +{ + s32 ret = 0; + s32 offset; + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_queue_layout *q_layout = NULL; + unsigned long *map; + u16 size = 0; + struct sxe2_vsi_queues *queues; + u16 cnt = 0; + + switch (q_type) { + case SXE2_DATA_TQ: + size = adapter->q_ctxt.max_txq_cnt; + queues = &vsi->txqs; + cnt = queues->q_alloc; + q_layout = &adapter->q_ctxt.txq_layout; + map = q_layout->txq_map; + break; + case SXE2_DATA_RQ: + size = adapter->q_ctxt.max_rxq_cnt; + queues = &vsi->rxqs; + cnt = queues->q_alloc; + q_layout = &adapter->q_ctxt.rxq_layout; + map = q_layout->rxq_map; + break; + case SXE2_DATA_XDP_TQ: + size = adapter->q_ctxt.max_txq_cnt; + queues = &vsi->xdp_rings; + cnt = queues->q_alloc; + q_layout = &adapter->q_ctxt.txq_layout; + map = q_layout->txq_map; + break; + default: + ret = -ENOMEM; + goto l_end; + } + + offset = sxe2_vsi_qs_offset_get(vsi, q_layout, map, size, cnt, q_type); + if (offset < 0) { + ret = -ENOMEM; + LOG_DEV_ERR("vsi get %d qs failed.\n", cnt); + goto l_end; + } + + for (i = 0; i < cnt; i++) + queues->q[i]->idx_in_pf = (u16)(i + offset); + +l_end: + return ret; +} + +s32 sxe2_vsi_txrx_queues_get(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + if (vsi->rxqs.q_alloc > SXE2_VSI_TXRX_Q_MAX_CNT || + vsi->txqs.q_alloc > SXE2_VSI_TXRX_Q_MAX_CNT) { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_vsi_queues_get(vsi, SXE2_DATA_TQ); + if (ret) { + LOG_DEV_ERR("get txqs %d failed.\n", vsi->txqs.q_alloc); + goto l_error; + } + + ret = sxe2_vsi_queues_get(vsi, SXE2_DATA_RQ); + if (ret) { + LOG_DEV_ERR("get rxqs %d failed.\n", vsi->rxqs.q_alloc); + goto l_error; + } + + return ret; + +l_error: + sxe2_vsi_txrx_queues_put(vsi); +l_end: + return ret; +} + +STATIC void 
sxe2_vsi_queues_put(struct sxe2_vsi *vsi, u8 q_type) +{ + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_queue_layout *q_layout = NULL; + unsigned long *map = NULL; + struct sxe2_vsi_queues *queues = NULL; + struct mutex *lock = &adapter->q_ctxt.lock; + + switch (q_type) { + case SXE2_DATA_TQ: + queues = &vsi->txqs; + q_layout = &adapter->q_ctxt.txq_layout; + map = q_layout->txq_map; + break; + case SXE2_DATA_RQ: + queues = &vsi->rxqs; + q_layout = &adapter->q_ctxt.rxq_layout; + map = q_layout->rxq_map; + break; + case SXE2_DATA_XDP_TQ: + queues = &vsi->xdp_rings; + q_layout = &adapter->q_ctxt.txq_layout; + map = q_layout->txq_map; + break; + default: + LOG_ERROR_BDF("invalid q_type %d.\n", q_type); + return; + } + + mutex_lock(lock); + for (i = 0; i < queues->q_alloc; i++) { + if (queues->q[i]->idx_in_pf != SXE2_Q_IDX_INVAL) { + clear_bit(queues->q[i]->idx_in_pf, map); + queues->q[i]->idx_in_pf = SXE2_Q_IDX_INVAL; + } + } + + if (vsi->type == SXE2_VSI_T_MACVLAN) + q_layout->macvlan--; + + mutex_unlock(lock); +} + +void sxe2_vsi_txrx_queues_put(struct sxe2_vsi *vsi) +{ + sxe2_vsi_queues_put(vsi, SXE2_DATA_TQ); + + sxe2_vsi_queues_put(vsi, SXE2_DATA_RQ); +} + +STATIC void sxe2_safemode_q_layout_init(struct sxe2_adapter *adapter) +{ + struct sxe2_queue_layout *txq_layout = &adapter->q_ctxt.txq_layout; + struct sxe2_queue_layout *rxq_layout = &adapter->q_ctxt.rxq_layout; + u16 max_txq = adapter->q_ctxt.max_txq_cnt; + u16 max_rxq = adapter->q_ctxt.max_rxq_cnt; + + txq_layout->lb = 0; + txq_layout->lb_offset = 0; + + txq_layout->ctrl = 0; + txq_layout->ctrl_offset = 0; + + txq_layout->lan = 1; + txq_layout->lan_offset = 0; + + txq_layout->xdp = 0; + txq_layout->xdp_offset = txq_layout->lan_offset + txq_layout->lan; + + txq_layout->macvlan = 0; + txq_layout->macvlan_offset = txq_layout->xdp_offset + txq_layout->xdp; + + txq_layout->dpdk_esw = 0; + txq_layout->dpdk_esw_offset = max_txq; + + txq_layout->esw = 0; + txq_layout->esw_offset = max_txq; + + txq_layout->sriov = 0; + txq_layout->sriov_offset = max_txq; + + rxq_layout->lb = 0; + rxq_layout->lb_offset = 0; + + rxq_layout->ctrl = 0; + rxq_layout->ctrl_offset = 0; + + rxq_layout->lan = 1; + rxq_layout->lan_offset = 0; + + rxq_layout->xdp = 0; + rxq_layout->xdp_offset = rxq_layout->lan_offset + rxq_layout->lan; + + rxq_layout->macvlan = 0; + rxq_layout->macvlan_offset = rxq_layout->xdp_offset + rxq_layout->xdp; + + rxq_layout->dpdk_esw = 0; + rxq_layout->dpdk_esw_offset = max_rxq; + + rxq_layout->esw = 0; + rxq_layout->esw_offset = max_rxq; + + rxq_layout->sriov = 0; + rxq_layout->sriov_offset = max_rxq; +} + +STATIC void sxe2_q_layout_init(struct sxe2_adapter *adapter) +{ + struct sxe2_queue_layout *txq_layout = &adapter->q_ctxt.txq_layout; + struct sxe2_queue_layout *rxq_layout = &adapter->q_ctxt.rxq_layout; + u16 max_txq = adapter->q_ctxt.max_txq_cnt; + u16 max_rxq = adapter->q_ctxt.max_rxq_cnt; + + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 local_cpu_cnt = sxe2_local_cpus_cnt_get(dev); + u32 standard_cpu_cnt = sxe2_standardize_cpu_cnt(local_cpu_cnt); + u32 mode = (u32)sxe2_com_mode_get(adapter); + + if (mode == SXE2_COM_MODULE_KERNEL) { + txq_layout->lb = 1; + txq_layout->lb_offset = 0; + + txq_layout->ctrl = 1; + txq_layout->ctrl_offset = txq_layout->lb_offset + txq_layout->lb; + + txq_layout->lan_offset = txq_layout->ctrl_offset + txq_layout->ctrl; + txq_layout->lan = (u16)min_t(u32, standard_cpu_cnt, + (u32)adapter->irq_ctxt.irq_layout.lan); + if ((txq_layout->lan + txq_layout->lan_offset) > max_txq) + 
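+			/* clamp so lb + ctrl + lan fits the tx queue budget
+			 * (annotation, not in the original patch)
+			 */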
txq_layout->lan = (u16)(max_txq - txq_layout->lan_offset);
+		txq_layout->dpdk_offset = txq_layout->lan_offset + txq_layout->lan;
+	} else if (mode == SXE2_COM_MODULE_DPDK) {
+		txq_layout->dpdk = SXE2_DPDK_QUEUE_MAX_CNT;
+		txq_layout->dpdk_offset = txq_layout->lan_offset + txq_layout->lan;
+		if ((txq_layout->dpdk + txq_layout->dpdk_offset) > max_txq)
+			txq_layout->dpdk = (u16)(max_txq - txq_layout->dpdk_offset);
+	} else {
+		txq_layout->lb = 1;
+		txq_layout->lb_offset = 0;
+
+		txq_layout->ctrl = 1;
+		txq_layout->ctrl_offset = txq_layout->lb_offset + txq_layout->lb;
+
+		txq_layout->lan_offset = txq_layout->ctrl_offset + txq_layout->ctrl;
+		txq_layout->lan = (u16)min_t(u32, standard_cpu_cnt,
+					     (u32)adapter->irq_ctxt.irq_layout.lan);
+		if ((txq_layout->lan + txq_layout->lan_offset) > max_txq)
+			txq_layout->lan = (u16)(max_txq - txq_layout->lan_offset);
+
+		txq_layout->dpdk_offset = txq_layout->lan_offset + txq_layout->lan;
+		txq_layout->dpdk = SXE2_DPDK_QUEUE_DFLT_CNT;
+		if ((txq_layout->dpdk + txq_layout->dpdk_offset) > max_txq)
+			txq_layout->dpdk = (u16)(max_txq - txq_layout->dpdk_offset);
+	}
+
+	txq_layout->xdp = 0;
+	txq_layout->xdp_offset = txq_layout->dpdk_offset + txq_layout->dpdk;
+
+	txq_layout->macvlan = 0;
+	txq_layout->macvlan_offset = txq_layout->xdp_offset + txq_layout->xdp;
+
+	txq_layout->dpdk_esw = 0;
+	txq_layout->dpdk_esw_offset = max_txq;
+
+	txq_layout->esw = 0;
+	txq_layout->esw_offset = max_txq;
+
+	txq_layout->sriov = 0;
+	txq_layout->sriov_offset = max_txq;
+
+	if (mode == SXE2_COM_MODULE_KERNEL) {
+		rxq_layout->lb = 1;
+		rxq_layout->lb_offset = 0;
+
+		rxq_layout->ctrl = 1;
+		rxq_layout->ctrl_offset = rxq_layout->lb_offset + rxq_layout->lb;
+
+		rxq_layout->lan_offset = rxq_layout->ctrl_offset + rxq_layout->ctrl;
+		rxq_layout->lan = (u16)min_t(u32, standard_cpu_cnt,
+					     (u32)adapter->irq_ctxt.irq_layout.lan);
+		/* rx regions are clamped against the rx queue budget */
+		if ((rxq_layout->lan + rxq_layout->lan_offset) > max_rxq)
+			rxq_layout->lan = (u16)(max_rxq - rxq_layout->lan_offset);
+		rxq_layout->dpdk_offset = rxq_layout->lan_offset + rxq_layout->lan;
+	} else if (mode == SXE2_COM_MODULE_DPDK) {
+		rxq_layout->dpdk = SXE2_DPDK_QUEUE_MAX_CNT;
+		rxq_layout->dpdk_offset = rxq_layout->lan_offset + rxq_layout->lan;
+		if ((rxq_layout->dpdk + rxq_layout->dpdk_offset) > max_rxq)
+			rxq_layout->dpdk = (u16)(max_rxq - rxq_layout->dpdk_offset);
+	} else {
+		rxq_layout->lb = 1;
+		rxq_layout->lb_offset = 0;
+
+		rxq_layout->ctrl = 1;
+		rxq_layout->ctrl_offset = rxq_layout->lb_offset + rxq_layout->lb;
+
+		rxq_layout->lan_offset = rxq_layout->ctrl_offset + rxq_layout->ctrl;
+		rxq_layout->lan = (u16)min_t(u32, standard_cpu_cnt,
+					     (u32)adapter->irq_ctxt.irq_layout.lan);
+		if ((rxq_layout->lan + rxq_layout->lan_offset) > max_rxq)
+			rxq_layout->lan = (u16)(max_rxq - rxq_layout->lan_offset);
+
+		rxq_layout->dpdk_offset = rxq_layout->lan_offset + rxq_layout->lan;
+		rxq_layout->dpdk = SXE2_DPDK_QUEUE_DFLT_CNT;
+		if ((rxq_layout->dpdk + rxq_layout->dpdk_offset) > max_rxq)
+			rxq_layout->dpdk = (u16)(max_rxq - rxq_layout->dpdk_offset);
+	}
+
+	rxq_layout->xdp = 0;
+	rxq_layout->xdp_offset = rxq_layout->dpdk_offset + rxq_layout->dpdk;
+
+	rxq_layout->macvlan = 0;
+	rxq_layout->macvlan_offset = rxq_layout->xdp_offset + rxq_layout->xdp;
+
+	rxq_layout->dpdk_esw = 0;
+	rxq_layout->dpdk_esw_offset = max_rxq;
+
+	rxq_layout->esw = 0;
+	rxq_layout->esw_offset = max_rxq;
+
+	rxq_layout->sriov = 0;
+	rxq_layout->sriov_offset = max_rxq;
+}
+
+void sxe2_queue_init(struct sxe2_adapter *adapter)
+{
+	if (sxe2_is_safe_mode(adapter))
+
sxe2_safemode_q_layout_init(adapter); + else + sxe2_q_layout_init(adapter); +} + +s32 sxe2_dpdk_abs_qid_get(struct sxe2_adapter *adapter, + struct sxe2_q_id_transe *params) +{ + struct sxe2_vsi *vsi; + u16 base_idx; + u16 max_cnt; + s32 ret = 0; + + if (!adapter || !params) { + ret = -EINVAL; + LOG_ERROR_BDF("params invalid.\n"); + return ret; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("dpdk vsi_id:%d vsi null.\n", params->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + if (params->is_tx) { + max_cnt = vsi->txqs.q_cnt; + base_idx = adapter->q_ctxt.txq_base_idx_in_dev; + } else { + max_cnt = vsi->rxqs.q_cnt; + base_idx = adapter->q_ctxt.rxq_base_idx_in_dev; + } + + if (params->q_id >= max_cnt) { + LOG_ERROR_BDF("invalid queue_id:%d vsi queue cnt:% is_tx:%d\n", + params->q_id, max_cnt, params->is_tx); + ret = -EINVAL; + goto l_unlock; + } + + if (params->is_tx) + params->q_id_in_dev = + vsi->txqs.q[params->q_id]->idx_in_pf + base_idx; + else + params->q_id_in_dev = + vsi->rxqs.q[params->q_id]->idx_in_pf + base_idx; + + LOG_INFO_BDF("dpdk vsi_id:%d q_id_in_vsi:%d q_id_in_dev:%d is_tx:%d.\n", + params->vsi_id, params->q_id, params->q_id_in_dev, + params->is_tx); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..b18fc204566b84a30007068be975de2f204036b4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_queue.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_queue.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_QUEUE_H__ +#define __SXE2_QUEUE_H__ + +#include +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif +#endif + +#include +#include + +#include "sxe2_compat.h" + +#define SXE2_DPDK_QUEUE_DFLT_CNT 64 +#define SXE2_DPDK_QUEUE_MAX_CNT 256 + +#define sxe2_for_each_queue(post, head) \ + for ((post) = (head).next; (post); (post) = (post)->next) + +#define sxe2_for_each_queue_safe(post, n, head) \ + for ((post) = (head).next, (n) = (post)->next; \ + (post); \ + (post) = (n), (n) = (post)->next) + +#define for_each_clear_bit_from_base(bit, addr, size, start) \ + for ((bit) = find_next_zero_bit((addr), (size), (start)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) + +struct sxe2_adapter; +struct sxe2_vsi; +struct sxe2_irq_data; +struct sxe2_mqprio_channel; +struct sxe2_ptp_tx; + +#define SXE2_TXRX_Q_MAX_CNT 2048 +#define SXE2_TXRX_Q_MAX_INDEX 2047 + +#define SXE2_VSI_TXRX_Q_MAX_CNT 256 + +#define SXE2_Q_IDX_INVAL 0xffff +#define SXE2_IRQ_IDX_INVAL 0xffff + +#define SXE2_DFLT_NUM_RX_DESC 3072 +#define SXE2_DFLT_NUM_TX_DESC 256 + +#define SXE2_REQ_DESC_MULTIPLE 32 +#define SXE2_MAX_NUM_DESC 8160 +#define SXE2_MIN_NUM_DESC \ + 64 +#define SXE2_DESC_ALIGN_32 32 + +#define SXE2_PAGE_SIZE_4KB 4096 +#define SXE2_PAGE_SIZE_8KB 8192 + +#define SXE2_XDP_MAX_CNT (64) + +#define SXE2_DESC_IDLE(queue) \ +({ \ + const typeof(*(queue)) *__q = (queue); \ + u16 ntc = __q->next_to_clean; \ + u16 ntu = __q->next_to_use; \ + u16 depth = __q->depth; \ + (u16)(((ntc > ntu) ? 
0 : depth) + ntc - ntu - 1); \ +}) + +enum sxe2_queue_flags { + SXE2_RXQ_BUILD_SKB_ENABLED = 0, + SXE2_RXQ_LRO_ENABLED, + SXE2_RXQ_CRC_STRIP_DISABLED, + SXE2_TX_FLAGS_Q_XDP, + SXE2_RXQ_SPLIT_ENABLED, +}; + +enum sxe2_data_queue_type { + SXE2_DATA_TQ = 0, + SXE2_DATA_RQ, + SXE2_DATA_XDP_TQ, + SXE2_DATA_QUEUE_MAX, +}; + +static inline bool sxe2_desc_status0_err_test(__le16 desc_status, const u16 bit) +{ + return !!(desc_status & cpu_to_le16(bit)); +} + +struct sxe2_desc_ring { + void *base_addr; + u8 __iomem *tail; + dma_addr_t dma; +}; + +struct sxe2_rx_buf { + union { + struct { + dma_addr_t dma; + struct page *page; + u32 pg_offset; + u16 drv_refcnt; + struct sk_buff *skb; + }; +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef HAVE_XSK_BATCHED_RX_ALLOC + struct { + union { + struct xdp_buff *xdp; + void *addr; + }; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + u64 handle; +#endif + }; +#endif +#endif + }; +}; + +struct sxe2_tx_buf { + void *next_to_watch; + union { + struct sk_buff *skb; + void *raw_buf; + }; + u32 bytecount; + u16 gso_segs; + u32 tx_features; + DEFINE_DMA_UNMAP_LEN(len); + DEFINE_DMA_UNMAP_ADDR(dma); +}; + +enum sxe2_xdp_state_t { + SXE2_TX_XPS_INIT_DONE, + SXE2_TX_NBITS, +}; + +struct sxe2_txq_stats { + u64 tx_restart; + u64 tx_busy; + u64 tx_linearize; + u64 tx_tso_linearize_chk; + u64 tx_vlan_insert; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_dropped; + u64 tx_xmit_more; +}; + +struct sxe2_rxq_xdp_stats { + u64 rx_xdp_drop; + u64 rx_xdp_redirect; + u64 rx_xdp_redirect_fail; + u64 rx_xdp_pkts; + u64 rx_xdp_bytes; + u64 rx_xdp_pass; + u64 rx_xdp_unknown; + u64 rx_xdp_tx_xmit; + u64 rx_xdp_tx_xmit_fail; + + u64 rx_xsk_drop; + u64 rx_xsk_redirect; + u64 rx_xsk_redirect_fail; + u64 rx_xsk_packets; + u64 rx_xsk_bytes; + u64 rx_xsk_pass; + u64 rx_xsk_unknown; + u64 rx_xsk_tx_xmit; + u64 rx_xsk_tx_xmit_fail; +}; + +struct sxe2_rxq_stats { + u64 rx_lro_count; + u64 rx_lro_packets; + u64 rx_non_eop_descs; + u64 rx_page_alloc; + u64 rx_pg_alloc_fail; + u64 rx_buff_alloc_err; + u64 rx_csum_err; + u64 rx_vlan_strip; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_unnecessary_inner; + u64 rx_lro_bytes; + u64 rx_pkts_sw_drop; + + struct sxe2_rxq_xdp_stats xdp_stats; + u64 rx_pa_err; +}; + +struct sxe2_queue_ipsec_stats { + u64 rx_error_invalid_sp; + u64 rx_error_invalid_state; + u64 rx_error_invalid_ptype; + u64 rx_error_decrypt_fail; + u64 rx_offload_success; + u64 tx_error_invalid_sp; + u64 tx_error_invalid_state; + u64 tx_offload_success; +}; + +struct sxe2_queue_stats { + u64 packets; + u64 bytes; + union { + struct sxe2_rxq_stats rx_stats; + struct sxe2_txq_stats tx_stats; + }; + int prev_pkt; + struct sxe2_queue_ipsec_stats ipsec_stats; + struct u64_stats_sync syncp; +}; + +struct sxe2_queue { + struct sxe2_queue *next; + struct device *dev; + struct net_device *netdev; + struct sxe2_irq_data *irq_data; + struct sxe2_desc_ring desc; + union { + struct sxe2_rx_buf *rx_buf; + struct sxe2_tx_buf *tx_buf; + }; + + struct sxe2_vsi *vsi; +#ifdef HAVE_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif + + u16 depth; + u16 idx_in_pf; + u16 idx_in_vsi; + + u16 next_to_use; + u16 next_to_clean; + union { + struct { + u16 rx_offset; + u16 rx_buf_len; + u16 next_to_alloc; + }; + + struct { + u16 next_rs; + u16 next_dd; + u16 q_handle; + }; + }; + + struct sxe2_queue_stats *stats; + struct u64_stats_sync syncp; + + struct rcu_head rcu; + + struct sxe2_mqprio_channel *ch; + 
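+	/* xps_state presumably tracks one-shot XPS (transmit packet
+	 * steering) setup for this Tx queue via the SXE2_TX_XPS_INIT_DONE
+	 * bit in enum sxe2_xdp_state_t, following the common kernel-driver
+	 * pattern of configuring XPS only once per queue (assumption). */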
DECLARE_BITMAP(xps_state, SXE2_TX_NBITS); + struct bpf_prog *xdp_prog; + union { + struct sxe2_queue *xdp_ring; + struct sxe2_ptp_tx *tx_tstamps; + }; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifdef HAVE_NETDEV_BPF_XSK_POOL + struct xsk_buff_pool *xsk_pool; +#else + struct xdp_umem *xsk_pool; +#endif + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct zero_copy_allocator zca; +#endif +#endif + + struct sk_buff *skb; + + union { + struct { + u64 cached_phctime; + u8 ptp_rx; + }; + + struct { + /* in order to protect the data */ + spinlock_t xdp_lock; + u16 txq_teid; + u8 ptp_tx; + }; + }; + u8 dcb_tc; + unsigned long flags; + u16 xdp_tx_active; + u32 max_lro_size; + u32 split_type_mask; + u16 hdr_len; + u8 desc_size; +} ____cacheline_internodealigned_in_smp; + +struct sxe2_queue_layout { + u16 lb; + u16 lb_offset; + + u16 ctrl; + u16 ctrl_offset; + + u16 lan; + u16 lan_offset; + + u16 dpdk; + u16 dpdk_offset; + + u16 macvlan; + u16 macvlan_offset; + + u16 xdp; + u16 xdp_offset; + + u16 esw; + u16 esw_offset; + + u16 dpdk_esw; + u16 dpdk_esw_offset; + + u16 sriov; + u16 sriov_offset; + + DECLARE_BITMAP(txq_map, SXE2_TXRX_Q_MAX_CNT); + DECLARE_BITMAP(rxq_map, SXE2_TXRX_Q_MAX_CNT); +}; + +struct sxe2_queue_context { + u16 max_txq_cnt; + u16 max_rxq_cnt; + u16 txq_base_idx_in_dev; + u16 rxq_base_idx_in_dev; + /* in order to protect the data */ + struct mutex lock; + struct sxe2_queue_layout txq_layout; + struct sxe2_queue_layout rxq_layout; +}; + +struct sxe2_list { + struct sxe2_queue *next; + u16 cnt; +}; + +struct sxe2_q_id_transe { + u16 q_id; + u16 q_id_in_dev; + u16 vsi_id; + bool is_tx; +}; + +s32 sxe2_vsi_txrx_queues_get(struct sxe2_vsi *vsi); + +void sxe2_vsi_txrx_queues_put(struct sxe2_vsi *vsi); + +u16 sxe2_usable_txqs_cnt_get(struct sxe2_adapter *adapter); + +u16 sxe2_usable_rxqs_cnt_get(struct sxe2_adapter *adapter); + +void sxe2_queue_init(struct sxe2_adapter *adapter); + +static inline void sxe2_tx_pkt_stats_update(struct sxe2_queue *txq, + struct sxe2_queue_stats *stats) +{ + u64_stats_update_begin(&txq->syncp); + txq->stats->bytes += stats->bytes; + txq->stats->packets += stats->packets; + u64_stats_update_end(&txq->syncp); +} + +static inline void sxe2_rxq_stats_update(struct sxe2_queue *rxq, u64 pkts, + u64 bytes) +{ + u64_stats_update_begin(&rxq->syncp); + rxq->stats->bytes += bytes; + rxq->stats->packets += pkts; + u64_stats_update_end(&rxq->syncp); +} + +s32 sxe2_dpdk_abs_qid_get(struct sxe2_adapter *adapter, struct sxe2_q_id_transe *params); + +static inline bool sxe2_queue_depth_is_valid(u16 depth) +{ + return (depth >= SXE2_MIN_NUM_DESC && depth <= SXE2_MAX_NUM_DESC && + !(depth % SXE2_REQ_DESC_MULTIPLE)); +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.c new file mode 100644 index 0000000000000000000000000000000000000000..782d2e1260abaf56f0cec02ef11d99287cb8b7ef --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_rss.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_rss.h"
+#include "sxe2.h"
+#include "sxe2_cmd.h"
+#include "sxe2_cmd_channel.h"
+#include "sxe2_log.h"
+#include "sxe2_common.h"
+#include "sxe2_flow.h"
+
+static struct sxe2_rss_hash_cfg default_rss_cfg_ip4;
+static struct sxe2_rss_hash_cfg default_rss_cfg_tcp4;
+static struct sxe2_rss_hash_cfg default_rss_cfg_udp4;
+static struct sxe2_rss_hash_cfg default_rss_cfg_sctp4;
+static struct sxe2_rss_hash_cfg default_rss_cfg_ip6;
+static struct sxe2_rss_hash_cfg default_rss_cfg_tcp6;
+static struct sxe2_rss_hash_cfg default_rss_cfg_udp6;
+static struct sxe2_rss_hash_cfg default_rss_cfg_sctp6;
+
+static struct sxe2_rss_hash_cfg *default_rss_cfgs[] = {
+	&default_rss_cfg_ip4, &default_rss_cfg_tcp4,
+	&default_rss_cfg_udp4, &default_rss_cfg_sctp4,
+
+	&default_rss_cfg_ip6, &default_rss_cfg_tcp6,
+	&default_rss_cfg_udp6, &default_rss_cfg_sctp6,
+};
+
+void sxe2_rss_flow_ctxt_init(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rss_ctxt *rss_flow_ctxt = &adapter->rss_flow_ctxt;
+
+	sxe2_flow_ppp_comm_ctxt_init(&rss_flow_ctxt->ppp, adapter,
+				     SXE2_HW_BLOCK_ID_RSS);
+
+	sxe2_rss_comm_init(rss_flow_ctxt);
+
+	LOG_DEBUG_BDF("rss flow context init done.\n");
+}
+
+void sxe2_rss_flow_ctxt_deinit(struct sxe2_adapter *adapter)
+{
+	struct sxe2_rss_ctxt *rss_flow_ctxt = &adapter->rss_flow_ctxt;
+
+	sxe2_flow_ppp_comm_ctxt_deinit(&rss_flow_ctxt->ppp);
+	sxe2_rss_comm_deinit(rss_flow_ctxt);
+
+	LOG_DEBUG_BDF("rss flow context deinit done.\n");
+}
+
+void sxe2_rss_ctxt_init(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	vsi->rss_ctxt.hash_type = SXE2_RSS_HASH_FUNC_TOEPLITZ;
+	switch (vsi->type) {
+	case SXE2_VSI_T_PF:
+		vsi->rss_ctxt.lut_type = SXE2_RSS_PF_LUT;
+		vsi->rss_ctxt.lut_size = adapter->caps_ctxt.max_rss_lut_size;
+		vsi->rss_ctxt.queue_size =
+			min_t(u16, SXE2_PF_LUT_MAX_QUEUE, vsi->rxqs.q_cnt);
+		break;
+	case SXE2_VSI_T_DPDK_PF:
+		vsi->rss_ctxt.lut_type = SXE2_RSS_GLOBAL_LUT;
+		vsi->rss_ctxt.lut_size = SXE2_RSS_LUT_SIZE_512;
+		vsi->rss_ctxt.global_lut_id = (u8)adapter->caps_ctxt.global_lut_base;
+		vsi->rss_ctxt.queue_size = min_t(u16, SXE2_GLOBAL_LUT_MAX_QUEUE,
+						 vsi->rxqs.q_cnt);
+		break;
+	case SXE2_VSI_T_VF:
+	case SXE2_VSI_T_DPDK_VF:
+		vsi->rss_ctxt.lut_type = SXE2_RSS_VSI_LUT;
+		vsi->rss_ctxt.lut_size = SXE2_RSS_LUT_SIZE_64;
+		vsi->rss_ctxt.queue_size =
+			min_t(u16, SXE2_VSI_LUT_MAX_QUEUE, vsi->rxqs.q_cnt);
+		break;
+	default:
+		LOG_INFO_BDF("unsupported VSI type: %u\n", vsi->type);
+		break;
+	}
+	LOG_DEBUG_BDF("rss ctxt init ok, hash type: %u, lut_type: %u, lut_size: %u,\t"
+		      "queue_size: %u\n",
+		      vsi->rss_ctxt.hash_type, vsi->rss_ctxt.lut_type,
+		      vsi->rss_ctxt.lut_size, vsi->rss_ctxt.queue_size);
+}
+
+void sxe2_rss_fill_lut(u8 *lut, u16 lut_size, u16 queue_size)
+{
+	u16 i = 0;
+
+	for (i = 0; i < lut_size; i++)
+		lut[i] = (u8)(i % queue_size);
+}
+
+static void sxe2_default_rss_cfg_create(void)
+{
+	default_rss_cfg_ip4.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_ip4.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV4, default_rss_cfg_ip4.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_rss_cfg_ip4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_rss_cfg_ip4.hash_flds);
+
+	default_rss_cfg_tcp4.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_tcp4.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV4, default_rss_cfg_tcp4.headers);
+	set_bit(SXE2_FLOW_HDR_TCP, default_rss_cfg_tcp4.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_SA,
+		default_rss_cfg_tcp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_rss_cfg_tcp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, default_rss_cfg_tcp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, default_rss_cfg_tcp4.hash_flds);
+
+	default_rss_cfg_udp4.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_udp4.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV4, default_rss_cfg_udp4.headers);
+	set_bit(SXE2_FLOW_HDR_UDP, default_rss_cfg_udp4.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_rss_cfg_udp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_rss_cfg_udp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, default_rss_cfg_udp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, default_rss_cfg_udp4.hash_flds);
+
+	default_rss_cfg_sctp4.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_sctp4.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV4, default_rss_cfg_sctp4.headers);
+	set_bit(SXE2_FLOW_HDR_SCTP, default_rss_cfg_sctp4.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_rss_cfg_sctp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_rss_cfg_sctp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, default_rss_cfg_sctp4.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, default_rss_cfg_sctp4.hash_flds);
+
+	default_rss_cfg_ip6.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_ip6.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV6, default_rss_cfg_ip6.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_rss_cfg_ip6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_rss_cfg_ip6.hash_flds);
+
+	default_rss_cfg_tcp6.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_tcp6.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV6, default_rss_cfg_tcp6.headers);
+	set_bit(SXE2_FLOW_HDR_TCP, default_rss_cfg_tcp6.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_rss_cfg_tcp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_rss_cfg_tcp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, default_rss_cfg_tcp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, default_rss_cfg_tcp6.hash_flds);
+
+	default_rss_cfg_udp6.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_udp6.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV6, default_rss_cfg_udp6.headers);
+	set_bit(SXE2_FLOW_HDR_UDP, default_rss_cfg_udp6.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_rss_cfg_udp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_rss_cfg_udp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, default_rss_cfg_udp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, default_rss_cfg_udp6.hash_flds);
+
+	default_rss_cfg_sctp6.hdr_type = SXE2_RSS_ANY_HEADERS;
+	default_rss_cfg_sctp6.symm = false;
+	set_bit(SXE2_FLOW_HDR_IPV6, default_rss_cfg_sctp6.headers);
+	set_bit(SXE2_FLOW_HDR_SCTP, default_rss_cfg_sctp6.headers);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_rss_cfg_sctp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_rss_cfg_sctp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, default_rss_cfg_sctp6.hash_flds);
+	set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, default_rss_cfg_sctp6.hash_flds);
+}
+
+s32 sxe2_rss_default_flow_set(struct sxe2_vsi *vsi)
+{
+	u32 i, j;
+	s32 ret = 0;
+	s32 ret_tmp;
+	struct sxe2_rss_hash_cfg *cfg;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_rss_ctxt *rss_flow_ctxt = &adapter->rss_flow_ctxt;
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_ERROR_BDF("sxe2 rss is in safe mode, not supported.\n");
+		return -EINVAL;
+	}
+
+	sxe2_default_rss_cfg_create();
+
+	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
+		cfg = default_rss_cfgs[i];
+		ret_tmp = sxe2_add_rss_flow(rss_flow_ctxt, vsi->id_in_pf, cfg);
+		if (ret_tmp) {
+			LOG_ERROR_BDF("rss flow[%u] add failed, ret: %d, type:%d,\t"
+				      "symm:%d\n",
+				      i, ret_tmp, cfg->hdr_type, cfg->symm);
+			LOG_ERROR_BDF("headers[%lu]:\n",
+				      BITS_TO_LONGS(SXE2_FLOW_HDR_MAX));
+			for (j = 0; j < BITS_TO_LONGS(SXE2_FLOW_HDR_MAX); j++) {
+				LOG_ERROR_BDF("headers[%u] = 0x%lx\n", j,
+					      cfg->headers[j]);
+			}
+			LOG_ERROR_BDF("hash_flds[%lu]:\n",
+				      BITS_TO_LONGS(SXE2_FLOW_FLD_ID_MAX));
+			for (j = 0; j < BITS_TO_LONGS(SXE2_FLOW_FLD_ID_MAX); j++) {
+				LOG_ERROR_BDF("hash_flds[%u] = 0x%lx\n", j,
					      cfg->hash_flds[j]);
+			}
+		}
+		ret |= ret_tmp;
+	}
+
+	return ret;
+}
+
+void sxe2_rss_vsi_flow_clean(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR("sxe2 rss in safe mode is not supported.\n");
+		return;
+	}
+	(void)sxe2_rss_delete_vsi_flows(&adapter->rss_flow_ctxt, vsi->id_in_pf);
+}
+
+s32 sxe2_fwc_rss_hash_ctrl_set(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_cmd_params cmd = {0};
+	struct sxe2_rss_vsi_hctrl hctrl = {0};
+
+	hctrl.vsi_hw_id = cpu_to_le16(vsi->idx_in_dev);
+	hctrl.hash_type = vsi->rss_ctxt.hash_type;
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_VSI_HCTRL_SET, &hctrl,
+				  sizeof(hctrl), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("vsi hash ctrl set cmd fail, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+s32 sxe2_fwc_rss_lut_set(struct sxe2_vsi *vsi, u8 *lut, u16 lut_size)
+{
+	s32 ret = 0;
+	u16 buff_size = sizeof(struct sxe2_rss_lut_cfg) + lut_size;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_cmd_params cmd = {0};
+	struct sxe2_rss_lut_cfg *lut_cfg = NULL;
+
+	if (lut_size > vsi->rss_ctxt.lut_size) {
+		LOG_ERROR_BDF("lut size = %u is invalid!\n", lut_size);
+		return -EINVAL;
+	}
+
+	lut_cfg = kzalloc(buff_size, GFP_KERNEL);
+	if (!lut_cfg) {
+		LOG_ERROR_BDF("no memory!\n");
+		return -ENOMEM;
+	}
+	lut_cfg->vsi_hw_id = cpu_to_le16(vsi->idx_in_dev);
+	lut_cfg->lut_type = vsi->rss_ctxt.lut_type;
+	lut_cfg->global_lut_id = 0;
+	if (vsi->rss_ctxt.lut_type == SXE2_RSS_GLOBAL_LUT)
+		lut_cfg->global_lut_id = vsi->rss_ctxt.global_lut_id;
+
+	lut_cfg->lut_size = cpu_to_le16(lut_size);
+	memcpy(lut_cfg->lut, lut, lut_size);
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_LUT_SET, lut_cfg, buff_size,
+				  NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("rss lut set cmd fail, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	kfree(lut_cfg);
+	return ret;
+}
+
+s32 sxe2_fwc_rss_lut_get(struct sxe2_vsi *vsi, u8 *lut, u16 lut_size)
+{
+	s32 ret = 0;
+	u16 buff_size = sizeof(struct sxe2_rss_lut_cfg) + lut_size;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_cmd_params cmd = {0};
+	struct sxe2_rss_lut_cfg *lut_cfg = NULL;
+
+	if (!lut)
+		return -EINVAL;
+
+	lut_cfg = kzalloc(buff_size, GFP_KERNEL);
+	if (!lut_cfg) {
+		LOG_ERROR_BDF("no memory!\n");
+		return -ENOMEM;
+	}
+	lut_cfg->vsi_hw_id = cpu_to_le16(vsi->idx_in_dev);
+	lut_cfg->lut_type = vsi->rss_ctxt.lut_type;
+	lut_cfg->global_lut_id = 0;
+	lut_cfg->lut_size = cpu_to_le16(lut_size);
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_LUT_GET, lut_cfg, buff_size,
+				  lut_cfg, buff_size);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("rss lut get cmd fail, ret=%d\n", ret);
+		ret = -EIO;
+	}
+
+	memcpy(lut, lut_cfg->lut, lut_size);
+
+	kfree(lut_cfg);
+	return ret;
+}
+
+s32
sxe2_fwc_rss_hkey_set(struct sxe2_vsi *vsi, u8 *hkey) +{ + s32 ret = 0; + u8 i; + u16 buff_size = sizeof(struct sxe2_rss_hkey_cfg) + SXE2_RSS_HASH_KEY_SIZE; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_rss_hkey_cfg *hkey_cfg = NULL; + + hkey_cfg = kzalloc(buff_size, GFP_KERNEL); + if (!hkey_cfg) { + LOG_ERROR_BDF("no memory!\n"); + return -ENOMEM; + } + hkey_cfg->vsi_hw_id = cpu_to_le16(vsi->idx_in_dev); + + for (i = 0; i < SXE2_RSS_HASH_KEY_SIZE; i++) + hkey_cfg->key[i] = hkey[i]; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_HKEY_SET, hkey_cfg, buff_size, + NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("rss hash key set cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + kfree(hkey_cfg); + return ret; +} + +s32 sxe2_fwc_rss_hkey_get(struct sxe2_vsi *vsi, u8 *hkey) +{ + s32 ret = 0; + u16 buff_size = sizeof(struct sxe2_rss_hkey_cfg) + SXE2_RSS_HASH_KEY_SIZE; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_rss_hkey_cfg *hkey_cfg = NULL; + + if (!hkey) + return -EINVAL; + + hkey_cfg = kzalloc(buff_size, GFP_KERNEL); + if (!hkey_cfg) { + LOG_ERROR_BDF("no memory!\n"); + return -ENOMEM; + } + hkey_cfg->vsi_hw_id = cpu_to_le16(vsi->idx_in_dev); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_HKEY_GET, hkey_cfg, buff_size, + hkey_cfg, buff_size); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("rss hash key get cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + memcpy(hkey, hkey_cfg->key, SXE2_RSS_HASH_KEY_SIZE); + + kfree(hkey_cfg); + return ret; +} + +void sxe2_fwc_rss_trace_trigger(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_TRACE_TRIGGER, NULL, 0, NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("rss trace trigger cmd fail, ret=%d\n", ret); +} + +void sxe2_fwc_rss_trace_recorder(struct sxe2_adapter *adapter) +{ + s32 ret; + s32 i = 0; + struct sxe2_cmd_params cmd = {0}; + struct sxe2_rss_trace_recorder recorder = {0}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RSS_TRACE_RECORDER, NULL, 0, + &recorder, sizeof(struct sxe2_rss_trace_recorder)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("rss trace recorder cmd fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("****rss trace recorder start****"); + LOG_DEV_INFO("status0: %u\n", recorder.trace_status0); + if (recorder.trace_status0 == 0) { + LOG_DEV_INFO("profile_id0: 0x%08X\n", + __le32_to_cpu(recorder.profile_id0)); + for (i = 0; i < SXE2_RSS_FV_TRACE_CNT; i++) { + LOG_DEV_INFO("fv[%d]: 0x%08X\n", i, + __le32_to_cpu(recorder.fv[i])); + } + } + LOG_DEV_INFO("status1: %u\n", recorder.trace_status1); + if (recorder.trace_status1 == 0) + LOG_DEV_INFO("hash1: 0x%08X\n", __le32_to_cpu(recorder.hash1)); + + LOG_DEV_INFO("status2: %u\n", recorder.trace_status2); + if (recorder.trace_status2 == 0) { + LOG_DEV_INFO("hash2: 0x%08X\n", __le32_to_cpu(recorder.hash2)); + LOG_DEV_INFO("profile_id2: %u\n", recorder.profile_id2); + LOG_DEV_INFO("bad_profile: %u\n", recorder.bad_profile); + LOG_DEV_INFO("q_index: %u\n", __le16_to_cpu(recorder.q_index)); + LOG_DEV_INFO("thread_id: %u\n", recorder.thread_id); + LOG_DEV_INFO("vsi: %u\n", __le16_to_cpu(recorder.vsi)); + } + LOG_DEV_INFO("****rss trace recorder end****"); +} + +u16 sxe2_rss_queue_size_correct(u16 new_size) +{ + return (u16)min_t(u16, SXE2_PF_LUT_MAX_QUEUE, new_size); +} + +s32 
sxe2_rss_lut_reset(struct sxe2_vsi *vsi, u16 queue_size) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + u8 *lut = NULL; + u16 new_queue_size = 0; + + if (queue_size == 0) { + LOG_ERROR_BDF("invalid queue size!"); + return -EINVAL; + } + + lut = kzalloc(vsi->rss_ctxt.lut_size, GFP_KERNEL); + if (!lut) { + ret = -ENOMEM; + LOG_ERROR_BDF("no memory!\n"); + goto l_lut_alloc_failed; + } + + new_queue_size = sxe2_rss_queue_size_correct(queue_size); + + sxe2_rss_fill_lut(lut, vsi->rss_ctxt.lut_size, new_queue_size); + ret = sxe2_fwc_rss_lut_set(vsi, lut, vsi->rss_ctxt.lut_size); + if (ret != 0) { + LOG_ERROR_BDF("sxe2_rss_lut_set failed, ret: %d, lut: %p, lut_size:\t" + "%u\n", + ret, lut, vsi->rss_ctxt.lut_size); + goto l_lut_free; + } + + vsi->rss_ctxt.queue_size = new_queue_size; + if (vsi->rss_ctxt.lut) { + memcpy(vsi->rss_ctxt.lut, lut, vsi->rss_ctxt.lut_size); + LOG_DEV_INFO("rx queue size change, clearing user lut,\n" + "re-run ethtool [-x|-X] to [check|set] settings if\t" + "needed.\n"); + } + +l_lut_free: + kfree(lut); +l_lut_alloc_failed: + return ret; +} + +s32 sxe2_rss_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter) +{ + u16 i = 0; + u16 j = 0; + u16 table_idx = 0; + s32 ret = 0; + u16 per_size = 0; + u16 ddp_max_cnt; + u8 port_idx = adapter->port_idx; + + per_size = sizeof(struct sxe2_ddp_rxft_ptg); + ddp_max_cnt = (SXE2_MAX_PTYPE_NUM * SXE2_MAX_CDID_NUM) / per_size; + if (!data || base_id >= ddp_max_cnt || cnt > ddp_max_cnt) { + LOG_ERROR_BDF("sxe2 rss ptg parse from ddp failed, port_idx=%u !\n", + port_idx); + ret = -EINVAL; + goto l_end; + } + + table_idx = (u16)((u32)base_id * per_size); + for (i = 0; i < cnt; i++) { + for (j = 0; j < per_size; j++) { + if (table_idx >= (port_idx * SXE2_MAX_PTYPE_NUM) && + table_idx < ((port_idx + 1) * SXE2_MAX_PTYPE_NUM)) { + adapter->rss_flow_ctxt.ppp + .pt_to_grp[table_idx % + SXE2_MAX_PTYPE_NUM] + .idx = *data; + } + table_idx++; + data++; + } + } + LOG_INFO_BDF("sxe2 rss ptg parse from ddp, port_idx=%u !\n", port_idx); + +l_end: + return ret; +} + +void sxe2_rss_xlt2_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_xlt2_dump(&adapter->rss_flow_ctxt.ppp); +} + +void sxe2_rss_vsig_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_vsig_dump(&adapter->rss_flow_ctxt.ppp); +} + +void sxe2_rss_prof_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_prof_dump(&adapter->rss_flow_ctxt.ppp); +} + +void sxe2_rss_mask_dump(struct sxe2_adapter *adapter) +{ + sxe2_flow_mask_dump(&adapter->rss_flow_ctxt.ppp); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.h new file mode 100644 index 0000000000000000000000000000000000000000..9bd03268f37e3e98d50a1ebf922b3742e8617dfa --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rss.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_rss.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_RSS_H__
+#define __SXE2_RSS_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "sxe2_vsi.h"
+
+#ifdef SXE2_TEST
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#define SXE2_RSS_HASH_KEY_SIZE 52
+
+enum sxe2_rss_lut_type {
+	SXE2_RSS_VSI_LUT = 0,
+	SXE2_RSS_LUT_TYPE_RSV,
+	SXE2_RSS_PF_LUT,
+	SXE2_RSS_GLOBAL_LUT,
+	SXE2_RSS_MAX_LUT_TYPE,
+};
+
+enum sxe2_rss_lut_size {
+	SXE2_RSS_LUT_SIZE_64 = 64,
+	SXE2_RSS_LUT_SIZE_128 = 128,
+	SXE2_RSS_LUT_SIZE_512 = 512,
+	SXE2_RSS_LUT_SIZE_2K = 2048,
+};
+
+enum sxe2_rss_lut_max_queue {
+	SXE2_VSI_LUT_MAX_QUEUE = 16,
+	SXE2_GLOBAL_LUT_MAX_QUEUE = 64,
+	SXE2_PF_LUT_MAX_QUEUE = 256,
+};
+
+void sxe2_rss_flow_ctxt_init(struct sxe2_adapter *adapter);
+
+void sxe2_rss_flow_ctxt_deinit(struct sxe2_adapter *adapter);
+
+void sxe2_rss_ctxt_init(struct sxe2_vsi *vsi);
+
+void sxe2_rss_fill_lut(u8 *lut, u16 lut_size, u16 queue_size);
+
+s32 sxe2_rss_default_flow_set(struct sxe2_vsi *vsi);
+
+void sxe2_rss_vsi_flow_clean(struct sxe2_vsi *vsi);
+
+s32 sxe2_fwc_rss_hash_ctrl_set(struct sxe2_vsi *vsi);
+
+s32 sxe2_fwc_rss_lut_set(struct sxe2_vsi *vsi, u8 *lut, u16 lut_size);
+
+s32 sxe2_fwc_rss_lut_get(struct sxe2_vsi *vsi, u8 *lut, u16 lut_size);
+
+s32 sxe2_fwc_rss_hkey_set(struct sxe2_vsi *vsi, u8 *hkey);
+
+s32 sxe2_fwc_rss_hkey_get(struct sxe2_vsi *vsi, u8 *hkey);
+
+void sxe2_fwc_rss_trace_trigger(struct sxe2_adapter *adapter);
+
+void sxe2_fwc_rss_trace_recorder(struct sxe2_adapter *adapter);
+
+u16 sxe2_rss_queue_size_correct(u16 new_size);
+
+s32 sxe2_rss_lut_reset(struct sxe2_vsi *vsi, u16 queue_size);
+
+void sxe2_rss_clean_for_vf(struct sxe2_vsi *vsi, bool need_clear_hw);
+
+s32 sxe2_rss_ptg_parse_from_ddp(u8 *data, u16 cnt, u16 base_id,
+				struct sxe2_adapter *adapter);
+
+void sxe2_rss_xlt2_dump(struct sxe2_adapter *adapter);
+
+void sxe2_rss_vsig_dump(struct sxe2_adapter *adapter);
+
+void sxe2_rss_prof_dump(struct sxe2_adapter *adapter);
+
+void sxe2_rss_mask_dump(struct sxe2_adapter *adapter);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.c
new file mode 100644
index 0000000000000000000000000000000000000000..94eb5ed6f59ad9104b9995f810469faa49f36842
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.c
@@ -0,0 +1,2262 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_rx.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/prefetch.h>
+#include <linux/mm.h>
+#include <linux/if_macvlan.h>
+#ifdef HAVE_XDP_SUPPORT
+#ifdef HAVE_XDP_BUFF_IN_XDP_H
+#include <net/xdp.h>
+#else
+#include <linux/filter.h>
+#endif
+#endif
+
+#include "sxe2_compat.h"
+#include "sxe2.h"
+#include "sxe2_log.h"
+#include "sxe2_netdev.h"
+#include "sxe2_vsi.h"
+#include "sxe2_queue.h"
+#include "sxe2_rx.h"
+#include "sxe2_common.h"
+#include "sxe2_skb_dump.h"
+#include "sxe2_cmd_channel.h"
+#include "sxe2_ptp.h"
+#include "sxe2_xsk.h"
+
+#define SXE2_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - SXE2_RXBUF_2048)
+
+STATIC s32 sxe2_xdp_info_get(struct sxe2_queue *rxq)
+{
+	s32 rc = 0;
+
+	if (sxe2_xdp_is_enable(rxq->vsi))
+		WRITE_ONCE(rxq->xdp_prog, rxq->vsi->xdp_prog);
+
+#ifdef HAVE_XDP_BUFF_RXQ
+	if (rxq->vsi->type == SXE2_VSI_T_PF && !xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+		if (xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->netdev, rxq->idx_in_vsi,
+				     rxq->irq_data->napi.napi_id))
+			rc = -ENOMEM;
+#endif
+
+	return rc;
+}
+
+static void sxe2_rx_page_free(struct sxe2_queue *rxq, struct sxe2_rx_buf *buf_info)
+{
+	dma_unmap_page_attrs(rxq->dev, buf_info->dma, SXE2_RX_PAGE_SIZE(rxq),
+			     DMA_FROM_DEVICE, SXE2_RX_DMA_ATTR);
+
+	__page_frag_cache_drain(buf_info->page, buf_info->drv_refcnt);
+
+	buf_info->page = NULL;
+	buf_info->pg_offset = 0;
+}
+
+void sxe2_rx_ring_clean(struct sxe2_queue *rxq)
+{
+	struct device *dev = rxq->dev;
+	struct sxe2_rx_buf *rx_buf_info;
+	u16 i;
+
+	if (!rxq->rx_buf)
+		return;
+
+	if (rxq->skb) {
+		dev_kfree_skb(rxq->skb);
+		rxq->skb = NULL;
+	}
+
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+	if (rxq->xsk_pool) {
+		sxe2_xsk_clean_rx_ring(rxq);
+		goto rx_skip_free;
+	}
+#endif
+
+	for (i = 0; i < rxq->depth; i++) {
+		rx_buf_info = &rxq->rx_buf[i];
+		if (!rx_buf_info->page)
+			continue;
+
+		dma_sync_single_range_for_cpu(dev, rx_buf_info->dma,
+					      rx_buf_info->pg_offset,
+					      rxq->rx_buf_len, DMA_FROM_DEVICE);
+		sxe2_rx_page_free(rxq, rx_buf_info);
+	}
+
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+rx_skip_free:
+#endif
+	memset(rxq->rx_buf, 0, SXE2_RX_BUF_LEN(rxq));
+	memset(rxq->desc.base_addr, 0, SXE2_RX_DESC_SIZE(rxq));
+
+	rxq->next_to_alloc = 0;
+	rxq->next_to_clean = 0;
+	rxq->next_to_use = 0;
+}
+
+void sxe2_rx_ring_free(struct sxe2_queue *rxq)
+{
+#ifdef HAVE_XDP_BUFF_RXQ
+	if (rxq->vsi->type == SXE2_VSI_T_PF)
+		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+			xdp_rxq_info_unreg(&rxq->xdp_rxq);
+#endif
+	rxq->xdp_prog = NULL;
+
+	kfree(rxq->rx_buf);
+	rxq->rx_buf = NULL;
+
+	if (rxq->desc.base_addr) {
+		dmam_free_coherent(rxq->dev, SXE2_RX_DESC_SIZE(rxq),
+				   rxq->desc.base_addr, rxq->desc.dma);
+		rxq->desc.base_addr = NULL;
+	}
+
+	LOG_DEBUG("rxq[%u] ring memory free.\n", rxq->idx_in_vsi);
+}
+
+static void sxe2_rx_rings_free(struct sxe2_vsi *vsi)
+{
+	u16 i;
+	struct sxe2_queue *rxq;
+
+	sxe2_for_each_vsi_rxq(vsi, i)
+	{
+		rxq = vsi->rxqs.q[i];
+		sxe2_rx_ring_free(rxq);
+	}
+}
+
+s32 sxe2_rx_ring_alloc(struct sxe2_queue *rxq)
+{
+	s32 ret;
+	u32 size;
+	struct sxe2_adapter *adapter = rxq->vsi->adapter;
+
+	rxq->rx_buf = kcalloc(rxq->depth, sizeof(*rxq->rx_buf), GFP_KERNEL);
+	if (!rxq->rx_buf) {
+		ret = -ENOMEM;
+		LOG_ERROR_BDF("rxq:%d alloc %luB buffer info failed.(err:%d)\n",
+			      rxq->idx_in_pf, rxq->depth * sizeof(*rxq->rx_buf),
+			      ret);
+		return ret;
+	}
+
+	size = SXE2_RX_DESC_SIZE(rxq);
+	rxq->desc.base_addr = dmam_alloc_coherent(rxq->dev, size, &rxq->desc.dma,
+						  GFP_KERNEL);
+	if (!rxq->desc.base_addr) {
+		ret = -ENOMEM;
+		LOG_DEV_ERR("rxq:%d alloc %uB descriptor failed.(err:%d)\n",
+			    rxq->idx_in_pf, size, ret);
+		goto l_desc_alloc_failed;
+	}
+
+	rxq->next_to_use = 0;
+	rxq->next_to_clean = 0;
+
+	ret = sxe2_xdp_info_get(rxq);
+	if (ret) {
+		LOG_ERROR_BDF("rxq:%u xdp info get fail.(err:%d)\n", rxq->idx_in_pf,
+			      ret);
+		goto l_xdp_failed;
+	}
+
+	LOG_DEBUG_BDF("rxq[%u][%u] depth:%u rx_buf:%p size:%lu\t"
+		      "descriptor base va:%p dma:0x%llx size:%u.\n",
+		      rxq->idx_in_vsi, rxq->idx_in_pf, rxq->depth, rxq->rx_buf,
+		      rxq->depth * sizeof(*rxq->rx_buf), rxq->desc.base_addr,
+		      rxq->desc.dma, size);
+
+	return ret;
+
+l_xdp_failed:
+	dmam_free_coherent(rxq->dev, size, rxq->desc.base_addr, rxq->desc.dma);
+	rxq->desc.base_addr = NULL;
+
+l_desc_alloc_failed:
+	kfree(rxq->rx_buf);
+	rxq->rx_buf = NULL;
+
+	return ret;
+}
+
+static s32 sxe2_rx_rings_alloc(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+	u16 i;
+	struct sxe2_queue *rxq;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	sxe2_for_each_vsi_rxq(vsi, i)
+	{
+		rxq = vsi->rxqs.q[i];
+
+		if (vsi->netdev)
+			rxq->netdev = vsi->netdev;
+
+		ret = sxe2_rx_ring_alloc(rxq);
+		if (ret) {
+			LOG_ERROR_BDF("vsi:%d rxq:%d ring alloc failed.(%d)\n",
+				      vsi->idx_in_dev, i, ret);
+			goto l_failed;
+		}
+	}
+
+	return ret;
+
+l_failed:
+	sxe2_rx_rings_free(vsi);
+	return ret;
+}
+
+void sxe2_rx_ring_res_free(struct sxe2_queue *rxq)
+{
+	sxe2_rx_ring_clean(rxq);
+
+	sxe2_rx_ring_free(rxq);
+}
+
+void sxe2_rx_rings_res_free(struct sxe2_vsi *vsi)
+{
+	u16 i;
+
+	sxe2_for_each_vsi_rxq(vsi, i) sxe2_rx_ring_res_free(vsi->rxqs.q[i]);
+}
+
+void sxe2_set_rx_mode(struct net_device *netdev)
+{
+	struct sxe2_netdev_priv *np = netdev_priv(netdev);
+	struct sxe2_vsi *vsi = np->vsi;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	set_bit(SXE2_VSI_S_MAC_FLTR_CHANGED, vsi->state);
+	set_bit(SXE2_FLAG_FLTR_SYNC, adapter->flags);
+
+	sxe2_monitor_work_schedule(adapter);
+}
+
+int sxe2_vsi_vlan_zero_add(struct sxe2_vsi *vsi)
+{
+	struct sxe2_vlan vlan;
+	int ret;
+
+	vlan = SXE2_VLAN(0, 0, 0);
+	ret = sxe2_vlan_rule_add(vsi, &vlan);
+	if (ret && ret != -EEXIST)
+		return ret;
+
+	vlan = SXE2_VLAN(ETH_P_8021Q, 0, 0);
+	ret = sxe2_vlan_rule_add(vsi, &vlan);
+	if (ret && ret != -EEXIST)
+		return ret;
+
+	return 0;
+}
+
+s32 sxe2_vlan_cfg(struct sxe2_vsi *vsi)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	sxe2_hw_l2tag_accept(hw, vsi->idx_in_dev);
+
+	sxe2_set_vlan_offload_features(vsi, 0, vsi->netdev->features);
+
+	ret = sxe2_set_vlan_filter_features(vsi, vsi->netdev->features);
+	if (ret)
+		return ret;
+
+	ret = sxe2_vsi_vlan_zero_add(vsi);
+
+	return ret;
+}
+
+void sxe2_frame_size_set(struct sxe2_vsi *vsi)
+{
+	if (!vsi->netdev)
+		vsi->rxqs.max_frame = SXE2_MAX_FRAME_SIZE;
+	else
+		vsi->rxqs.max_frame = (u16)(vsi->netdev->mtu + SXE2_ETH_DEAD_LOAD);
+
+#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB)
+	if (!vsi->netdev ||
+	    test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, vsi->adapter->flags)) {
+		vsi->rxqs.rx_buf_len = SXE2_RXBUF_2048;
+	} else if (!SXE2_PAD_RX_LEN_EXCEED_2K &&
+		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+		vsi->rxqs.rx_buf_len = SXE2_RXBUF_1536 - NET_IP_ALIGN;
+	} else {
+		vsi->rxqs.rx_buf_len = SXE2_RXBUF_3072;
+	}
+#else
+	vsi->rxqs.rx_buf_len = SXE2_RXBUF_2048;
+#endif
+
+	LOG_INFO("vsi:%u max_frame:%u rx_buf_len:%u set done.\n", vsi->idx_in_dev,
+		 vsi->rxqs.max_frame, vsi->rxqs.rx_buf_len);
+}
+
+static u16 sxe2_rxq_get_lro_desc_num(u32 max_lro_pkt_size, u16 buf_size)
+{
+	u16 lro_desc_num_arr[] = {1, 4, 8, 16, 32, 48, 64};
+	u16 desc_num;
+	u16 lro_desc_num = SXE2_RX_LRO_DESC_MAX_64;
+	u16 i;
+
+	desc_num = (u16)(max_lro_pkt_size / buf_size);
+	desc_num
= desc_num ? desc_num : 1; + + for (i = 0; i < ARRAY_SIZE(lro_desc_num_arr); i++) { + if (desc_num <= lro_desc_num_arr[i]) { + lro_desc_num = lro_desc_num_arr[i]; + break; + } + } + LOG_DEBUG("Rx queue max_lro_pkt_size:%u buf_size:%u lro_desc_num:%u->%u.", + max_lro_pkt_size, buf_size, desc_num, lro_desc_num); + return lro_desc_num; +} + +static s32 sxe2_buffer_split_set(struct sxe2_queue *rxq, + struct sxe2_rxq_ctxt *rx_ctx) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + s32 ret = 0; + u32 proto_hdr; + + if (test_bit(SXE2_RXQ_SPLIT_ENABLED, &rxq->flags)) { + proto_hdr = rxq->split_type_mask; + if (proto_hdr == SXE2_PTYPE_UNKNOWN) { + LOG_ERROR_BDF("Buffer split protocol must be configured\n"); + ret = -EINVAL; + goto l_end; + } + + rx_ctx->hbuff_len = + cpu_to_le16(rxq->hdr_len >> SXE2_RX_HBUF_LEN_UNIT); + switch (proto_hdr & SXE2_PTYPE_L4_MASK) { + case SXE2_PTYPE_L4_TCP: + case SXE2_PTYPE_L4_UDP: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_TCP_UDP; + goto l_end; + case SXE2_PTYPE_L4_SCTP: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_SCTP; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_L3_MASK) { + case SXE2_PTYPE_L3_IPV4: + case SXE2_PTYPE_L3_IPV6: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_IP; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_L2_MASK) { + case SXE2_PTYPE_L2_ETHER: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_L2; + rx_ctx->hsplit_1 = SXE2_RX_HSPLIT_1_SPLIT_L2; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_INNER_L4_MASK) { + case SXE2_PTYPE_INNER_L4_TCP: + case SXE2_PTYPE_INNER_L4_UDP: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_TCP_UDP; + goto l_end; + case SXE2_PTYPE_INNER_L4_SCTP: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_SCTP; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_INNER_L3_MASK) { + case SXE2_PTYPE_INNER_L3_IPV4: + case SXE2_PTYPE_INNER_L3_IPV6: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_IP; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_INNER_L2_MASK) { + case SXE2_PTYPE_INNER_L2_ETHER: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_SPLIT_L2; + goto l_end; + } + + switch (proto_hdr & SXE2_PTYPE_TUNNEL_MASK) { + case SXE2_PTYPE_TUNNEL_GRENAT: + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_CONDITION; + rx_ctx->hsplit_1 = SXE2_RX_HSPLIT_1_SPLIT_ALWAYS; + goto l_end; + } + + LOG_ERROR_BDF("Buffer split protocol is not supported\n"); + ret = -EINVAL; + } else { + rx_ctx->hsplit_type = SXE2_RX_HSPLIT_NO_SPLIT; + rx_ctx->hsplit_0 = SXE2_RX_HSPLIT_0_NO_SPLIT; + rx_ctx->hsplit_1 = SXE2_RX_HSPLIT_1_NO_SPLIT; + } + +l_end: + return ret; +} + +static s32 sxe2_rxq_ctxt_fill(struct sxe2_queue *rxq, struct sxe2_rxq_ctxt *rxq_ctxt) +{ + u8 chain_len = SXE2_RX_BUF_CHAINED_MAX; + struct sxe2_adapter *adapter = rxq->vsi->adapter; + s32 ret = 0; + + memset(rxq_ctxt, 0, sizeof(*rxq_ctxt)); + + rxq_ctxt->base_addr = + cpu_to_le64(rxq->desc.dma >> SXE2_RX_DESC_BASE_ADDR_UNIT); + rxq_ctxt->depth = cpu_to_le16(rxq->depth); + + rxq_ctxt->dbuff_len = cpu_to_le16(rxq->rx_buf_len >> SXE2_RX_DBUF_LEN_UNIT); + + if (rxq->desc_size == SXE2_DESCRIPTOR_SIZE_16B) + rxq_ctxt->desc_type = SXE2_DESCRIPTOR_16; + else + rxq_ctxt->desc_type = SXE2_DESCRIPTOR_32; + + if 
(test_bit(SXE2_RXQ_CRC_STRIP_DISABLED, &rxq->flags)) + rxq_ctxt->crc_strip = false; + else + rxq_ctxt->crc_strip = true; + + rxq_ctxt->l2tag1_show = 1; + + rxq_ctxt->inner_vlan_strip = 0; + + if (test_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags)) { + rxq_ctxt->lro_enable = true; + if (rxq->max_lro_size) + rxq_ctxt->lro_desc_max = + cpu_to_le16(sxe2_rxq_get_lro_desc_num(rxq->max_lro_size, + rxq->rx_buf_len)); + else + rxq_ctxt->lro_desc_max = + cpu_to_le16(SXE2_RX_LRO_DESC_MAX_16); + rxq_ctxt->hbuff_len = cpu_to_le16(SXE2_HBUFF_LEN_MIN); + } else { + rxq_ctxt->lro_enable = false; + } + + ret = sxe2_buffer_split_set(rxq, rxq_ctxt); + if (ret) + return ret; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (rxq->xsk_pool) + chain_len = 1; +#endif + + rxq_ctxt->max_frame_size = cpu_to_le16(min_t(u32, rxq->vsi->rxqs.max_frame, + (chain_len * (rxq->rx_buf_len & SXE2_RX_DBUF_LEN_MASK)))); + + rxq_ctxt->low_desc_waterline = SXE2_RX_DESC_LOW_WATERLINE_1; + rxq_ctxt->pref_enable = 1; + + if (rxq->vsi->type == SXE2_VSI_T_VF || + rxq->vsi->type == SXE2_VSI_T_DPDK_VF) { + rxq_ctxt->vfen = true; + + rxq_ctxt->vfid = cpu_to_le16(adapter->vf_ctxt.vfid_base + + rxq->vsi->vf_node->vf_idx); + } + rxq_ctxt->vsi_id = cpu_to_le16(rxq->vsi->idx_in_dev); + rxq_ctxt->pfid = rxq->vsi->adapter->pf_idx; + + LOG_INFO_BDF("base addr:0x%llx depth:%u dbuff_len:%u desc_type:%u\t" + "crc_strip:%u l2_tag_flag:%u lro_enable:%u lro_max_desc:%u\t" + "max_frame_size:%u pfid:%u vfid:%u. chain_len:%d.\n", + rxq_ctxt->base_addr, rxq->depth, rxq_ctxt->dbuff_len, + rxq_ctxt->desc_type, rxq_ctxt->crc_strip, rxq_ctxt->l2tag1_show, + rxq_ctxt->lro_enable, rxq_ctxt->lro_desc_max, + rxq_ctxt->max_frame_size, rxq_ctxt->pfid, rxq_ctxt->vfid, + chain_len); + return ret; +} + +#if (PAGE_SIZE < 8192) +static u16 sxe2_skb_pad_cal(void) +{ + u16 rx_buf_len; + u16 value; + + if (SXE2_PAD_RX_LEN_EXCEED_2K) + rx_buf_len = SXE2_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = SXE2_RXBUF_1536; + + rx_buf_len -= NET_IP_ALIGN; + + value = ALIGN(rx_buf_len, PAGE_SIZE / 2); + + value = SKB_WITH_OVERHEAD(value) - rx_buf_len; + + return value; +} +#endif + +static u16 sxe2_rx_offset_get(struct sxe2_queue *rxq) +{ + if (test_bit(SXE2_RXQ_BUILD_SKB_ENABLED, &rxq->flags)) + return SXE2_SKB_PAD_VALUE; +#ifdef HAVE_XDP_SUPPORT + else if (sxe2_xdp_is_enable(rxq->vsi)) + return XDP_PACKET_HEADROOM; +#endif + + return 0; +} + +static bool sxe2_rx_page_alloc(struct sxe2_queue *rxq, struct sxe2_rx_buf *buf_info) +{ + struct page *page; + dma_addr_t dma; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + if (likely(buf_info->page)) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_IRQ("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p\t" + "order:%u \t" + "dma:%llu pg_offset:%u drv_refcnt:0x%x\t" + "reused.\n", + rxq->idx_in_vsi, rxq->next_to_use, + rxq->next_to_clean, rxq->next_to_alloc, + buf_info->page, SXE2_RX_PAGE_ORDER(rxq), + buf_info->dma, buf_info->pg_offset, + buf_info->drv_refcnt); + } +#endif + return true; + } + + page = dev_alloc_pages(SXE2_RX_PAGE_ORDER(rxq)); + if (unlikely(!page)) { + rxq->stats->rx_stats.rx_pg_alloc_fail++; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_ERROR_IRQ("rxq[%u] ntu:%u ntc:%u nta:%u rx pg_offset:%u\t" + "drv_refcnt:0x%x page alloc fail.\n", + rxq->idx_in_vsi, rxq->next_to_use, + rxq->next_to_clean, rxq->next_to_alloc, + buf_info->pg_offset, buf_info->drv_refcnt); + } +#endif + return false; + } + + dma = 
dma_map_page_attrs(rxq->dev, page, 0, SXE2_RX_PAGE_SIZE(rxq), + DMA_FROM_DEVICE, SXE2_RX_DMA_ATTR); + + if (dma_mapping_error(rxq->dev, dma)) { + __free_pages(page, SXE2_RX_PAGE_ORDER(rxq)); + rxq->stats->rx_stats.rx_pg_alloc_fail++; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_ERROR_IRQ("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p\t" + "dma:%llu \t" + "pg_offset:%u drv_refcnt:0x%x dma map\t" + "fail.\n", + rxq->idx_in_vsi, rxq->next_to_use, + rxq->next_to_clean, rxq->next_to_alloc, page, + dma, buf_info->pg_offset, + buf_info->drv_refcnt); + } +#endif + return false; + } + + rxq->stats->rx_stats.rx_page_alloc++; + buf_info->dma = dma; + buf_info->page = page; + buf_info->pg_offset = rxq->rx_offset; + + page_ref_add(page, USHRT_MAX - 1); + buf_info->drv_refcnt = USHRT_MAX; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_IRQ("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p order:%u\t" + "dma:%llu\t" + "pg_offset:%u drv_refcnt:0x%x.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_clean, + rxq->next_to_alloc, page, SXE2_RX_PAGE_ORDER(rxq), dma, + buf_info->pg_offset, buf_info->drv_refcnt); + } +#endif + + return true; +} + +void sxe2_rxq_tail_update(struct sxe2_queue *rxq, u16 new) +{ + u16 prev_ntu = rxq->next_to_use; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + rxq->next_to_use = new; + rxq->next_to_alloc = new; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u] idx_in_pf:%u ntu:%u nta:%u tail:%u update.\n", + rxq->idx_in_vsi, rxq->idx_in_pf, rxq->next_to_use, + rxq->next_to_alloc, new); + } +#endif + + if (prev_ntu != new) { + wmb(); + writel(new, rxq->desc.tail); + } +} + +bool sxe2_rx_buffers_alloc(struct sxe2_queue *rxq, u16 cnt) +{ + union sxe2_rx_desc *desc; + struct sxe2_rx_buf *buf_info; + u16 ntu = rxq->next_to_use; + + if ((!rxq->netdev && rxq->vsi->type == SXE2_VSI_T_CTRL)) + return false; + + if (!cnt) + return false; + + desc = SXE2_RX_DESC(rxq, ntu); + buf_info = &rxq->rx_buf[ntu]; + + do { + if (!sxe2_rx_page_alloc(rxq, buf_info)) + break; + + dma_sync_single_range_for_device(rxq->dev, buf_info->dma, + buf_info->pg_offset, + rxq->rx_buf_len, DMA_FROM_DEVICE); + desc->read.pkt_addr = + cpu_to_le64(buf_info->dma + buf_info->pg_offset); + + desc++; + buf_info++; + ntu++; + + if (unlikely(ntu == rxq->depth)) { + desc = SXE2_RX_DESC(rxq, 0); + buf_info = rxq->rx_buf; + ntu = 0; + } + + desc->wb.status0_err = 0; + + cnt--; + } while (cnt); + + if (rxq->next_to_use != ntu) + sxe2_rxq_tail_update(rxq, ntu); + + return !!cnt; +} + +static s32 sxe2_rx_head_tail_init(struct sxe2_queue *rxq) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + u8 __iomem *head; + + rxq->desc.tail = sxe2_reg_addr_get(hw, SXE2_RXQ_TAIL(rxq->idx_in_pf)); + head = sxe2_reg_addr_get(hw, SXE2_RXQ_HEAD(rxq->idx_in_pf)); + if (IS_ERR(rxq->desc.tail) || IS_ERR(head)) { + LOG_ERROR_BDF("vsi:%u queue:%u tail addr: %ld head:%ld error.\n", + rxq->vsi->idx_in_dev, rxq->idx_in_vsi, + PTR_ERR(rxq->desc.tail), PTR_ERR(head)); + return -EFAULT; + } + + writel(0, rxq->desc.tail); + writel(0, head); + LOG_INFO_BDF("rxq:%u head/tail init done head:0x%x tail:0x%x.\n", + rxq->idx_in_pf, readl(head), readl(rxq->desc.tail)); + + return 0; +} + +static void sxe2_rxq_lro_ctrl(struct sxe2_queue *rxq) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + + if (test_bit(SXE2_VSI_FLAG_LRO_ENABLE, 
rxq->vsi->flags)) { + set_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u idx_in_pf:%u lro\t" + "enabled.\n", + rxq->vsi->idx_in_dev, rxq->vsi->type, rxq->idx_in_vsi, + rxq->idx_in_pf); + } else { + clear_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u idx_in_pf:%u lro\t" + "disabled.\n", + rxq->vsi->idx_in_dev, rxq->vsi->type, rxq->idx_in_vsi, + rxq->idx_in_pf); + } +} + +static void sxe2_rxq_build_skb_ctrl(struct sxe2_vsi *vsi, struct sxe2_queue *rxq) +{ + if (!vsi->netdev || + test_bit(SXE2_FLAG_LEGACY_RX_ENABLE, vsi->adapter->flags)) + clear_bit(SXE2_RXQ_BUILD_SKB_ENABLED, &rxq->flags); + else + set_bit(SXE2_RXQ_BUILD_SKB_ENABLED, &rxq->flags); +} + +#ifdef HAVE_XDP_BUFF_RXQ +#ifdef HAVE_AF_XDP_ZC_SUPPORT +STATIC s32 sxe2_rxq_xdp_cfg(struct sxe2_queue *rxq) +{ + s32 err; + struct sxe2_vsi *vsi = rxq->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + if (rxq->vsi->type == SXE2_VSI_T_PF) { + if (!xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + (void)xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->netdev, + rxq->idx_in_vsi, + rxq->irq_data->napi.napi_id); + + rxq->xsk_pool = sxe2_xsk_pool(rxq); + if (rxq->xsk_pool) { + xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq); + + rxq->rx_buf_len = +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_pool_get_rx_frame_size(rxq->xsk_pool); +#else + rxq->xsk_pool->chunk_size_nohr - + XDP_PACKET_HEADROOM; +#endif +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL); + if (err) + return err; + xsk_pool_set_rxq_info(rxq->xsk_pool, &rxq->xdp_rxq); + + LOG_DEV_INFO("Registered XDP mem model\t" + "MEM_TYPE_XSK_BUFF_POOL on Rx rxq %d\n", + rxq->idx_in_vsi); +#else + rxq->zca.free = sxe2_zca_free; + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &rxq->zca); + if (err) + return err; + + LOG_DEV_INFO("Registered XDP mem model MEM_TYPE_ZERO_COPY\t" + "on Rx rxq %d\n", + rxq->idx_in_vsi); +#endif + } else { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + rxq->zca.free = NULL; +#endif + if (!xdp_rxq_info_is_reg(&rxq->xdp_rxq)) + (void)xdp_rxq_info_reg(&rxq->xdp_rxq, rxq->netdev, + rxq->idx_in_vsi, + rxq->irq_data->napi.napi_id); + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) + return err; + } + } + + return 0; +} +#endif +#endif + +static void sxe2_rxq_rxfc_ctrl(struct sxe2_queue *rxq) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + + if (test_bit(SXE2_VSI_FLAG_RXFCS_ENABLE, rxq->vsi->flags)) { + set_bit(SXE2_RXQ_CRC_STRIP_DISABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u idx_in_pf:%u rxfcs\t" + "enabled.\n", + rxq->vsi->idx_in_dev, rxq->vsi->type, rxq->idx_in_vsi, + rxq->idx_in_pf); + } else { + clear_bit(SXE2_RXQ_CRC_STRIP_DISABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u idx_in_pf:%u rxfcs\t" + "disabled.\n", + rxq->vsi->idx_in_dev, rxq->vsi->type, rxq->idx_in_vsi, + rxq->idx_in_pf); + } +} + +void sxe2_rxq_feature_cfg(struct sxe2_queue *rxq) +{ + struct sxe2_vsi *vsi = rxq->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + + rxq->rx_buf_len = vsi->rxqs.rx_buf_len; + + sxe2_rxq_lro_ctrl(rxq); + + sxe2_rxq_rxfc_ctrl(rxq); + + sxe2_rxq_build_skb_ctrl(vsi, rxq); + + rxq->rx_offset = sxe2_rx_offset_get(rxq); + + LOG_DEBUG_BDF("vsi:%u type:%u rxq[%u][%u] rx_offset:%u rx_buf_len:%u.\n", + vsi->idx_in_dev, vsi->type, rxq->idx_in_vsi, rxq->idx_in_pf, + rxq->rx_offset, rxq->rx_buf_len); +} + +STATIC s32 sxe2_fwc_rxq_ctxt_cfg(struct 
sxe2_adapter *adapter, + struct sxe2_fwc_cfg_rxq_req *req) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RXQ_CFG, req, sizeof(*req), NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("rxq cfg failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +static s32 __sxe2_rxq_ctxt_cfg(struct sxe2_queue *rxq) +{ + s32 ret = 0; + struct sxe2_vsi *vsi = rxq->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_fwc_cfg_rxq_req req = {0}; + + req.pf_idx = adapter->pf_idx; + req.idx_in_dev = cpu_to_le16(rxq->idx_in_pf + + adapter->q_ctxt.rxq_base_idx_in_dev); + + ret = sxe2_rxq_ctxt_fill(rxq, &req.rxq_ctxt); + if (ret) { + LOG_ERROR_BDF("rxq idx_in_pf:%u context fill failed.(err:%d)\n", + rxq->idx_in_pf, ret); + return ret; + } + + ret = sxe2_fwc_rxq_ctxt_cfg(adapter, &req); + if (ret) { + LOG_ERROR_BDF("rxq idx_in_pf:%u context cfg failed.(err:%d)\n", + rxq->idx_in_pf, ret); + return ret; + } + + LOG_INFO_BDF("pf_idx:%u rxq idx_in_dev:%u context cfg success.\n", + req.pf_idx, req.idx_in_dev); + + return sxe2_rx_head_tail_init(rxq); +} + +s32 sxe2_rxqs_stop(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &vsi->adapter->hw; + u16 reg_idx; + + sxe2_for_each_vsi_rxq(vsi, i) + { + reg_idx = vsi->rxqs.q[i]->idx_in_pf; + (void)sxe2_rxq_ctrl_set(adapter, vsi->rxqs.q[i], false, false); + } + + sxe2_for_each_vsi_rxq(vsi, i) + { + reg_idx = vsi->rxqs.q[i]->idx_in_pf; + ret = sxe2_err_code_trans_hw(sxe2_hw_rxq_status_check(hw, + reg_idx, + false)); + if (ret) { + LOG_ERROR_BDF("vsi:%u rxq[%u][%u] set status:%u timeout.\n", + vsi->idx_in_dev, i, reg_idx, false); + break; + } + } + + return ret; +} + +#ifdef SXE2_UNSUPPORT +static s32 sxe2_rx_fb_hw_configure(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_RX_FB, NULL, 0, NULL, 0); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("rx frame buffer configure fail.(err:%d)\n", ret); + + return ret; +} +#endif + +s32 sxe2_rxq_ctxt_cfg(struct sxe2_queue *rxq) +{ + s32 ret; + struct sxe2_adapter *adapter = rxq->vsi->adapter; + u32 cur_val = SXE2_REG_READ(&adapter->hw, SXE2_RXQ_CTRL(rxq->idx_in_pf)); + + SXE2_BUG_ON((cur_val != SXE2_REG_INVALID_VALUE) && + (!!(cur_val & SXE2_RXQ_CTRL_STATUS_ACTIVE) == true)); + + ret = __sxe2_rxq_ctxt_cfg(rxq); + if (ret) { + LOG_ERROR_BDF("vsi:%u rxq:%u cfg failed.(err:%d)\n", + rxq->vsi->idx_in_dev, rxq->idx_in_vsi, ret); + return ret; + } + + (void)sxe2_rxq_ctrl_set(adapter, rxq, true, false); + + return ret; +} + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +STATIC s32 sxe2_alloc_rx_xsk_cfg_bufs(struct sxe2_queue *rxq, u16 count) +{ + struct sxe2_adapter *adapter = rxq->vsi->adapter; + bool ok; + + if (!xsk_buff_can_alloc(rxq->xsk_pool, count)) { + LOG_DEV_WARN("XSK buffer pool does not provide enough\n" + "addresses to fill %d buffers on Rx ring %d\n", + count, rxq->idx_in_vsi); + LOG_DEV_WARN("Change Rx ring/fill queue size to avoid performance\t" + "issues\n"); + return 0; + } + + ok = sxe2_alloc_rx_bufs_zc(rxq, count); + if (!ok) { + LOG_DEV_INFO("Failed to allocate some buffers on XSK buffer\n" + "pool enabled Rx ring %d (pf_q %d)\n", + rxq->idx_in_vsi, rxq->idx_in_pf); + } + + return 0; +} +#endif + +s32 sxe2_vsi_cfg_rxq(struct sxe2_queue *rxq) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = rxq->vsi->adapter; + + 
sxe2_rxq_feature_cfg(rxq);
+
+#ifdef HAVE_XDP_BUFF_RXQ
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+	ret = sxe2_rxq_xdp_cfg(rxq);
+	if (ret) {
+		LOG_ERROR_BDF("rxq[%u] xdp cfg failed.(err:%d)\n", rxq->idx_in_vsi,
+			      ret);
+		goto l_out;
+	}
+#endif
+#endif
+
+	ret = sxe2_rxq_ctxt_cfg(rxq);
+	if (ret) {
+		LOG_ERROR_BDF("rxq[%u] context cfg failed.(err:%d)\n",
+			      rxq->idx_in_pf, ret);
+		goto l_out;
+	}
+
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+	if (rxq->xsk_pool) {
+#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL
+		(void)sxe2_alloc_rx_xsk_cfg_bufs(rxq, SXE2_DESC_IDLE(rxq));
+#else
+		(void)sxe2_alloc_rx_bufs_slow_zc(rxq, SXE2_DESC_IDLE(rxq));
+#endif
+		goto l_out;
+	}
+#endif
+	(void)sxe2_rx_buffers_alloc(rxq, SXE2_DESC_IDLE(rxq));
+
+l_out:
+	return ret;
+}
+
+s32 sxe2_rx_hw_cfg(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+	u16 i;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	if (vsi->netdev && vsi->type == SXE2_VSI_T_PF) {
+		sxe2_set_rx_mode(vsi->netdev);
+		ret = sxe2_vlan_cfg(vsi);
+		if (ret) {
+			LOG_ERROR_BDF("vsi:%u type:%u vlan cfg failed.(err:%d)\n",
+				      vsi->idx_in_dev, vsi->type, ret);
+			return ret;
+		}
+	}
+
+	sxe2_frame_size_set(vsi);
+
+#ifdef SXE2_UNSUPPORT
+	ret = sxe2_rx_fb_hw_configure(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u type:%u rx frame buffer cfg failed.(err:%d)\n",
+			      vsi->idx_in_dev, vsi->type, ret);
+		return ret;
+	}
+#endif
+
+	sxe2_for_each_vsi_rxq(vsi, i)
+	{
+		ret = sxe2_vsi_cfg_rxq(vsi->rxqs.q[i]);
+		if (ret)
+			goto l_err;
+	}
+
+	return ret;
+
+l_err:
+	while (i--) {
+		(void)sxe2_rxq_ctrl_set(adapter, vsi->rxqs.q[i], false, false);
+		sxe2_rx_ring_clean(vsi->rxqs.q[i]);
+	}
+	return ret;
+}
+
+s32 sxe2_rx_cfg(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	s32 ret;
+
+	ret = sxe2_rx_rings_alloc(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u type:%u rx ring resource alloc\t"
+			      "failed.(err:%d)\n",
+			      vsi->idx_in_dev, vsi->type, ret);
+		goto l_out;
+	}
+
+	ret = sxe2_rx_hw_cfg(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u type:%u rx queue cfg failed.(err:%d)\n",
+			      vsi->idx_in_dev, vsi->type, ret);
+		goto l_rxq_cfg_failed;
+	}
+
+	LOG_INFO_BDF("vsi:%u type:%u rx queue configure done.\n", vsi->idx_in_dev,
+		     vsi->type);
+
+	return 0;
+
+l_rxq_cfg_failed:
+	sxe2_rx_rings_free(vsi);
+l_out:
+	return ret;
+}
+
+static void sxe2_rx_first_pkt_sync(struct sxe2_queue *rxq, struct sk_buff *skb)
+{
+	struct sxe2_adapter *adapter = rxq->vsi->adapter;
+
+	if (test_bit(SXE2_RXQ_BUILD_SKB_ENABLED, &rxq->flags)) {
+		unsigned long mask = (unsigned long)SXE2_RX_PAGE_SIZE(rxq) - 1;
+		unsigned long offset = (unsigned long)(skb->data) & mask;
+
+		dma_sync_single_range_for_cpu(rxq->dev, SXE2_SKB_PRIV(skb)->dma,
+					      offset, skb_headlen(skb),
+					      DMA_FROM_DEVICE);
+	} else {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rxq->dev, SXE2_SKB_PRIV(skb)->dma,
+					      skb_frag_off(frag),
+					      skb_frag_size(frag), DMA_FROM_DEVICE);
+	}
+
+	if (unlikely(SXE2_SKB_PRIV(skb)->page_released)) {
+		dma_unmap_page_attrs(rxq->dev, SXE2_SKB_PRIV(skb)->dma,
+				     SXE2_RX_PAGE_SIZE(rxq), DMA_FROM_DEVICE,
+				     SXE2_RX_DMA_ATTR);
+	}
+
+	LOG_DEBUG_BDF("rxq[%u][%u] skb:%p nr_frags:%u dma:%llu sync first packet\n",
+		      rxq->idx_in_vsi, rxq->idx_in_pf, skb,
+		      skb_shinfo(skb)->nr_frags, SXE2_SKB_PRIV(skb)->dma);
+}
+
+static struct sxe2_rx_buf *sxe2_rx_buffer_get(struct sxe2_queue *rxq,
+					      union sxe2_rx_desc *rx_desc,
+					      const u32 size, s32 *pg_cnt)
+{
+	struct sxe2_rx_buf *rx_buffer = &rxq->rx_buf[rxq->next_to_clean];
+	struct sk_buff *skb = rx_buffer->skb;
+#ifdef SXE2_CFG_DEBUG
+	struct sxe2_adapter *adapter =
rxq->vsi->adapter; +#endif + + *pg_cnt = +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + page_count(rx_buffer->page); +#else + 0; +#endif + + prefetchw(rx_buffer->page); + + if (!size) + return rx_buffer; + + if (sxe2_is_non_eop(rxq, rx_desc)) { + if (!skb) + goto l_skip_sync; + } else { + if (skb) + sxe2_rx_first_pkt_sync(rxq, skb); + } + + dma_sync_single_range_for_cpu(rxq->dev, rx_buffer->dma, rx_buffer->pg_offset, + size, DMA_FROM_DEVICE); + +l_skip_sync: + rx_buffer->drv_refcnt--; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] ntc:%u pkt len:%u skb:%p \t" + "drv_refcnt:0x%x pg_offset:%u rx_buffer[%u]:%p get\n", + rxq->idx_in_vsi, rxq->idx_in_pf, rxq->next_to_clean, + size, skb, rx_buffer->drv_refcnt, rx_buffer->pg_offset, + rxq->next_to_clean, rx_buffer); + } +#endif + + return rx_buffer; +} + +STATIC enum pkt_hash_types sxe2_hash_type_get(u16 ptype) +{ + struct sxe2_rx_ptype_info ptype_info = sxe2_rx_ptype_parse(ptype); + + if (!ptype_info.known) + return PKT_HASH_TYPE_NONE; + if (ptype_info.hash_layer == SXE2_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + if (ptype_info.hash_layer == SXE2_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + if (ptype_info.outer_ip == SXE2_PTYPE_OUTER_L2) + return PKT_HASH_TYPE_L2; + + return PKT_HASH_TYPE_NONE; +} + +static void sxe2_lro_stats_update(struct sxe2_queue *rxq, struct sk_buff *skb) +{ + u32 hdr_len; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + if (!SXE2_SKB_PRIV(skb)->lro_cnt) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] not lro packet\n", + rxq->idx_in_vsi, rxq->idx_in_pf); + } +#endif + return; + } + + rxq->stats->rx_stats.rx_lro_count += SXE2_SKB_PRIV(skb)->lro_cnt - 1; + rxq->stats->rx_stats.rx_lro_packets++; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] lro deal\n", rxq->idx_in_vsi, + rxq->idx_in_pf); + } +#endif + + hdr_len = skb_headlen(skb); + + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - hdr_len), + SXE2_SKB_PRIV(skb)->lro_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + rxq->stats->rx_stats.rx_lro_bytes += skb->len; + + SXE2_SKB_PRIV(skb)->lro_cnt = 0; +} + +STATIC inline bool sxe2_hash_from_arfs(u16 flow_id) +{ + return (flow_id >= SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP) && + (flow_id <= SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP); +} + +STATIC void sxe2_skb_hash_set(struct sxe2_queue *rxq, union sxe2_rx_desc *rx_desc, + struct sk_buff *skb, u16 ptype) +{ + u32 rss_hash_vld; + u32 fnav_vld; + u16 flow_id; + u32 hash; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + if (!(rxq->netdev->features & NETIF_F_RXHASH)) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] rss hash offload off netdev\t" + "feature:0x%llx.\n", + rxq->idx_in_vsi, rxq->idx_in_pf, + rxq->netdev->features); + } +#endif + return; + } + + rss_hash_vld = (le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) >> + SXE2_RX_DESC_STATUS2_RSS_VLD) & + SXE2_RX_DESC_RSS_VLD_MASK; + fnav_vld = rx_desc->wb.rxdid_src & SXE2_RX_DESC_FD_VLD_MASK; + flow_id = le16_to_cpu(rx_desc->wb.flow_id); + if (!rss_hash_vld && !(fnav_vld && sxe2_hash_from_arfs(flow_id))) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] rss_vld:0x%x, fnav_vld:0x%x,\t" + "flow_id:%u\n", + 
rxq->idx_in_vsi, rxq->idx_in_pf, rss_hash_vld, + fnav_vld, flow_id); + } +#endif + return; + } + + if (rss_hash_vld) + hash = le32_to_cpu(rx_desc->wb.filter_status); + else + hash = le32_to_cpu(rx_desc->wb.fd_filter_id); + + skb_set_hash(skb, hash, sxe2_hash_type_get(ptype)); +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] hash:0x%x ptype:%u, rss_vld:0x%x,\n" + "fnav_vld:0x%x, flow_id:%u\n", + rxq->idx_in_vsi, rxq->idx_in_pf, hash, ptype, + rss_hash_vld, fnav_vld, flow_id); + } +#endif +} + +static void sxe2_rx_csum_check(struct sxe2_queue *rxq, union sxe2_rx_desc *rx_desc, + struct sk_buff *skb, u16 ptype) +{ + struct sxe2_rx_ptype_info ptype_info; + u16 status0_err; + bool ipv4 = 0; + bool ipv6 = 0; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + if (!(rxq->netdev->features & NETIF_F_RXCSUM)) { + rxq->stats->rx_stats.rx_csum_none++; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] rx csum offload off \t" + "netdev feature:0x%llx.\n", + rxq->idx_in_vsi, rxq->idx_in_pf, + rxq->netdev->features); + } +#endif + return; + } + + status0_err = le16_to_cpu(rx_desc->wb.status0_err); + ptype_info = sxe2_rx_ptype_parse(ptype); + + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + if (!(status0_err & BIT(SXE2_RX_DESC_STATUS0_L3L4_P)) || + !(ptype_info.known && ptype_info.outer_ip)) + goto l_out; + + ipv4 = (ptype_info.outer_ip == SXE2_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2_PTYPE_OUTER_IPV4); + + ipv6 = (ptype_info.outer_ip == SXE2_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2_PTYPE_OUTER_IPV6); + + if (ipv4 && (status0_err & + (BIT(SXE2_RX_DESC_ERROR_IPE) | BIT(SXE2_RX_DESC_ERROR_EIPE)))) + goto checksum_fail; + + if (ipv6 && (le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) & + (BIT(SXE2_RX_DESC_STATUS2_IPV6EXADD)))) + goto l_out; + + if (status0_err & BIT(SXE2_RX_DESC_ERROR_L4E)) + goto checksum_fail; + + if (rx_desc->wb.rxdid_src & BIT(SXE2_RX_DESC_EUDPE)) + goto checksum_fail; + + if (ptype_info.tunnel_type >= SXE2_PTYPE_TUNNEL_IP_GRENAT) { + skb->csum_level = 1; + rxq->stats->rx_stats.rx_csum_unnecessary_inner++; + } + + switch (ptype_info.inner_prot) { + case SXE2_PTYPE_INNER_PROT_UDP: + case SXE2_PTYPE_INNER_PROT_TCP: + case SXE2_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + rxq->stats->rx_stats.rx_csum_unnecessary++; + break; + default: + break; + } + rxq->stats->rx_stats.rx_csum_complete++; + + return; + +checksum_fail: + rxq->stats->rx_stats.rx_csum_err++; + +l_out: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] rxcsum offload:0x%llx status0_err:0x%x\t" + "ptype:0x%x ptype_info:0x%x pktl_hdrl_status2:0x%x \t" + "rxdid_src:%u ipv4:%u ipv6:%u ip_summed:%d\t" + "csum_level:%d \t" + "rx_csum_err:0x%llx\n", + rxq->idx_in_vsi, rxq->idx_in_pf, + rxq->netdev->features & NETIF_F_RXCSUM, status0_err, + ptype, *(u32 *)&ptype_info, + le32_to_cpu(rx_desc->wb.pktl_hdrl_status2), + rx_desc->wb.rxdid_src, ipv4, ipv6, skb->ip_summed, + skb->csum_level, rxq->stats->rx_stats.rx_csum_err); + } +#endif + ; +} + +static void sxe2_skb_hwtstamps_set(struct sxe2_queue *rxq, + union sxe2_rx_desc *rx_desc, struct sk_buff *skb) +{ + sxe2_ptp_rxts_request(rxq, (union sxe2_rx_desc_1588 *)rx_desc, skb); +} + +static inline u16 sxe2_vlan_tag_get(union sxe2_rx_desc *rx_desc) +{ + if (le16_to_cpu(rx_desc->wb.status0_err) & + 
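/*
 * A condensed sketch of the verdict ladder in sxe2_rx_csum_check() above:
 * hardware validation is trusted only when the descriptor says it actually
 * ran (the L3L4_P status bit) and no error bit fired; everything else is
 * left at CHECKSUM_NONE so the stack re-verifies in software. The field
 * names below are hypothetical simplifications of the descriptor bits.
 */
struct rx_csum_verdict {
	bool l3l4_checked;	/* hw parsed and validated the headers */
	bool ip_err;		/* IPv4 header checksum error (outer or inner) */
	bool l4_err;		/* TCP/UDP/SCTP checksum error */
};

static void rx_csum_apply(struct sk_buff *skb, const struct rx_csum_verdict *v)
{
	skb_checksum_none_assert(skb);		/* default: stack re-checks */

	if (!v->l3l4_checked || v->ip_err || v->l4_err)
		return;				/* keep CHECKSUM_NONE */

	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hw already verified */
}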
BIT(SXE2_RX_DESC_STATUS0_L2TAG1_P)) + return le16_to_cpu(rx_desc->wb.l2tag1); + + return 0; +} + +static void sxe2_skb_vlan_tag_put(struct sxe2_queue *rxq, + union sxe2_rx_desc *rx_desc, struct sk_buff *skb) +{ + netdev_features_t features = rxq->netdev->features; + u16 vlan_tag = sxe2_vlan_tag_get(rx_desc); + bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan) { + rxq->stats->rx_stats.rx_vlan_strip++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan) { + rxq->stats->rx_stats.rx_vlan_strip++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); + } +} + +#ifdef HAVE_MACSEC_SUPPORT +static bool sxe2_rx_desc_macsec_check(union sxe2_rx_desc *rx_desc) +{ + return (le32_to_cpu(rx_desc->wb.status0_err) >> + SXE2_RX_DESC_MACSEC_OFFLOAD) & + 0x1; +} +#endif + +static bool sxe2_rx_desc_ipsec_check(union sxe2_rx_desc *rx_desc) +{ + return (le32_to_cpu(rx_desc->wb.status_lrocnt_fdpf_id) >> + SXE2_RX_DESC_IPSEC_PKT) & + 0x1; +} + +void sxe2_skb_field_fill(struct sxe2_queue *rxq, union sxe2_rx_desc *rx_desc, + struct sk_buff *skb, u16 ptype) +{ + sxe2_lro_stats_update(rxq, skb); + + sxe2_skb_hash_set(rxq, rx_desc, skb, ptype); + + skb->protocol = eth_type_trans(skb, rxq->netdev); + + sxe2_rx_csum_check(rxq, rx_desc, skb, ptype); + + sxe2_skb_hwtstamps_set(rxq, rx_desc, skb); + +#ifdef HAVE_MACSEC_SUPPORT + if (sxe2_rx_desc_macsec_check(rx_desc)) + sxe2_macsec_rx(rxq, skb); +#endif + + if (sxe2_rx_desc_ipsec_check(rx_desc)) + sxe2_ipsec_rx(rxq, rx_desc, skb, ptype); + + sxe2_skb_vlan_tag_put(rxq, rx_desc, skb); + + if (netif_is_macvlan(rxq->netdev)) + macvlan_count_rx((const struct macvlan_dev *)netdev_priv(rxq->netdev), + skb->len + ETH_HLEN, true, false); +} + +STATIC void sxe2_rx_page_reuse(struct sxe2_queue *rxq, struct sxe2_rx_buf *old_buff) +{ + struct sxe2_rx_buf *new_buff; + u16 nta = rxq->next_to_alloc; + + new_buff = &rxq->rx_buf[nta]; + + nta++; + rxq->next_to_alloc = (nta < rxq->depth) ? nta : 0; + + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->pg_offset = old_buff->pg_offset; + new_buff->drv_refcnt = old_buff->drv_refcnt; +} + +static bool sxe2_page_is_reusable(struct sxe2_rx_buf *rx_buf, int pg_refcnt) +{ + u32 drv_refcnt = rx_buf->drv_refcnt; + struct page *page = rx_buf->page; + + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + if (unlikely((pg_refcnt - drv_refcnt) > 1)) + return false; +#else + if (rx_buf->pg_offset > SXE2_LAST_OFFSET) + return false; +#endif + + if (unlikely(drv_refcnt == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buf->drv_refcnt = USHRT_MAX; + } + + return true; +} + +static void sxe2_rx_buffer_put(struct sxe2_queue *rxq, union sxe2_rx_desc *rx_desc, + struct sxe2_rx_buf *rx_buf, struct sk_buff *skb, + int pg_refcnt) +{ + u16 ntc = rxq->next_to_clean + 1; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + u8 lro_cnt = sxe2_rx_desc_lro_cnt(rxq, rx_desc); + + ntc = (ntc < rxq->depth) ? 
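/*
 * A minimal sketch of the refcount bookkeeping behind
 * sxe2_page_is_reusable() above: a driver-private count (drv_refcnt)
 * shadows the real page refcount, which is inflated to USHRT_MAX once so
 * the hot path rarely touches the atomic; the page is recycled only while
 * the stack holds at most one reference the driver has not accounted for.
 */
static bool rx_page_can_recycle(struct page *page, u32 *drv_refcnt,
				int real_refcnt)
{
	if (!dev_page_is_reusable(page))	/* remote node or pfmemalloc */
		return false;

	/* unaccounted references mean the stack still reads this page */
	if (real_refcnt - (int)*drv_refcnt > 1)
		return false;

	/* re-inflate rarely so per-packet reuse stays atomic-free */
	if (*drv_refcnt == 1) {
		page_ref_add(page, USHRT_MAX - 1);
		*drv_refcnt = USHRT_MAX;
	}
	return true;
}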
ntc : 0; + rxq->next_to_clean = ntc; + + if (!rx_buf) + return; + + if (lro_cnt) + SXE2_SKB_PRIV(skb)->lro_cnt += lro_cnt; + + if (sxe2_page_is_reusable(rx_buf, pg_refcnt)) { + sxe2_rx_page_reuse(rxq, rx_buf); + } else { + if (!IS_ERR(skb) && SXE2_SKB_PRIV(skb)->dma == rx_buf->dma) + SXE2_SKB_PRIV(skb)->page_released = true; + else + dma_unmap_page_attrs(rxq->dev, rx_buf->dma, + SXE2_RX_PAGE_SIZE(rxq), DMA_FROM_DEVICE, + SXE2_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buf->page, rx_buf->drv_refcnt); + } + + rx_buf->page = NULL; + rx_buf->skb = NULL; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] ntc:%u nta:%u skb:%p nr_frags:%u \t" + "lro_cnt:%u rx_buffer[%u]:%p put\n", + rxq->idx_in_vsi, rxq->idx_in_pf, rxq->next_to_clean, + rxq->next_to_alloc, skb, skb_shinfo(skb)->nr_frags, + SXE2_SKB_PRIV(skb)->lro_cnt, ntc - 1, rx_buf); + } +#endif +} + +STATIC void sxe2_xdp_rx_buffer_put(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf, int pg_refcnt) +{ + u16 ntc = rxq->next_to_clean + 1; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + + ntc = (ntc < rxq->depth) ? ntc : 0; + rxq->next_to_clean = ntc; + + if (!rx_buf) + return; + + if (sxe2_page_is_reusable(rx_buf, pg_refcnt)) { + sxe2_rx_page_reuse(rxq, rx_buf); + } else { + dma_unmap_page_attrs(rxq->dev, rx_buf->dma, SXE2_RX_PAGE_SIZE(rxq), + DMA_FROM_DEVICE, SXE2_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buf->page, rx_buf->drv_refcnt); + } + + rx_buf->page = NULL; + rx_buf->skb = NULL; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] ntc:%u nta:%u rx_buffer[%u]:%p put\n", + rxq->idx_in_vsi, rxq->idx_in_pf, rxq->next_to_clean, + rxq->next_to_alloc, ntc - 1, rx_buf); + } +#endif +} + +static void sxe2_rx_pg_offset_update(struct sxe2_rx_buf *rx_buf, u32 size) +{ +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + rx_buf->pg_offset ^= size; +#else + rx_buf->pg_offset += size; +#endif +} + +static void sxe2_skb_frag_add(struct sxe2_queue *rxq, struct sxe2_rx_buf *rx_buf, + struct sk_buff *skb, u32 size) +{ +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + u32 truesize = SXE2_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(size + rxq->rx_offset); +#endif + + if (!size) + return; + + skb_add_rx_frag(skb, (int)skb_shinfo(skb)->nr_frags, rx_buf->page, + (int)rx_buf->pg_offset, (int)size, truesize); + + sxe2_rx_pg_offset_update(rx_buf, truesize); +} + +static struct sk_buff *sxe2_skb_build(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf, + struct xdp_buff *xdp, + union sxe2_rx_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + u8 metasize = (u8)(xdp->data - xdp->data_meta); +#endif +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + u32 truesize = SXE2_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + +#ifdef HAVE_XDP_BUFF_DATA_META + net_prefetch(xdp->data_meta); +#else + net_prefetch(xdp->data); +#endif + + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + skb_record_rx_queue(skb, rxq->idx_in_vsi); + skb_reserve(skb, xdp->data - xdp->data_hard_start); + (void)__skb_put(skb, (u32)(xdp->data_end - xdp->data)); +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) + skb_metadata_set(skb, metasize); +#endif + if (sxe2_is_non_eop(rxq, rx_desc)) + SXE2_SKB_PRIV(skb)->dma = rx_buf->dma; + + sxe2_rx_pg_offset_update(rx_buf, 
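/*
 * sxe2_rx_pg_offset_update() above is the classic half-page flip: with
 * PAGE_SIZE < 8K each DMA page is carved into two equal buffers, and
 * XOR-ing the offset with the buffer truesize ping-pongs between the two
 * halves without a branch; on larger pages the offset simply advances.
 * Sketch:
 */
static void rx_pg_offset_advance(u32 *pg_offset, u32 truesize,
				 bool half_page_mode)
{
	if (half_page_mode)
		*pg_offset ^= truesize;	/* e.g. 0 <-> 2048 on a 4K page */
	else
		*pg_offset += truesize;	/* walk through the large page */
}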
truesize); + + return skb; +} + +static struct sk_buff *sxe2_skb_construct(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf, + struct xdp_buff *xdp, + union sxe2_rx_desc *rx_desc) +{ + u32 metasize = (u32)(xdp->data - xdp->data_meta); + u32 size = (u32)(xdp->data_end - xdp->data); + u32 headlen; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&rxq->irq_data->napi, SXE2_RX_HDR_SIZE + metasize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_record_rx_queue(skb, rxq->idx_in_vsi); + headlen = size; + if (headlen > SXE2_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, xdp->data, SXE2_RX_HDR_SIZE); + + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { +#ifdef HAVE_XDP_BUFF_DATA_META + skb_metadata_set(skb, (u8)metasize); +#endif + (void)__skb_pull(skb, metasize); + } + + size -= headlen; + if (size) { +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + u32 truesize = SXE2_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(size); +#endif + if (sxe2_is_non_eop(rxq, rx_desc)) + SXE2_SKB_PRIV(skb)->dma = rx_buf->dma; + + skb_add_rx_frag(skb, 0, rx_buf->page, + (int)(rx_buf->pg_offset + headlen), (int)size, + truesize); + + sxe2_rx_pg_offset_update(rx_buf, truesize); + } else { + rx_buf->drv_refcnt++; + } + + return skb; +} + +#ifdef HAVE_XDP_BUFF_FRAME_SZ +static u32 sxe2_rx_frame_truesize(struct sxe2_queue *rxq, u32 __maybe_unused size) +{ + u32 truesize; + +#if (PAGE_SIZE < 8192) + truesize = SXE2_RX_PAGE_SIZE(rxq) / 2; +#else + truesize = rxq->rx_offset ? SKB_DATA_ALIGN(rxq->rx_offset + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + : SKB_DATA_ALIGN(size); +#endif + return truesize; +} +#endif + +#ifdef HAVE_XDP_SUPPORT +static s32 sxe2_xdp_run(struct sxe2_queue *rxq, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) +{ + s32 err, result; + u32 act; + struct sxe2_queue *xdp_ring; + u64 rx_bytes = (u64)(xdp->data_end - xdp->data); + struct sxe2_rxq_xdp_stats *xdp_stats = &rxq->stats->rx_stats.xdp_stats; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#endif + u32 queue_index = (smp_processor_id() % + rxq->vsi->num_xdp_txq); + + xdp_stats->rx_xdp_pkts++; + xdp_stats->rx_xdp_bytes += rx_bytes; + + act = bpf_prog_run_xdp(xdp_prog, xdp); +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("bpf_prog_run_xdp ret:%d\n", act); + +#endif + + switch (act) { + case XDP_PASS: + xdp_stats->rx_xdp_pass++; + result = SXE2_XDP_PASS; + break; + + case XDP_TX: + xdp_ring = rxq->vsi->xdp_rings.q[queue_index]; + result = sxe2_xmit_xdp_buff(xdp, xdp_ring); + if (result == SXE2_XDP_TX) + xdp_stats->rx_xdp_tx_xmit++; + else + xdp_stats->rx_xdp_tx_xmit_fail++; + break; + + case XDP_REDIRECT: + err = xdp_do_redirect(rxq->netdev, xdp, xdp_prog); + result = !err ? 
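/*
 * sxe2_skb_construct() above uses the copy-break idiom: only the protocol
 * headers, sized with eth_get_headlen() and capped at SXE2_RX_HDR_SIZE,
 * are copied into the skb's linear area, while the payload stays in the
 * DMA page as a fragment. A sketch of the header-length decision:
 */
static u32 copy_break_headlen(const struct net_device *dev, void *data,
			      u32 pkt_len, u32 budget)
{
	if (pkt_len <= budget)
		return pkt_len;	/* small frame: copy it whole */

	/* large frame: copy only the parsed headers, frag the rest */
	return eth_get_headlen(dev, data, budget);
}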
SXE2_XDP_REDIR : SXE2_XDP_CONSUMED; + if (err) + xdp_stats->rx_xdp_redirect_fail++; + else + xdp_stats->rx_xdp_redirect++; + + break; + + default: + bpf_warn_invalid_xdp_action(rxq->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + xdp_stats->rx_xdp_unknown++; + trace_xdp_exception(rxq->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = SXE2_XDP_CONSUMED; + xdp_stats->rx_xdp_drop++; + break; + } + + return result; +} +#endif + +void sxe2_xdp_tail_update(struct sxe2_queue *rxq, u32 xdp_res) +{ + u16 tx_qid; + struct sxe2_queue *xdp_ring = NULL; + + if (xdp_res & SXE2_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & SXE2_XDP_TX) { + tx_qid = rxq->idx_in_vsi; + if (tx_qid >= rxq->vsi->num_xdp_txq) + tx_qid = (u16)(tx_qid - rxq->vsi->num_xdp_txq); + + xdp_ring = rxq->vsi->xdp_rings.q[tx_qid]; + wmb(); + writel(xdp_ring->next_to_use, xdp_ring->desc.tail); + } +} + +int sxe2_rxq_irq_clean(struct sxe2_queue *rxq, int budget) +{ + u32 total_rx_bytes = 0; + u32 total_rx_pkts = 0; + u16 idle = SXE2_DESC_IDLE(rxq); + u32 offset = rxq->rx_offset; + struct sk_buff *skb = rxq->skb; + u32 xdp_res = 0; + u32 xdp_xmit = 0; + struct bpf_prog *xdp_prog = NULL; + struct xdp_buff xdp; + bool failure; + struct sxe2_adapter *adapter = rxq->vsi->adapter; +#ifdef SXE2_CFG_DEBUG +#define SXE2_LOG_STR_LEN 32 + char str[SXE2_LOG_STR_LEN] = {0}; +#endif +#ifdef HAVE_XDP_BUFF_RXQ + xdp.rxq = &rxq->xdp_rxq; +#endif +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + xdp.frame_sz = sxe2_rx_frame_truesize(rxq, 0); +#else + xdp.frame_sz = 0; +#endif +#endif + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("rxq[%u][%u] ntc:%u ntu:%u \t" + "nta:%u budget:%u rxq_offset:%u \t" + "descriptor idle:%u skb:%p vsi:%p\n", + rxq->idx_in_vsi, rxq->idx_in_pf, rxq->next_to_clean, + rxq->next_to_use, rxq->next_to_alloc, budget, offset, + idle, skb, rxq->vsi); + } +#endif + + while (likely(total_rx_pkts < (u32)budget)) { + union sxe2_rx_desc *rx_desc; + union sxe2_rx_desc_fnav *rx_fnav_desc; + struct sxe2_rx_buf *rx_buf; + unsigned char *hard_start; + u32 size; + s32 pg_refcnt; + u16 rx_ptype; + + rx_desc = SXE2_RX_DESC(rxq, rxq->next_to_clean); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + (void)snprintf(str, SXE2_LOG_STR_LEN - 1, + "rxq[%d][%d] desc[%d]", rxq->idx_in_vsi, + rxq->idx_in_pf, rxq->next_to_clean); + DATA_DUMP(rx_desc, sizeof(*rx_desc), str); + } +#endif + + if (!sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_STATUS0_DD))) + break; + + dma_rmb(); + + sxe2_trace(rx_pkt_clean_begin, rxq); + + if ((rx_desc->wb.rxdid_src & SXE2_RX_DESC_RXDID_MASK) == + SXE2_RX_DESC_ID_FD || + !rxq->netdev) { + adapter->fnav_ctxt.pkt_err_cnt++; + rx_fnav_desc = (union sxe2_rx_desc_fnav *)rx_desc; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_INFO_BDF("flowid=%u, fdid=%u, fd_prog_sts=%u", + rx_fnav_desc->wb.flowid, + rx_fnav_desc->wb.fdid, + rx_fnav_desc->wb.fd_prog_sts); + } +#endif + + sxe2_rx_buffer_put(rxq, rx_desc, NULL, NULL, 0); + idle++; + continue; + } + + size = le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) & + SXE2_RX_DESC_PKT_LEN_MASK; + + rx_buf = sxe2_rx_buffer_get(rxq, rx_desc, size, &pg_refcnt); + + if (!size) { + xdp.data = NULL; + xdp.data_end = NULL; + xdp.data_hard_start = NULL; + xdp.data_meta = NULL; + goto construct_skb; + } + + hard_start = page_address(rx_buf->page) + rx_buf->pg_offset - offset; +#ifdef HAVE_XDP_SUPPORT + 
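/*
 * sxe2_xdp_tail_update() above batches the two expensive XDP completion
 * steps: one xdp_do_flush_map() per NAPI poll covers every redirected
 * frame, and one doorbell write covers every XDP_TX frame, with wmb()
 * ordering the descriptor stores ahead of the tail write. A sketch with
 * hypothetical verdict bits:
 */
#define XDP_RES_TX	BIT(0)
#define XDP_RES_REDIR	BIT(1)

static void xdp_complete_poll(u32 verdicts, u16 next_to_use,
			      void __iomem *tail)
{
	if (verdicts & XDP_RES_REDIR)
		xdp_do_flush_map();	/* flush all redirects at once */

	if (verdicts & XDP_RES_TX) {
		wmb();			/* descriptors before doorbell */
		writel(next_to_use, tail);
	}
}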
xdp_prepare_buff(&xdp, hard_start, (int)offset, (int)size, true); +#endif + +#ifdef HAVE_XDP_BUFF_FRAME_SZ +#if (PAGE_SIZE > SXE2_PAGE_SIZE_4KB) + xdp.frame_sz = sxe2_rx_frame_truesize(rxq, size); +#endif +#endif + + xdp_prog = READ_ONCE(rxq->xdp_prog); + if (!xdp_prog) + goto construct_skb; + +#ifdef HAVE_XDP_SUPPORT + xdp_res = (u32)sxe2_xdp_run(rxq, &xdp, xdp_prog); + if (!xdp_res) + goto construct_skb; +#endif + + if (xdp_res & (SXE2_XDP_TX | SXE2_XDP_REDIR)) { +#ifndef HAVE_XDP_BUFF_FRAME_SZ + unsigned int truesize; +#if (PAGE_SIZE < SXE2_PAGE_SIZE_8KB) + truesize = SXE2_RX_PAGE_SIZE(rxq) / 2; +#else + truesize = SKB_DATA_ALIGN(sxe2_rx_offset_get(rxq) + size); +#endif +#endif + xdp_xmit |= xdp_res; +#ifdef HAVE_XDP_BUFF_FRAME_SZ + sxe2_rx_pg_offset_update(rx_buf, xdp.frame_sz); +#else + sxe2_rx_pg_offset_update(rx_buf, truesize); +#endif + } else { + rx_buf->drv_refcnt++; + } + total_rx_bytes += size; + total_rx_pkts++; + + idle++; + sxe2_xdp_rx_buffer_put(rxq, rx_buf, pg_refcnt); + continue; + +construct_skb: + if (skb) { + sxe2_skb_frag_add(rxq, rx_buf, skb, size); + } else if (likely(xdp.data)) { + if (test_bit(SXE2_RXQ_BUILD_SKB_ENABLED, &rxq->flags)) + skb = sxe2_skb_build(rxq, rx_buf, &xdp, rx_desc); + else + skb = sxe2_skb_construct(rxq, rx_buf, &xdp, rx_desc); + } + + if (!skb) { + rxq->stats->rx_stats.rx_buff_alloc_err++; + if (rx_buf) + rx_buf->drv_refcnt++; + break; + } + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + SKB_DUMP(skb); +#endif + sxe2_rx_buffer_put(rxq, rx_desc, rx_buf, skb, pg_refcnt); + idle++; + + if (sxe2_is_non_eop(rxq, rx_desc)) { + rxq->rx_buf[rxq->next_to_clean].skb = skb; + rxq->stats->rx_stats.rx_non_eop_descs++; + continue; + } + + if (unlikely(sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_ERROR_PA_ERR)))) { + rxq->stats->rx_stats.rx_pa_err++; + } + + if (unlikely(sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_ERROR_RXE)) || + sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_ERROR_OVERSIZE)))) { + rxq->stats->rx_stats.rx_pkts_sw_drop++; + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + total_rx_bytes += skb->len; + + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_status1) & + SXE2_RX_DESC_PTYPE_MASK; + + sxe2_skb_field_fill(rxq, rx_desc, skb, rx_ptype); + + sxe2_trace(rx_pkt_clean_end, rxq); + + (void)napi_gro_receive(&rxq->irq_data->napi, skb); + skb = NULL; + + total_rx_pkts++; + } + + failure = sxe2_rx_buffers_alloc(rxq, idle); + + if (xdp_prog) + sxe2_xdp_tail_update(rxq, xdp_xmit); + + rxq->skb = skb; + + sxe2_rxq_stats_update(rxq, total_rx_pkts, total_rx_bytes); + + return failure ? 
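/*
 * Two contracts the poll loop above relies on: a descriptor is trusted
 * only after its DD bit is observed, with dma_rmb() separating that test
 * from the remaining writeback reads; and the return value follows the
 * NAPI convention, where reporting the full budget keeps the queue in
 * polled mode (also used when buffer refill fails, so the ring is
 * revisited soon), while reporting fewer packets lets the core re-enable
 * the queue interrupt.
 */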
+		       budget : (int)total_rx_pkts;
+}
+
+static bool sxe2_rx_buf_len_is_valid(u16 rx_buf_len)
+{
+	return ((rx_buf_len >= SXE2_RX_BUF_LEN_MIN) &&
+		(rx_buf_len <= SXE2_RX_BUF_LEN_MAX));
+}
+
+static bool sxe2_rxq_cfg_param_is_valid(struct sxe2_adapter *adapter,
+					struct sxe2_rxq_cfg_params *params)
+{
+	struct sxe2_ctxt_elem *ctxt;
+	u16 i;
+	struct sxe2_vsi *vsi;
+
+	if (!sxe2_vsi_id_is_valid(adapter, params->vsi_id)) {
+		LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id);
+		return false;
+	}
+
+	vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id);
+	if (!vsi) {
+		LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id);
+		return false;
+	}
+
+	if (params->q_cnt > vsi->rxqs.q_cnt || !params->q_cnt) {
+		LOG_ERROR_BDF("rxq cnt:%u invalid max:%u vsi_id:%d.\n",
+			      params->q_cnt, vsi->rxqs.q_cnt, params->vsi_id);
+		return false;
+	}
+
+	for (i = 0; i < params->q_cnt; i++) {
+		ctxt = &params->cfg[i];
+		if (ctxt->queue_id >= vsi->rxqs.q_cnt ||
+		    !sxe2_queue_depth_is_valid(ctxt->depth) ||
+		    !sxe2_rx_buf_len_is_valid(ctxt->buf_len)) {
+			LOG_ERROR_BDF("ctxt vsi_id:%u vsi_id_in_dev:%d\t"
+				      "queue_id:%u\n"
+				      "rxq cnt:%u depth:%u buf_len:%u\t"
+				      "max_frame_size:%u.\n",
+				      params->vsi_id, vsi->idx_in_dev,
+				      ctxt->queue_id, vsi->rxqs.q_cnt, ctxt->depth,
+				      ctxt->buf_len, params->max_frame_size);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool sxe2_rxq_dis_param_is_valid(struct sxe2_adapter *adapter,
+					struct sxe2_rxq_dis_params *params)
+{
+	struct sxe2_vsi *vsi;
+
+	if (!sxe2_vsi_id_is_valid(adapter, params->vsi_id)) {
+		LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id);
+		return false;
+	}
+
+	vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id);
+	if (!vsi) {
+		LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id);
+		return false;
+	}
+
+	if (params->q_idx >= vsi->rxqs.q_cnt) {
+		LOG_ERROR_BDF("rxq id:%u invalid max:%u vsi_id:%d.\n",
+			      params->q_idx, vsi->rxqs.q_cnt, params->vsi_id);
+		return false;
+	}
+
+	return true;
+}
+
+s32 sxe2_rxq_cfg_ena_common_handle(struct sxe2_adapter *adapter,
+				   struct sxe2_rxq_cfg_params *params)
+{
+	u16 i = 0;
+	struct sxe2_ctxt_elem *ctxt;
+	struct sxe2_queue *rxq;
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+
+	if (!adapter || !params) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("rx cfg enable params invalid.\n");
+		goto l_end;
+	}
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (!sxe2_rxq_cfg_param_is_valid(adapter, params)) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("rx cfg enable params invalid.\n");
+		goto l_err;
+	}
+
+	vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id);
+	if (!vsi) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("vsi_id:%d vsi null.\n", params->vsi_id);
+		goto l_err;
+	}
+
+	vsi->rxqs.max_frame = params->max_frame_size;
+	for (i = 0; i < params->q_cnt; i++) {
+		ctxt = &params->cfg[i];
+		rxq = vsi->rxqs.q[ctxt->queue_id];
+
+		rxq->depth = ctxt->depth;
+		rxq->rx_buf_len = ctxt->buf_len;
+		rxq->desc.dma = ctxt->dma_addr;
+		rxq->desc_size = ctxt->desc_size;
+
+		if (ctxt->keep_crc_en)
+			set_bit(SXE2_RXQ_CRC_STRIP_DISABLED, &rxq->flags);
+		else
+			clear_bit(SXE2_RXQ_CRC_STRIP_DISABLED, &rxq->flags);
+
+		if (ctxt->lro_en) {
+			set_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags);
+			rxq->max_lro_size = ctxt->max_lro_size;
+		} else {
+			clear_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags);
+			rxq->max_lro_size = 0;
+		}
+
+		if (ctxt->split_en) {
+			set_bit(SXE2_RXQ_SPLIT_ENABLED, &rxq->flags);
+			rxq->split_type_mask = ctxt->split_type_mask;
+			rxq->hdr_len = ctxt->hdr_len;
+		} else {
+			clear_bit(SXE2_RXQ_SPLIT_ENABLED, &rxq->flags);
+		}
+
+		ret = sxe2_rxq_ctxt_cfg(rxq);
+		if (ret) {
+			LOG_DEV_WARN("vsi:%d i:%u rxq:%u cfg enable failed.\n",
+				     vsi->id_in_pf, i, ctxt->queue_id);
+			ret = -SXE2_VF_ERR_HANDLE_ERROR;
+			goto l_err;
+		}
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	goto l_end;
+
+l_err:
+	while (i) {
+		i--;
+		ctxt = &params->cfg[i];
+		rxq = vsi->rxqs.q[ctxt->queue_id];
+		if (sxe2_rxq_ctrl_set(adapter, rxq, false, true))
+			LOG_DEV_ERR("vsi:%u i:%u rxq:%u disable failed.\n",
+				    vsi->id_in_pf, i, ctxt->queue_id);
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_rxq_disable_common_handle(struct sxe2_adapter *adapter,
+				   struct sxe2_rxq_dis_params *params)
+{
+	struct sxe2_queue *rxq;
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (!adapter || !params) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("rx queue disable params invalid.\n");
+		goto l_end;
+	}
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (!sxe2_rxq_dis_param_is_valid(adapter, params)) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("rx queue disable params invalid.\n");
+		goto l_unlock;
+	}
+
+	vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id);
+	if (!vsi) {
+		LOG_ERROR_BDF("vsi_id:%d vsi null.\n", params->vsi_id);
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+	rxq = vsi->rxqs.q[params->q_idx];
+
+	(void)sxe2_rxq_ctrl_set(adapter, rxq, false, false);
+	ret = sxe2_err_code_trans_hw(sxe2_hw_rxq_status_check(hw,
+							      rxq->idx_in_pf,
+							      false));
+	if (ret)
+		LOG_ERROR_BDF("vsi:%u rxq[%u][%u] set status:%u timeout.\n",
+			      vsi->idx_in_dev, params->q_idx, rxq->idx_in_pf, false);
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_rxq_ctrl_set(struct sxe2_adapter *adapter, struct sxe2_queue *rxq,
+		      bool enable, bool wait)
+{
+	struct sxe2_vsi *vsi;
+	s32 ret = 0;
+	bool rxq_cde = true;
+
+	if (!rxq || !rxq->vsi) {
+		ret = -EINVAL;
+		LOG_ERROR_BDF("invalid rxq:%p.\n", rxq);
+		goto l_out;
+	}
+
+	vsi = rxq->vsi;
+
+	if (test_bit(SXE2_VSI_FLAG_FC_ON, vsi->flags))
+		rxq_cde = false;
+
+	(void)sxe2_hw_rxq_ctrl(&adapter->hw, rxq->idx_in_pf, enable, wait, rxq_cde);
+
+l_out:
+	return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9390ce39e3ce5690877f02319a68c635b7f304f
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_rx.h
@@ -0,0 +1,1078 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_rx.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_RX_H__ +#define __SXE2_RX_H__ + +#define SXE2_FRAME_SIZE_MIN 64 + +#define SXE2_RX_HDR_SIZE 256 + +#define SXE2_RX_BUF_LEN_MIN 1024 +#define SXE2_RX_BUF_LEN_MAX (16 * 1024 - 128) + +#define SXE2_RXBUF_3072 3072 +#define SXE2_RXBUF_2048 2048 +#define SXE2_RXBUF_1536 1536 + +#if (PAGE_SIZE < 8192) +#define SXE2_PAD_RX_LEN_EXCEED_2K \ + ((unsigned int)(NET_SKB_PAD + SXE2_RXBUF_1536) > \ + SKB_WITH_OVERHEAD(SXE2_RXBUF_2048)) + +#define SXE2_SKB_PAD_VALUE sxe2_skb_pad_cal() + +#else +#define SXE2_PAD_RX_LEN_EXCEED_2K false +#define SXE2_SKB_PAD_VALUE (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +#define SXE2_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define SXE2_RX_DESC(rxq, ridx) \ + (&(((union sxe2_rx_desc *)((rxq)->desc.base_addr))[ridx])) + +#define SXE2_RX_DESC_SIZE(rxq) \ + (ALIGN((rxq)->depth * sizeof(union sxe2_rx_desc), PAGE_SIZE)) + +#define SXE2_RX_BUF_LEN(rxq) \ + ({ \ + const typeof(*(rxq)) *__rq = (rxq); \ + __rq->depth * sizeof(*__rq->rx_buf); \ + }) + +#define SXE2_RX_DESC_EUDPE (6) + +#define SXE2_HBUFF_LEN_MIN (2) + +#define SXE2_RX_DESC_PKT_LEN_SHIFT (0) + +#define SXE2_RX_DESC_PKT_LEN_MASK (0x3FFF) + +#define SXE2_RX_DESC_LROCNT_MASK (0xF) +#define SXE2_RX_DESC_LROCNT_SHIFT (0) + +#define SXE2_RX_DESC_RXDID_MASK (0x7) + +#define SXE2_RX_DESC_PTYPE_MASK (0x3FF) + +#define SXE2_RX_DESC_PHY_PORT_MASK (0x3) + +#define SXE2_RX_DESC_RSS_VLD_MASK (0x1) + +#define SXE2_RX_DESC_FD_VLD_MASK BIT(5) + +#define SXE2_RX_DESC_MACSEC_OFFLOAD (5) + +#define SXE2_RX_DESC_IPSEC_PKT (21) +#define SXE2_RX_DESC_IPSEC_ENGINE (22) +#define SXE2_RX_DESC_IPSEC_MODE (23) +#define SXE2_RX_DESC_IPSEC_STATUS (24) +#define SXE2_RX_DESC_IPSEC_STATUS_MASK (0x7) +#define SXE2_RX_DESC_IPSEC_STATUS_GET(qw) \ + (((qw) >> SXE2_RX_DESC_IPSEC_STATUS) & SXE2_RX_DESC_IPSEC_STATUS_MASK) + +#define SXE2_DESCRIPTOR_SIZE_16B (16) +#define SXE2_DESCRIPTOR_SIZE_32B (32) + +enum sxe2_descriptor_type { SXE2_DESCRIPTOR_16 = 0, SXE2_DESCRIPTOR_32 }; + +enum sxe2_rx_header_split_mode { + SXE2_RX_HSPLIT_NO_SPLIT = 0, + SXE2_RX_HSPLIT_CONDITION, + SXE2_RX_HSPLIT_NO_CONDITION, +}; + +enum sxe2_rx_header_split0_type { + SXE2_RX_HSPLIT_0_NO_SPLIT = 0, + SXE2_RX_HSPLIT_0_SPLIT_L2 = 1, + SXE2_RX_HSPLIT_0_SPLIT_IP = 2, + SXE2_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + SXE2_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +enum sxe2_rx_header_split1_type { + SXE2_RX_HSPLIT_1_NO_SPLIT = 0, + SXE2_RX_HSPLIT_1_SPLIT_L2 = 1, + SXE2_RX_HSPLIT_1_SPLIT_ALWAYS = 2, +}; + +enum { + SXE2_RX_LRO_DESC_MAX_1 = 0, + SXE2_RX_LRO_DESC_MAX_4, + SXE2_RX_LRO_DESC_MAX_8, + SXE2_RX_LRO_DESC_MAX_16, + SXE2_RX_LRO_DESC_MAX_32, + SXE2_RX_LRO_DESC_MAX_48, + SXE2_RX_LRO_DESC_MAX_64, +}; + +enum { + SXE2_RX_DESC_LOW_WATERLINE_1 = 1, + SXE2_RX_DESC_LOW_WATERLINE_2, + SXE2_RX_DESC_LOW_WATERLINE_3, + SXE2_RX_DESC_LOW_WATERLINE_4, + SXE2_RX_DESC_LOW_WATERLINE_5, + SXE2_RX_DESC_LOW_WATERLINE_6, + SXE2_RX_DESC_LOW_WATERLINE_7, +}; + +enum sxe2_rx_desc_status0_err { + SXE2_RX_DESC_STATUS0_DD = 0, + SXE2_RX_DESC_STATUS0_EOP = 1, + SXE2_RX_DESC_STATUS0_L2TAG1_P = 2, + SXE2_RX_DESC_STATUS0_L3L4_P = 3, + SXE2_RX_DESC_STATUS0_CRCP = 4, + SXE2_RX_DESC_STATUS0_SECP = 5, + SXE2_RX_DESC_STATUS0_SECTAG = 6, + + SXE2_RX_DESC_ERROR_RXE = 7, + SXE2_RX_DESC_ERROR_PA_ERR = 8, + SXE2_RX_DESC_ERROR_PKT_HBO = 9, + SXE2_RX_DESC_ERROR_IPE = 10, + SXE2_RX_DESC_ERROR_L4E = 11, + SXE2_RX_DESC_ERROR_EIPE = 12, + SXE2_RX_DESC_ERROR_OVERSIZE = 13, + SXE2_RX_DESC_ERROR_SEC_ERR = 14, +}; + 
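/*
 * All of the status0/error positions above index into one little-endian
 * 16-bit writeback word, so a single helper covers both the done/EOP
 * flags and the error flags. A sketch of such a test (the driver's own
 * sxe2_desc_status0_err_test() is defined elsewhere and may differ),
 * used e.g. as:
 *
 *   desc_status0_bit(desc->wb.status0_err, BIT(SXE2_RX_DESC_STATUS0_DD));
 */
static inline bool desc_status0_bit(__le16 status0_err, u16 mask)
{
	return !!(le16_to_cpu(status0_err) & mask);
}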
+enum sxe2_rx_desc_status1 { + SXE2_RX_DESC_STATUS1_START = 10, + SXE2_RX_DESC_STATUS1_SECE = 0 + SXE2_RX_DESC_STATUS1_START, + SXE2_RX_DESC_STATUS1_EXT_UDP_0 = 1 + SXE2_RX_DESC_STATUS1_START, + SXE2_RX_DESC_STATUS1_UMBCAST = 2 + SXE2_RX_DESC_STATUS1_START, + SXE2_RX_DESC_STATUS1_FLTSTAT = 4 + SXE2_RX_DESC_STATUS1_START, +}; + +enum sxe2_rx_desc_status2 { + SXE2_RX_DESC_STATUS2_START = 26, + SXE2_RX_DESC_STATUS2_FIM = 0 + SXE2_RX_DESC_STATUS2_START, + SXE2_RX_DESC_STATUS2_LPBK = 1 + SXE2_RX_DESC_STATUS2_START, + SXE2_RX_DESC_STATUS2_IPV6EXADD = 2 + SXE2_RX_DESC_STATUS2_START, + SXE2_RX_DESC_STATUS2_RSS_VLD = 3 + SXE2_RX_DESC_STATUS2_START, + SXE2_RX_DESC_STATUS2_ACL_HIT = 4 + SXE2_RX_DESC_STATUS2_START, + SXE2_RX_DESC_STATUS2_INT_UDP_0 = 5 + SXE2_RX_DESC_STATUS2_START, +}; + +struct sxe2_ctxt_elem { + u64 dma_addr; + u32 max_lro_size; + u32 split_type_mask; + u16 hdr_len; + u16 queue_id; + u16 depth; + u16 buf_len; + u8 lro_en; + u8 keep_crc_en; + u8 split_en; + u8 desc_size; +}; + +struct sxe2_rxq_cfg_params { + u16 q_cnt; + u16 vsi_id; + u16 max_frame_size; + u8 rsv[2]; + struct sxe2_ctxt_elem cfg[]; +}; + +struct sxe2_rxq_dis_params { + u16 vsi_id; + u16 q_idx; +}; + +union sxe2_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + u8 rxdid_src; + u8 mirror; + __le16 l2tag1; + __le32 filter_status; + + __le16 status0_err; + __le16 ptype_status1; + __le32 pktl_hdrl_status2; + + __le32 status_lrocnt_fdpf_id; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + u8 acl_pf_id; + u8 sw_pf_id; + __le16 flow_id; + __le32 fd_filter_id; + + } wb; +}; + +union sxe2_rx_desc_1588 { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + u8 rxdid_src_fd_eudpe; + u8 mirror; + __le16 l2_tag1; + __le32 filter_status; + + __le64 status_err_ptype_len; + + __le16 ext_status; + u8 rsvd; + u8 ts_l; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + __le32 ts_h; + __le32 fd_filter_id; + + } wb; +}; + +union sxe2_rx_desc_fnav { + struct { + __le32 rxdid : 3; + __le32 rsv1 : 8; + __le32 fd_hlt_sts : 4; + __le32 flowid : 17; + __le32 fdid; + + __le64 dd : 1; + __le64 prog_id : 2; + __le64 rsv2 : 1; + __le64 fd_prog_sts : 3; + __le64 flt_addr : 15; + __le64 ht_table_id : 6; + __le64 pkt_prof : 7; + __le64 rsv3 : 3; + __le64 bucket_hash : 26; + } wb; +}; + +enum { + SXE2_RX_DESC_ID_16B = 0, + SXE2_RX_DESC_ID_32B, + SXE2_RX_DESC_ID_1588, + SXE2_RX_DESC_ID_FD, +}; + +enum { + SXE2_PTYPE_OUTER_L2 = 0, + SXE2_PTYPE_OUTER_IP = 1, +}; + +enum { + SXE2_PTYPE_OUTER_NONE = 0, + SXE2_PTYPE_OUTER_IPV4 = 1, + SXE2_PTYPE_OUTER_IPV6 = 2, +}; + +enum { + SXE2_PTYPE_NOT_FRAG = 0, + SXE2_PTYPE_FRAG = 1, +}; + +enum { + SXE2_PTYPE_TUNNEL_NONE = 0, + SXE2_PTYPE_TUNNEL_IP_IP = 1, + SXE2_PTYPE_TUNNEL_IP_MAC = 2, + SXE2_PTYPE_TUNNEL_IP_MAC_VLAN = 3, + SXE2_PTYPE_TUNNEL_IP_GTPU = 4, + SXE2_PTYPE_TUNNEL_IP_GRENAT = 5, + SXE2_PTYPE_TUNNEL_IP_GRENAT_MAC = 6, + SXE2_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 7, +}; + +enum { + SXE2_PTYPE_TUNNEL_END_NONE = 0, + SXE2_PTYPE_TUNNEL_END_IPV4 = 1, + SXE2_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum { + SXE2_PTYPE_INNER_PROT_NONE = 0, + SXE2_PTYPE_INNER_PROT_UDP = 1, + SXE2_PTYPE_INNER_PROT_TCP = 2, + SXE2_PTYPE_INNER_PROT_SCTP = 3, + SXE2_PTYPE_INNER_PROT_ICMP = 4, + SXE2_PTYPE_INNER_PROT_TIMESYNC = 5, +}; + +enum { + SXE2_PTYPE_PAYLOAD_LAYER_NONE = 0, + SXE2_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + SXE2_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + SXE2_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +struct sxe2_rx_ptype_info { + u32 known : 1; + 
u32 outer_ip : 1; + u32 outer_ip_ver : 2; + u32 outer_frag : 1; + u32 tunnel_type : 3; + u32 tunnel_end_prot : 2; + u32 tunnel_end_frag : 1; + u32 inner_prot : 4; + u32 payload_layer : 3; + u32 hash_layer : 3; +}; + +struct sxe2_skb_private_data { + dma_addr_t dma; + u16 lro_cnt; + bool page_released; +}; + +s32 sxe2_rx_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_rx_init(struct sxe2_vsi *vsi); + +s32 sxe2_vlan_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_rx_hw_cfg(struct sxe2_vsi *vsi); + +static inline u32 sxe2_rx_page_order(struct sxe2_queue *rxq) +{ + u32 order = 0; + +#if (PAGE_SIZE < 8192) + if (rxq->rx_buf_len > (PAGE_SIZE / 2)) + order = 1; +#endif + return order; +} + +static inline bool sxe2_is_non_eop(struct sxe2_queue *rxq, + union sxe2_rx_desc *rx_desc) +{ + if (likely(sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_STATUS0_EOP)))) { + return false; + } + + return true; +} + +static inline u8 sxe2_rx_desc_lro_cnt(struct sxe2_queue *rxq, + union sxe2_rx_desc *rx_desc) +{ + if (test_bit(SXE2_RXQ_LRO_ENABLED, &rxq->flags)) { + return (le32_to_cpu(rx_desc->wb.status_lrocnt_fdpf_id) >> + SXE2_RX_DESC_LROCNT_SHIFT) & + SXE2_RX_DESC_LROCNT_MASK; + } + + return 0; +} + +#define SXE2_PTYPE_UNUSED_ENTRY(PTYPE) \ + [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +#define SXE2_PTYPE(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, \ + PL, HL) \ + [PTYPE] = { 1, \ + SXE2_PTYPE_OUTER_##OUTER_IP, \ + SXE2_PTYPE_OUTER_##OUTER_IP_VER, \ + SXE2_PTYPE_##OUTER_FRAG, \ + SXE2_PTYPE_TUNNEL_##T, \ + SXE2_PTYPE_TUNNEL_END_##TE, \ + SXE2_PTYPE_##TEF, \ + SXE2_PTYPE_INNER_PROT_##I, \ + SXE2_PTYPE_PAYLOAD_LAYER_##PL, \ + SXE2_PTYPE_PAYLOAD_LAYER_##HL } + +#define SXE2_PTYPE_NOF SXE2_PTYPE_NOT_FRAG +#define SXE2_PTYPE_FRG SXE2_PTYPE_FRAG + +static const struct sxe2_rx_ptype_info sxe2_ptype_table[BIT(10)] = { + SXE2_PTYPE_UNUSED_ENTRY(0), + SXE2_PTYPE(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(3), SXE2_PTYPE_UNUSED_ENTRY(4), + SXE2_PTYPE_UNUSED_ENTRY(5), + SXE2_PTYPE(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(8), SXE2_PTYPE_UNUSED_ENTRY(9), + SXE2_PTYPE(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(12), SXE2_PTYPE_UNUSED_ENTRY(13), + SXE2_PTYPE_UNUSED_ENTRY(14), SXE2_PTYPE_UNUSED_ENTRY(15), + SXE2_PTYPE_UNUSED_ENTRY(16), SXE2_PTYPE_UNUSED_ENTRY(17), + SXE2_PTYPE_UNUSED_ENTRY(18), SXE2_PTYPE_UNUSED_ENTRY(19), + SXE2_PTYPE_UNUSED_ENTRY(20), SXE2_PTYPE_UNUSED_ENTRY(21), + + SXE2_PTYPE(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(25), + SXE2_PTYPE(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(32), + SXE2_PTYPE(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(35, 
IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(39), + SXE2_PTYPE(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3, PAY3), + + SXE2_PTYPE(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(47), + SXE2_PTYPE(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(54), + SXE2_PTYPE(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2_PTYPE(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(62), + SXE2_PTYPE(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(69), + SXE2_PTYPE(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2_PTYPE(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(77), + SXE2_PTYPE(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(84), + SXE2_PTYPE(85, IP, IPV4, NOF, 
IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(91), + SXE2_PTYPE(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(98), + SXE2_PTYPE(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(105), + SXE2_PTYPE(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3, PAY3), + + SXE2_PTYPE(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(113), + SXE2_PTYPE(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(120), + SXE2_PTYPE(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2_PTYPE(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(128), + SXE2_PTYPE(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(135), + SXE2_PTYPE(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(137, IP, 
IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, + PAY3, PAY3), + + SXE2_PTYPE(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, + PAY3, PAY3), + SXE2_PTYPE(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, + PAY3, PAY3), + SXE2_PTYPE(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(143), + SXE2_PTYPE(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, + PAY4, PAY4), + SXE2_PTYPE(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, + PAY4, PAY3), + + SXE2_PTYPE(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, + PAY3, PAY3), + SXE2_PTYPE(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, + PAY3, PAY3), + SXE2_PTYPE(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2_PTYPE_UNUSED_ENTRY(150), + SXE2_PTYPE(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, + PAY4), + SXE2_PTYPE(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, + PAY4, PAY4), + SXE2_PTYPE(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, + PAY4, PAY3), + SXE2_PTYPE_UNUSED_ENTRY(154), SXE2_PTYPE_UNUSED_ENTRY(155), + SXE2_PTYPE_UNUSED_ENTRY(156), SXE2_PTYPE_UNUSED_ENTRY(157), + SXE2_PTYPE_UNUSED_ENTRY(158), SXE2_PTYPE_UNUSED_ENTRY(159), + + SXE2_PTYPE(160, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(161, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(162, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(163, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(164, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(165, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(166, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(167, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + + [168 ... 254] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + [255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + [256 ... 271] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(272, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(273, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(274, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(275, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(276, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(277), + SXE2_PTYPE(278, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + + [279 ... 
326] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(327, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(328, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(329, IP, IPV4, NOF, IP_GTPU, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(330, IP, IPV6, NOF, IP_GTPU, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(331, IP, IPV4, NOF, IP_GTPU, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(332, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(333, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(334, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(335, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(336, IP, IPV6, NOF, IP_GTPU, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(337, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(338, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(339, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(340, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(341, IP, IPV4, NOF, IP_GTPU, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(342, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(343, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(344, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(345, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(346, IP, IPV6, NOF, IP_GTPU, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(347, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(348, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(349, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(350, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(351, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(352, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(353, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(354, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + + [355 ... 359] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(360, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(361, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + + [362 ... 370] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(371, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + [372 ... 380] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + SXE2_PTYPE(381, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + [382 ... 390] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + SXE2_PTYPE(391, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + + [392 ... 
395] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(396, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(397, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(398, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(399, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(400, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(401, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(402, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(403, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(404, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(405, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(406, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(407, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + + SXE2_PTYPE(408, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(409, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(410, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(411, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(412, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(413, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(414, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(415, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(416, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(417, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(418, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(419, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(420, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(421, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(422, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(423, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(424, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(425, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(426, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(427, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + + [428 ... 
768] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(769, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE(770, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE(771, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(772), + SXE2_PTYPE(773, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2_PTYPE_UNUSED_ENTRY(774), SXE2_PTYPE_UNUSED_ENTRY(775), + + SXE2_PTYPE(776, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(777, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(778, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(779, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(780, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(781, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(782), SXE2_PTYPE_UNUSED_ENTRY(783), + + SXE2_PTYPE(784, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(785, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(786, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(787, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(788, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE(789, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(790), SXE2_PTYPE_UNUSED_ENTRY(791), + + SXE2_PTYPE(792, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(793, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(794, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(795, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(796, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(797, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(798, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(799, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(800, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(801, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(802), SXE2_PTYPE_UNUSED_ENTRY(803), + SXE2_PTYPE_UNUSED_ENTRY(804), SXE2_PTYPE_UNUSED_ENTRY(805), + SXE2_PTYPE_UNUSED_ENTRY(806), SXE2_PTYPE_UNUSED_ENTRY(807), + + SXE2_PTYPE(808, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(809, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(810, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(811, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(812, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(813, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(814, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(815, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(816, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(817, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE_UNUSED_ENTRY(818), SXE2_PTYPE_UNUSED_ENTRY(819), + + SXE2_PTYPE(820, IP, IPV6, NOF, IP_MAC, NONE, NOF, NONE, PAY3, PAY3), + + SXE2_PTYPE(821, IP, IPV6, NOF, IP_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(822, IP, IPV6, NOF, IP_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(823, IP, IPV6, NOF, IP_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(824, IP, IPV6, NOF, IP_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(825, IP, IPV6, NOF, IP_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(826, IP, IPV6, NOF, IP_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(827, IP, IPV6, NOF, IP_MAC, IPV6, FRG, NONE, PAY3, PAY3), + 
SXE2_PTYPE(828, IP, IPV6, NOF, IP_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(829, IP, IPV6, NOF, IP_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(830, IP, IPV6, NOF, IP_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(831, IP, IPV6, NOF, IP_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(832, IP, IPV6, NOF, IP_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(832, IP, IPV6, NOF, IP_MAC_VLAN, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2_PTYPE(836, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(837, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(838, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(839, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(840, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(841, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2_PTYPE(842, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2_PTYPE(843, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2_PTYPE(844, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(845, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(846, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2_PTYPE(847, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, ICMP, PAY4, + PAY3), + + [848 ... 867] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2_PTYPE(878, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(877, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(876, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(879, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(880, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(875, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(874, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(871, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(870, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(872, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(873, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(869, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(868, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(891, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(890, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(889, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(892, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(893, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(888, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(887, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(884, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(883, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(885, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(886, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(882, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(881, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(904, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(903, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(902, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, 
PAY3, PAY3), + SXE2_PTYPE(905, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(906, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(901, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(900, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(897, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(896, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(898, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(899, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(895, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(894, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(917, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(916, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(915, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(918, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(919, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(914, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(913, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(910, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(909, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(911, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(912, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(908, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(907, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(930, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(929, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(928, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(931, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(932, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(927, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(926, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(923, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(922, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(924, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(925, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(921, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(920, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(943, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(942, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(941, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(944, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(945, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(940, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(939, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(936, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(935, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + 
SXE2_PTYPE(937, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(938, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(934, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(933, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(956, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(955, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(954, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(957, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(958, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(953, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(952, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(949, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(948, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(950, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(951, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(947, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(946, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(969, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(968, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(967, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(970, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(971, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(966, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(965, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(962, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(961, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(963, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(964, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(960, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(959, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(982, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(981, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(980, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(983, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(984, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(979, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(978, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(975, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(974, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(976, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(977, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(973, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(972, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(995, IP, IPV4, NOF, 
IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(994, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(993, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(996, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(997, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(992, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(991, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(988, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(987, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(989, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(990, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(986, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(985, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(1008, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(1007, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(1006, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(1009, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(1010, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(1005, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(1004, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(1001, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(1000, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(1002, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(1003, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(999, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(998, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(1021, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2_PTYPE(1020, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(1019, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(1022, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(1023, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(1018, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(1017, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2_PTYPE(1014, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2_PTYPE(1013, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2_PTYPE(1015, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2_PTYPE(1016, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2_PTYPE(1012, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2_PTYPE(1011, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), +}; + +static inline struct sxe2_rx_ptype_info sxe2_rx_ptype_parse(u16 ptype) +{ + return sxe2_ptype_table[ptype]; +} + +#define SXE2_RX_PAGE_ORDER(rxq) (sxe2_rx_page_order(rxq)) +#define SXE2_RX_PAGE_SIZE(rxq) (PAGE_SIZE << SXE2_RX_PAGE_ORDER(rxq)) + +#define SXE2_SKB_PRIV(skb) ((struct sxe2_skb_private_data *)(skb)->cb) + +void sxe2_set_rx_mode(struct net_device *netdev); + +void sxe2_frame_size_set(struct sxe2_vsi *vsi); + +s32 sxe2_rx_cfg(struct sxe2_vsi *vsi); + +int sxe2_rxq_irq_clean(struct sxe2_queue 
*rxq, int budget);
+
+s32 sxe2_rx_ring_alloc(struct sxe2_queue *rxq);
+
+void sxe2_rx_ring_free(struct sxe2_queue *rxq);
+
+bool sxe2_rx_buffers_alloc(struct sxe2_queue *rxq, u16 cnt);
+
+void sxe2_rx_rings_res_free(struct sxe2_vsi *vsi);
+
+s32 sxe2_rxqs_stop(struct sxe2_vsi *vsi);
+
+void sxe2_rx_ring_clean(struct sxe2_queue *rxq);
+
+s32 sxe2_rxq_ctxt_cfg(struct sxe2_queue *rxq);
+void sxe2_rxq_feature_cfg(struct sxe2_queue *rxq);
+void sxe2_rx_ring_res_free(struct sxe2_queue *rxq);
+
+void sxe2_xdp_tail_update(struct sxe2_queue *rxq, u32 xdp_res);
+
+void sxe2_skb_field_fill(struct sxe2_queue *rxq, union sxe2_rx_desc *rx_desc,
+			 struct sk_buff *skb, u16 ptype);
+
+void sxe2_rxq_tail_update(struct sxe2_queue *rxq, u16 new);
+
+s32 sxe2_vsi_cfg_rxq(struct sxe2_queue *rxq);
+
+s32 sxe2_rxq_cfg_ena_common_handle(struct sxe2_adapter *adapter,
+				   struct sxe2_rxq_cfg_params *params);
+
+s32 sxe2_rxq_disable_common_handle(struct sxe2_adapter *adapter,
+				   struct sxe2_rxq_dis_params *params);
+
+s32 sxe2_rxq_ctrl_set(struct sxe2_adapter *adapter, struct sxe2_queue *rxq, bool enable, bool wait);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.c
new file mode 100644
index 0000000000000000000000000000000000000000..c6a59684bcacfd4c032234f127ace1292370c0dc
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_skb_dump.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+/* The include targets were lost in transit; these two headers cover the
+ * sk_buff and per-CPU APIs used below.
+ */
+#include <linux/skbuff.h>
+#include <linux/percpu.h>
+
+#include "sxe2_skb_dump.h"
+
+#define SKB_DESCRIPTION_LEN 256
+static DEFINE_PER_CPU(u64, pkg_cnt);
+
+void sxe2_dump_skb(struct sk_buff *skb)
+{
+	u32 len = skb->len;
+	u32 data_len = skb->data_len;
+	u64 *pkt = this_cpu_ptr(&pkg_cnt);
+
+	s8 desc[SKB_DESCRIPTION_LEN] = {};
+
+	(void)snprintf(desc, SKB_DESCRIPTION_LEN, "pkg_cnt=%llu, skb addr:0x%llx %s",
+		       (++*pkt), (u64)skb, "linear region");
+#ifndef SXE2_CFG_RELEASE
+	sxe2_log_binary(__FILE__, __func__, __LINE__, (u8 *)skb->data, (u64)skb,
+			min_t(u32, (len - data_len), 256), desc);
+#endif
+	(void)data_len;
+	(void)len;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.h
new file mode 100644
index 0000000000000000000000000000000000000000..73adf8fbb33082b6f003c14a9c6548b7986d50ff
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_skb_dump.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_skb_dump.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_SKB_DUMP_H__
+#define __SXE2_SKB_DUMP_H__
+
+#include <linux/skbuff.h>
+#include "sxe2_log.h"
+
+void sxe2_dump_skb(struct sk_buff *skb);
+
+#ifndef SXE2_CFG_RELEASE
+#define SKB_DUMP(skb) sxe2_dump_skb(skb)
+#else
+#define SKB_DUMP(skb)
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.c
new file mode 100644
index 0000000000000000000000000000000000000000..45eceff8144ee47c36c6b44b86bd5feac99ac018
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.c
@@ -0,0 +1,1997 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_sriov.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+/* The include targets were lost in transit; these two headers cover the
+ * PCI SR-IOV and VLAN constants used directly in this file.
+ */
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+
+#include "sxe2.h"
+#include "sxe2_common.h"
+#include "sxe2_queue.h"
+#include "sxe2_mbx_msg.h"
+#include "sxe2_log.h"
+#include "sxe2_sriov.h"
+#include "sxe2_tx.h"
+#include "sxe2_rx.h"
+#include "sxe2_netdev.h"
+#include "sxe2_eswitch.h"
+#include "sxe2_mbx_channel.h"
+#include "sxe2_rss.h"
+#include "sxe2_ipsec.h"
+#include "sxe2_lag.h"
+#include "sxe2_fnav.h"
+#include "sxe2_drv_cmd.h"
+#include "sxe2_vsi.h"
+#include "sxe2_acl.h"
+
+static void sxe2_sriov_clear_ceq_irq_map(struct sxe2_vf_node *vf, u16 ceq_idx)
+{
+	u16 glint_ceqctl_idx = ceq_idx;
+	u32 reg_val;
+
+	reg_val = SXE2_REG_READ(&vf->adapter->hw,
+				SXE2_VF_GLINT_CEQCTL(glint_ceqctl_idx));
+	SXE2_REG_WRITE(&vf->adapter->hw, SXE2_VF_GLINT_CEQCTL(glint_ceqctl_idx),
+		       (reg_val & SXE2_VF_CEQ_CTRL_MASK));
+}
+
+static void sxe2_sriov_clear_aeq_irq_map(struct sxe2_vf_node *vf)
+{
+	SXE2_REG_WRITE(&vf->adapter->hw, SXE2_VF_VPINT_AEQCTL(vf->vf_idx), 0);
+}
+
+static void sxe2_sriov_clear_rdma_irq_map(struct sxe2_vf_node *vf,
+					  struct aux_qv_info *qv_info)
+{
+	if (qv_info->ceq_idx != SXE2_RDMA_VCHNL_Q_INVALID_IDX)
+		sxe2_sriov_clear_ceq_irq_map(vf, qv_info->ceq_idx);
+
+	if (qv_info->aeq_idx != SXE2_RDMA_VCHNL_Q_INVALID_IDX)
+		sxe2_sriov_clear_aeq_irq_map(vf);
+}
+
+static void sxe2_sriov_cfg_rdma_ceq_irq_map(struct sxe2_vf_node *vf,
+					    struct aux_qv_info *qv_info)
+{
+	u32 regval = ((qv_info->v_idx + vf->irq_base_idx) &
+		      SXE2_VF_GLINT_CEQCTL_MSIX_INDX_M) |
+		     ((qv_info->itr_idx << SXE2_VF_GLINT_CEQCTL_ITR_INDX_S) &
+		      SXE2_VF_GLINT_CEQCTL_ITR_INDX_M) |
+		     SXE2_VF_GLINT_CEQCTL_CAUSE_ENA_M;
+
+	SXE2_REG_WRITE(&vf->adapter->hw, SXE2_VF_GLINT_CEQCTL(qv_info->ceq_idx),
+		       regval);
+	LOG_DEBUG("map info v_idx(%d) irq_base(%d) ceq_idx(%d) value(%d)\t"
+		  "itr_idx(%d)",
+		  qv_info->v_idx, vf->irq_base_idx,
+		  SXE2_VF_GLINT_CEQCTL(qv_info->ceq_idx), regval, qv_info->itr_idx);
+}
+
+static void sxe2_sriov_cfg_rdma_aeq_irq_map(struct sxe2_vf_node *vf,
+					    struct aux_qv_info *qv_info)
+{
+	u32 regval = (qv_info->v_idx & SXE2_VF_PFINT_AEQCTL_MSIX_INDX_M) |
+		     ((qv_info->itr_idx << SXE2_VF_VPINT_AEQCTL_ITR_INDX_S) &
+		      SXE2_VF_VPINT_AEQCTL_ITR_INDX_M) |
+		     SXE2_VF_VPINT_AEQCTL_CAUSE_ENA_M;
+
+	SXE2_REG_WRITE(&vf->adapter->hw, SXE2_VF_VPINT_AEQCTL(vf->vf_idx), regval);
+	LOG_DEBUG("map info v_idx(%d) vf_idx(%d) aeq_idx(%d) value(%d) itr_idx(%d)",
+		  qv_info->v_idx, vf->vf_idx, SXE2_VF_VPINT_AEQCTL(vf->vf_idx),
+		  regval, qv_info->itr_idx);
+}
+
+static void sxe2_sriov_cfg_rdma_irq_map(struct sxe2_vf_node *vf,
+					struct aux_qv_info *qv_info)
+{
+	if (qv_info->ceq_idx != SXE2_RDMA_VCHNL_Q_INVALID_IDX)
+		sxe2_sriov_cfg_rdma_ceq_irq_map(vf, qv_info);
+
+	if (qv_info->aeq_idx != SXE2_RDMA_VCHNL_Q_INVALID_IDX)
+		sxe2_sriov_cfg_rdma_aeq_irq_map(vf, qv_info);
+}
+
+static const struct sxe2_vf_ops sxe2_sriov_ops = {
+	.reset_type = SXE2_RST_TYPE_VF_RESET,
+	.clear_mbx_reg = NULL,
+	.trigger_reset_register = NULL,
+	.poll_reset_status = NULL,
+	.clear_reset_trigger = NULL,
+	.vsi_rebuild = NULL,
+	.post_vsi_rebuild = NULL,
+	.cfg_rdma_irq_map = sxe2_sriov_cfg_rdma_irq_map,
+	.clear_rdma_irq_map = sxe2_sriov_clear_rdma_irq_map,
+};
+
+static s32 sxe2_vf_perm_check(struct sxe2_adapter *adapter, int num_vfs)
+{
+	if (!test_bit(SXE2_FLAG_SRIOV_CAPABLE, adapter->flags)) {
+		LOG_DEV_ERR("sriov not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR("device in safe mode, cannot configure sriov\n");
+		return
-EOPNOTSUPP; + } + + if (sxe2_lag_support(adapter)) { + mutex_lock(&adapter->lag_ctxt->lock); + if (sxe2_lag_is_bonded(adapter) || + adapter->lag_ctxt->lag_wk.is_bonded) { + mutex_unlock(&adapter->lag_ctxt->lock); + LOG_DEV_ERR("device in bond, cannot configure sriov\n"); + return -EOPNOTSUPP; + } + mutex_unlock(&adapter->lag_ctxt->lock); + } + + if (num_vfs > adapter->vf_ctxt.max_vfs) { + LOG_DEV_ERR("enable %u vfs exceed device vf cap:%u.\n", num_vfs, + adapter->vf_ctxt.max_vfs); + return -EOPNOTSUPP; + } + + return 0; +} + +bool sxe2_vf_is_exist(struct sxe2_adapter *adapter) +{ + return !!adapter->vf_ctxt.num_vfs; +} + +bool sxe2_vf_is_trusted(struct sxe2_vf_node *vf) +{ + return vf->prop.trusted; +} + +bool sxe2_vf_set_mac_is_allow(struct sxe2_vf_node *vf) +{ + if (vf->prop.mac_from_pf && !sxe2_vf_is_trusted(vf)) + return false; + + return true; +} + +STATIC u16 sxe2_sriov_irqs_avail_nums_get(struct sxe2_adapter *adapter) +{ + unsigned long *map = adapter->irq_ctxt.map; + u16 size = adapter->irq_ctxt.max_cnt; + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + u16 start_idx = irq_layout->macvlan_offset; + u16 zero_count = 0; + u16 i; + + for (i = (u16)(size - 1); i >= start_idx; i--) { + if (test_bit(i, map)) + break; + zero_count++; + + if (i == 0) + break; + } + + return zero_count; +} + +STATIC s32 sxe2_vf_irqs_num_set(struct sxe2_adapter *adapter, int num_vfs) +{ + u16 sriov; + u16 per; + u16 vf_irq_cnt; + struct sxe2_irq_context *irq_ctxt = &adapter->irq_ctxt; + u16 max_msix = irq_ctxt->max_cnt; + struct sxe2_irq_layout *irq_layout = &irq_ctxt->irq_layout; + unsigned long *map = adapter->irq_ctxt.map; + + mutex_lock(&irq_ctxt->lock); + + sriov = sxe2_sriov_irqs_avail_nums_get(adapter); + + per = sriov / (u16)num_vfs; + + if (per < SXE2_VF_1Q_MSIX_NUM) { + LOG_DEV_ERR("sriov irq:%u not enough to support %u vfs's \t" + "minimum msi-x interrupts:%u\n", + sriov, num_vfs, SXE2_VF_1Q_MSIX_NUM); + mutex_unlock(&irq_ctxt->lock); + return -ENOSPC; + } + + if (per >= SXE2_VF_64Q_MSIX_NUM) + vf_irq_cnt = SXE2_VF_64Q_MSIX_NUM; + else + vf_irq_cnt = per; + + adapter->vf_ctxt.irq_cnt = vf_irq_cnt; + + irq_layout->sriov = (u16)(vf_irq_cnt * (u16)num_vfs); + irq_layout->sriov_offset = (u16)(max_msix - irq_layout->sriov); + adapter->vf_ctxt.irq_base = irq_layout->sriov_offset; + bitmap_set(map, irq_layout->sriov_offset, irq_layout->sriov); + + mutex_unlock(&irq_ctxt->lock); + + LOG_INFO_BDF("pf total irq cnt:%u used:%u\t" + "sriov:%u num_vfs:%d per vf irq cnt:%u\t" + "sriov irq base idx:%u\n", + irq_ctxt->max_cnt, irq_ctxt->avail_cnt + irq_layout->macvlan, + sriov, num_vfs, vf_irq_cnt, adapter->vf_ctxt.irq_base); + + return 0; +} + +STATIC u16 sxe2_sriov_qs_avail_nums_get(struct sxe2_adapter *adapter, u8 q_type) +{ + unsigned long *map; + u16 size; + struct sxe2_queue_layout *q_layout = NULL; + u16 start_idx; + u16 zero_count = 0; + u16 i; + + switch (q_type) { + case SXE2_DATA_TQ: + size = adapter->q_ctxt.max_txq_cnt; + q_layout = &adapter->q_ctxt.txq_layout; + map = q_layout->txq_map; + start_idx = q_layout->macvlan_offset; + break; + case SXE2_DATA_RQ: + size = adapter->q_ctxt.max_rxq_cnt; + q_layout = &adapter->q_ctxt.rxq_layout; + map = q_layout->rxq_map; + start_idx = q_layout->macvlan_offset; + break; + default: + zero_count = 0; + goto l_end; + } + + for (i = (u16)(size - 1); i >= start_idx; i--) { + if (test_bit(i, map)) + break; + + zero_count++; + + if (i == 0) + break; + } + +l_end: + return zero_count; +} + +static s32 sxe2_vf_queues_num_set(struct sxe2_adapter 
*adapter, int num_vfs) +{ + u16 txq_cnt; + u16 rxq_cnt; + u16 txq_idle; + u16 rxq_idle; + u16 per; + struct mutex *lock = &adapter->q_ctxt.lock; + struct sxe2_queue_layout *txq_layout = &adapter->q_ctxt.txq_layout; + struct sxe2_queue_layout *rxq_layout = &adapter->q_ctxt.rxq_layout; + u16 max_txq_cnt = adapter->q_ctxt.max_txq_cnt; + u16 max_rxq_cnt = adapter->q_ctxt.max_rxq_cnt; + u16 eswitch_mode_need = 0; + + mutex_lock(lock); + + txq_idle = sxe2_sriov_qs_avail_nums_get(adapter, SXE2_DATA_TQ); + if (sxe2_eswitch_is_offload(adapter)) { + eswitch_mode_need = (u16)(num_vfs * SXE2_VF_ESW_CNT); + txq_idle = (txq_idle > eswitch_mode_need) + ? (txq_idle - eswitch_mode_need) + : (u16)0; + } + per = txq_idle / (u16)num_vfs; + if (!per) + txq_cnt = 0U; + else + txq_cnt = (u16)min_t(u16, per, (u16)SXE2_VF_QUEUE_CNT_MAX); + + rxq_idle = sxe2_sriov_qs_avail_nums_get(adapter, SXE2_DATA_RQ); + if (sxe2_eswitch_is_offload(adapter)) { + eswitch_mode_need = (u16)num_vfs * (u16)SXE2_VF_ESW_CNT; + rxq_idle = (rxq_idle > eswitch_mode_need) + ? (rxq_idle - eswitch_mode_need) + : (u16)0; + } + per = rxq_idle / (u16)num_vfs; + if (!per) + rxq_cnt = 0U; + else + rxq_cnt = (u16)min_t(u16, per, (u16)SXE2_VF_QUEUE_CNT_MAX); + + if (txq_cnt < SXE2_VF_QUEUE_CNT_MIN || rxq_cnt < SXE2_VF_QUEUE_CNT_MIN) { + LOG_DEV_ERR("txq_idle:%u rxq_idle:%u not enough\t" + "to support %u vfs's minimum queue cnt:%u\n", + txq_idle, rxq_idle, num_vfs, SXE2_VF_QUEUE_CNT_MIN); + mutex_unlock(lock); + return -ENOSPC; + } + adapter->vf_ctxt.q_cnt = (u16)min_t(u16, rxq_cnt, txq_cnt); + + txq_layout->sriov = (u16)(adapter->vf_ctxt.q_cnt * (u16)num_vfs); + txq_layout->sriov_offset = (u16)(max_txq_cnt - txq_layout->sriov); + + rxq_layout->sriov = (u16)(adapter->vf_ctxt.q_cnt * (u16)num_vfs); + rxq_layout->sriov_offset = (u16)(max_rxq_cnt - rxq_layout->sriov); + + bitmap_set(txq_layout->txq_map, txq_layout->sriov_offset, txq_layout->sriov); + bitmap_set(rxq_layout->rxq_map, rxq_layout->sriov_offset, rxq_layout->sriov); + + if (sxe2_eswitch_is_offload(adapter)) { + txq_layout->esw = (u16)num_vfs; + txq_layout->esw_offset = + (u16)(txq_layout->sriov_offset - txq_layout->esw); + txq_layout->dpdk_esw = (u16)num_vfs; + txq_layout->dpdk_esw_offset = + (u16)(txq_layout->sriov_offset - txq_layout->esw - + txq_layout->dpdk_esw); + + rxq_layout->esw = (u16)num_vfs; + rxq_layout->esw_offset = + (u16)(rxq_layout->sriov_offset - rxq_layout->esw); + rxq_layout->dpdk_esw = (u16)num_vfs; + rxq_layout->dpdk_esw_offset = + (u16)(rxq_layout->sriov_offset - rxq_layout->esw - + rxq_layout->dpdk_esw); + + bitmap_set(txq_layout->txq_map, txq_layout->esw_offset, + txq_layout->esw); + bitmap_set(rxq_layout->rxq_map, rxq_layout->esw_offset, + rxq_layout->esw); + + bitmap_set(txq_layout->txq_map, txq_layout->dpdk_esw_offset, + txq_layout->dpdk_esw); + bitmap_set(rxq_layout->rxq_map, rxq_layout->dpdk_esw_offset, + rxq_layout->dpdk_esw); + } + + mutex_unlock(lock); + + LOG_INFO_BDF("num_vfs:%d txq_idle:%u rxq_idle:%u \t" + "txq_cnt:%u rxq_cnt:%u q_cnt:%u swt_mode:%u.\n", + num_vfs, txq_idle, rxq_idle, txq_cnt, rxq_cnt, + adapter->vf_ctxt.q_cnt, adapter->eswitch_ctxt.mode); + + return 0; +} + +static void sxe2_vf_irq_queues_num_clear(struct sxe2_adapter *adapter) +{ + struct sxe2_irq_context *irq_ctxt = &adapter->irq_ctxt; + struct sxe2_irq_layout *irq_layout = &irq_ctxt->irq_layout; + unsigned long *map = adapter->irq_ctxt.map; + + struct sxe2_queue_context *q_ctxt = &adapter->q_ctxt; + struct sxe2_queue_layout *txq_layout = &q_ctxt->txq_layout; + struct sxe2_queue_layout 
*rxq_layout = &q_ctxt->rxq_layout; + + mutex_lock(&irq_ctxt->lock); + bitmap_clear(map, adapter->vf_ctxt.irq_base, irq_layout->sriov); + irq_layout->sriov = 0; + irq_layout->sriov_offset = irq_ctxt->max_cnt; + mutex_unlock(&irq_ctxt->lock); + + mutex_lock(&q_ctxt->lock); + bitmap_clear(txq_layout->txq_map, txq_layout->sriov_offset, + txq_layout->sriov); + txq_layout->sriov = 0; + txq_layout->sriov_offset = q_ctxt->max_txq_cnt; + + bitmap_clear(rxq_layout->rxq_map, rxq_layout->sriov_offset, + rxq_layout->sriov); + rxq_layout->sriov = 0; + rxq_layout->sriov_offset = q_ctxt->max_rxq_cnt; + + if (sxe2_eswitch_is_offload(adapter)) { + bitmap_clear(txq_layout->txq_map, txq_layout->dpdk_esw_offset, + txq_layout->dpdk_esw); + bitmap_clear(rxq_layout->rxq_map, rxq_layout->dpdk_esw_offset, + rxq_layout->dpdk_esw); + + bitmap_clear(txq_layout->txq_map, txq_layout->esw_offset, + txq_layout->esw); + bitmap_clear(rxq_layout->rxq_map, rxq_layout->esw_offset, + rxq_layout->esw); + + txq_layout->esw = 0; + txq_layout->esw_offset = q_ctxt->max_txq_cnt; + txq_layout->dpdk_esw = 0; + txq_layout->dpdk_esw_offset = q_ctxt->max_txq_cnt; + + rxq_layout->esw = 0; + rxq_layout->esw_offset = q_ctxt->max_rxq_cnt; + rxq_layout->dpdk_esw = 0; + rxq_layout->dpdk_esw_offset = q_ctxt->max_rxq_cnt; + } + mutex_unlock(&q_ctxt->lock); + + adapter->vf_ctxt.irq_base = 0; + adapter->vf_ctxt.q_cnt = 0; + adapter->vf_ctxt.irq_cnt = 0; +} + +static void sxe2_vf_node_free(struct sxe2_adapter *adapter, u16 vf_idx) +{ + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_vf_node_e *vf_node_e = SXE2_VF_NODE_E(adapter, vf_idx); + + BUG_ON(vf_idx >= adapter->vf_ctxt.max_vfs); + + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + + devm_kfree(dev, vf_node_e->vf_node); + vf_node_e->vf_node = NULL; + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); +} + +static void sxe2_vf_sw_res_deinit(struct sxe2_adapter *adapter) +{ + u16 vf_idx; + u16 vfs_cnt = adapter->vf_ctxt.num_vfs; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + adapter->vf_ctxt.num_vfs = 0; + + for (vf_idx = 0; vf_idx < vfs_cnt; vf_idx++) + sxe2_vf_node_free(adapter, vf_idx); + + LOG_DEV_WARN("%u vf sw res deinit.\n", vfs_cnt); +} + +static s32 sxe2_vf_sw_res_init(struct sxe2_adapter *adapter, u32 num_vfs) +{ + struct sxe2_vf_node_e *vf_node_e; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 vf_idx; + s32 ret; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + adapter->vf_ctxt.adapter = adapter; + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_node_e = SXE2_VF_NODE_E(adapter, vf_idx); + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + + vf_node_e->vf_node = devm_kzalloc(dev, sizeof(struct sxe2_vf_node), + GFP_KERNEL); + if (!vf_node_e->vf_node) { + ret = -ENOMEM; + LOG_DEV_ERR("vf node alloc failed num_vfs:%d vf_idx:%u\t" + "size:%zu ret:%d\n", + num_vfs, vf_idx, sizeof(struct sxe2_vf_node), + ret); + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + goto l_free; + } + + set_bit(SXE2_VF_STATE_DIS, vf_node_e->vf_node->states); + vf_node_e->vf_node->adapter = adapter; + vf_node_e->vf_node->irq_base_idx = + (u16)(adapter->vf_ctxt.irq_base + + vf_idx * adapter->vf_ctxt.irq_cnt); + vf_node_e->vf_node->vf_idx = vf_idx; + vf_node_e->vf_node->msg_table = sxe2_mbx_msg_table_get(); + vf_node_e->vf_node->vf_ops = &sxe2_sriov_ops; + vf_node_e->vf_node->prop.spoofchk = true; + vf_node_e->vf_node->mode = SXE2_COM_MODULE_UNDEFINED; + + mutex_init(&vf_node_e->vf_node->repr_cfg_lock); + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } 
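+	/* All per-VF nodes are allocated and initialized; publish the count
+	 * so sxe2_for_each_vf() and sxe2_vf_node_get() start seeing them.
+	 */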
+ + adapter->vf_ctxt.num_vfs = (u16)num_vfs; + + return 0; + +l_free: + while (vf_idx--) + sxe2_vf_node_free(adapter, vf_idx); + + return ret; +} + +struct sxe2_vf_node *sxe2_vf_node_get(struct sxe2_adapter *adapter, u16 vf_id) +{ + struct sxe2_vf_node *vf_node = NULL; + + lockdep_assert_held(SXE2_VF_NODE_LOCK(adapter, vf_id)); + + if (vf_id >= adapter->vf_ctxt.num_vfs) { + LOG_ERROR_BDF("invalid vf_id:%u.\n", vf_id); + goto l_out; + } + + vf_node = SXE2_VF_NODE(adapter, vf_id); + if (!vf_node) + LOG_DEV_ERR("vf_id:%u node NULL.\n", vf_id); + +l_out: + return vf_node; +} + +STATIC s32 sxe2_vf_vlan_init(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (sxe2_port_vlan_is_exist(vf_node)) { + ret = sxe2_vf_vsi_port_vlan_cfg(vf_node, vsi); + if (ret) + LOG_ERROR("vf:%u vsi %u port vlan cfg fail %d.\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + } else { + ret = sxe2_vsi_vlan_zero_add(vf_node->vsi); + if (ret) + LOG_ERROR("vf:%u vsi %u add vlan 0 fail %d.\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + } + + return ret; +} + +static s32 sxe2_vf_mac_init(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi) +{ + s32 ret; + u8 broadcast[ETH_ALEN]; + + eth_broadcast_addr(broadcast); + ret = sxe2_mac_rule_add(vsi, broadcast); + if (ret) { + LOG_ERROR("vf:%u vsi:%u broadcast mac addr add failed.(err:%d).\n", + vf_node->vf_idx, vsi->idx_in_dev, ret); + return ret; + } + + vf_node->mac_cnt++; + + return 0; +} + +s32 sxe2_vf_id_check(struct sxe2_adapter *adapter, u16 vf_idx) +{ + if (vf_idx >= adapter->vf_ctxt.max_vfs) { + LOG_DEV_ERR("invalid vf_idx:%u exceed max_vfs:%u.\n", vf_idx, + adapter->vf_ctxt.max_vfs); + return -EINVAL; + } + + return 0; +} + +STATIC s32 sxe2_vf_vsi_id_alloc(struct sxe2_vf_node *vf_node) +{ + s32 ret = 0; + struct sxe2_hw *hw = &vf_node->adapter->hw; + struct sxe2_adapter *adapter = vf_node->adapter; + u16 i; + u16 idx_in_pf; + + for (i = 0; i < SXE2_VF_TYPE_NR; i++) { + mutex_lock(&adapter->vsi_ctxt.lock); + idx_in_pf = sxe2_vsi_get(&adapter->vsi_ctxt); + mutex_unlock(&adapter->vsi_ctxt.lock); + if (idx_in_pf == SXE2_INVAL_U16) { + ret = -ENOSPC; + LOG_DEV_ERR("No Free Vsis.\n"); + goto l_err; + } + vf_node->vsi_id[i] = idx_in_pf + adapter->vsi_ctxt.base_idx_in_dev; + LOG_INFO_BDF("vf:%d vsi id_in_pf:%d type:%d id_in_dev:%d.\n", + vf_node->vf_idx, idx_in_pf, i, vf_node->vsi_id[i]); + + sxe2_hw_l2tag_accept(hw, vf_node->vsi_id[i]); + } + + return ret; + +l_err: + while (i) { + i--; + idx_in_pf = (u16)(vf_node->vsi_id[i] - + adapter->vsi_ctxt.base_idx_in_dev); + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_vsi_put(&adapter->vsi_ctxt, idx_in_pf); + mutex_unlock(&adapter->vsi_ctxt.lock); + } + return ret; +} + +static void sxe2_vf_vsi_id_free(struct sxe2_vf_node *vf_node) +{ + struct sxe2_adapter *adapter = vf_node->adapter; + u16 i; + u16 idx_in_pf; + + for (i = 0; i < SXE2_VF_TYPE_NR; i++) { + idx_in_pf = (u16)(vf_node->vsi_id[i] - + adapter->vsi_ctxt.base_idx_in_dev); + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_vsi_put(&adapter->vsi_ctxt, idx_in_pf); + mutex_unlock(&adapter->vsi_ctxt.lock); + } +} + +static void sxe2_vf_irq_map(struct sxe2_vf_node *vf) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_hw_vf_irq vf_irq; + + vf_irq.first_in_pf = vf->irq_base_idx; + vf_irq.last_in_pf = (vf_irq.first_in_pf + adapter->vf_ctxt.irq_cnt) - 1U; + + vf_irq.first_in_dev = vf_irq.first_in_pf + adapter->irq_ctxt.base_idx_in_dev; + vf_irq.last_in_dev = (vf_irq.first_in_dev + adapter->vf_ctxt.irq_cnt) - 1U; + + vf_irq.vfid_in_pf = vf->vf_idx; + 
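+	/* "in_pf" values index this PF's local IRQ range; "in_dev" values add
+	 * the device-wide base offset so the HW sees absolute vector IDs.
+	 */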
vf_irq.vfid_in_dev = vf->vf_idx + adapter->vf_ctxt.vfid_base; + + vf_irq.pf_id = adapter->pf_idx; + + LOG_INFO_BDF("pf_id:%u vfid_in_pf:%u vfid_in_dev:%u irq cnt:%u\t" + "first_in_pf:%u last_in_pf:%u first_in_dev:%u\t" + "last_in_dev:%u.\n", + vf_irq.pf_id, vf_irq.vfid_in_pf, vf_irq.vfid_in_dev, + adapter->vf_ctxt.irq_cnt, vf_irq.first_in_pf, vf_irq.last_in_pf, + vf_irq.first_in_dev, vf_irq.last_in_dev); + + sxe2_hw_vf_irq_cfg(&adapter->hw, &vf_irq); +} + +static void sxe2_vf_queue_map(struct sxe2_vf_node *vf) +{ + struct sxe2_hw_vf_queue vf_queue; + struct sxe2_adapter *adapter = vf->adapter; + + vf_queue.rxq_cnt = adapter->vf_ctxt.q_cnt; + vf_queue.rxq_first_in_pf = (u16)(adapter->q_ctxt.rxq_layout.sriov_offset + + vf->vf_idx * adapter->vf_ctxt.q_cnt); + vf_queue.txq_cnt = adapter->vf_ctxt.q_cnt; + vf_queue.txq_first_in_pf = (u16)(adapter->q_ctxt.txq_layout.sriov_offset + + vf->vf_idx * adapter->vf_ctxt.q_cnt); + vf_queue.vfid_in_pf = vf->vf_idx; + + LOG_INFO_BDF("vf:%u rxq cnt:%u rxq_first_idx:%u txq cnt:%u\t" + "txq_first_in_pf:%u.\n", + vf->vf_idx, vf_queue.rxq_cnt, vf_queue.rxq_first_in_pf, + vf_queue.txq_cnt, vf_queue.txq_first_in_pf); + + sxe2_hw_vf_queue_cfg(&adapter->hw, &vf_queue); +} + +static void sxe2_vf_res_map(struct sxe2_vf_node *vf_node) +{ + sxe2_vf_irq_map(vf_node); + sxe2_vf_queue_map(vf_node); +} + +static void sxe2_vf_irq_unmap(struct sxe2_vf_node *vf) +{ + struct sxe2_adapter *adapter = vf->adapter; + struct sxe2_hw_vf_irq vf_irq; + + vf_irq.first_in_pf = vf->irq_base_idx; + vf_irq.last_in_pf = (vf_irq.first_in_pf + adapter->vf_ctxt.irq_cnt) - 1U; + + vf_irq.first_in_dev = vf_irq.first_in_pf + adapter->irq_ctxt.base_idx_in_dev; + vf_irq.last_in_dev = (vf_irq.first_in_dev + adapter->vf_ctxt.irq_cnt) - 1U; + + vf_irq.vfid_in_pf = vf->vf_idx; + vf_irq.vfid_in_dev = vf->vf_idx + adapter->vf_ctxt.vfid_base; + + vf_irq.pf_id = adapter->pf_idx; + sxe2_hw_vf_irq_decfg(&adapter->hw, &vf_irq); +} + +static void sxe2_vf_queue_unmap(struct sxe2_vf_node *vf) +{ + struct sxe2_hw_vf_queue vf_queue; + struct sxe2_adapter *adapter = vf->adapter; + + vf_queue.vfid_in_pf = vf->vf_idx; + sxe2_hw_vf_queue_decfg(&adapter->hw, &vf_queue); +} + +static void sxe2_vf_res_unmap(struct sxe2_vf_node *vf_node) +{ + sxe2_vf_irq_unmap(vf_node); + sxe2_vf_queue_unmap(vf_node); +} + +void sxe2_vfs_active(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf_node; + u16 vf_idx; + + sxe2_for_each_vf(adapter, vf_idx) + { + vf_node = SXE2_VF_NODE(adapter, vf_idx); + + clear_bit(SXE2_VF_STATE_DIS, vf_node->states); + + sxe2_hw_vf_active(&adapter->hw, vf_node->vf_idx); + } +} + +static void sxe2_vfs_hw_deactive(struct sxe2_adapter *adapter) +{ + struct sxe2_vf_node *vf_node; + u16 vf_idx; + + sxe2_for_each_vf(adapter, vf_idx) + { + vf_node = SXE2_VF_NODE(adapter, vf_idx); + sxe2_hw_vf_deactive(&adapter->hw, vf_node->vf_idx); + } +} + +static s32 __sxe2_vf_vsi_destroy_by_id(struct sxe2_vf_node *vf_node, + u16 vsi_id_in_dev) +{ + struct sxe2_adapter *adapter = vf_node->adapter; + struct sxe2_vsi *vsi = NULL; + enum sxe2_vsi_type type; + s32 ret; + + lockdep_assert_held(&adapter->vsi_ctxt.lock); + + vsi = sxe2_vf_vsi_get(vf_node, vsi_id_in_dev); + if (!vsi) { + ret = -EINVAL; + LOG_WARN_BDF("vsi id:%d vsi not create yet.\n", vsi_id_in_dev); + goto l_out; + } + + ret = sxe2_vf_vsi_type_get(vf_node, vsi_id_in_dev, &type); + if (ret) { + LOG_ERROR_BDF("vf:%d vsi_id:%d vsi type get failed %d.\n", + vf_node->vf_idx, vsi_id_in_dev, ret); + goto l_out; + } + + sxe2_vsi_destroy_unlock(vsi); + + if (type == 
SXE2_VSI_T_DPDK_VF)
+		vf_node->dpdk_vf_vsi = NULL;
+	else
+		vf_node->vsi = NULL;
+
+	LOG_INFO_BDF("vf:%d vsi_id:%d vsi destroyed.\n", vf_node->vf_idx,
+		     vsi_id_in_dev);
+
+l_out:
+	return ret;
+}
+
+void sxe2_vf_vsi_destroy_by_id(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev)
+{
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	(void)__sxe2_vf_vsi_destroy_by_id(vf_node, vsi_id_in_dev);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+STATIC void sxe2_vf_vsi_destroy(struct sxe2_vf_node *vf_node)
+{
+	struct sxe2_vsi *vsi = NULL;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+	u16 i;
+	u16 vsi_id;
+
+	for (i = 0; i < SXE2_VF_TYPE_NR; i++) {
+		mutex_lock(&adapter->vsi_ctxt.lock);
+		vsi = sxe2_vf_vsi_get(vf_node, vf_node->vsi_id[i]);
+		if (!vsi) {
+			vsi_id = vf_node->vsi_id[i];
+			sxe2_vsi_fltr_remove(vf_node->adapter, vsi_id);
+			mutex_unlock(&adapter->vsi_ctxt.lock);
+			continue;
+		}
+
+		sxe2_vsi_destroy_unlock(vsi);
+
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+	}
+
+	vf_node->vsi = NULL;
+	vf_node->dpdk_vf_vsi = NULL;
+}
+
+static s32 sxe2_vf_queue_range_set(struct sxe2_adapter *adapter)
+{
+	struct sxe2_fwc_vf_queue_info *req;
+	u32 req_size;
+	struct device *dev = SXE2_ADAPTER_TO_DEV(adapter);
+	u16 vf_cnt = adapter->vf_ctxt.num_vfs;
+	s32 ret;
+	u16 i;
+	struct sxe2_cmd_params cmd = {};
+	u16 vf_queue_in_pf;
+
+	req_size = (u32)struct_size(req, queue_info, vf_cnt);
+	req = devm_kzalloc(dev, req_size, GFP_KERNEL);
+	if (!req) {
+		LOG_ERROR_BDF("vf queue info alloc failed\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	req->pf_id = adapter->pf_idx;
+	req->vf_cnt = vf_cnt;
+	for (i = 0; i < vf_cnt; i++) {
+		vf_queue_in_pf = adapter->q_ctxt.rxq_layout.sriov_offset +
+				 i * adapter->vf_ctxt.q_cnt;
+		req->queue_info[i].rxq_base = vf_queue_in_pf;
+		req->queue_info[i].rxq_cnt = adapter->vf_ctxt.q_cnt;
+		vf_queue_in_pf = adapter->q_ctxt.txq_layout.sriov_offset +
+				 i * adapter->vf_ctxt.q_cnt;
+		req->queue_info[i].txq_base = vf_queue_in_pf;
+		req->queue_info[i].txq_cnt = adapter->vf_ctxt.q_cnt;
+	}
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_VF_QUEUE_SET, req, req_size,
+				  NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("vf queue set msg failed %d\n", ret);
+		ret = -EIO;
+	}
+
+	devm_kfree(dev, req);
+l_end:
+	return ret;
+}
+
+static s32 sxe2_vf_queue_range_clear(struct sxe2_adapter *adapter)
+{
+	struct sxe2_fwc_vf_queue_info req = {};
+	s32 ret;
+	struct sxe2_cmd_params cmd = {};
+
+	req.pf_id = adapter->pf_idx;
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_VF_QUEUE_CLEAR, &req,
+				  sizeof(req), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("vf cnt:%d queue clear msg failed %d\n",
+			      adapter->vf_ctxt.num_vfs, ret);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static void sxe2_vf_hw_res_deinit(struct sxe2_adapter *adapter, bool is_disable)
+{
+	struct sxe2_vf_node *vf_node;
+	u16 vf_idx;
+
+	sxe2_mbx_channel_disable(adapter);
+
+	(void)sxe2_vf_queue_range_clear(adapter);
+
+	sxe2_for_each_vf(adapter, vf_idx)
+	{
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		vf_node = sxe2_vf_node_get(adapter, vf_idx);
+		if (is_disable)
+			(void)sxe2_rdma_aux_send_vf_reset_event(adapter,
+								vf_node->vf_idx);
+
+		sxe2_vf_res_unmap(vf_node);
+
+		sxe2_vf_vsi_destroy(vf_node);
+
+		sxe2_vf_vsi_id_free(vf_node);
+
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+	}
+}
+
+static s32 sxe2_vf_hw_res_init(struct sxe2_adapter *adapter, int num_vfs)
+{
+	struct sxe2_vf_node_e *vf_node_e;
+	u16 vf_idx;
+	s32
ret = 0; + u16 cnt = 0; + u16 vf_id_in_dev = 0; + + lockdep_assert_held(&adapter->vf_ctxt.vfs_lock); + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_id_in_dev = vf_idx + adapter->vf_ctxt.vfid_base; + if (sxe2_hw_vflr_cause_get(&adapter->hw, vf_id_in_dev)) + sxe2_hw_vflr_cause_clear(&adapter->hw, vf_id_in_dev); + } + + ret = sxe2_mbx_channel_enable(adapter); + if (ret) { + ret = -EIO; + goto l_fail; + } + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_node_e = SXE2_VF_NODE_E(adapter, vf_idx); + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + + ret = sxe2_vf_vsi_id_alloc(vf_node_e->vf_node); + if (ret) { + ret = -EIO; + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + goto l_fail; + } + + cnt++; + + sxe2_vf_res_map(vf_node_e->vf_node); + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + + ret = sxe2_vf_queue_range_set(adapter); + if (ret) { + LOG_ERROR_BDF("vf queue range set failed %d\n", ret); + goto l_fail; + } + + return ret; + +l_fail: + for (vf_idx = 0; vf_idx < cnt; vf_idx++) { + vf_node_e = SXE2_VF_NODE_E(adapter, vf_idx); + mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + + sxe2_vf_res_unmap(vf_node_e->vf_node); + sxe2_vf_vsi_id_free(vf_node_e->vf_node); + vf_node_e->vf_node->vsi = NULL; + + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + } + return ret; +} + +s32 sxe2_vf_base_l2_filter_setup(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + ret = sxe2_vf_vlan_init(vf_node, vsi); + if (ret) + return ret; + + ret = sxe2_vf_mac_init(vf_node, vsi); + return ret; +} + +static s32 sxe2_sriov_enable(struct sxe2_adapter *adapter, int num_vfs) +{ + s32 ret; + + mutex_lock(&adapter->vf_ctxt.vfs_lock); + + if (test_bit(SXE2_FLAG_SRIOV_VFS_DISABLED, adapter->flags)) { + ret = -EBUSY; + LOG_WARN_BDF("during pf reset, try later.\n"); + goto l_unlock; + } + + ret = sxe2_vf_irqs_num_set(adapter, num_vfs); + if (ret) + goto l_unlock; + + ret = sxe2_vf_queues_num_set(adapter, num_vfs); + if (ret) + goto l_clear_num; + + ret = sxe2_vf_sw_res_init(adapter, (u32)num_vfs); + if (ret) + goto l_clear_num; + + ret = sxe2_vf_hw_res_init(adapter, num_vfs); + if (ret) + goto l_sw_res_deinit; + + if (sxe2_eswitch_is_offload(adapter)) { + ret = sxe2_eswitch_configure(adapter, true); + if (ret) + goto l_hw_res_deinit; + } + + sxe2_vfs_active(adapter); + + mutex_unlock(&adapter->vf_ctxt.vfs_lock); + return ret; + +l_hw_res_deinit: + sxe2_vf_hw_res_deinit(adapter, false); + +l_sw_res_deinit: + sxe2_vf_sw_res_deinit(adapter); + +l_clear_num: + sxe2_vf_irq_queues_num_clear(adapter); + +l_unlock: + mutex_unlock(&adapter->vf_ctxt.vfs_lock); + return ret; +} + +static s32 sxe2_sriov_disable(struct sxe2_adapter *adapter, bool is_stopped) +{ + struct sxe2_vf_context *vf_ctxt = &adapter->vf_ctxt; + struct sxe2_vf_node *vf_node; + u16 idx; + s32 ret = 0; + + mutex_lock(&vf_ctxt->vfs_lock); + + if (!is_stopped && test_bit(SXE2_FLAG_SRIOV_VFS_DISABLED, adapter->flags)) { + ret = -EBUSY; + LOG_WARN_BDF("during pf reset, try later.\n"); + goto l_unlock; + } + + sxe2_for_each_vf(adapter, idx) + { + mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + sxe2_vf_stop(vf_node); + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + LOG_INFO_BDF("vf:%u stopped.\n", idx); + } + + sxe2_vfs_hw_deactive(adapter); + + (void)sxe2_eswitch_configure(adapter, false); + + sxe2_fnav_vf_cfg_clear(adapter); + + sxe2_vf_hw_res_deinit(adapter, true); + + sxe2_txsched_vf_tree_clean(adapter); + + sxe2_vf_sw_res_deinit(adapter); + + 
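+	/* Finally return the MSI-X vectors and queue ranges that were reserved
+	 * for SR-IOV back to the PF-wide bitmaps.
+	 */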
+	sxe2_vf_irq_queues_num_clear(adapter);
+
+l_unlock:
+	mutex_unlock(&vf_ctxt->vfs_lock);
+
+	return ret;
+}
+
+static s32 sxe2_vfs_enable(struct sxe2_adapter *adapter, int num_vfs)
+{
+	s32 ret;
+
+	if (sxe2_eswitch_mode_read_lock(adapter)) {
+		ret = -EBUSY;
+		goto l_timeout_check;
+	}
+
+	ret = sxe2_sriov_enable(adapter, num_vfs);
+	if (ret) {
+		LOG_DEV_ERR("sxe2 enable %d vfs failed, ret:%d\n", num_vfs, ret);
+		goto l_timeout_check;
+	}
+
+	ret = pci_enable_sriov(adapter->pdev, num_vfs);
+	if (ret) {
+		LOG_DEV_ERR("enable sriov for %d vfs failed, ret:%d\n", num_vfs, ret);
+		(void)sxe2_sriov_disable(adapter, true);
+		goto l_timeout_check;
+	}
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		(void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt,
+						       SXE2_COM_SW_MODE_SWITCHDEV);
+	} else {
+		(void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt,
+						       SXE2_COM_SW_MODE_LEGACY);
+	}
+
+l_timeout_check:
+	sxe2_eswitch_mode_read_unlock(adapter);
+	return ret;
+}
+
+s32 sxe2_vfs_disable(struct sxe2_adapter *adapter, bool is_stopped)
+{
+	s32 ret = 0;
+
+	if (sxe2_eswitch_mode_read_lock(adapter)) {
+		ret = -EBUSY;
+		goto l_timeout_check;
+	}
+
+	pci_disable_sriov(adapter->pdev);
+	(void)sxe2_sriov_disable(adapter, is_stopped);
+
+	(void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt,
+					       SXE2_COM_SW_MODE_LEGACY);
+
+l_timeout_check:
+	sxe2_eswitch_mode_read_unlock(adapter);
+	return ret;
+}
+
+int sxe2_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct sxe2_adapter *adapter = pci_get_drvdata(pdev);
+	int ret;
+
+	ret = sxe2_vf_perm_check(adapter, num_vfs);
+	if (ret)
+		goto l_err;
+
+	if (!num_vfs)
+		ret = sxe2_vfs_disable(adapter, true);
+	else
+		ret = sxe2_vfs_enable(adapter, num_vfs);
+
+	LOG_INFO_BDF("%s %d vfs done.(ret:%d).\n", num_vfs ? "create" : "remove",
+		     num_vfs, ret);
+	if (ret)
+		goto l_err;
+
+	return num_vfs;
+
+l_err:
+	return ret;
+}
+
+void sxe2_vf_init(struct sxe2_adapter *adapter)
+{
+	u16 i;
+
+	mutex_init(&adapter->vf_ctxt.vfs_lock);
+
+	for (i = 0; i < SXE2_VF_NUM; i++)
+		mutex_init(&adapter->vf_ctxt.vf_node_e[i].vf_lock);
+}
+
+void sxe2_vf_deinit(struct sxe2_adapter *adapter)
+{
+	u16 i;
+
+	for (i = 0; i < SXE2_VF_NUM; i++)
+		mutex_destroy(&adapter->vf_ctxt.vf_node_e[i].vf_lock);
+
+	mutex_destroy(&adapter->vf_ctxt.vfs_lock);
+}
+
+/* callers pass the protocol in host byte order (see be16_to_cpu() in
+ * sxe2_set_vf_port_vlan()), so the parameter is a plain u16, not __be16
+ */
+static s32 sxe2_vf_port_vlan_check(struct sxe2_adapter *adapter, u16 vlan_id, u8 qos,
+				   u16 vlan_proto)
+{
+	if (vlan_id >= VLAN_N_VID || qos > SXE2_VLAN_QOS_MAX) {
+		LOG_DEV_ERR("vlan id:%d QoS %d invalid\n", vlan_id, qos);
+		return -EINVAL;
+	}
+
+	if (vlan_proto != ETH_P_8021Q && vlan_proto != ETH_P_8021AD) {
+		LOG_DEV_ERR("vlan_proto:0x%x invalid\n", vlan_proto);
+		return -EPROTONOSUPPORT;
+	}
+
+	return 0;
+}
+
+int sxe2_set_vf_port_vlan_inner(struct sxe2_adapter *adapter, int vf_idx,
+				u16 vlan_id, u8 qos, u16 protocol,
+				bool need_vf_reset)
+{
+	s32 ret;
+	struct sxe2_vlan vlan;
+	struct sxe2_vf_node *vf;
+
+	ret = sxe2_vf_port_vlan_check(adapter, vlan_id, qos, protocol);
+	if (ret)
+		return ret;
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		LOG_DEV_ERR("vf vlan cannot be configured - switchdev is enabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	vlan = SXE2_VLAN(protocol, vlan_id, qos);
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+	vf = sxe2_vf_node_get(adapter, (u16)vf_idx);
+	if (!vf) {
+		ret = -EINVAL;
+		goto l_unlock;
+	}
+
+	if (!memcmp(&vf->vlan_info.port_vlan, &vlan, sizeof(vlan))) {
+		LOG_INFO_BDF("vf:%u port vlan vlan_id:%u qos:%u protocol:0x%x already exists.\n",
+			     vf_idx, vlan_id, qos, protocol);
+		goto l_unlock;
+	}
+
+	ret
= sxe2_check_vf_ready_for_cfg(vf); + if (ret) { + LOG_ERROR_BDF("vf:%u pf flags:0x%lx vf states:0x%lx not ready.\n", + vf_idx, *adapter->flags, *vf->states); + goto l_unlock; + } + + memcpy(&vf->vlan_info.port_vlan, &vlan, sizeof(vlan)); + if (sxe2_port_vlan_is_exist(vf)) { + vf->vlan_info.port_vlan_exsit = true; + LOG_INFO_BDF("vf:%u port vlan vlan_id:%u qos:%u protocol:0x%x add.\n", + vf_idx, vlan_id, qos, protocol); + } else { + vf->vlan_info.port_vlan_exsit = false; + LOG_INFO_BDF("vf:%u port vlan delete.\n", vf_idx); + } + + if (need_vf_reset) { + ret = sxe2_reset_vf(adapter, (u16)vf_idx, SXE2_VF_RESET_FLAG_NOTIFY); + if (ret) + LOG_ERROR_BDF("vf:%u set port vlan:0x%x failed.(err:%d)\n", + vf_idx, *(u32 *)&vlan, ret); + } + +l_unlock: + mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx)); + return ret; +} + +int sxe2_set_vf_port_vlan(struct net_device *netdev, int vf_idx, u16 vlan_id, u8 qos, + __be16 protocol) +{ + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + + return sxe2_set_vf_port_vlan_inner(adapter, vf_idx, vlan_id, qos, + be16_to_cpu(protocol), true); +} + +struct sxe2_vsi *sxe2_vf_vsi_get(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev) +{ + struct sxe2_vsi *vsi = NULL; + struct sxe2_adapter *adapter = vf_node->adapter; + + lockdep_assert_held(SXE2_VF_NODE_LOCK(vf_node->adapter, + (u16)vf_node->vf_idx)); + + if (vsi_id_in_dev == vf_node->vsi_id[SXE2_VF_TYPE_ETH]) + vsi = vf_node->vsi; + else if (vsi_id_in_dev == vf_node->vsi_id[SXE2_VF_TYPE_DPDK]) + vsi = vf_node->dpdk_vf_vsi; + else + LOG_ERROR_BDF("invalid vsi id:%d.\n", vsi_id_in_dev); + + return vsi; +} + +s32 sxe2_vf_vsi_type_get(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev, + enum sxe2_vsi_type *type) +{ + struct sxe2_adapter *adapter = vf_node->adapter; + s32 ret = 0; + + lockdep_assert_held(SXE2_VF_NODE_LOCK(vf_node->adapter, + (u16)vf_node->vf_idx)); + + if (vsi_id_in_dev == vf_node->vsi_id[SXE2_VF_TYPE_ETH]) { + *type = SXE2_VSI_T_VF; + } else if (vsi_id_in_dev == vf_node->vsi_id[SXE2_VF_TYPE_DPDK]) { + *type = SXE2_VSI_T_DPDK_VF; + } else { + ret = -EINVAL; + LOG_ERROR_BDF("invalid vsi id:%d ret:%d.\n", vsi_id_in_dev, ret); + } + + return ret; +} + +void sxe2_vf_queues_stop(struct sxe2_vf_node *vf_node) +{ + struct sxe2_vsi *vsi; + struct sxe2_adapter *adapter = vf_node->adapter; + u16 i; + + for (i = 0; i < SXE2_VF_TYPE_NR; i++) { + vsi = sxe2_vf_vsi_get(vf_node, vf_node->vsi_id[i]); + if (!vsi) + continue; + + LOG_INFO_BDF("vsi:%u txqs disable start.\n", vsi->idx_in_dev); + if (sxe2_txqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u txqs disable failed.\n", + vsi->idx_in_dev); + + if (sxe2_rxqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u rxqs disable failed.\n", + vsi->idx_in_dev); + } +} + +void sxe2_vf_adv_cfg_clear(struct sxe2_vf_node *vf_node, bool is_vfr_vflr) +{ + struct sxe2_adapter *adapter = vf_node->adapter; + u16 vsi_id_in_dev; + + vsi_id_in_dev = vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + + if (!is_vfr_vflr) { + LOG_INFO_BDF("vsi %u pfr/pflr skip.\n", vsi_id_in_dev); + return; + } + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) + sxe2_vsi_l2_fltr_remove(adapter, vsi_id_in_dev); +} + +void sxe2_vf_trust_cfg_restore(struct sxe2_vf_node *vf_node) +{ + assign_bit(SXE2_VF_CAP_TRUSTED, vf_node->caps, vf_node->prop.trusted); +} + +STATIC s32 sxe2_vf_mac_cfg_restore(struct sxe2_vf_node *vf_node) +{ + s32 ret = 0; + u8 broadcast[ETH_ALEN]; + + if (sxe2_eswitch_is_offload(vf_node->adapter)) { + LOG_INFO("switchdev mode no need restore vf mac.\n"); + return ret; + } + 
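+	/* Legacy (non-switchdev) mode: re-add the broadcast entry plus any
+	 * administratively set MAC and its anti-spoof rule, all of which a
+	 * VF reset wipes from the filter tables.
+	 */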
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	eth_broadcast_addr(broadcast);
+	ret = sxe2_mac_rule_add(vf_node->vsi, broadcast);
+	if (ret) {
+		LOG_ERROR("vf:%u broadcast mac addr add failed.(err:%d).\n",
+			  vf_node->vf_idx, ret);
+		return ret;
+	}
+
+	vf_node->mac_cnt++;
+
+	if (!is_zero_ether_addr(vf_node->mac_addr.addr)) {
+		ret = sxe2_mac_rule_add(vf_node->vsi, vf_node->mac_addr.addr);
+		if (ret) {
+			LOG_ERROR("Failed to add MAC %pM for VF %d, error %d\n",
+				  vf_node->mac_addr.addr, vf_node->vf_idx, ret);
+			return ret;
+		}
+		ret = sxe2_mac_spoofchk_ext_rule_add(vf_node->adapter,
+						     vf_node->vsi->idx_in_dev,
+						     vf_node->mac_addr.addr);
+		if (ret) {
+			LOG_ERROR("Failed to add mac spoof ext rule %pM\t"
+				  "for VF %d vsi %u, error %d\n",
+				  vf_node->mac_addr.addr, vf_node->vf_idx,
+				  vf_node->vsi->idx_in_dev, ret);
+			(void)sxe2_mac_rule_del(vf_node->adapter,
+						vf_node->vsi->idx_in_dev,
+						vf_node->mac_addr.addr);
+			return ret;
+		}
+
+		vf_node->mac_cnt++;
+	}
+
+	return ret;
+}
+
+STATIC s32 sxe2_vf_port_vlan_recfg(struct sxe2_hw *hw, struct sxe2_vf_node *vf_node,
+				   u16 vlan_info, u16 tpid)
+{
+	s32 ret = 0;
+
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	(void)sxe2_hw_port_vlan_setup(hw, vf_node->vsi->idx_in_dev, vlan_info, tpid);
+	return 0;
+}
+
+STATIC s32 sxe2_vf_vlan_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+	struct sxe2_vlan *port_vlan;
+	u16 vlan_info;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (sxe2_eswitch_is_offload(vf_node->adapter)) {
+		LOG_INFO("switchdev mode no need restore vf vlan.\n");
+		return 0;
+	}
+
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (sxe2_port_vlan_is_exist(vf_node)) {
+		port_vlan = &vf_node->vlan_info.port_vlan;
+		vlan_info = (u16)(port_vlan->prio << VLAN_PRIO_SHIFT) |
+			    port_vlan->vid;
+		ret = sxe2_vf_port_vlan_recfg(hw, vf_node, vlan_info,
+					      port_vlan->tpid);
+		if (ret) {
+			LOG_ERROR_BDF("port vlan set failed, vsi[%u] tpid[0x%x],\t"
+				      "vid[%d], prio[%d].\n",
+				      vf_node->vsi->idx_in_dev, port_vlan->tpid,
+				      port_vlan->vid, port_vlan->prio);
+			goto l_err;
+		}
+
+		ret = sxe2_vlan_rule_add(vf_node->vsi, port_vlan);
+		if (ret && ret != -EEXIST)
+			goto l_rule_add_fail;
+
+		ret = sxe2_vlan_filter_control(adapter, vf_node->vsi->idx_in_dev,
+					       true);
+		if (ret)
+			goto l_vlan_filter_fail;
+
+	} else {
+		ret = sxe2_vsi_vlan_zero_add(vf_node->vsi);
+		if (ret)
+			goto l_err;
+	}
+
+	return 0;
+
+l_vlan_filter_fail:
+	(void)sxe2_vlan_rule_del(adapter, vf_node->vsi->idx_in_dev, port_vlan);
+l_rule_add_fail:
+	(void)sxe2_vf_port_vlan_recfg(hw, vf_node, 0, port_vlan->tpid);
+
+l_err:
+	return ret;
+}
+
+s32 sxe2_vf_vsi_port_vlan_cfg(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+	struct sxe2_vlan *port_vlan;
+	u16 vlan_info;
+	struct sxe2_hw *hw = &adapter->hw;
+
+	if (sxe2_eswitch_is_offload(vf_node->adapter)) {
+		LOG_INFO("switchdev mode no need cfg vf port vlan.\n");
+		return 0;
+	}
+
+	port_vlan = &vf_node->vlan_info.port_vlan;
+	vlan_info = (u16)(port_vlan->prio << VLAN_PRIO_SHIFT) | port_vlan->vid;
+
+	(void)sxe2_hw_port_vlan_setup(hw, vsi->idx_in_dev, vlan_info,
+				      port_vlan->tpid);
+
+	ret = sxe2_vlan_rule_add(vsi, port_vlan);
+	if (ret && ret != -EEXIST)
+		goto l_rule_add_fail;
+
+	ret = sxe2_vlan_filter_control(adapter, vsi->idx_in_dev, true);
+	if (ret)
+		goto l_vlan_filter_fail;
+
+	return 0;
+
+l_vlan_filter_fail:
+	(void)sxe2_vlan_rule_del(adapter, vsi->idx_in_dev, port_vlan);
+l_rule_add_fail:
+	(void)sxe2_hw_port_vlan_setup(hw, vsi->idx_in_dev, 0, port_vlan->tpid);
+
+	return ret;
+}
+
+STATIC s32 sxe2_vf_rate_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	s32 ret = 0;
+
+	if (vf_node->prop.min_tx_rate) {
+		ret = sxe2_txsched_vf_bw_lmt_cfg(vf_node->adapter, vf_node,
+						 SXE2_NODE_RL_TYPE_CIR,
+						 vf_node->prop.min_tx_rate * 1000);
+		if (ret) {
+			LOG_ERROR("vf:%u min rate:%u vsi:%u restore\t"
+				  "failed.(err:%d)\n",
+				  vf_node->vf_idx, vf_node->prop.min_tx_rate,
+				  vf_node->vsi->idx_in_dev, ret);
+		}
+	}
+	if (vf_node->prop.max_tx_rate) {
+		ret = sxe2_txsched_vf_bw_lmt_cfg(vf_node->adapter, vf_node,
+						 SXE2_NODE_RL_TYPE_EIR,
+						 vf_node->prop.max_tx_rate * 1000);
+		if (ret) {
+			LOG_ERROR("vf:%u max rate:%u vsi:%u restore\t"
+				  "failed.(err:%d)\n",
+				  vf_node->vf_idx, vf_node->prop.max_tx_rate,
+				  vf_node->vsi->idx_in_dev, ret);
+		}
+	}
+
+	return ret;
+}
+
+STATIC s32 sxe2_vf_spoofchk_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		LOG_DEBUG_BDF("switchdev mode no need restore spoofchk.\n");
+		return 0;
+	}
+
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = sxe2_vsi_spoofchk_control(vf_node->adapter, vf_node->vsi->idx_in_dev,
+					vf_node->prop.spoofchk);
+	if (ret) {
+		LOG_DEV_ERR("vf:%u spoofchk:%u vsi:%u restore failed.(err:%d)\n",
+			    vf_node->vf_idx, vf_node->prop.spoofchk,
+			    vf_node->vsi->idx_in_dev, ret);
+	}
+
+	return ret;
+}
+
+STATIC s32 sxe2_vf_default_etype_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		LOG_DEBUG_BDF("switchdev mode no need restore etype rule.\n");
+		return 0;
+	}
+
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = sxe2_etype_fltr_init(vf_node->vsi);
+	if (ret) {
+		LOG_DEV_ERR("vf:%u vsi:%u default_etype restore failed.(err:%d)\n",
+			    vf_node->vf_idx, vf_node->vsi->idx_in_dev, ret);
+	}
+
+	return ret;
+}
+
+STATIC s32 sxe2_vf_src_prune_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	if (sxe2_eswitch_is_offload(adapter)) {
+		LOG_DEBUG_BDF("switchdev mode no need restore src prune.\n");
+		return 0;
+	}
+
+	if (!vf_node->vsi) {
+		LOG_ERROR("vsi is null, vsi id:%d.\n",
+			  vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ret = sxe2_src_vsi_prune_control(adapter, vf_node->vsi->idx_in_dev, true);
+	if (ret) {
+		LOG_DEV_ERR("vf:%u vsi:%u src prune restore failed,\n"
+			    "inverse action cfg error.(err:%d)\n",
+			    vf_node->vf_idx, vf_node->vsi->idx_in_dev, ret);
+		return ret;
+	}
+
+	ret = sxe2_srcvsi_rule_add(vf_node->vsi);
+	if (ret) {
+		LOG_DEV_ERR("vf:%u vsi:%u src prune restore failed.(err:%d)\n",
+			    vf_node->vf_idx, vf_node->vsi->idx_in_dev, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+STATIC void sxe2_vf_cfg_restore(struct sxe2_vf_node *vf_node)
+{
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	sxe2_vf_trust_cfg_restore(vf_node);
+
+	if (sxe2_mac_spoofchk_rule_add(adapter, vf_node->vsi_id[SXE2_VF_TYPE_ETH]))
+		LOG_DEV_ERR("vf:%u vsi:%u mac spoofchk rule restore failed.\n",
+			    vf_node->vf_idx, vf_node->vsi_id[SXE2_VF_TYPE_ETH]);
+
+	if (sxe2_vf_mac_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u mac configure restore failed.\n",
+			    vf_node->vf_idx);
+
+	if (sxe2_vf_vlan_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u vlan configure restore failed.\n",
+			    vf_node->vf_idx);
+
+	if (sxe2_vf_rate_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u rate configure restore failed.\n",
+			    vf_node->vf_idx);
+
+	if (sxe2_vf_spoofchk_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u spoofchk configure restore failed.\n",
+			    vf_node->vf_idx);
+
+	if (sxe2_vf_default_etype_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u default etype configure restore failed.\n",
+			    vf_node->vf_idx);
+
+	if (sxe2_vf_src_prune_cfg_restore(vf_node))
+		LOG_DEV_ERR("vf:%u src prune configure restore failed.\n",
+			    vf_node->vf_idx);
+}
+
+s32 sxe2_sriov_vsi_rebuild(struct sxe2_vsi *vsi, bool is_vfr_vflr)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	bool init = true;
+	struct sxe2_hw *hw = &vsi->adapter->hw;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (is_vfr_vflr)
+		init = false;
+
+	ret = sxe2_vsi_rebuild(vsi, init);
+	if (ret) {
+		mutex_unlock(&adapter->vsi_ctxt.lock);
+		LOG_ERROR_BDF("vsi[%u][%u] init:%u rebuild failed.(err:%d.)\n",
+			      vsi->id_in_pf, vsi->idx_in_dev, init, ret);
+		return ret;
+	}
+
+	if (vsi->type == SXE2_VSI_T_VF)
+		sxe2_hw_l2tag_accept(hw, vsi->idx_in_dev);
+
+	LOG_INFO_BDF("vsi[%u][%u] rebuild done.(err:%d.)\n", vsi->id_in_pf,
+		     vsi->idx_in_dev, ret);
+
+	clear_bit(SXE2_VSI_S_DISABLE, vsi->state);
+
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+s32 sxe2_vf_rebuild(struct sxe2_vf_node *vf_node, bool is_vfr_vflr)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	ret = sxe2_sriov_vsi_rebuild(vf_node->vsi, is_vfr_vflr);
+	if (ret) {
+		LOG_ERROR_BDF("vf_idx:%u vsi rebuild failed.(err:%d)\n",
+			      vf_node->vf_idx, ret);
+		return ret;
+	}
+
+	sxe2_vf_cfg_restore(vf_node);
+
+	memset(&vf_node->vlan_info.vlan_offload, 0,
+	       sizeof(vf_node->vlan_info.vlan_offload));
+
+	return ret;
+}
+
+u32 sxe_calc_all_vfs_min_tx_rate(struct sxe2_adapter *adapter)
+{
+	u16 idx;
+	u16 vf_id_in_dev;
+	u32 rate = 0;
+	struct sxe2_vf_node *vf_node;
+
+	sxe2_for_each_vf(adapter, idx)
+	{
+		vf_id_in_dev = idx + adapter->vf_ctxt.vfid_base;
+		LOG_INFO_BDF("vf:%u vf_id_in_dev:%u get min rate.\n", idx,
+			     vf_id_in_dev);
+
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx));
+		vf_node = sxe2_vf_node_get(adapter, idx);
+		if (!vf_node) {
+			mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx));
+			continue;
+		}
+
+		if (sxe2_check_vf_ready_for_cfg(vf_node)) {
+			mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx));
+			continue;
+		}
+
+		rate += vf_node->prop.min_tx_rate;
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx));
+	}
+
+	return rate;
+}
+
+bool sxe2_min_tx_rate_oversubscribed(struct sxe2_adapter *adapter, s32 vf_idx,
+				     int min_tx_rate)
+{
+	u32 all_vfs_min_tx_rate;
+	u32 link_speed_mbps;
+	u32 new_all_vfs_min_tx_rate;
+	struct sxe2_vf_node *vf_node;
+
+#if defined(SXE2_HARDWARE_ASIC)
+	link_speed_mbps = adapter->link_ctxt.current_link_speed;
+#else
+	link_speed_mbps = SXE2_LINK_SPEED_10G;
+#endif
+	all_vfs_min_tx_rate = sxe_calc_all_vfs_min_tx_rate(adapter);
+
+	mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+	vf_node = sxe2_vf_node_get(adapter, (u16)vf_idx);
+	if (!vf_node) {
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		return false;
+	}
+
+	new_all_vfs_min_tx_rate = all_vfs_min_tx_rate - vf_node->prop.min_tx_rate +
+				  min_tx_rate;
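+	/* Swap this VF's current min rate for the requested one in the
+	 * aggregate; anything above link speed oversubscribes the port.
+	 */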
+	new_all_vfs_min_tx_rate = all_vfs_min_tx_rate -
+				  vf_node->prop.min_tx_rate + min_tx_rate;
+
+	mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+
+	if (new_all_vfs_min_tx_rate > link_speed_mbps) {
+		if (all_vfs_min_tx_rate > link_speed_mbps) {
+			LOG_INFO_BDF("The sum of min_tx_rate for all VFs\n"
+				     "is greater than the link speed\n");
+			LOG_INFO_BDF("Set the min_tx_rate to 0 on the VF(s)\n"
+				     "to resolve oversubscription\n");
+		}
+
+		LOG_ERROR_BDF("min_tx_rate of %d Mbps on VF %d\n"
+			      "would cause oversubscription of %u Mbps\n"
+			      "based on the current link speed %u Mbps\n",
+			      min_tx_rate, vf_idx,
+			      all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+			      link_speed_mbps);
+
+		return true;
+	}
+
+	return false;
+}
+
+u16 sxe2_vf_num_get(struct sxe2_adapter *adapter)
+{
+	return adapter->vf_ctxt.num_vfs;
+}
+
+static void sxe2_vf_l2_fltr_cnt_clear(struct sxe2_vf_node *vf_node)
+{
+	vf_node->mac_cnt = 0;
+	vf_node->vlan_info.vlan_cnt = 0;
+}
+
+void sxe2_rss_clean_for_vf(struct sxe2_vsi *vsi, bool need_clear_hw)
+{
+	u8 *lut = NULL;
+	u8 *hash_key = NULL;
+	s32 ret = 0;
+	struct sxe2_adapter *adapter;
+
+	if (!vsi)
+		return;
+
+	adapter = vsi->adapter;
+	if (sxe2_is_safe_mode(adapter)) {
+		LOG_DEV_ERR("sxe2 rss in safe mode is not supported.\n");
+		return;
+	}
+
+	if (!need_clear_hw) {
+		LOG_INFO_BDF("not vfr, no need clear vf hw rss cfg!\n");
+		return;
+	}
+
+	(void)sxe2_rss_delete_vsi_flows_for_vfr(&adapter->rss_flow_ctxt,
+						vsi->id_in_pf);
+
+	lut = kzalloc(vsi->rss_ctxt.lut_size, GFP_KERNEL);
+	if (!lut) {
+		LOG_ERROR_BDF("no memory for lut!\n");
+		goto hkey_clean;
+	}
+	ret = sxe2_fwc_rss_lut_set(vsi, lut, vsi->rss_ctxt.lut_size);
+	if (ret != 0) {
+		LOG_ERROR_BDF("sxe2_fwc_rss_lut_set failed, ret: %d, lut: %p, lut_size:%u\n",
+			      ret, lut, vsi->rss_ctxt.lut_size);
+	}
+	kfree(lut);
+
+hkey_clean:
+	hash_key = kzalloc(SXE2_RSS_HASH_KEY_SIZE, GFP_KERNEL);
+	if (!hash_key) {
+		LOG_ERROR_BDF("no memory for hkey!\n");
+		goto l_end;
+	}
+	ret = sxe2_fwc_rss_hkey_set(vsi, hash_key);
+	if (ret != 0)
+		LOG_ERROR_BDF("sxe2_fwc_rss_hkey_set failed, ret: %d, key: %p\n",
+			      ret, hash_key);
+	kfree(hash_key);
+
+l_end:
+	LOG_INFO_BDF("sxe2 vsi rss for vf clean done, id=%u type=%u ret=%d !\n",
+		     vsi->id_in_pf, vsi->type, ret);
+}
+
+void sxe2_ipsec_vf_sa_clear(struct sxe2_adapter *adapter, u32 vf_id)
+{
+	struct sxe2_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+	u32 i;
+
+	mutex_lock(&adapter->ipsec_ctxt.context_lock);
+
+	for (i = 0; i < ipsec->max_rx_sa_cnt; i++) {
+		if (!ipsec->rx_sa_table[i].used)
+			continue;
+
+		if (ipsec->rx_sa_table[i].is_vf &&
+		    ipsec->rx_sa_table[i].vf_id == vf_id) {
+			hash_del_rcu(&ipsec->rx_sa_table[i].hlist);
+			sxe2_ipsec_rx_state_free(adapter, &ipsec->rx_sa_table[i]);
+		}
+	}
+
+	for (i = 0; i < ipsec->max_tx_sa_cnt; i++) {
+		if (!ipsec->tx_sa_table[i].used)
+			continue;
+
+		if (ipsec->tx_sa_table[i].is_vf &&
+		    ipsec->tx_sa_table[i].vf_id == vf_id)
+			sxe2_ipsec_tx_state_free(adapter, &ipsec->tx_sa_table[i]);
+	}
+
+	mutex_unlock(&adapter->ipsec_ctxt.context_lock);
+}
+
+void sxe2_vf_dpdk_cfg_clear(struct sxe2_vf_node *vf_node, bool is_vfr_vflr)
+{
+	struct sxe2_vsi *vsi;
+	struct sxe2_adapter *adapter = vf_node->adapter;
+
+	vsi = vf_node->dpdk_vf_vsi;
+	if (!vsi) {
+		LOG_WARN_BDF("vf:%u no dpdk.\n", vf_node->vf_idx);
+		return;
+	}
+
+	if (vsi->txsched.node)
+		(void)sxe2_txsch_ucmd_subtree_del(adapter, vsi->idx_in_dev,
+						  vsi->txsched.node->info.node_teid,
+						  true);
+
+	sxe2_vf_vsi_destroy_by_id(vf_node, vsi->idx_in_dev);
+}
+
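+/*
+ * Tear down everything the VF may have programmed (RSS, FNAV, ACL,
+ * advanced config, IPsec SAs), then rebuild the VSI and replay the
+ * PF-owned per-VF settings via sxe2_vf_rebuild().
+ */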
+STATIC s32 sxe2_vf_eth_clean_and_rebuild(struct sxe2_vf_node *vf_node,
+					 bool is_vfr_vflr)
+{
+	struct sxe2_adapter *adapter = vf_node->adapter;
+	s32 ret;
+
+	sxe2_vf_l2_fltr_cnt_clear(vf_node);
+
+	sxe2_vf_res_map(vf_node);
+
+	if (!vf_node->vsi) {
+		LOG_WARN_BDF("vf:%u eth vsi not cfg yet.\n", vf_node->vf_idx);
+		return 0;
+	}
+
+	sxe2_rss_clean_for_vf(vf_node->vsi, is_vfr_vflr);
+
+	sxe2_fnav_clean_by_vsi(vf_node->vsi, is_vfr_vflr);
+
+	sxe2_vsi_acl_deinit(vf_node->vsi);
+
+	sxe2_vf_adv_cfg_clear(vf_node, is_vfr_vflr);
+
+	sxe2_ipsec_vf_sa_clear(adapter, vf_node->vf_idx);
+
+	ret = sxe2_vf_rebuild(vf_node, is_vfr_vflr);
+	if (ret) {
+		LOG_ERROR_BDF("vf:%u vsi rebuild failed during reset.\n",
+			      vf_node->vf_idx);
+		goto l_end;
+	}
+
+	sxe2_vf_repr_rebuild(vf_node->vsi, is_vfr_vflr);
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_vf_clean_and_rebuild(struct sxe2_vf_node *vf_node, bool is_vfr_vflr)
+{
+	if (!test_bit(SXE2_VF_STATE_DIS, vf_node->states))
+		return 0;
+
+	sxe2_vf_dpdk_cfg_clear(vf_node, is_vfr_vflr);
+
+	return sxe2_vf_eth_clean_and_rebuild(vf_node, is_vfr_vflr);
+}
+
+s32 sxe2_vf_reset_notify(struct sxe2_adapter *adapter, struct sxe2_vf_node *vf_node)
+{
+	struct sxe2_cmd_params params = {};
+
+	if (!test_bit(SXE2_VF_STATE_ACTIVE, vf_node->states)) {
+		LOG_INFO_BDF("vf:%u not activated, no send vf reset notify.\n",
+			     vf_node->vf_idx);
+		return 0;
+	}
+
+	sxe2_mbx_msg_params_fill(&params, SXE2_VF_RESET_NOTIFY, NULL, 0,
+				 vf_node->vf_idx, true);
+	return sxe2_mbx_msg_send(adapter, &params);
+}
+
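+/*
+ * Report the kernel and DPDK VSI ids of every VF for representor setup;
+ * 0xFFFF marks a slot without a backing VSI.
+ */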
+void sxe2_vfs_vsi_id_get(struct sxe2_adapter *adapter,
+			 struct sxe2_drv_vsi_caps *repr_vf_id)
+{
+	struct sxe2_vf_node *vf_node;
+	u16 num_vfs;
+	u16 vf_idx = 0;
+
+	num_vfs = sxe2_vf_num_get(adapter);
+	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) {
+		mutex_lock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+		vf_node = sxe2_vf_node_get(adapter, vf_idx);
+		if (!vf_node) {
+			repr_vf_id[vf_idx].kernel_vsi_id = 0xFFFF;
+			repr_vf_id[vf_idx].dpdk_vsi_id = 0xFFFF;
+			repr_vf_id[vf_idx].vsi_type = SXE2_VSI_T_VF;
+			repr_vf_id[vf_idx].func_id = vf_idx;
+			mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+			continue;
+		}
+		if (vf_node->vsi)
+			repr_vf_id[vf_idx].kernel_vsi_id = vf_node->vsi->idx_in_dev;
+		else
+			repr_vf_id[vf_idx].kernel_vsi_id = 0xFFFF;
+		if (vf_node->dpdk_vf_vsi)
+			repr_vf_id[vf_idx].dpdk_vsi_id =
+				vf_node->dpdk_vf_vsi->idx_in_dev;
+		else
+			repr_vf_id[vf_idx].dpdk_vsi_id = 0xFFFF;
+		repr_vf_id[vf_idx].vsi_type = SXE2_VSI_T_VF;
+		repr_vf_id[vf_idx].func_id = vf_idx;
+		mutex_unlock(SXE2_VF_NODE_LOCK(adapter, vf_idx));
+	}
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.h
new file mode 100644
index 0000000000000000000000000000000000000000..544f111fd57c702be6da6a9f8f88c49142abb9d2
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_sriov.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_sriov.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_SRIOV_H__
+#define __SXE2_SRIOV_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "sxe2_fnav.h"
+#include "sxe2_switch.h"
+#include "sxe2_vsi.h"
+#include "sxe2_mbx_public.h"
+#include "sxe2_drv_aux.h"
+#include "sxe2_com_cdev.h"
+
+struct sxe2_adapter;
+struct sxe2_drv_vsi_caps;
+
+enum sxe2_vf_caps {
+	SXE2_VF_CAP_TRUSTED = 0,
+	SXE2_VF_CAP_NBITS,
+};
+
+enum sxe2_vf_states {
+	SXE2_VF_STATE_ACTIVE = 0,
+	SXE2_VF_STATE_DIS,
+	SXE2_VF_STATE_MC_PROMISC,
+	SXE2_VF_STATE_UC_PROMISC,
+	SXE2_VF_STATE_REPLAY_VC,
+	SXE2_VF_STATES_NBITS
+};
+
+#define SXE2_VF_IDX_MASK 0xFF
+
+#define SXE2_VF_IDX(vf_id) ((vf_id) & SXE2_VF_IDX_MASK)
+
+#define SXE2_VF_EVENT_MSIX_NUM 1
+
+#define SXE2_VF_CEQ_CTRL_MASK 0xBFFFFFFF
+
+#define SXE2_VF_MACADDR_CNT_MAX 18
+#define SXE2_VF_VLAN_CNT_MAX 8
+
+#define SXE2_VF_64Q_MSIX_NUM (64 + SXE2_VF_EVENT_MSIX_NUM)
+#define SXE2_VF_32Q_MSIX_NUM (32 + SXE2_VF_EVENT_MSIX_NUM)
+#define SXE2_VF_16Q_MSIX_NUM (16 + SXE2_VF_EVENT_MSIX_NUM)
+#define SXE2_VF_4Q_MSIX_NUM (4 + SXE2_VF_EVENT_MSIX_NUM)
+#define SXE2_VF_2Q_MSIX_NUM (2 + SXE2_VF_EVENT_MSIX_NUM)
+#define SXE2_VF_1Q_MSIX_NUM (1 + SXE2_VF_EVENT_MSIX_NUM)
+
+#define SXE2_VF_REFCNT_WAIT (3000)
+#define SXE2_VF_REFCNT_WAIT_INTERNAL (1)
+
+#define SXE2_VF_RESET_DONE_WAIT_COUNT (1000)
+#define SXE2_VF_QUEUE_CNT_MIN (1)
+#define SXE2_VF_QUEUE_CNT_MAX (SXE2_VF_ETH_Q_NUM + SXE2_VF_DPDK_Q_NUM)
+
+#define SXE2_VF_ESW_CNT (2)
+
+#define sxe2_for_each_vf(adapter, i) \
+	for ((i) = 0; (i) < ((adapter)->vf_ctxt.num_vfs); (i)++)
+
+#define SXE2_VF_NODE_LOCK(adapter, vf_id) \
+	(&((adapter)->vf_ctxt.vf_node_e[(vf_id)].vf_lock))
+
+#define SXE2_VF_NODE_E(adapter, vf_id) \
+	(&((adapter)->vf_ctxt.vf_node_e[(vf_id)]))
+
+#define SXE2_VF_NODE(adapter, vf_id) \
+	((adapter)->vf_ctxt.vf_node_e[(vf_id)].vf_node)
+
+enum sxe2_vf_type {
+	SXE2_VF_TYPE_ETH = 0,
+	SXE2_VF_TYPE_DPDK,
+	SXE2_VF_TYPE_NR
+};
+
+struct sxe2_vf_node_e {
+	struct sxe2_vf_node *vf_node;
+	struct mutex vf_lock;
+};
+
+struct sxe2_vf_context {
+	struct mutex vfs_lock;
+	struct sxe2_adapter *adapter;
+	u16 max_vfs;
+	u16 num_vfs;
+	u16 vfid_base;
+	u16 q_cnt;
+	u16 irq_cnt;
+	u16 irq_base;
+	struct sxe2_vf_node_e vf_node_e[SXE2_VF_NUM];
+};
+
+struct sxe2_vf_prop {
+	u8 mac_from_pf : 1;
+	u8 trusted : 1;
+	u8 spoofchk : 1;
+	u8 link_forced : 1;
+	u8 link_up : 1;
+	u32 min_tx_rate;
+	u32 max_tx_rate;
+};
+
+enum sxe2_rst_type {
+	SXE2_RST_TYPE_NO_RESET = 0,
+	SXE2_RST_TYPE_VM_RESET,
+	SXE2_RST_TYPE_VF_RESET,
+};
+
+struct sxe2_vf_ops {
+	enum sxe2_rst_type reset_type;
+	void (*free)(struct sxe2_vf_node *vf_node);
+	void (*clear_mbx_reg)(struct sxe2_vf_node *vf_node);
+	void (*trigger_reset_register)(struct sxe2_vf_node *vf_node, bool is_vflr);
+	bool (*poll_reset_status)(struct sxe2_vf_node *vf_node);
+	void (*clear_reset_trigger)(struct sxe2_vf_node *vf_node);
+	int (*vsi_rebuild)(struct sxe2_vf_node *vf_node);
+	void (*post_vsi_rebuild)(struct sxe2_vf_node *vf_node);
+	void (*cfg_rdma_irq_map)(struct sxe2_vf_node *vf, struct aux_qv_info *qv_info);
+	void (*clear_rdma_irq_map)(struct sxe2_vf_node *vf, struct aux_qv_info *qv_info);
+};
+
+struct sxe2_vf_vlaninfo {
+	struct sxe2_vlan port_vlan;
+	u8 port_vlan_exsit;
+	u16 max_cnt;
+	u16 vlan_cnt;
+	struct sxe2_user_vlan_offload_cfg vlan_offload;
+};
+
+struct sxe2_vf_node {
+	struct sxe2_adapter *adapter;
+	struct sxe2_vsi *vsi;
+	struct 
sxe2_vsi *dpdk_vf_vsi; + u16 vsi_id[SXE2_VF_TYPE_NR]; + struct sxe2_vf_addr mac_addr; + struct sxe2_vf_vlaninfo vlan_info; + struct sxe2_mbx_msg_table *msg_table; + const struct sxe2_vf_ops *vf_ops; + struct sxe2_vf_prop prop; + u16 irq_base_idx; + u16 vf_idx; + u32 mac_cnt; + DECLARE_BITMAP(states, SXE2_VF_STATES_NBITS); + DECLARE_BITMAP(caps, SXE2_VF_CAP_NBITS); + struct sxe2_vf_repr *repr; + struct sxe2_vf_ver_msg vf_ver; + bool user_repr_valid; + struct mutex repr_cfg_lock; + enum sxe2_com_module mode; +}; + +static inline u16 sxe2_vf_port_vid_get(struct sxe2_vf_node *vf) +{ + return vf->vlan_info.port_vlan.vid; +} + +static inline u8 sxe2_vf_port_vprio_get(struct sxe2_vf_node *vf) +{ + return vf->vlan_info.port_vlan.prio; +} + +static inline u16 sxe2_vf_port_tpid_get(struct sxe2_vf_node *vf) +{ + return vf->vlan_info.port_vlan.tpid; +} + +static inline bool sxe2_port_vlan_is_exist(struct sxe2_vf_node *vf) +{ + return (sxe2_vf_port_vid_get(vf) || sxe2_vf_port_vprio_get(vf)); +} + +struct sxe2_vf_node *sxe2_vf_node_get(struct sxe2_adapter *adapter, u16 vf_id); + +bool sxe2_vf_is_trusted(struct sxe2_vf_node *vf); + +bool sxe2_vf_set_mac_is_allow(struct sxe2_vf_node *vf); + +int sxe2_sriov_configure(struct pci_dev *pdev, int num_vfs); + +void sxe2_vf_init(struct sxe2_adapter *adapter); + +void sxe2_vf_deinit(struct sxe2_adapter *adapter); + +int sxe2_set_vf_port_vlan(struct net_device *netdev, int vf_idx, u16 vlan_id, + u8 qos, __be16 protocol); + +int sxe2_set_vf_port_vlan_inner(struct sxe2_adapter *adapter, int vf_idx, + u16 vlan_id, u8 qos, u16 protocol, + bool need_vf_reset); + +u32 sxe_calc_all_vfs_min_tx_rate(struct sxe2_adapter *adapter); + +bool sxe2_min_tx_rate_oversubscribed(struct sxe2_adapter *adapter, s32 vf_idx, int min_tx_rate); + +void sxe2_vf_queues_stop(struct sxe2_vf_node *vf_node); + +void sxe2_vf_adv_cfg_clear(struct sxe2_vf_node *vf_node, bool is_vfr_vflr); + +s32 sxe2_vf_rebuild(struct sxe2_vf_node *vf_node, bool is_vfr_vflr); + +u16 sxe2_vf_num_get(struct sxe2_adapter *adapter); + +s32 sxe2_vf_reset_notify(struct sxe2_adapter *adapter, struct sxe2_vf_node *vf_node); + +s32 sxe2_vf_clean_and_rebuild(struct sxe2_vf_node *vf_node, bool is_vfr_vflr); + +s32 sxe2_sriov_vsi_rebuild(struct sxe2_vsi *vsi, bool is_vfr_vflr); + +s32 sxe2_vfs_disable(struct sxe2_adapter *adapter, bool is_remove); + +bool sxe2_vf_is_exist(struct sxe2_adapter *adapter); + +void sxe2_vfs_active(struct sxe2_adapter *adapter); + +s32 sxe2_vf_id_check(struct sxe2_adapter *adapter, u16 vf_idx); + +struct sxe2_vsi *sxe2_vf_vsi_get(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev); + +void sxe2_vfs_vsi_id_get(struct sxe2_adapter *adapter, struct sxe2_drv_vsi_caps *repr_vf_id); + +s32 sxe2_vf_base_l2_filter_setup(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi); + +void sxe2_vf_dpdk_cfg_clear(struct sxe2_vf_node *vf_node, bool is_vfr_vflr); + +s32 sxe2_vf_vsi_type_get(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev, enum sxe2_vsi_type *type); + +void sxe2_vf_vsi_destroy_by_id(struct sxe2_vf_node *vf_node, u16 vsi_id_in_dev); + +s32 sxe2_vf_vsi_port_vlan_cfg(struct sxe2_vf_node *vf_node, struct sxe2_vsi *vsi); + +void sxe2_vf_trust_cfg_restore(struct sxe2_vf_node *vf_node); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.c new file mode 100644 index 0000000000000000000000000000000000000000..83b93ac751dd402622b01a814cd46056528cc4d1 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.c @@ -0,0 
+1,6075 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_switch.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_switch.h"
+#include "sxe2_cmd.h"
+#include "sxe2_vsi.h"
+#include "sxe2.h"
+#include "sxe2_log.h"
+#include "sxe2_netdev.h"
+#include "sxe2_hw.h"
+#include "sxe2_common.h"
+#include "sxe2_tc.h"
+#include "sxe2_dcb.h"
+#include "sxe2_rx.h"
+#include <linux/jhash.h>
+
+#ifdef SXE2_CFG_DEBUG
+STATIC int l2_force_fkot;
+module_param(l2_force_fkot, int, 0);
+MODULE_PARM_DESC(l2_force_fkot,
+		 "l2 switch rule force add fkot (0=false(default), 1=true)");
+STATIC int tc_force_fkot;
+module_param(tc_force_fkot, int, 0);
+MODULE_PARM_DESC(tc_force_fkot,
+		 "tc switch rule force add fkot (0=false(default), 1=true)");
+#endif
+
+#define SWITCH_RULE_ACT_INFO_CPY(dst_rule_info, src_rule_info) \
+	do { \
+		memcpy(&(dst_rule_info)->act, &(src_rule_info)->act, \
+		       sizeof(struct sxe2_rule_action)); \
+		(dst_rule_info)->tcf_fltr->action = \
+			(src_rule_info)->tcf_fltr->action; \
+		(dst_rule_info)->tcf_fltr->cookie = \
+			(src_rule_info)->tcf_fltr->cookie; \
+		(dst_rule_info)->tcf_fltr->prio = \
+			(src_rule_info)->tcf_fltr->prio; \
+		(dst_rule_info)->tcf_fltr->dst_vsi_id = \
+			(src_rule_info)->tcf_fltr->dst_vsi_id; \
+		memcpy((dst_rule_info)->tcf_fltr->dst_vsi_map, \
+		       (src_rule_info)->tcf_fltr->dst_vsi_map, \
+		       sizeof((dst_rule_info)->tcf_fltr->dst_vsi_map)); \
+		(dst_rule_info)->tcf_fltr->src_vsi_id = \
+			(src_rule_info)->tcf_fltr->src_vsi_id; \
+		(dst_rule_info)->tcf_fltr->backup_type = \
+			(src_rule_info)->tcf_fltr->backup_type; \
+		(dst_rule_info)->vsi_list = (src_rule_info)->vsi_list; \
+	} while (0)
+
+#define SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(dst_tc_rule_info, src_rule_info) \
+	do { \
+		memcpy(&(dst_tc_rule_info)->act, &(src_rule_info)->act, \
+		       sizeof(struct sxe2_rule_action)); \
+		(dst_tc_rule_info)->action = \
+			(src_rule_info)->tcf_fltr->action; \
+		(dst_tc_rule_info)->cookie = \
+			(src_rule_info)->tcf_fltr->cookie; \
+		(dst_tc_rule_info)->prio = (src_rule_info)->tcf_fltr->prio; \
+		memcpy((dst_tc_rule_info)->dst_vsi_map, \
+		       (src_rule_info)->tcf_fltr->dst_vsi_map, \
+		       sizeof((dst_tc_rule_info)->dst_vsi_map)); \
+		(dst_tc_rule_info)->src_vsi_id = \
+			(src_rule_info)->tcf_fltr->src_vsi_id; \
+		(dst_tc_rule_info)->dst_vsi_id = \
+			(src_rule_info)->tcf_fltr->dst_vsi_id; \
+		(dst_tc_rule_info)->backup_type = \
+			(src_rule_info)->tcf_fltr->backup_type; \
+		(dst_tc_rule_info)->vsi_list = (src_rule_info)->vsi_list; \
+	} while (0)
+
+#define SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(dst_rule_info, src_tc_rule_info) \
+	do { \
+		memcpy(&(dst_rule_info)->act, &(src_tc_rule_info)->act, \
+		       sizeof(struct sxe2_rule_action)); \
+		(dst_rule_info)->tcf_fltr->action = \
+			(src_tc_rule_info)->action; \
+		(dst_rule_info)->tcf_fltr->cookie = \
+			(src_tc_rule_info)->cookie; \
+		(dst_rule_info)->tcf_fltr->prio = (src_tc_rule_info)->prio; \
+		memcpy((dst_rule_info)->tcf_fltr->dst_vsi_map, \
+		       (src_tc_rule_info)->dst_vsi_map, \
+		       sizeof((dst_rule_info)->tcf_fltr->dst_vsi_map)); \
+		(dst_rule_info)->tcf_fltr->dst_vsi_id = \
+			(src_tc_rule_info)->dst_vsi_id; \
+		(dst_rule_info)->tcf_fltr->src_vsi_id = \
+			(src_tc_rule_info)->src_vsi_id; \
+		(dst_rule_info)->tcf_fltr->backup_type = \
+			(src_tc_rule_info)->backup_type; \
+		(dst_rule_info)->vsi_list = (src_tc_rule_info)->vsi_list; \
+	} while (0)
+
+#define SWITCH_TC_RULE_ACT_INFO_GET_FROM_TC_RULE(dst_tc_rule_info, src_tc_rule_info) \
+	do { \
+		
memcpy(&(dst_tc_rule_info)->act, &(src_tc_rule_info)->act, \ + sizeof(struct sxe2_rule_action)); \ + (dst_tc_rule_info)->action = \ + (src_tc_rule_info)->action; \ + (dst_tc_rule_info)->cookie = \ + (src_tc_rule_info)->cookie; \ + (dst_tc_rule_info)->prio = (src_tc_rule_info)->prio; \ + memcpy((dst_tc_rule_info)->dst_vsi_map, \ + (src_tc_rule_info)->dst_vsi_map, \ + sizeof((dst_tc_rule_info)->dst_vsi_map)); \ + (dst_tc_rule_info)->src_vsi_id = \ + (src_tc_rule_info)->src_vsi_id; \ + (dst_tc_rule_info)->dst_vsi_id = \ + (src_tc_rule_info)->dst_vsi_id; \ + (dst_tc_rule_info)->backup_type = \ + (src_tc_rule_info)->backup_type; \ + (dst_tc_rule_info)->vsi_list = (src_tc_rule_info)->vsi_list; \ + } while (0) + +#define LOG_SWITCH_RULE_OPT(ret, fmt, ...) \ + do { \ + if (ret == -EEXIST || ret == -ENOENT) { \ + LOG_WARN_BDF(fmt, ##__VA_ARGS__); \ + } else if (ret) { \ + LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \ + } else { \ + LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +struct sxe2_prot_entry { + enum sxe2_protocol_filed_type type; + u8 prot_id; +}; + +static struct sxe2_prot_entry sxe2_prot_id_tbl[SXE2_PROT_FIELD_LAST] = { + { SXE2_META_PKT_SRC, SXE2_META_HW }, + { SXE2_META_PKT_DIRECTION, SXE2_META_HW }, + { SXE2_META_VSI_NUM, SXE2_META_HW }, + { SXE2_META_PKT_TO_RDMA, SXE2_META_HW }, + { SXE2_OUTER_SMAC, SXE2_MAC_OL_HW }, + { SXE2_OUTER_DMAC, SXE2_MAC_OL_HW }, + { SXE2_INNER_SMAC, SXE2_MAC_IL_HW }, + { SXE2_INNER_DMAC, SXE2_MAC_IL_HW }, + { SXE2_OUTER_ETYPE, SXE2_ETYPE_OL_HW }, + { SXE2_INNER_ETYPE, SXE2_ETYPE_IL_HW }, + { SXE2_OUTER_VLAN_EX, SXE2_VLAN_EX_HW }, + { SXE2_OUTER_VLAN, SXE2_VLAN_OL_HW }, + { SXE2_OUTER_IPV4_SADDR, SXE2_IPV4_OL_HW }, + { SXE2_OUTER_IPV4_DADDR, SXE2_IPV4_OL_HW }, + { SXE2_OUTER_IPV4_TTL, SXE2_IPV4_OL_HW }, + { SXE2_OUTER_IPV4_TOS, SXE2_IPV4_OL_HW }, + { SXE2_OUTER_IPV4_PROT, SXE2_IPV4_OL_HW }, + { SXE2_INNER_IPV4_SADDR, SXE2_IPV4_IL_HW }, + { SXE2_INNER_IPV4_DADDR, SXE2_IPV4_IL_HW }, + { SXE2_INNER_IPV4_TTL, SXE2_IPV4_IL_HW }, + { SXE2_INNER_IPV4_TOS, SXE2_IPV4_IL_HW }, + { SXE2_INNER_IPV4_PROT, SXE2_IPV4_IL_HW }, + { SXE2_OUTER_IPV6_SADDR, SXE2_IPV6_OL_HW }, + { SXE2_OUTER_IPV6_DADDR, SXE2_IPV6_OL_HW }, + { SXE2_INNER_IPV6_SADDR, SXE2_IPV6_IL_HW }, + { SXE2_INNER_IPV6_DADDR, SXE2_IPV6_IL_HW }, + { SXE2_LAST_TCP_SPORT, SXE2_TCP_IL_HW }, + { SXE2_LAST_TCP_DPORT, SXE2_TCP_IL_HW }, + { SXE2_OUTER_UDP_SPORT, SXE2_UDP_OL_HW }, + { SXE2_OUTER_UDP_DPORT, SXE2_UDP_OL_HW }, + { SXE2_INNER_UDP_SPORT, SXE2_UDP_IL_HW }, + { SXE2_INNER_UDP_DPORT, SXE2_UDP_IL_HW }, + { SXE2_VXLAN_ENC_ID, SXE2_UDP_OL_HW }, + { SXE2_GENEVE_ENC_ID, SXE2_UDP_OL_HW }, + { SXE2_NVGRE_ENC_ID, SXE2_GRE_HW }, +}; + +static u32 sxe2_tc_rule_hash_func(const void *data) +{ + u32 hash; + struct sxe2_tcf_fltr *fltr = (struct sxe2_tcf_fltr *)data; + + hash = jhash(&fltr->tunnel_type, sizeof(fltr->tunnel_type), 0); + hash = jhash(&fltr->src_type, sizeof(fltr->src_type), hash); + hash = jhash(fltr->lkup_index, sizeof(fltr->lkup_index), hash); + hash = jhash(fltr->lkup_value, sizeof(fltr->lkup_value), hash); + hash = jhash(fltr->lkup_mask, sizeof(fltr->lkup_mask), hash); + hash = jhash(fltr->profiles, sizeof(fltr->profiles), hash); + + return hash; +} + +static s32 sxe2_tc_rule_hash_cmp(struct sxe2_tcf_fltr *fltr_save, + struct sxe2_tcf_fltr *fltr_to_find) +{ + if (fltr_save->tunnel_type != fltr_to_find->tunnel_type || + fltr_save->src_type != fltr_to_find->src_type) { + return 1; + } + + if (memcmp(fltr_save->lkup_index, fltr_to_find->lkup_index, + sizeof(fltr_save->lkup_index))) { + return 1; + } + + 
if (memcmp(fltr_save->lkup_mask, fltr_to_find->lkup_mask,
+		   sizeof(fltr_save->lkup_mask))) {
+		return 1;
+	}
+
+	if (memcmp(fltr_save->lkup_value, fltr_to_find->lkup_value,
+		   sizeof(fltr_save->lkup_value))) {
+		return 1;
+	}
+
+	if (memcmp(fltr_save->profiles, fltr_to_find->profiles,
+		   sizeof(fltr_save->profiles))) {
+		return 1;
+	}
+
+	return 0;
+}
+
+struct sxe2_tc_rule_hash *sxe2_hash_cookie_find(struct sxe2_adapter *adapter,
+						unsigned long cookie)
+{
+	u32 key;
+	struct sxe2_tc_rule_hash *rule_hash_node;
+
+	key = jhash(&cookie, sizeof(cookie), 0);
+	LOG_INFO_BDF("cookie hash key is:%u\n", key);
+	hash_for_each_possible(adapter->switch_ctxt.complex_recipe.ht_cookie,
+			       rule_hash_node, node, key) {
+		if (rule_hash_node->cookie == cookie)
+			return rule_hash_node;
+	}
+	return NULL;
+}
+
+static void sxe2_hash_cookie_del(struct sxe2_adapter *adapter,
+				 unsigned long cookie)
+{
+	u32 key;
+	struct sxe2_tc_rule_hash *rule_hash_node;
+
+	key = jhash(&cookie, sizeof(cookie), 0);
+	LOG_INFO_BDF("cookie hash key is:%u\n", key);
+	hash_for_each_possible(adapter->switch_ctxt.complex_recipe.ht_cookie,
+			       rule_hash_node, node, key) {
+		if (rule_hash_node->cookie == cookie) {
+			hash_del(&rule_hash_node->node);
+			kfree(rule_hash_node);
+			break;
+		}
+	}
+}
+
+static s32 sxe2_hash_cookie_add(struct sxe2_adapter *adapter,
+				unsigned long cookie, struct sxe2_rule_info *rule_info)
+{
+	u32 key;
+	s32 ret = 0;
+	struct sxe2_tc_rule_hash *rule_hash_node;
+
+	if (sxe2_hash_cookie_find(adapter, cookie)) {
+		LOG_ERROR_BDF("cookie already exists in hash table\n");
+		ret = -EEXIST;
+		goto l_end;
+	}
+
+	key = jhash(&cookie, sizeof(cookie), 0);
+	LOG_INFO_BDF("cookie hash key is:%u\n", key);
+	rule_hash_node = kzalloc(sizeof(*rule_hash_node), GFP_KERNEL);
+	if (!rule_hash_node) {
+		LOG_ERROR_BDF("alloc memory failed\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+	rule_hash_node->cookie = cookie;
+	rule_hash_node->rule_info = rule_info;
+	hash_add(adapter->switch_ctxt.complex_recipe.ht_cookie,
+		 &rule_hash_node->node, key);
+
+l_end:
+	return ret;
+}
+
+static void *sxe2_hash_lkup_find(struct sxe2_adapter *adapter,
+				 struct sxe2_tcf_fltr *fltr)
+{
+	u32 key;
+	struct sxe2_tcf_fltr *save_fltr;
+
+	key = sxe2_tc_rule_hash_func(fltr);
+	LOG_INFO_BDF("lkup hash key is:%u\n", key);
+	hash_for_each_possible(adapter->switch_ctxt.complex_recipe.ht_lkup,
+			       save_fltr, node, key) {
+		if (!sxe2_tc_rule_hash_cmp(save_fltr, fltr))
+			return save_fltr;
+	}
+	return NULL;
+}
+
+static void sxe2_hash_lkup_del(struct sxe2_adapter *adapter,
+			       struct sxe2_tcf_fltr *fltr)
+{
+	u32 key;
+	struct sxe2_tcf_fltr *save_fltr;
+
+	key = sxe2_tc_rule_hash_func(fltr);
+	LOG_INFO_BDF("lkup hash key is:%u\n", key);
+	hash_for_each_possible(adapter->switch_ctxt.complex_recipe.ht_lkup,
+			       save_fltr, node, key) {
+		if (!sxe2_tc_rule_hash_cmp(save_fltr, fltr)) {
+			hash_del(&save_fltr->node);
+			break;
+		}
+	}
+}
+
+static s32 sxe2_hash_lkup_add(struct sxe2_adapter *adapter,
+			      struct sxe2_tcf_fltr *fltr)
+{
+	u32 key;
+	s32 ret = 0;
+
+	if (sxe2_hash_lkup_find(adapter, fltr)) {
+		LOG_ERROR_BDF("fltr already exists in hash table\n");
+		ret = -EEXIST;
+		return ret;
+	}
+	key = sxe2_tc_rule_hash_func(fltr);
+	LOG_INFO_BDF("lkup hash key is:%u\n", key);
+	hash_add(adapter->switch_ctxt.complex_recipe.ht_lkup,
+		 &fltr->node, key);
+	return ret;
+}
+
+STATIC s32 sxe2_fwc_switch_rules_cfg(struct sxe2_adapter *adapter, void *req,
+				     void *resp, u32 req_len, u32 resp_len,
+				     enum sxe2_drv_cmd_opcode opc)
+{
+	s32 ret;
+	struct sxe2_cmd_params cmd = { 0 };
+
+	
sxe2_cmd_params_dflt_fill(&cmd, opc, req, req_len, resp, resp_len); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch req cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 +sxe2_fwc_switch_recipe_get(struct sxe2_adapter *adapter, + struct sxe2_fwc_switch_recipe *switch_recipe, + enum sxe2_drv_cmd_opcode opc) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_recipe resp = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, opc, switch_recipe, + sizeof(*switch_recipe), &resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch recipe get cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + memcpy(switch_recipe, &resp, sizeof(*switch_recipe)); + + return ret; +} + +STATIC s32 sxe2_fwc_switch_profile_recipe_map_get(struct sxe2_adapter *adapter, + struct sxe2_fwc_switch_profile_recipe_map *map, + enum sxe2_drv_cmd_opcode opc) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_profile_recipe_map resp = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, opc, map, + sizeof(*map), &resp, + sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch recipe get cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + memcpy(map->map, resp.map, sizeof(map->map)); + + return ret; +} + +STATIC s32 sxe2_fwc_vsi_list_cfg(struct sxe2_adapter *adapter, + struct sxe2_fwc_switch_vsi_list *vsi_list_fwc, + u32 size, enum sxe2_drv_cmd_opcode opc) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_vsi_list_resp resp = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, opc, vsi_list_fwc, size, &resp, + sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("vsi list cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + vsi_list_fwc->vsi_list_id = resp.index; + + return ret; +} + +s32 sxe2_fwc_switch_large_action_cfg(struct sxe2_adapter *adapter, + struct sxe2_fwc_switch_large_action *lgActionParm, + enum sxe2_drv_cmd_opcode opc) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + + sxe2_cmd_params_dflt_fill(&cmd, opc, lgActionParm, + sizeof(*lgActionParm), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch req cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +void sxe2_switch_context_deinit(struct sxe2_adapter *adapter) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + s32 i; + + mutex_destroy(&switch_ctxt->mac_addr_lock); + + for (i = 0; i < SXE2_VSI_LIST_TYPE_MAX; i++) + mutex_destroy(&switch_ctxt->vsi_list_mgmt[i].vsi_list_lock); + + for (i = 0; i < SXE2_DEFAULT_RECIPE_MAX; i++) + mutex_destroy(&switch_ctxt->recipe[i].rule_lock); + + mutex_destroy(&switch_ctxt->complex_recipe.rule_lock); + + if (!switch_ctxt->profile_fv_item) + return; + + for (i = 0; i < SXE2_MAX_NUM_PROFILES; i++) { + kfree(switch_ctxt->profile_fv_item[i]); + switch_ctxt->profile_fv_item[i] = NULL; + + } + kfree(switch_ctxt->profile_fv_item); + switch_ctxt->profile_fv_item = NULL; +} + +s32 sxe2_switch_context_init(struct sxe2_adapter *adapter) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + s32 i; + + for (i = 0; i < SXE2_VSI_LIST_TYPE_MAX; i++) { + switch_ctxt->vsi_list_mgmt[i].type = (enum sxe2_vsi_list_type)i; + INIT_LIST_HEAD(&switch_ctxt->vsi_list_mgmt[i].vsi_list_head); + mutex_init(&switch_ctxt->vsi_list_mgmt[i].vsi_list_lock); + } + + for (i = 0; i < SXE2_DEFAULT_RECIPE_MAX; i++) { + switch_ctxt->recipe[i].recipe_id = (u16)i; + 
INIT_LIST_HEAD(&switch_ctxt->recipe[i].rule_head);
+		INIT_LIST_HEAD(&switch_ctxt->recipe[i].restore_head);
+		mutex_init(&switch_ctxt->recipe[i].rule_lock);
+		if (i == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK)
+			switch_ctxt->recipe[i].is_root = false;
+		else
+			switch_ctxt->recipe[i].is_root = true;
+	}
+
+	INIT_LIST_HEAD(&switch_ctxt->complex_recipe.rule_head);
+	INIT_LIST_HEAD(&switch_ctxt->complex_recipe.restore_head);
+
+	hash_init(switch_ctxt->complex_recipe.ht_cookie);
+	hash_init(switch_ctxt->complex_recipe.ht_lkup);
+
+	mutex_init(&switch_ctxt->complex_recipe.rule_lock);
+
+	/* init before the allocations below: the failure path calls
+	 * sxe2_switch_context_deinit(), which destroys this mutex.
+	 */
+	mutex_init(&switch_ctxt->mac_addr_lock);
+
+	switch_ctxt->profile_fv_item = kcalloc(
+		SXE2_MAX_NUM_PROFILES,
+		sizeof(struct sxe2_profile_fv_item *),
+		GFP_KERNEL);
+	if (!switch_ctxt->profile_fv_item) {
+		LOG_DEV_ERR("alloc fv item memory failed\n");
+		goto l_end;
+	}
+	for (i = 0; i < SXE2_MAX_NUM_PROFILES; i++) {
+		switch_ctxt->profile_fv_item[i] =
+			kcalloc(SXE2_SWITCH_PROFILE_FV_CNT,
+				sizeof(struct sxe2_profile_fv_item),
+				GFP_KERNEL);
+		if (!switch_ctxt->profile_fv_item[i]) {
+			LOG_DEV_ERR("alloc fv item %u memory failed\n", i);
+			goto l_end;
+		}
+	}
+
+	switch_ctxt->evb_mode = BRIDGE_MODE_VEB;
+
+	mutex_init(&switch_ctxt->evb_mode_lock);
+	adapter->switch_ctxt.switch_id = adapter->pf_idx;
+
+	mutex_init(&adapter->user_pf_ctxt.flag_lock);
+
+	return 0;
+
+l_end:
+	sxe2_switch_context_deinit(adapter);
+
+	return -ENOMEM;
+}
+
+static void sxe2_switch_sw_rule_free(struct sxe2_adapter *adapter,
+				     struct sxe2_rule_info *rule)
+{
+	struct list_head *tc_rule_head;
+	struct sxe2_tc_rule_info *tc_list_itr = NULL;
+	struct sxe2_tc_rule_info *tc_list_tmp = NULL;
+
+	if (rule->tcf_fltr) {
+		tc_rule_head = &rule->tc_rule_head;
+		list_for_each_entry_safe(tc_list_itr, tc_list_tmp,
+					 tc_rule_head, list_entry) {
+			list_del(&tc_list_itr->list_entry);
+			sxe2_hash_cookie_del(adapter, tc_list_itr->cookie);
+			kfree(tc_list_itr);
+		}
+		kfree(rule->tcf_fltr);
+	}
+	kfree(rule);
+}
+
+STATIC struct sxe2_vsi_list_info *
+sxe2_vsi_list_entry_find(struct sxe2_adapter *adapter, u16 *vsi_array,
+			 s32 vsi_cnt, enum sxe2_vsi_list_type type)
+{
+	struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt;
+	struct list_head *list_head;
+	struct sxe2_vsi_list_info *list_itr = NULL;
+	struct sxe2_vsi_list_info *ret = NULL;
+	s32 i;
+
+	list_head = &switch_ctxt->vsi_list_mgmt[type].vsi_list_head;
+	list_for_each_entry(list_itr, list_head, list_entry) {
+		if (bitmap_weight(list_itr->vsi_map, SXE2_VSI_MAX_CNT) ==
+		    vsi_cnt) {
+			ret = list_itr;
+			for (i = 0; i < vsi_cnt; i++) {
+				if (!test_bit(vsi_array[i],
+					      list_itr->vsi_map)) {
+					ret = NULL;
+					break;
+				}
+			}
+			if (ret) {
+				LOG_DEBUG_BDF("find vsi list success, vsi list id %u with vsi count %d\n",
+					      ret->vsi_list_id,
+					      bitmap_weight(ret->vsi_map, SXE2_VSI_MAX_CNT));
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
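+/*
+ * VSI lists let one switch rule forward to (or prune for) several VSIs.
+ * Lists are shared: sxe2_vsi_list_entry_find() reuses an existing list
+ * with identical membership, and rule_cnt reference counting releases
+ * the list once the last rule stops using it.
+ */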
failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (type == SXE2_VSI_LIST_TYPE_PRUNE) + vsi_list_fwc->flag |= + cpu_to_le16(SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE); + + vsi_list_fwc->vsi_cnt = cpu_to_le16(vsi_cnt); + for (i = 0; i < vsi_cnt; i++) + vsi_list_fwc->vsi[i] = cpu_to_le16(vsi_array[i]); + + ret = sxe2_fwc_vsi_list_cfg(adapter, vsi_list_fwc, + sizeof(*vsi_list_fwc) + vsi_size, + SXE2_CMD_SWITCH_VSI_LIST_ADD); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + kfree(new_vsi_list); + new_vsi_list = NULL; + goto l_end; + } + + new_vsi_list->type = type; + new_vsi_list->vsi_list_id = le16_to_cpu(vsi_list_fwc->vsi_list_id); + new_vsi_list->rule_cnt = 0; + new_vsi_list->need_bond = 0; + for (i = 0; i < vsi_cnt; i++) + set_bit(vsi_array[i], new_vsi_list->vsi_map); + + list_add(&new_vsi_list->list_entry, + &switch_ctxt->vsi_list_mgmt[type].vsi_list_head); + + LOG_DEBUG_BDF("create vsi list success,\t" + "vsi list id %u with vsi count %d\n", + new_vsi_list->vsi_list_id, + bitmap_weight(new_vsi_list->vsi_map, SXE2_VSI_MAX_CNT)); + +l_end: + kfree(vsi_list_fwc); + vsi_list_fwc = NULL; + + *status = ret; + return new_vsi_list; +} + +s32 sxe2_vsi_list_update_bond(struct sxe2_adapter *adapter, + struct sxe2_vsi_list_info *vsi_list, + struct sxe2_adapter *master_adapter, bool linking) +{ + s32 ret; + struct sxe2_fwc_switch_vsi_list *vsi_list_fwc = NULL; + u16 vsi_cnt = 1; + u16 vsi_size = (u16)(vsi_cnt * sizeof(u16)); + + vsi_list_fwc = kzalloc(sizeof(*vsi_list_fwc) + vsi_size, GFP_KERNEL); + if (!vsi_list_fwc) { + LOG_DEV_ERR("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (linking) + vsi_list_fwc->flag |= SXE2_CMD_SWITCH_VSI_FLAG_LIST_INC; + + vsi_list_fwc->flag |= SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE; + vsi_list_fwc->vsi_list_id = cpu_to_le16(vsi_list->vsi_list_id); + vsi_list_fwc->vsi_cnt = cpu_to_le16(vsi_cnt); + vsi_list_fwc->vsi[0] = cpu_to_le16( + master_adapter->vsi_ctxt.main_vsi->idx_in_dev); + + ret = sxe2_fwc_vsi_list_cfg(adapter, vsi_list_fwc, + sizeof(*vsi_list_fwc) + vsi_size, + SXE2_CMD_SWITCH_VSI_LIST_UPDATE); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + goto l_end; + } + +l_end: + kfree(vsi_list_fwc); + return ret; +} + +STATIC s32 sxe2_vsi_list_update(struct sxe2_adapter *adapter, + struct sxe2_vsi_list_info *vsi_list, + u16 *vsi_array, u16 vsi_cnt, + enum sxe2_vsi_list_type type, bool is_increase) +{ + s32 ret; + struct sxe2_fwc_switch_vsi_list *vsi_list_fwc; + u32 vsi_size = vsi_cnt * sizeof(*vsi_array); + u16 i; + + vsi_list_fwc = kzalloc(sizeof(*vsi_list_fwc) + vsi_size, GFP_KERNEL); + if (!vsi_list_fwc) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (type == SXE2_VSI_LIST_TYPE_PRUNE) + vsi_list_fwc->flag |= + cpu_to_le16(SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE); + if (is_increase) + vsi_list_fwc->flag |= + cpu_to_le16(SXE2_CMD_SWITCH_VSI_FLAG_LIST_INC); + + vsi_list_fwc->vsi_list_id = cpu_to_le16(vsi_list->vsi_list_id); + vsi_list_fwc->vsi_cnt = cpu_to_le16(vsi_cnt); + + for (i = 0; i < vsi_cnt; i++) + vsi_list_fwc->vsi[i] = cpu_to_le16(vsi_array[i]); + + ret = sxe2_fwc_vsi_list_cfg(adapter, vsi_list_fwc, + sizeof(*vsi_list_fwc) + vsi_size, + SXE2_CMD_SWITCH_VSI_LIST_UPDATE); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + goto l_end; + } + + for (i = 0; i < vsi_cnt; i++) { + if (is_increase) + set_bit(vsi_array[i], vsi_list->vsi_map); + else + clear_bit(vsi_array[i], vsi_list->vsi_map); + } + +l_end: + kfree(vsi_list_fwc); + 
vsi_list_fwc = NULL; + return ret; +} + +STATIC s32 sxe2_vsi_list_remove(struct sxe2_adapter *adapter, + struct sxe2_vsi_list_info *vsi_list) +{ + s32 ret; + struct sxe2_fwc_switch_vsi_list vsi_list_fwc = { 0 }; + + if (vsi_list->type == SXE2_VSI_LIST_TYPE_PRUNE) + vsi_list_fwc.flag |= + cpu_to_le16(SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE); + + vsi_list_fwc.vsi_list_id = cpu_to_le16(vsi_list->vsi_list_id); + + ret = sxe2_fwc_vsi_list_cfg(adapter, &vsi_list_fwc, + sizeof(vsi_list_fwc), + SXE2_CMD_SWITCH_VSI_LIST_DEL); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + goto l_end; + } + + list_del(&vsi_list->list_entry); + + LOG_DEBUG_BDF("free vsi list success, vsi list id %u\n", + vsi_list->vsi_list_id); + + kfree(vsi_list); + +l_end: + return ret; +} + +STATIC void sxe2_vsi_list_refcnt_update(struct sxe2_adapter *adapter, + struct sxe2_vsi_list_info *vsi_list, + bool is_increase) +{ + if (is_increase) { + vsi_list->rule_cnt++; + } else { + if (vsi_list->rule_cnt) + vsi_list->rule_cnt--; +#ifdef SXE2_CFG_DEBUG + else { + LOG_ERROR("vsi list %d is zero before decrease\n", + vsi_list->vsi_list_id); + } +#endif + if (vsi_list->rule_cnt == 0) + (void)sxe2_vsi_list_remove(adapter, vsi_list); + } +} + +STATIC struct sxe2_rule_info * +sxe2_rule_entry_find(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *list_itr = NULL; + struct sxe2_rule_info *ret = NULL; + struct sxe2_tcf_fltr *tcf_fltr; + + if (!rule_info->tcf_fltr) { + list_head = + &switch_ctxt->recipe[rule_info->recipe_id].rule_head; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&rule_info->fltr.data, &list_itr->fltr.data, + sizeof(rule_info->fltr.data)) && + rule_info->fltr.src_type == + list_itr->fltr.src_type) { + ret = list_itr; + break; + } + } + } else { + tcf_fltr = (struct sxe2_tcf_fltr *)sxe2_hash_lkup_find(adapter, + rule_info->tcf_fltr); + if (tcf_fltr) + ret = tcf_fltr->rule_info; + } + + if (ret) + LOG_DEBUG_BDF("find rule success, rule id %u\n", ret->rule_id); + + return ret; +} + +STATIC void sxe2_make_switch_full_key(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule, + u32 *full_key) +{ + struct sxe2_rule_filter *rule_filter = &rule->fltr; + union sxe2_switch_full_key_dw0 full_key_d0; + union sxe2_switch_full_key_dw1 full_key_d1; + union sxe2_switch_full_key_dw2 full_key_d2; + u8 *mac_addr; + + memset(&full_key_d0, 0, sizeof(full_key_d0)); + memset(&full_key_d1, 0, sizeof(full_key_d1)); + memset(&full_key_d2, 0, sizeof(full_key_d2)); + + full_key_d0.field.rid = rule->recipe_id; + if (adapter->switch_ctxt.recipe[rule->recipe_id].is_root) + full_key_d0.field.is_root = 1; + + full_key_d0.field.fv0 = adapter->switch_ctxt.switch_id; + + if (rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC) { + mac_addr = rule_filter->data.mac.mac_addr; + full_key_d1.field.fv1 = + (u32)((mac_addr[0] << (u32)8) | mac_addr[1]); + full_key_d1.field.fv2 = + (u32)((mac_addr[2] << (u32)8) | mac_addr[3]); + full_key_d2.field.fv3 = + (u32)((mac_addr[4] << (u32)8) | mac_addr[5]); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_VLAN) { + full_key_d1.field.fv1 = rule_filter->data.vlan.tpid; + full_key_d1.field.fv2 = rule_filter->data.vlan.vlan_id; + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_TX_ETYPE) { + full_key_d1.field.fv1 |= + (rule_filter->data.etype.vsi_id << SXE2_FV_VSI_NUM_OFFSET); + full_key_d1.field.fv2 |= + (SXE2_FV_DIRECTION_TX << 
SXE2_FV_DIRECTION_OFFSET); + full_key_d2.field.fv3 = rule_filter->data.etype.ethertype; + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_RX_ETYPE) { + full_key_d1.field.fv1 |= + (SXE2_FV_DIRECTION_RX << SXE2_FV_DIRECTION_OFFSET); + full_key_d1.field.fv2 = rule_filter->data.etype.ethertype; + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_ALLMULTI) { + full_key_d1.field.fv1 |= + (SXE2_FV_CAST_MULTI << SXE2_FV_CAST_OFFSET); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_PROMISC) { + if (rule_filter->src_type == SXE2_SRC_TYPE_TX) + full_key_d1.field.fv1 |= (SXE2_FV_DIRECTION_TX + << SXE2_FV_DIRECTION_OFFSET); + else + full_key_d1.field.fv1 |= (SXE2_FV_DIRECTION_RX + << SXE2_FV_DIRECTION_OFFSET); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI) { + full_key_d1.field.fv1 |= + (rule_filter->data.srcvsi.vsi_id << SXE2_FV_VSI_NUM_OFFSET); + full_key_d1.field.fv2 |= (rule_filter->data.srcvsi.to_rdma + << SXE2_FV_PKT_TO_RDMA_OFFSET); + full_key_d2.field.fv3 |= + (rule_filter->data.srcvsi.packet_src_type + << SXE2_FV_PKT_SRC_OFFSET); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI_EXT) { + full_key_d1.field.fv1 |= + (rule_filter->data.srcvsi.vsi_id << SXE2_FV_VSI_NUM_OFFSET); + full_key_d1.field.fv2 |= (rule_filter->data.srcvsi.to_rdma + << SXE2_FV_PKT_TO_RDMA_OFFSET); + full_key_d2.field.fv3 |= + (rule_filter->data.srcvsi.packet_src_type + << SXE2_FV_PKT_SRC_OFFSET); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK) { + full_key_d1.field.fv1 |= + (rule_filter->data.mac_spoofchk.vsi_id << SXE2_FV_VSI_NUM_OFFSET); + full_key_d1.field.fv2 = + (SXE2_FV_DIRECTION_TX << SXE2_FV_DIRECTION_OFFSET); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT) { + mac_addr = rule_filter->data.mac_spoofchk_ext.mac_addr; + full_key_d1.field.fv1 = + (u32)((mac_addr[0] << (u32)8) | mac_addr[1]); + full_key_d1.field.fv2 = + (u32)((mac_addr[2] << (u32)8) | mac_addr[3]); + full_key_d2.field.fv3 = + (u32)((mac_addr[4] << (u32)8) | mac_addr[5]); + full_key_d2.field.fv4 = rule_filter->data.mac_spoofchk_ext.hid; + } + + full_key[0] = cpu_to_le32(full_key_d0.val); + full_key[1] = cpu_to_le32(full_key_d1.val); + full_key[2] = cpu_to_le32(full_key_d2.val); +} + +STATIC void sxe2_make_switch_action(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + u32 *action) +{ + u32 act = 0; + switch (rule_info->act.type) { + case SXE2_FWD_TO_VSI: + if (rule_info->recipe_id != SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK) { + act |= ((rule_info->act.fwd_id.vsi_id) << SXE2_SINGLE_ACT_VSI_ID_S) & + SXE2_SINGLE_ACT_VSI_ID_M; + act |= SXE2_SINGLE_ACT_VSI_FORWARD | SXE2_SINGLE_ACT_VALID_BIT; + } + break; + case SXE2_FWD_TO_VSI_LIST: + act |= SXE2_SINGLE_ACT_VSI_LIST; + act |= (rule_info->act.fwd_id.vsi_list_id + << SXE2_SINGLE_ACT_VSI_LIST_ID_S) & + SXE2_SINGLE_ACT_VSI_LIST_ID_M; + if (rule_info->recipe_id == SXE2_DEFAULT_RECIPE_VLAN) + act |= SXE2_SINGLE_ACT_PRUNE | SXE2_SINGLE_ACT_EGRESS | + SXE2_SINGLE_ACT_INGRESS; + else + act |= SXE2_SINGLE_ACT_VSI_FORWARD | + SXE2_SINGLE_ACT_VALID_BIT; + break; + case SXE2_FWD_TO_Q: + act |= SXE2_SINGLE_ACT_TO_Q; + act |= (rule_info->act.fwd_id.q_id + << SXE2_SINGLE_ACT_Q_INDEX_S) & + SXE2_SINGLE_ACT_Q_INDEX_M; + act |= SXE2_SINGLE_ACT_Q_PRIORITY & + rule_info->act.q_high; + break; + case SXE2_FWD_TO_QGRP: + act |= SXE2_SINGLE_ACT_TO_Q; + act |= (rule_info->act.fwd_id.q_id + << SXE2_SINGLE_ACT_Q_INDEX_S) & + SXE2_SINGLE_ACT_Q_INDEX_M; + act |= (rule_info->act.qgrp_size + << SXE2_SINGLE_ACT_Q_REGION_S) & + SXE2_SINGLE_ACT_Q_REGION_M; + act 
|= SXE2_SINGLE_ACT_Q_PRIORITY & + rule_info->act.q_high; + break; + case SXE2_DROP_PACKET: + act |= SXE2_SINGLE_ACT_VSI_FORWARD | SXE2_SINGLE_ACT_DROP | + SXE2_SINGLE_ACT_VALID_BIT; + break; + case SXE2_MIRROR_PACKET: + act |= SXE2_SINGLE_ACT_MIRROR; + act |= (rule_info->act.fwd_id.vsi_id << SXE2_SINGLE_ACT_VSI_ID_S) & + SXE2_SINGLE_ACT_VSI_ID_M; + break; + case SXE2_LARGE_ACTION: + act |= SXE2_SINGLE_ACT_POINTER | SXE2_SINGLE_ACT_TO_LARGE | + SXE2_SINGLE_ACT_HASFWD; + break; + + default: + goto l_end; + } + + if (rule_info->act.lb_en) + act |= SXE2_SINGLE_ACT_LB_ENABLE; + if (rule_info->act.lan_en) + act |= SXE2_SINGLE_ACT_LAN_ENABLE; + +l_end: + *action = act; +} + +STATIC void sxe2_switch_rule_fill(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_fwc_switch_rule *rule_fwc, + bool is_del) +{ + u32 act = 0; + + if (!is_del) + sxe2_make_switch_action(adapter, rule_info, &act); + + rule_fwc->recipe_id = cpu_to_le16(rule_info->recipe_id); + rule_fwc->act = cpu_to_le32(act); + rule_fwc->rule_id = cpu_to_le16(rule_info->rule_id); + + sxe2_make_switch_full_key(adapter, rule_info, rule_fwc->full_key); + + LOG_DEBUG_BDF("rule_fwc, recipe id %d, act %x, full_key %08x:%08x:%08x\n", + le16_to_cpu(rule_fwc->recipe_id), + le32_to_cpu(rule_fwc->act), + le32_to_cpu(rule_fwc->full_key[0]), + le32_to_cpu(rule_fwc->full_key[1]), + le32_to_cpu(rule_fwc->full_key[2])); +} + +STATIC void sxe2_switch_complex_rule_fill(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_fwc_switch_complex_rule *rule_fwc, + bool is_del) +{ + u32 act = 0; + u16 i, profile_id, profile_cnt; + struct sxe2_tcf_fltr *fltr = rule_info->tcf_fltr; + union sxe2_switch_full_key_dw0 full_key_d0; + union sxe2_switch_full_key_dw1 full_key_d1; + union sxe2_switch_full_key_dw2 full_key_d2; + + if (!is_del) + sxe2_make_switch_action(adapter, rule_info, &act); + + rule_fwc->act = cpu_to_le32(act); + + profile_cnt = 0; + profile_id = 0; + while (true) { + profile_id = (u16)find_next_bit(fltr->profiles, + SXE2_MAX_NUM_PROFILES, + profile_id); + if (profile_id >= SXE2_MAX_NUM_PROFILES) + break; + rule_fwc->profile_id[profile_cnt++] = cpu_to_le16(profile_id); + profile_id++; + } + + rule_fwc->profile_cnt = cpu_to_le16(profile_cnt); + rule_fwc->rule_root = cpu_to_le16(rule_info->rule_id); + rule_fwc->word_cnt = cpu_to_le16(fltr->word_cnt); + rule_fwc->recipe_root = cpu_to_le16(rule_info->recipe_id); + rule_fwc->recipe_cnt = cpu_to_le16(fltr->recipe_cnt); + rule_fwc->priority = fltr->priority; + + for (i = 0; i < fltr->word_cnt; i++) { + rule_fwc->lkup_mask[i] = + cpu_to_le16(be16_to_cpu(fltr->lkup_mask[i])); + rule_fwc->lkup_value[i] = + cpu_to_le16(be16_to_cpu(fltr->lkup_value[i])); + rule_fwc->lkup_index[i] = cpu_to_le16(fltr->lkup_index[i]); + } + + for (i = 0; i < SXE2_MAX_CHAIN_WORDS; i++) { + if ((i % SXE2_NUM_WORDS_RECIPE) == 0 && + fltr->lkup_mask[i] != 0 && i + 3 < SXE2_MAX_CHAIN_WORDS) { + full_key_d0.val = 0; + full_key_d1.val = 0; + full_key_d2.val = 0; + full_key_d0.field.fv0 = rule_fwc->lkup_value[i]; + if (rule_fwc->lkup_mask[i + 1]) + full_key_d1.field.fv1 = + rule_fwc->lkup_value[i + 1]; + if (rule_fwc->lkup_mask[i + 2]) + full_key_d1.field.fv2 = + rule_fwc->lkup_value[i + 2]; + if (rule_fwc->lkup_mask[i + 3]) + full_key_d2.field.fv3 = + rule_fwc->lkup_value[i + 3]; + LOG_DEBUG_BDF("full_key %08x:%08x:%08x\n", + cpu_to_le32(full_key_d0.val), + cpu_to_le32(full_key_d1.val), + cpu_to_le32(full_key_d2.val)); + } + } + + for (i = 0; i < fltr->recipe_cnt; i++) { + 
rule_fwc->rule_id[i] = cpu_to_le16(fltr->rule_id[i]); + rule_fwc->recipe_id[i] = cpu_to_le16(fltr->recipe_id[i]); + } + + LOG_DEBUG_BDF("rule_fwc, profile cnt %d, recipe cnt %d, act %x\n", + le16_to_cpu(rule_fwc->profile_cnt), + le16_to_cpu(rule_fwc->recipe_cnt), + le32_to_cpu(rule_fwc->act)); +} + +STATIC struct sxe2_rule_info * +sxe2_fwd_rule_create(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, s32 *status) +{ + s32 ret; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct sxe2_rule_info *new_rule = NULL; + struct sxe2_fwc_switch_rule rule_req = { 0 }; + struct sxe2_fwc_switch_rule_resp rule_resp = { 0 }; + struct sxe2_fwc_switch_complex_rule *cpx_rule_req = NULL; + + struct sxe2_fwc_switch_complex_rule_resp cpx_rule_resp = { 0 }; + u16 i; + struct sxe2_tcf_fltr *new_fltr = NULL; + + cpx_rule_req = kzalloc(sizeof(*cpx_rule_req), GFP_KERNEL); + if (!cpx_rule_req) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + new_rule = kzalloc(sizeof(*new_rule), GFP_KERNEL); + if (!new_rule) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (rule_info->tcf_fltr) { + new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL); + if (!new_fltr) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + memcpy(new_fltr, rule_info->tcf_fltr, sizeof(*new_fltr)); + } + + if (!new_fltr) { + sxe2_switch_rule_fill(adapter, rule_info, &rule_req, false); +#ifdef SXE2_CFG_DEBUG + if (l2_force_fkot) + rule_req.add_fkot = true; +#endif + ret = sxe2_fwc_switch_rules_cfg(adapter, &rule_req, &rule_resp, + sizeof(rule_req), + sizeof(rule_resp), + SXE2_CMD_SWITCH_RULE_ADD); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + goto l_end; + } + rule_info->rule_id = le16_to_cpu(rule_resp.index); + rule_info->hid = (u16)(rule_resp.resv1[0] + (((u16)rule_resp.resv1[1]) << 8)); + } else { +#ifdef SXE2_CFG_DEBUG + if (tc_force_fkot) + cpx_rule_req->add_fkot = true; +#endif + sxe2_switch_complex_rule_fill(adapter, rule_info, cpx_rule_req, + false); + ret = sxe2_fwc_switch_rules_cfg(adapter, cpx_rule_req, + &cpx_rule_resp, + sizeof(*cpx_rule_req), + sizeof(cpx_rule_resp), + SXE2_CMD_SWITCH_RULE_CPX_ADD); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + goto l_end; + } + rule_info->recipe_id = le16_to_cpu(cpx_rule_resp.recipe_root); + rule_info->rule_id = le16_to_cpu(cpx_rule_resp.rule_root); + new_fltr->recipe_cnt = le16_to_cpu(cpx_rule_resp.recipe_cnt); + for (i = 0; i < new_fltr->recipe_cnt; i++) { + new_fltr->rule_id[i] = + le16_to_cpu(cpx_rule_resp.rule_id[i]); + new_fltr->recipe_id[i] = + le16_to_cpu(cpx_rule_resp.recipe_id[i]); + } + } + + memcpy(new_rule, rule_info, sizeof(*new_rule)); + new_rule->tcf_fltr = new_fltr; + + if (new_rule->vsi_list) + sxe2_vsi_list_refcnt_update(adapter, new_rule->vsi_list, true); + + if (!new_rule->tcf_fltr) { + list_add(&new_rule->list_entry, + &switch_ctxt->recipe[new_rule->recipe_id].rule_head); + } else { + new_fltr->rule_info = new_rule; + INIT_LIST_HEAD(&new_rule->tc_rule_head); + if (!new_rule->tcf_fltr->cookie_invalid) { + ret = sxe2_hash_cookie_add(adapter, new_rule->tcf_fltr->cookie, + new_rule); + if (ret) { + LOG_ERROR_BDF("hash cookie add failed, ret %d\n", ret); + goto l_end; + } + } + + ret = sxe2_hash_lkup_add(adapter, new_rule->tcf_fltr); + if (ret) { + LOG_ERROR_BDF("hash lkup add failed, ret %d\n", ret); + goto l_end; + } + } + + LOG_DEBUG_BDF("create rule success, rule id %u\n", 
new_rule->rule_id); + +l_end: + *status = ret; + if (ret) { + kfree(new_fltr); + kfree(new_rule); + new_rule = NULL; + } + + kfree(cpx_rule_req); + + return new_rule; +} + +s32 sxe2_fwd_rule_update(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info) +{ + s32 ret; + struct sxe2_fwc_switch_rule rule_fwc = { 0 }; + struct sxe2_fwc_switch_rule_resp rule_resp = { 0 }; + struct sxe2_fwc_switch_complex_rule *cpx_rule_req = NULL; + struct sxe2_fwc_switch_complex_rule_resp cpx_rule_resp = { 0 }; + + cpx_rule_req = kzalloc(sizeof(*cpx_rule_req), GFP_KERNEL); + if (!cpx_rule_req) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (!rule_info->tcf_fltr) { + sxe2_switch_rule_fill(adapter, rule_info, &rule_fwc, false); + + ret = sxe2_fwc_switch_rules_cfg(adapter, &rule_fwc, &rule_resp, + sizeof(rule_fwc), + sizeof(rule_resp), + SXE2_CMD_SWITCH_RULE_UPDATE); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + } + } else { + sxe2_switch_complex_rule_fill(adapter, rule_info, cpx_rule_req, + false); + ret = sxe2_fwc_switch_rules_cfg(adapter, cpx_rule_req, &cpx_rule_resp, + sizeof(*cpx_rule_req), sizeof(cpx_rule_resp), + SXE2_CMD_SWITCH_RULE_CPX_UPDATE); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + } + } + kfree(cpx_rule_req); +l_end: + return ret; +} + +STATIC s32 sxe2_rule_fwd_id_update(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + s32 ret = -EINVAL; + struct sxe2_rule_info new_rule; + + memcpy(&new_rule, save_rule, sizeof(new_rule)); + + if (rule_info->vsi_list && !save_rule->vsi_list) { + new_rule.vsi_list = rule_info->vsi_list; + new_rule.act.fwd_id.vsi_list_id = + rule_info->vsi_list->vsi_list_id; + + new_rule.act.type = SXE2_FWD_TO_VSI_LIST; + + } else if (!rule_info->vsi_list && save_rule->vsi_list) { + new_rule.vsi_list = NULL; + new_rule.act.fwd_id.vsi_id = rule_info->act.fwd_id.vsi_id; + + new_rule.act.type = SXE2_FWD_TO_VSI; + + } else if (rule_info->vsi_list && save_rule->vsi_list) { + new_rule.vsi_list = rule_info->vsi_list; + new_rule.act.fwd_id.vsi_list_id = + rule_info->vsi_list->vsi_list_id; + + } else { + ret = -EINVAL; + goto l_end; + } + + ret = sxe2_fwd_rule_update(adapter, &new_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + goto l_end; + } + + if (save_rule->vsi_list) + sxe2_vsi_list_refcnt_update(adapter, save_rule->vsi_list, false); + + memcpy(save_rule, &new_rule, sizeof(*save_rule)); + + if (save_rule->vsi_list) + sxe2_vsi_list_refcnt_update(adapter, save_rule->vsi_list, true); + + LOG_DEBUG_BDF("update rule success, rule id %u\n", save_rule->rule_id); + +l_end: + return ret; +} + +s32 sxe2_fwd_rule_remove(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, bool free_sw) +{ + s32 ret = 0; + struct sxe2_fwc_switch_rule rule_fwc = { 0 }; + struct sxe2_fwc_switch_rule_resp rule_resp = { 0 }; + struct sxe2_fwc_switch_complex_rule *cpx_rule_req = NULL; + struct sxe2_fwc_switch_complex_rule_resp cpx_rule_resp = { 0 }; + + cpx_rule_req = kzalloc(sizeof(*cpx_rule_req), GFP_KERNEL); + if (!cpx_rule_req) { + LOG_ERROR_BDF("alloc memory failed\n"); + ret = -ENOMEM; + goto l_end; + } + + if (!rule_info->tcf_fltr) { + sxe2_switch_rule_fill(adapter, rule_info, &rule_fwc, true); + + ret = sxe2_fwc_switch_rules_cfg(adapter, &rule_fwc, &rule_resp, + sizeof(rule_fwc), + sizeof(rule_resp), + SXE2_CMD_SWITCH_RULE_DEL); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", 
ret); + goto l_end; + } + } else { + sxe2_switch_complex_rule_fill(adapter, rule_info, cpx_rule_req, + true); + ret = sxe2_fwc_switch_rules_cfg(adapter, cpx_rule_req, + &cpx_rule_resp, + sizeof(*cpx_rule_req), + sizeof(cpx_rule_resp), + SXE2_CMD_SWITCH_RULE_CPX_DEL); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + goto l_end; + } + } + + if (rule_info->tcf_fltr) { + if (!rule_info->tcf_fltr->cookie_invalid) + sxe2_hash_cookie_del(adapter, rule_info->tcf_fltr->cookie); + sxe2_hash_lkup_del(adapter, rule_info->tcf_fltr); + } else { + list_del(&rule_info->list_entry); + } + + LOG_DEBUG_BDF("remove rule success, rule id %u\n", rule_info->rule_id); + + if (free_sw) + sxe2_switch_sw_rule_free(adapter, rule_info); + +l_end: + kfree(cpx_rule_req); + return ret; +} + +STATIC s32 sxe2_switch_tc_rule_info_add(struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + struct sxe2_tc_rule_info *list_itr = NULL; + struct sxe2_tc_rule_info *tc_rule_info = NULL; + struct list_head *prev; + u32 cnt = 0; + + tc_rule_info = kzalloc(sizeof(*tc_rule_info), GFP_KERNEL); + if (!tc_rule_info) + return -ENOMEM; + + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(tc_rule_info, rule_info); + + list_for_each_entry(list_itr, &save_rule->tc_rule_head, list_entry) { + if (rule_info->tcf_fltr->prio == list_itr->prio) { + kfree(tc_rule_info); + return -EEXIST; + } else if (rule_info->tcf_fltr->prio < list_itr->prio) { + if (cnt == 0) { + list_add(&tc_rule_info->list_entry, + &save_rule->tc_rule_head); + } else { + prev = list_itr->list_entry.prev; + prev->next = &tc_rule_info->list_entry; + tc_rule_info->list_entry.prev = prev; + tc_rule_info->list_entry.next = + &list_itr->list_entry; + list_itr->list_entry.prev = + &tc_rule_info->list_entry; + } + return 0; + } + cnt++; + } + list_add_tail(&tc_rule_info->list_entry, &save_rule->tc_rule_head); + + return 0; +} + +STATIC s32 sxe2_switchdev_tc_samerule_add(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + struct sxe2_tc_rule_info temp_rule_info; + s32 ret = 0; + unsigned long new_cookie; + + if (rule_info->tcf_fltr->cookie_invalid || save_rule->tcf_fltr->cookie_invalid) { + LOG_ERROR_BDF("same rule add failed, cookie is invalid.\n"); + ret = -EINVAL; + goto l_end; + } + + if (rule_info->tcf_fltr->prio == save_rule->tcf_fltr->prio) { + LOG_ERROR_BDF("the prio is same\n"); + ret = -EEXIST; + goto l_end; + } + + new_cookie = rule_info->tcf_fltr->cookie; + memset(&temp_rule_info, 0, sizeof(temp_rule_info)); + + if (rule_info->tcf_fltr->prio < save_rule->tcf_fltr->prio) { + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(&temp_rule_info, save_rule); + + SWITCH_RULE_ACT_INFO_CPY(save_rule, rule_info); + + ret = sxe2_fwd_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, + &temp_rule_info); + goto l_end; + } + + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(rule_info, &temp_rule_info); + } + ret = sxe2_hash_cookie_add(adapter, new_cookie, save_rule); + if (ret) { + LOG_ERROR_BDF("hash cookie add failed, cookie:%lu, ret:%d\n", new_cookie, ret); + goto l_end; + } + + ret = sxe2_switch_tc_rule_info_add(rule_info, save_rule); + if (ret) + LOG_ERROR_BDF("tc_rule_info add failed, ret:%d\n", ret); + +l_end: + return ret; +} + +STATIC struct sxe2_vsi_list_info * +sxe2_cpx_rule_vsi_list_create(struct sxe2_adapter *adapter, + struct sxe2_tcf_fltr *fltr, s32 *status) +{ + struct sxe2_vsi_list_info *vsi_list = 
NULL; + u16 *vsi_id; + u16 idx = 0; + u16 i, vsi_cnt; + + vsi_cnt = bitmap_weight(fltr->dst_vsi_map, SXE2_VSI_MAX_CNT); + vsi_id = kcalloc(vsi_cnt, sizeof(*vsi_id), GFP_KERNEL); + if (!vsi_id) { + LOG_ERROR_BDF("alloc memory failed\n"); + *status = -ENOMEM; + goto l_end; + } + + for (i = 0; i < vsi_cnt; i++) { + idx = (u16)find_next_bit((unsigned long *)fltr->dst_vsi_map, + SXE2_VSI_MAX_CNT, idx); + if (idx >= SXE2_VSI_MAX_CNT) + break; + vsi_id[i] = idx; + idx++; + } + + vsi_list = sxe2_vsi_list_create(adapter, vsi_id, + vsi_cnt, SXE2_VSI_LIST_TYPE_FORWARD, + status); + kfree(vsi_id); +l_end: + return vsi_list; +} + +STATIC s32 sxe2_switchdev_user_samerule_add(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + struct sxe2_tc_rule_info temp_rule_info; + s32 ret = 0; + bool new_vsi_list = false; + struct sxe2_tc_rule_info *list_itr = NULL; + + if (rule_info->tcf_fltr->prio == save_rule->tcf_fltr->prio) { + LOG_ERROR_BDF("the prio is same\n"); + ret = -EEXIST; + goto l_end; + } + + list_for_each_entry(list_itr, &save_rule->tc_rule_head, list_entry) { + if (rule_info->tcf_fltr->prio == list_itr->prio) { + LOG_ERROR_BDF("the prio is same\n"); + ret = -EEXIST; + goto l_end; + } + } + + if (rule_info->act.type == SXE2_FWD_TO_VSI_LIST) { + struct sxe2_vsi_list_info *vsi_list; + + vsi_list = sxe2_cpx_rule_vsi_list_create(adapter, rule_info->tcf_fltr, &ret); + if (!vsi_list) + goto l_end; + + rule_info->vsi_list = vsi_list; + rule_info->act.fwd_id.vsi_list_id = vsi_list->vsi_list_id; + sxe2_vsi_list_refcnt_update(adapter, rule_info->vsi_list, true); + new_vsi_list = true; + } + + memset(&temp_rule_info, 0, sizeof(temp_rule_info)); + if (rule_info->tcf_fltr->prio < save_rule->tcf_fltr->prio) { + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(&temp_rule_info, save_rule); + SWITCH_RULE_ACT_INFO_CPY(save_rule, rule_info); + + ret = sxe2_fwd_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", + ret); + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, &temp_rule_info); + if (new_vsi_list) + (void)sxe2_vsi_list_remove(adapter, rule_info->vsi_list); + rule_info->vsi_list = NULL; + goto l_end; + } + + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(rule_info, &temp_rule_info); + } + + ret = sxe2_switch_tc_rule_info_add(rule_info, save_rule); + if (ret) + LOG_ERROR_BDF("tc_rule_info add failed, ret:%d\n", ret); + +l_end: + return ret; +} + +STATIC s32 sxe2_switch_save_rule_check(struct sxe2_adapter *adapter, + struct sxe2_rule_info *save_rule, + struct sxe2_rule_info *rule_info) +{ + s32 ret = 0; + u16 vsi_id; + + if (save_rule->vsi_list) { + vsi_id = rule_info->act.fwd_id.vsi_id; + if (test_bit(vsi_id, save_rule->vsi_list->vsi_map)) { + ret = -EEXIST; + } + } else if (save_rule->act.type == SXE2_DROP_PACKET || + save_rule->act.type == SXE2_MIRROR_PACKET || + save_rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI || + save_rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI_EXT || + save_rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK || + save_rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT) { + if (rule_info->act.fwd_id.vsi_id == save_rule->act.fwd_id.vsi_id) { + LOG_WARN_BDF("rule exist, rule id %d, same vsi %d\n", + save_rule->rule_id, rule_info->act.fwd_id.vsi_id); + ret = -EEXIST; + } else { + LOG_ERROR_BDF("rule exist, rule id %d, exist vsi %d, request vsi %d\n", + save_rule->rule_id, save_rule->act.fwd_id.vsi_id, + rule_info->act.fwd_id.vsi_id); + ret = -EINVAL; + } + } else if (save_rule->act.type == SXE2_FWD_TO_Q) { 
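+		/*
+		 * Same queue target: the duplicate is idempotent (-EEXIST);
+		 * a different queue target is a real conflict (-EINVAL).
+		 */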
+ if (rule_info->act.fwd_id.q_id == save_rule->act.fwd_id.q_id) { + LOG_WARN_BDF("rule exist, rule id %d, same queue %d\n", + save_rule->rule_id, rule_info->act.fwd_id.q_id); + ret = -EEXIST; + } else { + LOG_ERROR_BDF("rule exist, rule id %d, exist queue %d, request queue %d\n", + save_rule->rule_id, save_rule->act.fwd_id.q_id, + rule_info->act.fwd_id.q_id); + ret = -EINVAL; + } + } else if (save_rule->act.type == SXE2_FWD_TO_QGRP) { + if ((rule_info->act.fwd_id.q_id == save_rule->act.fwd_id.q_id) && + (rule_info->act.qgrp_size == save_rule->act.qgrp_size)) { + LOG_WARN_BDF("rule exist, rule id %d, same queue group %d size %d\n", + save_rule->rule_id, + rule_info->act.fwd_id.q_id, + rule_info->act.qgrp_size); + ret = -EEXIST; + } else { + LOG_ERROR_BDF("rule exist, rule id %d, exist group %d size %d, \t" + "request group %d size %d\n", + save_rule->rule_id, + save_rule->act.fwd_id.q_id, + save_rule->act.qgrp_size, + rule_info->act.fwd_id.q_id, + rule_info->act.qgrp_size); + ret = -EINVAL; + } + } else { + if (rule_info->act.fwd_id.vsi_id == + save_rule->act.fwd_id.vsi_id) { + LOG_WARN_BDF("rule exist, rule id %d, same vsi %d\n", + save_rule->rule_id, + rule_info->act.fwd_id.vsi_id); + ret = -EEXIST; + } + } + + return ret; +} + +s32 sxe2_switch_tc_samerule_del(struct sxe2_adapter *adapter, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; + struct sxe2_tc_rule_info *tc_rule_info = NULL; + struct sxe2_tc_rule_info temp_rule_info; + + memset(&temp_rule_info, 0, sizeof(temp_rule_info)); + + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(&temp_rule_info, save_rule); + + tc_rule_info = list_first_entry(&save_rule->tc_rule_head, + struct sxe2_tc_rule_info, list_entry); + + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, tc_rule_info); + + ret = sxe2_fwd_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, &temp_rule_info); + goto l_end; + } + + sxe2_hash_cookie_del(adapter, temp_rule_info.cookie); + + list_del(&tc_rule_info->list_entry); + kfree(tc_rule_info); + +l_end: + return ret; +} + +STATIC s32 sxe2_switch_rule_add_new_list(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule, + enum sxe2_vsi_list_type vsi_list_type) +{ + s32 ret = 0; + bool new_vsi_list = false; + u16 vsi_array[2] = { 0 }; + struct sxe2_vsi_list_info *vsi_list; + u16 vsi_id; + + ret = sxe2_switch_save_rule_check(adapter, save_rule, rule_info); + if (ret) + goto l_end; + + if (save_rule->vsi_list && !rule_info->is_fwd && + save_rule->vsi_list->rule_cnt > 1) { + if (bitmap_weight(save_rule->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT) > 1) { + LOG_ERROR_BDF("rule id %d vsi list id %d has multi vsi and multi ref cnt\n", + save_rule->rule_id, + save_rule->vsi_list->vsi_list_id); + ret = -EIO; + goto l_end; + } + vsi_array[0] = rule_info->act.fwd_id.vsi_id; + vsi_array[1] = (u16)find_first_bit(save_rule->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT); + new_vsi_list = true; + } else if (!save_rule->vsi_list) { + vsi_array[0] = rule_info->act.fwd_id.vsi_id; + vsi_array[1] = save_rule->act.fwd_id.vsi_id; + new_vsi_list = true; + } + + if (new_vsi_list) { + vsi_list = sxe2_vsi_list_create(adapter, vsi_array, 2, + vsi_list_type, &ret); + if (!vsi_list) + goto l_end; + + rule_info->vsi_list = vsi_list; + rule_info->act.fwd_id.vsi_list_id = vsi_list->vsi_list_id; + ret = sxe2_rule_fwd_id_update(adapter, rule_info, save_rule); + if (ret) { + (void)sxe2_vsi_list_remove(adapter, vsi_list); + 
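+ /*
+ * Unwind: the vsi list was created just for this update, so drop
+ * it on a failed fwd_id update and clear the dangling reference.
+ */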
rule_info->vsi_list = NULL; + } + } else { + if (!save_rule->vsi_list) { + LOG_ERROR_BDF("rule id %d need update vsi list, but has no vsi list\n", + save_rule->rule_id); + ret = -EIO; + goto l_end; + } + vsi_id = rule_info->act.fwd_id.vsi_id; + ret = sxe2_vsi_list_update(adapter, save_rule->vsi_list, + &vsi_id, 1, vsi_list_type, true); + if (ret) + goto l_end; + } +l_end: + return ret; +} + +STATIC s32 sxe2_legacy_user_samerule_add(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; + struct sxe2_tc_rule_info *tc_rule_info = NULL; + struct sxe2_tc_rule_info *rule_node = NULL; + struct sxe2_tcf_fltr *new_fltr; + struct sxe2_tc_rule_info temp_rule_info; + bool new_vsi_list = false; + + new_fltr = rule_info->tcf_fltr; + + if (new_fltr->backup_type == SXE2_RULE_BACKUP_T_NO) { + LOG_ERROR_BDF("new rule backup type is %u.\n", SXE2_RULE_BACKUP_T_NO); + ret = -EINVAL; + goto l_end; + } + + if (!list_empty(&save_rule->tc_rule_head)) { + tc_rule_info = list_first_entry(&save_rule->tc_rule_head, + struct sxe2_tc_rule_info, list_entry); + } + + if (tc_rule_info && tc_rule_info->backup_type != new_fltr->backup_type) { + LOG_ERROR_BDF("current backup type is %u, but new rule backup type is %u.\n", + tc_rule_info->backup_type, new_fltr->backup_type); + ret = -EINVAL; + goto l_end; + } + + if (!tc_rule_info || tc_rule_info->backup_type == new_fltr->backup_type) { + rule_node = kzalloc(sizeof(*tc_rule_info), GFP_KERNEL); + if (!rule_node) { + ret = -ENOMEM; + LOG_ERROR_BDF("alloc memory failed\n"); + goto l_end; + } + + if (new_fltr->action == SXE2_FWD_TO_VSI_LIST) { + struct sxe2_vsi_list_info *vsi_list; + + vsi_list = sxe2_cpx_rule_vsi_list_create(adapter, new_fltr, &ret); + if (!vsi_list) { + kfree(rule_node); + goto l_end; + } + rule_info->vsi_list = vsi_list; + rule_info->act.fwd_id.vsi_list_id = vsi_list->vsi_list_id; + sxe2_vsi_list_refcnt_update(adapter, rule_info->vsi_list, true); + new_vsi_list = true; + } + + if (new_fltr->backup_type == SXE2_RULE_BACKUP_T_LAST) { + memset(&temp_rule_info, 0, sizeof(temp_rule_info)); + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(&temp_rule_info, save_rule); + SWITCH_RULE_ACT_INFO_CPY(save_rule, rule_info); + ret = sxe2_fwd_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, + &temp_rule_info); + if (new_vsi_list) + (void)sxe2_vsi_list_remove(adapter, rule_info->vsi_list); + rule_info->vsi_list = NULL; + + kfree(rule_node); + goto l_end; + } + SWITCH_TC_RULE_ACT_INFO_GET_FROM_TC_RULE(rule_node, &temp_rule_info); + list_add(&rule_node->list_entry, &save_rule->tc_rule_head); + + } else { + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(rule_node, rule_info); + list_add_tail(&rule_node->list_entry, &save_rule->tc_rule_head); + } + } + +l_end: + return ret; +} + +STATIC void *sxe2_switch_cpx_samerule_check(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + + void *ret = NULL; + struct sxe2_tc_rule_info *list_itr = NULL; + struct sxe2_tcf_fltr *tcf_fltr = NULL; + + tcf_fltr = rule_info->tcf_fltr; + if (tcf_fltr->action == save_rule->tcf_fltr->action && + tcf_fltr->src_vsi_id == save_rule->tcf_fltr->src_vsi_id) { + if ((tcf_fltr->action == SXE2_FWD_TO_VSI_LIST && + !memcmp(tcf_fltr->dst_vsi_map, save_rule->tcf_fltr->dst_vsi_map, + sizeof(tcf_fltr->dst_vsi_map))) || + (tcf_fltr->action != SXE2_FWD_TO_VSI_LIST && + tcf_fltr->dst_vsi_id == 
save_rule->tcf_fltr->dst_vsi_id)) { + ret = save_rule; + goto l_end; + } + } + + list_for_each_entry(list_itr, &save_rule->tc_rule_head, list_entry) { + if (tcf_fltr->action == list_itr->action && + tcf_fltr->src_vsi_id == list_itr->src_vsi_id) { + if ((tcf_fltr->action == SXE2_FWD_TO_VSI_LIST && + !memcmp(tcf_fltr->dst_vsi_map, list_itr->dst_vsi_map, + sizeof(tcf_fltr->dst_vsi_map))) || + (tcf_fltr->action != SXE2_FWD_TO_VSI_LIST && + tcf_fltr->dst_vsi_id == list_itr->dst_vsi_id)) { + ret = list_itr; + goto l_end; + } + } + } + +l_end: + if (ret) { + LOG_INFO_BDF("There is a same rule, action type:%u, src_vsi:%u\n", + tcf_fltr->action, tcf_fltr->src_vsi_id); + } + + return ret; +} + +STATIC s32 sxe2_switch_user_rule_update(struct sxe2_adapter *adapter, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; + struct sxe2_tc_rule_info *tc_rule_info = NULL; + struct sxe2_tc_rule_info temp_rule_info; + struct sxe2_vsi_list_info *vsi_list; + + vsi_list = save_rule->vsi_list; + + memset(&temp_rule_info, 0, sizeof(temp_rule_info)); + + SWITCH_TC_RULE_ACT_INFO_GET_FROM_RULE(&temp_rule_info, save_rule); + + tc_rule_info = list_first_entry(&save_rule->tc_rule_head, + struct sxe2_tc_rule_info, list_entry); + + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, tc_rule_info); + + ret = sxe2_fwd_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + SWITCH_TC_RULE_ACT_INFO_SET_TO_RULE(save_rule, &temp_rule_info); + goto l_end; + } + + if (vsi_list) + (void)sxe2_vsi_list_remove(adapter, vsi_list); + + list_del(&tc_rule_info->list_entry); + kfree(tc_rule_info); + +l_end: + return ret; +} + +STATIC s32 sxe2_user_samerule_del(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; + void *check_rule; + struct sxe2_tc_rule_info *list_itr = NULL; + struct sxe2_vsi_list_info *vsi_list; + + check_rule = sxe2_switch_cpx_samerule_check(adapter, rule_info, save_rule); + if (!check_rule) { + LOG_ERROR_BDF("can not find rule\n"); + ret = -ENOENT; + goto l_end; + } else if (save_rule == (struct sxe2_rule_info *)check_rule && + list_empty(&save_rule->tc_rule_head)) { + vsi_list = save_rule->vsi_list; + ret = sxe2_fwd_rule_remove(adapter, save_rule, true); + if (ret) { + LOG_DEV_ERR("complex rule del failed, ret:%d\n", ret); + goto l_end; + } + save_rule = NULL; + + if (vsi_list) + (void)sxe2_vsi_list_remove(adapter, vsi_list); + } else if (save_rule == (struct sxe2_rule_info *)check_rule) { + ret = sxe2_switch_user_rule_update(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("switch tc samerule del failed, ret %d\n", ret); + goto l_end; + } + } else { + list_itr = (struct sxe2_tc_rule_info *)check_rule; + if (list_itr->vsi_list) { + (void)sxe2_vsi_list_remove(adapter, + list_itr->vsi_list); + list_itr->vsi_list = NULL; + } + list_del(&list_itr->list_entry); + kfree(list_itr); + } + +l_end: + return ret; +} + +STATIC s32 sxe2_switch_cpx_samerule_add(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; + + if (rule_info->tcf_fltr->is_user_rule != save_rule->tcf_fltr->is_user_rule) { + ret = -EINVAL; + LOG_ERROR_BDF("There is a same rule, type %s, ret %d\n", + save_rule->tcf_fltr->is_user_rule ? 
"user" : "not user", ret); + goto l_end; + } + + if (sxe2_switch_cpx_samerule_check(adapter, rule_info, save_rule)) { + ret = -EEXIST; + LOG_ERROR_BDF("There is a same rule, ret %d\n", ret); + goto l_end; + } + + if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags) && + !save_rule->tcf_fltr->is_user_rule) { + ret = sxe2_switchdev_tc_samerule_add(adapter, rule_info, + save_rule); + if (ret) { + LOG_ERROR_BDF("switchdev mode, kernel same rule process error\n"); + goto l_end; + } + } else if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags) && + save_rule->tcf_fltr->is_user_rule) { + ret = sxe2_switchdev_user_samerule_add(adapter, rule_info, save_rule); + if (ret) { + LOG_ERROR_BDF("switchdev mode, user same rule process error\n"); + goto l_end; + } + } else if (save_rule->tcf_fltr->is_user_rule) { + ret = sxe2_legacy_user_samerule_add(adapter, rule_info, save_rule); + if (ret) + goto l_end; + } else { + LOG_ERROR_BDF("legacy mode, same rule process, but not user rule\n"); + ret = -EINVAL; + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_switch_rule_add(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + struct sxe2_rule_info *save_rule; + struct sxe2_vsi_list_info *vsi_list; + enum sxe2_vsi_list_type vsi_list_type; + bool new_vsi_list = false; + + u16 vsi_id; + + if (rule_info->is_fwd) + vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + else + vsi_list_type = SXE2_VSI_LIST_TYPE_PRUNE; + + if (!rule_info->tcf_fltr) + rule_lock = &switch_ctxt->recipe[rule_info->recipe_id].rule_lock; + else + rule_lock = &switch_ctxt->complex_recipe.rule_lock; + + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + save_rule = sxe2_rule_entry_find(adapter, rule_info); + + if (!save_rule) { + if (!rule_info->is_fwd) { + vsi_id = rule_info->act.fwd_id.vsi_id; + vsi_list = sxe2_vsi_list_entry_find(adapter, &vsi_id, 1, + vsi_list_type); + if (!vsi_list) { + vsi_list = + sxe2_vsi_list_create(adapter, &vsi_id, + 1, vsi_list_type, + &ret); + if (!vsi_list) + goto l_end; + new_vsi_list = true; + } + rule_info->vsi_list = vsi_list; + rule_info->act.fwd_id.vsi_list_id = vsi_list->vsi_list_id; + } else if (rule_info->tcf_fltr && + rule_info->act.type == SXE2_FWD_TO_VSI_LIST) { + vsi_list = sxe2_cpx_rule_vsi_list_create(adapter, + rule_info->tcf_fltr, &ret); + if (!vsi_list) + goto l_end; + new_vsi_list = true; + rule_info->vsi_list = vsi_list; + rule_info->act.fwd_id.vsi_list_id = vsi_list->vsi_list_id; + } + + if (!sxe2_fwd_rule_create(adapter, rule_info, &ret)) { + if (new_vsi_list) + (void)sxe2_vsi_list_remove(adapter, + rule_info->vsi_list); + rule_info->vsi_list = NULL; + } + } else if (rule_info->tcf_fltr) { + ret = sxe2_switch_cpx_samerule_add(adapter, rule_info, + save_rule); + if (ret) { + LOG_ERROR_BDF("switch tc same rule process error\n"); + goto l_end; + } + } else if (rule_info->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK) { + rule_info->hid = save_rule->hid; + ret = -EEXIST; + } else { + ret = sxe2_switch_rule_add_new_list(adapter, rule_info, + save_rule, vsi_list_type); + } + +l_end: + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + + return ret; +} + +STATIC s32 sxe2_switch_rule_del_vsi(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, + struct sxe2_rule_info *save_rule) +{ + s32 ret = 0; +#ifdef SXE2_CFG_DEBUG + if 
(save_rule->vsi_list) {
+ LOG_ERROR_BDF("rule %d forwards to a vsi, but the vsi_list struct is not NULL\n",
+ save_rule->rule_id);
+ }
+#endif
+ if (save_rule->act.fwd_id.vsi_id != rule_info->act.fwd_id.vsi_id) {
+ LOG_WARN_BDF("rule %d forwards to vsi %d, but the vsi to delete is %d\n",
+ save_rule->rule_id, save_rule->act.fwd_id.vsi_id,
+ rule_info->act.fwd_id.vsi_id);
+ ret = -ENOENT;
+ goto l_end;
+ }
+ if (rule_info->tcf_fltr && !list_empty(&save_rule->tc_rule_head)) {
+ ret = sxe2_switch_tc_samerule_del(adapter, save_rule);
+ if (ret) {
+ LOG_ERROR_BDF("switch tc samerule del failed, ret %d\n",
+ ret);
+ goto l_end;
+ }
+ } else {
+ ret = sxe2_fwd_rule_remove(adapter, save_rule, true);
+ save_rule = NULL;
+ }
+l_end:
+ return ret;
+}
+
+STATIC s32 sxe2_switch_rule_del_vsi_list(struct sxe2_adapter *adapter,
+ struct sxe2_rule_info *rule_info,
+ struct sxe2_rule_info *save_rule,
+ enum sxe2_vsi_list_type vsi_list_type)
+{
+ s32 ret = 0;
+ u16 vsi_id = 0;
+ struct sxe2_vsi_list_info *vsi_list;
+#ifdef SXE2_CFG_DEBUG
+ if (!save_rule->vsi_list) {
+ LOG_ERROR_BDF("rule %d forwards to a vsi list, but the vsi_list struct is NULL\n",
+ save_rule->rule_id);
+ ret = -EINVAL;
+ goto l_end;
+ }
+#endif
+ vsi_id = rule_info->act.fwd_id.vsi_id;
+ if (!test_bit(vsi_id, save_rule->vsi_list->vsi_map)) {
+ LOG_WARN_BDF("rule %d forwards to vsi list %d, which does not contain vsi %d\n",
+ save_rule->rule_id, save_rule->vsi_list->vsi_list_id,
+ rule_info->act.fwd_id.vsi_id);
+ ret = -ENOENT;
+ goto l_end;
+ }
+
+ if (bitmap_weight(save_rule->vsi_list->vsi_map, SXE2_VSI_MAX_CNT) == 2 &&
+ rule_info->is_fwd) {
+ vsi_id = (u16)find_first_bit(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT);
+ if (vsi_id == rule_info->act.fwd_id.vsi_id)
+ vsi_id = (u16)find_next_bit(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT, vsi_id + 1);
+
+ rule_info->act.fwd_id.vsi_id = vsi_id;
+ ret = sxe2_rule_fwd_id_update(adapter, rule_info, save_rule);
+ } else if (bitmap_weight(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT) == 2 && !rule_info->is_fwd) {
+ vsi_id = (u16)find_first_bit(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT);
+ if (vsi_id == rule_info->act.fwd_id.vsi_id)
+ vsi_id = (u16)find_next_bit(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT,
+ vsi_id + 1);
+ vsi_list = sxe2_vsi_list_entry_find(adapter, &vsi_id, 1,
+ vsi_list_type);
+ if (!vsi_list) {
+ vsi_id = rule_info->act.fwd_id.vsi_id;
+ ret = sxe2_vsi_list_update(adapter, save_rule->vsi_list,
+ &vsi_id, 1, vsi_list_type,
+ false);
+ if (ret)
+ goto l_end;
+ } else {
+ rule_info->vsi_list = vsi_list;
+ rule_info->act.fwd_id.vsi_list_id =
+ vsi_list->vsi_list_id;
+ ret = sxe2_rule_fwd_id_update(adapter, rule_info,
+ save_rule);
+ if (ret)
+ goto l_end;
+ }
+ } else if (bitmap_weight(save_rule->vsi_list->vsi_map,
+ SXE2_VSI_MAX_CNT) == 1 &&
+ !rule_info->is_fwd) {
+ vsi_list = save_rule->vsi_list;
+ ret = sxe2_fwd_rule_remove(adapter, save_rule, true);
+ if (ret)
+ goto l_end;
+ save_rule = NULL;
+ sxe2_vsi_list_refcnt_update(adapter, vsi_list, false);
+ } else {
+ vsi_id = rule_info->act.fwd_id.vsi_id;
+ ret = sxe2_vsi_list_update(adapter, save_rule->vsi_list,
+ &vsi_id, 1, vsi_list_type, false);
+ if (ret)
+ goto l_end;
+ }
+l_end:
+ return ret;
+}
+
+STATIC s32 sxe2_switch_rule_del_other(struct sxe2_adapter *adapter,
+ struct sxe2_rule_info *rule_info,
+ struct sxe2_rule_info *save_rule)
+{
+ s32 ret = 0;
+
+ if (rule_info->tcf_fltr && !list_empty(&save_rule->tc_rule_head)) {
+ ret = sxe2_switch_tc_samerule_del(adapter, save_rule);
+ if (ret) {
+ 
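+ /*
+ * sxe2_switch_tc_samerule_del() restores the rule's previous
+ * action from its local backup on failure, so only the error
+ * is reported here.
+ */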
LOG_ERROR_BDF("switch tc samerule del failed, ret %d\n", + ret); + goto l_end; + } + } else { + ret = sxe2_fwd_rule_remove(adapter, save_rule, true); + if (ret) + goto l_end; + save_rule = NULL; + } +l_end: + return ret; +} + +STATIC s32 sxe2_switch_rule_del(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + enum sxe2_vsi_list_type vsi_list_type; + struct sxe2_rule_info *save_rule; + + if (rule_info->is_fwd) + vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + else + vsi_list_type = SXE2_VSI_LIST_TYPE_PRUNE; + + if (!rule_info->tcf_fltr) + rule_lock = + &switch_ctxt->recipe[rule_info->recipe_id].rule_lock; + else + rule_lock = &switch_ctxt->complex_recipe.rule_lock; + vsi_list_lock = + &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + save_rule = sxe2_rule_entry_find(adapter, rule_info); + if (!save_rule) { + LOG_ERROR_BDF("can not find rule\n"); + ret = -ENOENT; + goto l_end; + } + + if (rule_info->tcf_fltr && rule_info->tcf_fltr->is_user_rule) { + ret = sxe2_user_samerule_del(adapter, rule_info, save_rule); + if (ret) + goto l_end; + } else { + rule_info->rule_id = save_rule->rule_id; + rule_info->hid = save_rule->hid; + if (save_rule->act.type == SXE2_FWD_TO_VSI) { + ret = sxe2_switch_rule_del_vsi(adapter, rule_info, save_rule); + if (ret) + goto l_end; + } else if (save_rule->act.type == SXE2_FWD_TO_VSI_LIST) { + ret = sxe2_switch_rule_del_vsi_list(adapter, rule_info, + save_rule, vsi_list_type); + if (ret) + goto l_end; + } else if (save_rule->act.type == SXE2_DROP_PACKET || + save_rule->act.type == SXE2_FWD_TO_Q || + save_rule->act.type == SXE2_FWD_TO_QGRP || + save_rule->act.type == SXE2_MIRROR_PACKET || + save_rule->act.type == SXE2_LARGE_ACTION) { + ret = sxe2_switch_rule_del_other(adapter, rule_info, save_rule); + if (ret) + goto l_end; + } + } + +l_end: + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + return ret; +} + +STATIC void sxe2_mac_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, const u8 *mac, + struct sxe2_rule_info *rule) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_MAC; + rule->act.type = SXE2_FWD_TO_VSI; + rule->act.fwd_id.vsi_id = id_in_dev; + rule->is_fwd = true; + + rule->act.lb_en = true; + if (adapter->switch_ctxt.evb_mode == BRIDGE_MODE_VEB) { + if (is_unicast_ether_addr(mac)) + rule->act.lan_en = false; + else + rule->act.lan_en = true; + } else { + rule->act.lan_en = true; + } + + ether_addr_copy(rule->fltr.data.mac.mac_addr, mac); + LOG_DEBUG_BDF("vsi %u prepare mac rule, mac %pM, evb_mode %d\n", + id_in_dev, rule->fltr.data.mac.mac_addr, + adapter->switch_ctxt.evb_mode); + +} + +s32 sxe2_default_mac_addr_get(struct sxe2_vsi *vsi, u8 *mac) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_mac_info_resp resp; + + memset(&resp, 0, sizeof(resp)); + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MAC_ADDR_GET, NULL, 0, &resp, + sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch default mac addr get failed, ret=%d\n", ret); + ret = -EIO; + } + memcpy(mac, resp.mac_addr, sizeof(resp.mac_addr)); + return ret; +} + +s32 sxe2_cur_mac_addr_set(struct sxe2_vsi *vsi, const u8 *mac) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_mac_info req; + + 
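+ /*
+ * Only the MAC bytes are filled into the firmware request before
+ * issuing SXE2_CMD_MAC_ADDR_SET; no response payload is expected.
+ */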
memcpy(req.mac_addr, mac, sizeof(req.mac_addr));
+
+ sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_MAC_ADDR_SET, &req,
+ sizeof(req), NULL, 0);
+
+ ret = sxe2_cmd_fw_exec(adapter, &cmd);
+ if (ret) {
+ LOG_ERROR_BDF("switch mac addr set failed, ret=%d\n", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+s32 sxe2_mac_rule_add(struct sxe2_vsi *vsi, const u8 *mac)
+{
+ s32 ret;
+ struct sxe2_adapter *adapter = vsi->adapter;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ mutex_lock(&vsi->adapter->switch_ctxt.evb_mode_lock);
+ sxe2_mac_rule_prepare(adapter, vsi->idx_in_dev, mac, &rule_info);
+
+ ret = sxe2_switch_rule_add(adapter, &rule_info);
+ mutex_unlock(&vsi->adapter->switch_ctxt.evb_mode_lock);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac_rule add ret:%d, rule_id:%d mac:%pM\n",
+ vsi->idx_in_dev, ret, rule_info.rule_id,
+ rule_info.fltr.data.mac.mac_addr);
+
+ return ret;
+}
+
+s32 sxe2_mac_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev, const u8 *mac)
+{
+ s32 ret = 0;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_mac_rule_prepare(adapter, id_in_dev, mac, &rule_info);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac_rule delete ret:%d, rule_id:%d mac:%pM\n",
+ id_in_dev, ret, rule_info.rule_id,
+ rule_info.fltr.data.mac.mac_addr);
+
+ return ret;
+}
+
+STATIC void sxe2_vlan_rule_prepare(struct sxe2_adapter *adapter,
+ u16 id_in_dev, struct sxe2_vlan *vlan,
+ struct sxe2_rule_info *rule)
+{
+ rule->recipe_id = SXE2_DEFAULT_RECIPE_VLAN;
+ rule->act.type = SXE2_FWD_TO_VSI_LIST;
+ rule->act.fwd_id.vsi_id = id_in_dev;
+ rule->is_fwd = false;
+
+ rule->act.lb_en = false;
+ rule->act.lan_en = true;
+
+ rule->fltr.data.vlan.vlan_id = vlan->vid;
+ rule->fltr.data.vlan.tpid = vlan->tpid;
+ rule->fltr.data.vlan.tpid_valid = true;
+ LOG_DEBUG_BDF("vsi %u prepare vlan rule, vlan id %d, tpid 0x%x\n",
+ id_in_dev, rule->fltr.data.vlan.vlan_id,
+ rule->fltr.data.vlan.tpid);
+}
+
+s32 sxe2_vlan_rule_add(struct sxe2_vsi *vsi, struct sxe2_vlan *vlan)
+{
+ s32 ret;
+ struct sxe2_adapter *adapter = vsi->adapter;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_vlan_rule_prepare(adapter, vsi->idx_in_dev, vlan, &rule_info);
+
+ ret = sxe2_switch_rule_add(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u vlan_rule add ret:%d rule_id:%d vlan_id:%u tpid:0x%x\n",
+ vsi->idx_in_dev, ret, rule_info.rule_id, vlan->vid, vlan->tpid);
+
+ return ret;
+}
+
+s32 sxe2_vlan_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev, struct sxe2_vlan *vlan)
+{
+ s32 ret = 0;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_vlan_rule_prepare(adapter, id_in_dev, vlan, &rule_info);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u vlan_rule del ret:%d rule_id:%d vlan_id:%u tpid:0x%x\n",
+ id_in_dev, ret, rule_info.rule_id, vlan->vid, vlan->tpid);
+
+ return ret;
+}
+
+void sxe2_srcvsi_rule_prepare(struct sxe2_adapter *adapter,
+ u16 id_in_dev, struct sxe2_rule_info *rule)
+{
+ rule->recipe_id = SXE2_DEFAULT_RECIPE_SRCVSI;
+ rule->act.type = SXE2_FWD_TO_VSI;
+ rule->act.fwd_id.vsi_id = id_in_dev;
+
+ rule->is_fwd = true;
+
+ rule->act.lb_en = false;
+ rule->act.lan_en = false;
+
+ rule->fltr.data.srcvsi.vsi_id = id_in_dev;
+ rule->fltr.data.srcvsi.to_rdma =
+ SXE2_FV_PKT_TO_RDMA_NO;
+ rule->fltr.data.srcvsi.packet_src_type =
+ SXE2_PKT_SRC_TYPE_LOOPBACK_HOST;
+
+ 
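+ /*
+ * Source-VSI prune rule: matched on the RX side for traffic that
+ * looped back from the host, with lb_en and lan_en both cleared,
+ * so (going by the flag names) a hit is neither looped back nor
+ * sent out to the LAN.
+ */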
rule->fltr.src_type = SXE2_SRC_TYPE_RX; + LOG_DEBUG_BDF("vsi %u prepare source vsi prune rule\n", id_in_dev); +} + +s32 sxe2_srcvsi_rule_add(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_srcvsi_rule_prepare(adapter, vsi->idx_in_dev, &rule_info); + + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi_rule add ret:%d, rule_id:%d\n", + vsi->idx_in_dev, ret, rule_info.rule_id); + + return ret; +} + +s32 sxe2_srcvsi_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev) +{ + s32 ret = 0; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_srcvsi_rule_prepare(adapter, id_in_dev, &rule_info); + + ret = sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi_rule del ret:%d, rule_id:%d\n", + id_in_dev, ret, rule_info.rule_id); + + return ret; +} + +STATIC void sxe2_srcvsi_ext_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev_fltr, u16 id_in_dev_act, struct sxe2_rule_info *rule) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_SRCVSI_EXT; + rule->act.type = SXE2_FWD_TO_VSI; + rule->act.fwd_id.vsi_id = id_in_dev_act; + + rule->is_fwd = true; + + rule->act.lb_en = false; + rule->act.lan_en = false; + + rule->fltr.data.srcvsi.vsi_id = id_in_dev_fltr; + rule->fltr.data.srcvsi.to_rdma = + SXE2_FV_PKT_TO_RDMA_NO; + rule->fltr.data.srcvsi.packet_src_type = + SXE2_PKT_SRC_TYPE_LOOPBACK_HOST; + + rule->fltr.src_type = SXE2_SRC_TYPE_RX; + LOG_DEBUG_BDF("vsi %u prepare source vsi prune rule\n", id_in_dev_fltr); + +} +s32 sxe2_srcvsi_ext_rule_add(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_rule_info rule_info; + struct sxe2_adapter *adapter = vsi->adapter; + + if ((vsi->src_prune.vsi_id_k == SXE2_VSI_ID_INVALID) || + (vsi->src_prune.vsi_id_u == SXE2_VSI_ID_INVALID)) { + goto l_end; + } + + memset(&rule_info, 0, sizeof(rule_info)); + sxe2_srcvsi_ext_rule_prepare(adapter, vsi->src_prune.vsi_id_u, + vsi->src_prune.vsi_id_k, &rule_info); + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi ext rule add fwd vsi:%u ret:%d, rule_id:%d\n", + vsi->src_prune.vsi_id_u, vsi->src_prune.vsi_id_k, + ret, rule_info.rule_id); + if (ret) + goto l_end; + + memset(&rule_info, 0, sizeof(rule_info)); + sxe2_srcvsi_ext_rule_prepare(adapter, vsi->src_prune.vsi_id_k, + vsi->src_prune.vsi_id_u, &rule_info); + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi ext rule add fwd vsi:%u ret:%d, rule_id:%d\n", + vsi->src_prune.vsi_id_k, vsi->src_prune.vsi_id_u, + ret, rule_info.rule_id); + if (ret) { + memset(&rule_info, 0, sizeof(rule_info)); + sxe2_srcvsi_ext_rule_prepare(adapter, vsi->src_prune.vsi_id_u, + vsi->src_prune.vsi_id_k, &rule_info); + (void)sxe2_switch_rule_del(adapter, &rule_info); + } +l_end: + return ret; +} + +s32 sxe2_srcvsi_ext_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_rule_info rule_info; + struct sxe2_vsi *vsi; + + vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!vsi) { + LOG_ERROR_BDF("src vsi id_in_dev:%u is NULL.\n", vsi_id); + goto l_end; + } + + if ((vsi->src_prune.vsi_id_k == SXE2_VSI_ID_INVALID) || + (vsi->src_prune.vsi_id_u == SXE2_VSI_ID_INVALID)) { + goto l_end; + } + + memset(&rule_info, 0, sizeof(rule_info)); + sxe2_srcvsi_ext_rule_prepare(adapter, vsi->src_prune.vsi_id_u, + vsi->src_prune.vsi_id_k, &rule_info); + ret = 
sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi ext rule del fwd vsi:%u ret:%d, rule_id:%d\n", + vsi->src_prune.vsi_id_u, vsi->src_prune.vsi_id_k, + ret, rule_info.rule_id); + + memset(&rule_info, 0, sizeof(rule_info)); + sxe2_srcvsi_ext_rule_prepare(adapter, vsi->src_prune.vsi_id_k, + vsi->src_prune.vsi_id_u, &rule_info); + ret = sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u srcvsi ext rule del fwd vsi:%u ret:%d, rule_id:%d\n", + vsi->src_prune.vsi_id_k, vsi->src_prune.vsi_id_u, + ret, rule_info.rule_id); + + vsi->src_prune.vsi_id_k = SXE2_VSI_ID_INVALID; + vsi->src_prune.vsi_id_u = SXE2_VSI_ID_INVALID; + +l_end: + return ret; +} + +bool sxe2_promisc_rule_in_use(struct sxe2_vsi *vsi) +{ + bool ret = false; + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct sxe2_rule_info *list_itr = NULL; + u16 recipe_id = SXE2_DEFAULT_RECIPE_PROMISC; + enum sxe2_vsi_list_type vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + struct list_head *list_head; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + + list_head = &switch_ctxt->recipe[recipe_id].rule_head; + rule_lock = &switch_ctxt->recipe[recipe_id].rule_lock; + vsi_list_lock = + &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(list_itr, list_head, list_entry) { + if ((list_itr->act.type == SXE2_FWD_TO_VSI && + list_itr->act.fwd_id.vsi_id == vsi->idx_in_dev) || + (list_itr->act.type == SXE2_FWD_TO_VSI_LIST && + list_itr->vsi_list && + test_bit(vsi->idx_in_dev, list_itr->vsi_list->vsi_map))) { + ret = true; + break; + } + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + + return ret; +} + +STATIC void sxe2_promisc_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_PROMISC; + rule->act.type = SXE2_FWD_TO_VSI; + rule->act.fwd_id.vsi_id = id_in_dev; + + rule->is_fwd = true; + + rule->act.lb_en = false; + rule->act.lan_en = false; + + rule->fltr.src_type = SXE2_SRC_TYPE_RX; + LOG_DEBUG_BDF("vsi %u prepare promisc rule\n", id_in_dev); +} + +s32 sxe2_promisc_rule_add(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_promisc_rule_prepare(adapter, vsi->idx_in_dev, &rule_info); + + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u promisc_rule add ret:%d, rule_id:%d\n", + vsi->idx_in_dev, ret, rule_info.rule_id); + + return ret; +} + +s32 sxe2_promisc_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev) +{ + s32 ret = 0; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_promisc_rule_prepare(adapter, id_in_dev, &rule_info); + + ret = sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u promisc_rule del ret:%d, rule_id:%d\n", + id_in_dev, ret, rule_info.rule_id); + + return ret; +} + +bool sxe2_allmulti_rule_in_use(struct sxe2_vsi *vsi) +{ + bool ret = false; + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct sxe2_rule_info *list_itr = NULL; + u16 recipe_id = SXE2_DEFAULT_RECIPE_ALLMULTI; + enum sxe2_vsi_list_type vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + struct list_head *list_head; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + + list_head = &switch_ctxt->recipe[recipe_id].rule_head; + 
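+ /*
+ * Lock ordering note: throughout this file rule_lock is always
+ * taken before vsi_list_lock, and both stay held while the rule
+ * list is walked, so the vsi_map test below sees a stable bitmap.
+ */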
rule_lock = &switch_ctxt->recipe[recipe_id].rule_lock; + vsi_list_lock = + &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(list_itr, list_head, list_entry) { + if ((list_itr->act.type == SXE2_FWD_TO_VSI && + list_itr->act.fwd_id.vsi_id == vsi->idx_in_dev) || + (list_itr->act.type == SXE2_FWD_TO_VSI_LIST && + list_itr->vsi_list && + test_bit(vsi->idx_in_dev, list_itr->vsi_list->vsi_map))) { + ret = true; + break; + } + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + + return ret; +} + +STATIC void sxe2_allmulti_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_ALLMULTI; + rule->act.type = SXE2_FWD_TO_VSI; + rule->act.fwd_id.vsi_id = id_in_dev; + rule->is_fwd = true; + + rule->act.lb_en = true; + rule->act.lan_en = true; + + LOG_DEBUG_BDF("vsi %u prepare allmulti rule\n", id_in_dev); +} + +s32 sxe2_allmulti_rule_add(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_allmulti_rule_prepare(adapter, vsi->idx_in_dev, &rule_info); + + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u allmulti_rule add ret:%d, rule_id:%d\n", + vsi->idx_in_dev, ret, rule_info.rule_id); + + return ret; +} + +s32 sxe2_allmulti_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev) +{ + s32 ret = 0; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_allmulti_rule_prepare(adapter, id_in_dev, &rule_info); + + ret = sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u allmulti_rule del ret:%d, rule_id:%d\n", + id_in_dev, ret, rule_info.rule_id); + + return ret; +} + +STATIC void sxe2_tx_etype_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule, u16 etype) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_TX_ETYPE; + rule->act.type = SXE2_DROP_PACKET; + rule->act.fwd_id.vsi_id = id_in_dev; + rule->is_fwd = true; + + rule->act.lb_en = false; + rule->act.lan_en = true; + + rule->fltr.data.etype.ethertype = etype; + rule->fltr.data.etype.vsi_id = rule->act.fwd_id.vsi_id; + rule->fltr.src_type = SXE2_SRC_TYPE_TX; + LOG_DEBUG_BDF("vsi %u prepare tx etype rule, etype %x\n", id_in_dev, + etype); + +} +STATIC void sxe2_rx_etype_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule, u16 etype) +{ + rule->recipe_id = SXE2_DEFAULT_RECIPE_RX_ETYPE; + rule->act.type = SXE2_FWD_TO_VSI; + rule->act.fwd_id.vsi_id = id_in_dev; + rule->is_fwd = true; + + rule->act.lb_en = false; + rule->act.lan_en = false; + + rule->fltr.data.etype.ethertype = etype; + rule->fltr.src_type = SXE2_SRC_TYPE_RX; + LOG_DEBUG_BDF("vsi %u prepare rx etype rule, etype %x\n", id_in_dev, etype); +} + +s32 sxe2_tx_etype_rule_add(struct sxe2_vsi *vsi, u16 etype) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_rule_info rule_info; + + if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags) && etype == ETH_P_LLDP) { + LOG_DEBUG_BDF("do not need to set tx lldp drop rule in switchdev mode.\n"); + return 0; + } + + memset(&rule_info, 0, sizeof(rule_info)); + + sxe2_tx_etype_rule_prepare(adapter, vsi->idx_in_dev, + &rule_info, etype); + + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, + "vsi_id:%u tx_etype_rule add ret:%d, rule_id:%d\n", + 
vsi->idx_in_dev, ret, rule_info.rule_id);
+
+ return ret;
+}
+
+s32 sxe2_tx_etype_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev, u16 etype)
+{
+ s32 ret = 0;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_tx_etype_rule_prepare(adapter, id_in_dev, &rule_info, etype);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret,
+ "vsi_id:%u tx_etype_rule del ret:%d, rule_id:%d\n",
+ id_in_dev, ret, rule_info.rule_id);
+
+ return ret;
+}
+
+s32 sxe2_rx_etype_rule_add(struct sxe2_vsi *vsi, u16 etype)
+{
+ s32 ret = 0;
+ struct sxe2_adapter *adapter = vsi->adapter;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_rx_etype_rule_prepare(adapter, vsi->idx_in_dev,
+ &rule_info, etype);
+
+ ret = sxe2_switch_rule_add(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret,
+ "vsi_id:%u rx_etype_rule add ret:%d, rule_id:%d\n",
+ vsi->idx_in_dev, ret, rule_info.rule_id);
+
+ return ret;
+}
+
+s32 sxe2_rx_etype_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev, u16 etype)
+{
+ s32 ret = 0;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_rx_etype_rule_prepare(adapter, id_in_dev, &rule_info, etype);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret,
+ "vsi_id:%u rx_etype_rule del ret:%d, rule_id:%d\n",
+ id_in_dev, ret, rule_info.rule_id);
+
+ return ret;
+}
+
+s32 sxe2_etype_fltr_init(struct sxe2_vsi *vsi)
+{
+ s32 ret = 0;
+ bool islldpAgent = false;
+ struct sxe2_adapter *adapter = vsi->adapter;
+
+ ret = sxe2_tx_etype_rule_add(vsi, ETH_P_PAUSE);
+ if (ret)
+ return ret;
+
+ if (vsi->type == SXE2_VSI_T_PF ||
+ vsi->type == SXE2_VSI_T_DPDK_PF) {
+ mutex_lock(&adapter->switch_ctxt.lldp_rule_lock);
+ ret = sxe2_lldp_fw_agent_status_get(vsi->adapter, &islldpAgent, NULL);
+ LOG_DEBUG_BDF("sxe2 lldp agent_status islldpAgent:%u, ret:%d\n", islldpAgent, ret);
+
+ if (ret || islldpAgent)
+ ret = sxe2_tx_etype_rule_add(vsi, ETH_P_LLDP);
+ else
+ ret = sxe2_rx_etype_rule_add(vsi, ETH_P_LLDP);
+
+ mutex_unlock(&adapter->switch_ctxt.lldp_rule_lock);
+ if (ret)
+ return ret;
+ } else if (vsi->type == SXE2_VSI_T_VF || vsi->type == SXE2_VSI_T_DPDK_VF) {
+ ret = sxe2_tx_etype_rule_add(vsi, ETH_P_LLDP);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static s32 sxe2_mac_spoofchk_hid_find(struct sxe2_adapter *adapter, u16 id_in_dev, u16 *hid)
+{
+ s32 ret = 0;
+ struct sxe2_rule_info rule_info;
+ struct mutex *rule_lock;
+ struct sxe2_rule_info *save_rule;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_mac_spoofchk_rule_prepare(adapter, id_in_dev, &rule_info);
+
+ rule_lock = &adapter->switch_ctxt.recipe[SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK].rule_lock;
+
+ mutex_lock(rule_lock);
+ save_rule = sxe2_rule_entry_find(adapter, &rule_info);
+ if (save_rule) {
+ *hid = save_rule->hid;
+ } else {
+ LOG_ERROR_BDF("There is no base mac spoofchk rule, can not fill hid.\n");
+ ret = -ENOENT;
+ }
+ mutex_unlock(rule_lock);
+
+ return ret;
+}
+
+STATIC void sxe2_mac_spoofchk_ext_rule_prepare(struct sxe2_adapter *adapter,
+ u16 vsi_id_in_dev, struct sxe2_rule_info *rule,
+ u16 hid, const u8 *mac)
+{
+ rule->recipe_id = SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT;
+ rule->act.type = SXE2_FWD_TO_VSI;
+ rule->act.fwd_id.vsi_id = vsi_id_in_dev;
+ rule->is_fwd = true;
+
+ rule->act.lb_en = true;
+ rule->act.lan_en = true;
+
+ ether_addr_copy(rule->fltr.data.mac_spoofchk_ext.mac_addr, mac);
+ rule->fltr.data.mac_spoofchk_ext.hid = hid;
+ rule->fltr.src_type = SXE2_SRC_TYPE_TX;
+ 
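+ /*
+ * The hid comes from the base mac spoofchk rule found by
+ * sxe2_mac_spoofchk_hid_find(), tying this per-MAC exception
+ * to that base rule.
+ */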
+
+ LOG_DEBUG_BDF("vsi %u prepare mac spoofchk_ext rule, mac %pM, hid %04x\n",
+ vsi_id_in_dev, mac, rule->fltr.data.mac_spoofchk_ext.hid);
+}
+
+s32 sxe2_mac_spoofchk_ext_rule_add(struct sxe2_adapter *adapter,
+ u16 id_in_dev, const u8 *mac)
+{
+ s32 ret;
+ struct sxe2_rule_info rule_info;
+ u16 hid;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ ret = sxe2_mac_spoofchk_hid_find(adapter, id_in_dev, &hid);
+ if (ret) {
+ LOG_ERROR_BDF("vsi %u can not find mac spoofchk rule\n", id_in_dev);
+ return ret;
+ }
+
+ sxe2_mac_spoofchk_ext_rule_prepare(adapter, id_in_dev, &rule_info, hid, mac);
+
+ ret = sxe2_switch_rule_add(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac spoofchk ext rule add "
+ "ret:%d, rule_id:%d, hid %04x, mac %pM\n",
+ id_in_dev, ret, rule_info.rule_id,
+ rule_info.fltr.data.mac_spoofchk_ext.hid, mac);
+
+ return ret;
+}
+
+s32 sxe2_mac_spoofchk_ext_rule_del(struct sxe2_adapter *adapter,
+ u16 id_in_dev, const u8 *mac)
+{
+ s32 ret;
+ struct sxe2_rule_info rule_info;
+ u16 hid;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ ret = sxe2_mac_spoofchk_hid_find(adapter, id_in_dev, &hid);
+ if (ret) {
+ LOG_ERROR_BDF("vsi %u can not find mac spoofchk rule\n", id_in_dev);
+ return ret;
+ }
+
+ sxe2_mac_spoofchk_ext_rule_prepare(adapter, id_in_dev, &rule_info, hid, mac);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac spoofchk ext rule del "
+ "ret:%d, rule_id:%d, hid %04x, mac %pM\n",
+ id_in_dev, ret, rule_info.rule_id,
+ rule_info.fltr.data.mac_spoofchk_ext.hid, mac);
+
+ return ret;
+}
+
+void sxe2_mac_spoofchk_rule_prepare(struct sxe2_adapter *adapter,
+ u16 id_in_dev, struct sxe2_rule_info *rule)
+{
+ rule->recipe_id = SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK;
+ rule->act.type = SXE2_FWD_TO_VSI;
+ rule->act.fwd_id.vsi_id = id_in_dev;
+ rule->is_fwd = true;
+
+ rule->act.lb_en = false;
+ rule->act.lan_en = false;
+
+ rule->fltr.data.mac_spoofchk.vsi_id = id_in_dev;
+ rule->fltr.src_type = SXE2_SRC_TYPE_TX;
+
+ LOG_DEBUG_BDF("vsi %u prepare mac spoofchk rule\n", id_in_dev);
+}
+
+s32 sxe2_mac_spoofchk_rule_add(struct sxe2_adapter *adapter,
+ u16 id_in_dev)
+{
+ s32 ret;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_mac_spoofchk_rule_prepare(adapter, id_in_dev, &rule_info);
+
+ ret = sxe2_switch_rule_add(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac_spoofchk_rule add ret:%d, rule_id:%d, hid %04x\n",
+ id_in_dev, ret, rule_info.rule_id, rule_info.hid);
+
+ return ret;
+}
+
+s32 sxe2_mac_spoofchk_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev)
+{
+ s32 ret;
+ struct sxe2_rule_info rule_info;
+
+ memset(&rule_info, 0, sizeof(rule_info));
+
+ sxe2_mac_spoofchk_rule_prepare(adapter, id_in_dev, &rule_info);
+
+ ret = sxe2_switch_rule_del(adapter, &rule_info);
+ LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u mac_spoofchk_rule del ret:%d, rule_id:%d, hid %04x\n",
+ id_in_dev, ret, rule_info.rule_id, rule_info.hid);
+
+ return ret;
+}
+
+static bool sxe2_profile_is_empty(u16 profile_id, struct sxe2_adapter *adapter)
+{
+ struct sxe2_profile_fv_item fv_item[SXE2_SWITCH_PROFILE_FV_CNT];
+
+ memset(fv_item, 0, sizeof(fv_item));
+
+ if (memcmp(adapter->switch_ctxt.profile_fv_item[profile_id], fv_item,
+ sizeof(fv_item)) == 0)
+ return true;
+ return false;
+}
+
+static bool sxe2_profile_tunnel_check(u16 profile_id,
+ struct sxe2_tcf_fltr *fltr)
+{
+ s32 i;
+ struct sxe2_profile_fv_item fv_item;
+ struct sxe2_adapter *adapter = fltr->adapter;
+
+ if (fltr->tunnel_type ==
SXE2_TNL_ALL) + return true; + + for (i = 0; i < SXE2_SWITCH_PROFILE_FV_CNT; i++) { + fv_item = adapter->switch_ctxt.profile_fv_item[profile_id][i]; + if (fv_item.prot_id == SXE2_UDP_OL_HW && + fv_item.offset == SXE2_PROT_OFFSET_VNI && fv_item.enable) { + if (fltr->tunnel_type == SXE2_TNL_VXLAN || + fltr->tunnel_type == SXE2_TNL_GENEVE) + return true; + else + return false; + } else if (fv_item.prot_id == SXE2_GRE_HW && fv_item.enable) { + if (fltr->tunnel_type == SXE2_TNL_GRETAP) + return true; + else + return false; + } + } + + if (fltr->tunnel_type == SXE2_TNL_NONE) + return true; + return false; +} + +s32 sxe2_sw_profile_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter) +{ + u16 i; + u32 j; + u16 profile_id; + struct sxe2_profile_fv_item *ddp_profile_fv_item = NULL; + + if (!cnt || (base_id + cnt > (SXE2_PROFILE_MAX_COUNT - 1))) { + LOG_ERROR_BDF("cnt:%u base_id:%u invalid.\n", cnt, base_id); + return -EINVAL; + } + + for (i = 0; i < cnt; i++) { + profile_id = base_id + i; + for (j = 0; j < SXE2_SWITCH_PROFILE_FV_CNT; j++) { + ddp_profile_fv_item =(struct sxe2_profile_fv_item *)(((u32 *)data) + + (u64)i * SXE2_SWITCH_PROFILE_FV_CNT + j); + memcpy(&adapter->switch_ctxt.profile_fv_item[profile_id][j], + ddp_profile_fv_item, + sizeof(struct sxe2_profile_fv_item)); + } + } + + return 0; +} + +static s32 sxe2_profile_fv_check(u16 profile_id, struct sxe2_tcf_fltr *fltr, + bool *match) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + s32 fv_idx; + bool find; + struct sxe2_profile_fv_item fv_item; + u16 i, j, k; + + *match = false; + + fv_idx = 0; + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + if (sxe2_tcf_item_is_empty(fltr, i)) + continue; + + item = &fltr->items[i]; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) { + if (!item->mask.raw[j]) + continue; + find = false; + for (k = 0; k < SXE2_SWITCH_PROFILE_FV_CNT; k++) { + fv_item = adapter->switch_ctxt.profile_fv_item[profile_id][k]; + if (fv_item.enable && + (fv_item.prot_id == sxe2_prot_id_tbl[i].prot_id) && + (fv_item.offset == sizeof(u16) * j)) { + find = true; + if (fltr->lkup_mask[fv_idx] && + (fltr->lkup_index[fv_idx] != k + 1)) { + LOG_ERROR_BDF("profile fv conflict profile id %d\n", + profile_id); + return -EIO; + } + fltr->lkup_value[fv_idx] = item->value.raw[j]; + fltr->lkup_mask[fv_idx] = item->mask.raw[j]; + fltr->lkup_index[fv_idx++] = k + 1; + break; + } + } + + if (!find) + return 0; + } + } + + *match = true; + return 0; +} + +void sxe2_tc_item_print(struct sxe2_tcf_fltr *user_cpx_fltr); + +s32 sxe2_tcf_profile_find(struct sxe2_tcf_fltr *fltr) +{ + u16 prof_id, i; + struct sxe2_adapter *adapter = fltr->adapter; + s32 ret; + bool match; + + sxe2_tc_item_print(fltr); + + for (prof_id = 0; prof_id < SXE2_MAX_NUM_PROFILES; prof_id++) { + if (sxe2_profile_is_empty(prof_id, adapter)) + continue; + + if (!sxe2_profile_tunnel_check(prof_id, fltr)) + continue; + + ret = sxe2_profile_fv_check(prof_id, fltr, &match); + if (ret) + return ret; + + if (match) { + set_bit(prof_id, fltr->profiles); + LOG_DEBUG_BDF("profile id %d match\n", prof_id); + } + } + + if (bitmap_weight(fltr->profiles, SXE2_MAX_NUM_PROFILES) == 0) { + LOG_ERROR_BDF("no profile can match request\n"); + return -EINVAL; + } + + for (i = 0; i < SXE2_MAX_REPLY_RECIPE; i++) { + LOG_DEBUG_BDF("lkup_mask 0x%x, 0x%x, 0x%x, 0x%x\n", + fltr->lkup_mask[i * SXE2_NUM_WORDS_RECIPE + 0], + fltr->lkup_mask[i * SXE2_NUM_WORDS_RECIPE + 1], + fltr->lkup_mask[i * SXE2_NUM_WORDS_RECIPE + 2], + fltr->lkup_mask[i * 
SXE2_NUM_WORDS_RECIPE + 3]); + LOG_DEBUG_BDF("lkup_value %d, %d, %d, %d\n", + fltr->lkup_value[i * SXE2_NUM_WORDS_RECIPE + 0], + fltr->lkup_value[i * SXE2_NUM_WORDS_RECIPE + 1], + fltr->lkup_value[i * SXE2_NUM_WORDS_RECIPE + 2], + fltr->lkup_value[i * SXE2_NUM_WORDS_RECIPE + 3]); + LOG_DEBUG_BDF("lkup_index %d, %d, %d, %d\n", + fltr->lkup_index[i * SXE2_NUM_WORDS_RECIPE + 0], + fltr->lkup_index[i * SXE2_NUM_WORDS_RECIPE + 1], + fltr->lkup_index[i * SXE2_NUM_WORDS_RECIPE + 2], + fltr->lkup_index[i * SXE2_NUM_WORDS_RECIPE + 3]); + } + + return 0; +} + +void sxe2_tcf_match_meta_fill(struct sxe2_tcf_fltr *fltr) +{ + struct sxe2_tcf_key_item *item; + + if (fltr->src_type == SXE2_SRC_TYPE_RX) { + item = &fltr->items[SXE2_META_PKT_SRC]; + item->value.raw[SXE2_META_PKT_SRC_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_PKT_SRC_RX << SXE2_FV_PKT_SRC_OFFSET)); + item->mask.raw[SXE2_META_PKT_SRC_OFFSET] = + cpu_to_be16((u16)SXE2_FV_PKT_SRC_MASK); + } else { + item = &fltr->items[SXE2_META_VSI_NUM]; + item->value.raw[SXE2_META_VSI_NUM_OFFSET] = + cpu_to_be16(fltr->src_vsi_id << SXE2_FV_VSI_NUM_OFFSET); + item->mask.raw[SXE2_META_VSI_NUM_OFFSET] = + cpu_to_be16((u16)SXE2_FV_VSI_NUM_MASK); + if (fltr->action == SXE2_MIRROR_PACKET) { + item = &fltr->items[SXE2_META_PKT_SRC]; + item->value.raw[SXE2_META_PKT_SRC_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_PKT_SRC_TX << SXE2_FV_PKT_SRC_OFFSET)); + item->mask.raw[SXE2_META_PKT_SRC_OFFSET] = + cpu_to_be16((u16)SXE2_FV_PKT_SRC_MASK); + } + } +} +STATIC s32 sxe2_tcf_rule_prepare(struct sxe2_adapter *adapter, + struct sxe2_tcf_fltr *fltr, + struct sxe2_rule_info *rule) +{ + s32 ret = 0; + u16 idx = 0; + + rule->tcf_fltr = fltr; + rule->is_fwd = true; + rule->act.type = fltr->action; + if (fltr->action == SXE2_FWD_TO_Q) { + rule->act.fwd_id.q_id = fltr->dst_queue_id; + rule->act.q_high = fltr->dst_queue_high; + } else if (fltr->action == SXE2_FWD_TO_QGRP) { + rule->act.fwd_id.q_id = fltr->dst_queue_id; + rule->act.qgrp_size = fltr->dst_queue_group; + rule->act.q_high = fltr->dst_queue_high; + } else if (fltr->action == SXE2_FWD_TO_VSI) { + rule->act.fwd_id.vsi_id = fltr->dst_vsi_id; + } else if (fltr->action == SXE2_MIRROR_PACKET) { + rule->act.fwd_id.vsi_id = fltr->dst_vsi_id; + } else if (fltr->action == SXE2_LARGE_ACTION) { + rule->act.fwd_id.vsi_id = fltr->dst_vsi_id; + } else { + LOG_DEBUG_BDF("action %d\n", fltr->action); + } + + if (fltr->src_type == SXE2_SRC_TYPE_RX) { + rule->act.lb_en = false; + rule->act.lan_en = false; + } else { + rule->act.lb_en = true; + rule->act.lan_en = false; + + sxe2_for_each_vsi(&adapter->vsi_ctxt, idx) { + if (!adapter->vsi_ctxt.vsi[idx]) + continue; + + if ((adapter->vsi_ctxt.vsi[idx]->type == SXE2_VSI_T_DPDK_PF) || + (adapter->vsi_ctxt.vsi[idx]->type == SXE2_VSI_T_PF)) { + + if (fltr->dst_vsi_id == adapter->vsi_ctxt.vsi[idx]->idx_in_dev) { + rule->act.lb_en = false; + rule->act.lan_en = true; + break; + } + + if (fltr->action == SXE2_FWD_TO_VSI_LIST) { + if (test_bit(adapter->vsi_ctxt.vsi[idx]->idx_in_dev, fltr->dst_vsi_map)) { + ret = -EINVAL; + LOG_SWITCH_RULE_OPT(ret, "vsi list action include pf vsi %d\n", + adapter->vsi_ctxt.vsi[idx]->idx_in_dev); + goto l_end; + } + } + } + } + + if (fltr->action == SXE2_MIRROR_PACKET) { + rule->act.lb_en = false; + rule->act.lan_en = false; + } + if (fltr->action == SXE2_LARGE_ACTION) { + rule->act.lb_en = false; + rule->act.lan_en = false; + } + } + LOG_DEBUG_BDF("dst_id %d, lb_en %d, lan_en %d\n", rule->act.fwd_id.q_id, + rule->act.lb_en, rule->act.lan_en); +l_end: + return ret; +} + +s32 
sxe2_tcf_rule_add(struct sxe2_adapter *adapter, + u16 vsi_id_in_dev, struct sxe2_tcf_fltr *fltr) +{ + s32 ret; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + + ret = sxe2_tcf_rule_prepare(adapter, fltr, &rule_info); + if (ret) + return ret; + + ret = sxe2_switch_rule_add(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u tc_rule cookie %lu add ret:%d, rule_id:%d\n", + vsi_id_in_dev, fltr->cookie, ret, rule_info.rule_id); + + return ret; +} + +s32 sxe2_tcf_rule_del(struct sxe2_adapter *adapter, + u16 vsi_id_in_dev, struct sxe2_tcf_fltr *fltr) +{ + s32 ret; + struct sxe2_rule_info rule_info; + + memset(&rule_info, 0, sizeof(rule_info)); + ret = sxe2_tcf_rule_prepare(adapter, fltr, &rule_info); + if (ret) + return ret; + + ret = sxe2_switch_rule_del(adapter, &rule_info); + LOG_SWITCH_RULE_OPT(ret, "vsi_id:%u tc_rule cookie %lu del ret:%d, rule_id:%d\n", + vsi_id_in_dev, fltr->cookie, ret, rule_info.rule_id); + + return ret; +} + +void sxe2_vsi_l2_fltr_clean(struct sxe2_vsi *vsi) +{ + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct list_head *list_head; + struct list_head *vsi_list_head; + struct sxe2_rule_info *list_itr = NULL; + struct sxe2_rule_info *list_tmp = NULL; + struct sxe2_vsi_list_info *vsi_list = NULL; + struct sxe2_vsi_list_info *vsi_list_tmp = NULL; + s32 recipe_id; + s32 i; + enum sxe2_vsi_list_type vsi_list_type; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + s32 vsi_cnt; + u16 vsi_left; + + for (recipe_id = 0; recipe_id < SXE2_DEFAULT_RECIPE_MAX; recipe_id++) { + if (recipe_id != SXE2_DEFAULT_RECIPE_VLAN) + vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + else + vsi_list_type = SXE2_VSI_LIST_TYPE_PRUNE; + + list_head = &switch_ctxt->recipe[recipe_id].rule_head; + rule_lock = &switch_ctxt->recipe[recipe_id].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + list_for_each_entry_safe(list_itr, list_tmp, list_head, + list_entry) { + if (recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI_EXT) { + if (list_itr->act.fwd_id.vsi_id == vsi->idx_in_dev || + list_itr->fltr.data.srcvsi.vsi_id == vsi->idx_in_dev){ + list_del(&list_itr->list_entry); + kfree(list_itr); + list_itr = NULL; + continue; + } + } + if ((list_itr->act.type == SXE2_FWD_TO_VSI || + list_itr->act.type == SXE2_DROP_PACKET) && + list_itr->act.fwd_id.vsi_id == vsi->idx_in_dev) { + list_del(&list_itr->list_entry); + kfree(list_itr); + list_itr = NULL; + } else if (list_itr->act.type == SXE2_FWD_TO_VSI_LIST && + test_bit(vsi->idx_in_dev, list_itr->vsi_list->vsi_map)) { + clear_bit(vsi->idx_in_dev, list_itr->vsi_list->vsi_map); + vsi_cnt = bitmap_weight(list_itr->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT); + if (vsi_list_type == SXE2_VSI_LIST_TYPE_FORWARD && vsi_cnt == 1) { + list_itr->act.type = SXE2_FWD_TO_VSI; + vsi_left = (u16)find_first_bit(list_itr->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT); + list_itr->act.fwd_id.vsi_id = vsi_left; + list_itr->vsi_list = NULL; + } else if (vsi_list_type == SXE2_VSI_LIST_TYPE_PRUNE && + vsi_cnt == 0) { + list_del(&list_itr->list_entry); + kfree(list_itr); + list_itr = NULL; + } + } else if (vsi_list_type == SXE2_VSI_LIST_TYPE_PRUNE && + list_itr->act.type == SXE2_FWD_TO_VSI_LIST && + bitmap_weight(list_itr->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT) == 0) { + list_del(&list_itr->list_entry); + kfree(list_itr); + list_itr = NULL; + } else if (vsi_list_type == SXE2_VSI_LIST_TYPE_FORWARD && + list_itr->act.type == SXE2_FWD_TO_VSI_LIST 
&& + bitmap_weight(list_itr->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT) == 1) { + list_itr->act.type = SXE2_FWD_TO_VSI; + vsi_left = (u16)find_first_bit(list_itr->vsi_list->vsi_map, + SXE2_VSI_MAX_CNT); + list_itr->act.fwd_id.vsi_id = vsi_left; + list_itr->vsi_list = NULL; + } + } + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + } + + for (i = 0; i < SXE2_VSI_LIST_TYPE_MAX; i++) { + vsi_list_head = &switch_ctxt->vsi_list_mgmt[i].vsi_list_head; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[i].vsi_list_lock; + mutex_lock(vsi_list_lock); + list_for_each_entry_safe(vsi_list, vsi_list_tmp, vsi_list_head, list_entry) { + if (i == SXE2_VSI_LIST_TYPE_FORWARD && + bitmap_weight(vsi_list->vsi_map, SXE2_VSI_MAX_CNT) == 1) { + list_del(&vsi_list->list_entry); + kfree(vsi_list); + vsi_list = NULL; + } else if (i == SXE2_VSI_LIST_TYPE_PRUNE && + bitmap_weight(vsi_list->vsi_map, SXE2_VSI_MAX_CNT) == 0) { + list_del(&vsi_list->list_entry); + kfree(vsi_list); + vsi_list = NULL; + } + } + mutex_unlock(vsi_list_lock); + } +} + +void sxe2_vsi_complex_fltr_clean(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct sxe2_rule_info *rule_info = NULL; + struct sxe2_tcf_fltr *tcf_fltr; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + u32 bkt; + struct hlist_node *temp; + + rule_lock = &switch_ctxt->complex_recipe.rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + hash_for_each_safe(adapter->switch_ctxt.complex_recipe.ht_lkup, bkt, temp, tcf_fltr, node) { + if (tcf_fltr->src_vsi_id == vsi->idx_in_dev) { + rule_info = tcf_fltr->rule_info; + sxe2_hash_cookie_del(adapter, tcf_fltr->cookie); + sxe2_hash_lkup_del(adapter, tcf_fltr); + LOG_DEBUG_BDF("clean rule success, rule id %u\n", + rule_info->rule_id); + sxe2_switch_sw_rule_free(adapter, rule_info); + } + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); +} + +void sxe2_vsi_fltr_clean(struct sxe2_vsi *vsi) +{ + sxe2_vsi_l2_fltr_clean(vsi); + sxe2_vsi_complex_fltr_clean(vsi); +} + +void sxe2_vsi_l2_fltr_remove(struct sxe2_adapter *adapter, u16 id_in_dev) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *list_itr = NULL; + struct sxe2_rule_info *tmp = NULL; + struct sxe2_vsi_list_info *vsi_list; + struct sxe2_vlan vlan; + s32 status; + s32 recipe_id; + u16 rule_id; + struct list_head del_head; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + enum sxe2_vsi_list_type vsi_list_type; + + INIT_LIST_HEAD(&del_head); + for (recipe_id = 0; recipe_id < SXE2_DEFAULT_RECIPE_MAX; recipe_id++) { + if (recipe_id != SXE2_DEFAULT_RECIPE_VLAN) + vsi_list_type = SXE2_VSI_LIST_TYPE_FORWARD; + else + vsi_list_type = SXE2_VSI_LIST_TYPE_PRUNE; + + list_head = &switch_ctxt->recipe[recipe_id].rule_head; + rule_lock = &switch_ctxt->recipe[recipe_id].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[vsi_list_type].vsi_list_lock; + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(list_itr, list_head, list_entry) { + vsi_list = list_itr->vsi_list; + if ((!vsi_list && list_itr->act.fwd_id.vsi_id == id_in_dev) || + (vsi_list && test_bit(id_in_dev, vsi_list->vsi_map))) { + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + LOG_ERROR_BDF("alloc memory failed\n"); + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + goto l_free; + } + 
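+ /*
+ * Snapshot matching entries onto the private del_head list while
+ * the locks are held; the per-recipe *_rule_del() helpers invoked
+ * afterwards retake rule_lock themselves, so the real deletion
+ * runs after these locks are released.
+ */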
memcpy(tmp, list_itr, sizeof(*list_itr)); + list_add(&tmp->list_entry, &del_head); + } + } + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + } + + list_for_each_entry(list_itr, &del_head, list_entry) { + recipe_id = list_itr->recipe_id; + rule_id = list_itr->rule_id; + if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_MAC) { + status = sxe2_mac_rule_del(adapter, + id_in_dev, list_itr->fltr.data.mac.mac_addr); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_VLAN) { + vlan.vid = list_itr->fltr.data.vlan.vlan_id; + vlan.tpid = list_itr->fltr.data.vlan.tpid; + status = sxe2_vlan_rule_del(adapter, id_in_dev, &vlan); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_TX_ETYPE) { + status = sxe2_tx_etype_rule_del(adapter, id_in_dev, + list_itr->fltr.data.etype.ethertype); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_RX_ETYPE) { + status = sxe2_rx_etype_rule_del(adapter, id_in_dev, + list_itr->fltr.data.etype.ethertype); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_ALLMULTI) { + status = sxe2_allmulti_rule_del(adapter, id_in_dev); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_PROMISC) { + status = sxe2_promisc_rule_del(adapter, id_in_dev); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI) { + status = sxe2_srcvsi_rule_del(adapter, id_in_dev); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI_EXT) { + status = sxe2_srcvsi_ext_rule_del(adapter, id_in_dev); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK) { + status = sxe2_mac_spoofchk_rule_del(adapter, id_in_dev); + } else if (list_itr->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT) { + status = sxe2_mac_spoofchk_ext_rule_del(adapter, id_in_dev, + list_itr->fltr.data.\ + mac_spoofchk_ext.mac_addr); + } else { + continue; + } + + if (status && status != -ENOENT) { + LOG_ERROR_BDF("remove l2 rule failure, recipe id %d, rule id %u\n", + recipe_id, rule_id); + } + } + +l_free: + list_for_each_entry_safe(list_itr, tmp, &del_head, list_entry) { + list_del(&list_itr->list_entry); + kfree(list_itr); + list_itr = NULL; + } +} + +void sxe2_vsi_complex_fltr_remove(struct sxe2_adapter *adapter, + u16 id_in_dev, bool to_restore) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *restore_head; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + s32 ret = 0; + struct sxe2_tcf_fltr *tcf_fltr; + u32 bkt; + struct hlist_node *temp; + + rule_lock = &switch_ctxt->complex_recipe.rule_lock; + restore_head = &switch_ctxt->complex_recipe.restore_head; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + hash_for_each_safe(adapter->switch_ctxt.complex_recipe.ht_lkup, bkt, temp, tcf_fltr, node) { + if (tcf_fltr->src_vsi_id == id_in_dev || + (tcf_fltr->is_user_rule && + (tcf_fltr->rule_vsi_id == id_in_dev))) { + + ret = sxe2_fwd_rule_remove(adapter, tcf_fltr->rule_info, false); + if (ret) { + LOG_ERROR_BDF("complex rule delete fail, ret %d\n", ret); + break; + } + + if (to_restore) + list_add(&tcf_fltr->rule_info->list_entry, restore_head); + else + sxe2_switch_sw_rule_free(adapter, tcf_fltr->rule_info); + } + } + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); +} + +void sxe2_vsi_fltr_remove(struct sxe2_adapter *adapter, u16 id_in_dev) +{ + sxe2_vsi_l2_fltr_remove(adapter, id_in_dev); + sxe2_vsi_complex_fltr_remove(adapter, id_in_dev, false); +} + +s32 sxe2_rule_bridge_mode_update(struct sxe2_adapter *adapter) +{ + struct 
sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + struct sxe2_rule_info *rule_info = NULL; + s32 ret = 0; + + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_MAC].rule_head; + rule_lock = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_MAC].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + list_for_each_entry(rule_info, list_head, list_entry) { + u8 *addr = rule_info->fltr.data.mac.mac_addr; + + if (is_unicast_ether_addr(addr)) { + if (adapter->switch_ctxt.evb_mode == BRIDGE_MODE_VEB) + rule_info->act.lan_en = false; + else + rule_info->act.lan_en = true; + + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + break; + } + } + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + + return ret; +} + +static s32 sxe2_switch_restore_list_fill(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule, + struct list_head *rule_head) +{ + s32 weight; + s32 i; + u16 vsi_id; + struct sxe2_vsi_list_info *vsi_list; + struct sxe2_rule_info *new_rule; + struct sxe2_vsi *vsi; + + vsi_list = rule->vsi_list; + if (!vsi_list) { + list_del(&rule->list_entry); + vsi = sxe2_vsi_get_by_idx(adapter, rule->act.fwd_id.vsi_id); + if (vsi && vsi->type == SXE2_VSI_T_PF) + list_add(&rule->list_entry, rule_head); + else + kfree(rule); + } else { + weight = bitmap_weight(vsi_list->vsi_map, SXE2_VSI_MAX_CNT); + if (!weight || weight >= SXE2_VSI_MAX_CNT) { + list_del(&rule->list_entry); + kfree(rule); + return 0; + } + for (i = 0, vsi_id = 0; i < weight; i++, vsi_id++) { + vsi_id = (u16)find_next_bit(vsi_list->vsi_map, + SXE2_VSI_MAX_CNT, vsi_id); + vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (vsi && vsi->type == SXE2_VSI_T_PF) { + new_rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!new_rule) + return -ENOMEM; + + memcpy(new_rule, rule, sizeof(*rule)); + new_rule->vsi_list = NULL; + new_rule->act.fwd_id.vsi_id = vsi_id; + new_rule->act.type = SXE2_FWD_TO_VSI; + list_add(&new_rule->list_entry, rule_head); + } + } + list_del(&rule->list_entry); + kfree(rule); + } + + return 0; +} + +s32 sxe2_switch_fltr_restore_prepare(struct sxe2_adapter *adapter) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct list_head *restore_head; + struct sxe2_vsi_list_info *vsi_list; + struct sxe2_vsi_list_info *vsi_list_tmp; + struct sxe2_rule_info *rule; + struct sxe2_rule_info *rule_tmp; + struct mutex *list_lock; + s32 i; + s32 ret = 0; + struct sxe2_tcf_fltr *tcf_fltr; + u32 bkt; + struct hlist_node *temp; + + for (i = 0; i < SXE2_DEFAULT_RECIPE_MAX; i++) { + list_head = &switch_ctxt->recipe[i].rule_head; + restore_head = &switch_ctxt->recipe[i].restore_head; + list_lock = &switch_ctxt->recipe[i].rule_lock; + + if (i == SXE2_DEFAULT_RECIPE_SRCVSI_EXT) + continue; + + mutex_lock(list_lock); + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + ret = sxe2_switch_restore_list_fill(adapter, rule, + restore_head); + if (ret) { + mutex_unlock(list_lock); + return ret; + } + } + mutex_unlock(list_lock); + } + + list_head = &switch_ctxt->complex_recipe.rule_head; + restore_head = &switch_ctxt->complex_recipe.restore_head; + list_lock = &switch_ctxt->complex_recipe.rule_lock; + mutex_lock(list_lock); + hash_for_each_safe(adapter->switch_ctxt.complex_recipe.ht_lkup, bkt, temp, tcf_fltr, node) { + rule = tcf_fltr->rule_info; + + if (!tcf_fltr->cookie_invalid) + 
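+ /* drop the stale cookie mapping here; it is re-linked when the rule is replayed on restore */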
sxe2_hash_cookie_del(adapter, tcf_fltr->cookie); + + sxe2_hash_lkup_del(adapter, tcf_fltr); + if (!rule->tcf_fltr->cookie_invalid) + list_add(&rule->list_entry, restore_head); + else + sxe2_switch_sw_rule_free(adapter, rule); + } + mutex_unlock(list_lock); + + for (i = 0; i < SXE2_VSI_LIST_TYPE_MAX; i++) { + list_head = &switch_ctxt->vsi_list_mgmt[i].vsi_list_head; + list_lock = &switch_ctxt->vsi_list_mgmt[i].vsi_list_lock; + + mutex_lock(list_lock); + list_for_each_entry_safe(vsi_list, vsi_list_tmp, list_head, list_entry) { + list_del(&vsi_list->list_entry); + kfree(vsi_list); + vsi_list = NULL; + } + mutex_unlock(list_lock); + } + + return 0; +} + +void sxe2_switch_fltr_restore_clean(struct sxe2_adapter *adapter) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct mutex *list_lock; + struct sxe2_rule_info *rule; + struct sxe2_rule_info *rule_tmp; + s32 i; + + for (i = 0; i < SXE2_DEFAULT_RECIPE_MAX; i++) { + list_head = &switch_ctxt->recipe[i].restore_head; + + list_lock = &switch_ctxt->recipe[i].rule_lock; + mutex_lock(list_lock); + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + list_del(&rule->list_entry); + sxe2_switch_sw_rule_free(adapter, rule); + } + mutex_unlock(list_lock); + } + + list_head = &switch_ctxt->complex_recipe.restore_head; + list_lock = &switch_ctxt->complex_recipe.rule_lock; + mutex_lock(list_lock); + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + list_del(&rule->list_entry); + sxe2_switch_sw_rule_free(adapter, rule); + } + mutex_unlock(list_lock); +} + +STATIC s32 sxe2_vsi_lldp_fltr_update(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + bool islldpAgent; + s32 recipe_id; + struct list_head *list_head; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct sxe2_rule_info *rule; + struct sxe2_rule_info *rule_tmp; + struct sxe2_rule_info *rule_tx_lldp = NULL; + struct sxe2_rule_info *rule_rx_lldp = NULL; + + ret = sxe2_lldp_agent_event_init(adapter); + if (ret) { + LOG_ERROR_BDF("lldp event init failed: %d\n", ret); + goto l_out; + } + + mutex_lock(&switch_ctxt->lldp_rule_lock); + ret = sxe2_lldp_fw_agent_status_get(vsi->adapter, &islldpAgent, NULL); + if (ret) { + LOG_ERROR_BDF("lldp status get failed:%d\n", ret); + goto l_unlock; + } + LOG_DEBUG_BDF("sxe2 lldp agent status islldpAgent:%u\n", islldpAgent); + + for (recipe_id = 0; recipe_id < SXE2_DEFAULT_RECIPE_MAX; recipe_id++) { + if (recipe_id == SXE2_DEFAULT_RECIPE_TX_ETYPE || + recipe_id == SXE2_DEFAULT_RECIPE_RX_ETYPE) { + list_head = &switch_ctxt->recipe[recipe_id].rule_head; + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + if (rule->act.fwd_id.vsi_id != vsi->idx_in_dev) + continue; + if (recipe_id == SXE2_DEFAULT_RECIPE_TX_ETYPE && + rule->fltr.data.etype.ethertype == ETH_P_LLDP) { + rule_tx_lldp = rule; + } else if (recipe_id == SXE2_DEFAULT_RECIPE_RX_ETYPE && + rule->fltr.data.etype.ethertype == ETH_P_LLDP) { + rule_rx_lldp = rule; + } + } + } + } + + if (islldpAgent) { + if (!rule_tx_lldp) { + ret = sxe2_tx_etype_rule_add(vsi, ETH_P_LLDP); + if (ret) { + LOG_ERROR_BDF("failed to add tx lldp rule, \t" + "lldp fltr update failed:%d\n", ret); + goto l_unlock; + } + } + if (rule_rx_lldp) { + ret = sxe2_rx_etype_rule_del(adapter, vsi->idx_in_dev, ETH_P_LLDP); + if (ret) { + LOG_ERROR_BDF("failed to del rx lldp rule, \t" + "lldp fltr update failed:%d\n", ret); + goto l_unlock; + } + } + } else { + if (rule_tx_lldp) { + ret = 
sxe2_tx_etype_rule_del(adapter, vsi->idx_in_dev, ETH_P_LLDP); + if (ret) { + LOG_ERROR_BDF("failed to del tx lldp rule, \t" + "lldp fltr update failed:%d\n", ret); + goto l_unlock; + } + } + if (!rule_rx_lldp) { + ret = sxe2_rx_etype_rule_add(vsi, ETH_P_LLDP); + if (ret) { + LOG_ERROR_BDF("failed to add rx lldp rule, \t" + "lldp fltr update failed:%d\n", ret); + goto l_unlock; + } + } + } + +l_unlock: + mutex_unlock(&switch_ctxt->lldp_rule_lock); + +l_out: + + return ret; +} + +s32 sxe2_vsi_l2_fltr_restore(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_switch_context *switch_ctxt = &vsi->adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *rule; + struct sxe2_rule_info *rule_tmp; + struct sxe2_vlan vlan; + s32 recipe_id; + + for (recipe_id = 0; recipe_id < SXE2_DEFAULT_RECIPE_MAX; recipe_id++) { + list_head = &switch_ctxt->recipe[recipe_id].restore_head; + + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + if (rule->act.fwd_id.vsi_id != vsi->idx_in_dev) + continue; + if (rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC) { + ret = sxe2_mac_rule_add(vsi, rule->fltr.data.mac.mac_addr); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_VLAN) { + vlan.vid = rule->fltr.data.vlan.vlan_id; + vlan.tpid = rule->fltr.data.vlan.tpid; + ret = sxe2_vlan_rule_add(vsi, &vlan); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_TX_ETYPE) { + ret = sxe2_tx_etype_rule_add(vsi, rule->fltr.data.etype.ethertype); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_RX_ETYPE) { + ret = sxe2_rx_etype_rule_add(vsi, rule->fltr.data.etype.ethertype); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_ALLMULTI) { + ret = sxe2_allmulti_rule_add(vsi); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_PROMISC) { + ret = sxe2_promisc_rule_add(vsi); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI) { + ret = sxe2_srcvsi_rule_add(vsi); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_SRCVSI_EXT) { + ret = sxe2_srcvsi_ext_rule_add(vsi); + } else if (rule->recipe_id == SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK) { + ret = sxe2_mac_spoofchk_rule_add(adapter, vsi->idx_in_dev); + } + + if (ret == -EEXIST) { + ret = 0; + } else if (ret) { + LOG_ERROR_BDF("add rule failure, ret %d, \t" + "vsi[%d] recipe id %d, old rule id %d\n", + ret, vsi->idx_in_dev, rule->recipe_id, + rule->rule_id); + break; + } + list_del(&rule->list_entry); + kfree(rule); + rule = NULL; + } + + if (ret) + break; + } + + if (vsi->type == SXE2_VSI_T_PF) { + ret = sxe2_vsi_lldp_fltr_update(vsi); + if (ret) + LOG_ERROR_BDF("lldp fltr update failure, ret %d, vsi[%d]\n", + ret, vsi->idx_in_dev); + + ret = sxe2_cur_mac_addr_set(vsi, vsi->netdev->dev_addr); + if (ret) + LOG_ERROR_BDF("rebuild mac addr failed, mac %pM, ret %d\n", + vsi->netdev->dev_addr, ret); + } + + return ret; +} + +STATIC void sxe2_fltr_cookie_rule_update(struct sxe2_adapter *adapter, struct sxe2_rule_info *rule) +{ + struct list_head *tc_rule_head; + struct sxe2_tc_rule_info *tc_list_itr = NULL; + struct sxe2_tc_rule_info *tc_list_tmp = NULL; + struct sxe2_tc_rule_hash *rule_hash_node = NULL; + + tc_rule_head = &rule->tc_rule_head; + list_for_each_entry_safe(tc_list_itr, tc_list_tmp, + tc_rule_head, list_entry) { + rule_hash_node = sxe2_hash_cookie_find(adapter, tc_list_itr->cookie); + if (rule_hash_node) + rule_hash_node->rule_info = rule; + } +} + +s32 sxe2_vsi_complex_fltr_restore(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = 
&adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *rule; + struct sxe2_rule_info *rule_tmp; + struct sxe2_rule_info *new_rule; + + list_head = &switch_ctxt->complex_recipe.restore_head; + list_for_each_entry_safe(rule, rule_tmp, list_head, list_entry) { + if (rule->tcf_fltr->src_vsi_id != vsi_id) + continue; + + sxe2_tcf_match_meta_fill(rule->tcf_fltr); + + ret = sxe2_tcf_rule_add(adapter, vsi_id, rule->tcf_fltr); + + if (ret == -EEXIST) { + ret = 0; + } else if (ret) { + LOG_ERROR_BDF("add rule failure, ret %d, old rule id %d\n", + ret, rule->rule_id); + break; + } + + new_rule = sxe2_rule_entry_find(adapter, rule); + if (new_rule) { + INIT_LIST_HEAD(&new_rule->tc_rule_head); + if (!list_empty(&rule->tc_rule_head)) { + list_replace_init(&rule->tc_rule_head, + &new_rule->tc_rule_head); + sxe2_fltr_cookie_rule_update(adapter, new_rule); + } + } + list_del(&rule->list_entry); + sxe2_switch_sw_rule_free(adapter, rule); + rule = NULL; + } + + return ret; +} + +s32 sxe2_vfs_complex_fltr_restore(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + u16 vf_idx; + struct sxe2_vf_node *vf_node; + + sxe2_for_each_vf(adapter, vf_idx) { + vf_node = SXE2_VF_NODE(adapter, vf_idx); + ret = sxe2_vsi_complex_fltr_restore(adapter, vf_node->vsi_id[SXE2_VF_TYPE_ETH]); + if (ret) { + LOG_ERROR_BDF("vf:%u add complex rule failed, ret %d.\n", + vf_node->vf_idx, ret); + break; + } + } + + return ret; +} + +s32 sxe2_pf_complex_fltr_restore(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + ret = sxe2_vsi_complex_fltr_restore(adapter, + adapter->vsi_ctxt.main_vsi->idx_in_dev); + if (ret) { + LOG_ERROR_BDF("adapter %d complex filter restore failed, ret %d\n", + adapter->pf_idx, ret); + } + + return ret; +} + +STATIC void sxe2_switch_vsi_list_hw_dump(struct sxe2_adapter *adapter) +{ + struct list_head *list_head; + struct mutex *rule_lock; + s32 i, status; + s8 *vsi_buff; + u16 vsi_id = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct sxe2_vsi_list_info *vsi_list = NULL; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_vsi_list vsi_list_fwc = { 0 }; + struct sxe2_fwc_switch_vsi_list_resp vsi_list_resp = { 0 }; + u32 idx, vsi_map[SXE2_VSI_LIST_DAT_LEN]; + + LOG_DEV_INFO("=============switch vsi list dump start=============\n"); + + for (i = 0; i < SXE2_VSI_LIST_TYPE_MAX; i++) { + list_head = &switch_ctxt->vsi_list_mgmt[i].vsi_list_head; + rule_lock = &switch_ctxt->vsi_list_mgmt[i].vsi_list_lock; + + if (i == SXE2_VSI_LIST_TYPE_PRUNE) + vsi_list_fwc.flag |= cpu_to_le16(SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE); + mutex_lock(rule_lock); + list_for_each_entry(vsi_list, list_head, list_entry) { + vsi_list_fwc.vsi_list_id = cpu_to_le16(vsi_list->vsi_list_id); + + sxe2_cmd_params_dflt_fill( + &cmd, SXE2_CMD_SWITCH_VSI_LIST_GET, + &vsi_list_fwc, sizeof(vsi_list_fwc), + &vsi_list_resp, sizeof(vsi_list_resp)); + status = sxe2_cmd_fw_exec(adapter, &cmd); + if (status) { + LOG_ERROR_BDF("switch vsi list cmd fail, \t" + "type %d, vsi list %d, ret=%d\n", + i, vsi_list->vsi_list_id, status); + continue; + } + + vsi_buff = kzalloc(4096, GFP_KERNEL); + if (!vsi_buff) + continue; + vsi_id = 0; + for (idx = 0; idx < SXE2_VSI_LIST_DAT_LEN; idx++) + vsi_map[idx] = le32_to_cpu(vsi_list_resp.vsi[idx]); + + while (true) { + vsi_id = (u16)find_next_bit((unsigned long *)vsi_map, + SXE2_VSI_MAX_CNT, vsi_id); + if (vsi_id >= SXE2_VSI_MAX_CNT) + break; + snprintf(vsi_buff + strlen(vsi_buff), 4096 - strlen(vsi_buff), + "%d,", vsi_id); + vsi_id++; + } + + LOG_DEV_INFO("vsi list type 
%d, vsi list id %d, vsi:%s\n", i, + vsi_list->vsi_list_id, vsi_buff); + + kfree(vsi_buff); + vsi_buff = NULL; + } + mutex_unlock(rule_lock); + } + + LOG_DEV_INFO("==============switch vsi list dump end==============\n"); +} + +STATIC void sxe2_switch_rule_complex_hw_dump(struct sxe2_adapter *adapter) +{ + u32 action; + s32 i, status, vsi_cnt; + struct sxe2_tcf_fltr *tcf_fltr; + u16 vsi_id = 0; + u16 vsi_list_id = 0; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_rule_resp rule_resp = { 0 }; + struct sxe2_fwc_switch_rule rule_fwc = { 0 }; + struct sxe2_rule_info *rule_info = NULL; + struct sxe2_vsi_list_info *vsi_list = NULL; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct mutex *rule_lock = &switch_ctxt->complex_recipe.rule_lock; + u32 bkt; + struct hlist_node *temp; + + LOG_DEV_INFO("===============switch complex rule dump start===============\n"); + mutex_lock(rule_lock); + + hash_for_each_safe(adapter->switch_ctxt.complex_recipe.ht_lkup, bkt, temp, + tcf_fltr, node) { + rule_info = tcf_fltr->rule_info; + for (i = 0; i < tcf_fltr->recipe_cnt; i++) { + rule_fwc.rule_id = cpu_to_le16(tcf_fltr->rule_id[i]); + rule_fwc.recipe_id = + cpu_to_le16(tcf_fltr->recipe_id[i]); + vsi_list = rule_info->vsi_list; + sxe2_cmd_params_dflt_fill(&cmd, + SXE2_CMD_SWITCH_RULE_GET, + &rule_fwc, sizeof(rule_fwc), + &rule_resp, + sizeof(rule_resp)); + status = sxe2_cmd_fw_exec(adapter, &cmd); + if (status) { + LOG_ERROR_BDF("switch rule cmd fail, recipe %d, rule %d, ret=%d\n", + tcf_fltr->recipe_id[i], tcf_fltr->rule_id[i], + status); + continue; + } + action = le32_to_cpu(rule_resp.act); + + if (rule_info->act.type == SXE2_FWD_TO_VSI) + vsi_cnt = 1; + else if (SXE2_FWD_TO_VSI_LIST == rule_info->act.type) + vsi_cnt = bitmap_weight(vsi_list->vsi_map, SXE2_VSI_MAX_CNT); + else + vsi_cnt = 0; + + if (!vsi_list) + vsi_id = (action & SXE2_SINGLE_ACT_VSI_ID_M) >> + SXE2_SINGLE_ACT_VSI_ID_S; + else + vsi_list_id = (action & + SXE2_SINGLE_ACT_VSI_LIST_ID_M) >> + SXE2_SINGLE_ACT_VSI_LIST_ID_S; + + if (rule_info->act.type == SXE2_FWD_TO_Q) { + LOG_DEV_INFO("cookie %lu, recipe %d, rule_id %d, act %08x, \t" + "full_key %08x:%08x:%08x, vsi_cnt %d, lb_en %d, \t" + "lan_en %d, is_root %d, queue_id:%d\n", + tcf_fltr->cookie, tcf_fltr->recipe_id[i], + tcf_fltr->rule_id[i], action, rule_resp.full_key[0], + rule_resp.full_key[1], rule_resp.full_key[2], + vsi_cnt, + ((action & SXE2_SINGLE_ACT_LB_ENABLE) ? true : false), + ((action & SXE2_SINGLE_ACT_LAN_ENABLE) ? true : false), + rule_fwc.rule_id == rule_info->rule_id ? 1 : 0, + rule_info->act.fwd_id.q_id); + } else { + LOG_DEV_INFO("cookie %lu, recipe %d, rule_id %d, \t" + "act %08x, full_key %08x:%08x:%08x, \t" + "vsi_cnt %d, lb_en %d, lan_en %d, \t" + "is_root %d, vsi id %d, vsi_list_id %d, \t" + "ref_cnt %d\n", + tcf_fltr->cookie, tcf_fltr->recipe_id[i], + tcf_fltr->rule_id[i], action, rule_resp.full_key[0], + rule_resp.full_key[1], rule_resp.full_key[2], + vsi_cnt, + ((action & SXE2_SINGLE_ACT_LB_ENABLE) ? true : false), + ((action & SXE2_SINGLE_ACT_LAN_ENABLE) ? true : false), + rule_fwc.rule_id == rule_info->rule_id ? 1 : 0, + (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1), + le16_to_cpu(rule_resp.ref_cnt)); + } + } + } + mutex_unlock(rule_lock); + + LOG_DEV_INFO("================switch complex rule dump end================\n"); +} + +#define LOG_DUMP_SWITCH_RULE(fmt, ...) 
\ + LOG_DEV_INFO("recipe %d, rule_id %d, " \ + "act %08x, full_key %08x:%08x:%08x, " \ + "vsi_cnt %d, lb_en %d, lan_en %d, " fmt, \ + i, rule_info->rule_id, action, rule_resp.full_key[0], \ + rule_resp.full_key[1], rule_resp.full_key[2], vsi_cnt, \ + ((action & SXE2_SINGLE_ACT_LB_ENABLE) ? true : false), \ + ((action & SXE2_SINGLE_ACT_LAN_ENABLE) ? true : false), \ + ##__VA_ARGS__) + +#define FULL_KEY_TO_MAC_ADDR() \ + do { \ + mac_addr[0] = (u8)(full_key_d1.field.fv1 >> U8_BITS); \ + mac_addr[1] = (u8)(full_key_d1.field.fv1 & U8_MAX); \ + mac_addr[2] = (u8)(full_key_d1.field.fv2 >> U8_BITS); \ + mac_addr[3] = (u8)(full_key_d1.field.fv2 & U8_MAX); \ + mac_addr[4] = (u8)(full_key_d2.field.fv3 >> U8_BITS); \ + mac_addr[5] = (u8)(full_key_d2.field.fv3 & U8_MAX); \ + } while (0) + +STATIC void sxe2_switch_rule_l2_hw_dump(struct sxe2_adapter *adapter) +{ + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct mutex *rule_lock; + struct sxe2_rule_info *rule_info = NULL; + struct sxe2_vsi_list_info *vsi_list = NULL; + struct sxe2_fwc_switch_rule rule_fwc = { 0 }; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_rule_resp rule_resp = { 0 }; + s32 i, status, vsi_cnt; + u16 vsi_id = 0; + u16 src_vsi_id = 0; + u16 vsi_list_id = 0; + u32 action; + union sxe2_switch_full_key_dw1 full_key_d1; + union sxe2_switch_full_key_dw2 full_key_d2; + u8 mac_addr[ETH_ALEN]; + struct sxe2_vlan vlan; + u16 etype; + + LOG_DEV_INFO("===============switch rule dump start===============\n"); + for (i = 0; i < SXE2_DEFAULT_RECIPE_MAX; i++) { + list_head = &switch_ctxt->recipe[i].rule_head; + rule_lock = &switch_ctxt->recipe[i].rule_lock; + mutex_lock(rule_lock); + list_for_each_entry(rule_info, list_head, list_entry) { + rule_fwc.rule_id = cpu_to_le16(rule_info->rule_id); + rule_fwc.recipe_id = cpu_to_le16((u16)i); + vsi_list = rule_info->vsi_list; + sxe2_cmd_params_dflt_fill(&cmd, + SXE2_CMD_SWITCH_RULE_GET, + &rule_fwc, sizeof(rule_fwc), + &rule_resp, + sizeof(rule_resp)); + status = sxe2_cmd_fw_exec(adapter, &cmd); + if (status) { + LOG_ERROR_BDF("switch rule cmd fail, recipe %d, rule %d, ret=%d\n", + i, rule_info->rule_id, status); + continue; + } + full_key_d1.val = le32_to_cpu(rule_resp.full_key[1]); + full_key_d2.val = le32_to_cpu(rule_resp.full_key[2]); + action = le32_to_cpu(rule_resp.act); + if (rule_info->act.type == SXE2_FWD_TO_VSI) { + vsi_cnt = 1; + } else if (SXE2_FWD_TO_VSI_LIST == rule_info->act.type) { + vsi_cnt = bitmap_weight(vsi_list->vsi_map, + SXE2_VSI_MAX_CNT); + } else { + vsi_cnt = 0; + } + if (!vsi_list) + vsi_id = (action & SXE2_SINGLE_ACT_VSI_ID_M) >> + SXE2_SINGLE_ACT_VSI_ID_S; + else + vsi_list_id = (action & + SXE2_SINGLE_ACT_VSI_LIST_ID_M) >> + SXE2_SINGLE_ACT_VSI_LIST_ID_S; + + switch (i) { + case SXE2_DEFAULT_RECIPE_MAC: + FULL_KEY_TO_MAC_ADDR(); + LOG_DUMP_SWITCH_RULE("mac %pM, vsi id %d, vsi_list_id %d\n", + mac_addr, (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1)); + break; + case SXE2_DEFAULT_RECIPE_VLAN: + vlan.tpid = full_key_d1.field.fv1; + vlan.vid = full_key_d1.field.fv2; + LOG_DUMP_SWITCH_RULE("tpid %x, vid %d, vsi_list_id %d\n", + vlan.tpid, vlan.vid, vsi_list_id); + break; + case SXE2_DEFAULT_RECIPE_TX_ETYPE: + src_vsi_id = full_key_d1.field.fv1 & 0x03FF; + etype = full_key_d2.field.fv3; + LOG_DUMP_SWITCH_RULE("etype %x, is_rx %d, \t" + "src_vsi_id %d, vsi id %d, vsi_list_id %d\n", + etype, false, src_vsi_id, + (vsi_list ? -1 : vsi_id), + (vsi_list ? 
vsi_list_id : -1)); + break; + case SXE2_DEFAULT_RECIPE_RX_ETYPE: + etype = full_key_d1.field.fv2; + LOG_DUMP_SWITCH_RULE("etype %x, is_rx %d, \t" + "vsi id %d, vsi_list_id %d\n", + etype, true, (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1)); + break; + case SXE2_DEFAULT_RECIPE_ALLMULTI: + case SXE2_DEFAULT_RECIPE_PROMISC: + LOG_DUMP_SWITCH_RULE("vsi id %d, vsi_list_id %d\n", + (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1)); + break; + case SXE2_DEFAULT_RECIPE_SRCVSI: + src_vsi_id = full_key_d1.field.fv1 & 0x03FF; + LOG_DUMP_SWITCH_RULE("src_vsi_id %d, vsi id %d\n", + src_vsi_id, vsi_id); + break; + case SXE2_DEFAULT_RECIPE_SRCVSI_EXT: + src_vsi_id = full_key_d1.field.fv1 & 0x03FF; + LOG_DUMP_SWITCH_RULE("src_vsi_ext_id %d, vsi id %d\n", + src_vsi_id, vsi_id); + break; + case SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK: + FULL_KEY_TO_MAC_ADDR(); + LOG_DUMP_SWITCH_RULE("mac %pM, vsi id %d, vsi_list_id %d\n", + mac_addr, (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1)); + break; + case SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT: + FULL_KEY_TO_MAC_ADDR(); + LOG_DUMP_SWITCH_RULE("mac %pM, vsi id %d, vsi_list_id %d\n", + mac_addr, (vsi_list ? -1 : vsi_id), + (vsi_list ? vsi_list_id : -1)); + break; + default: + break; + } + } + mutex_unlock(rule_lock); + } + LOG_DEV_INFO("================switch rule dump end================\n"); +} + +void sxe2_switch_rule_hw_dump(struct sxe2_adapter *adapter) +{ + sxe2_switch_rule_l2_hw_dump(adapter); + sxe2_switch_rule_complex_hw_dump(adapter); + sxe2_switch_vsi_list_hw_dump(adapter); +} + +void sxe2_fwc_switch_trace_rx_trigger(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_trace_req req = { 0 }; + + req.is_rx = true; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SWITCH_TRACE_TRIGGER, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("switch trace trigger cmd fail, ret=%d\n", ret); + +} + +void sxe2_fwc_switch_trace_tx_trigger(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_trace_req req = { 0 }; + + req.is_rx = false; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SWITCH_TRACE_TRIGGER, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("switch trace trigger cmd fail, ret=%d\n", ret); +} + +void sxe2_fwc_switch_trace_recorder(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_trace_resp resp; + struct sxe2_recp_trace_rcd *recp; + s32 i; + + memset(&resp, 0, sizeof(resp)); + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SWITCH_TRACE_RECORDER, NULL, 0, + &resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch trace recorder cmd fail, ret=%d\n", ret); + goto l_end; + } + + LOG_DEV_INFO("=============switch trace recorder start=============\n"); + + if (resp.og.done) { + LOG_DEV_INFO("OG trace, status %d, profile id %d\n", + resp.og.status, resp.og.profile_id); + for (i = 0; i < SXE2_SWITCH_FV_CNT; i++) { + LOG_DEV_INFO("OG trace, fv[%d] 0x%04x\n", i, + le16_to_cpu(resp.og.fv[i])); + } + } + + if (resp.swe.done) { + LOG_DEV_INFO("SWE trace, status %d\n", resp.swe.status); + for (i = 0; i < SXE2_PACKET_MAX_RECIPES; i++) { + recp = &resp.swe.recp[i]; + LOG_DEV_INFO("SWE trace, recipe id %d, ht1/ht2/kt/fkot hit %d/%d/%d/%d, index %d\n", + le16_to_cpu(recp->recipe_id), + recp->ht1_hit, recp->ht2_hit, recp->kt_hit, + recp->fkot_hit, 
le16_to_cpu(recp->index)); + } + } + + if (resp.rg.done) { + LOG_DEV_INFO("RG trace, status %d\n", resp.rg.status); + for (i = 0; i < SXE2_PACKET_INFO_DWORD_CNT; i++) { + LOG_DEV_INFO("RG trace, packet info[%d] 0x%08x\n", i, + le32_to_cpu(resp.rg.ppe_info[i])); + } + } + + LOG_DEV_INFO("==============switch trace recorder end==============\n"); + +l_end: + return; +} + +void sxe2_switch_recipe_dump(struct sxe2_adapter *adapter) +{ + s32 ret; + u8 i; + struct sxe2_fwc_switch_recipe switch_recipe = { 0 }; + + LOG_DEV_INFO("===============switch recipe dump start===============\n"); + + for (i = 0; i < SXE2_MAX_NUM_RECIPES; i++) { + memset(&switch_recipe, 0, sizeof(switch_recipe)); + switch_recipe.rid = i; + ret = sxe2_fwc_switch_recipe_get(adapter, &switch_recipe, + SXE2_CMD_SWITCH_RECIPE_GET); + if (ret) { + LOG_DEV_ERR("switch recipe get fail, ret=%d\n", ret); + return; + } + + LOG_DEV_INFO("rid:%u, isRoot:%u, lpidx0:%u, lpidx0Vid:%u, \t" + "fv0msk:0x%x, lpidx1:%u, lpidx1Vid:%u, fv1msk:0x%x, \t" + "lpidx2:%u, lpidx2Vid:%u, fv2msk:0x%x, lpidx3:%u, \t" + "lpidx3Vid:%u, fv3msk:0x%x, lpidx4:%u, lpidx4Vid:%u, \t" + "fv4msk:0x%x, prio:%u, jprio:0x%x, inversAct:%u, \t" + "defAct:0x%x, defActValid:%u, ref_cnt %d\n", + switch_recipe.rid, switch_recipe.is_root, + switch_recipe.lookup_index0, + switch_recipe.lookup_index0_valid, + le16_to_cpu(switch_recipe.fv0_bitmask), + switch_recipe.lookup_index1, + switch_recipe.lookup_index1_valid, + le16_to_cpu(switch_recipe.fv1_bitmask), + switch_recipe.lookup_index2, + switch_recipe.lookup_index2_valid, + le16_to_cpu(switch_recipe.fv2_bitmask), + switch_recipe.lookup_index3, + switch_recipe.lookup_index3_valid, + le16_to_cpu(switch_recipe.fv3_bitmask), + switch_recipe.lookup_index4, + switch_recipe.lookup_index4_valid, + le16_to_cpu(switch_recipe.fv4_bitmask), + switch_recipe.priority, switch_recipe.join_priority, + switch_recipe.inverse_action, + le32_to_cpu((u32)switch_recipe.default_action), + switch_recipe.default_action_valid, + le16_to_cpu((u32)switch_recipe.ref_cnt)); + } + LOG_DEV_INFO("===============switch recipe dump end===============\n"); + +} + +void sxe2_switch_profile_recipemap_dump(struct sxe2_adapter *adapter) +{ + s32 ret; + u16 i; + struct sxe2_fwc_switch_profile_recipe_map profile_recipe_map = { 0 }; + u32 map[2]; + + LOG_DEV_INFO("===============switch profile recipe map dump start===============\n"); + + for (i = 2; i < SXE2_MAX_NUM_PROFILES; i++) { + memset(&profile_recipe_map, 0, sizeof(profile_recipe_map)); + memset(map, 0, sizeof(map)); + profile_recipe_map.profile_id = cpu_to_le16(i); + ret = sxe2_fwc_switch_profile_recipe_map_get(adapter, &profile_recipe_map, + SXE2_CMD_SWITCH_PROFILE_RECIPE_MAP_GET); + if (ret) { + LOG_DEV_ERR("switch profile recipe map get fail, ret=%d\n", ret); + break; + } + memcpy(map, profile_recipe_map.map, sizeof(map)); + LOG_DEV_INFO("profile[%u] 0x%x 0x%x\n", + le16_to_cpu(profile_recipe_map.profile_id), + le32_to_cpu(profile_recipe_map.map[0]), + le32_to_cpu(profile_recipe_map.map[1])); + } + LOG_DEV_INFO("===============switch profile recipe map dump end===============\n"); + +} + +void sxe2_switch_share_id_dump(struct sxe2_adapter *adapter) +{ + s32 ret; + u16 i; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_share_id *fwc_share_id; + + fwc_share_id = kzalloc(sizeof(*fwc_share_id), GFP_KERNEL); + if (!fwc_share_id) + return; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SWITCH_SHARE_ID_GET, NULL, + 0, fwc_share_id, sizeof(*fwc_share_id)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if 
(ret) { + LOG_ERROR_BDF("switch share id get cmd fail, ret=%d\n", ret); + goto l_free; + } + + LOG_DEV_INFO("===============switch share id dump start===============\n"); + + LOG_DEV_INFO("usage 0x%x\n", le32_to_cpu(fwc_share_id->usage)); + for (i = 0; i < SXE2_MAX_NUM_RECIPES; i++) { + LOG_DEV_INFO("recipe %d, share id %d, bitmap 0x%x\n", + i, le32_to_cpu(fwc_share_id->share_id[i]), + le32_to_cpu(fwc_share_id->bitmap[i])); + } + + LOG_DEV_INFO("================switch share id dump end================\n"); + +l_free: + kfree(fwc_share_id); +} + +struct sxe2_switch_dfx_stats_info { + __le32 index; + char name[32]; +}; + +void sxe2_fwc_hw_dfx_show(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_dfx_stats resp; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_HW_DFX_SHOW, NULL, 0, &resp, + sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("hw dfx show cmd fail, ret=%d\n", ret); +} + +s32 sxe2_vlan_filter_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_vsi_vlan_filter req = { 0 }; + + req.vsi_hw_id = cpu_to_le16(vsi_hw_id); + req.enable = (u8)en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_VLAN_FILTER, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("vsi vlan filter cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_src_vsi_prune_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_vsi_src_prune req = { 0 }; + + req.vsi_hw_id = cpu_to_le16(vsi_hw_id); + req.enable = (u8)en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_SRC_PRUNE, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("vsi src prune action cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_vsi_loopback_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_vsi_loopback req = { 0 }; + + req.vsi_hw_id = cpu_to_le16(vsi_hw_id); + req.enable = (u8)en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_LOOPBACK, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("vsi loopback cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_vsi_spoofchk_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_vsi_spoofchk req = { 0 }; + + req.vsi_hw_id = cpu_to_le16(vsi_hw_id); + req.mac_enable = (u8)en; + req.vlan_enable = (u8)en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_SPOOFCHK, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("vsi spoofchk cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_switch_dfx_irq_setup(struct sxe2_adapter *adapter, bool en) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_switch_dfx_irq req = { 0 }; + + req.enable = (u8)en; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_SWITCH_DFX_IRQ, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("switch dfx irq setup cmd fail, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC bool sxe2_macvlan_mac_check(struct sxe2_adapter *adapter, const u8 *mac, u16 *vsi_id) +{ + u16 i; + struct sxe2_vsi *vsi = NULL; + + 
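+ /*
+  * Scan every VSI on the adapter and report the index of the first
+  * macvlan VSI whose netdev MAC equals the given address.
+  */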
sxe2_for_each_vsi(&adapter->vsi_ctxt, i) { + vsi = adapter->vsi_ctxt.vsi[i]; + if (!vsi) + continue; + if (vsi->type == SXE2_VSI_T_MACVLAN && + ether_addr_equal(mac, (u8 *)vsi->netdev->dev_addr)) { + *vsi_id = vsi->idx_in_dev; + return true; + } + } + return false; +} + +s32 sxe2_mac_rule_update(struct sxe2_adapter *adapter, const u8 *mac, u16 old_vsi, u16 new_vsi) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *rule_info = NULL; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_MAC].rule_head; + rule_lock = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_MAC].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(rule_info, list_head, list_entry) { + u8 *addr = rule_info->fltr.data.mac.mac_addr; + + if (ether_addr_equal(mac, addr)) { + if (rule_info->vsi_list) { + clear_bit(old_vsi, rule_info->vsi_list->vsi_map); + set_bit(new_vsi, rule_info->vsi_list->vsi_map); + } else { + rule_info->act.fwd_id.vsi_id = new_vsi; + } + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + break; + } + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + return ret; +} + +s32 sxe2_mac_spoof_rule_update(struct sxe2_vsi *eth_vsi, + struct sxe2_vsi *user_vsi, u8 *mac_addr, bool to_user) +{ + s32 ret; + struct sxe2_adapter *adapter = eth_vsi->adapter; + + if (to_user) { + ret = sxe2_mac_spoofchk_ext_rule_del(adapter, eth_vsi->idx_in_dev, mac_addr); + if (ret) { + LOG_ERROR_BDF("mac %pM spoofchk del failed, vsi_id:%u\n", + mac_addr, eth_vsi->idx_in_dev); + goto l_end; + } + + ret = sxe2_mac_spoofchk_ext_rule_add(adapter, user_vsi->idx_in_dev, mac_addr); + if (ret) { + LOG_ERROR_BDF("mac %pM spoofchk add failed, vsi_id:%u\n", + mac_addr, user_vsi->idx_in_dev); + (void)sxe2_mac_spoofchk_ext_rule_add(adapter, + eth_vsi->idx_in_dev, mac_addr); + goto l_end; + } + } else { + ret = sxe2_mac_spoofchk_ext_rule_del(adapter, + user_vsi->idx_in_dev, mac_addr); + if (ret) { + LOG_ERROR_BDF("mac %pM spoofchk del failed, vsi_id:%u\n", + mac_addr, user_vsi->idx_in_dev); + goto l_end; + } + + ret = sxe2_mac_spoofchk_ext_rule_add(adapter, + eth_vsi->idx_in_dev, mac_addr); + if (ret) { + LOG_ERROR_BDF("mac %pM spoofchk add failed, vsi_id:%u\n", + mac_addr, eth_vsi->idx_in_dev); + (void)sxe2_mac_spoofchk_ext_rule_add(adapter, + user_vsi->idx_in_dev, mac_addr); + goto l_end; + } + } + +l_end: + return ret; +} + +STATIC s32 sxe2_unicast_user_mode_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + struct sxe2_mac_filter *user_mac_fltr; + struct sxe2_addr_node *user_mac_node; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&adapter->vsi_ctxt.lock); + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + user_mac_fltr = &user_vsi->mac_filter; + mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in mac list.\n", mac); + ret = -EEXIST; + goto l_mac_list_unlock; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for 
mac:%pM failed.\n", mac); + ret = -ENOMEM; + goto l_mac_list_unlock; + } + + ret = sxe2_mac_rule_add(user_vsi, mac); + if (ret) { + kfree(user_mac_node); + goto l_mac_list_unlock; + } + + ether_addr_copy(user_mac_node->mac_addr, mac); + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + return ret; +} + +STATIC s32 sxe2_unicast_com_mode_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + struct sxe2_mac_filter *user_mac_fltr; + struct sxe2_addr_node *eth_mac_node; + struct sxe2_addr_node *user_mac_node; + u16 macvlan_vsi; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&adapter->vsi_ctxt.lock); + eth_vsi = adapter->vsi_ctxt.main_vsi; + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_vsi_unlock; + } + + user_mac_fltr = &user_vsi->mac_filter; + + mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in mac list.\n", mac); + ret = -EEXIST; + goto l_mac_list_unlock; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for mac:%pM failed.\n", mac); + ret = -ENOMEM; + goto l_mac_list_unlock; + } + + eth_mac_node = sxe2_mac_addr_find(eth_vsi, mac); + if (eth_mac_node) { + ret = sxe2_mac_rule_update(adapter, mac, + eth_vsi->idx_in_dev, user_vsi->idx_in_dev); + if (ret) { + kfree(user_mac_node); + goto l_mac_list_unlock; + } + } else if (sxe2_macvlan_mac_check(adapter, mac, &macvlan_vsi)) { + ret = sxe2_mac_rule_update(adapter, mac, + macvlan_vsi, user_vsi->idx_in_dev); + if (ret) { + kfree(user_mac_node); + goto l_mac_list_unlock; + } + } else { + ret = sxe2_mac_rule_add(user_vsi, mac); + if (ret) { + kfree(user_mac_node); + goto l_mac_list_unlock; + } + } + + ether_addr_copy(user_mac_node->mac_addr, mac); + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_unicast_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u unicast mac %pM rule add.\n", vsi_id, mac); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_unicast_user_mode_mac_add(adapter, vsi_id, mac); + else + ret = sxe2_unicast_com_mode_mac_add(adapter, vsi_id, mac); + return ret; +} + +s32 sxe2_ucmd_multi_broad_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_addr_node *user_mac_node; + struct sxe2_vsi *user_vsi; + struct sxe2_mac_filter *user_mac_fltr; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_DEBUG_BDF("User pf vsi:%u multi broad mac %pM rule add.\n", vsi_id, mac); + + mutex_lock(&adapter->vsi_ctxt.lock); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = 
-EBUSY; + goto l_vsi_unlock; + } + + user_mac_fltr = &user_vsi->mac_filter; + mutex_lock(&switch_ctxt->mac_addr_lock); + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in mac list.\n", mac); + ret = -EEXIST; + goto l_mac_list_unlock; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for mac:%pM failed.\n", mac); + ret = -ENOMEM; + goto l_mac_list_unlock; + } + + ret = sxe2_mac_rule_add(user_vsi, mac); + if (ret) { + kfree(user_mac_node); + goto l_mac_list_unlock; + } + + ether_addr_copy(user_mac_node->mac_addr, mac); + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_unicast_com_mode_mac_del(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + struct sxe2_addr_node *eth_mac_node; + struct sxe2_addr_node *user_mac_node; + u16 macvlan_vsi; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&adapter->vsi_ctxt.lock); + + eth_vsi = adapter->vsi_ctxt.main_vsi; + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_vsi_unlock; + } + + mutex_lock(&switch_ctxt->mac_addr_lock); + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (!user_mac_node) { + LOG_WARN_BDF("mac:%pM is not in mac list\n", mac); + goto l_mac_list_unlock; + } + + eth_mac_node = sxe2_mac_addr_find(eth_vsi, mac); + if (eth_mac_node) { + ret = sxe2_mac_rule_update(adapter, mac, + user_vsi->idx_in_dev, eth_vsi->idx_in_dev); + if (ret) + goto l_mac_list_unlock; + } else if (sxe2_macvlan_mac_check(adapter, mac, &macvlan_vsi)) { + ret = sxe2_mac_rule_update(adapter, mac, + user_vsi->idx_in_dev, macvlan_vsi); + if (ret) + goto l_mac_list_unlock; + } else { + ret = sxe2_mac_rule_del(adapter, user_vsi->idx_in_dev, mac); + if (ret) + goto l_mac_list_unlock; + } + + sxe2_switch_mac_node_del_and_free(user_mac_node); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_unicast_user_mode_mac_del(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + struct sxe2_addr_node *user_mac_node; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&adapter->vsi_ctxt.lock); + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (!user_mac_node) { + LOG_ERROR_BDF("mac:%pM is not in mac list.\n", mac); + ret = -ENOENT; + goto l_mac_list_unlock; + } + + ret = sxe2_mac_rule_del(adapter, user_vsi->idx_in_dev, mac); + if (ret) + goto l_mac_list_unlock; + + sxe2_switch_mac_node_del_and_free(user_mac_node); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + return ret; +} + +s32 sxe2_ucmd_srcvsi_ext_add(struct sxe2_adapter *adapter, + u16 vsi_id, 
u16 *vsi_id_list, u16 vsi_id_cnt) +{ + s32 ret = 0; + u16 vsi_id_temp = 0; + u16 idx = 0; + struct sxe2_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + + if (vsi_id_cnt != 2) { + LOG_ERROR_BDF("User src vsi list cnt only supports two vsi ids.\n"); + ret = -EINVAL; + goto l_end; + } + + vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!vsi) { + LOG_ERROR_BDF("src vsi id_in_dev:%u is NULL.\n", vsi_id); + ret = -EINVAL; + goto l_end; + } + + for (idx = 0; idx < vsi_id_cnt; idx++) { + vsi_id_temp = vsi_id_list[idx]; + if (vsi_id_temp == SXE2_VSI_ID_INVALID) { + LOG_ERROR_BDF("User src vsi list id:%u vsi id is invalid.\n", idx); + ret = -EINVAL; + goto l_end; + } + if (vsi_id_temp == vsi_id) + vsi->src_prune.vsi_id_u = vsi_id_temp; + else + vsi->src_prune.vsi_id_k = vsi_id_temp; + } + + ret = sxe2_srcvsi_ext_rule_add(vsi); + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_srcvsi_ext_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_srcvsi_ext_rule_del(adapter, vsi_id); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_unicast_mac_del(struct sxe2_adapter *adapter, u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u unicast mac %pM rule del.\n", vsi_id, mac); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_unicast_user_mode_mac_del(adapter, vsi_id, mac); + else + ret = sxe2_unicast_com_mode_mac_del(adapter, vsi_id, mac); + return ret; +} + +s32 sxe2_ucmd_multi_broad_mac_del(struct sxe2_adapter *adapter, u16 vsi_id, const u8 *mac) +{ + s32 ret = 0; + struct sxe2_addr_node *user_mac_node; + struct sxe2_vsi *user_vsi; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_DEBUG_BDF("User pf vsi:%u multi broad mac %pM rule del.\n", + vsi_id, mac); + + mutex_lock(&adapter->vsi_ctxt.lock); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_vsi_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = -EBUSY; + goto l_vsi_unlock; + } + + mutex_lock(&switch_ctxt->mac_addr_lock); + user_mac_node = sxe2_mac_addr_find(user_vsi, mac); + if (!user_mac_node) { + LOG_ERROR_BDF("mac:%pM is not in mac list.\n", mac); + ret = -ENOENT; + goto l_mac_list_unlock; + } + + ret = sxe2_mac_rule_del(adapter, user_vsi->idx_in_dev, mac); + if (ret) + goto l_mac_list_unlock; + + sxe2_switch_mac_node_del_and_free(user_mac_node); + +l_mac_list_unlock: + mutex_unlock(&switch_ctxt->mac_addr_lock); +l_vsi_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_promisc_rule_update(struct sxe2_adapter *adapter, u16 old_vsi, u16 new_vsi) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *rule_info = NULL; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_PROMISC].rule_head; + rule_lock = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_PROMISC].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(rule_info, list_head, list_entry) { + if (rule_info->vsi_list) { + clear_bit(old_vsi, rule_info->vsi_list->vsi_map); + set_bit(new_vsi, rule_info->vsi_list->vsi_map); + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + } 
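+ /* no vsi_list attached: the promiscuous rule forwards to a single VSI, so rewriting fwd_id is enough */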
else { + rule_info->act.fwd_id.vsi_id = new_vsi; + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + } + break; + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + return ret; +} + +STATIC s32 sxe2_com_mode_promisc_rule_add(struct sxe2_adapter *adapter, + u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + + eth_vsi = adapter->vsi_ctxt.main_vsi; + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (adapter->user_pf_ctxt.is_promisc_set) { + LOG_ERROR_BDF("user vsi [id in dev:%u] has been set promisc\n", + user_vsi->idx_in_dev); + ret = -EEXIST; + goto l_end; + } + + if (sxe2_promisc_rule_in_use(eth_vsi)) { + ret = sxe2_promisc_rule_update(adapter, eth_vsi->idx_in_dev, user_vsi->idx_in_dev); + if (ret) + goto l_end; + } else { + ret = sxe2_promisc_rule_add(user_vsi); + if (ret) + goto l_end; + } + + adapter->user_pf_ctxt.is_promisc_set = true; + +l_end: + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_user_mode_promisc_rule_add(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (adapter->user_pf_ctxt.is_promisc_set) { + LOG_ERROR_BDF("user vsi [id in pf:%u] has been set promisc\n", + user_vsi->idx_in_dev); + ret = -EEXIST; + goto l_end; + } + + ret = sxe2_promisc_rule_add(user_vsi); + if (ret) + goto l_end; + + adapter->user_pf_ctxt.is_promisc_set = true; + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_promisc_rule_add(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u promisc rule add.\n", vsi_id); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_user_mode_promisc_rule_add(adapter, vsi_id); + else + ret = sxe2_com_mode_promisc_rule_add(adapter, vsi_id); + return ret; +} + +STATIC s32 sxe2_com_mode_promisc_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + + eth_vsi = adapter->vsi_ctxt.main_vsi; + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (!adapter->user_pf_ctxt.is_promisc_set) { + LOG_WARN_BDF("user vsi [id in pf:%u] has not set promisc\n", + user_vsi->idx_in_dev); + goto l_end; + } + + if (eth_vsi->netdev->flags & IFF_PROMISC) { + ret = sxe2_promisc_rule_update(adapter, + user_vsi->idx_in_dev, eth_vsi->idx_in_dev); + if (ret) + goto l_end; + } else { + ret = sxe2_promisc_rule_del(adapter, user_vsi->idx_in_dev); + if (ret) + goto l_end; 
+ } + + adapter->user_pf_ctxt.is_promisc_set = false; + +l_end: + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_user_mode_promisc_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (!adapter->user_pf_ctxt.is_promisc_set) { + LOG_WARN_BDF("user vsi [id in pf:%u] has not set promisc\n", + user_vsi->idx_in_dev); + goto l_end; + } + + ret = sxe2_promisc_rule_del(adapter, user_vsi->idx_in_dev); + if (ret) + goto l_end; + + adapter->user_pf_ctxt.is_promisc_set = false; + +l_end: + + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_promisc_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u promisc rule del.\n", vsi_id); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_user_mode_promisc_rule_del(adapter, vsi_id); + else + ret = sxe2_com_mode_promisc_rule_del(adapter, vsi_id); + return ret; +} + +s32 sxe2_allmulti_rule_update(struct sxe2_adapter *adapter, u16 old_vsi, u16 new_vsi) +{ + s32 ret = 0; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + struct list_head *list_head; + struct sxe2_rule_info *rule_info = NULL; + struct mutex *rule_lock; + struct mutex *vsi_list_lock; + + list_head = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_ALLMULTI].rule_head; + rule_lock = &switch_ctxt->recipe[SXE2_DEFAULT_RECIPE_ALLMULTI].rule_lock; + vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock; + + mutex_lock(rule_lock); + mutex_lock(vsi_list_lock); + + list_for_each_entry(rule_info, list_head, list_entry) { + if (rule_info->vsi_list) { + clear_bit(old_vsi, rule_info->vsi_list->vsi_map); + set_bit(new_vsi, rule_info->vsi_list->vsi_map); + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + } else { + rule_info->act.fwd_id.vsi_id = new_vsi; + ret = sxe2_fwd_rule_update(adapter, rule_info); + if (ret) + LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret); + } + break; + } + + mutex_unlock(vsi_list_lock); + mutex_unlock(rule_lock); + return ret; +} + +STATIC s32 sxe2_user_mode_allmulti_rule_add(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (adapter->user_pf_ctxt.is_allmulti_set) { + LOG_ERROR_BDF("user vsi [id in pf:%u] has been set allmulti\n", + user_vsi->idx_in_dev); + ret = -EEXIST; + goto l_end; + } + + ret = sxe2_allmulti_rule_add(user_vsi); + if (ret) + goto l_end; + + adapter->user_pf_ctxt.is_allmulti_set = true; + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_com_mode_allmulti_rule_add(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + + eth_vsi = 
adapter->vsi_ctxt.main_vsi; + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (adapter->user_pf_ctxt.is_allmulti_set) { + LOG_ERROR_BDF("user vsi [id in pf:%u] has been set allmulti\n", + user_vsi->idx_in_dev); + ret = -EEXIST; + goto l_end; + } + + if (sxe2_allmulti_rule_in_use(eth_vsi)) { + ret = sxe2_allmulti_rule_update(adapter, + eth_vsi->idx_in_dev, user_vsi->idx_in_dev); + if (ret) + goto l_end; + } else { + ret = sxe2_allmulti_rule_add(user_vsi); + if (ret) + goto l_end; + } + + adapter->user_pf_ctxt.is_allmulti_set = true; + +l_end: + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_allmulti_rule_add(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u allmulti rule add.\n", vsi_id); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_user_mode_allmulti_rule_add(adapter, vsi_id); + else + ret = sxe2_com_mode_allmulti_rule_add(adapter, vsi_id); + return ret; +} + +STATIC s32 sxe2_user_mode_allmulti_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (!adapter->user_pf_ctxt.is_allmulti_set) { + LOG_WARN_BDF("user vsi [id in pf:%u] has not set allmulti\n", + user_vsi->idx_in_dev); + goto l_end; + } + + ret = sxe2_allmulti_rule_del(adapter, user_vsi->idx_in_dev); + if (ret) + goto l_end; + + adapter->user_pf_ctxt.is_allmulti_set = false; + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_com_mode_allmulti_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2_vsi *eth_vsi; + struct sxe2_vsi *user_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + + eth_vsi = adapter->vsi_ctxt.main_vsi; + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE2_VSI_S_DISABLE, user_vsi->state) || + test_bit(SXE2_VSI_S_DISABLE, eth_vsi->state)) { + ret = -EBUSY; + goto l_end; + } + + if (!adapter->user_pf_ctxt.is_allmulti_set) { + LOG_WARN_BDF("user vsi [id in dev:%u] has not set allmulti\n", + user_vsi->idx_in_dev); + goto l_end; + } + + if ((eth_vsi->netdev->flags & IFF_ALLMULTI) || + (eth_vsi->netdev->flags & IFF_PROMISC)) { + ret = sxe2_allmulti_rule_update(adapter, + user_vsi->idx_in_dev, eth_vsi->idx_in_dev); + if (ret) + goto l_end; + } else { + ret = sxe2_allmulti_rule_del(adapter, user_vsi->idx_in_dev); + if (ret) + goto l_end; + } + + adapter->user_pf_ctxt.is_allmulti_set = false; + +l_end: + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_allmulti_rule_del(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("User pf vsi:%u allmulti rule del.\n", vsi_id); + + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2_user_mode_allmulti_rule_del(adapter, vsi_id); + 
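+ /* com mode must also re-home the rule to the eth VSI when the kernel netdev still has IFF_ALLMULTI or IFF_PROMISC set */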
else + ret = sxe2_com_mode_allmulti_rule_del(adapter, vsi_id); + return ret; +} + +static void sxe2_ucmd_complex_fltr_init(struct sxe2_user_cpx_fltr *user_cpx_fltr, + struct sxe2_tcf_fltr *fltr) +{ + s32 i; + u16 j; + struct sxe2_tcf_key_item *item; + struct sxe2_tcf_key_item *item_user; + + memset(fltr, 0, sizeof(*fltr)); + + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + item = &fltr->items[i]; + item_user = &user_cpx_fltr->items[i]; + + item->type = i; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) { + item->mask.raw[j] = item_user->mask.raw[j]; + item->value.raw[j] = item_user->value.raw[j]; + } + } + + fltr->adapter = user_cpx_fltr->adapter; + fltr->src_vsi_id = user_cpx_fltr->src_vsi_id; + fltr->dst_queue_id = user_cpx_fltr->dst_queue_id; + fltr->dst_queue_high = user_cpx_fltr->dst_queue_high; + fltr->dst_queue_group = user_cpx_fltr->dst_queue_group; + fltr->tunnel_type = user_cpx_fltr->tunnel_type; + fltr->action = user_cpx_fltr->action; + fltr->src_type = user_cpx_fltr->src_type; + fltr->prio = user_cpx_fltr->prio; + fltr->rule_vsi_id = user_cpx_fltr->rule_vsi_id; + fltr->backup_type = user_cpx_fltr->backup_type; + fltr->cookie_invalid = true; + fltr->is_user_rule = true; + fltr->priority = SXE2_SWITCH_RECIPE_PRIO_7; + memcpy(fltr->dst_vsi_map, user_cpx_fltr->dst_vsi_map, sizeof(fltr->dst_vsi_map)); + fltr->dst_vsi_id = user_cpx_fltr->dst_vsi_id; +} + +static bool sxe2_tc_item_is_empty(struct sxe2_tcf_fltr *fltr, u16 id) +{ + u16 tmp[sizeof(union sxe2_prot_hdr) / sizeof(u16)] = { 0 }; + + if (memcmp(fltr->items[id].mask.raw, tmp, sizeof(tmp)) == 0) + return true; + return false; +} + +void sxe2_tc_item_print(struct sxe2_tcf_fltr *user_cpx_fltr) +{ + struct sxe2_adapter *adapter = user_cpx_fltr->adapter; + struct sxe2_tcf_key_item *item; + u16 i, j; + + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + if (sxe2_tc_item_is_empty(user_cpx_fltr, i)) + continue; + item = &user_cpx_fltr->items[i]; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) { + if (!item->mask.raw[j]) + continue; + LOG_DEBUG_BDF("item[%u][%u] value:0x%x mask:0x%x\n", + i, j, item->value.raw[j], item->mask.raw[j]); + } + } +} + +static bool sxe2_user_cpx_item_is_empty(struct sxe2_user_cpx_fltr *fltr, u16 id) +{ + u16 tmp[sizeof(union sxe2_prot_hdr) / sizeof(u16)] = { 0 }; + + if (memcmp(fltr->items[id].mask.raw, tmp, sizeof(tmp)) == 0) + return true; + return false; +} + +STATIC void sxe2_user_cpx_item_print(struct sxe2_user_cpx_fltr *user_cpx_fltr) +{ + struct sxe2_adapter *adapter = user_cpx_fltr->adapter; + struct sxe2_tcf_key_item *item; + u16 i, j; + + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + if (sxe2_user_cpx_item_is_empty(user_cpx_fltr, i)) + continue; + item = &user_cpx_fltr->items[i]; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) { + if (!item->mask.raw[j]) + continue; + LOG_DEBUG_BDF("item[%u][%u] value:0x%x mask:0x%x\n", + i, j, item->value.raw[j], item->mask.raw[j]); + } + } +} + +s32 sxe2_ucmd_complex_fltr_proc(struct sxe2_user_cpx_fltr *user_cpx_fltr, + bool is_add) +{ + struct sxe2_tcf_fltr *fltr; + struct sxe2_adapter *adapter = user_cpx_fltr->adapter; + struct sxe2_vsi *vsi; + struct sxe2_vsi *vsi_q; + s32 ret = 0; + u16 dst_vsi_cnt; + u16 dst_vsi = 0; + + LOG_DEBUG_BDF("User complex rule %s. src_vsi:%u, dst_vsi:%u, \t" + "dst_queue_id:%u, dst_queue_high:%u, dst_queue_group:%u, \t" + "tunnel_type:%u, action_type:%u, src_type:%u, prio:%u, backtype:%u\n", + is_add ? 
"add" : "del", + user_cpx_fltr->src_vsi_id, user_cpx_fltr->dst_vsi_id, + user_cpx_fltr->dst_queue_id, user_cpx_fltr->dst_queue_high, + user_cpx_fltr->dst_queue_group, user_cpx_fltr->tunnel_type, + user_cpx_fltr->action, user_cpx_fltr->src_type, + user_cpx_fltr->prio, user_cpx_fltr->backup_type); + + while (true) { + dst_vsi = (u16)find_next_bit((unsigned long *)user_cpx_fltr->dst_vsi_map, + SXE2_VSI_MAX_CNT, dst_vsi); + if (dst_vsi >= SXE2_VSI_MAX_CNT) + break; + LOG_DEBUG_BDF("[in map]dst_vsi_id: %u ", dst_vsi); + dst_vsi++; + } + + sxe2_user_cpx_item_print(user_cpx_fltr); + + dst_vsi_cnt = bitmap_weight(user_cpx_fltr->dst_vsi_map, SXE2_VSI_MAX_CNT); + if (user_cpx_fltr->action == SXE2_FWD_TO_VSI_LIST && dst_vsi_cnt <= 1) { + LOG_ERROR_BDF("dst vsi count:%u, but action type is fwd to list\n", + dst_vsi_cnt); + ret = -EINVAL; + return ret; + } + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + LOG_ERROR_BDF("alloc memory failed, size %ld\n", sizeof(*fltr)); + return -ENOMEM; + } + + sxe2_ucmd_complex_fltr_init(user_cpx_fltr, fltr); + + sxe2_tcf_match_meta_fill(fltr); + + ret = sxe2_tcf_word_cnt_calc(fltr); + if (ret) + goto l_end; + + ret = sxe2_tcf_profile_find(fltr); + if (ret) + goto l_end; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, user_cpx_fltr->src_vsi_id); + if (!vsi) { + LOG_ERROR_BDF("src vsi id_in_dev:%u is NULL.\n", user_cpx_fltr->src_vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + LOG_ERROR_BDF("src vsi id_in_dev:%u is disable .\n", user_cpx_fltr->src_vsi_id); + ret = -EBUSY; + goto l_unlock; + } + + if ((fltr->action == SXE2_FWD_TO_Q) || (fltr->action == SXE2_FWD_TO_QGRP)) { + vsi_q = sxe2_vsi_get_by_idx(adapter, user_cpx_fltr->dst_vsi_id); + if (!vsi_q) { + LOG_ERROR_BDF("dst vsi id_in_dev:%u is NULL\n", user_cpx_fltr->dst_vsi_id); + ret = -EINVAL; + goto l_unlock; + } + if (test_bit(SXE2_VSI_S_DISABLE, vsi_q->state)) { + LOG_ERROR_BDF("dst vsi id_in_dev:%u is disable\n", + user_cpx_fltr->dst_vsi_id); + ret = -EBUSY; + goto l_unlock; + } + if (vsi_q->rxqs.q_cnt < user_cpx_fltr->dst_queue_id) { + LOG_ERROR_BDF("dst vsi queue count %u insufficient for flow queue id %u\n", + vsi_q->rxqs.q_cnt, user_cpx_fltr->dst_queue_id); + ret = -EINVAL; + goto l_unlock; + } + fltr->dst_queue_id = vsi_q->rxqs.q[user_cpx_fltr->dst_queue_id]->idx_in_pf + + adapter->q_ctxt.rxq_base_idx_in_dev; + } + + ret = is_add ? sxe2_tcf_rule_add(adapter, vsi->idx_in_dev, fltr) : + sxe2_tcf_rule_del(adapter, vsi->idx_in_dev, fltr); + + dst_vsi = 0; + if (user_cpx_fltr->action == SXE2_FWD_TO_VSI_LIST) { + LOG_DEBUG_BDF("user complex rule %s %s, rule vsi %u, src vsi %u dst vsi list\n", + is_add ? "add" : "del", ret ? "failed":"success", + fltr->rule_vsi_id, fltr->src_vsi_id); + } else { + LOG_DEBUG_BDF("user complex rule %s %s, rule vsi %u, src vsi %u, \t" + "dst vsi %u, queue in dev %u\n", + is_add ? "add" : "del", ret ? "failed":"success", + fltr->rule_vsi_id, fltr->src_vsi_id, + user_cpx_fltr->dst_vsi_id, fltr->dst_queue_id); + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +l_end: + kfree(fltr); + return ret; +} + +s32 sxe2_ucmd_vlan_rule_process(struct sxe2_adapter *adapter, u16 vsi_hw_id, + struct sxe2_vlan *vlan, bool add) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + LOG_DEBUG_BDF("User pf vsi:%u %s vlan rule, vid:%u, tpid:%u\n", + vsi_hw_id, add ? 
"add" : "del", vlan->vid, vlan->tpid); + + mutex_lock(&adapter->vsi_ctxt.lock); + + vsi = sxe2_vsi_get_by_idx(adapter, vsi_hw_id); + if (!vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (add) + ret = sxe2_vlan_rule_add(vsi, vlan); + else + ret = sxe2_vlan_rule_del(adapter, vsi->idx_in_dev, vlan); + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_ucmd_vlan_filter_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en) +{ + LOG_DEBUG_BDF("User pf vsi:%u vlan filter set to %u.\n", vsi_hw_id, en); + return sxe2_vlan_filter_control(adapter, vsi_hw_id, en); +} + +static void sxe2_user_unicast_mac_rest(struct sxe2_adapter *adapter, struct sxe2_vsi *user_vsi, + struct sxe2_vsi *eth_vsi) +{ + s32 ret = 0; + struct sxe2_addr_node *user_node; + struct sxe2_addr_node *eth_node; + struct sxe2_addr_node *tmp_1; + struct sxe2_addr_node *tmp_2; + struct sxe2_mac_filter *eth_mac_fltr; + struct sxe2_mac_filter *user_mac_fltr; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + eth_mac_fltr = ð_vsi->mac_filter; + user_mac_fltr = &user_vsi->mac_filter; + + mutex_lock(&switch_ctxt->mac_addr_lock); + list_for_each_entry_safe(user_node, tmp_1, &user_mac_fltr->mac_addr_list, list) { + if (is_unicast_ether_addr(user_node->mac_addr)) { + list_for_each_entry_safe(eth_node, tmp_2, + ð_mac_fltr->mac_addr_list, list) { + if (!memcmp(user_node->mac_addr, eth_node->mac_addr, + sizeof(user_node->mac_addr))) { + ret = sxe2_mac_rule_update(adapter, user_node->mac_addr, + user_vsi->idx_in_dev, + eth_vsi->idx_in_dev); + if (ret) { + LOG_ERROR_BDF("user vsi %u mac %pM, \t" + "update to eth vsi %u failed.\n", + user_vsi->idx_in_dev, + user_node->mac_addr, + eth_vsi->idx_in_dev); + } + break; + } + } + } + + sxe2_switch_mac_node_del_and_free(user_node); + } + mutex_unlock(&switch_ctxt->mac_addr_lock); +} + +static void sxe2_user_mac_node_clean(struct sxe2_adapter *adapter, struct sxe2_vsi *user_vsi) +{ + struct sxe2_addr_node *user_node; + struct sxe2_addr_node *tmp; + struct sxe2_mac_filter *user_mac_fltr; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + user_mac_fltr = &user_vsi->mac_filter; + + mutex_lock(&switch_ctxt->mac_addr_lock); + list_for_each_entry_safe(user_node, tmp, &user_mac_fltr->mac_addr_list, list) { + sxe2_switch_mac_node_del_and_free(user_node); + } + mutex_unlock(&switch_ctxt->mac_addr_lock); +} + +static void sxe2_user_promisc_allmulti_rest(struct sxe2_adapter *adapter, struct sxe2_vsi *user_vsi, + struct sxe2_vsi *eth_vsi) +{ + s32 ret = 0; + + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + if ((adapter->user_pf_ctxt.is_allmulti_set && + (eth_vsi->netdev->flags & IFF_ALLMULTI)) || + (eth_vsi->netdev->flags & IFF_PROMISC)) { + ret = sxe2_allmulti_rule_update(adapter, user_vsi->idx_in_dev, + eth_vsi->idx_in_dev); + if (ret) { + LOG_ERROR_BDF("user vsi %u allmulti rule, update to eth vsi %u failed.\n", + user_vsi->idx_in_dev, eth_vsi->idx_in_dev); + } + } + if (adapter->user_pf_ctxt.is_promisc_set && + eth_vsi->netdev->flags & IFF_PROMISC) { + ret = sxe2_promisc_rule_update(adapter, user_vsi->idx_in_dev, + eth_vsi->idx_in_dev); + if (ret) { + LOG_ERROR_BDF("user vsi %u promisc rule, update to eth vsi %u failed.\n", + user_vsi->idx_in_dev, eth_vsi->idx_in_dev); + } + } + + adapter->user_pf_ctxt.is_allmulti_set = false; + adapter->user_pf_ctxt.is_promisc_set = false; + + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); +} + +static void sxe2_user_promisc_allmulti_clean(struct sxe2_adapter 
*adapter, + struct sxe2_vsi *user_vsi) +{ + mutex_lock(&adapter->user_pf_ctxt.flag_lock); + adapter->user_pf_ctxt.is_allmulti_set = false; + adapter->user_pf_ctxt.is_promisc_set = false; + mutex_unlock(&adapter->user_pf_ctxt.flag_lock); +} + +s32 sxe2_user_l2_feature_clean(struct sxe2_adapter *adapter, u16 vsi_hw_id) +{ + s32 ret = 0; + struct sxe2_vsi *user_vsi; + struct sxe2_vsi *eth_vsi; + + LOG_DEBUG_BDF("User pf vsi:%u clean l2 feature.\n", vsi_hw_id); + + user_vsi = sxe2_vsi_get_by_idx(adapter, vsi_hw_id); + if (!user_vsi) { + LOG_ERROR_BDF("user PF vsi is NULL.\n"); + ret = -EINVAL; + goto l_end; + } + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + eth_vsi = adapter->vsi_ctxt.main_vsi; + sxe2_user_unicast_mac_rest(adapter, user_vsi, eth_vsi); + + if (sxe2_eswitch_is_offload(adapter) && sxe2_vf_is_exist(adapter)) { + (void)sxe2_eswitch_ucmd_uplink_resetto_ker(adapter); + } else { + sxe2_user_promisc_allmulti_rest(adapter, user_vsi, eth_vsi); + } + + } else { + sxe2_user_mac_node_clean(adapter, user_vsi); + sxe2_user_promisc_allmulti_clean(adapter, user_vsi); + } + +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.h new file mode 100644 index 0000000000000000000000000000000000000000..9fb69c5f1c4656f3ab5928ad30d9c750454d54d2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_switch.h @@ -0,0 +1,655 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_switch.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_SWITCH_H__ +#define __SXE2_SWITCH_H__ + +#include +#include +#include +#include + +#include "sxe2_vsi.h" +#include "sxe2_flow_public.h" + +#ifndef U8_MAX +#define U8_MAX (0xFF) +#endif + +#ifndef U8_BITS +#define U8_BITS (8) +#endif + +#ifdef SXE2_TEST +#define STATIC +#else +#define STATIC static +#endif + +#define SXE2_FULL_KEY_RECIPE_ROOT_S (7) +#define SXE2_FULL_KEY_SOURCE_TYPE_S (5) + +#define SXE2_IPV6_ADDR_LENGTH (16) + +#define SXE2_VSID_PF_TO_DEV(id_in_pf, adapter) \ + (((adapter)->vsi_ctxt.vsi[(id_in_pf)])->idx_in_dev) + +#define SXE2_VLAN_QOS_MAX (7) + +#define SXE2_PROFILE_MAX_COUNT (256) +#define SXE2_SWITCH_PROFILE_FV_CNT (48) + +#define SXE2_VLAN(tpid, vid, prio) ((struct sxe2_vlan){ tpid, vid, prio }) + +struct sxe2_tc_rule_hash { + struct hlist_node node; + unsigned long cookie; + struct sxe2_rule_info *rule_info; +}; + +enum sxe2_fwd_act_type { + SXE2_FWD_TO_VSI = 0, + SXE2_FWD_TO_VSI_LIST, + SXE2_FWD_TO_Q, + SXE2_FWD_TO_QGRP, + SXE2_DROP_PACKET, + SXE2_MIRROR_PACKET, + SXE2_LARGE_ACTION, + SXE2_INVAL_ACT +}; + +enum sxe2_vsi_list_type { + SXE2_VSI_LIST_TYPE_FORWARD = 0, + SXE2_VSI_LIST_TYPE_PRUNE, + + SXE2_VSI_LIST_TYPE_MAX, +}; + +struct sxe2_vsi_list_info { + struct list_head list_entry; + enum sxe2_vsi_list_type type; + DECLARE_BITMAP(vsi_map, SXE2_VSI_MAX_CNT); + u16 vsi_list_id; + u16 rule_cnt; + u16 need_bond; +}; + +struct sxe2_vsi_list_mgmt { + enum sxe2_vsi_list_type type; + struct list_head vsi_list_head; + struct mutex vsi_list_lock; +}; + +struct sxe2_rule_action { + enum sxe2_fwd_act_type type; + union { + u16 q_id : 11; + u16 vsi_id : 10; + u16 vsi_list_id : 10; + } fwd_id; + u8 lb_en; + u8 lan_en; + u8 q_high:1; + u8 qgrp_size:3; + u8 rsv:4; +}; + +enum sxe2_src_type { + SXE2_SRC_TYPE_TX = 0, + SXE2_SRC_TYPE_RX, +}; + +enum sxe2_pkt_src_type { + SXE2_PKT_SRC_TYPE_LAN = 0, + SXE2_PKT_SRC_TYPE_LOOPBACK_HOST, + 
SXE2_PKT_SRC_TYPE_LOOPBACK_MNG, + SXE2_PKT_SRC_TYPE_TRANSMIT, +}; + +struct sxe2_rule_filter { + enum sxe2_src_type src_type; + union { + struct { + u8 mac_addr[ETH_ALEN]; + } mac; + struct { + u16 vlan_id; + u16 tpid; + u8 tpid_valid; + } vlan; + struct { + u16 vsi_id; + u16 ethertype; + } etype; + struct { + u16 vsi_id; + u8 to_rdma; + u8 packet_src_type; + } srcvsi; + struct { + u16 vsi_id; + } mac_spoofchk; + struct { + u16 hid; + u8 mac_addr[ETH_ALEN]; + } mac_spoofchk_ext; + } data; +}; + +struct sxe2_rule_info { + struct list_head list_entry; + struct sxe2_vsi_list_info *vsi_list; + u16 recipe_id; + u16 rule_id; + struct sxe2_rule_filter fltr; + struct sxe2_rule_action act; + bool is_fwd; + struct sxe2_tcf_fltr *tcf_fltr; + struct list_head tc_rule_head; + u16 hid; +}; + +struct sxe2_recipe { + u8 is_root; + u16 recipe_id; + struct list_head rule_head; + struct list_head restore_head; + struct mutex rule_lock; + DECLARE_HASHTABLE(ht_cookie, 10); + DECLARE_HASHTABLE(ht_lkup, 10); +}; + +struct sxe2_profile_fv_item { + u32 prot_id : 8; + u32 offset : 9; + u32 enable : 1; + u32 rsv : 14; +}; + +struct sxe2_switch_context { + u16 evb_mode; + u8 switch_id; + struct mutex evb_mode_lock; + struct sxe2_vsi_list_mgmt vsi_list_mgmt[SXE2_VSI_LIST_TYPE_MAX]; + struct sxe2_recipe recipe[SXE2_DEFAULT_RECIPE_MAX]; + struct sxe2_recipe complex_recipe; + struct sxe2_profile_fv_item **profile_fv_item; + struct mutex lldp_rule_lock; + struct mutex mac_addr_lock; +}; + +struct sxe2_ethtype_hdr { + u16 ethtype_id; +} __packed; + +struct sxe2_udp_tnl_hdr { + u16 source; + u16 dest; + u16 len; + u16 check; + u16 field; + u16 proto_type; + u32 vni; +} __packed; + +union sxe2_prot_hdr { + struct sxe2_ether_hdr eth_hdr; + struct sxe2_ethtype_hdr ethertype; + struct sxe2_vlan_hdr vlan_hdr; + struct sxe2_ipv4_hdr ipv4_hdr; + struct sxe2_ipv6_hdr ipv6_hdr; + struct sxe2_tcp_hdr tcp_hdr; + struct sxe2_udp_hdr udp_hdr; + struct sxe2_udp_tnl_hdr udp_tnl_hdr; + struct sxe2_nvgre_hdr nvgre_hdr; +}; + +#define SXE2_MAC_OL_HW 1 +#define SXE2_MAC_IL_HW 4 +#define SXE2_ETYPE_OL_HW 9 +#define SXE2_ETYPE_IL_HW 10 +#define SXE2_VLAN_EX_HW 16 +#define SXE2_VLAN_OL_HW 17 +#define SXE2_IPV4_OL_HW 32 +#define SXE2_IPV4_IL_HW 33 +#define SXE2_IPV6_OL_HW 40 +#define SXE2_IPV6_IL_HW 41 +#define SXE2_TCP_IL_HW 49 +#define SXE2_UDP_OL_HW 52 +#define SXE2_UDP_IL_HW 53 +#define SXE2_GRE_HW 64 + +#define SXE2_META_HW 0xff + +#define SXE2_META_PKT_SRC_OFFSET 5 +#define SXE2_META_PKT_DIRECTION_OFFSET 4 +#define SXE2_META_VSI_NUM_OFFSET 1 +#define SXE2_META_PKT_TO_RDMA_OFFSET 2 + +enum sxe2_protocol_filed_type { + SXE2_META_PKT_SRC = 0, + SXE2_META_PKT_DIRECTION, + SXE2_META_VSI_NUM, + SXE2_META_PKT_TO_RDMA, + SXE2_OUTER_SMAC, + SXE2_OUTER_DMAC, + SXE2_INNER_SMAC, + SXE2_INNER_DMAC, + SXE2_OUTER_ETYPE, + SXE2_INNER_ETYPE, + SXE2_OUTER_VLAN_EX, + SXE2_OUTER_VLAN, + SXE2_OUTER_IPV4_SADDR, + SXE2_OUTER_IPV4_DADDR, + SXE2_OUTER_IPV4_TTL, + SXE2_OUTER_IPV4_TOS, + SXE2_OUTER_IPV4_PROT, + SXE2_INNER_IPV4_SADDR, + SXE2_INNER_IPV4_DADDR, + SXE2_INNER_IPV4_TTL, + SXE2_INNER_IPV4_TOS, + SXE2_INNER_IPV4_PROT, + SXE2_OUTER_IPV6_SADDR, + SXE2_OUTER_IPV6_DADDR, + SXE2_INNER_IPV6_SADDR, + SXE2_INNER_IPV6_DADDR, + SXE2_LAST_TCP_SPORT, + SXE2_LAST_TCP_DPORT, + SXE2_OUTER_UDP_SPORT, + SXE2_OUTER_UDP_DPORT, + SXE2_INNER_UDP_SPORT, + SXE2_INNER_UDP_DPORT, + SXE2_VXLAN_ENC_ID, + SXE2_GENEVE_ENC_ID, + SXE2_NVGRE_ENC_ID, + + SXE2_PROT_FIELD_LAST, +}; + +#define SXE2_PROT_OFFSET_VNI 12 + +struct sxe2_tcf_key_item { + enum sxe2_protocol_filed_type type; + union { 
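+ /*
+  * value/mask are addressable either as a typed protocol header or as
+  * raw 16-bit words; the raw view is what sxe2_tcf_word_cnt_calc()
+  * walks and presumably what fills the lkup_value/lkup_mask word
+  * arrays of sxe2_tcf_fltr.
+  */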
+ union sxe2_prot_hdr hdr; + u16 raw[sizeof(union sxe2_prot_hdr) / sizeof(u16)]; + } value; + union { + union sxe2_prot_hdr hdr; + u16 raw[sizeof(union sxe2_prot_hdr) / sizeof(u16)]; + } mask; +}; + +enum sxe2_tunnel_type { + SXE2_TNL_NONE = 0, + SXE2_TNL_VXLAN, + SXE2_TNL_GENEVE, + SXE2_TNL_GRETAP, + + SXE2_TNL_ALL, +}; + +enum sxe2_rule_backup_type { + SXE2_RULE_BACKUP_T_NO = 0, + SXE2_RULE_BACKUP_T_LAST, + SXE2_RULE_BACKUP_T_FIRST, + + SXE2_RULE_BACKUP_T_ALL, +}; + +struct sxe2_tc_rule_info { + struct list_head list_entry; + unsigned long cookie; + u32 prio; + u16 src_vsi_id; + u16 dst_vsi_id; + DECLARE_BITMAP(dst_vsi_map, SXE2_VSI_MAX_CNT); + struct sxe2_vsi_list_info *vsi_list; + enum sxe2_fwd_act_type action; + struct sxe2_rule_action act; + enum sxe2_rule_backup_type backup_type; +}; + +struct sxe2_tcf_fltr { + struct hlist_node node; + struct sxe2_rule_info *rule_info; + unsigned long cookie; + + struct sxe2_adapter *adapter; + u16 src_vsi_id; + u16 dst_vsi_id; + DECLARE_BITMAP(dst_vsi_map, SXE2_VSI_MAX_CNT); + u16 dst_queue_id; + u8 dst_queue_high:1; + u8 dst_queue_group:3; + u8 rsv:4; + + struct sxe2_tcf_key_item items[SXE2_PROT_FIELD_LAST]; + u16 word_cnt; + + enum sxe2_tunnel_type tunnel_type; + u8 ip_proto; + + enum sxe2_fwd_act_type action; + enum sxe2_src_type src_type; + u8 priority; + + DECLARE_BITMAP(profiles, SXE2_MAX_NUM_PROFILES); + + u16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + u16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + u16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + + u16 recipe_cnt; + u16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + u16 rule_id[SXE2_MAX_CHAIN_RECIPE]; + + u32 prio; + + bool cookie_invalid; + + bool is_user_rule; + + u16 rule_vsi_id; + + enum sxe2_rule_backup_type backup_type; +}; + +struct sxe2_user_cpx_fltr { + struct sxe2_adapter *adapter; + u16 src_vsi_id; + u16 dst_vsi_id; + DECLARE_BITMAP(dst_vsi_map, SXE2_VSI_MAX_CNT); + u16 dst_queue_id; + u8 dst_queue_high; + u8 dst_queue_group; + struct sxe2_tcf_key_item items[SXE2_PROT_FIELD_LAST]; + enum sxe2_tunnel_type tunnel_type; + enum sxe2_fwd_act_type action; + enum sxe2_src_type src_type; + u32 prio; + u16 rule_vsi_id; + enum sxe2_rule_backup_type backup_type; +}; + +struct sxe2_switch_recipe { + u8 rid : 6; + u8 rcp_rsv0 : 1; + u8 is_root : 1; + u8 lookup_index0 : 7; + u8 lookup_index0_valid : 1; + u8 lookup_index1 : 7; + u8 lookup_index1_valid : 1; + u8 lookup_index2 : 7; + u8 lookup_index2_valid : 1; + u8 lookup_index3 : 7; + u8 lookup_index3_valid : 1; + u8 lookup_index4 : 7; + u8 lookup_index4_valid : 1; + u8 join_priority; + u8 priority : 3; + u8 need_pass_l2 : 1; + u8 allow_pass_l2 : 1; + u8 inverse_action : 1; + u8 prune_idx : 2; + u32 default_action : 19; + u32 rcp_rsv1 : 4; + u32 default_action_valid : 1; + u32 rcp_rsv2 : 8; + u32 fv4_bitmask : 16; + u32 fv3_bitmask : 16; + u32 fv2_bitmask : 16; + u32 fv1_bitmask : 16; + u32 fv0_bitmask : 16; + u32 rcp_rsv3 : 16; +}; + +struct sxe2_user_context { + struct mutex flag_lock; + bool is_promisc_set; + bool is_allmulti_set; +}; + +static inline void sxe2_switch_mac_node_del_and_free(struct sxe2_addr_node *mac_node) +{ + if (mac_node) { + list_del(&mac_node->list); + kfree(mac_node); + } +} + +s32 sxe2_switch_context_init(struct sxe2_adapter *adapter); + +void sxe2_switch_context_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_mac_rule_add(struct sxe2_vsi *vsi, const u8 *mac); + +s32 sxe2_mac_rule_del(struct sxe2_adapter *adapter, + u16 id_in_dev, const u8 *mac); + +s32 sxe2_vlan_rule_add(struct sxe2_vsi *vsi, struct sxe2_vlan *vlan); + +s32 sxe2_vlan_rule_del(struct 
sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_vlan *vlan); + +s32 sxe2_promisc_rule_add(struct sxe2_vsi *vsi); + +s32 sxe2_promisc_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev); + +bool sxe2_promisc_rule_in_use(struct sxe2_vsi *vsi); + +s32 sxe2_allmulti_rule_add(struct sxe2_vsi *vsi); + +s32 sxe2_allmulti_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev); + +bool sxe2_allmulti_rule_in_use(struct sxe2_vsi *vsi); + +s32 sxe2_tx_etype_rule_add(struct sxe2_vsi *vsi, u16 etype); + +s32 sxe2_tx_etype_rule_del(struct sxe2_adapter *adapter, + u16 id_in_dev, u16 etype); + +s32 sxe2_tcf_rule_add(struct sxe2_adapter *adapter, + u16 vsi_id_in_dev, struct sxe2_tcf_fltr *fltr); + +s32 sxe2_tcf_rule_del(struct sxe2_adapter *adapter, + u16 vsi_id_in_dev, struct sxe2_tcf_fltr *fltr); + +void sxe2_vsi_fltr_clean(struct sxe2_vsi *vsi); +void sxe2_vsi_l2_fltr_clean(struct sxe2_vsi *vsi); +void sxe2_vsi_complex_fltr_clean(struct sxe2_vsi *vsi); + +void sxe2_vsi_fltr_remove(struct sxe2_adapter *adapter, u16 id_in_dev); + +void sxe2_vsi_l2_fltr_remove(struct sxe2_adapter *adapter, u16 id_in_dev); + +void sxe2_vsi_complex_fltr_remove(struct sxe2_adapter *adapter, + u16 id_in_dev, bool to_restore); + +s32 sxe2_switch_fltr_restore_prepare(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_complex_fltr_restore(struct sxe2_adapter *adapter, u16 vsi_id); + +void sxe2_switch_fltr_restore_clean(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_l2_fltr_restore(struct sxe2_vsi *vsi); + +s32 sxe2_rule_bridge_mode_update(struct sxe2_adapter *adapter); + +void sxe2_switch_rule_hw_dump(struct sxe2_adapter *adapter); + +void sxe2_fwc_switch_trace_rx_trigger(struct sxe2_adapter *adapter); + +void sxe2_fwc_switch_trace_tx_trigger(struct sxe2_adapter *adapter); + +void sxe2_fwc_switch_trace_recorder(struct sxe2_adapter *adapter); + +void sxe2_fwc_hw_dfx_show(struct sxe2_adapter *adapter); + +s32 sxe2_vlan_filter_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en); + +s32 sxe2_vsi_loopback_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en); + +s32 sxe2_vsi_spoofchk_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en); + +s32 sxe2_tcf_profile_find(struct sxe2_tcf_fltr *fltr); + +void sxe2_tcf_match_meta_fill(struct sxe2_tcf_fltr *fltr); + +static inline bool sxe2_tcf_item_is_empty(struct sxe2_tcf_fltr *fltr, u16 id) +{ + u16 tmp[sizeof(union sxe2_prot_hdr) / sizeof(u16)] = { 0 }; + + if (memcmp(fltr->items[id].mask.raw, tmp, sizeof(tmp)) == 0) + return true; + return false; +} + +s32 sxe2_vsi_vlan_zero_add(struct sxe2_vsi *vsi); + +void sxe2_srcvsi_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule); + +s32 sxe2_srcvsi_rule_add(struct sxe2_vsi *vsi); + +s32 sxe2_etype_fltr_init(struct sxe2_vsi *vsi); + +s32 sxe2_rx_etype_rule_add(struct sxe2_vsi *vsi, u16 etype); + +s32 sxe2_rx_etype_rule_del(struct sxe2_adapter *adapter, + u16 id_in_dev, u16 etype); + +s32 sxe2_srcvsi_rule_del(struct sxe2_adapter *adapter, u16 id_in_dev); +s32 sxe2_src_vsi_prune_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en); + +s32 sxe2_vfs_complex_fltr_restore(struct sxe2_adapter *adapter); + +s32 sxe2_pf_complex_fltr_restore(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_list_update_bond(struct sxe2_adapter *adapter, + struct sxe2_vsi_list_info *vsi_list, + struct sxe2_adapter *master_adapter, + bool linking); + +s32 sxe2_fwc_switch_large_action_cfg(struct sxe2_adapter *adapter, + struct sxe2_fwc_switch_large_action *lgActionParm, + enum sxe2_drv_cmd_opcode 
opc); + +s32 sxe2_sw_profile_parse_from_ddp(u8 *data, u16 cnt, u16 base_id, + struct sxe2_adapter *adapter); + +s32 sxe2_default_mac_addr_get(struct sxe2_vsi *vsi, u8 *mac); + +s32 sxe2_mac_addr_set(struct sxe2_vsi *vsi, const u8 *mac); + +s32 sxe2_mac_spoofchk_rule_add(struct sxe2_adapter *adapter, + u16 id_in_dev); + +s32 sxe2_mac_spoofchk_rule_del(struct sxe2_adapter *adapter, + u16 id_in_dev); + +void sxe2_mac_spoofchk_rule_prepare(struct sxe2_adapter *adapter, + u16 id_in_dev, struct sxe2_rule_info *rule); + +s32 sxe2_mac_spoofchk_ext_rule_add(struct sxe2_adapter *adapter, + u16 id_in_dev, const u8 *mac); + +s32 sxe2_mac_spoofchk_ext_rule_del(struct sxe2_adapter *adapter, + u16 id_in_dev, const u8 *mac); + +s32 sxe2_cur_mac_addr_set(struct sxe2_vsi *vsi, const u8 *mac); + +void sxe2_switch_recipe_dump(struct sxe2_adapter *adapter); + +void sxe2_switch_profile_recipemap_dump(struct sxe2_adapter *adapter); + +void sxe2_switch_share_id_dump(struct sxe2_adapter *adapter); + +s32 sxe2_switch_dfx_irq_setup(struct sxe2_adapter *adapter, bool en); + +s32 sxe2_switch_tc_samerule_del(struct sxe2_adapter *adapter, + struct sxe2_rule_info *save_rule); + +s32 sxe2_fwd_rule_remove(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info, bool free_sw); + +struct sxe2_tc_rule_hash *sxe2_hash_cookie_find(struct sxe2_adapter *adapter, + unsigned long cookie); + +s32 sxe2_mac_rule_update(struct sxe2_adapter *adapter, const u8 *mac, u16 old_vsi, u16 new_vsi); + +s32 sxe2_promisc_rule_update(struct sxe2_adapter *adapter, u16 old_vsi, u16 new_vsi); + +s32 sxe2_allmulti_rule_update(struct sxe2_adapter *adapter, u16 old_vsi, u16 new_vsi); + +s32 sxe2_fwd_rule_update(struct sxe2_adapter *adapter, + struct sxe2_rule_info *rule_info); + +s32 sxe2_ucmd_unicast_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac); + +s32 sxe2_ucmd_multi_broad_mac_add(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac); + +s32 sxe2_ucmd_unicast_mac_del(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac); + +s32 sxe2_ucmd_multi_broad_mac_del(struct sxe2_adapter *adapter, + u16 vsi_id, const u8 *mac); + +s32 sxe2_ucmd_promisc_rule_add(struct sxe2_adapter *adapter, + u16 vsi_id); + +s32 sxe2_ucmd_promisc_rule_del(struct sxe2_adapter *adapter, + u16 vsi_id); + +s32 sxe2_ucmd_allmulti_rule_add(struct sxe2_adapter *adapter, + u16 vsi_id); + +s32 sxe2_ucmd_allmulti_rule_del(struct sxe2_adapter *adapter, + u16 vsi_id); + +s32 sxe2_ucmd_complex_fltr_proc(struct sxe2_user_cpx_fltr *user_cpx_fltr, + bool is_add); + +s32 sxe2_ucmd_vlan_filter_control(struct sxe2_adapter *adapter, u16 vsi_hw_id, + bool en); + +s32 sxe2_ucmd_vlan_rule_process(struct sxe2_adapter *adapter, u16 vsi_hw_id, + struct sxe2_vlan *vlan, bool add); + +s32 sxe2_user_l2_feature_clean(struct sxe2_adapter *adapter, u16 vsi_hw_id); + +s32 sxe2_mac_spoof_rule_update(struct sxe2_vsi *eth_vsi, + struct sxe2_vsi *user_vsi, u8 *mac_addr, bool to_user); +s32 sxe2_srcvsi_ext_rule_add(struct sxe2_vsi *vsi); + +s32 sxe2_srcvsi_ext_rule_del(struct sxe2_adapter *adapter, + u16 vsi_id); +s32 sxe2_ucmd_srcvsi_ext_add(struct sxe2_adapter *adapter, + u16 vsi_id, u16 *vsi_id_list, u16 vsi_id_cnt); +s32 sxe2_ucmd_srcvsi_ext_del(struct sxe2_adapter *adapter, + u16 vsi_id); + +#endif diff 
--git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.c new file mode 100644 index 0000000000000000000000000000000000000000..239ab8933e738ebf6f7fd07c0c1c1a507fc855b9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.c @@ -0,0 +1,1473 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_tc.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#ifdef HAVE_FLOW_OFFLOAD_H +#include +#endif +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_vsi.h" +#include "sxe2_netdev.h" +#include "sxe2_log.h" +#include "sxe2_common.h" +#include "sxe2_tc.h" +#include "sxe2_switch.h" + +#define SXE2_TC_FLOWER_MASK_32 0xFFFFFFFF +#define SXE2_TC_FLOWER_MASK_16 0xFFFF +#define SXE2_TC_FLOWER_VNI_MAX 0xFFFFFFU + +enum sxe2_tunnel_type sxe2_tc_tun_type_get(struct net_device *tunnel_dev) +{ +#ifdef HAVE_VXLAN_TYPE + if (netif_is_vxlan(tunnel_dev)) + return SXE2_TNL_VXLAN; +#endif + +#ifdef HAVE_GENEVE_TYPE + if (netif_is_geneve(tunnel_dev)) + return SXE2_TNL_GENEVE; +#endif + +#ifdef HAVE_GRETAP_TYPE + if (netif_is_gretap(tunnel_dev) || netif_is_ip6gretap(tunnel_dev)) + return SXE2_TNL_GRETAP; +#endif + + return SXE2_TNL_NONE; +} + +#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) +static struct net_device *sxe2_tunnel_device_get(struct net_device *dev, + struct flow_rule *rule) +{ +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE + struct flow_action_entry *act; + u32 i; + + if (sxe2_tc_tun_type_get(dev) != SXE2_TNL_NONE) + return dev; + + if (rule->action.num_entries == 0) + return NULL; + + i = rule->action.num_entries - 1; + act = &rule->action.entries[i]; + if ((act->id == FLOW_ACTION_REDIRECT || act->id == FLOW_ACTION_MIRRED) && + sxe2_tc_tun_type_get(act->dev) != SXE2_TNL_NONE) + return act->dev; +#endif + + return NULL; +} +#endif + +#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) +static s32 sxe2_tc_set_ipv4(struct flow_match_ipv4_addrs *match, + struct sxe2_tcf_fltr *fltr, bool is_outer) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + + if (match->key->dst) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV4_DADDR]; + else + item = &fltr->items[SXE2_INNER_IPV4_DADDR]; + item->value.hdr.ipv4_hdr.daddr = match->key->dst; + item->mask.hdr.ipv4_hdr.daddr = match->mask->dst; + LOG_DEBUG_BDF("prot_type %d, daddr[%d], mask[0x%x]\n", + item->type, match->key->dst, match->mask->dst); + } + if (match->key->src) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV4_SADDR]; + else + item = &fltr->items[SXE2_INNER_IPV4_SADDR]; + item->value.hdr.ipv4_hdr.saddr = match->key->src; + item->mask.hdr.ipv4_hdr.saddr = match->mask->src; + LOG_DEBUG_BDF("prot_type %d, saddr[%d], mask[0x%x]\n", + item->type, match->key->src, match->mask->src); + } + return 0; +} + +static s32 sxe2_tc_set_ipv6(struct flow_match_ipv6_addrs *match, + struct sxe2_tcf_fltr *fltr, bool is_outer, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + + if (ipv6_addr_loopback(&match->key->dst) || + ipv6_addr_loopback(&match->key->src)) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Bad IPv6, addr is LOOPBACK"); +#endif + return -EINVAL; + } + if (ipv6_addr_any(&match->mask->dst) && ipv6_addr_any(&match->mask->src)) { +#ifdef 
HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Bad src/dest IPv6, addr is any"); +#endif + return -EINVAL; + } + + if (!ipv6_addr_any(&match->mask->dst)) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV6_DADDR]; + else + item = &fltr->items[SXE2_INNER_IPV6_DADDR]; + memcpy(item->value.hdr.ipv6_hdr.daddr, match->key->dst.s6_addr, + sizeof(match->key->dst.s6_addr)); + memcpy(item->mask.hdr.ipv6_hdr.daddr, match->mask->dst.s6_addr, + sizeof(match->mask->dst.s6_addr)); + LOG_DEBUG_BDF("prot_type %d, daddr[%d:%d:%d:%d], mask[0x%x:%x:%x:%x]\n", + item->type, match->key->dst.s6_addr32[0], + match->key->dst.s6_addr32[1], + match->key->dst.s6_addr32[2], + match->key->dst.s6_addr32[3], + match->mask->dst.s6_addr32[0], + match->mask->dst.s6_addr32[1], + match->mask->dst.s6_addr32[2], + match->mask->dst.s6_addr32[3]); + } + if (!ipv6_addr_any(&match->mask->src)) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV6_SADDR]; + else + item = &fltr->items[SXE2_INNER_IPV6_SADDR]; + memcpy(item->value.hdr.ipv6_hdr.saddr, match->key->src.s6_addr, + sizeof(match->key->src.s6_addr)); + memcpy(item->mask.hdr.ipv6_hdr.saddr, match->mask->src.s6_addr, + sizeof(match->mask->src.s6_addr)); + LOG_DEBUG_BDF("prot_type %d, saddr[%d:%d:%d:%d], mask[0x%x:%x:%x:%x]\n", + item->type, match->key->src.s6_addr32[0], + match->key->src.s6_addr32[1], + match->key->src.s6_addr32[2], + match->key->src.s6_addr32[3], + match->mask->src.s6_addr32[0], + match->mask->src.s6_addr32[1], + match->mask->src.s6_addr32[2], + match->mask->src.s6_addr32[3]); + } + + return 0; +} + +static s32 sxe2_tc_set_ttl_tos(struct flow_match_ip *match, + struct sxe2_tcf_fltr *fltr, bool is_outer) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + + if (match->mask->tos) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV4_TOS]; + else + item = &fltr->items[SXE2_INNER_IPV4_TOS]; + item->value.hdr.ipv4_hdr.tos = match->key->tos; + item->mask.hdr.ipv4_hdr.tos = match->mask->tos; + + LOG_DEBUG_BDF("prot_type %d, tos[%d], mask[0x%x]\n", + item->type, match->key->tos, match->mask->tos); + } + + if (match->mask->ttl) { + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV4_TTL]; + else + item = &fltr->items[SXE2_INNER_IPV4_TTL]; + item->value.hdr.ipv4_hdr.ttl = match->key->ttl; + item->mask.hdr.ipv4_hdr.ttl = match->mask->ttl; + + LOG_DEBUG_BDF("prot_type %d, ttl[%d], mask[0x%x]\n", + item->type, match->key->ttl, match->mask->ttl); + } + + return 0; +} + +static inline s32 sxe2_match_key_parse_enc_keyid(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct flow_match_enc_keyid enc_keyid; + struct sxe2_tcf_key_item *item; + u32 key_id; + + flow_rule_match_enc_keyid(rule, &enc_keyid); + if (!enc_keyid.mask->keyid || + enc_keyid.mask->keyid != cpu_to_be32(SXE2_TC_FLOWER_MASK_32)) { + LOG_DEV_ERR("bad mask for encap key_id 0x%04x, it must be 0xFFFFFFFF\n", + be32_to_cpu(enc_keyid.mask->keyid)); + return -EINVAL; + } + + key_id = be32_to_cpu(enc_keyid.key->keyid); + if ((fltr->tunnel_type == SXE2_TNL_VXLAN || + fltr->tunnel_type == SXE2_TNL_GENEVE) && + key_id > SXE2_TC_FLOWER_VNI_MAX) { + LOG_DEV_ERR("VNI out of range : 0x%x\n", key_id); + return -EINVAL; + } + + if (fltr->tunnel_type == SXE2_TNL_VXLAN) { + item = &fltr->items[SXE2_VXLAN_ENC_ID]; + item->value.hdr.udp_tnl_hdr.vni = cpu_to_be32(key_id << 8); + memcpy(&item->mask.hdr.udp_tnl_hdr.vni, "\xff\xff\xff\x00", 4); + } else if (fltr->tunnel_type == SXE2_TNL_GENEVE) 
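+ /*
+  * VXLAN/GENEVE VNIs are 24-bit, stored in the upper three bytes of
+  * the 32-bit field (hence key_id << 8 with a 0xffffff00 mask);
+  * GRETAP below matches the full 32-bit TNI instead.
+  */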
{ + item = &fltr->items[SXE2_GENEVE_ENC_ID]; + item->value.hdr.udp_tnl_hdr.vni = cpu_to_be32(key_id << 8); + memcpy(&item->mask.hdr.udp_tnl_hdr.vni, "\xff\xff\xff\x00", 4); + } else if (fltr->tunnel_type == SXE2_TNL_GRETAP) { + item = &fltr->items[SXE2_NVGRE_ENC_ID]; + item->value.hdr.nvgre_hdr.tni = cpu_to_be32(key_id); + memcpy(&item->mask.hdr.nvgre_hdr.tni, "\xff\xff\xff\xff", 4); + } else + return -EINVAL; + + LOG_DEBUG_BDF("prot_type %d, keyid[%u], mask[0x%x]\n", item->type, + enc_keyid.key->keyid, enc_keyid.mask->keyid); + return 0; +} + +static inline s32 sxe2_match_key_parse_enc_ipv4(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + if (sxe2_tc_set_ipv4(&match, fltr, true)) + return -EINVAL; + + return 0; +} + +static inline s32 sxe2_match_key_parse_enc_ipv6(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr, + struct flow_cls_offload *cls_flower) +{ + struct flow_match_ipv6_addrs match; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + if (sxe2_tc_set_ipv6(&match, fltr, true, cls_flower)) + return -EINVAL; + + return 0; +} + +static inline s32 sxe2_match_key_parse_enc_ip(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct flow_match_ip match; + + flow_rule_match_enc_ip(rule, &match); + return sxe2_tc_set_ttl_tos(&match, fltr, true); +} + +static s32 sxe2_tc_set_ipv4_proto(struct flow_match_basic *match, + struct sxe2_tcf_fltr *fltr, bool is_outer) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + + if (is_outer) + item = &fltr->items[SXE2_OUTER_IPV4_PROT]; + else + item = &fltr->items[SXE2_INNER_IPV4_PROT]; + item->value.hdr.ipv4_hdr.protocol = match->key->ip_proto; + item->mask.hdr.ipv4_hdr.protocol = match->mask->ip_proto; + LOG_DEBUG_BDF("prot_type %d, ip_protocol[%d], mask[0x%x]\n", + item->type, item->value.hdr.ipv4_hdr.protocol, + item->mask.hdr.ipv4_hdr.protocol); + + return 0; +} + +static inline s32 sxe2_match_key_parse_basic(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct sxe2_adapter *adapter = fltr->adapter; + u16 proto_mask = 0; + u16 proto_key = 0; + struct flow_match_basic match; + struct sxe2_tcf_key_item *item; + bool is_outer = true; + + flow_rule_match_basic(rule, &match); + + proto_key = ntohs(match.key->n_proto); + proto_mask = ntohs(match.mask->n_proto); + + if (proto_key == ETH_P_ALL || proto_key == 0) { + proto_key = 0; + proto_mask = 0; + } + + if (fltr->tunnel_type == SXE2_TNL_NONE) + item = &fltr->items[SXE2_OUTER_ETYPE]; + else + item = &fltr->items[SXE2_INNER_ETYPE]; + + item->value.hdr.ethertype.ethtype_id = cpu_to_be16(proto_key); + item->mask.hdr.ethertype.ethtype_id = cpu_to_be16(proto_mask); + fltr->ip_proto = match.key->ip_proto; + + if (fltr->tunnel_type != SXE2_TNL_NONE) + is_outer = false; + (void)sxe2_tc_set_ipv4_proto(&match, fltr, is_outer); + + LOG_DEBUG_BDF("prot_type %d, ethtype[%d], mask[0x%x], ip_proto %d\n", + item->type, match.key->n_proto, match.mask->n_proto, + fltr->ip_proto); + return 0; +} + +static inline s32 sxe2_match_key_parse_mac(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct flow_match_eth_addrs match; + struct sxe2_tcf_key_item *item; + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.key->dst)) { + if (fltr->tunnel_type == SXE2_TNL_NONE) + item = &fltr->items[SXE2_OUTER_DMAC]; + else + item = &fltr->items[SXE2_INNER_DMAC]; + 
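+ /*
+  * For tunnelled flows the plain ETH_ADDRS key describes the inner
+  * Ethernet header, so the match is stored in the SXE2_INNER_* slot;
+  * without a tunnel it is an outer-header match.
+  */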
ether_addr_copy(item->value.hdr.eth_hdr.dst_addr, + match.key->dst); + ether_addr_copy(item->mask.hdr.eth_hdr.dst_addr, + match.mask->dst); + LOG_DEBUG_BDF("prot_type %d, dmac[%pM], mask[%pM]\n", + item->type, match.key->dst, match.mask->dst); + } + + if (!is_zero_ether_addr(match.key->src)) { + if (fltr->tunnel_type == SXE2_TNL_NONE) + item = &fltr->items[SXE2_OUTER_SMAC]; + else + item = &fltr->items[SXE2_INNER_SMAC]; + ether_addr_copy(item->value.hdr.eth_hdr.src_addr, + match.key->src); + ether_addr_copy(item->mask.hdr.eth_hdr.src_addr, + match.mask->src); + LOG_DEBUG_BDF("prot_type %d, smac[%pM], mask[%pM]\n", + item->type, match.key->src, match.mask->src); + } + return 0; +} + +static inline s32 sxe2_match_key_parse_vlan(struct net_device *filter_dev, + struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct flow_dissector_key_vlan mask; + struct flow_dissector_key_vlan key; + struct flow_match_vlan match; + struct sxe2_tcf_key_item *item; + + item = &fltr->items[SXE2_OUTER_VLAN_EX]; + if (is_vlan_dev(filter_dev)) { + match.key = &key; + match.key->vlan_id = vlan_dev_vlan_id(filter_dev); + match.key->vlan_priority = 0; + match.mask = &mask; + memset(match.mask, 0xff, sizeof(*match.mask)); + match.mask->vlan_priority = 0; + } else { + flow_rule_match_vlan(rule, &match); + } + + if (match.mask->vlan_id && match.mask->vlan_id != VLAN_VID_MASK) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Bad VLAN mask"); +#endif + return -EINVAL; + } + + if (match.mask->vlan_tpid) { + item->value.hdr.vlan_hdr.type = match.key->vlan_tpid; + item->mask.hdr.vlan_hdr.type = match.mask->vlan_tpid; + LOG_DEBUG_BDF("prot_type %d, vlan_tpid[%d], mask[0x%x]\n", + item->type, match.key->vlan_tpid, + match.mask->vlan_tpid); + } + + if (match.mask->vlan_id) { + item->value.hdr.vlan_hdr.vlan |= + cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK); + item->mask.hdr.vlan_hdr.vlan |= cpu_to_be16(VLAN_VID_MASK); + LOG_DEBUG_BDF("prot_type %d, vlan_id[%d], mask[0x%x], is_vlan_dev %d\n", + item->type, match.key->vlan_id, match.mask->vlan_id, + is_vlan_dev(filter_dev)); + } + + if (match.mask->vlan_priority) { + item->value.hdr.vlan_hdr.vlan |= + be16_encode_bits(match.key->vlan_priority, VLAN_PRIO_MASK); + item->mask.hdr.vlan_hdr.vlan |= cpu_to_be16(VLAN_PRIO_MASK); + LOG_DEBUG_BDF("prot_type %d, vlan_priority[%d], mask[0x%x], is_vlan_dev %d\n", + item->type, match.key->vlan_priority, + match.mask->vlan_priority, is_vlan_dev(filter_dev)); + } + return 0; +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN +static inline s32 sxe2_match_key_parse_cvlan(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct flow_match_vlan match; + struct sxe2_tcf_key_item *item; + + item = &fltr->items[SXE2_OUTER_VLAN]; + + flow_rule_match_cvlan(rule, &match); + if (match.mask->vlan_id && match.mask->vlan_id != VLAN_VID_MASK) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Bad CVLAN mask"); +#endif + return -EINVAL; + } + + if (match.mask->vlan_tpid) { + item->value.hdr.vlan_hdr.type = match.key->vlan_tpid; + item->mask.hdr.vlan_hdr.type = match.mask->vlan_tpid; + LOG_DEBUG_BDF("prot_type %d, vlan_tpid[%d], mask[0x%x]\n", + item->type, match.key->vlan_tpid, + match.mask->vlan_tpid); + } + + if (match.mask->vlan_id) { + item->value.hdr.vlan_hdr.vlan |= + 
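+ /*
+  * CVLAN is the inner (customer) tag of a QinQ pair: it fills the
+  * SXE2_OUTER_VLAN slot, while the outermost tag parsed by
+  * sxe2_match_key_parse_vlan() uses SXE2_OUTER_VLAN_EX.
+  */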
cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK); + item->mask.hdr.vlan_hdr.vlan |= cpu_to_be16(VLAN_VID_MASK); + LOG_DEBUG_BDF("prot_type %d, vlan_id[%d], mask[0x%x]\n", + item->type, match.key->vlan_id, + match.mask->vlan_id); + } + + if (match.mask->vlan_priority) { + item->value.hdr.vlan_hdr.vlan |= + be16_encode_bits(match.key->vlan_priority, VLAN_PRIO_MASK); + item->mask.hdr.vlan_hdr.vlan |= cpu_to_be16(VLAN_PRIO_MASK); + LOG_DEBUG_BDF("prot_type %d, vlan_priority[%d], mask[0x%x]\n", + item->type, match.key->vlan_priority, + match.mask->vlan_priority); + } + return 0; +} +#endif + +static inline s32 sxe2_match_key_parse_control(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr, + struct flow_cls_offload *cls_flower) +{ + u16 addr_type = 0; + struct flow_match_control match_c; + bool is_outer = true; + + flow_rule_match_control(rule, &match_c); + addr_type = match_c.key->addr_type; + if (fltr->tunnel_type != SXE2_TNL_NONE) + is_outer = false; + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (sxe2_tc_set_ipv4(&match, fltr, is_outer)) + return -EINVAL; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + if (sxe2_tc_set_ipv6(&match, fltr, is_outer, cls_flower)) + return -EINVAL; + } + return 0; +} + +static inline s32 sxe2_match_key_parse_ip(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr) +{ + struct flow_match_ip match; + bool is_outer = true; + + flow_rule_match_ip(rule, &match); + if (fltr->tunnel_type != SXE2_TNL_NONE) + is_outer = false; + + return sxe2_tc_set_ttl_tos(&match, fltr, is_outer); +} + +static inline s32 sxe2_match_key_parse_port(struct flow_rule *rule, + struct sxe2_tcf_fltr *fltr, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_adapter *adapter = fltr->adapter; + struct flow_match_ports match; + struct sxe2_tcf_key_item *item; + + flow_rule_match_ports(rule, &match); + if (fltr->ip_proto != IPPROTO_TCP && fltr->ip_proto != IPPROTO_UDP) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Only UDP and TCP transport are supported"); +#endif + return -EINVAL; + } + + if (match.key->dst) { + if (fltr->ip_proto == IPPROTO_TCP) { + item = &fltr->items[SXE2_LAST_TCP_DPORT]; + item->value.hdr.tcp_hdr.dest = match.key->dst; + item->mask.hdr.tcp_hdr.dest = match.mask->dst; + } else { + if (fltr->tunnel_type == SXE2_TNL_NONE) + item = &fltr->items[SXE2_OUTER_UDP_DPORT]; + else + item = &fltr->items[SXE2_INNER_UDP_DPORT]; + item->value.hdr.udp_hdr.dest = match.key->dst; + item->mask.hdr.udp_hdr.dest = match.mask->dst; + } + LOG_DEBUG_BDF("prot_type %d, dport[%d], mask[0x%x]\n", + item->type, match.key->dst, match.mask->dst); + } + if (match.key->src) { + if (fltr->ip_proto == IPPROTO_TCP) { + item = &fltr->items[SXE2_LAST_TCP_SPORT]; + item->value.hdr.tcp_hdr.source = match.key->src; + item->mask.hdr.tcp_hdr.source = match.mask->src; + } else { + if (fltr->tunnel_type == SXE2_TNL_NONE) + item = &fltr->items[SXE2_OUTER_UDP_SPORT]; + else + item = &fltr->items[SXE2_INNER_UDP_SPORT]; + item->value.hdr.udp_hdr.source = match.key->src; + item->mask.hdr.udp_hdr.source = match.mask->src; + } + LOG_DEBUG_BDF("prot_type %d, sport[%d], mask[0x%x]\n", + item->type, match.key->src, match.mask->src); + } + + return 0; +} + +static s32 sxe2_tcf_match_tunnel_parse(enum sxe2_tunnel_type tunnel_type, + struct flow_rule *rule, + struct sxe2_tcf_fltr 
*fltr, + struct flow_cls_offload *cls_flower) +{ + struct flow_match_control enc_control; + u16 addr_type = 0; + s32 ret; + + fltr->tunnel_type = tunnel_type; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + ret = sxe2_match_key_parse_enc_keyid(rule, fltr); + if (ret) + return ret; + } + + flow_rule_match_enc_control(rule, &enc_control); + addr_type = enc_control.key->addr_type; + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + ret = sxe2_match_key_parse_enc_ipv4(rule, fltr); + if (ret) + return ret; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + ret = sxe2_match_key_parse_enc_ipv6(rule, fltr, cls_flower); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + ret = sxe2_match_key_parse_enc_ip(rule, fltr); + if (ret) + return ret; + } + + return 0; +} + +static s32 sxe2_tcf_match_parse(struct net_device *filter_dev, + struct sxe2_adapter *adapter, + struct flow_cls_offload *cls_flower, + struct sxe2_tcf_fltr *fltr) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); + struct flow_dissector *dissector = rule->match.dissector; + struct net_device *tunnel_dev; + enum sxe2_tunnel_type tunnel_type; + s32 ret; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_CVLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | +#ifdef HAVE_TC_FLOWER_ENC + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP)| +#endif + BIT(FLOW_DISSECTOR_KEY_IP))) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "Unsupported key used"); +#endif + return -EOPNOTSUPP; + } + + tunnel_dev = sxe2_tunnel_device_get(filter_dev, rule); + if (tunnel_dev) { + tunnel_type = sxe2_tc_tun_type_get(tunnel_dev); + if (tunnel_type == SXE2_TNL_NONE) { + LOG_DEV_ERR("Tunnel HW offload is not supported for the tunnel type"); + return -EOPNOTSUPP; + } + ret = sxe2_tcf_match_tunnel_parse(tunnel_type, rule, fltr, cls_flower); + if (ret) { + LOG_DEV_ERR("Failed to parse TC flower tunnel attributes"); + return ret; + } + } else { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Tunnel key used, but device isn't a tunnel"); +#endif + return -EOPNOTSUPP; + } + fltr->tunnel_type = SXE2_TNL_NONE; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + ret = sxe2_match_key_parse_basic(rule, fltr); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + ret = sxe2_match_key_parse_mac(rule, fltr); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || + is_vlan_dev(filter_dev)) { + ret = sxe2_match_key_parse_vlan(filter_dev, rule, fltr, cls_flower); + if (ret) + return ret; + } + +#ifdef HAVE_FLOW_DISSECTOR_KEY_CVLAN + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + ret = sxe2_match_key_parse_cvlan(rule, fltr, cls_flower); + if (ret) + return ret; + } +#endif + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + ret = 
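+ /*
+  * The CONTROL key carries addr_type, which routes the
+  * IPV4_ADDRS/IPV6_ADDRS match into the outer or inner address
+  * slots depending on whether a tunnel was detected.
+  */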
sxe2_match_key_parse_control(rule, fltr, cls_flower); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + ret = sxe2_match_key_parse_ip(rule, fltr); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + ret = sxe2_match_key_parse_port(rule, fltr, cls_flower); + if (ret) + return ret; + } + + return 0; +} +#endif + +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE +static bool sxe2_is_uplink_netdev(struct net_device *dev) +{ + return sxe2_netdev_is(dev) || (sxe2_tc_tun_type_get(dev) != SXE2_TNL_NONE); +} + +static bool sxe2_netdev_pf_check(struct sxe2_adapter *adapter, + struct net_device *out_dev) +{ + struct sxe2_netdev_priv *np; + + np = netdev_priv(out_dev); + + if (sxe2_tc_tun_type_get(out_dev) != SXE2_TNL_NONE) + return true; + + if (sxe2_is_repr_netdev(out_dev) && np->repr->vf_node->adapter == adapter) + return true; + else if (sxe2_netdev_is(out_dev) && np->vsi->adapter == adapter) + return true; + + return false; +} +#endif + +static s32 sxe2_tcf_action_parse(struct net_device *filter_dev, + struct flow_cls_offload *cls_flower, + struct sxe2_tcf_fltr *fltr) +{ +#ifdef HAVE_TC_FLOW_RULE_INFRASTRUCTURE + struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); + struct flow_action *flow_action = &rule->action; + struct flow_action_entry *act; + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_netdev_priv *np; + u32 i; + u16 vsi_id = 0; + + if (!flow_action_has_entries(flow_action)) + return -EINVAL; + + for (i = 0; i < (flow_action)->num_entries; ++i) { + act = &(flow_action)->entries[i]; + if (act->id == FLOW_ACTION_DROP) { + fltr->dst_vsi_id = 0; + fltr->action = SXE2_DROP_PACKET; + if (sxe2_is_repr_netdev(filter_dev)) { + fltr->src_type = SXE2_SRC_TYPE_TX; + } else if (sxe2_is_uplink_netdev(filter_dev)) { + fltr->src_type = SXE2_SRC_TYPE_RX; + } else { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Unsupported netdevice"); +#endif + return -EINVAL; + } + + np = netdev_priv(filter_dev); + if (sxe2_is_repr_netdev(filter_dev)) { + fltr->src_vsi_id = np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + } else if (sxe2_tc_tun_type_get(filter_dev) != SXE2_TNL_NONE) { + fltr->src_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + } else { + fltr->src_vsi_id = np->vsi->idx_in_dev; + } + } else if (act->id == FLOW_ACTION_REDIRECT || act->id == FLOW_ACTION_MIRRED) { + if (!sxe2_netdev_pf_check(adapter, act->dev)) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Unsupported netdevice"); +#endif + return -EINVAL; + } + + fltr->action = (act->id == FLOW_ACTION_REDIRECT) ? 
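+ /* FLOW_ACTION_REDIRECT maps to forward-to-VSI, FLOW_ACTION_MIRRED to packet mirroring. */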
+ SXE2_FWD_TO_VSI : SXE2_MIRROR_PACKET; + if (sxe2_is_repr_netdev(filter_dev) && sxe2_is_repr_netdev(act->dev)) { + fltr->src_type = SXE2_SRC_TYPE_TX; + np = netdev_priv(filter_dev); + fltr->src_vsi_id = np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + np = netdev_priv(act->dev); + + if (fltr->action == SXE2_FWD_TO_VSI_LIST) + set_bit(np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH], + fltr->dst_vsi_map); + else + fltr->dst_vsi_id = np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + } else if (sxe2_is_repr_netdev(filter_dev) && sxe2_netdev_is(act->dev)) { + fltr->src_type = SXE2_SRC_TYPE_TX; + np = netdev_priv(filter_dev); + fltr->src_vsi_id = np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + np = netdev_priv(act->dev); + if (fltr->action == SXE2_FWD_TO_VSI_LIST) + set_bit(np->vsi->idx_in_dev, fltr->dst_vsi_map); + else + fltr->dst_vsi_id = np->vsi->idx_in_dev; + } else if (sxe2_netdev_is(filter_dev) && + sxe2_is_repr_netdev(act->dev)) { + fltr->src_type = SXE2_SRC_TYPE_RX; + np = netdev_priv(filter_dev); + fltr->src_vsi_id = np->vsi->idx_in_dev; + np = netdev_priv(act->dev); + if (fltr->action == SXE2_FWD_TO_VSI_LIST) + set_bit(np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH], + fltr->dst_vsi_map); + else + fltr->dst_vsi_id = + np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + } else if ((sxe2_tc_tun_type_get(filter_dev) != SXE2_TNL_NONE) && + (sxe2_is_repr_netdev(act->dev))) { + fltr->src_type = SXE2_SRC_TYPE_RX; + fltr->src_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + np = netdev_priv(act->dev); + if (fltr->action == SXE2_FWD_TO_VSI_LIST) + set_bit(np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH], + fltr->dst_vsi_map); + else + fltr->dst_vsi_id = + np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + } else if ((sxe2_tc_tun_type_get(act->dev) != + SXE2_TNL_NONE) && + (sxe2_is_repr_netdev(filter_dev))) { + fltr->src_type = SXE2_SRC_TYPE_TX; + if (fltr->action == SXE2_FWD_TO_VSI_LIST) + set_bit(adapter->vsi_ctxt.main_vsi->idx_in_dev, + fltr->dst_vsi_map); + else + fltr->dst_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + np = netdev_priv(filter_dev); + fltr->src_vsi_id = np->repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + } else { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Unsupported netdevice"); +#endif + return -EINVAL; + } + + if ((fltr->action == SXE2_FWD_TO_VSI_LIST && + test_bit(fltr->src_vsi_id, fltr->dst_vsi_map)) || + (fltr->action != SXE2_FWD_TO_VSI_LIST && + fltr->src_vsi_id == fltr->dst_vsi_id)) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "can't forward from a device to itself"); +#endif + return -EOPNOTSUPP; + } + } else { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Unsupported action"); +#endif + return -EINVAL; + } + } + fltr->priority = SXE2_SWITCH_RECIPE_PRIO_7; + + if (fltr->action == SXE2_FWD_TO_VSI_LIST) { + while (true) { + vsi_id = (u16)find_next_bit( + (unsigned long *)fltr->dst_vsi_map, SXE2_VSI_MAX_CNT, vsi_id); + if (vsi_id >= SXE2_VSI_MAX_CNT) + break; + LOG_DEBUG_BDF("src_id %d, dst_id %d, src_type %d\n", fltr->src_vsi_id, + vsi_id, fltr->src_type); + vsi_id++; + } + } else { + LOG_DEBUG_BDF("src_id %d, dst_id %d, src_type %d\n", fltr->src_vsi_id, + fltr->dst_vsi_id, fltr->src_type); + } + +#endif + return 0; +} + +static void sxe2_tcf_fltr_init(struct sxe2_adapter *adapter, + struct flow_cls_offload *cls_flower, + struct sxe2_tcf_fltr *fltr) +{ + s32 i; + + memset(fltr, 0, sizeof(*fltr)); + for (i = 0; i < SXE2_PROT_FIELD_LAST; 
i++) + fltr->items[i].type = i; + + fltr->cookie = cls_flower->cookie; + fltr->adapter = adapter; +} + +s32 sxe2_tcf_word_cnt_calc(struct sxe2_tcf_fltr *fltr) +{ + u16 i, j; + struct sxe2_adapter *adapter = fltr->adapter; + struct sxe2_tcf_key_item *item; + + fltr->word_cnt = 0; + for (i = 0; i < SXE2_PROT_FIELD_LAST; i++) { + if (sxe2_tcf_item_is_empty(fltr, i)) + continue; + item = &fltr->items[i]; + for (j = 0; j < ARRAY_SIZE(item->mask.raw); j++) + if (item->mask.raw[j]) + fltr->word_cnt++; + } + + if (fltr->word_cnt > SXE2_MAX_CHAIN_WORDS) { + LOG_ERROR_BDF("word count %d is bigger than limit %d\n", + fltr->word_cnt, SXE2_MAX_CHAIN_WORDS); + return -EINVAL; + } + + LOG_DEBUG_BDF("word count %d\n", fltr->word_cnt); + return 0; +} + +static s32 sxe2_tcf_fltr_add(struct sxe2_adapter *adapter, + struct net_device *netdev, u16 vsi_id_in_dev, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_tcf_fltr *fltr; + s32 ret = 0; + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) { + LOG_ERROR_BDF("eswitch is not running\n"); + return -EOPNOTSUPP; + } + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + LOG_DEV_ERR("alloc memory failed, size %ld\n", sizeof(*fltr)); + return -ENOMEM; + } + + sxe2_tcf_fltr_init(adapter, cls_flower, fltr); + + ret = sxe2_tcf_action_parse(netdev, cls_flower, fltr); + if (ret) + goto l_end; + +#if defined(HAVE_TC_FLOWER_ENC) && defined(HAVE_TC_INDIR_BLOCK) + ret = sxe2_tcf_match_parse(netdev, adapter, cls_flower, fltr); + if (ret) + goto l_end; +#endif + + sxe2_tcf_match_meta_fill(fltr); + + ret = sxe2_tcf_word_cnt_calc(fltr); + if (ret) + goto l_end; + + ret = sxe2_tcf_profile_find(fltr); + if (ret) + goto l_end; +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + fltr->prio = cls_flower->common.prio; +#endif + ret = sxe2_tcf_rule_add(adapter, vsi_id_in_dev, fltr); + +l_end: + kfree(fltr); + return ret; +} + +STATIC bool sxe2_tcf_fltr_find_by_cookie(struct sxe2_adapter *adapter, + unsigned long cookie, bool del, + struct sxe2_rule_info **cplx_rule) +{ + struct list_head *tc_rule_list_head; + struct mutex *rule_lock; + struct sxe2_tc_rule_info *tc_list_itr = NULL; + bool is_find = false; + struct sxe2_tc_rule_hash *rule_hash_node; + + rule_lock = &adapter->switch_ctxt.complex_recipe.rule_lock; + + mutex_lock(rule_lock); + rule_hash_node = sxe2_hash_cookie_find(adapter, cookie); + if (!rule_hash_node) { + is_find = false; + } else { + is_find = true; + if (rule_hash_node->rule_info->tcf_fltr->cookie == cookie) { + *cplx_rule = rule_hash_node->rule_info; + } else { + tc_rule_list_head = &rule_hash_node->rule_info->tc_rule_head; + list_for_each_entry(tc_list_itr, tc_rule_list_head, + list_entry) { + if (rule_hash_node->rule_info->tcf_fltr && + tc_list_itr->cookie == cookie) { + if (del) { + list_del(&tc_list_itr->list_entry); + hash_del(&rule_hash_node->node); + kfree(rule_hash_node); + kfree(tc_list_itr); + } + is_find = true; + break; + } + } + } + } + mutex_unlock(rule_lock); + + return is_find; +} + +STATIC s32 sxe2_cls_flower_add(struct sxe2_adapter *adapter, + struct net_device *netdev, struct sxe2_vsi *vsi, + u16 vsi_id_in_dev, struct flow_cls_offload *cls_flower) +{ + struct sxe2_rule_info *save_rule = NULL; + struct net_device *vsi_netdev; + bool is_find = false; + s32 ret; + + if (test_bit(SXE2_FLAG_FW_DCBX_AGENT, adapter->flags)) + return -EINVAL; + + if (sxe2_is_repr_netdev(netdev)) + vsi_netdev = netdev; + else + vsi_netdev = vsi->netdev; + + if (!(vsi_netdev->features & NETIF_F_HW_TC)) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + 
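+ /* Emit the hw-tc-offload hint only when the request arrived on the VSI's own netdev. */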
if (netdev == vsi_netdev) + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "can't apply TC flower filters, " + "turn ON hw-tc-offload and try again"); +#endif + ret = -EINVAL; + goto l_end; + } + + is_find = sxe2_tcf_fltr_find_by_cookie(adapter, cls_flower->cookie, + false, &save_rule); + if (is_find) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + if (save_rule) { + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "filter cookie already exists, ignoring"); + } else { + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "filter cookie already exists on backuplist, ignoring"); + } +#endif + ret = -EEXIST; + goto l_end; + } + + ret = sxe2_tcf_fltr_add(adapter, netdev, vsi_id_in_dev, cls_flower); + +l_end: + return ret; +} + +STATIC s32 sxe2_cls_flower_del(struct sxe2_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_rule_info *save_rule = NULL; + bool is_find = false; + s32 ret = 0; + + is_find = sxe2_tcf_fltr_find_by_cookie(adapter, cls_flower->cookie, + true, &save_rule); + if (!is_find) { +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "filter cookie not exists"); +#endif + ret = -EINVAL; + goto l_end; + } + + if (!save_rule) { + ret = 0; + goto l_end; + } + + if (!list_empty(&save_rule->tc_rule_head)) { + ret = sxe2_switch_tc_samerule_del(adapter, save_rule); + if (ret) { + LOG_ERROR_BDF("switch tc samerule del failed, ret %d\n", + ret); + goto l_end; + } + } else { + ret = sxe2_fwd_rule_remove(adapter, save_rule, true); + if (ret) + LOG_DEV_ERR("cls flower del failed, ret:%d\n", ret); + } + +l_end: + return ret; +} + +s32 sxe2_setup_tc_cls_flower(struct sxe2_netdev_priv *np, + struct net_device *filter_dev, + struct flow_cls_offload *cls_flower) +{ + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + s32 ret = 0; + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; +#endif + + if (!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) { + LOG_ERROR_BDF("eswitch is not running\n"); + return -EOPNOTSUPP; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + if (cls_flower->command == FLOW_CLS_REPLACE) + ret = sxe2_cls_flower_add(adapter, filter_dev, vsi, vsi->idx_in_dev, cls_flower); + else if (cls_flower->command == FLOW_CLS_DESTROY) + ret = sxe2_cls_flower_del(adapter, cls_flower); + else + ret = -EINVAL; + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +s32 sxe2_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct sxe2_netdev_priv *np = (struct sxe2_netdev_priv *)cb_priv; + + if (type == TC_SETUP_CLSFLOWER) + return sxe2_setup_tc_cls_flower(np, np->vsi->netdev, type_data); + + return -EOPNOTSUPP; +} + +s32 sxe2_indr_setup_block_cb(enum tc_setup_type type, void *type_data, + void *indr_priv) +{ + struct sxe2_indr_block_priv *priv = indr_priv; + struct sxe2_netdev_priv *np = priv->np; + + if (type == TC_SETUP_CLSFLOWER) + return sxe2_setup_tc_cls_flower(np, priv->netdev, type_data); + + return -EOPNOTSUPP; +} +#endif +s32 sxe2_repr_setup_tc_cls_flower(struct sxe2_vf_repr *repr, + struct flow_cls_offload *cls_flower) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = repr->vf_node->adapter; + u16 vsi_id_indev; + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; +#endif + + if 
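+ /* As on the PF path, representor filters require switchdev mode to be active. */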
(!test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) { + LOG_ERROR_BDF("eswitch is not running\n"); + return -EOPNOTSUPP; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2_VSI_S_DISABLE, adapter->vsi_ctxt.main_vsi->state)) { + ret = -EBUSY; + goto l_unlock; + } + + vsi_id_indev = repr->vf_node->vsi_id[SXE2_VF_TYPE_ETH] - adapter->vsi_ctxt.base_idx_in_dev; + + if (cls_flower->command == FLOW_CLS_REPLACE) + ret = sxe2_cls_flower_add(adapter, repr->netdev, + repr->src_vsi, vsi_id_indev, cls_flower); + else if (cls_flower->command == FLOW_CLS_DESTROY) + ret = sxe2_cls_flower_del(adapter, cls_flower); + else + ret = -EINVAL; + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +s32 sxe2_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct sxe2_netdev_priv *np = (struct sxe2_netdev_priv *)cb_priv; + + if (type == TC_SETUP_CLSFLOWER) + return sxe2_repr_setup_tc_cls_flower(np->repr, type_data); + + return -EOPNOTSUPP; +} +#endif + +s32 sxe2_eswitch_vf_slow_path_rule_setup(struct sxe2_vf_node *vf_node, + bool is_user, bool is_add) +{ + struct sxe2_tcf_fltr *fltr; + s32 ret = 0; + struct sxe2_adapter *adapter = vf_node->adapter; + u16 qid_in_vsi; + struct sxe2_vsi *esw_vsi; + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + LOG_DEV_ERR("alloc memory failed, size %ld\n", sizeof(*fltr)); + return -ENOMEM; + } + + fltr->action = SXE2_FWD_TO_Q; + if (is_user) + fltr->src_vsi_id = vf_node->vsi_id[SXE2_VF_TYPE_DPDK]; + else + fltr->src_vsi_id = vf_node->vsi_id[SXE2_VF_TYPE_ETH]; + + mutex_lock(&vf_node->repr_cfg_lock); + + if (vf_node->user_repr_valid) + esw_vsi = adapter->eswitch_ctxt.user_esw_vsi; + else + esw_vsi = adapter->eswitch_ctxt.esw_vsi; + + qid_in_vsi = vf_node->vf_idx; + fltr->dst_queue_id = esw_vsi->rxqs.q[qid_in_vsi]->idx_in_pf + + adapter->q_ctxt.rxq_base_idx_in_dev; + fltr->dst_vsi_id = esw_vsi->idx_in_dev; + fltr->src_type = SXE2_SRC_TYPE_TX; + fltr->tunnel_type = SXE2_TNL_ALL; + fltr->priority = SXE2_SWITCH_RECIPE_PRIO_6; + fltr->adapter = adapter; + fltr->cookie_invalid = true; + + sxe2_tcf_match_meta_fill(fltr); + + ret = sxe2_tcf_word_cnt_calc(fltr); + if (ret) + goto l_end; + + ret = sxe2_tcf_profile_find(fltr); + if (ret) + goto l_end; + + ret = is_add ? sxe2_tcf_rule_add(adapter, vf_node->vsi_id[SXE2_VF_TYPE_ETH], fltr) : + sxe2_tcf_rule_del(adapter, vf_node->vsi_id[SXE2_VF_TYPE_ETH], fltr); + LOG_DEBUG_BDF("slowpath rule %s %s, src vsi %u, dst vsi %u, " + "queue in vsi %u, queue in dev %u\n", + is_add ? "add" : "del", ret ? 
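+ /* Slow-path rule: forward this VF's Tx traffic to the per-VF queue
+ * on the eswitch VSI, i.e. the queue backing the VF's port
+ * representor.
+ */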
"failed" : "success", + fltr->src_vsi_id, esw_vsi->idx_in_dev, qid_in_vsi, fltr->dst_queue_id); + +l_end: + mutex_unlock(&vf_node->repr_cfg_lock); + kfree(fltr); + return ret; +} + +s32 sxe2_bond_single_rule_setup(struct sxe2_adapter *adapter, bool is_add) +{ + struct sxe2_tcf_fltr *fltr; + s32 ret = 0; + struct sxe2_tcf_key_item *item; + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + LOG_DEV_ERR("alloc memory failed, size %ld\n", sizeof(*fltr)); + return -ENOMEM; + } + + fltr->action = SXE2_LARGE_ACTION; + fltr->src_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + fltr->dst_vsi_id = adapter->vsi_ctxt.main_vsi->idx_in_dev; + fltr->src_type = SXE2_SRC_TYPE_RX; + fltr->tunnel_type = SXE2_TNL_ALL; + fltr->priority = SXE2_SWITCH_RECIPE_PRIO_7; + fltr->adapter = adapter; + fltr->cookie_invalid = true; + + item = &fltr->items[SXE2_META_PKT_TO_RDMA]; + item->value.raw[SXE2_META_PKT_TO_RDMA_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_PKT_TO_RDMA << SXE2_FV_PKT_TO_RDMA_OFFSET)); + item->mask.raw[SXE2_META_PKT_TO_RDMA_OFFSET] = + cpu_to_be16((u16)SXE2_FV_PKT_TO_RDMA_MASK); + + sxe2_tcf_match_meta_fill(fltr); + + ret = sxe2_tcf_word_cnt_calc(fltr); + if (ret) + goto l_end; + + ret = sxe2_tcf_profile_find(fltr); + if (ret) + goto l_end; + + ret = is_add ? sxe2_tcf_rule_add(adapter,adapter->vsi_ctxt.main_vsi->idx_in_dev, fltr) : + sxe2_tcf_rule_del(adapter,adapter->vsi_ctxt.main_vsi->idx_in_dev, fltr); + if (ret && ret != -EEXIST) + LOG_ERROR_BDF("bond single rule %s failed, ret %d\n", is_add ? "add" : "del", ret); + else + LOG_DEBUG_BDF("bond single rule %s success\n", is_add ? "add" : "del"); + +l_end: + kfree(fltr); + return ret; +} + +static s32 sxe2_rdma_mirror_rule_setup(struct sxe2_vsi *vsi, u8 *mac, + bool is_rx, bool is_add) +{ + struct sxe2_tcf_fltr *fltr; + s32 ret = 0; + struct sxe2_tcf_key_item *item; + struct sxe2_adapter *adapter = vsi->adapter; + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + LOG_DEV_ERR("alloc memory failed, size %ld\n", sizeof(*fltr)); + return -ENOMEM; + } + + fltr->action = SXE2_MIRROR_PACKET; + fltr->src_vsi_id = vsi->idx_in_dev; + fltr->dst_vsi_id = vsi->idx_in_dev; + fltr->src_type = is_rx ? SXE2_SRC_TYPE_RX : SXE2_SRC_TYPE_TX; + fltr->tunnel_type = SXE2_TNL_ALL; + fltr->priority = SXE2_SWITCH_RECIPE_PRIO_7; + fltr->adapter = adapter; + fltr->cookie_invalid = true; + + item = &fltr->items[SXE2_META_PKT_TO_RDMA]; + item->value.raw[SXE2_META_PKT_TO_RDMA_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_PKT_TO_RDMA << SXE2_FV_PKT_TO_RDMA_OFFSET)); + item->mask.raw[SXE2_META_PKT_TO_RDMA_OFFSET] = cpu_to_be16((u16)SXE2_FV_PKT_TO_RDMA_MASK); + + if (is_rx) { + item = &fltr->items[SXE2_OUTER_DMAC]; + ether_addr_copy(item->value.hdr.eth_hdr.dst_addr, mac); + eth_broadcast_addr(item->mask.hdr.eth_hdr.dst_addr); + } else { + item = &fltr->items[SXE2_OUTER_SMAC]; + ether_addr_copy(item->value.hdr.eth_hdr.src_addr, mac); + eth_broadcast_addr(item->mask.hdr.eth_hdr.src_addr); + } + + item = &fltr->items[SXE2_META_PKT_DIRECTION]; + if (is_rx) { + item->value.raw[SXE2_META_PKT_DIRECTION_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_DIRECTION_RX << SXE2_FV_DIRECTION_OFFSET)); + } else { + item->value.raw[SXE2_META_PKT_DIRECTION_OFFSET] = + cpu_to_be16((u16)(SXE2_FV_DIRECTION_TX << SXE2_FV_DIRECTION_OFFSET)); + } + item->mask.raw[SXE2_META_PKT_DIRECTION_OFFSET] = cpu_to_be16((u16)SXE2_FV_DIRECTION_MASK); + + ret = sxe2_tcf_word_cnt_calc(fltr); + if (ret) + goto l_end; + + ret = sxe2_tcf_profile_find(fltr); + if (ret) + goto l_end; + + ret = is_add ? 
sxe2_tcf_rule_add(adapter, vsi->idx_in_dev, fltr) :
+ sxe2_tcf_rule_del(adapter, vsi->idx_in_dev, fltr);
+ if (ret && ret != -EEXIST) {
+ LOG_ERROR_BDF("rdma %s mirror rule %s failed, ret %d\n",
+ is_rx ? "rx" : "tx", is_add ? "add" : "del", ret);
+ } else {
+ LOG_DEBUG_BDF("rdma %s mirror rule %s success\n",
+ is_rx ? "rx" : "tx", is_add ? "add" : "del");
+ }
+
+l_end:
+ kfree(fltr);
+ return ret;
+}
+
+s32 sxe2_rdma_dump_pcap_setup(struct sxe2_vsi *vsi, u8 *mac, bool is_add)
+{
+ s32 ret = 0;
+
+ ret = sxe2_rdma_mirror_rule_setup(vsi, mac, true, is_add);
+ if (ret)
+ goto l_end;
+
+ ret = sxe2_rdma_mirror_rule_setup(vsi, mac, false, is_add);
+ if (ret)
+ (void)sxe2_rdma_mirror_rule_setup(vsi, mac, true, !is_add);
+
+l_end:
+ return ret;
+}
+
+s32 sxe2_eswitch_vf_slow_path_rule_update(struct sxe2_adapter *adapter,
+ u16 vsi_id, struct sxe2_vf_repr_cfg *repr_cfg)
+{
+ struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt;
+ struct mutex *rule_lock;
+ struct mutex *vsi_list_lock;
+ s32 ret = 0;
+ struct sxe2_tcf_fltr *tcf_fltr;
+ u32 bkt;
+ struct hlist_node *temp;
+ struct sxe2_rule_info *rule_info;
+
+ rule_lock = &switch_ctxt->complex_recipe.rule_lock;
+ vsi_list_lock = &switch_ctxt->vsi_list_mgmt[SXE2_VSI_LIST_TYPE_FORWARD].vsi_list_lock;
+
+ mutex_lock(rule_lock);
+ mutex_lock(vsi_list_lock);
+ hash_for_each_safe(adapter->switch_ctxt.complex_recipe.ht_lkup, bkt, temp, tcf_fltr, node) {
+ if (tcf_fltr->src_vsi_id == vsi_id &&
+ tcf_fltr->action == SXE2_FWD_TO_Q &&
+ !tcf_fltr->is_user_rule) {
+ rule_info = tcf_fltr->rule_info;
+ if (repr_cfg->cfg_to_user)
+ rule_info->tcf_fltr->dst_queue_id = repr_cfg->queue_in_dev_u;
+ else
+ rule_info->tcf_fltr->dst_queue_id = repr_cfg->queue_in_dev;
+
+ rule_info->act.fwd_id.q_id = rule_info->tcf_fltr->dst_queue_id;
+ ret = sxe2_fwd_rule_update(adapter, rule_info);
+ if (ret)
+ LOG_ERROR_BDF("request to admin q failed, ret %d\n", ret);
+
+ break;
+ }
+ }
+
+ mutex_unlock(vsi_list_lock);
+ mutex_unlock(rule_lock);
+ return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.h
new file mode 100644
index 0000000000000000000000000000000000000000..b648b39baf1afdb17517ee196c67a8210d5f5780
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2_tc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_TC_H__ +#define __SXE2_TC_H__ + +#include +#include "sxe2_eswitch.h" + +enum sxe2_tunnel_type sxe2_tc_tun_type_get(struct net_device *tunnel_dev); + +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +s32 sxe2_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); + +s32 sxe2_indr_setup_block_cb(enum tc_setup_type type, void *type_data, void *indr_priv); + +s32 sxe2_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); +#endif + +s32 sxe2_setup_tc_cls_flower(struct sxe2_netdev_priv *np, + struct net_device *filter_dev, + struct flow_cls_offload *cls_flower); + +s32 sxe2_repr_setup_tc_cls_flower(struct sxe2_vf_repr *repr, + struct flow_cls_offload *cls_flower); + +s32 sxe2_eswitch_vf_slow_path_rule_setup(struct sxe2_vf_node *vf_node, + bool is_user, bool is_add); + +s32 sxe2_bond_single_rule_setup(struct sxe2_adapter *adapter, bool is_add); + +s32 sxe2_rdma_dump_pcap_setup(struct sxe2_vsi *vsi, u8 *mac, bool is_add); + +s32 sxe2_tcf_word_cnt_calc(struct sxe2_tcf_fltr *fltr); + +s32 sxe2_eswitch_vf_slow_path_rule_update(struct sxe2_adapter *adapter, + u16 vsi_id, struct sxe2_vf_repr_cfg *repr_cfg); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..4bb880560a1a44e2f5e3fa7ca51bd4d79e294ec5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.c @@ -0,0 +1,2239 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_tx.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2_compat.h" +#include "sxe2.h" +#include "sxe2_tx.h" +#include "sxe2_hw.h" +#include "sxe2_vsi.h" +#include "sxe2_log.h" +#include "sxe2_cmd.h" +#include "sxe2_dcb.h" +#include "sxe2_queue.h" +#include "sxe2_netdev.h" +#include "sxe2_common.h" +#include "sxe2_txsched.h" +#include "sxe2_dev_ctrl.h" +#include "sxe2_skb_dump.h" +#include "sxe2_fnav.h" +#include "sxe2_xsk.h" + +#define SXE2_MIN_TX_LEN 17 + +#define SXE2_DFLT_IRQ_WORK 256 +#define SXE2_CACHE_LINE_BYTES 64 +#define SXE2_DESCS_PER_CACHE_LINE (SXE2_CACHE_LINE_BYTES / sizeof(union sxe2_tx_data_desc)) +#define SXE2_DESCS_FOR_CTXT_DESC 1 +#define SXE2_DESCS_FOR_SKB_DATA_PTR 1 + +#define SXE2_MAX_DATA_DESC_PER_SKB 15 +#define SXE2_TSO_SEG_DESC_USE_FOR_FRAGMENT (SXE2_MAX_DATA_DESC_PER_SKB - 2) + +#define SXE2_TX_DESC_NEEDED (MAX_SKB_FRAGS + SXE2_DESCS_FOR_CTXT_DESC + \ + SXE2_DESCS_PER_CACHE_LINE + SXE2_DESCS_FOR_SKB_DATA_PTR) +#define SXE2_TX_WAKE_THRESHOLD (SXE2_TX_DESC_NEEDED * 2) + +#define SXE2_MAX_DATA_PER_TXD ((1u << 14) - 1) + +#ifdef SXE2_TX_4K_AILGN +#define SXE2_MAX_READ_REQ_SIZE 4096 +#define SXE2_MAX_DATA_PER_TXD_ALIGNED (~(SXE2_MAX_READ_REQ_SIZE - 1) & SXE2_MAX_DATA_PER_TXD) + +#define SXE2_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(frag_size) (((frag_size * 85) >> 20) + 1) +#else + +#define SXE2_MAX_DATA_PER_TXD_ALIGNED SXE2_MAX_DATA_PER_TXD + +#define SXE2_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(frag_size) ((frag_size >> 14) + 1) + +#endif + +#define SXE2_FNAV_DESC_CLEAN_DELAY 10 +#define SXE2_FNAV_DESC_NEED_USE 2 + +static struct netdev_queue *sxe2_netdev_txq_get(const struct sxe2_queue *txq) +{ + return netdev_get_tx_queue(txq->netdev, txq->idx_in_vsi); +} + +STATIC void sxe2_tx_buffer_unmap(struct sxe2_queue *txq, + struct sxe2_tx_buf *tx_buf) 
+{ + if (tx_buf->skb) { + if (sxe2_queue_is_xdp(txq)) + page_frag_free(tx_buf->raw_buf); + else { + dev_kfree_skb_any(tx_buf->skb); + + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_single(txq->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } + } + + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); +} + +void sxe2_tx_ring_clean(struct sxe2_queue *txq) +{ + u16 i; + u32 size; + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + if (sxe2_queue_is_xdp(txq) && txq->xsk_pool) { + sxe2_xsk_clean_xdp_ring(txq); + goto tx_skip_free; + } +#endif + + if (!txq->tx_buf) + return; + + for (i = 0; i < txq->depth; i++) + sxe2_tx_buffer_unmap(txq, &txq->tx_buf[i]); + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +tx_skip_free: +#endif + (void)memset(txq->tx_buf, 0, sizeof(*txq->tx_buf) * txq->depth); + + size = ALIGN(txq->depth * sizeof(union sxe2_tx_data_desc), PAGE_SIZE); + (void)memset(txq->desc.base_addr, 0, size); + + txq->next_to_use = 0; + txq->next_to_clean = 0; + + if (txq->netdev) { + netdev_tx_reset_queue(sxe2_netdev_txq_get(txq)); + } +} + +void sxe2_tx_ring_free(struct sxe2_queue *txq) +{ + u32 size; + + if (txq->tx_buf) { + devm_kfree(txq->dev, txq->tx_buf); + txq->tx_buf = NULL; + } + + if (txq->desc.base_addr) { + size = ALIGN(txq->depth * sizeof(union sxe2_tx_data_desc), + PAGE_SIZE); + dmam_free_coherent(txq->dev, size, txq->desc.base_addr, + txq->desc.dma); + txq->desc.base_addr = NULL; + } +} + +static void sxe2_tx_rings_free(struct sxe2_vsi *vsi) +{ + u32 i; + + sxe2_for_each_vsi_txq(vsi, i) + sxe2_tx_ring_free(vsi->txqs.q[i]); +} + +static void sxe2_tx_ring_res_free(struct sxe2_queue *txq) +{ + sxe2_tx_ring_clean(txq); + sxe2_tx_ring_free(txq); +} + +void sxe2_tx_rings_res_free(struct sxe2_vsi *vsi) +{ + u32 i; + + sxe2_for_each_vsi_txq(vsi, i) + sxe2_tx_ring_res_free(vsi->txqs.q[i]); + + if (sxe2_xdp_is_enable(vsi)) { + for (i = 0; i < vsi->num_xdp_txq; i++) + sxe2_tx_ring_res_free(vsi->xdp_rings.q[i]); + } +} + +s32 sxe2_tx_ring_alloc(struct sxe2_queue *txq, struct sxe2_vsi *vsi) +{ + u32 size; + struct device *dev = txq->dev; + struct sxe2_adapter *adapter = txq->vsi->adapter; + struct sxe2_desc_ring *desc_ring = &txq->desc; + + txq->tx_buf = devm_kcalloc(dev, sizeof(struct sxe2_tx_buf), txq->depth, + GFP_KERNEL); + if (!txq->tx_buf) { + LOG_ERROR_BDF("unable to allocate memory for tx buf ring\n"); + goto l_end; + } + + size = ALIGN(txq->depth * sizeof(union sxe2_tx_data_desc), PAGE_SIZE); + desc_ring->base_addr = + dmam_alloc_coherent(dev, size, &desc_ring->dma, GFP_KERNEL); + if (!desc_ring->base_addr) { + LOG_DEV_ERR("unable to allocate memory for the Tx descriptor ring, size=%u\n", + size); + goto l_alloc_failed; + } + + if (vsi->netdev) + txq->netdev = vsi->netdev; + txq->next_to_use = 0; + txq->next_to_clean = 0; + txq->tx_tstamps = &adapter->ptp_ctxt.tx; + return 0; + +l_alloc_failed: + devm_kfree(dev, txq->tx_buf); + txq->tx_buf = NULL; +l_end: + return -ENOMEM; +} + +static void sxe2_txq_tc_setup(struct sxe2_vsi *vsi) +{ + u8 n; + u32 i; + u16 qoffset, qcount; + struct sxe2_queue *txq; + + if (!test_bit(SXE2_FLAG_DCB_ENABLE, vsi->adapter->flags)) { + sxe2_for_each_vsi_txq(vsi, i) { + txq = vsi->txqs.q[i]; + txq->dcb_tc = 0; + } + } + + sxe2_for_each_tc(n) { + if (!(vsi->tc.tc_map & BIT(n))) + break; + + qoffset = vsi->tc.info[n].txq_offset; + qcount = vsi->tc.info[n].txq_cnt; + 
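+ /* Tag each Tx queue in this TC's contiguous [offset, offset + count)
+ * range with its traffic class; sxe2_txq_ctxt_fill() later derives
+ * the CGD index from dcb_tc.
+ */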
for (i = qoffset; i < (qoffset + qcount); i++) + vsi->txqs.q[i]->dcb_tc = n; + } +} + +s32 sxe2_tx_rings_alloc(struct sxe2_vsi *vsi) +{ + s32 ret; + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_for_each_vsi_txq(vsi, i) { + ret = sxe2_tx_ring_alloc(vsi->txqs.q[i], vsi); + if (ret) { + LOG_ERROR_BDF("allocation for Tx queue %d failed, ret=%d\n", i, ret); + goto l_end; + } + } + + sxe2_txq_tc_setup(vsi); + + return 0; +l_end: + while (i--) + sxe2_tx_ring_free(vsi->txqs.q[i]); + + return ret; +} + +STATIC void sxe2_txq_xps_configure(struct sxe2_queue *txq) +{ + if (!txq->irq_data || !txq->netdev) + return; + if (test_and_set_bit(SXE2_TX_XPS_INIT_DONE, txq->xps_state)) + return; + + (void)netif_set_xps_queue(txq->netdev, &txq->irq_data->affinity_mask, + txq->idx_in_vsi); +} + +s32 sxe2_txq_ctxt_fill(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + struct sxe2_txq_ctxt *ctxt) +{ + struct sxe2_adapter *adapter = vsi->adapter; + + txq->desc.tail = sxe2_reg_addr_get(&adapter->hw, SXE2_TXQ_LEGACY_DBLL(txq->idx_in_pf)); + if (IS_ERR(txq->desc.tail)) { + LOG_ERROR_BDF("vsi:%u queue:%u tail addr: %ld error.\n", + vsi->idx_in_dev, txq->idx_in_vsi, + PTR_ERR(txq->desc.tail)); + return -EFAULT; + } + + ctxt->q_idx_in_nic = adapter->q_ctxt.txq_base_idx_in_dev + txq->idx_in_pf; + + ctxt->base_addr = txq->desc.dma; + + ctxt->port_idx = adapter->port_idx; + ctxt->pf_idx = adapter->pf_idx; + ctxt->cgd_idx = txq->dcb_tc + (SXE2_TC_MAX_CNT * adapter->port_idx); + + if (txq->vsi->type == SXE2_VSI_T_VF) { + ctxt->vmvf_idx = adapter->vf_ctxt.vfid_base + txq->vsi->vf_node->vf_idx; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_VF; + } else if (txq->vsi->type == SXE2_VSI_T_PF) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_PF; + } else if (txq->vsi->type == SXE2_VSI_T_LB) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_PF; + } else if (txq->vsi->type == SXE2_VSI_T_MACVLAN) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_VM; + } else if (txq->vsi->type == SXE2_VSI_T_ESW) { + ctxt->vmvf_idx = vsi->idx_in_dev; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_VM; + } else if (txq->vsi->type == SXE2_VSI_T_CTRL) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_PF; + } else if (txq->vsi->type == SXE2_VSI_T_DPDK_PF) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_PF; + } else if (txq->vsi->type == SXE2_VSI_T_DPDK_ESW) { + ctxt->vmvf_idx = 0; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_PF; + } else if (txq->vsi->type == SXE2_VSI_T_DPDK_VF) { + ctxt->vmvf_idx = + adapter->vf_ctxt.vfid_base + txq->vsi->vf_node->vf_idx; + ctxt->vmvf_type = SXE2_TXQ_VMVF_TYPE_VF; + } else { + LOG_INFO_BDF("vsi is neither pf nor vf.\n"); + } + ctxt->tsyn_enable = 0; + + ctxt->alt_vlan = 0; + ctxt->adv_sso = 0; + ctxt->wb_mode = 0; + ctxt->itr_notify_mode = 0; + ctxt->legacy_enable = SXE2_TXQ_LEGACY; + + if (vsi->type == SXE2_VSI_T_CTRL && !vsi->vf_node) + ctxt->src_vsi = adapter->vsi_ctxt.main_vsi->idx_in_dev; + else if (vsi->type == SXE2_VSI_T_CTRL && vsi->vf_node && vsi->vf_node->vsi) + ctxt->src_vsi = vsi->vf_node->vsi->idx_in_dev; + else + ctxt->src_vsi = vsi->idx_in_dev; + + ctxt->q_idx_in_func = txq->idx_in_pf; + ctxt->qlen = txq->depth; + ctxt->ptp_en = 1; + + return 0; +} + +s32 sxe2_fwc_txq_ctxt_cfg(struct sxe2_vsi *vsi, + struct sxe2_fwc_cfg_txq_req *req) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_fwc_cfg_txq_resp resp = { 0 }; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQ_CFG_AND_ENABLE, req, + 
sizeof(*req), &resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("txq cfg failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC bool sxe2_hw_txq_enable_check(struct sxe2_vsi *vsi, + struct sxe2_queue *txq) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_st_txq_req req = {}; + struct sxe2_fwc_st_txq_resp resp = {}; + struct sxe2_adapter *adapter = vsi->adapter; + + req.txq_idx_in_nic = adapter->q_ctxt.txq_base_idx_in_dev + txq->idx_in_pf; + req.txq_idx_in_func = txq->idx_in_pf; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQ_STATE, &req, sizeof(req), + &resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("txq enable check failed, ret=%d\n", ret); + return false; + } + + LOG_INFO_BDF("hw tx txq[%u] enable %d\n", txq->idx_in_pf, resp.state); + + return resp.state; +} + +s32 sxe2_hw_txqs_disable_check(struct sxe2_vsi *vsi) +{ + u32 i; + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_queue **txq = vsi->txqs.q; + + sxe2_for_each_vsi_txq(vsi, i) { + if (sxe2_hw_txq_enable_check(vsi, txq[i])) { + LOG_ERROR_BDF("txq enable check: txq_in_vsi %d txq_in_pf %d is enable.\n", + txq[i]->idx_in_vsi, txq[i]->idx_in_pf); + ret = -EBUSY; + break; + } + } + + return ret; +} + +s32 sxe2_hw_txq_configure(struct sxe2_vsi *vsi, struct sxe2_queue *txq) +{ + s32 ret; + struct sxe2_fwc_cfg_txq_req req = {}; + struct sxe2_adapter *adapter = vsi->adapter; + + ret = sxe2_txq_ctxt_fill(vsi, txq, &req.ctxt); + if (ret) + return ret; + + if (sxe2_txsched_support_chk(adapter)) { + ret = sxe2_txsched_txq_node_add(adapter, vsi, txq, + SXE2_TXSCHED_NODE_OWNER_LAN, &req); + if (ret) { + LOG_ERROR("hw tx txq[%u] start failed\n", + txq->idx_in_pf); + return ret; + } + } else { + ret = sxe2_fwc_txq_ctxt_cfg(vsi, &req); + if (ret) { + LOG_ERROR("hw tx txq[%u] start failed\n", + txq->idx_in_pf); + return ret; + } + } + + LOG_INFO_BDF("hw tx txq[%u] start success\n", txq->idx_in_pf); + + return ret; +} + +s32 sxe2_tx_hw_cfg(struct sxe2_vsi *vsi) +{ + u32 i = 0; + u32 j; + s32 ret = 0; + struct sxe2_queue **txq = vsi->txqs.q; + + sxe2_for_each_vsi_txq(vsi, i) { + sxe2_txq_xps_configure(txq[i]); + ret = sxe2_hw_txq_configure(vsi, txq[i]); + if (ret) + break; + } + + if (ret) { + for (j = 0; j < i; j++) + (void)sxe2_txq_stop(vsi, txq[j]); + } + + return ret; +} + +s32 sxe2_xdp_tx_hw_cfg(struct sxe2_vsi *vsi) +{ + u32 i = 0; + u32 j; + s32 ret = 0; + struct sxe2_queue **txq = vsi->xdp_rings.q; + + for (i = 0; i < vsi->num_xdp_txq; i++) { + ret = sxe2_hw_txq_configure(vsi, txq[i]); + if (ret) + goto end; + } + +#ifdef HAVE_AF_XDP_ZC_SUPPORT + for (i = 0; i < vsi->num_xdp_txq; i++) + vsi->xdp_rings.q[i]->xsk_pool = sxe2_xsk_pool(vsi->xdp_rings.q[i]); +#endif + +end: + if (ret) { + for (j = 0; j < i; j++) + (void)sxe2_txq_stop(vsi, txq[j]); + } + + return ret; +} + +s32 sxe2_fwc_txq_stop(struct sxe2_vsi *vsi, struct sxe2_queue *txq) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_disable_txq_req req; + struct sxe2_adapter *adapter = vsi->adapter; + + req.txq_idx_in_nic = adapter->q_ctxt.txq_base_idx_in_dev + txq->idx_in_pf; + req.txq_idx_in_func = txq->idx_in_pf; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXQ_DISABLE, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("txq disable failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_txq_stop(struct sxe2_vsi *vsi, struct sxe2_queue *txq) +{ + s32 ret; + 
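+ /* Tear the queue down via its Tx scheduler node when the scheduler
+ * is supported; otherwise fall back to the plain TXQ_DISABLE
+ * firmware command.
+ */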
struct sxe2_adapter *adapter = vsi->adapter; + + if (sxe2_txsched_support_chk(adapter)) { + ret = sxe2_txsched_txq_node_del(adapter, txq); + if (ret) { + LOG_ERROR_BDF("vsi %d type %d hw tx txq[%u] stop failed\n", + vsi->id_in_pf, vsi->type, txq->idx_in_pf); + return ret; + } + } else { + ret = sxe2_fwc_txq_stop(vsi, txq); + if (ret) { + LOG_ERROR_BDF("vsi %d type %d hw tx txq[%u] stop failed\n", + vsi->id_in_pf, vsi->type, txq->idx_in_pf); + return ret; + } + } + + LOG_INFO_BDF("hw tx txq[%u] stop success\n", txq->idx_in_pf); + return ret; +} + +s32 sxe2_txqs_stop(struct sxe2_vsi *vsi) +{ + u32 i; + s32 ret = 0; + s32 rc = 0; + struct sxe2_queue **txq = vsi->txqs.q; + + sxe2_for_each_vsi_txq(vsi, i) { + rc = sxe2_txq_stop(vsi, txq[i]); + if (rc) + ret = rc; + } + + return ret; +} + +s32 sxe2_xdp_txqs_stop(struct sxe2_vsi *vsi) +{ + u32 i; + s32 ret = 0; + s32 rc = 0; + struct sxe2_queue **txq = vsi->xdp_rings.q; + + for (i = 0; i < vsi->num_xdp_txq; i++) { + rc = sxe2_txq_stop(vsi, txq[i]); + if (rc) + ret = rc; + } + + return ret; +} + +s32 sxe2_tx_cfg(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + ret = sxe2_tx_rings_alloc(vsi); + if (ret) { + LOG_ERROR_BDF("tx resource alloc failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe2_tx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("tx hw configure failed, ret=%d\n", ret); + goto l_free; + } + + return 0; + +l_free: + sxe2_tx_rings_free(vsi); + +l_end: + return ret; +} + +s32 sxe2_xdp_tx_cfg(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + s64 i; + + for (i = 0; i < vsi->num_xdp_txq; i++) { + if (!vsi->xdp_rings.q[i]->tx_buf) { + if (sxe2_tx_ring_alloc(vsi->xdp_rings.q[i], vsi)) + goto free_xdp_rings; + } + } + + ret = sxe2_xdp_tx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("tx hw configure failed, ret=%d\n", ret); + goto l_end; + } + + return 0; + +free_xdp_rings: + ret = -ENOMEM; + for (; i >= 0; i--) + if (vsi->xdp_rings.q[i]) + sxe2_tx_ring_free(vsi->xdp_rings.q[i]); + +l_end: + return ret; +} + +static inline u16 sxe2_tx_desc_unused_count(struct sxe2_queue *txq) +{ + u16 ntc = txq->next_to_clean; + u16 ntu = txq->next_to_use; + u16 unused_tx_desc_cnt; + + unused_tx_desc_cnt = (u16)(((ntc > ntu) ? 
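+ /* Ring arithmetic: (ntc - ntu - 1) modulo the ring depth. One slot
+ * is deliberately kept unused so next_to_use == next_to_clean means
+ * "empty" rather than "full".
+ */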
0 : txq->depth) + ntc - ntu - 1); + return unused_tx_desc_cnt; +} + +STATIC s32 sxe2_maybe_stop_tx(struct sxe2_queue *txq, u16 desc_cnt) +{ + s32 ret = 0; + + netif_stop_subqueue(txq->netdev, txq->idx_in_vsi); + + smp_mb(); + + if (likely(sxe2_tx_desc_unused_count(txq) < desc_cnt)) { + ret = -EBUSY; + goto l_end; + } + + netif_start_subqueue(txq->netdev, txq->idx_in_vsi); + + ++txq->stats->tx_stats.tx_restart; + +l_end: + return ret; +} + +static u32 sxe2_tx_desc_count(struct sk_buff *skb) +{ + u32 count = 0, size = skb_headlen(skb); + u32 nr_frags = skb_shinfo(skb)->nr_frags; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + for (;;) { + count += SXE2_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +static s32 sxe2_tso(struct sxe2_queue *txq, struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + s32 ret; + u32 paylen; + union sxe2_ip_hdr ip; + union sxe2_l4_hdr l4; + u8 l4_start, header_len; + u64 cd_mss, cd_tso_len; + struct sk_buff *skb = first_buf->skb; + struct sxe2_adapter *adapter = offload->adapter; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) { + ret = 0; + goto l_end; + } +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tso start\n"); + +#endif + + ret = skb_cow_head(skb, 0); + if (ret < 0) { + LOG_ERROR_BDF("skb cow head failed, ret=%d\n", ret); + goto l_end; + } + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6 | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tunnel tso start\n"); + +#endif + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + + l4_start = (u8)(l4.hdr - skb->data); + + paylen = skb->len - l4_start; + csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("udp tunnel tso, out head csum replace\n"); + } +#endif + } + + if (ip.v4->version == 4) + ip.v4->frag_off |= htons(IP_DF); + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + l4_start = (u8)(l4.hdr - skb->data); + + paylen = skb->len - l4_start; +#ifdef NETIF_F_GSO_UDP_L4 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)sizeof(l4.udp) + l4_start; + } else { + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)((l4.tcp->doff * 4) + l4_start); + } +#else + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)((l4.tcp->doff * 4) + l4_start); +#endif + + first_buf->gso_segs = skb_shinfo(skb)->gso_segs; + first_buf->bytecount += (first_buf->gso_segs - 1) * header_len; + txq->stats->tx_stats.tx_tso_packets += first_buf->gso_segs; + txq->stats->tx_stats.tx_tso_bytes += first_buf->bytecount; + + cd_tso_len = skb->len - header_len; + cd_mss = skb_shinfo(skb)->gso_size; + + 
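+ /* Hand the total L4 payload length and the MSS to the context
+ * descriptor; hardware re-segments the payload and replicates the
+ * headers prepared above for every resulting frame.
+ */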
first_buf->tx_features |= SXE2_TX_FEATURE_TSO; + + sxe2_tx_desc_setup_for_tso(offload, cd_tso_len, cd_mss); + + return 0; + +l_end: + return ret; +} + +STATIC s32 sxe2_tx_csum(struct sxe2_queue *txq, struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + s32 ret; + bool gso_enable; + u8 l4_proto = 0; + unsigned char *exthdr; + union sxe2_ip_hdr ip; + union sxe2_l4_hdr l4; + __be16 frag_off, protocol; + u32 l4_len = 0, l3_len, l2_len; + u32 cmd = 0, tunnel = 0; + struct sk_buff *skb = first_buf->skb; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = offload->adapter; +#endif + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + txq->stats->tx_stats.tx_csum_none++; + return 0; + } + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx checksum offload start\n"); + +#endif + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + l2_len = (u32)(ip.hdr - skb->data); + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IP)) + first_buf->tx_features |= SXE2_TX_FEATURE_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + first_buf->tx_features |= SXE2_TX_FEATURE_IPV6; + + if (skb->encapsulation) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tunnel checksum offload\n"); +#endif + if (first_buf->tx_features & SXE2_TX_FEATURE_IPV4) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tunnel out ipv4 checksum offload\n"); + +#endif + tunnel |= (first_buf->tx_features & + SXE2_TX_FEATURE_TSO) ? SXE2_TXCD_IPV4 : SXE2_TXCD_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; + + } else if (first_buf->tx_features & SXE2_TX_FEATURE_IPV6) { +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tunnel out ipv6 checksum offload\n"); + +#endif + tunnel |= SXE2_TXCD_EIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + if (ret < 0) { + ret = -1; + goto l_end; + } + } + + switch (l4_proto) { + case IPPROTO_UDP: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tunnel out udp checksum offload\n"); +#endif + tunnel |= SXE2_TXCD_QW0_L4TUNT_UDP_M; + first_buf->tx_features |= SXE2_TX_FEATURE_TUNNEL; + break; + case IPPROTO_GRE: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tunnel out gre checksum offload\n"); +#endif + tunnel |= SXE2_TXCD_QW0_L4TUNT_GRE_M; + first_buf->tx_features |= SXE2_TX_FEATURE_TUNNEL; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("tx tunnel out ipip/ipv6 checksum offload\n"); + } +#endif + first_buf->tx_features |= SXE2_TX_FEATURE_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (first_buf->tx_features & SXE2_TX_FEATURE_TSO) { + ret = -1; + goto l_end; + } + + (void)skb_checksum_help(skb); + ret = 0; + goto l_end; + } + + tunnel |= ((l4.hdr - ip.hdr) / 4) << SXE2_TXCD_QW0_EIPLEN_S; + + ip.hdr = skb_inner_network_header(skb); + + tunnel |= ((ip.hdr - l4.hdr) / 2) << SXE2_TXCD_QW0_L4TUNLEN_S; + + gso_enable = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; + + if ((first_buf->tx_features & SXE2_TX_FEATURE_TSO) && + !gso_enable && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { +#ifdef SXE2_CFG_DEBUG + if 
(test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx udp tunnel checksum offload\n"); + +#endif + tunnel |= SXE2_TXCD_QW0_L4T_CS_M; + } + + offload->ctxt_desc_tunnel |= tunnel; + offload->ctxt_desc_qw1 |= (u64)SXE2_TX_DESC_DTYPE_CTXT; + + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + first_buf->tx_features &= + ~(SXE2_TX_FEATURE_IPV4 | SXE2_TX_FEATURE_IPV6); + if (ip.v4->version == 4) + first_buf->tx_features |= SXE2_TX_FEATURE_IPV4; + if (ip.v6->version == 6) + first_buf->tx_features |= SXE2_TX_FEATURE_IPV6; + txq->stats->tx_stats.tx_csum_partial_inner++; + } else { + txq->stats->tx_stats.tx_csum_partial++; + } + + if (first_buf->tx_features & SXE2_TX_FEATURE_IPV4) { + l4_proto = ip.v4->protocol; + if (first_buf->tx_features & SXE2_TX_FEATURE_TSO) + cmd |= SXE2_TXDD_CMD_IIPT_IPV4_CSUM; + else + cmd |= SXE2_TXDD_CMD_IIPT_IPV4; + + } else if (first_buf->tx_features & SXE2_TX_FEATURE_IPV6) { + cmd |= SXE2_TXDD_CMD_IIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) { + (void)ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + } + + } else { + ret = -1; + goto l_end; + } + + l3_len = (u32)(l4.hdr - ip.hdr); + + switch (l4_proto) { + case IPPROTO_TCP: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx tcp checksum offload\n"); + +#endif + cmd |= SXE2_TXDD_CMD_L4T_EOFT_TCP; + l4_len = l4.tcp->doff; + break; + case IPPROTO_UDP: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx udp checksum offload\n"); + +#endif + cmd |= SXE2_TXDD_CMD_L4T_EOFT_UDP; + l4_len = (sizeof(struct udphdr) >> 2); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("tx sctp checksum offload\n"); + +#endif + cmd |= SXE2_TXDD_CMD_L4T_EOFT_SCTP; + l4_len = sizeof(struct sctphdr) >> 2; + break; +#endif + + default: + if (first_buf->tx_features & SXE2_TX_FEATURE_TSO) + return -1; + (void)skb_checksum_help(skb); + ret = 0; + goto l_end; + } + + sxe2_tx_desc_setup_for_csum(offload, l2_len, l3_len, l4_len, cmd); + first_buf->tx_features |= SXE2_TX_FEATURE_MACLEN; + + return 0; +l_end: + return ret; +} + +static void sxe2_tx_vlan(struct sxe2_queue *txq, struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + struct sk_buff *skb = first_buf->skb; + union sxe2_ip_hdr ip; + u32 l2_len; + + if (!skb_vlan_tag_present(skb) && eth_type_vlan((__be16)skb->protocol)) + return; + + if (skb_vlan_tag_present(skb)) { + txq->stats->tx_stats.tx_vlan_insert++; + sxe2_tx_desc_setup_for_vlan(offload, skb_vlan_tag_get(skb)); + } + + if ((offload->data_desc_cmd & SXE2_TXDD_CMD_IL2TAG1) && + !(first_buf->tx_features & SXE2_TX_FEATURE_MACLEN)) { + ip.hdr = skb_network_header(skb); + l2_len = (u32)(ip.hdr - skb->data); + offload->data_desc_offset |= ((l2_len / 2) << SXE2_TXDD_MACLEN_S); + } +} + +static void sxe2_request_tstamp(struct sxe2_queue *txq, + struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + s32 idx; + struct net_device *netdev = netif_is_macvlan(txq->netdev) ? 
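+ /* For a macvlan offload queue txq->netdev is the macvlan device;
+ * drop down to the real lower device to reach the adapter private
+ * data.
+ */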
+ macvlan_dev_real_dev(txq->netdev) : txq->netdev; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sk_buff *skb = first_buf->skb; + + if (!adapter->ptp_ctxt.ptp_tx_enable) + return; + + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return; + + if (first_buf->tx_features & SXE2_TX_FEATURE_TSO) + return; + + idx = sxe2_ptp_txts_request(txq->tx_tstamps, skb); + if (idx < 0) + return; + + sxe2_tx_desc_setup_for_ptp(offload, (u64)idx); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("QW1:0x%llx,ptp index:%d\n", offload->ctxt_desc_qw1, idx); + +#endif +} + +STATIC s32 sxe2_tx_feature_offload(struct sxe2_queue *txq, + struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + s32 ret; + u16 ntu = txq->next_to_use; + struct net_device *netdev = netif_is_macvlan(txq->netdev) ? + macvlan_dev_real_dev(txq->netdev) : txq->netdev; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_adapter *adapter = np->vsi->adapter; + struct sxe2_tx_context_desc *ctxt_desc; + + offload->adapter = adapter; + + ret = sxe2_tso(txq, first_buf, offload); + if (ret < 0) + goto l_end; + +#ifdef HAVE_MACSEC_SUPPORT + if (sxe2_macsec_offload(adapter, first_buf->skb)) + offload->data_desc_cmd |= SXE2_TXDD_CMD_MACSEC; +#endif + + if (xfrm_offload(first_buf->skb)) { + ret = sxe2_ipsec_tx(txq, first_buf, offload); + if (ret) + goto l_end; + } + + ret = sxe2_tx_csum(txq, first_buf, offload); + if (ret < 0) + goto l_end; + + sxe2_request_tstamp(txq, first_buf, offload); + + sxe2_tx_vlan(txq, first_buf, offload); + + if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, offload->adapter->flags)) + sxe2_eswitch_tx_desc_setup(offload, first_buf->skb); + + if (offload->ctxt_desc_qw1 & SXE2_TX_DESC_DTYPE_CTXT) { + ctxt_desc = SXE2_TXCD(txq, ntu); + ntu++; + txq->next_to_use = (ntu < txq->depth) ? 
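+ /* The context descriptor takes a ring slot of its own ahead of the
+ * data descriptors; wrap next_to_use at the end of the ring.
+ */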
ntu : 0; + + ctxt_desc->tunneling_params = cpu_to_le32(offload->ctxt_desc_tunnel); + ctxt_desc->l2tag2 = cpu_to_le16(offload->ctxt_desc_l2tag2); + ctxt_desc->qw1 = cpu_to_le64(offload->ctxt_desc_qw1); + ctxt_desc->ipset_offset = cpu_to_le16(offload->ctxt_desc_ipsec_offset); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("desc:qw1=0x%llx\n", ctxt_desc->qw1); + +#endif + } + +l_end: + return ret; +} + +static void sxe2_tx_dma_err(struct sxe2_queue *txq, + struct sxe2_tx_buf *first_buf, u16 ntu) +{ + struct sxe2_tx_buf *tx_buf; + + for (;;) { + tx_buf = &txq->tx_buf[ntu]; + + sxe2_tx_buffer_unmap(txq, tx_buf); + if (tx_buf == first_buf) + break; + + if (ntu == 0) + ntu += txq->depth; + + --ntu; + } + + txq->next_to_use = ntu; +} + +static inline void sxe2_tx_desc_update(struct sxe2_queue *txq, + union sxe2_tx_data_desc **desc, + u16 *ntu) +{ + ++(*ntu); + ++(*desc); + if (txq->depth == *ntu) { + *desc = SXE2_TX_DESC(txq, 0); + *ntu = 0; + } +} + +STATIC s32 sxe2_tx_desc_ring_map(struct sxe2_queue *txq, + struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload, + union sxe2_tx_data_desc **desc, u16 *ntu) +{ + u32 max_data; + dma_addr_t dma; + skb_frag_t *frag; + struct sk_buff *skb = first_buf->skb; + u32 map_size = skb_headlen(skb); + u32 remaining_size = skb->data_len; + struct sxe2_tx_buf *tx_buf = first_buf; + struct sxe2_adapter *adapter = txq->vsi->adapter; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("skb dma map start, line_size=%u, \t" + " total_frag_len=%u, skb_len=%u, ntu=%u\n", + skb_headlen(skb), skb->data_len, skb->len, *ntu); + } +#endif + + dma = dma_map_single(txq->dev, skb->data, map_size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(txq->dev, dma)) { + LOG_ERROR_BDF("tx dma map failed\n"); + goto l_dma_err; + } + + dma_unmap_len_set(tx_buf, len, map_size); + dma_unmap_addr_set(tx_buf, dma, dma); + (*desc)->read.buf_addr = cpu_to_le64(dma); + + max_data = SXE2_MAX_DATA_PER_TXD_ALIGNED; + +#ifdef SXE2_TX_4K_AILGN + max_data += -dma & (SXE2_MAX_READ_REQ_SIZE - 1); +#endif + + while (unlikely(map_size > SXE2_MAX_DATA_PER_TXD)) { + (*desc)->read.cmd_type_offset_bsz = + sxe2_tx_data_desc_qword1_setup(offload, max_data); + + sxe2_tx_desc_update(txq, desc, ntu); + dma += max_data; + map_size -= max_data; + + max_data = SXE2_MAX_DATA_PER_TXD_ALIGNED; + (*desc)->read.buf_addr = cpu_to_le64(dma); + } + + if (likely(!remaining_size)) { + offload->data_desc_cmd |= SXE2_TXDD_CMD_EOP | SXE2_TXDD_CMD_RS; + (*desc)->read.cmd_type_offset_bsz = + sxe2_tx_data_desc_qword1_setup(offload, map_size); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, " + "remaining_size=%u, desc_ptr=%p, dma_addr=%#llx, " + "desc.buffer_addr = %#llx, cmd_type=0x%llx\n", + map_size, remaining_size, *desc, (u64)dma, + (*desc)->read.buf_addr, + (*desc)->read.cmd_type_offset_bsz); + } +#endif + break; + } + + (*desc)->read.cmd_type_offset_bsz = + sxe2_tx_data_desc_qword1_setup(offload, map_size); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, " + "remaining_size=%u, desc_ptr=%p, dma_addr=%#llx, " + "desc.buffer_addr = %#llx, cmd_type=0x%llx\n", + map_size, remaining_size, *desc, (u64)dma, + (*desc)->read.buf_addr, + (*desc)->read.cmd_type_offset_bsz); + } +#endif + + 
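+ /* Not the last piece of the skb: close this descriptor, move to the
+ * next ring slot and DMA-map the next fragment.
+ */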
sxe2_tx_desc_update(txq, desc, ntu); + + map_size = skb_frag_size(frag); + remaining_size -= map_size; + + dma = skb_frag_dma_map(txq->dev, frag, 0, map_size, + DMA_TO_DEVICE); + + tx_buf = &txq->tx_buf[*ntu]; + } + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + LOG_DEBUG_BDF("skb dma map end\n"); + +#endif + return 0; + +l_dma_err: + sxe2_tx_dma_err(txq, first_buf, *ntu); + return -ENOMEM; +} + +s32 sxe2_xmit_pkt(struct sxe2_queue *txq, struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload) +{ + s32 ret; + u16 ntu = txq->next_to_use; + union sxe2_tx_data_desc *desc = SXE2_TX_DESC(txq, ntu); + struct sxe2_adapter *adapter = txq->vsi->adapter; + bool xmit_more; + ret = sxe2_tx_desc_ring_map(txq, first_buf, offload, &desc, &ntu); + if (ret) + goto l_end; + + ntu++; + if (ntu == txq->depth) + ntu = 0; + + skb_tx_timestamp(first_buf->skb); + + wmb(); + + first_buf->next_to_watch = desc; + + txq->next_to_use = ntu; + + if (unlikely(sxe2_tx_desc_unused_count(txq) <= SXE2_TX_DESC_NEEDED)) { + ret = sxe2_maybe_stop_tx(txq, SXE2_TX_DESC_NEEDED); + if (ret < 0) { + LOG_WARN_BDF("the desc is not enough in the queue[%u],\t" + "to stop the queue, \t" + "desc_cnt < SXE2_TX_DESC_NEEDED[%u]\n", + txq->idx_in_vsi, (u32)SXE2_TX_DESC_NEEDED); + } + } + + xmit_more = netdev_xmit_more(); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("xmit end, ring idx=%u, next_to_use=%d, \t" + "next_to_clean=%d, next_to_watch=%pK xmit_more %d \t" + "desc left %d SXE2_TX_DESC_NEEDED %d\n", + txq->idx_in_pf, txq->next_to_use, + txq->next_to_clean, first_buf->next_to_watch, xmit_more, + sxe2_tx_desc_unused_count(txq), (u32)SXE2_TX_DESC_NEEDED); + } +#endif + + txq->stats->tx_stats.tx_xmit_more += xmit_more; + if (__netdev_tx_sent_queue(sxe2_netdev_txq_get(txq), + first_buf->bytecount, xmit_more)) { + writel(ntu, txq->desc.tail); + } + + return 0; + +l_end: + txq->stats->tx_stats.tx_queue_dropped++; + return ret; +} + +static bool sxe2_chk_linearize_for_tso(struct sxe2_queue *txq, + struct sk_buff *skb) +{ + u32 i; + bool ret; + s32 nr_frags, sum; + const skb_frag_t *frag, *stale; + + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < (SXE2_MAX_DATA_DESC_PER_SKB - 1)) { + ret = false; + goto l_end; + } + + txq->stats->tx_stats.tx_tso_linearize_chk++; + + sum = 1 - skb_shinfo(skb)->gso_size; + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < SXE2_TSO_SEG_DESC_USE_FOR_FRAGMENT - 1; i++) + sum += (s32)skb_frag_size(frag++); + nr_frags -= SXE2_TSO_SEG_DESC_USE_FOR_FRAGMENT; + + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + s32 stale_size = (s32)skb_frag_size(stale); + + sum += (s32)skb_frag_size(frag++); + + if (stale_size > (s32)SXE2_MAX_DATA_PER_TXD) { +#ifdef SXE2_TX_4K_AILGN + int align_pad = -(skb_frag_off(stale)) & + (SXE2_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; +#endif + do { + sum -= (s32)SXE2_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= (s32)SXE2_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > (s32)SXE2_MAX_DATA_PER_TXD); + } + + if (sum < 0) { + ret = true; + goto l_end; + } + + if (!nr_frags--) + break; + + sum -= stale_size; + } + + ret = false; +l_end: + return ret; +} + +static bool sxe2_chk_linearize(struct sxe2_queue *txq, struct sk_buff *skb, + u32 desc_needed) +{ + if (likely(desc_needed < SXE2_MAX_DATA_DESC_PER_SKB)) + return false; + + if (skb_is_gso(skb)) + return sxe2_chk_linearize_for_tso(txq, skb); + + return true; +} +static netdev_tx_t 
sxe2_queue_xmit(struct sk_buff *skb, struct sxe2_queue *txq) +{ + s32 res; + netdev_tx_t ret = NETDEV_TX_OK; + u32 need_desc_count; + struct sxe2_tx_buf *first_buf = NULL; + struct sxe2_adapter *adapter = txq->vsi->adapter; + struct sxe2_tx_offload_info offload = {}; + + sxe2_trace(queue_xmit, txq, skb); + + need_desc_count = sxe2_tx_desc_count(skb); + if (sxe2_chk_linearize(txq, skb, need_desc_count)) { + if (__skb_linearize(skb)) { + LOG_WARN_BDF("skb linearize failed, drop pkg, txq_idx=%d", + txq->idx_in_vsi); + goto l_free; + } + + need_desc_count = SXE2_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(skb->len); + txq->stats->tx_stats.tx_linearize++; + } + + need_desc_count += (SXE2_DESCS_PER_CACHE_LINE + SXE2_DESCS_FOR_CTXT_DESC); + if (unlikely(sxe2_tx_desc_unused_count(txq) < need_desc_count)) { + if (sxe2_maybe_stop_tx(txq, (u16)need_desc_count)) { + txq->stats->tx_stats.tx_busy++; +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_WARN_BDF("txq desc is not enough, txq_idx=%d need desc %u \t" + "max %lu pkt len %u data len %u pkt frags %u", + txq->idx_in_vsi, need_desc_count, SXE2_TX_DESC_NEEDED, + skb->len, skb->data_len, skb_shinfo(skb)->nr_frags); + } +#endif + ret = NETDEV_TX_BUSY; + goto l_end; + } + } + + netdev_txq_bql_enqueue_prefetchw(sxe2_netdev_txq_get(txq)); + + first_buf = sxe2_tx_first_buffer_get(skb, txq); + + res = sxe2_tx_feature_offload(txq, first_buf, &offload); + if (res < 0) { + LOG_ERROR_BDF("tx offload failed, tx queue->idx=%u\n", + txq->idx_in_vsi); + goto l_free; + } + + res = sxe2_xmit_pkt(txq, first_buf, &offload); + if (res) { + LOG_ERROR_BDF("tx dma mapping err, queue idx=%u\n", + txq->idx_in_vsi); + } + + return NETDEV_TX_OK; + +l_free: + sxe2_trace(queue_xmit_drop, txq, skb); + dev_kfree_skb_any(skb); + if (first_buf) + first_buf->skb = NULL; +l_end: + return ret; +} + +netdev_tx_t sxe2_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + netdev_tx_t ret; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_queue *txq; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = vsi->adapter; +#endif + + txq = vsi->txqs.q[skb->queue_mapping]; + if (!txq) { + ret = NETDEV_TX_BUSY; + goto l_end; + } + + if (skb_put_padto(skb, SXE2_MIN_TX_LEN)) { + ret = NETDEV_TX_OK; + goto l_end; + } + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) + SKB_DUMP(skb); + +#endif + + return sxe2_queue_xmit(skb, txq); +l_end: + return ret; +} + +s32 sxe2_prgm_fnav_fltr(struct sxe2_vsi *vsi, + struct sxe2_tx_fnav_desc *fnav_desc, u8 *raw_packet) +{ + struct sxe2_queue *txq; + struct device *dev; + u32 i = 0; + dma_addr_t dma; + u16 ntu; + struct sxe2_tx_buf *first_buf, *tx_buf; + struct sxe2_tx_fnav_desc *f_desc; + union sxe2_tx_data_desc *d_desc; + struct sxe2_tx_offload_info offload; + + if (!vsi) + return -ENOENT; + txq = vsi->txqs.q[0]; + if (!txq || !txq->desc.base_addr) + return -ENOENT; + dev = txq->dev; + + while (sxe2_tx_desc_unused_count(txq) < SXE2_FNAV_DESC_NEED_USE) { + if (i > SXE2_FNAV_DESC_CLEAN_DELAY) + return -EAGAIN; + + i++; + (void)msleep_interruptible(1); + } + + dma = dma_map_single(dev, raw_packet, SXE2_FNAV_MAX_RAW_PKT_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + return -EINVAL; + + ntu = txq->next_to_use; + first_buf = &txq->tx_buf[ntu]; + f_desc = SXE2_TXFD(txq, ntu); + (void)memcpy(f_desc, fnav_desc, sizeof(*f_desc)); + + ntu++; + if (unlikely(ntu == txq->depth)) + ntu = 0; + tx_buf = &txq->tx_buf[ntu]; + d_desc = 
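+ /* Programming a FNAV filter consumes two ring slots: the filter
+ * descriptor copied in above plus a dummy data descriptor carrying
+ * the raw match packet.
+ */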
SXE2_TX_DESC(txq, ntu); + + ntu++; + if (unlikely(ntu == txq->depth)) + ntu = 0; + txq->next_to_use = ntu; + + (void)memset(tx_buf, 0, sizeof(*tx_buf)); + dma_unmap_len_set(tx_buf, len, SXE2_FNAV_MAX_RAW_PKT_SIZE); + dma_unmap_addr_set(tx_buf, dma, dma); + + d_desc->read.buf_addr = cpu_to_le64(dma); + offload.adapter = txq->vsi->adapter; + offload.data_desc_cmd = SXE2_TXDD_CMD_EOP | SXE2_TXDD_CMD_RS | + SXE2_TXDD_CMD_DUMMY | SXE2_TXDD_CMD_RE; + offload.data_desc_offset = 0; + offload.data_desc_l2tag1 = 0; + + tx_buf->tx_features = SXE2_TX_FEATURE_DUMMY_PKT; + tx_buf->raw_buf = (void *)raw_packet; + tx_buf->gso_segs = 1; + tx_buf->bytecount = SXE2_FNAV_MAX_RAW_PKT_SIZE; + + d_desc->read.cmd_type_offset_bsz = + sxe2_tx_data_desc_qword1_setup(&offload, SXE2_FNAV_MAX_RAW_PKT_SIZE); + + wmb(); + + first_buf->next_to_watch = d_desc; + + writel(ntu, txq->desc.tail); + + return 0; +} + +static inline void sxe2_tx_skb_unmap(struct sxe2_queue *txq, + struct sxe2_tx_buf *tx_buf) +{ + dma_unmap_single(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(tx_buf, len, 0); +} + +static inline void sxe2_tx_desc_buf_update(struct sxe2_queue *txq, + struct sxe2_tx_buf **tx_buf, + union sxe2_tx_data_desc **tx_desc, + u32 *ntc) +{ + (*tx_buf)++; + (*tx_desc)++; + ++(*ntc); + if (unlikely(!(*ntc))) { + *ntc -= txq->depth; + *tx_buf = txq->tx_buf; + *tx_desc = SXE2_TX_DESC(txq, 0); + } +} + +static void sxe2_tx_desc_ring_unmap(struct sxe2_queue *txq, s32 napi_budget, + u16 *budget, + struct sxe2_queue_stats *queue_stats) +{ + struct sxe2_tx_buf *tx_buf; + u32 ntc = txq->next_to_clean; + union sxe2_tx_data_desc *tx_desc; + union sxe2_tx_data_desc *eop_desc; +#ifdef SXE2_CFG_DEBUG + struct sxe2_adapter *adapter = txq->vsi->adapter; +#endif + + tx_buf = &txq->tx_buf[ntc]; + tx_desc = SXE2_TX_DESC(txq, ntc); + ntc -= txq->depth; + + do { + eop_desc = tx_buf->next_to_watch; + + if (!eop_desc) + break; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("tx queue clean start: queue idx=%u, reg_idx=%u, \t" + "next_to_use=%d, next_to_clean=%d, budget=%d, \t" + "next_to_watch=%pK, eop_desc.wb.dd=%#08llx\n", + txq->idx_in_vsi, txq->idx_in_pf, txq->next_to_use, + txq->next_to_clean, *budget, tx_buf->next_to_watch, + ((union sxe2_tx_data_desc *)tx_buf->next_to_watch)->wb.dd); + } +#endif + + prefetchw(&tx_buf->skb->users); + + smp_rmb(); + + sxe2_trace(clean_tx_irq, txq, tx_desc, tx_buf); + + if (!(eop_desc->wb.dd & cpu_to_le64(SXE2_TX_DESC_DTYPE_DESC_DONE))) { + break; + } + + tx_buf->next_to_watch = NULL; + + queue_stats->bytes += tx_buf->bytecount; + queue_stats->packets += tx_buf->gso_segs; + + if (sxe2_queue_is_xdp(txq)) + page_frag_free(tx_buf->raw_buf); + else + napi_consume_skb(tx_buf->skb, napi_budget); + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("tx queue clean: budget=%d, bytes=%llu, packet=%llu\n", + *budget, queue_stats->bytes, queue_stats->packets); + } +#endif + + sxe2_tx_skb_unmap(txq, tx_buf); + tx_buf->skb = NULL; + + while (tx_desc != eop_desc) { + sxe2_trace(clean_tx_irq_unmap, txq, tx_desc, tx_buf); + sxe2_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + + if (dma_unmap_len(tx_buf, len)) + sxe2_tx_skb_unmap(txq, tx_buf); + } + sxe2_trace(clean_tx_irq_unmap_eop, txq, tx_desc, tx_buf); + sxe2_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + + prefetch(tx_desc); + + --*budget; + } while (likely(*budget)); + + ntc += txq->depth; + 
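+ /* ntc was biased downward by the ring depth on entry so that the
+ * wrap check in sxe2_tx_desc_buf_update() is a simple zero test;
+ * undo the bias before storing the index back.
+ */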
txq->next_to_clean = (u16)ntc; + +#ifdef SXE2_CFG_DEBUG + if (test_bit(SXE2_DATAPATH_LOG_ENABLE, adapter->flags)) { + LOG_DEBUG_BDF("tx queue clean end: queue idx=%u, reg_idx=%u, next_to_use=%d, \t" + "next_to_clean=%d, budget=%d\n", + txq->idx_in_vsi, txq->idx_in_pf, txq->next_to_use, + txq->next_to_clean, *budget); + } +#endif +} + +void sxe2_ctrl_txq_irq_clean(struct sxe2_queue *txq) +{ + struct sxe2_irq_data *irq_data = txq->irq_data; + u32 ntc = txq->next_to_clean; + int budget = SXE2_DFLT_IRQ_WORK; + union sxe2_tx_data_desc *tx_desc; + union sxe2_tx_data_desc *eop_desc; + struct sxe2_tx_buf *tx_buf; + struct sxe2_queue_stats queue_stats = {}; + + tx_buf = &txq->tx_buf[ntc]; + tx_desc = SXE2_TX_DESC(txq, ntc); + ntc -= txq->depth; + + do { + eop_desc = tx_buf->next_to_watch; + + if (!eop_desc) + break; + + LOG_DEBUG_IRQ("tx queue clean start: queue idx=%u, reg_idx=%u, \t" + "next_to_use=%d, next_to_clean=%d, budget=%d, \t" + "next_to_watch=%pK, eop_desc.wb.dd=%#08llx\n", + txq->idx_in_vsi, txq->idx_in_pf, txq->next_to_use, + txq->next_to_clean, budget, tx_buf->next_to_watch, + ((union sxe2_tx_data_desc *)tx_buf->next_to_watch)->wb.dd); + + smp_rmb(); + + if (!(eop_desc->wb.dd & cpu_to_le64(SXE2_TX_DESC_DTYPE_DESC_DONE))) { + break; + } + + tx_buf->next_to_watch = NULL; + + tx_desc->read.buf_addr = 0; + tx_desc->read.cmd_type_offset_bsz = 0; + + sxe2_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_single(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } + queue_stats.bytes += tx_buf->bytecount; + queue_stats.packets += tx_buf->gso_segs; + if (tx_buf->tx_features & SXE2_TX_FEATURE_DUMMY_PKT) + devm_kfree(txq->dev, tx_buf->raw_buf); + + tx_buf->raw_buf = NULL; + tx_buf->tx_features = 0; + tx_buf->next_to_watch = NULL; + tx_buf->bytecount = 0; + tx_buf->gso_segs = 0; + dma_unmap_addr_set(tx_buf, dma, 0); + dma_unmap_len_set(tx_buf, len, 0); + tx_desc->read.buf_addr = 0; + tx_desc->read.cmd_type_offset_bsz = 0; + + sxe2_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + + budget--; + } while (likely(budget)); + + ntc += txq->depth; + txq->next_to_clean = (u16)ntc; + + sxe2_tx_pkt_stats_update(txq, &queue_stats); + + sxe2_hw_irq_enable(&irq_data->vsi->adapter->hw, irq_data->idx_in_pf); +} + +bool sxe2_txq_irq_clean(struct sxe2_queue *txq, s32 napi_budget) +{ + u16 budget = SXE2_DFLT_IRQ_WORK; + struct sxe2_queue_stats queue_stats = {}; + struct sxe2_adapter *adapter = txq->vsi->adapter; + + if (txq->netdev) + netdev_txq_bql_complete_prefetchw(sxe2_netdev_txq_get(txq)); + + sxe2_tx_desc_ring_unmap(txq, napi_budget, &budget, &queue_stats); + + sxe2_tx_pkt_stats_update(txq, &queue_stats); + + if (sxe2_queue_is_xdp(txq)) + return !!budget; + + netdev_tx_completed_queue(sxe2_netdev_txq_get(txq), + (u32)queue_stats.packets, (u32)queue_stats.bytes); + + if (unlikely(queue_stats.packets && netif_carrier_ok(txq->netdev) && + (sxe2_tx_desc_unused_count(txq) >= SXE2_TX_WAKE_THRESHOLD))) { + + smp_mb(); + + if (netif_tx_queue_stopped(sxe2_netdev_txq_get(txq)) && + !test_bit(SXE2_VSI_S_DOWN, txq->vsi->state)) { + netif_tx_wake_queue(sxe2_netdev_txq_get(txq)); + ++txq->stats->tx_stats.tx_restart; + LOG_WARN_BDF("\n\n txq idx=%u, wake_up\n\n", + txq->idx_in_vsi); + } + } + + return !!budget; +} + +#ifdef HAVE_XDP_SUPPORT +s32 sxe2_xmit_xdp_ring(void *data, u16 size, struct sxe2_queue *xdp_ring) +{ + u16 ntu = xdp_ring->next_to_use; + union sxe2_tx_data_desc *tx_desc; + struct sxe2_tx_buf *tx_buf; + struct 
sxe2_tx_offload_info offload;
+ dma_addr_t dma;
+
+ if (unlikely(!SXE2_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->stats->tx_stats.tx_busy++;
+ return SXE2_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return SXE2_XDP_CONSUMED;
+
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ tx_buf->bytecount = size;
+ tx_buf->gso_segs = 1;
+ tx_buf->raw_buf = data;
+
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ offload.adapter = xdp_ring->vsi->adapter;
+ offload.data_desc_cmd = SXE2_TXDD_CMD_EOP | SXE2_TXDD_CMD_RS;
+ offload.data_desc_offset = 0;
+ offload.data_desc_l2tag1 = 0;
+
+ tx_desc = SXE2_TX_DESC(xdp_ring, ntu);
+ tx_desc->read.buf_addr = cpu_to_le64(dma);
+ tx_desc->read.cmd_type_offset_bsz = sxe2_tx_data_desc_qword1_setup(&offload, size);
+
+ smp_wmb();
+
+ ntu++;
+ if (ntu == xdp_ring->depth)
+ ntu = 0;
+
+ tx_buf->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = ntu;
+
+ return SXE2_XDP_TX;
+}
+
+s32 sxe2_xmit_xdp_buff(struct xdp_buff *xdp, struct sxe2_queue *xdp_ring)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return SXE2_XDP_CONSUMED;
+
+ return sxe2_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+}
+#endif
+
+static u16 sxe2_txq_pending_get(struct sxe2_queue *txq)
+{
+ u16 head, tail;
+ u16 pending_txq_seq;
+
+ head = txq->next_to_clean;
+ tail = txq->next_to_use;
+
+ if (head != tail) {
+ pending_txq_seq = (u16)((head < tail) ? (tail - head) : (tail + txq->depth - head));
+ return pending_txq_seq;
+ }
+
+ return 0;
+}
+
+void sxe2_tx_hang_check_subtask(struct sxe2_adapter *adapter)
+{
+ u32 i;
+ s32 packets;
+ struct sxe2_vsi *vsi;
+ struct sxe2_queue *txq;
+ struct sxe2_queue_stats *txq_stats;
+
+ mutex_lock(&adapter->vsi_ctxt.lock);
+ vsi = adapter->vsi_ctxt.main_vsi;
+
+ if (!vsi || test_bit(SXE2_VSI_S_DOWN, vsi->state))
+ goto unlock;
+
+ if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
+ goto unlock;
+
+ sxe2_for_each_vsi_txq(vsi, i) {
+ txq = vsi->txqs.q[i];
+ if (!txq)
+ continue;
+
+ txq_stats = txq->stats;
+ if (!txq_stats)
+ continue;
+
+ if (txq->desc.base_addr) {
+ packets = txq_stats->packets & INT_MAX;
+ if (txq_stats->prev_pkt == packets) {
+ sxe2_hw_irq_trigger(&adapter->hw,
+ txq->irq_data->idx_in_pf);
+ continue;
+ }
+
+ smp_rmb();
+ txq_stats->prev_pkt = sxe2_txq_pending_get(txq) ?
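+ /* Remember the packet count only while the queue still has pending
+ * work; -1 disarms the comparison so an idle queue is never flagged
+ * as hung on the next pass.
+ */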
+static u16 sxe2_txq_pending_get(struct sxe2_queue *txq) +{ + u16 head, tail; + u16 pending_txq_seq; + + head = txq->next_to_clean; + tail = txq->next_to_use; + + if (head != tail) { + pending_txq_seq = (u16)((head < tail) ? (tail - head) : (tail + txq->depth - head)); + return pending_txq_seq; + } + + return 0; +} + +void sxe2_tx_hang_check_subtask(struct sxe2_adapter *adapter) +{ + u32 i; + s32 packets; + struct sxe2_vsi *vsi; + struct sxe2_queue *txq; + struct sxe2_queue_stats *txq_stats; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = adapter->vsi_ctxt.main_vsi; + + if (!vsi || test_bit(SXE2_VSI_S_DOWN, vsi->state)) + goto unlock; + + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + goto unlock; + + sxe2_for_each_vsi_txq(vsi, i) { + txq = vsi->txqs.q[i]; + if (!txq) + continue; + + txq_stats = txq->stats; + if (!txq_stats) + continue; + + if (txq->desc.base_addr) { + packets = txq_stats->packets & INT_MAX; + if (txq_stats->prev_pkt == packets) { + sxe2_hw_irq_trigger(&adapter->hw, + txq->irq_data->idx_in_pf); + continue; + } + + smp_rmb(); + txq_stats->prev_pkt = sxe2_txq_pending_get(txq) ? packets : -1; + } + } + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +static inline bool sxe2_find_q_in_range(u16 low, u16 high, u32 txq) +{ + return (txq >= low) && (txq < high); +} + +STATIC bool sxe2_is_pfc_causing_hung_q(struct sxe2_adapter *adapter, + u32 txqueue) +{ + u8 num_tcs = 0, i, tc, up_in_tc = 0; + u64 ref_prio_xoff[SXE2_MAX_USER_PRIORITY]; + struct sxe2_vsi *vsi = adapter->vsi_ctxt.main_vsi; + struct sxe2_dcb_stats *stats = &adapter->pf_stats.dcb_stats; + struct sxe2_dcbx_cfg *dcb_cfg = &adapter->dcb_ctxt.local_dcbx_cfg; + + if (!vsi) + return false; + + sxe2_for_each_tc(i) + if (vsi->tc.tc_map & BIT(i)) + num_tcs++; + + for (tc = 0; tc < num_tcs - 1; tc++) + if (sxe2_find_q_in_range(vsi->tc.info[tc].txq_offset, + vsi->tc.info[tc + 1].txq_offset, + txqueue)) + break; + + for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) { + if (dcb_cfg->ets.prio_tbl[i] == tc) + up_in_tc |= BIT(i); + } + + for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) + if (up_in_tc & BIT(i)) + ref_prio_xoff[i] = stats->curr_pause_stats.prio_xoff_rx[i]; + + sxe2_dcb_stats_update(adapter); + + for (i = 0; i < IEEE_8021Q_MAX_PRIORITIES; i++) + if (up_in_tc & BIT(i)) + if (stats->curr_pause_stats.prio_xoff_rx[i] > + ref_prio_xoff[i]) + return true; + + return false; +} + +void sxe2_tx_timeout(struct net_device *netdev, u32 txqueue) +{ + u32 i; + struct sxe2_netdev_priv *np = netdev_priv(netdev); + struct sxe2_vsi *vsi = np->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_queue *txq = NULL; + struct netdev_queue *ntxq = NULL; + + adapter->tx_timeout_count++; + + if (sxe2_is_pfc_causing_hung_q(adapter, txqueue)) { + LOG_DEV_INFO("fake Tx hang detected on queue %u, \t" + "timeout caused by PFC storm\n", + txqueue); + return; + } + + sxe2_for_each_vsi_txq(vsi, i) { + if (vsi->txqs.q[i] && vsi->txqs.q[i]->desc.base_addr) { + if (txqueue == vsi->txqs.q[i]->idx_in_vsi) { + txq = vsi->txqs.q[i]; + break; + } + } + } + + if (time_after(jiffies, (adapter->tx_timeout_last_recovery + HZ * 20))) + adapter->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (adapter->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (txq) { + ntxq = sxe2_netdev_txq_get(txq); + LOG_NETDEV_INFO("tx_timeout: VSI_num: %u, Q %u, NTC: 0x%x, " + "NTU: 0x%x netdev txq state %lu " + "(BIT: 0 - DRV_XOFF; 1 - STACK_XOFF; 2 - FROZEN)\n", + vsi->idx_in_dev, txqueue, txq->next_to_clean, + txq->next_to_use, ntxq->state); + } + + adapter->tx_timeout_last_recovery = jiffies; + LOG_NETDEV_INFO("tx_timeout recovery level %d, txqueue %u\n", + adapter->tx_timeout_recovery_level, txqueue); + + switch (adapter->tx_timeout_recovery_level) { + case 1: + (void)sxe2_reset_async(adapter, SXE2_RESET_PFR); + break; + case 2: + (void)sxe2_reset_async(adapter, SXE2_RESET_CORER); + break; + default: + LOG_NETDEV_ERR("tx_timeout recovery unsuccessful, \t" + "device is in unrecoverable state\n"); + + break; + } + + adapter->tx_timeout_recovery_level++; +} + +static void sxe2_txq_ucmd_err_handle(struct sxe2_vsi *vsi, + struct sxe2_txq_ucmd_en_params *params, u32 q_err_id) +{ + u32 i = 0; + struct sxe2_queue *txq; + struct sxe2_txq_ucmd_ctxt *q_ctxt_info; + + for (i = 0; i < q_err_id; i++) { + q_ctxt_info = &params->ctxts[i]; + txq = vsi->txqs.q[q_ctxt_info->queue_id]; + + if (q_ctxt_info->sched_mode == SXE2_SCHED_MODE_TM || + q_ctxt_info->sched_mode == SXE2_SCHED_MODE_HIGH_PERFORMANCE) + (void)sxe2_fwc_txq_stop(vsi, txq); + else if (q_ctxt_info->sched_mode == SXE2_SCHED_MODE_DEFAULT) + (void)sxe2_txq_stop(vsi, txq); + } +}
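+ +/* + * Usage sketch (illustrative only, not part of this patch): a user-command + * caller is expected to size the flexible-array request with struct_size() + * and fill one sxe2_txq_ucmd_ctxt per queue before invoking + * sxe2_txq_cfg_ena_common_handle() below; vsi_idx, the depth of 512 and + * ring_dma are assumed caller-provided values. + * + * struct sxe2_txq_ucmd_en_params *params; + * + * params = kzalloc(struct_size(params, ctxts, 1), GFP_KERNEL); + * if (!params) + * return -ENOMEM; + * params->vsi_idx = vsi_idx; + * params->q_cnt = 1; + * params->ctxts[0].queue_id = 0; + * params->ctxts[0].sched_mode = SXE2_UCMD_TXQ_MODE_DEFAULT; + * params->ctxts[0].depth = 512; + * params->ctxts[0].dma_addr = ring_dma; + * ret = sxe2_txq_cfg_ena_common_handle(adapter, params); + * kfree(params); + */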
+ +static bool sxe2_txq_cfg_param_is_valid(struct sxe2_adapter *adapter, + struct sxe2_txq_ucmd_en_params *params) +{ + u16 i; + struct sxe2_vsi *vsi; + struct sxe2_txq_ucmd_ctxt *ctxt; + + if (!sxe2_vsi_id_is_valid(adapter, params->vsi_idx)) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_idx); + return false; + } + + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_idx); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_idx); + return false; + } + + if ((params->q_cnt > vsi->txqs.q_cnt) || !params->q_cnt) { + LOG_ERROR_BDF(" txq cnt:%u invalid max:%u vsi_id:%d.\n", + params->q_cnt, vsi->txqs.q_cnt, params->vsi_idx); + return false; + } + + for (i = 0; i < params->q_cnt; i++) { + ctxt = &params->ctxts[i]; + if (ctxt->queue_id >= vsi->txqs.q_cnt || + ctxt->sched_mode >= SXE2_UCMD_TXQ_MODE_INVALID || + !sxe2_queue_depth_is_valid(ctxt->depth)) { + LOG_ERROR_BDF("vsi_id:%u vsi_id_in_dev:%d queue_id:%u\n" + "txq cnt:%u depth:%u\n", + params->vsi_idx, vsi->idx_in_dev, + ctxt->queue_id, vsi->txqs.q_cnt, + ctxt->depth); + return false; + } + } + + return true; +} + +s32 sxe2_txq_cfg_ena_common_handle(struct sxe2_adapter *adapter, + struct sxe2_txq_ucmd_en_params *params) +{ + s32 ret = 0; + u32 i = 0; + u16 q_cnt; + struct sxe2_queue *txq; + struct sxe2_vsi *vsi = NULL; + struct sxe2_txq_ucmd_ctxt *q_ctxt_info; + enum sxe2_txsched_node_owner owner; + struct sxe2_fwc_cfg_txq_req req = {.ctxt = {0}, .leaf = {0}}; + + if (!adapter || !params) { + LOG_ERROR_BDF("tx cfg enable params invalid.\n"); + ret = -EINVAL; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (!sxe2_txq_cfg_param_is_valid(adapter, params)) { + LOG_ERROR_BDF("tx cfg enable params invalid.\n"); + ret = -EINVAL; + goto l_unlock; + } + + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_idx); + if (!vsi) { + ret = -EINVAL; + LOG_ERROR_BDF("vsi_id:%d vsi null.\n", params->vsi_idx); + goto l_unlock; + } + q_cnt = params->q_cnt; + for (i = 0; i < q_cnt; i++) { + q_ctxt_info = &params->ctxts[i]; + txq = vsi->txqs.q[q_ctxt_info->queue_id]; + + txq->depth = q_ctxt_info->depth; + txq->desc.dma = q_ctxt_info->dma_addr; + + ret = sxe2_txq_ctxt_fill(vsi, txq, &req.ctxt); + if (ret) { + LOG_ERROR("q_id[%#x] depth[%#x]\n", + q_ctxt_info->queue_id, txq->depth); + goto l_unlock; + } + + if (q_ctxt_info->sched_mode == SXE2_UCMD_TXQ_MODE_TM) + req.ctxt.is_tm = 1; + + if (q_ctxt_info->sched_mode == SXE2_UCMD_TXQ_MODE_TM || + q_ctxt_info->sched_mode == SXE2_UCMD_TXQ_MODE_HIGH_PERFORMANCE) { + ret = sxe2_fwc_txq_ctxt_cfg(vsi, &req); + if (ret) { + LOG_ERROR("q_id[%#x] depth[%#x] txq[%#x]\n", + q_ctxt_info->queue_id, + txq->depth, txq->idx_in_pf); + goto l_unlock; + } + + } else if (q_ctxt_info->sched_mode == SXE2_UCMD_TXQ_MODE_DEFAULT) { + owner = ((vsi->type == SXE2_VSI_T_DPDK_PF) || + (vsi->type == SXE2_VSI_T_DPDK_VF)) ? 
+ SXE2_TXSCHED_NODE_OWNER_USER : SXE2_TXSCHED_NODE_OWNER_LAN; + + ret = sxe2_txsched_txq_node_add(adapter, vsi, txq, owner, &req); + if (ret) { + LOG_ERROR("hw tx txq[%u] start failed\n", + txq->idx_in_pf); + goto l_unlock; + } + } + } + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); +l_end: + if (ret && vsi) + sxe2_txq_ucmd_err_handle(vsi, params, i); + + return ret; +} + +static bool sxe2_txq_dis_param_is_valid(struct sxe2_adapter *adapter, + struct sxe2_txq_ucmd_dis_params *params) +{ + struct sxe2_vsi *vsi; + + if (!sxe2_vsi_id_is_valid(adapter, params->vsi_id)) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id); + return false; + } + + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id); + return false; + } + + if (params->q_idx >= vsi->txqs.q_cnt) { + LOG_ERROR_BDF("txq id:%u invalid max:%u vsi_id:%d.\n", + params->q_idx, vsi->txqs.q_cnt, params->vsi_id); + return false; + } + + if (params->sched_mode >= SXE2_UCMD_TXQ_MODE_INVALID) { + LOG_ERROR_BDF("sched_mode:%u invalid max:%u vsi_id:%d.\n", + params->sched_mode, SXE2_UCMD_TXQ_MODE_INVALID, + params->vsi_id); + return false; + } + + return true; +} + +s32 sxe2_txq_dis_common_handle(struct sxe2_adapter *adapter, + struct sxe2_txq_ucmd_dis_params *params) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + struct sxe2_queue *txq; + + if (!adapter || !params) { + LOG_ERROR_BDF("tx queue disable params invalid.\n"); + ret = -EINVAL; + goto l_end; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (!sxe2_txq_dis_param_is_valid(adapter, params)) { + LOG_ERROR_BDF("tx queue disable params invalid.\n"); + ret = -EINVAL; + goto l_unlock; + } + + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("tx queue disable failed, vsi=NULL, vsi_idx=%d\n", + params->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + txq = vsi->txqs.q[params->q_idx]; + + if (params->sched_mode == SXE2_UCMD_TXQ_MODE_TM || + params->sched_mode == SXE2_UCMD_TXQ_MODE_HIGH_PERFORMANCE) { + ret = sxe2_fwc_txq_stop(vsi, txq); + if (ret) { + LOG_ERROR_BDF("vsi %d type %d hw tx txq[%u] stop failed\n", + vsi->idx_in_dev, vsi->type, txq->idx_in_pf); + goto l_unlock; + } + } else if (params->sched_mode == SXE2_UCMD_TXQ_MODE_DEFAULT) { + ret = sxe2_txsched_txq_node_del(adapter, txq); + if (ret) { + LOG_ERROR_BDF("vsi %d type %d hw tx txq[%u] stop failed\n", + vsi->idx_in_dev, vsi->type, txq->idx_in_pf); + goto l_unlock; + } + } + + LOG_INFO_BDF("vsi %d type %d hw tx txq[%u] stop success\n", + vsi->idx_in_dev, vsi->type, txq->idx_in_pf); +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..0ece6f1da88a6872402e28c29724361f19030530 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_tx.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_tx.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_TX_H__ +#define __SXE2_TX_H__ + +#include <linux/skbuff.h> +#ifdef HAVE_SCTP +#include <linux/sctp.h> +#endif +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <net/dst_metadata.h> + +#include "sxe2.h" +#include "sxe2_drv_cmd.h" + +#define SXE2_BYTES_PER_WORD (2) +#define SXE2_BYTES_PER_DWORD (4) + +#define SXE2_UCMD_TXQ_MODE_DEFAULT 0 +#define SXE2_UCMD_TXQ_MODE_TM 1 +#define SXE2_UCMD_TXQ_MODE_HIGH_PERFORMANCE 2 +#define SXE2_UCMD_TXQ_MODE_INVALID 3 + +#define SXE2_TXQ_LEGACY (1) +#define SXE2_TXQ_VMVF_TYPE_VF (0x0) +#define SXE2_TXQ_VMVF_TYPE_VM (0x1) +#define SXE2_TXQ_VMVF_TYPE_PF (0x2) + +#define SXE2_DESC_UNUSED(R) \ + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->depth) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) +#define SXE2_TX_DESC(q, i) \ + (&(((union sxe2_tx_data_desc *)((q)->desc.base_addr))[i])) +#define SXE2_TXCD(q, i) \ + (&(((struct sxe2_tx_context_desc *)((q)->desc.base_addr))[i])) +#define SXE2_TXFD(q, i) \ + (&(((struct sxe2_tx_fnav_desc *)((q)->desc.base_addr))[i])) + +#define SXE2_TXDD_CMD_S (4) +#define SXE2_TXDD_OFFSET_S (16) +#define SXE2_TXDD_BUF_SZ_S (34) +#define SXE2_TXDD_L2TAG1_S (48) +#define SXE2_TXDD_CMD_M (0xFFFUL << SXE2_TXDD_CMD_S) +#define SXE2_TXDD_OFFSET_M (0x3FFFFULL << SXE2_TXDD_OFFSET_S) +#define SXE2_TXDD_BUF_SZ_M (0x3FFFULL << SXE2_TXDD_BUF_SZ_S) +#define SXE2_TXDD_L2TAG1_M (0xFFFFULL << SXE2_TXDD_L2TAG1_S) + +enum sxe2_txdd_offset_fields_relative_shift { + SXE2_TXDD_MACLEN_S = 0, + SXE2_TXDD_IPLEN_S = 7, + SXE2_TXDD_L4LEN_S = 14 +}; +#define SXE2_TXDD_MACLEN_M (0x7FUL << SXE2_TXDD_MACLEN_S) +#define SXE2_TXDD_IPLEN_M (0x7FUL << SXE2_TXDD_IPLEN_S) +#define SXE2_TXDD_L4LEN_M (0xFUL << SXE2_TXDD_L4LEN_S) + +#define SXE2_TXCD_QW0_EIPLEN_S (2) +#define SXE2_TXCD_QW0_L4TUNT_S (9) +#define SXE2_TXCD_QW0_L4TUNLEN_S (12) +#define SXE2_TXCD_QW0_L4T_CS_S (23) +#define SXE2_TXCD_QW0_L4TUNT_UDP_M BIT_ULL(SXE2_TXCD_QW0_L4TUNT_S) +#define SXE2_TXCD_QW0_L4TUNT_GRE_M (0x2ULL << SXE2_TXCD_QW0_L4TUNT_S) +#define SXE2_TXCD_QW0_L4T_CS_M BIT_ULL(SXE2_TXCD_QW0_L4T_CS_S) + +#define SXE2_TXCD_QW1_CMD_S (4) +#define SXE2_TXCD_QW1_TSO_TOTAL_LEN_S (30) +#define SXE2_TXCD_QW1_MSS_S (50) +#define SXE2_TXCD_QW1_VSI_S (50) +#define SXE2_TXCD_QW1_CMD_M (0x7FUL << SXE2_TXCD_QW1_CMD_S) +#define SXE2_TXCD_QW1_TSO_TOTAL_LEN_M \ + (0x3FFFFULL << SXE2_TXCD_QW1_TSO_TOTAL_LEN_S) +#define SXE2_TXCD_QW1_VSI_M (0x3FFULL << SXE2_TXCD_QW1_VSI_S) +#define SXE2_TXCD_TSYN_REG_SHIFT (30) +#define SXE2_TXCD_QW1_IPSEC_MODE_S (11) +#define SXE2_TXCD_QW1_IPSEC_MODE_M BIT_ULL(SXE2_TXCD_QW1_IPSEC_MODE_S) +#define SXE2_TXCD_QW1_IPSEC_EN_S (12) +#define SXE2_TXCD_QW1_IPSEC_EN_M BIT_ULL(SXE2_TXCD_QW1_IPSEC_EN_S) +#define SXE2_TXCD_QW1_IPSEC_ENGINE_MODE_S (13) +#define SXE2_TXCD_QW1_IPSEC_ENGINE_MODE_M \ + BIT_ULL(SXE2_TXCD_QW1_IPSEC_ENGINE_MODE_S) +#define SXE2_TXCD_QW1_IPSEC_SA_IDX_S (16) +#define SXE2_TXCD_QW1_IPSEC_SA_IDX_M (0x1FFF << SXE2_TXCD_QW1_IPSEC_SA_IDX_S) + +#define SXE2_FNAV_TX_DESC_QW0_Q_INDEX_SHIFT (0) +#define SXE2_FNAV_TX_DESC_QW0_Q_INDEX_MASK \ + ((0x7FFULL) << SXE2_FNAV_TX_DESC_QW0_Q_INDEX_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_COMP_Q_SHIFT (11) +#define SXE2_FNAV_TX_DESC_QW0_COMP_Q_MASK \ + ((0x1ULL) << SXE2_FNAV_TX_DESC_QW0_COMP_Q_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_COMP_RPT_SHIFT (12) +#define SXE2_FNAV_TX_DESC_QW0_COMP_RPT_MASK \ + ((0x3ULL) << SXE2_FNAV_TX_DESC_QW0_COMP_RPT_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_COMP_RPT_NONE (0) +#define SXE2_FNAV_TX_DESC_QW0_COMP_RPT_FAIL (1) +#define 
SXE2_FNAV_TX_DESC_QW0_COMP_RPT_ANY (2) +#define SXE2_FNAV_TX_DESC_QW0_COMP_RPT_RSV (3) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_SHIFT (14) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_MASK \ + ((0x3ULL) << SXE2_FNAV_TX_DESC_QW0_FD_SPACE_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_GUAR (0) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_BEST (1) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_GUAR_FIRST \ + (2) +#define SXE2_FNAV_TX_DESC_QW0_FD_SPACE_BEST_FIRST \ + (3) +#define SXE2_FNAV_TX_DESC_QW0_STAT_CNT_SHIFT (16) +#define SXE2_FNAV_TX_DESC_QW0_STAT_CNT_MASK \ + ((0x3FFFULL) << SXE2_FNAV_TX_DESC_QW0_STAT_CNT_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_SHIFT (30) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_MASK \ + ((0x3ULL) << SXE2_FNAV_TX_DESC_QW0_STAT_ENA_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_NONE (0) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_PKTS (1) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_BYTES (2) +#define SXE2_FNAV_TX_DESC_QW0_STAT_ENA_ALL (3) +#define SXE2_FNAV_TX_DESC_QW0_EVICT_ENA_SHIFT (32) +#define SXE2_FNAV_TX_DESC_QW0_EVICT_ENA_MASK \ + ((0x1ULL) << SXE2_FNAV_TX_DESC_QW0_EVICT_ENA_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_SHIFT (33) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_MASK \ + ((0x7ULL) << SXE2_FNAV_TX_DESC_QW0_TOQ_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_SHIFT (36) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_MASK \ + ((0x7ULL) << SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_ZERO (0) +#define SXE2_FNAV_TX_DESC_QW0_TOQ_PRIO_THREE (3) +#define SXE2_FNAV_TX_DESC_QW0_DROP_SHIFT (40) +#define SXE2_FNAV_TX_DESC_QW0_DROP_MASK \ + ((0x1ULL) << SXE2_FNAV_TX_DESC_QW0_DROP_SHIFT) +#define SXE2_FNAV_TX_DESC_QW0_FLOW_ID_SHIFT (48) +#define SXE2_FNAV_TX_DESC_QW0_FLOW_ID_MASK \ + ((0xFFFFULL) << SXE2_FNAV_TX_DESC_QW0_FLOW_ID_SHIFT) + +#define SXE2_FNAV_TX_DESC_QW1_DTYPE_SHIFT (0) +#define SXE2_FNAV_TX_DESC_QW1_DTYPE_MASK \ + ((0xF) << SXE2_FNAV_TX_DESC_QW1_DTYPE_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_SHIFT (4) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_MASK \ + ((0x3) << SXE2_FNAV_TX_DESC_QW1_PCMD_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_UPDATE (0) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_REMOVE (1) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_ADD \ + (2) +#define SXE2_FNAV_TX_DESC_QW1_PCMD_REPLACE \ + (3) +#define SXE2_FNAV_TX_DESC_QW1_FD_VSI_SHIFT (14) +#define SXE2_FNAV_TX_DESC_QW1_FD_VSI_MASK \ + ((0x3FFULL) << SXE2_FNAV_TX_DESC_QW1_FD_VSI_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_SWAP_SHIFT (24) +#define SXE2_FNAV_TX_DESC_QW1_SWAP_MASK \ + ((0x1ULL) << SXE2_FNAV_TX_DESC_QW1_SWAP_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_SHIFT (25) +#define SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_MASK \ + ((0x7ULL) << SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_ONE (1) +#define SXE2_FNAV_TX_DESC_QW1_FDID_PRIO_THREE (3) +#define SXE2_FNAV_TX_DESC_QW1_FDID_MDID_SHIFT (28) +#define SXE2_FNAV_TX_DESC_QW1_FDID_MDID_MASK \ + ((0xFULL) << SXE2_FNAV_TX_DESC_QW1_FDID_MDID_SHIFT) +#define SXE2_FNAV_TX_DESC_QW1_FDID_MDID_FNAV (5) +#define SXE2_FNAV_TX_DESC_QW1_FDID_SHIFT (32) +#define SXE2_FNAV_TX_DESC_QW1_FDID_MASK \ + ((0xFFFFFFFFULL) << SXE2_FNAV_TX_DESC_QW1_FDID_SHIFT) + +#define SXE2_TXDD_MACLEN_MAX \ + ((SXE2_TXDD_MACLEN_M >> SXE2_TXDD_MACLEN_S) * SXE2_BYTES_PER_WORD) +#define SXE2_TXDD_IPLEN_MAX \ + ((SXE2_TXDD_IPLEN_M >> SXE2_TXDD_IPLEN_S) * SXE2_BYTES_PER_DWORD) +#define SXE2_TXDD_L4LEN_MAX \ + ((SXE2_TXDD_L4LEN_M >> SXE2_TXDD_L4LEN_S) * SXE2_BYTES_PER_DWORD) + +#define SXE2_TXCD_QW1_MSS_MIN (88) + +enum sxe2_tx_desc_type { + SXE2_TX_DESC_DTYPE_DATA = 0x0, + 
SXE2_TX_DESC_DTYPE_CTXT = 0x1, + SXE2_TX_DESC_DTYPE_FLTR_PROG = 0x8, + SXE2_TX_DESC_DTYPE_DESC_DONE = 0xF, +}; + +enum sxe2_tx_cd_cmd_bits { + SXE2_TXCD_CMD_TSO = 0x01, + SXE2_TXCD_CMD_TSYN = 0x02, + SXE2_TXCD_CMD_IL2TAG2 = 0x04, + SXE2_TXCD_CMD_IL2TAG2_IL2H = 0x08, + SXE2_TXCD_CMD_SWTCH_NOTAG = 0x00, + SXE2_TXCD_CMD_SWTCH_UPLINK = 0x10, + SXE2_TXCD_CMD_SWTCH_LOCAL = 0x20, + SXE2_TXCD_CMD_SWTCH_VSI = 0x30, + SXE2_TXCD_CMD_RESERVED = 0x40 +}; + +enum sxe2_tx_ctxt_desc_eipt_bits { + SXE2_TXCD_EIPT_NONE = 0x0, + SXE2_TXCD_EIPT_IPV6 = 0x1, + SXE2_TXCD_IPV4_NO_CSUM = 0x2, + SXE2_TXCD_IPV4 = 0x3, +}; + +enum sxe2_tx_data_desc_cmd_bits { + SXE2_TXDD_CMD_EOP = 0x0001, + SXE2_TXDD_CMD_RS = 0x0002, + SXE2_TXDD_CMD_MACSEC = 0x0004, + SXE2_TXDD_CMD_IL2TAG1 = 0x0008, + SXE2_TXDD_CMD_DUMMY = 0x0010, + SXE2_TXDD_CMD_IIPT_IPV6 = 0x0020, + SXE2_TXDD_CMD_IIPT_IPV4 = 0x0040, + SXE2_TXDD_CMD_IIPT_IPV4_CSUM = 0x0060, + SXE2_TXDD_CMD_L4T_EOFT_TCP = 0x0100, + SXE2_TXDD_CMD_L4T_EOFT_SCTP = 0x0200, + SXE2_TXDD_CMD_L4T_EOFT_UDP = 0x0300, + SXE2_TXDD_CMD_RE = 0x0400, +}; + +#define SXE2_TX_FEATURE_VLAN_MASK (0xffff0000) +#define SXE2_TX_FEATURE_VLAN_PR_MASK (0xe0000000) +#define SXE2_TX_FEATURE_VLAN_SHIFT (16) +#define SXE2_TX_FEATURE_VLAN_PR_SHIFT (29) +enum sxe2_tx_features { + SXE2_TX_FEATURE_TSO = BIT(0), + SXE2_TX_FEATURE_HW_VLAN = BIT(1), + SXE2_TX_FEATURE_MACLEN = BIT(2), + SXE2_TX_FEATURE_DUMMY_PKT = BIT(3), + SXE2_TX_FEATURE_TSYN = BIT(4), + SXE2_TX_FEATURE_IPV4 = BIT(5), + SXE2_TX_FEATURE_IPV6 = BIT(6), + SXE2_TX_FEATURE_TUNNEL = BIT(7), + SXE2_TX_FEATURE_HW_OUTER_SINGLE_VLAN = BIT(8), +}; + +struct sxe2_tx_offload_info { + struct sxe2_adapter *adapter; + u32 data_desc_cmd; + u32 data_desc_offset; + u32 data_desc_l2tag1; + u32 ctxt_desc_tunnel; + u64 ctxt_desc_qw1; + u16 ctxt_desc_ipsec_offset; + u16 ctxt_desc_l2tag2; +}; + +union sxe2_ip_hdr { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union sxe2_l4_hdr { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +struct sxe2_tx_fnav_desc { + __le64 qidx_compq_space_stat; + __le64 dtype_cmd_vsi_fdid; +}; + +struct sxe2_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 ipset_offset; + __le64 qw1; +}; + +union sxe2_tx_data_desc { + struct { + __le64 buf_addr; + __le64 cmd_type_offset_bsz; + } read; + struct { + __le64 rsvd; + __le64 dd; + } wb; +}; + +struct sxe2_txq_ucmd_ctxt { + u32 sched_mode; + u16 queue_id; + u16 depth; + u64 dma_addr; +}; + +struct sxe2_txq_ucmd_en_params { + u16 q_cnt; + u16 vsi_idx; + struct sxe2_txq_ucmd_ctxt ctxts[]; +}; + +struct sxe2_txq_ucmd_dis_params { + u16 vsi_id; + u16 q_idx; + u8 sched_mode; +}; + +static inline void +sxe2_tx_desc_setup_for_tso(struct sxe2_tx_offload_info *offload, u64 tso_len, + u64 mss) +{ + offload->ctxt_desc_qw1 = + (u64)(SXE2_TX_DESC_DTYPE_CTXT | + (SXE2_TXCD_CMD_TSO << SXE2_TXCD_QW1_CMD_S) | + (tso_len << SXE2_TXCD_QW1_TSO_TOTAL_LEN_S) | + (mss << SXE2_TXCD_QW1_MSS_S)); +} + +static inline void +sxe2_tx_desc_setup_for_csum(struct sxe2_tx_offload_info *offload, u32 l2_len, + u32 l3_len, u32 l4_len, u32 cmd) +{ + offload->data_desc_offset |= ((l2_len / 2) << SXE2_TXDD_MACLEN_S) | + ((l3_len / 4) << SXE2_TXDD_IPLEN_S) | + (l4_len << SXE2_TXDD_L4LEN_S); + + offload->data_desc_cmd |= cmd; +} + +static inline void +sxe2_tx_desc_setup_for_ptp(struct sxe2_tx_offload_info *offload, u64 idx) +{ + offload->ctxt_desc_qw1 |= (u64)(SXE2_TX_DESC_DTYPE_CTXT | + (SXE2_TXCD_CMD_TSYN << SXE2_TXCD_QW1_CMD_S) | + (idx << SXE2_TXCD_TSYN_REG_SHIFT)); +} + 
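+/* + * Worked example (illustrative only, not part of this patch): for an + * IPv4/TCP frame with a 14-byte MAC header, a 20-byte IP header and a + * 20-byte TCP header, a caller would build the checksum offload roughly as + * + * sxe2_tx_desc_setup_for_csum(&offload, 14, 20, 20 / 4, + * SXE2_TXDD_CMD_IIPT_IPV4_CSUM | + * SXE2_TXDD_CMD_L4T_EOFT_TCP); + * + * which packs MACLEN = 14 / 2 words, IPLEN = 20 / 4 dwords and L4LEN = 5 + * dwords into data_desc_offset per the SXE2_TXDD_*LEN_S shifts above; + * sxe2_tx_data_desc_qword1_setup() later folds data_desc_cmd and + * data_desc_offset into the descriptor's qword1. + */ + 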
+static inline void +sxe2_tx_desc_setup_for_vlan(struct sxe2_tx_offload_info *offload, u16 vlan_tci) +{ + offload->data_desc_l2tag1 = vlan_tci; + offload->data_desc_cmd |= SXE2_TXDD_CMD_IL2TAG1; +} + +#ifdef HAVE_METADATA_PORT_INFO +static inline void +sxe2_eswitch_tx_desc_setup(struct sxe2_tx_offload_info *offload, + struct sk_buff *skb) +{ + struct metadata_dst *dst = skb_metadata_dst(skb); + u64 cd_cmd, dst_vsi; + + if (!dst) { + cd_cmd = SXE2_TXCD_CMD_SWTCH_UPLINK << SXE2_TXCD_QW1_CMD_S; + offload->ctxt_desc_qw1 |= (cd_cmd | SXE2_TX_DESC_DTYPE_CTXT); + } else { + cd_cmd = SXE2_TXCD_CMD_SWTCH_VSI << SXE2_TXCD_QW1_CMD_S; + dst_vsi = ((u64)dst->u.port_info.port_id + << SXE2_TXCD_QW1_VSI_S) & + SXE2_TXCD_QW1_VSI_M; + offload->ctxt_desc_qw1 = + cd_cmd | dst_vsi | SXE2_TX_DESC_DTYPE_CTXT; + } +} +#else +static inline void +sxe2_eswitch_tx_desc_setup(struct sxe2_tx_offload_info *offload, + struct sk_buff *skb) +{ +} +#endif + +s32 sxe2_tx_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_xdp_tx_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_tx_hw_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_xdp_tx_hw_cfg(struct sxe2_vsi *vsi); + +s32 sxe2_txqs_stop(struct sxe2_vsi *vsi); + +s32 sxe2_xdp_txqs_stop(struct sxe2_vsi *vsi); + +void sxe2_tx_rings_res_free(struct sxe2_vsi *vsi); + +void sxe2_tx_ring_clean(struct sxe2_queue *txq); + +s32 sxe2_tx_ring_alloc(struct sxe2_queue *txq, struct sxe2_vsi *vsi); + +s32 sxe2_tx_rings_alloc(struct sxe2_vsi *vsi); + +netdev_tx_t sxe2_xmit(struct sk_buff *skb, struct net_device *netdev); + +s32 sxe2_hw_txqs_disable_check(struct sxe2_vsi *vsi); + +s32 sxe2_prgm_fnav_fltr(struct sxe2_vsi *vsi, + struct sxe2_tx_fnav_desc *fnav_desc, u8 *raw_packet); + +void sxe2_ctrl_txq_irq_clean(struct sxe2_queue *txq); + +bool sxe2_txq_irq_clean(struct sxe2_queue *txq, s32 napi_budget); + +s32 sxe2_fwc_txq_stop(struct sxe2_vsi *vsi, struct sxe2_queue *txq); + +s32 sxe2_txq_stop(struct sxe2_vsi *vsi, struct sxe2_queue *txq); +s32 sxe2_hw_txq_configure(struct sxe2_vsi *vsi, struct sxe2_queue *txq); + +void sxe2_tx_ring_free(struct sxe2_queue *txq); + +static inline void sxe2_xdp_ring_update_tail(struct sxe2_queue *xdp_ring) +{ + wmb(); + writel(xdp_ring->next_to_use, xdp_ring->desc.tail); +} + +static inline struct sxe2_tx_buf * +sxe2_tx_first_buffer_get(struct sk_buff *skb, struct sxe2_queue *txq) +{ + struct sxe2_tx_buf *first_buf; + + first_buf = &txq->tx_buf[txq->next_to_use]; + first_buf->skb = skb; + first_buf->bytecount = max_t(u32, skb->len, ETH_ZLEN); + first_buf->gso_segs = 1; + first_buf->tx_features = 0; + + return first_buf; +} + +#ifdef HAVE_XDP_SUPPORT +s32 sxe2_xmit_xdp_ring(void *data, u16 size, struct sxe2_queue *xdp_ring); +#endif + +static inline __le64 +sxe2_tx_data_desc_qword1_setup(struct sxe2_tx_offload_info *offload, u32 size) +{ + return cpu_to_le64(SXE2_TX_DESC_DTYPE_DATA | + ((u64)offload->data_desc_cmd << SXE2_TXDD_CMD_S) | + ((u64)offload->data_desc_offset << SXE2_TXDD_OFFSET_S) | + ((u64)size << SXE2_TXDD_BUF_SZ_S) | + ((u64)offload->data_desc_l2tag1 << SXE2_TXDD_L2TAG1_S)); +} + +void sxe2_tx_timeout(struct net_device *netdev, u32 txqueue); + +void sxe2_tx_hang_check_subtask(struct sxe2_adapter *adapter); + +s32 sxe2_txq_ctxt_fill(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + struct sxe2_txq_ctxt *ctxt); + +s32 sxe2_fwc_txq_ctxt_cfg(struct sxe2_vsi *vsi, + struct sxe2_fwc_cfg_txq_req *req); + +s32 sxe2_xmit_pkt(struct sxe2_queue *txq, struct sxe2_tx_buf *first_buf, + struct sxe2_tx_offload_info *offload); + +s32 sxe2_txq_cfg_ena_common_handle(struct sxe2_adapter *adapter, + 
struct sxe2_txq_ucmd_en_params *params); + +s32 sxe2_txq_dis_common_handle(struct sxe2_adapter *adapter, + struct sxe2_txq_ucmd_dis_params *params); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.c new file mode 100644 index 0000000000000000000000000000000000000000..e5589a143704831b10364061550eb35a633b8d19 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.c @@ -0,0 +1,3583 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_txsched.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_hw.h" +#include "sxe2_log.h" +#include "sxe2_vsi.h" +#include "sxe2_sriov.h" +#include "sxe2_txsched.h" +#include "sxe2_com_cdev.h" +#include "sxe2_cmd_channel.h" +#include "sxe2_spec.h" + +#define SXE2_AA_MODE_QSET_NUM 16 +#define SXE2_TXSCH_LAYER_BY_TEID_MASK 0x7 +#define SXE2_TXSCH_VF_IDX_INVAL SXE2_VF_NUM +#define SXE2_TXSCHED_NODE_TEID_GET(x) ((x)->info.node_teid) + +STATIC u32 node_adj_lvl = SXE2_TXSCH_NODE_ADJ_LVL_DFLT; +module_param(node_adj_lvl, uint, 0644); +MODULE_PARM_DESC(node_adj_lvl, + "txsch node adj lvl, 0:Physical, 1:Data Link 2:Network 3(Default):Transport"); + +static inline u8 sxe2_txsch_node_adj_lvl_get(void) +{ + return (u8)node_adj_lvl; +} + +static inline u8 sxe2_txsch_teid2hwl(u16 teid) +{ + return (u8)teid & SXE2_TXSCH_LAYER_BY_TEID_MASK; +} + +static inline struct sxe2_txsched_context * +sxe2_txsched_ctxt_get(struct sxe2_adapter *adapter) +{ + return &adapter->tx_sched_ctxt; +} + +static inline u8 sxe2_txsched_layer_max_get(struct sxe2_txsched_context *ctxt) +{ + return ctxt->cap.generic.layer_max; +} + +static inline u8 sxe2_txsched_sw_vsi_layer_get(void) +{ + return SXE2_TXSCHED_SW_VSI_LAYER; +} + +static inline u8 sxe2_txsched_sw_qp_layer_get(void) +{ + return SXE2_TXSCHED_SW_QG_LAYER; +} + +static u8 sxe2_txsched_sw_q_layer_get(void) +{ + return SXE2_TXSCHED_SW_Q_LAYER; +} + +bool sxe2_txsch_is_vf_vsi_agg_mode(struct sxe2_adapter *adapter) +{ + return (sxe2_vf_num_get(adapter) <= SXE2_TXSCH_VF_VSIG_AGG_MAX) ? true : false; +} + +static inline bool sxe2_txsch_is_vf_by_vsitype(enum sxe2_vsi_type vsi_type) +{ + if (vsi_type == SXE2_VSI_T_VF || vsi_type == SXE2_VSI_T_DPDK_VF) + return true; + else + return false; +} + +STATIC enum sxe2_txsch_vsi_type sxe2_txsch_vsi_type_get(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi) +{ + if (!(vsi->type == SXE2_VSI_T_VF || vsi->type == SXE2_VSI_T_DPDK_VF)) { + return (vsi->type == SXE2_VSI_T_DPDK_PF) ? OTHER_MODE_UVSI : OTHER_MODE_KVSI; + } + + if (sxe2_txsch_is_vf_vsi_agg_mode(adapter)) { + return (vsi->type == SXE2_VSI_T_VF) ? + FUSION_VF2VSIG_MODE_VF_KVSI : FUSION_VF2VSIG_MODE_VF_UVSI; + } else { + return (vsi->type == SXE2_VSI_T_VF) ? 
+ FUSION_VF2VSI_NODE_VF_KVSI : FUSION_VF2VSI_MODE_VF_UVSI; + } + + return OTHER_MODE_UNKOWN; +} + +struct sxe2_txsched_node * +sxe2_txsched_find_node_by_teid(struct sxe2_txsched_node *start_node, u16 teid) +{ + u16 i; + struct sxe2_txsched_node *node; + + if (!start_node) + return NULL; + + if (SXE2_TXSCHED_NODE_TEID_GET(start_node) == teid) { + node = start_node; + goto l_end; + } + + if (!start_node->child_cnt || + start_node->info.data.hw_layer >= SXE2_TXSCHED_LAYER_MAX) { + node = NULL; + goto l_end; + } + + for (i = 0; i < start_node->child_cnt; i++) { + if (SXE2_TXSCHED_NODE_TEID_GET(start_node->child[i]) == teid) { + node = start_node->child[i]; + goto l_end; + } + } + + for (i = 0; i < start_node->child_cnt; i++) { + node = sxe2_txsched_find_node_by_teid(start_node->child[i], teid); + if (node) + break; + } + +l_end: + return node; +} + +STATIC struct sxe2_vsi_txsched_queue * +sxe2_txsched_q_ctxt_get(struct sxe2_vsi *vsi, u8 tc, u16 q_idx_in_vsi) +{ + u32 idx = (u32)(q_idx_in_vsi - vsi->tc.info[tc].txq_offset); + + return &vsi->txsched.q[tc][idx]; +} + +static inline void sxe2_txsched_q_ctxt_dflt_bw_cfg(struct sxe2_vsi *vsi, u8 tc, + u16 q_idx_in_tc) +{ + struct sxe2_vsi_txsched_queue *q_ctxt; + + q_ctxt = &vsi->txsched.q[tc][q_idx_in_tc]; + q_ctxt->bw_info.rl_type = SXE2_NODE_RL_TYPE_EIR; + q_ctxt->bw_info.pir_bw = SXE2_TXSCHED_DFLT_BW; + q_ctxt->bw_info.cir_bw = SXE2_TXSCHED_DFLT_BW; +} + +void sxe2_txsched_vsi_q_ctxt_free(struct sxe2_vsi *vsi) +{ + u8 i; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!sxe2_txsched_support_chk(adapter)) + return; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + return; + + sxe2_for_each_tc(i) { + if (vsi->txsched.q[i]) { + devm_kfree(&vsi->adapter->pdev->dev, vsi->txsched.q[i]); + vsi->txsched.q[i] = NULL; + } + } + + LOG_INFO_BDF("txsched vsi[%d] txq ctxt free success\n", vsi->idx_in_dev); +} + +STATIC s32 sxe2_txsched_vsi_q_ctxt_alloc(struct sxe2_vsi *vsi, u8 tc, + u16 q_cnt) +{ + u16 i; + u16 prev_num; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = &adapter->pdev->dev; + struct sxe2_vsi_txsched_queue *q_ctxt; + + if (!vsi->txsched.q[tc]) { + vsi->txsched.q[tc] = devm_kcalloc(dev, q_cnt, sizeof(*q_ctxt), GFP_KERNEL); + if (!vsi->txsched.q[tc]) { + LOG_ERROR_BDF("txsche vsi queue ctxt alloc failed\n"); + return -ENOMEM; + } + + vsi->txsched.q_cnt[tc] = q_cnt; + + for (i = 0; i < q_cnt; i++) + sxe2_txsched_q_ctxt_dflt_bw_cfg(vsi, tc, i); + + LOG_INFO_BDF("txsched vsi[%d] tc[%d] txq ctxt alloc success\n", + vsi->idx_in_dev, tc); + + } else if (q_cnt > vsi->txsched.q_cnt[tc]) { + prev_num = vsi->txsched.q_cnt[tc]; + + q_ctxt = devm_kcalloc(dev, q_cnt, sizeof(*q_ctxt), GFP_KERNEL); + if (!q_ctxt) { + LOG_ERROR_BDF("txsche vsi queue ctxt alloc failed\n"); + return -ENOMEM; + } + + (void)memcpy(q_ctxt, vsi->txsched.q[tc], (prev_num * sizeof(*q_ctxt))); + devm_kfree(dev, vsi->txsched.q[tc]); + vsi->txsched.q[tc] = q_ctxt; + vsi->txsched.q_cnt[tc] = q_cnt; + + for (i = prev_num; i < q_cnt; i++) + sxe2_txsched_q_ctxt_dflt_bw_cfg(vsi, tc, i); + + LOG_INFO_BDF("txsched vsi[%d] tc[%d] txq ctxt extend success\n", + vsi->idx_in_dev, tc); + } + + return 0; +} + +static inline void +sxe2_txsched_node_rl_info_get(struct sxe2_txsched_node *node, u8 rl_type, + struct scbge_txsched_node_bw *rl) +{ + switch (rl_type) { + case SXE2_NODE_RL_TYPE_CIR: + rl->bw = node->info.data.cir.bw; + rl->prof_id = node->info.data.cir.prof_id; + break; + + case SXE2_NODE_RL_TYPE_EIR: + case SXE2_NODE_RL_TYPE_SRL: + rl->bw = 
node->info.data.srlPir.bw; + rl->prof_id = node->info.data.srlPir.prof_id; + break; + default: + rl->bw = SXE2_TXSCHED_DFLT_BW; + rl->prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + break; + } +} + +static inline void sxe2_txsched_node_rl_record(struct sxe2_vsi *vsi, + struct sxe2_txsched_node *node, + u8 rl_type, u32 bw, + u32 prof_idx) +{ + struct sxe2_vsi_txsched_queue *q_ctxt; + u8 q_sw_layer = sxe2_txsched_sw_q_layer_get(); + + node->info.data.rl_type |= rl_type; + + switch (rl_type) { + case SXE2_NODE_RL_TYPE_CIR: + node->info.data.cir.bw = bw; + node->info.data.cir.prof_id = prof_idx; + break; + + case SXE2_NODE_RL_TYPE_EIR: + case SXE2_NODE_RL_TYPE_SRL: + node->info.data.srlPir.bw = bw; + node->info.data.srlPir.prof_id = prof_idx; + break; + } + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + return; + + if (node->info.data.hw_layer == (q_sw_layer + 1)) { + q_ctxt = sxe2_txsched_q_ctxt_get(vsi, node->tc, + node->txq_idx_in_vsi); + + q_ctxt->bw_info.rl_type = rl_type; + q_ctxt->teid = node->info.node_teid; + q_ctxt->idx_in_dev = node->txq_idx_in_dev; + switch (rl_type) { + case SXE2_NODE_RL_TYPE_CIR: + q_ctxt->bw_info.cir_bw = bw; + break; + + case SXE2_NODE_RL_TYPE_EIR: + case SXE2_NODE_RL_TYPE_SRL: + q_ctxt->bw_info.pir_bw = bw; + break; + } + } +} + +s32 sxe2_txsched_node_bw_lmt_cfg(struct sxe2_vsi *vsi, + struct sxe2_txsched_node *node, u8 rl_type, + u32 bw) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct scbge_txsched_node_bw orig_rl; + struct sxe2_txsched_cfg_node_rl_req req; + struct sxe2_txsched_cfg_node_rl_resp resp; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_txsched_node_rl_info_get(node, rl_type, &orig_rl); + if (orig_rl.bw == bw) { + LOG_INFO_BDF("txsched bw==node bw not modify, \t" + "teid=%#x, rl_type=%#x, bw=%u\n", + node->info.node_teid, rl_type, bw); + return 0; + } + + req.orig_prof_id = (u16)orig_rl.prof_id; + req.bw = bw; + req.hw_layer = node->info.data.hw_layer; + req.prof_type = rl_type; + req.teid = node->info.node_teid; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_NODE_RL_CFG, &req, + sizeof(req), &resp, sizeof(resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("txsched node rl cfg failed, \t" + "node_teid=%#x, layer=%u, \t" + "bw=%u, orig_prof_idx=%u, orig_bw=%u, \t" + "ret=%d\n", + req.teid, req.hw_layer, bw, orig_rl.prof_id, + orig_rl.bw, ret); + return -EIO; + } + + sxe2_txsched_node_rl_record(vsi, node, rl_type, bw, resp.prof_id); + + LOG_INFO_BDF("txsched node rl cfg success, \t" + "node_teid=%#x, layer=%u, \t" + "rl_type=%x, orig_prof_idx=%u, orig_bw=%u, \t" + "bw=%u, prof_idx=%u\n", + req.teid, req.hw_layer, rl_type, orig_rl.prof_id, + orig_rl.bw, bw, resp.prof_id); + + return 0; +} + +STATIC s32 sxe2_txsche_replay_node_bw(struct sxe2_vsi *vsi, + struct sxe2_txsched_node *node, + struct sxe2_txsched_q_bw_info *bw_info) +{ + s32 ret = 0; + + if (bw_info->rl_type == SXE2_NODE_RL_TYPE_CIR && + bw_info->cir_bw != node->info.data.cir.bw) { + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, SXE2_NODE_RL_TYPE_CIR, + bw_info->cir_bw); + + if (ret) + goto l_err; + } + + if (bw_info->rl_type == SXE2_NODE_RL_TYPE_EIR && + bw_info->pir_bw != node->info.data.srlPir.bw) { + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, + SXE2_NODE_RL_TYPE_EIR, bw_info->pir_bw); + if (ret) + goto l_err; + } + +l_err: + return ret; +} + +STATIC s32 sxe2_txsched_replay_q_bw(struct sxe2_vsi *vsi, + struct sxe2_vsi_txsched_queue *q_ctxt) +{ + s32 ret; + struct sxe2_txsched_node *node; + struct sxe2_txsched_context 
*sched_ctxt = &vsi->adapter->tx_sched_ctxt; + + node = sxe2_txsched_find_node_by_teid(sched_ctxt->root, (u16)q_ctxt->teid); + if (!node) + return -EINVAL; + + ret = sxe2_txsche_replay_node_bw(vsi, node, &q_ctxt->bw_info); + + return ret; +} + +s32 sxe2_txsched_q_bw_lmt_cfg(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + u8 rl_type, u32 bw) +{ + s32 ret; + struct sxe2_txsched_node *node; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return -EOPNOTSUPP; + + mutex_lock(&sched_ctxt->lock); + + node = sxe2_txsched_find_node_by_teid(sched_ctxt->root, txq->txq_teid); + if (!node) { + LOG_ERROR_BDF("txsched txq leaf node get failed, \t" + "node_teid=%#x, txq_idx=%u, bw=%u\n", + txq->txq_teid, txq->idx_in_vsi, bw); + ret = -ENXIO; + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, rl_type, bw); + +l_unlock: + mutex_unlock(&sched_ctxt->lock); + return ret; +} + +STATIC enum sxe2_node_type +sxe2_txsched_veb_node_type_get(enum sxe2_vsi_type vsi_type) +{ + enum sxe2_node_type node_type = SXE2_TXSCHD_NODE_TYPE_UNKNOW; + + if (vsi_type == SXE2_VSI_T_PF || + vsi_type == SXE2_VSI_T_LB || + vsi_type == SXE2_VSI_T_CTRL || + vsi_type == SXE2_VSI_T_DPDK_PF) + node_type = SXE2_TXSCHD_VEB_TYPE_PF; + + else if (vsi_type == SXE2_VSI_T_VF || + vsi_type == SXE2_VSI_T_DPDK_VF) + node_type = SXE2_TXSCHD_VEB_TYPE_VF; + + else if (vsi_type == SXE2_VSI_T_MACVLAN || + vsi_type == SXE2_VSI_T_ESW) + node_type = SXE2_TXSCHD_VEB_TYPE_MACVLAN_ESW; + + return node_type; +} + +STATIC enum sxe2_node_type +sxe2_txsched_vsi_node_type_get(enum sxe2_vsi_type vsi_type, u8 owner) +{ + enum sxe2_node_type node_type = SXE2_TXSCHD_NODE_TYPE_UNKNOW; + + switch (vsi_type) { + case SXE2_VSI_T_PF: + if (owner == SXE2_TXSCHED_NODE_OWNER_LAN) + node_type = SXE2_TXSCHD_VSI_TYPE_PF; + else if (owner == SXE2_TXSCHED_NODE_OWNER_RDMA) + node_type = SXE2_TXSCHD_VSI_TYPE_PF_RDMA; + break; + case SXE2_VSI_T_LB: + node_type = SXE2_TXSCHD_VSI_TYPE_PF_LOOPBACK; + break; + case SXE2_VSI_T_CTRL: + node_type = SXE2_TXSCHD_VSI_TYPE_PF_CTRL; + break; + case SXE2_VSI_T_VF: + node_type = SXE2_TXSCHD_VSI_TYPE_VF; + break; + case SXE2_VSI_T_MACVLAN: + case SXE2_VSI_T_ESW: + node_type = SXE2_TXSCHD_VSI_TYPE_MACVLAN_ESW; + break; + case SXE2_VSI_T_DPDK_PF: + node_type = SXE2_TXSCHD_VSI_TYPE_USER_PF; + break; + case SXE2_VSI_T_DPDK_VF: + node_type = SXE2_TXSCHD_VSI_TYPE_USER_VF; + break; + default: + node_type = SXE2_TXSCHD_NODE_TYPE_UNKNOW; + } + + return node_type; +} + +STATIC s32 sxe2_txsched_node_info_query(struct sxe2_adapter *adapter, + struct sxe2_txsched_node_param *param, + struct sxe2_txsched_node_info *node_info) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_txsched_query_node_req req; + + req.node_teid = param->node_teid; + req.parent_teid = param->parent_teid; + req.sibling_idx = param->node_silbing_idx; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_NODE_INFO_QUERY, + &req, sizeof(req), node_info, + sizeof(*node_info)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sched node query failed, \t" + "teid=%#x, parent_teid=%#x, sibling_idx=%d, ret=%d\n", + param->node_teid, param->parent_teid, + param->node_silbing_idx, ret); + } + + return ret; +} + +static s32 sxe2_txsched_sw_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node_param *param) +{ + s32 ret; + struct sxe2_txsched_node *node; + struct sxe2_txsched_node *parent; + struct sxe2_txsched_node_info 
node_info; + struct device *dev = &adapter->pdev->dev; + struct sxe2_txsched_context *ctxt = sxe2_txsched_ctxt_get(adapter); + + parent = sxe2_txsched_find_node_by_teid(ctxt->root, param->parent_teid); + if (!parent) { + LOG_ERROR_BDF("parent node not found, parent_teid=0x%x\n", + param->parent_teid); + return -EINVAL; + } + + if (parent->child_cnt >= SXE2_TXSCHED_NODE_CHILD_MAX) { + LOG_ERROR_BDF("child node list is full, parent_teid=%#x\n", + parent->info.node_teid); + return -EINVAL; + } + + ret = sxe2_txsched_node_info_query(adapter, param, &node_info); + if (ret) + return ret; + + node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL); + if (!node) { + LOG_ERROR_BDF("alloc sw node failed, teid=0x%x\n", + node_info.node_teid); + return -ENOMEM; + } + + node->child = devm_kcalloc(dev, SXE2_TXSCHED_NODE_CHILD_MAX, + sizeof(*node->child), GFP_KERNEL); + if (!node->child) { + LOG_ERROR_BDF("alloc child array of sw node failed, teid=0x%x\n", + node_info.node_teid); + devm_kfree(dev, node); + return -ENOMEM; + } + + if (param->sw_layer == sxe2_txsched_sw_q_layer_get()) { + node->txq_idx_in_dev = param->txq_idx_in_dev; + node->txq_idx_in_vsi = param->txq_idx_in_vsi; + } + + if (param->sw_layer > sxe2_txsched_sw_vsi_layer_get()) + node->vsi_idx_in_dev = param->vsi_idx_in_dev; + + node->in_use = true; + node->tc = (u8)param->tc; + node->parent = parent; + node->info = node_info; + node->owner = (u8)param->owner; + parent->child[parent->child_cnt++] = node; + + LOG_DEBUG_BDF("tx sched create node success, \t" + "teid=%#x, parent_teid=%#x, child_idx=%d, child_cnt=%u, addr=%p\n", + node->info.node_teid, node->info.parent_teid, + node->info.sibling_idx, parent->child_cnt, (void *)parent); + + return 0; +} + +static s32 sxe2_txsched_root_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node_info *root_info) +{ + struct sxe2_txsched_node *root; + struct device *dev = &adapter->pdev->dev; + + root = devm_kzalloc(dev, sizeof(*root), GFP_KERNEL); + if (!root) { + LOG_DEV_ERR("sched alloc root node failed\n"); + return -ENOMEM; + } + + root->child = devm_kcalloc(dev, SXE2_TXSCHED_NODE_CHILD_MAX, + sizeof(*root->child), GFP_KERNEL); + if (!root->child) { + LOG_DEV_ERR("sched alloc for child array of root node failed\n"); + devm_kfree(dev, root); + return -ENOMEM; + } + + (void)memcpy(&root->info, root_info, sizeof(*root_info)); + adapter->tx_sched_ctxt.root = root; + + return 0; +} + +static void sxe2_txsched_root_node_del(struct sxe2_adapter *adapter) +{ + struct sxe2_txsched_node *root = adapter->tx_sched_ctxt.root; + struct device *dev = &adapter->pdev->dev; + + if (root) { + if (root->child) { + devm_kfree(dev, root->child); + adapter->tx_sched_ctxt.root->child = NULL; + } + devm_kfree(dev, root); + adapter->tx_sched_ctxt.root = NULL; + } +} + +s32 sxe2_txsched_dflt_tc_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node_info *tc0_info) +{ + struct sxe2_txsched_node *node; + struct device *dev = &adapter->pdev->dev; + struct sxe2_txsched_node *root = adapter->tx_sched_ctxt.root; + + node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL); + if (!node) { + LOG_ERROR_BDF("alloc tc0 node failed, teid=0x%x\n", + tc0_info->node_teid); + return -ENOMEM; + } + + node->child = devm_kcalloc(dev, SXE2_TXSCHED_NODE_CHILD_MAX, + sizeof(*node->child), GFP_KERNEL); + if (!node->child) { + LOG_ERROR_BDF("alloc child array of tc0 node failed, teid=0x%x\n", + tc0_info->node_teid); + devm_kfree(dev, node); + return -ENOMEM; + } + + node->in_use = true; + node->parent = root; + node->tc = 0; + 
node->info = *tc0_info; + root->child[root->child_cnt++] = node; + + return 0; +} + +STATIC void sxe2_txsched_dflt_tc_node_del(struct sxe2_adapter *adapter) +{ + struct sxe2_txsched_node *root = adapter->tx_sched_ctxt.root; + struct device *dev = &adapter->pdev->dev; + + if (root && root->child) { + if (root->child[0]) { + if (root->child[0]->child) { + devm_kfree(dev, root->child[0]->child); + root->child[0]->child = NULL; + } + devm_kfree(dev, root->child[0]); + root->child[0] = NULL; + } + } +} + +bool sxe2_txsched_support_chk(struct sxe2_adapter *adapter) +{ + if (adapter->tx_sched_ctxt.cap.generic.layer_max) + return true; + else + return false; +} + +static s32 sxe2_fwc_txsched_cap_get(struct sxe2_adapter *adapter) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_txsched_cap_resp *cap = &adapter->tx_sched_ctxt.cap; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXSCHED_CAP_QUERY, NULL, 0, + cap, sizeof(*cap)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_INFO_BDF("sched cap get failed, ret=%d\n", ret); + else + LOG_INFO_BDF("sched cap get success: max_hw_layer=%d\n", cap->generic.layer_max); + + return ret; +} + +static inline void +sxe2_tx_sched_res_init(struct sxe2_txsched_context *sched_ctxt) +{ + sched_ctxt->user_root_teid = SXE2_TXSCHED_TEID_INVALID; + mutex_init(&sched_ctxt->lock); +} + +static inline void +sxe2_tx_sched_res_deinit(struct sxe2_txsched_context *sched_ctxt) +{ + (void)mutex_destroy(&sched_ctxt->lock); +} + +STATIC s32 sxe2_txsched_hw_dflt_topo_get(struct sxe2_adapter *adapter, + struct sxe2_fwc_txsched_dflt_topo_resp *dflt_topo) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TXSCHED_DFLT_TOPO_QUERY, NULL, + 0, dflt_topo, sizeof(*dflt_topo)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("tx sched dflt topo get failed, ret=%d\n", ret); + + return ret; +} + +s32 sxe2_txsched_dflt_topo_init(struct sxe2_adapter *adapter) +{ + s32 ret; + struct device *dev = &adapter->pdev->dev; + struct sxe2_fwc_txsched_dflt_topo_resp *dflt_topo; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + dflt_topo = devm_kzalloc(dev, sizeof(*dflt_topo), GFP_KERNEL); + if (!dflt_topo) { + LOG_DEV_ERR("sched dflt topo init: alloc failed\n"); + return -ENOMEM; + } + + mutex_lock(&sched_ctxt->lock); + ret = sxe2_txsched_hw_dflt_topo_get(adapter, dflt_topo); + if (ret) { + LOG_DEV_ERR("sxe2_txsched_hw_dflt_topo_get: failed, ret:%d\n", ret); + goto l_free_topo; + } + + ret = sxe2_txsched_root_node_add(adapter, + &dflt_topo->node_info[SXE2_TXSCHED_SW_PORT_LAYER]); + if (ret) + goto l_free_topo; + + ret = sxe2_txsched_dflt_tc_node_add(adapter, + &dflt_topo->node_info[SXE2_TXSCHED_SW_TC_LAYER]); + if (ret) { + LOG_DEV_ERR("sxe2_txsched_dflt_tc_node_add: failed, ret:%d\n", ret); + goto l_free_root; + } + + LOG_DEV_INFO("tx sched dflt topo init success, port_teid=%#x, tc_teid=%#x ret:%d\n", + dflt_topo->node_info[SXE2_TXSCHED_SW_PORT_LAYER].node_teid, + dflt_topo->node_info[SXE2_TXSCHED_SW_TC_LAYER].node_teid, ret); + + sched_ctxt->state = SXE2_TX_SCHED_STATE_READY; + goto l_free_topo; + +l_free_root: + sxe2_txsched_root_node_del(adapter); + +l_free_topo: + mutex_unlock(&sched_ctxt->lock); + devm_kfree(dev, dflt_topo); + LOG_DEV_INFO("sxe2 txsched dflt topo init ret:%d\n", ret); + return ret; +} + +void sxe2_txsched_dflt_topo_deinit(struct sxe2_adapter *adapter) +{ + if (!sxe2_txsched_support_chk(adapter)) + return; + + 
sxe2_txsched_dflt_tc_node_del(adapter); + sxe2_txsched_root_node_del(adapter); +} + +STATIC s32 sxe2_tx_sched_hw_node_del(struct sxe2_adapter *adapter, + u16 parent_teid, u16 start_child_idx, + u16 node_num, u16 node_teid[]) +{ + s32 ret; + u32 i, req_size; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_txsched_del_nodes_req *req; + struct device *dev = &adapter->pdev->dev; + + req_size = struct_size(req, teid, node_num); + req = devm_kzalloc(dev, req_size, GFP_KERNEL); + if (!req) { + LOG_ERROR_BDF("sched node del: alloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + + req->hdr.parent_teid = cpu_to_le16(parent_teid); + req->hdr.start_child_idx = cpu_to_le16(start_child_idx); + req->hdr.node_num = cpu_to_le16(node_num); + for (i = 0; i < node_num; i++) + req->teid[i] = cpu_to_le16(node_teid[i]); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_NODE_DEL, req, + req_size, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("tx sched node del failed, ret=%d\n", ret); + +l_end: + devm_kfree(dev, req); + return ret; +} + +STATIC s32 sxe2_tx_sched_hw_lan_leaf_node_del(struct sxe2_adapter *adapter, + u16 parent_teid, u16 sibling_idx, + u16 node_teid, + u16 txq_idx_in_dev) +{ + s32 ret, req_size; + struct sxe2_cmd_params cmd = {}; + struct sxe2_txsched_del_leaf_req *req; + struct device *dev = &adapter->pdev->dev; + + req_size = sizeof(struct sxe2_txsched_del_leaf_req); + req = devm_kzalloc(dev, (u32)req_size, GFP_KERNEL); + if (!req) { + LOG_ERROR_BDF("sched node del: alloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + + LOG_INFO_BDF("parent_teid=%#x, sibling_idx=%d node=%#x idx=%#x\n", + parent_teid, sibling_idx, node_teid, txq_idx_in_dev); + + req->parent_teid = cpu_to_le16(parent_teid); + req->sibling_idx = cpu_to_le16(sibling_idx); + req->node_teid = cpu_to_le16(node_teid); + req->txq_idx_in_dev = cpu_to_le16(txq_idx_in_dev); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_QUEUE_LEAF_DEL, req, + (u32)req_size, NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("tx sched node del failed, ret=%d\n", ret); + +l_end: + devm_kfree(dev, req); + return ret; +} + +static inline struct sxe2_txsched_node * +sxe2_txsched_first_node_get(struct sxe2_txsched_context *sched_ctxt, u8 tc, + u8 sw_layer) +{ + if (tc < SXE2_MAX_TRAFFIC_CLASS && sw_layer < SXE2_TXSCHED_LAYER_MAX) + return sched_ctxt->sib_head[tc][sw_layer]; + else + return NULL; +} + +STATIC void sxe2_txsched_sw_node_del(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node) +{ + u32 i, j; + struct sxe2_txsched_node *parent; + struct sxe2_txsched_node *node_list; + struct device *dev = &adapter->pdev->dev; + u8 sw_layer = node->info.data.hw_layer - 1; + struct sxe2_txsched_context *sched_ctxt = sxe2_txsched_ctxt_get(adapter); + + parent = node->parent; + if (parent) { + for (i = 0; i < parent->child_cnt; i++) { + if (parent->child[i] == node) { + if ((i + 1) == parent->child_cnt) { + parent->child[i] = NULL; + } else { + for (j = i + 1; j < parent->child_cnt; j++) { + parent->child[j - 1] = parent->child[j]; + } + } + + parent->child_cnt--; + break; + } + } + + node_list = sxe2_txsched_first_node_get(sched_ctxt, node->tc, + sw_layer); + if (node_list == node) { + sched_ctxt->sib_head[node->tc][sw_layer] = node->sibling; + } else { + while (node_list) { + if (node_list->sibling == node) { + node_list->sibling = node->sibling; + break; + } + + node_list = node_list->sibling; + } + } + } + + if (node->child) + devm_kfree(dev, node->child); + + devm_kfree(dev, 
node); +} + +STATIC s32 sxe2_txsched_subtree_clean(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node) +{ + s32 ret = 0; + u8 q_layer = sxe2_txsched_sw_q_layer_get(); + + if (!node) + return 0; + + while (node->child_cnt) { + ret = sxe2_txsched_subtree_clean(adapter, node->child[0]); + if (ret) { + LOG_ERROR_BDF("sched hw node del(child) failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->child[0]->info.node_teid, + node->child[0]->parent->info.node_teid, + node->child[0]->info.sibling_idx); + ret = 0; + goto l_end; + } + } + + if (node->info.data.hw_layer != (q_layer + 1) && + node->info.data.hw_layer > SXE2_TXSCHED_HW_LAYER_TC) { + ret = sxe2_tx_sched_hw_node_del(adapter, + node->parent->info.node_teid, + (u16)node->info.sibling_idx, 1, + &node->info.node_teid); + if (ret) { + LOG_ERROR_BDF("sched hw node del failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->info.node_teid, + node->parent->info.node_teid, + node->info.sibling_idx); + ret = 0; + } + } + +l_end: + sxe2_txsched_sw_node_del(adapter, node); + return ret; +} + +STATIC s32 sxe2_txsched_node_del(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node, + u16 not_del_root_teid, + u8 owner) +{ + u8 i = 0; + s32 ret = 0; + u8 q_layer = sxe2_txsched_sw_q_layer_get(); + + if (!node) + return 0; + + i = node->child_cnt; + while (i) { + --i; + ret = sxe2_txsched_node_del(adapter, node->child[i], + not_del_root_teid, owner); + if (ret) { + LOG_ERROR_BDF("sched hw node del(child) failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->child[i]->info.node_teid, + node->child[i]->parent->info.node_teid, + node->child[i]->info.sibling_idx); + ret = 0; + goto l_end; + } + } + + if (node->info.data.hw_layer != (q_layer + 1) && + node->info.data.hw_layer > SXE2_TXSCHED_HW_LAYER_TC && + node->owner == owner && + (node->info.node_teid != not_del_root_teid)) { + ret = sxe2_tx_sched_hw_node_del(adapter, + node->parent->info.node_teid, + (u16)node->info.sibling_idx, 1, + &node->info.node_teid); + if (ret) { + LOG_ERROR_BDF("sched hw node del failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->info.node_teid, + node->parent->info.node_teid, + node->info.sibling_idx); + ret = 0; + } else { + sxe2_txsched_sw_node_del(adapter, node); + } + } + +l_end: + return ret; +} + +s32 sxe2_txsched_init(struct sxe2_adapter *adapter) +{ + s32 ret; + + ret = sxe2_fwc_txsched_cap_get(adapter); + if (ret) + goto l_err; + + if (!sxe2_txsched_support_chk(adapter)) + goto l_err; + + sxe2_tx_sched_res_init(&adapter->tx_sched_ctxt); + + ret = sxe2_txsched_dflt_topo_init(adapter); + if (ret) { + sxe2_tx_sched_res_deinit(&adapter->tx_sched_ctxt); + goto l_err; + } + + LOG_INFO_BDF("tx sched init success\n"); +l_err: + return ret; +} + +void sxe2_txsched_deinit(struct sxe2_adapter *adapter) +{ + if (!sxe2_txsched_support_chk(adapter)) + return; + + sxe2_txsched_dflt_topo_deinit(adapter); + + sxe2_tx_sched_res_deinit(&adapter->tx_sched_ctxt); +} + +STATIC struct sxe2_txsched_node * +sxe2_txsched_tc_node_get(struct sxe2_txsched_context *ctxt, u8 tc) +{ + u8 i; + struct sxe2_txsched_node *node = NULL; + + if (!ctxt->root) + return node; + + for (i = 0; i < ctxt->root->child_cnt; i++) { + if (ctxt->root->child[i]->tc == tc) { + node = ctxt->root->child[i]; + break; + } + } + + return node; +} + +struct sxe2_txsched_node * +sxe2_txsched_vsi_first_node_get(struct sxe2_txsched_context *ctxt, u8 tc, + u16 vsi_idx, u8 owner) +{ + u8 sw_layer; + struct sxe2_txsched_node *node; + + sw_layer = 
sxe2_txsched_sw_vsi_layer_get(); + if (!sw_layer) + return NULL; + + node = sxe2_txsched_first_node_get(ctxt, tc, sw_layer); + + while (node) { + if (node->vsi_idx_in_dev == vsi_idx && node->owner == owner) + return node; + node = node->sibling; + } + + return NULL; +} + +STATIC struct sxe2_txsched_node * +sxe2_txsch_vsi_node_get(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, u8 tc, u8 owner) +{ + if (tc != 0 && vsi->type != SXE2_VSI_T_PF) { + LOG_ERROR_BDF("vsi node get failed, vsi_idx=%d, vsi_type=%u, tc=%d, owner=%d\n", + vsi->idx_in_dev, vsi->type, tc, owner); + return NULL; + } + + if (vsi->type == SXE2_VSI_T_DPDK_VF && + !sxe2_txsch_is_vf_vsi_agg_mode(adapter)) { + if (vsi->txsched.node) + return vsi->txsched.node; + else if (vsi->vf_node->vsi) + return vsi->vf_node->vsi->txsched.node; + else + return NULL; + } + + return sxe2_txsched_vsi_first_node_get(&adapter->tx_sched_ctxt, + tc, vsi->idx_in_dev, owner); +} + +static struct sxe2_txsched_node * +sxe2_txsch_qg_head_node_get(struct sxe2_txsched_node *vsi_node, u8 owner) +{ + u32 i; + + while (vsi_node) { + for (i = 0; i < SXE2_TXSCHED_NODE_CHILD_MAX; i++) { + if (!(vsi_node->child[i])) + continue; + + if (owner == vsi_node->child[i]->owner) + return vsi_node->child[i]; + } + + vsi_node = vsi_node->group; + } + + return NULL; +} + +static struct sxe2_txsched_node * +sxe2_txsch_qg_tail_node_get(struct sxe2_txsched_node *vsi_node, u8 owner) +{ + struct sxe2_txsched_node *node; + struct sxe2_txsched_node *tail = NULL; + + node = sxe2_txsch_qg_head_node_get(vsi_node, owner); + if (!node) + goto l_end; + + while (node) { + tail = node; + node = node->group; + } + +l_end: + return tail; +} + +static inline u32 +sxe2_txsched_qg_num_get(struct sxe2_txsched_node *first_vsi_node, u8 owner) +{ + u32 qg_cnt = 0; + struct sxe2_txsched_node *qgrp_node; + + if (!first_vsi_node) + return 0; + + qgrp_node = sxe2_txsch_qg_head_node_get(first_vsi_node, owner); + if (!qgrp_node) + return 0; + + while (qgrp_node) { + qgrp_node = qgrp_node->group; + qg_cnt++; + } + + return qg_cnt; +} + +static struct sxe2_txsched_node * +sxe2_txsch_first_vsig_lookup_by_type(struct sxe2_txsched_node *tc_node, + enum sxe2_node_type veb_node_type, + enum sxe2_node_type vsig_node_type) +{ + u8 i, j; + struct sxe2_txsched_node *veb_node; + + for (i = 0; i < tc_node->child_cnt; i++) { + veb_node = tc_node->child[i]; + if (veb_node->node_type == veb_node_type) + for (j = 0; j < veb_node->child_cnt; j++) + if (veb_node->child[j]->node_type == vsig_node_type) + return veb_node->child[j]; + } + + return NULL; +} + +s32 sxe2_txsched_tc_max_bw_lmt_cfg(struct sxe2_vsi *vsi, u8 tc, u32 max_tx_rate) +{ + s32 ret; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_node *vsig_node; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return -EOPNOTSUPP; + + mutex_lock(&sched_ctxt->lock); + tc_node = sxe2_txsched_tc_node_get(sched_ctxt, tc); + if (!tc_node) { + LOG_ERROR_BDF("txsched tc node get failed, tc_id=%u, max_tx_rate=%u\n", + tc, max_tx_rate); + ret = -ENXIO; + goto l_unlock; + } + + vsig_node = sxe2_txsch_first_vsig_lookup_by_type(tc_node, + SXE2_TXSCHD_VEB_TYPE_PF, SXE2_TXSCHD_VSIG_TYPE_PF_AGG); + if (!vsig_node) { + LOG_ERROR_BDF("txsched vsig node get failed, tc_id=%u, max_tx_rate=%u\n", + tc, max_tx_rate); + ret = -ENXIO; + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, vsig_node, + SXE2_NODE_RL_TYPE_EIR, max_tx_rate); + if (ret) { + 
LOG_ERROR_BDF("unable to set tc max rate, ret=%d, tc=%u, maxrate=%u\n", + ret, tc, max_tx_rate); + } + +l_unlock: + mutex_unlock(&sched_ctxt->lock); + return ret; +} + +static bool sxe2_txsched_is_leaf_node_present(struct sxe2_txsched_node *node, + u8 q_layer) +{ + u8 i; + + for (i = 0; i < node->child_cnt; i++) + if (sxe2_txsched_is_leaf_node_present(node->child[i], q_layer)) + return true; + + return (node->info.data.hw_layer == q_layer); +} + +static bool sxe2_txsched_vsi_qset_left(struct sxe2_adapter *adapter, u8 tc, + u16 vsi_idx, u8 owner) +{ + struct sxe2_txsched_node *node; + struct sxe2_txsched_node *vsi_node; + struct sxe2_txsched_context *ctxt = &adapter->tx_sched_ctxt; + u8 max_layer = sxe2_txsched_layer_max_get(ctxt); + bool left = false; + + vsi_node = sxe2_txsched_vsi_first_node_get(ctxt, tc, vsi_idx, owner); + if (!vsi_node) + goto l_end; + + while (vsi_node) { + node = vsi_node->group; + if (sxe2_txsched_is_leaf_node_present(vsi_node, max_layer)) { + left = true; + LOG_INFO_BDF("sched VSI has leaf nodes in TC %d\n", tc); + goto l_end; + } + vsi_node = node; + } + +l_end: + return left; +} + +bool sxe2_txsched_qset_left(struct sxe2_adapter *adapter, u16 vsi_idx) +{ + bool left = false; + u8 tc; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_context *ctxt = sxe2_txsched_ctxt_get(adapter); + + mutex_lock(&adapter->tx_sched_ctxt.lock); + sxe2_for_each_tc(tc) + { + tc_node = sxe2_txsched_tc_node_get(ctxt, tc); + if (!tc_node) + continue; + + left = sxe2_txsched_vsi_qset_left(adapter, tc, + vsi_idx, SXE2_TXSCHED_NODE_OWNER_RDMA); + if (left) { + LOG_INFO_BDF("tc[%d] vsi node has left qset, left=%d\n", tc, left); + break; + } + } + mutex_unlock(&adapter->tx_sched_ctxt.lock); + return left; +} + +STATIC s32 sxe2_txsched_vsi_node_del(struct sxe2_adapter *adapter, u8 tc, + struct sxe2_vsi *vsi, u8 owner) +{ + struct sxe2_txsched_node *node; + struct sxe2_txsched_node *vsi_node; + struct sxe2_txsched_context *ctxt = &adapter->tx_sched_ctxt; + u8 max_layer = sxe2_txsched_layer_max_get(ctxt); + + vsi_node = sxe2_txsch_vsi_node_get(adapter, vsi, tc, owner); + if (!vsi_node) { + LOG_WARN_BDF("txsch vsi node del failed, vsi_node=NULL, " + "vsi_type=%d, vsi_idx=%d, owner=%d\n", + vsi->type, vsi->idx_in_dev, owner); + return 0; + } + + if (sxe2_txsched_is_leaf_node_present(vsi_node, max_layer)) { + LOG_ERROR_BDF("sched VSI has leaf nodes in TC %d\n", tc); + return -EBUSY; + } + + while (vsi_node) { + node = vsi_node->group; + (void)sxe2_txsched_node_del(adapter, vsi_node, SXE2_TXSCHED_TEID_INVALID, owner); + vsi_node = node; + } + + return 0; +} + +STATIC s32 sxe2_txsched_del_vsi_node_for_each_tc(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, + u8 owner) +{ + u8 tc; + s32 ret = 0; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_context *ctxt = sxe2_txsched_ctxt_get(adapter); + + sxe2_for_each_tc(tc) + { + tc_node = sxe2_txsched_tc_node_get(ctxt, tc); + if (!tc_node) + continue; + + ret = sxe2_txsched_vsi_node_del(adapter, tc, vsi, owner); + if (ret) { + LOG_ERROR_BDF("tc[%d] vsi node del failed, ret=%d\n", tc, ret); + break; + } + } + + return ret; +} + +static inline void +sxe2_txsched_vsi_layer_calc(u8 max_layer, u32 queue_num, + enum sxe2_vsi_type vsi_type, u8 owner, + struct sxe2_txsched_add_node_info *add_node_info) +{ + u32 num_node = 0; + + num_node = DIV_ROUND_UP(queue_num, (SXE2_TXSCHED_NODE_CHILD_MAX * + SXE2_TXSCHED_NODE_CHILD_MAX)); + add_node_info->num = (u8)num_node; + add_node_info->type = sxe2_txsched_vsi_node_type_get(vsi_type, owner); + 
add_node_info->node = NULL; +} + +static struct sxe2_txsched_node * +sxe2_txsched_enough_slot_node_lookup(struct sxe2_txsched_context *sched_ctxt, + u8 tc, u8 sw_layer, u8 need_slot_num, + enum sxe2_node_type node_type) +{ + struct sxe2_txsched_node *node; + + node = sxe2_txsched_first_node_get(sched_ctxt, tc, sw_layer); + while (node) { + if ((node_type == node->node_type) && + (SXE2_TXSCHED_NODE_CHILD_MAX - node->child_cnt) >= need_slot_num) + break; + + node = node->sibling; + } + + return node; +} + +static struct sxe2_txsched_node * +sxe2_txsched_node_lookup_by_type(struct sxe2_txsched_context *sched_ctxt, + u8 tc, u8 sw_layer, + enum sxe2_node_type node_type) +{ + struct sxe2_txsched_node *node; + + node = sxe2_txsched_first_node_get(sched_ctxt, tc, sw_layer); + while (node) { + if (node_type == node->node_type) + break; + + node = node->sibling; + } + + return node; +} + +static struct sxe2_txsched_node * +sxe2_txsched_no_child_vsig_lookup(struct sxe2_txsched_node *tc_node, + enum sxe2_node_type veb_node_type) +{ + u8 i, j; + struct sxe2_txsched_node *veb_node; + + for (i = 0; i < tc_node->child_cnt; i++) { + veb_node = tc_node->child[i]; + if (veb_node->node_type == veb_node_type) { + for (j = 0; j < veb_node->child_cnt; j++) { + if (!veb_node->child[j]->child_cnt) + return veb_node->child[j]; + } + } + } + + return NULL; +} + +static struct sxe2_txsched_node * +sxe2_txsched_enough_slot_vsig_lookup(struct sxe2_txsched_node *tc_node, + enum sxe2_node_type veb_node_type, + enum sxe2_node_type vsig_node_type, u32 need_slot_num) +{ + u8 i, j; + struct sxe2_txsched_node *veb_node; + + for (i = 0; i < tc_node->child_cnt; i++) { + veb_node = tc_node->child[i]; + if (veb_node->node_type == veb_node_type) { + for (j = 0; j < veb_node->child_cnt; j++) { + if ((vsig_node_type == + veb_node->child[j]->node_type) && + (u32)((SXE2_TXSCHED_NODE_CHILD_MAX - + veb_node->child[j]->child_cnt)) >= + need_slot_num) + + return veb_node->child[j]; + } + } + } + + return NULL; +} + +static struct sxe2_txsched_node * +sxe2_txsch_vf_agg_node_lookup(struct sxe2_txsched_node *tc_node, + enum sxe2_node_type veb_node_type, u16 vf_idx_in_pf) +{ + u8 i, j; + struct sxe2_txsched_node *veb_node; + + for (i = 0; i < tc_node->child_cnt; i++) { + veb_node = tc_node->child[i]; + if (veb_node->node_type == veb_node_type) { + for (j = 0; j < veb_node->child_cnt; j++) { + if (vf_idx_in_pf == veb_node->child[j]->vf_idx_in_pf && + veb_node->child[j]->node_type == SXE2_TXSCHD_VSIG_TYPE_VF_AGG) { + return veb_node->child[j]; + } + } + } + } + + return NULL; +} + +STATIC void sxe2_txsch_vsi_node_map(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, u8 owner, + struct sxe2_txsched_node *vsi_node) +{ + if (owner == SXE2_TXSCHED_NODE_OWNER_RDMA) + return; + + if (sxe2_txsch_vsi_type_get(adapter, vsi) == FUSION_VF2VSI_MODE_VF_UVSI) + goto l_update_vsi; + + vsi_node->vsi_idx_in_dev = vsi->idx_in_dev; + +l_update_vsi: + vsi->txsched.node = vsi_node; + vsi->txsched.vsi_node_cnt = vsi_node->same_node_num_pre_tc; + +} + +STATIC void sxe2_txsch_vsi_node_unmap(struct sxe2_vsi *vsi, u8 owner) +{ + if (!vsi) + return; + + if (owner == SXE2_TXSCHED_NODE_OWNER_RDMA) + return; + + vsi->txsched.node = NULL; + vsi->txsched.vsi_node_cnt = 0; + +} + +s32 sxe2_txsched_vf_bw_lmt_cfg(struct sxe2_adapter *adapter, + struct sxe2_vf_node *vf_node, u8 rl_type, u32 bw) +{ + s32 ret; + u8 owner; + struct sxe2_vsi *vsi; + struct sxe2_txsched_node *rl_node; + struct sxe2_txsched_context *ctx = &adapter->tx_sched_ctxt; + + if 
(!sxe2_txsched_support_chk(adapter)) + return -EOPNOTSUPP; + + mutex_lock(&ctx->lock); + + vsi = vf_node->vsi ? vf_node->vsi : vf_node->dpdk_vf_vsi; + if (!vsi) { + LOG_ERROR_BDF("vsi for vf %d is null\n", vf_node->vf_idx); + ret = -EINVAL; + goto l_unlock; + } + + if (sxe2_txsch_is_vf_vsi_agg_mode(adapter)) { + rl_node = sxe2_txsch_vf_agg_node_lookup(ctx->root->child[0], + SXE2_TXSCHD_VEB_TYPE_VF, + vf_node->vf_idx); + } else { + owner = (vsi->type == SXE2_VSI_T_DPDK_VF) ? + SXE2_TXSCHED_NODE_OWNER_USER : SXE2_TXSCHED_NODE_OWNER_LAN; + rl_node = sxe2_txsch_vsi_node_get(adapter, vsi, 0, owner); + } + + if (!rl_node) { + LOG_ERROR_BDF("vf node get failed, active_vf_num=%d, vf[%d], bw=%u\n", + adapter->vf_ctxt.num_vfs, vf_node->vf_idx, bw); + ret = -ENXIO; + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, rl_node, rl_type, bw); + +l_unlock: + mutex_unlock(&ctx->lock); + return ret; +} + +static void +sxe2_txsched_vsig_layer_calc(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *tc_node, + struct sxe2_vsi *vsi, u32 vsi_node_num, + struct sxe2_txsched_add_node_info *add_node_info) +{ + enum sxe2_node_type type; + enum sxe2_node_type veb_node_type; + struct sxe2_txsched_node *vsig_node; + enum sxe2_txsch_vsi_type txsch_vsi_type; + enum sxe2_vsi_type vsi_type = vsi->type; + struct sxe2_txsched_context *sched_ctxt = sxe2_txsched_ctxt_get(adapter); + + txsch_vsi_type = sxe2_txsch_vsi_type_get(adapter, vsi); + + veb_node_type = sxe2_txsched_veb_node_type_get(vsi_type); + + if (vsi_type == SXE2_VSI_T_PF || vsi_type == SXE2_VSI_T_DPDK_PF) { + type = (vsi_type == SXE2_VSI_T_PF) ? + SXE2_TXSCHD_VSIG_TYPE_PF_AGG : SXE2_TXSCHED_VSIG_TYPE_USER_PF; + vsig_node = sxe2_txsched_node_lookup_by_type(sched_ctxt, + tc_node->tc, + SXE2_TXSCHED_SW_VSIG_LAYER, type); + + } else if (txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_KVSI || + txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_UVSI) { + vsig_node = sxe2_txsch_vf_agg_node_lookup(tc_node, + veb_node_type, + vsi->vf_node->vf_idx); + type = SXE2_TXSCHD_VSIG_TYPE_VF_AGG; + + } else { + if (vsi_node_num > 1) { + vsig_node = sxe2_txsched_no_child_vsig_lookup(tc_node, veb_node_type); + if (vsig_node) + vsig_node->node_type = SXE2_TXSCHD_VSIG_TYPE_AGG; + type = SXE2_TXSCHD_VSIG_TYPE_AGG; + + } else { + vsig_node = sxe2_txsched_enough_slot_vsig_lookup(tc_node, + veb_node_type, + SXE2_TXSCHD_VSIG_TYPE_GEN, + vsi_node_num); + + type = SXE2_TXSCHD_VSIG_TYPE_GEN; + } + } + + add_node_info->num = vsig_node ? 0 : 1; + add_node_info->type = type; + add_node_info->node = vsig_node; +} + +static void +sxe2_txsched_veb_layer_calc(struct sxe2_txsched_context *sched_ctxt, u8 tc, + enum sxe2_vsi_type vsi_type, u32 vsig_node_num, + struct sxe2_txsched_add_node_info *add_node_info) +{ + enum sxe2_node_type node_type; + struct sxe2_txsched_node *node; + + node_type = sxe2_txsched_veb_node_type_get(vsi_type); + + node = sxe2_txsched_enough_slot_node_lookup(sched_ctxt, tc, + SXE2_TXSCHED_SW_VEB_LAYER, + (u8)vsig_node_num, node_type); + + add_node_info->num = node ? 
0 : 1; + add_node_info->type = node_type; + add_node_info->node = node; +} + +STATIC void sxe2_txsch_non_veb_calc( + struct sxe2_adapter *adapter, struct sxe2_vsi *vsi, + enum sxe2_node_type veb_node_type, + struct sxe2_txsched_add_node_info add_node_info[]) +{ + enum sxe2_node_type type; + enum sxe2_txsch_vsi_type txsch_vsi_type; + + add_node_info[SXE2_TXSCHED_SW_VEB_LAYER].num = 1; + add_node_info[SXE2_TXSCHED_SW_VEB_LAYER].type = veb_node_type; + add_node_info[SXE2_TXSCHED_SW_VEB_LAYER].node = NULL; + + txsch_vsi_type = sxe2_txsch_vsi_type_get(adapter, vsi); + if (vsi->type == SXE2_VSI_T_PF || + vsi->type == SXE2_VSI_T_LB || + vsi->type == SXE2_VSI_T_CTRL) { + type = SXE2_TXSCHD_VSIG_TYPE_PF_AGG; + + } else if (vsi->type == SXE2_VSI_T_DPDK_PF) { + type = SXE2_TXSCHED_VSIG_TYPE_USER_PF; + + } else if (txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_KVSI || + txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_UVSI) { + type = SXE2_TXSCHD_VSIG_TYPE_VF_AGG; + + } else { + type = (add_node_info[SXE2_TXSCHED_SW_VSI_LAYER].num > 1) ? + SXE2_TXSCHD_VSIG_TYPE_AGG : SXE2_TXSCHD_VSIG_TYPE_GEN; + } + + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].num = 1; + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].type = type; + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].node = NULL; +} + +STATIC void +sxe2_txsched_vsi_to_tc_calc(struct sxe2_adapter *adapter, struct sxe2_txsched_node *tc_node, + u32 queue_num, struct sxe2_vsi *vsi, + struct sxe2_txsched_add_node_info add_node_info[], u8 owner) +{ + struct sxe2_txsched_context *ctxt = sxe2_txsched_ctxt_get(adapter); + enum sxe2_vsi_type vsi_type = vsi->type; + enum sxe2_node_type veb_node_type; + struct sxe2_txsched_node *veb_node; + u8 max_layer = sxe2_txsched_layer_max_get(ctxt); + + add_node_info[SXE2_TXSCHED_SW_PORT_LAYER].num = 0; + add_node_info[SXE2_TXSCHED_SW_PORT_LAYER].type = SXE2_TXSCHD_NODE_TYPE_PORT; + + add_node_info[SXE2_TXSCHED_SW_TC_LAYER].num = 0; + add_node_info[SXE2_TXSCHED_SW_TC_LAYER].type = SXE2_TXSCHD_NODE_TYPE_TC; + + sxe2_txsched_vsi_layer_calc(max_layer, queue_num, vsi_type, owner, + &add_node_info[SXE2_TXSCHED_SW_VSI_LAYER]); + + veb_node_type = sxe2_txsched_veb_node_type_get(vsi_type); + veb_node = sxe2_txsched_node_lookup_by_type(ctxt, tc_node->tc, + SXE2_TXSCHED_SW_VEB_LAYER, + veb_node_type); + if (!veb_node) { + sxe2_txsch_non_veb_calc(adapter, vsi, veb_node_type, add_node_info); + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].node = NULL; + return; + } + + sxe2_txsched_vsig_layer_calc(adapter, tc_node, vsi, + add_node_info[SXE2_TXSCHED_SW_VSI_LAYER].num, + &add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER]); + + sxe2_txsched_veb_layer_calc(ctxt, tc_node->tc, vsi_type, + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].num, + &add_node_info[SXE2_TXSCHED_SW_VEB_LAYER]); +} + +static s32 +sxe2_txsched_hw_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsch_add_nodes_req *nodes_param, + u32 resp_size, struct sxe2_fwc_txsched_add_nodes_resp *resp) +{ + s32 ret; + u32 i, req_size; + struct sxe2_cmd_params cmd = {}; + struct device *dev = &adapter->pdev->dev; + struct sxe2_fwc_txsched_add_nodes_req *req; + + req_size = (u32)struct_size(req, node, nodes_param->num); + req = devm_kzalloc(dev, req_size, GFP_KERNEL); + if (!req) { + LOG_ERROR_BDF("sched node add: alloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + + req->hdr.node_num = nodes_param->num; + req->hdr.parent_teid = nodes_param->parent_teid; + for (i = 0; i < nodes_param->num; i++) { + req->node[i].parent_teid = nodes_param->parent_teid; + req->node[i].data.prio = nodes_param->prio; + req->node[i].data.hw_layer 
= nodes_param->sw_layer + 1; + req->node[i].data.arb_mode = SXE2_NODE_ARB_MODE_BPS; + req->node[i].data.status = SXE2_NODE_STATUS_ENABLE; + req->node[i].data.rl_type = SXE2_NODE_RL_TYPE_EIR; + req->node[i].data.cir.bw = + SXE2_TXSCHED_DFLT_BW; + req->node[i].data.cir.weight = nodes_param->weight; + req->node[i].data.cir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->node[i].data.srlPir.bw = + SXE2_TXSCHED_DFLT_BW; + req->node[i].data.srlPir.weight = nodes_param->weight; + req->node[i].data.srlPir.prof_id = + SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->node[i].data.adj_lvl = nodes_param->adj_lvl; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_NODE_ADD, req, + req_size, resp, resp_size); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret || resp->add_node_num != nodes_param->num) { + LOG_ERROR_BDF("tx sched node add failed, ret=%d\n", ret); + ret = -EIO; + } + + devm_kfree(dev, req); +l_end: + return ret; +} + +STATIC s32 sxe2_txsched_nodes_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *parent, + struct sxe2_txsch_add_nodes_req *nodes_param, + struct sxe2_txsched_node **first_node, + u16 *num_nodes_added) +{ + s32 ret; + u16 teid; + u32 resp_size, i; + u8 tc = nodes_param->tc; + u8 swl = nodes_param->sw_layer; + struct device *dev = &adapter->pdev->dev; + struct sxe2_txsched_node *prev, *new_node; + struct sxe2_txsched_node_param node_param; + struct sxe2_fwc_txsched_add_nodes_resp *resp; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + resp_size = struct_size(resp, node_teid, nodes_param->num); + resp = devm_kzalloc(dev, resp_size, GFP_KERNEL); + if (!resp) { + LOG_ERROR_BDF("txshced alloc node failed\n"); + return -ENOMEM; + } + + ret = sxe2_txsched_hw_node_add(adapter, nodes_param, resp_size, resp); + if (ret) + goto l_free; + + *num_nodes_added = (u16)resp->add_node_num; + for (i = 0; i < *num_nodes_added; i++) { + node_param.tc = tc; + node_param.owner = nodes_param->owner; + node_param.sw_layer = swl; + node_param.node_teid = resp->node_teid[i]; + node_param.parent_teid = parent->info.node_teid; + node_param.node_silbing_idx = (u8)resp->sibling_idx[i]; + ret = sxe2_txsched_sw_node_add(adapter, &node_param); + if (ret) { + LOG_ERROR_BDF("add nodes in sw failed, ret =%d\n", ret); + break; + } + + teid = resp->node_teid[i]; + new_node = sxe2_txsched_find_node_by_teid(parent, teid); + if (!new_node) { + ret = -ENOMEM; + LOG_ERROR_BDF("node is missing for teid =%d\n", teid); + break; + } + + new_node->sibling = NULL; + prev = sxe2_txsched_first_node_get(sched_ctxt, tc, swl); + if (prev && prev != new_node) { + while (prev->sibling) + prev = prev->sibling; + prev->sibling = new_node; + } + + if (!sched_ctxt->sib_head[tc][swl]) + sched_ctxt->sib_head[tc][swl] = new_node; + if (i == 0) + *first_node = new_node; + } + +l_free: + devm_kfree(dev, resp); + return ret; +} + +static inline void +sxe2_txsched_node_group_list_updata(struct sxe2_txsched_node *first_node, + u32 num_added) +{ + struct sxe2_txsched_node *node = NULL; + + node = first_node; + while (--num_added && node) { + node->group = node->sibling; + node = node->sibling; + } + + if (node) + node->group = NULL; +} + +STATIC s32 sxe2_txsched_add_nodes_to_parent(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *parent, + struct sxe2_txsch_add_nodes_req *nodes_param, + struct sxe2_txsched_node **first_node, + u16 *num_nodes_added) +{ + s32 ret = 0; + + if (!nodes_param->num) { + *first_node = NULL; + *num_nodes_added = 0; + LOG_DEBUG_BDF("dont need create node, need_add==0\n"); + return 
0; + } + + if ((parent->child_cnt + nodes_param->num) > SXE2_TXSCHED_NODE_CHILD_MAX) { + LOG_ERROR_BDF("txsched are not enough slot,\t" + "parent->child_cnt=%u, need_node=%u , ret=%d\n", + parent->child_cnt, nodes_param->num, ret); + return -ENOSPC; + } + + ret = sxe2_txsched_nodes_add(adapter, parent, + nodes_param, first_node, num_nodes_added); + if (ret) { + LOG_ERROR_BDF("txsched node create failed, ret=%d\n", ret); + return ret; + } + + sxe2_txsched_node_group_list_updata(*first_node, *num_nodes_added); + + return ret; +} + +static inline void +sxe2_txsched_node_info_setup(struct sxe2_txsched_node *first, + struct sxe2_vsi *vsi, u8 sw_layer, u32 node_num, + enum sxe2_node_type node_type, u8 owner) +{ + struct sxe2_txsched_node *node; + enum sxe2_txsch_vsi_type txsch_vsi_type; + + node = first; + while (node) { + node->in_use = true; + + txsch_vsi_type = sxe2_txsch_vsi_type_get(vsi->adapter, vsi); + + if (sw_layer == SXE2_TXSCHED_SW_VSI_LAYER) { + if (txsch_vsi_type != FUSION_VF2VSI_MODE_VF_UVSI) + node->vsi_idx_in_dev = vsi->idx_in_dev; + + node->vf_idx_in_pf = (sxe2_txsch_is_vf_by_vsitype(vsi->type)) ? + vsi->vf_node->vf_idx : SXE2_TXSCH_VF_IDX_INVAL; + } + + if (sw_layer == SXE2_TXSCHED_SW_VSIG_LAYER) { + if (txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_UVSI || + txsch_vsi_type == FUSION_VF2VSIG_MODE_VF_KVSI) { + node->vf_idx_in_pf = vsi->vf_node->vf_idx; + } + } + + node->same_node_num_pre_tc = (u8)node_num; + node->node_type = node_type; + node->owner = owner; + node = node->group; + } +} + +STATIC struct +sxe2_txsched_node *sxe2_txsched_vsi_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *tc_node, + struct sxe2_vsi *vsi, + struct sxe2_txsched_add_node_info add_node_info[], + u8 owner) +{ + s32 ret; + u8 swl, vsil; + u16 num_added = 0; + struct sxe2_txsched_node *first_node; + struct sxe2_txsched_node *parent = tc_node; + struct sxe2_txsch_add_nodes_req nodes_param; + + vsil = sxe2_txsched_sw_vsi_layer_get(); + + for (swl = SXE2_TXSCHED_SW_VEB_LAYER; swl <= vsil; swl++) { + nodes_param.sw_layer = swl; + nodes_param.owner = owner; + nodes_param.tc = tc_node->tc; + nodes_param.num = add_node_info[swl].num; + nodes_param.parent_teid = parent->info.node_teid; + nodes_param.prio = SXE2_TXSCH_NODE_PRIO_DLFT; + nodes_param.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + nodes_param.adj_lvl = sxe2_txsch_node_adj_lvl_get(); + ret = sxe2_txsched_add_nodes_to_parent(adapter, parent, + &nodes_param, &first_node, &num_added); + if (ret || add_node_info[swl].num != num_added) { + first_node = NULL; + goto l_end; + } + + if (num_added && add_node_info[swl].num) + parent = first_node; + else + parent = add_node_info[swl].node; + + if (!parent) { + first_node = NULL; + LOG_ERROR_BDF("txsched vsi build failed, \t" + "dont find parent node\n"); + goto l_end; + } + + if (num_added) + sxe2_txsched_node_info_setup(first_node, vsi, swl, + num_added, + add_node_info[swl].type, + owner); + } + + return first_node; + +l_end: + return NULL; +} + +STATIC struct sxe2_txsched_node * +sxe2_txsched_vsi_build(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *tc_node, + struct sxe2_vsi *vsi, u32 queue_num, + u8 owner) +{ + struct sxe2_txsched_add_node_info add_node_info[SXE2_TXSCHED_LAYER_MAX]; + + (void)memset(add_node_info, 0, + (sizeof(struct sxe2_txsched_add_node_info) * + SXE2_TXSCHED_LAYER_MAX)); + + sxe2_txsched_vsi_to_tc_calc(adapter, tc_node, queue_num, vsi, + add_node_info, owner); + +#ifndef SXE2_CFG_RELEASE + LOG_INFO("evb node calc, \t" + "hwlayer[3] node cnt = %u, node type = %d\n", + 
add_node_info[SXE2_TXSCHED_SW_VEB_LAYER].num, + add_node_info[SXE2_TXSCHED_SW_VEB_LAYER].type); + if (SXE2_TXSCHED_SW_VSIG_LAYER < SXE2_TXSCHED_LAYER_MAX) + LOG_INFO("vsig node calc, \t" + "hwlayer[4] node cnt = %u, node type = %d\n", + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].num, + add_node_info[SXE2_TXSCHED_SW_VSIG_LAYER].type); + if (SXE2_TXSCHED_SW_VSI_LAYER < SXE2_TXSCHED_LAYER_MAX) + LOG_INFO("vsi node calc, \t" + "hwlayer[5] node cnt = %u, node type = %d\n", + add_node_info[SXE2_TXSCHED_SW_VSI_LAYER].num, + add_node_info[SXE2_TXSCHED_SW_VSI_LAYER].type); +#endif + return sxe2_txsched_vsi_node_add(adapter, tc_node, vsi, add_node_info, owner); +} + +static inline u32 +sxe2_txsched_vsi_node_num_get(struct sxe2_txsched_node *fist_vsi_node) +{ + u32 vsi_cnt = 0; + struct sxe2_txsched_node *vsi_node = fist_vsi_node; + + while (vsi_node) { + vsi_node = vsi_node->group; + vsi_cnt++; + } + + return vsi_cnt; +} + +static inline bool +sxe2_txsched_vsi_node_enough(struct sxe2_txsched_node *fist_vsi_node, u16 q_cnt) +{ + u32 vsi_node_num; + u32 num_node = 0; + + num_node = (u32)DIV_ROUND_UP(q_cnt, (SXE2_TXSCHED_NODE_CHILD_MAX * + SXE2_TXSCHED_NODE_CHILD_MAX)); + + vsi_node_num = sxe2_txsched_vsi_node_num_get(fist_vsi_node); + + return (vsi_node_num >= num_node); +} + +static inline void +sxe2_txsched_qp_group_update(struct sxe2_txsched_node *start_node, + u32 num_added, u8 owner, u16 vsi_idx, + struct sxe2_txsched_node **prev_node) +{ + struct sxe2_txsched_node *node; + + node = start_node; + do { + node->owner = owner; + node->vsi_idx_in_dev = vsi_idx; + node = node->group; + } while (node && node->group); + + if (node) { + node->owner = owner; + node->vsi_idx_in_dev = vsi_idx; + } + + if (*prev_node) + (*prev_node)->group = start_node; + + *prev_node = node; +} + +static s32 +sxe2_txsched_vsi_child_nodes_add(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, + struct sxe2_txsched_node *parent, + struct sxe2_txsched_node *tc_node, + u16 qg_nodes, u8 owner) +{ + s32 ret; + u8 qgl; + u32 add_nodes; + u16 num_added = 0; + struct sxe2_txsched_node *first_node; + struct sxe2_txsch_add_nodes_req nodes_param; + struct sxe2_txsched_node *prev_node = NULL; + + qgl = sxe2_txsched_sw_qp_layer_get(); + + prev_node = sxe2_txsch_qg_tail_node_get(parent, owner); + while (parent) { + add_nodes = SXE2_TXSCHED_NODE_CHILD_MAX - (u32)parent->child_cnt; + add_nodes = qg_nodes > add_nodes ? add_nodes : qg_nodes; + + nodes_param.sw_layer = qgl; + nodes_param.owner = owner; + nodes_param.tc = tc_node->tc; + nodes_param.num = (u16)add_nodes; + nodes_param.parent_teid = parent->info.node_teid; + nodes_param.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + nodes_param.adj_lvl = sxe2_txsch_node_adj_lvl_get(); + nodes_param.prio = (sxe2_txsch_vsi_type_get(adapter, vsi) == + FUSION_VF2VSI_NODE_VF_KVSI) ? 
\ + SXE2_TXSCH_NODE_PRIO_HIGH : SXE2_TXSCH_NODE_PRIO_DLFT; + ret = sxe2_txsched_add_nodes_to_parent(adapter, parent, + &nodes_param, &first_node, &num_added); + if (ret || add_nodes != num_added) { + LOG_ERROR_BDF("create vsi child node failed, \t" + "vsi_idx=%u, nend_node=%u, \t" + "qg_nodes =%u num_added=%u\n", + vsi->idx_in_dev, add_nodes, + qg_nodes, num_added); + return -EIO; + } + + if (num_added) + sxe2_txsched_qp_group_update(first_node, num_added, + owner, vsi->idx_in_dev, + &prev_node); + + qg_nodes -= num_added; + if (qg_nodes) + parent = parent->group; + else + break; + } + + return 0; +} + +static inline u32 +sxe2_txsched_vsi_child_nodes_calc(struct sxe2_txsched_node *fist_vsi_node, + u32 queue_num, u8 owner) +{ + u32 need_num; + u32 cur_qg_num; + + need_num = DIV_ROUND_UP(queue_num, SXE2_TXSCHED_NODE_CHILD_MAX); + need_num = need_num ? need_num : 1; + + cur_qg_num = sxe2_txsched_qg_num_get(fist_vsi_node, owner); + + return (need_num <= cur_qg_num) ? 0 : (need_num - cur_qg_num); +} + +STATIC s32 +sxe2_txsched_vsi_children_build(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, + struct sxe2_txsched_node *vsi_node, + u8 tc, u32 new_numqs, u8 owner) +{ + int ret = 0; + u16 qg_nodes = 0x3fff; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_context *ctxt = &adapter->tx_sched_ctxt; + + tc_node = sxe2_txsched_tc_node_get(ctxt, tc); + if (!tc_node) { + LOG_ERROR_BDF("tc node = NULL, tc=%d\n", tc); + return -EIO; + } + + if (new_numqs) { + if (sxe2_txsch_vsi_type_get(adapter, vsi) == FUSION_VF2VSI_MODE_VF_UVSI) { + qg_nodes = (u16)DIV_ROUND_UP(new_numqs, SXE2_TXSCHED_NODE_CHILD_MAX); + qg_nodes = qg_nodes ? qg_nodes : 1; + } else { + qg_nodes = (u16)sxe2_txsched_vsi_child_nodes_calc(vsi_node, + new_numqs, owner); + } + } + + LOG_INFO_BDF("txsched queue grep node num=%d, q_num=%d\n", qg_nodes, new_numqs); + + ret = sxe2_txsched_vsi_child_nodes_add(adapter, vsi, vsi_node, tc_node, + qg_nodes, owner); + return ret; +} + +STATIC s32 sxe2_txsched_vsi_topo_build(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, u32 max_qs[], u8 owner, + struct sxe2_txsched_node **tc0_vsi_node) +{ + u32 i; + s32 ret = 0; + struct sxe2_txsched_node *vsi_node, *tc_node; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + sxe2_for_each_tc(i) { + if (!max_qs[i]) + continue; + + tc_node = sxe2_txsched_tc_node_get(sched_ctxt, i); + if (!tc_node || !tc_node->in_use) + continue; + + vsi_node = sxe2_txsch_vsi_node_get(adapter, vsi, i, owner); + if (vsi_node) { + if (sxe2_txsched_vsi_node_enough(vsi_node, (u16)max_qs[i])) + goto l_child; + + ret = sxe2_txsched_vsi_node_del(adapter, i, vsi, owner); + if (ret) { + LOG_ERROR_BDF("tc[%u] vsi node del failed, ret=%d\n", i, ret); + goto l_err; + } + } + + vsi_node = sxe2_txsched_vsi_build(adapter, tc_node, vsi, max_qs[i], + owner); + if (!vsi_node) { + ret = -EIO; + LOG_ERROR_BDF("sched vsi[%d] node build failed, owner=%d\n ", + vsi->idx_in_dev, owner); + break; + } + +l_child: + if (i == 0) + *tc0_vsi_node = vsi_node; + + ret = sxe2_txsched_vsi_children_build(adapter, vsi, vsi_node, i, + max_qs[i], owner); + if (ret) { + LOG_ERROR_BDF("sched vsi[%d] children build failed\n ", + vsi->idx_in_dev); + break; + } + } + +l_err: + return ret; +} + +s32 sxe2_txsched_lan_vsi_cfg(struct sxe2_vsi *vsi) +{ + u8 i; + s32 ret; + u32 q_cnt[SXE2_MAX_TRAFFIC_CLASS]; + struct sxe2_txsched_node *vsi_node; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || 
vsi->type == SXE2_VSI_T_DPDK_VF) + return 0; + + (void)memset(q_cnt, 0, sizeof(u32) * SXE2_MAX_TRAFFIC_CLASS); + sxe2_for_each_tc(i) { + if (!(vsi->tc.tc_map & BIT(i))) + continue; + + if (vsi->type == SXE2_VSI_T_PF) { + q_cnt[i] = vsi->tc.info[i].txq_cnt; + if (i == 0) + q_cnt[i] += vsi->num_xdp_txq; + } else if (vsi->type == SXE2_VSI_T_CTRL) { + q_cnt[i] = 1; + } else if (vsi->type == SXE2_VSI_T_VF) { + q_cnt[i] = vsi->tc.info[i].txq_cnt; + if (i == 0) + q_cnt[i] += IEEE_8021Q_MAX_PRIORITIES; + } else { + q_cnt[i] = vsi->tc.info[i].txq_cnt; + } + + ret = sxe2_txsched_vsi_q_ctxt_alloc(vsi, i, (u16)q_cnt[i]); + if (ret) + return ret; + } + + mutex_lock(&adapter->tx_sched_ctxt.lock); + if (adapter->tx_sched_ctxt.state != SXE2_TX_SCHED_STATE_READY) { + mutex_unlock(&adapter->tx_sched_ctxt.lock); + LOG_ERROR_BDF("sched dont ready, state=%d\n", adapter->tx_sched_ctxt.state); + return -EIO; + } + + ret = sxe2_txsched_vsi_topo_build(adapter, vsi, q_cnt, + SXE2_TXSCHED_NODE_OWNER_LAN, &vsi_node); + if (ret) + LOG_ERROR_BDF("txsched lan vsi build failed, ret=%d\n", ret); + else + sxe2_txsch_vsi_node_map(adapter, vsi, SXE2_TXSCHED_NODE_OWNER_LAN, vsi_node); + + mutex_unlock(&adapter->tx_sched_ctxt.lock); + + return ret; +} + +s32 sxe2_txsched_lan_vsi_rm(struct sxe2_vsi *vsi) +{ + s32 ret; + + if (!sxe2_txsched_support_chk(vsi->adapter)) + return 0; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + return 0; + + mutex_lock(&vsi->adapter->tx_sched_ctxt.lock); + + ret = sxe2_txsched_del_vsi_node_for_each_tc(vsi->adapter, + vsi, SXE2_TXSCHED_NODE_OWNER_LAN); + + sxe2_txsch_vsi_node_unmap(vsi, SXE2_TXSCHED_NODE_OWNER_LAN); + + mutex_unlock(&vsi->adapter->tx_sched_ctxt.lock); + + return ret; +} + +s32 sxe2_txsched_rdma_vsi_rm(struct sxe2_vsi *vsi) +{ + s32 ret; + + if (!sxe2_txsched_support_chk(vsi->adapter)) + return 0; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + return 0; + + mutex_lock(&vsi->adapter->tx_sched_ctxt.lock); + + ret = sxe2_txsched_del_vsi_node_for_each_tc(vsi->adapter, + vsi, SXE2_TXSCHED_NODE_OWNER_RDMA); + + sxe2_txsch_vsi_node_unmap(vsi, SXE2_TXSCHED_NODE_OWNER_RDMA); + + mutex_unlock(&vsi->adapter->tx_sched_ctxt.lock); + + return ret; +} + +STATIC struct sxe2_txsched_node * +sxe2_txsched_get_free_qg(struct sxe2_txsched_node *qgrp_node, u16 vsi_idx, + u32 owner) +{ + u8 min_children; + struct sxe2_txsched_node *min_qgrp; + + min_children = qgrp_node->child_cnt; + if (!min_children) + return qgrp_node; + + min_qgrp = qgrp_node; + while (qgrp_node) { + if (vsi_idx == qgrp_node->vsi_idx_in_dev && + qgrp_node->child_cnt <= SXE2_TXSCHED_NODE_CHILD_MAX) { + if (qgrp_node->child_cnt < min_children && + qgrp_node->owner == owner) { + min_qgrp = qgrp_node; + min_children = min_qgrp->child_cnt; + if (!min_children) + break; + } + } + qgrp_node = qgrp_node->group; + } + + return min_qgrp; +} + +STATIC struct sxe2_txsched_node * +sxe2_txsched_get_free_qparent(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, u8 tc, u8 owner) +{ + struct sxe2_txsched_node *vsi_node; + struct sxe2_txsched_node *qgrp_node; + + vsi_node = sxe2_txsch_vsi_node_get(adapter, vsi, tc, owner); + if (!vsi_node) { + LOG_ERROR_BDF("sched vsi_node == NULL, vsi_idx=%d, owner=%d\n", + vsi->idx_in_dev, owner); + return NULL; + } + + qgrp_node = sxe2_txsch_qg_head_node_get(vsi_node, owner); + if (!qgrp_node) { + LOG_ERROR_BDF("sched qgrp == NULL, vsi=%d\n", vsi->idx_in_dev); + return NULL; + } + + return sxe2_txsched_get_free_qg(qgrp_node, vsi->idx_in_dev, owner); +} 
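+
+/*
+ * sxe2_txsched_txq_node_add() below attaches a LAN txq leaf under the
+ * least-loaded queue-group node of the VSI for txq->dcb_tc: the leaf is
+ * programmed in FW first, then mirrored into the SW tree, and finally any
+ * stored per-queue bandwidth settings are replayed (non-DPDK VSIs only).
+ */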
+ +s32 sxe2_txsched_txq_node_add(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, struct sxe2_queue *txq, + enum sxe2_txsched_node_owner owner, + struct sxe2_fwc_cfg_txq_req *req) +{ + s32 ret; + u8 q_layer; + struct sxe2_cmd_params cmd = {}; + struct sxe2_txsched_node *parent; + struct sxe2_fwc_cfg_txq_resp resp; + struct sxe2_vsi_txsched_queue *q_ctxt; + struct sxe2_txsched_node_param node_param; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + q_layer = sxe2_txsched_sw_q_layer_get(); + + mutex_lock(&sched_ctxt->lock); + if (sched_ctxt->state != SXE2_TX_SCHED_STATE_READY) { + ret = -EIO; + goto l_unlock; + } + + parent = sxe2_txsched_get_free_qparent(adapter, vsi, txq->dcb_tc, owner); + if (!parent) { + ret = -EINVAL; + goto l_unlock; + } + + if ((parent->child_cnt + 1) > SXE2_TXSCHED_NODE_CHILD_MAX) { + ret = -ENOSPC; + LOG_ERROR_BDF("vsi[%u] are not enough slot, parent_teid=%#x, " + "parent->child_cnt=%u, need_node=1 , ret=%d\n", + vsi->idx_in_dev, parent->info.node_teid, + parent->child_cnt, ret); + + goto l_unlock; + } + + req->leaf.port = adapter->port_idx; + req->leaf.tc = txq->dcb_tc; + req->leaf.txq_idx_in_dev = + txq->idx_in_pf + adapter->q_ctxt.txq_base_idx_in_dev; + req->leaf.node.parent_teid = parent->info.node_teid; + req->leaf.node.data.hw_layer = q_layer + 1; + req->leaf.node.data.status = SXE2_NODE_STATUS_ENABLE; + req->leaf.node.data.arb_mode = SXE2_NODE_ARB_MODE_BPS; + req->leaf.node.data.rl_type = SXE2_NODE_RL_TYPE_EIR; + req->leaf.node.data.cir.bw = SXE2_TXSCHED_DFLT_BW; + req->leaf.node.data.cir.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + req->leaf.node.data.cir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->leaf.node.data.srlPir.bw = SXE2_TXSCHED_DFLT_BW; + req->leaf.node.data.srlPir.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + req->leaf.node.data.srlPir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->leaf.node.data.prio = 0; + req->leaf.node.data.adj_lvl = sxe2_txsch_node_adj_lvl_get(); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_Q_CFG, req, + sizeof(*req), &resp, sizeof(resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("tx sched txq add failed, ret=%d\n", ret); + ret = -EIO; + goto l_unlock; + } + + node_param.tc = txq->dcb_tc; + node_param.owner = owner; + node_param.sw_layer = q_layer; + node_param.node_teid = resp.node_teid; + node_param.vsi_idx_in_dev = vsi->idx_in_dev; + node_param.txq_idx_in_vsi = txq->idx_in_vsi; + node_param.node_silbing_idx = resp.sibling_idx; + node_param.parent_teid = parent->info.node_teid; + node_param.txq_idx_in_dev = req->leaf.txq_idx_in_dev; + ret = sxe2_txsched_sw_node_add(adapter, &node_param); + if (ret) { + LOG_ERROR_BDF("tx sched txq add failed, ret=%d\n", ret); + goto l_unlock; + } + txq->txq_teid = resp.node_teid; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + goto l_end; + + q_ctxt = sxe2_txsched_q_ctxt_get(vsi, txq->dcb_tc, txq->idx_in_vsi); + q_ctxt->teid = resp.node_teid; + q_ctxt->idx_in_dev = req->leaf.txq_idx_in_dev; + ret = sxe2_txsched_replay_q_bw(vsi, q_ctxt); + if (ret) { + LOG_ERROR_BDF("tx sched txq replay bw failed, ret=%d\n", ret); + goto l_unlock; + } +l_end: + LOG_INFO_BDF("txq node add success,teid:%#x,parent teid:%#x\n", + txq->txq_teid, node_param.parent_teid); +l_unlock: + mutex_unlock(&sched_ctxt->lock); + return ret; +} + +s32 sxe2_txsched_txq_node_del(struct sxe2_adapter *adapter, struct sxe2_queue *txq) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; 
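+	/* Tear-down mirrors the add path: stop the queue in FW via
+	 * SXE2_CMD_TX_SCHED_Q_STOP, then remove the matching SW node.
+	 */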
+ struct sxe2_txsched_node *node; + struct sxe2_fwc_disable_txq_req req; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + mutex_lock(&sched_ctxt->lock); + + LOG_INFO_BDF("[txq_teid:%#x]\n", txq->txq_teid); + + node = sxe2_txsched_find_node_by_teid(sched_ctxt->root, txq->txq_teid); + if (!node) { + LOG_ERROR_BDF("sched txq node unexit, dont need del\n"); + goto l_end; + } + + req.leaf.tc = txq->dcb_tc; + req.leaf.port = adapter->port_idx; + req.txq_idx_in_func = txq->idx_in_pf; + req.leaf.node_teid = node->info.node_teid; + req.leaf.parent_teid = node->info.parent_teid; + req.leaf.sibling_idx = (u16)node->info.sibling_idx; + req.txq_idx_in_nic = adapter->q_ctxt.txq_base_idx_in_dev + txq->idx_in_pf; + req.leaf.txq_idx_in_dev = adapter->q_ctxt.txq_base_idx_in_dev + txq->idx_in_pf; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_Q_STOP, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) + LOG_ERROR_BDF("sched txq del failed, ret=%d\n", ret); + + LOG_INFO_BDF("sched txq node del success,teid:%#x,parent teid:%#x\n", + txq->txq_teid, node->info.parent_teid); + + sxe2_txsched_sw_node_del(adapter, node); + + txq->txq_teid = 0; + +l_end: + mutex_unlock(&sched_ctxt->lock); + return ret; +} + +void sxe2_txsched_tree_clean(struct sxe2_adapter *adapter) +{ + if (!sxe2_txsched_support_chk(adapter)) + return; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + if (adapter->tx_sched_ctxt.state != SXE2_TX_SCHED_STATE_READY) { + (void)mutex_unlock(&adapter->tx_sched_ctxt.lock); + return; + } + + adapter->tx_sched_ctxt.state = SXE2_TX_SCHED_STATE_INIT; + (void)sxe2_txsched_subtree_clean(adapter, adapter->tx_sched_ctxt.root); + adapter->tx_sched_ctxt.root = NULL; + mutex_unlock(&adapter->tx_sched_ctxt.lock); +} + +void sxe2_txsched_vf_tree_clean(struct sxe2_adapter *adapter) +{ + u8 i, idx; + u8 child_cnt; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_node *veb_node; + + if (!sxe2_txsched_support_chk(adapter)) + return; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + if (adapter->tx_sched_ctxt.state != SXE2_TX_SCHED_STATE_READY) { + (void)mutex_unlock(&adapter->tx_sched_ctxt.lock); + return; + } + + tc_node = adapter->tx_sched_ctxt.root->child[0]; + child_cnt = tc_node->child_cnt; + for (i = 0, idx = 0; i < child_cnt; i++) { + veb_node = tc_node->child[idx]; + if (!veb_node) { + idx++; + continue; + } + + if (veb_node->node_type == SXE2_TXSCHD_VEB_TYPE_VF) { + (void)sxe2_txsched_subtree_clean(adapter, veb_node); + } else { + idx++; + continue; + } + } + + mutex_unlock(&adapter->tx_sched_ctxt.lock); +} + +void sxe2_txsched_sw_subtree_dump(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node) +{ + u8 i; + + if (!node) + return; + + for (i = 0; i < node->child_cnt; i++) + sxe2_txsched_sw_subtree_dump(adapter, node->child[i]); + + LOG_DEV_INFO("vsi_idx:%d, hw_layer:%#x teid:%#x parent_teid:%#x num_children:%d tc_idx:%#x \t" + "type:%#x use:%#x owner:%#x\n", + node->vsi_idx_in_dev, node->info.data.hw_layer, + le16_to_cpu(node->info.node_teid), + le16_to_cpu(node->info.parent_teid), + node->child_cnt, + node->tc, + node->node_type, + node->in_use, + node->owner); + + LOG_DEV_INFO("prio:%d, is_suspend:%#x is_pps:%#x profiled_type:%#x\n", + node->info.data.prio, node->info.data.status, + node->info.data.arb_mode, node->info.data.rl_type); + + LOG_DEV_INFO("cir_prof_id:%u bw:%u, crlweight:%u srl_prof_id:%u bw:%u srl_weight:%u \t" + " pri:#%x\n\n", + 
le32_to_cpu(node->info.data.cir.prof_id), + le32_to_cpu(node->info.data.cir.bw), + le16_to_cpu(node->info.data.cir.weight), + le32_to_cpu(node->info.data.srlPir.prof_id), + le32_to_cpu(node->info.data.srlPir.bw), + le16_to_cpu(node->info.data.srlPir.weight), + node->info.data.prio); +} + +void sxe2_txsched_hw_subtree_dump(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node) +{ + u8 i; + s32 ret; + struct sxe2_txsched_node_param param; + struct sxe2_txsched_node_props *data; + struct sxe2_txsched_node_info node_info; + + if (!node) + return; + + for (i = 0; i < node->child_cnt; i++) + sxe2_txsched_hw_subtree_dump(adapter, node->child[i]); + + LOG_DEV_INFO("vsi_idx:%#x, hw_layer:%#x, node_teid:%#x, parent_teid:%#x, child_idx:%#x,\t" + " num_children:%#x, tc_idx:%#x, type:%#x, pri:%#x, cirwgt:%#x, pirwgt:%#x,\t" + " cirbw:%#x, srlpirbw:%#x\n", + node->vsi_idx_in_dev, node->info.data.hw_layer, + le16_to_cpu(node->info.node_teid), + le16_to_cpu(node->info.parent_teid), + node->info.sibling_idx, + node->child_cnt, + node->tc, node->node_type, + node->info.data.prio, + node->info.data.cir.weight, + node->info.data.srlPir.weight, + node->info.data.cir.bw, + node->info.data.srlPir.bw); + + param.node_teid = node->info.node_teid; + param.parent_teid = node->info.parent_teid; + param.node_silbing_idx = (u8)node->info.sibling_idx; + ret = sxe2_txsched_node_info_query(adapter, ¶m, &node_info); + if (ret) + return; + + data = &node_info.data; + LOG_DEV_INFO("fw_is_pps:%#x, fw_is_enable:%#x, fw_profile_type:%#x\n", + data->arb_mode, data->status, data->rl_type); + LOG_DEV_INFO("fw_cir_id:%#x, fw_cir_bw:%#x, fw_cir_wgt:%#x,\t" + " fw_srl_pir_id:%#x, fw_srl_pir_bw:%#x, fw_srl_pir_wgt:%#x\n", + le32_to_cpu(data->cir.prof_id), le32_to_cpu(data->cir.bw), + le16_to_cpu(data->cir.weight), + le32_to_cpu(data->srlPir.prof_id), + le32_to_cpu(data->srlPir.bw), + le16_to_cpu(data->srlPir.weight)); +} + +void sxe2_txsched_tree_dump(struct sxe2_adapter *adapter) +{ + struct sxe2_txsched_context *sched_ctxt = sxe2_txsched_ctxt_get(adapter); + + if (!sxe2_txsched_support_chk(adapter)) + return; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + sxe2_txsched_hw_subtree_dump(adapter, sched_ctxt->root); + mutex_unlock(&adapter->tx_sched_ctxt.lock); +} + +void sxe2_txsched_sw_tree_dump(struct sxe2_adapter *adapter) +{ + struct sxe2_txsched_context *sched_ctxt = sxe2_txsched_ctxt_get(adapter); + + if (!sxe2_txsched_support_chk(adapter)) + return; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + sxe2_txsched_sw_subtree_dump(adapter, sched_ctxt->root); + mutex_unlock(&adapter->tx_sched_ctxt.lock); +} + +STATIC s32 sxe2_txsched_ets_query(struct sxe2_adapter *adapter, u8 tc_cnt, + struct sxe2_txsched_ets_query_resp *resp) +{ + s32 ret; + struct sxe2_cmd_params cmd = { 0 }; + struct sxe2_txsched_ets_query_rep req; + + req.tc_cnt = tc_cnt; + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_ETS_QUERY, &req, + sizeof(req), resp, sizeof(*resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sched ets query failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_txsched_tc_node_update(struct sxe2_adapter *adapter, + struct sxe2_txsched_ets_query_resp *resp) +{ + u8 i, j; + s32 ret = 0; + u32 teid1, teid2; + struct sxe2_txsched_node *tc_node; + struct sxe2_txsched_node_param param; + struct sxe2_txsched_context *sched_ctxt = sxe2_txsched_ctxt_get(adapter); + + if (!sched_ctxt->root) { + LOG_ERROR_BDF("tc node update failed: tx sched root node is null\n"); + return -EBUSY; + } 
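+
+	/*
+	 * Two passes below: first mark cached TC children whose TEIDs no
+	 * longer appear in the ETS response as unused, then re-activate
+	 * matching nodes and allocate SW nodes for newly reported TEIDs.
+	 */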
+ + for (i = 0; i < sched_ctxt->root->child_cnt; i++) { + teid1 = le32_to_cpu(sched_ctxt->root->child[i]->info.node_teid); + + sxe2_for_each_tc(j) { + teid2 = le32_to_cpu(resp->tc_node[j].teid); + if (teid1 == teid2) + break; + } + + if (j < IEEE_8021QAZ_MAX_TCS) + continue; + + sched_ctxt->root->child[i]->in_use = false; + } + + sxe2_for_each_tc(j) { + teid2 = le32_to_cpu(resp->tc_node[j].teid); + if (teid2 == SXE2_TXSCHED_TEID_INVALID) + continue; + + for (i = 0; i < sched_ctxt->root->child_cnt; i++) { + tc_node = sched_ctxt->root->child[i]; + if (!tc_node) + continue; + + teid1 = le16_to_cpu(tc_node->info.node_teid); + if (teid1 == teid2) { + tc_node->tc = j; + tc_node->in_use = true; + break; + } + } + + if (i < sched_ctxt->root->child_cnt) + continue; + + param.tc = j; + param.node_teid = (u16)teid2; + param.vsi_idx_in_dev = 0; + param.owner = SXE2_TXSCHED_NODE_OWNER_LAN; + param.sw_layer = SXE2_TXSCHED_SW_TC_LAYER; + param.node_silbing_idx = (u8)resp->tc_node[j].silbing_idx; + param.parent_teid = resp->tc_node[j].parent_teid; + ret = sxe2_txsched_sw_node_add(adapter, ¶m); + if (ret) { + LOG_ERROR_BDF("txsched alloc tc sw node failed, teid=0x%x\n", teid2); + goto l_err; + } + } +l_err: + return ret; +} + +s32 sxe2_txsched_ets_update(struct sxe2_adapter *adapter, u8 tc_cnt) +{ + s32 ret; + struct sxe2_txsched_ets_query_resp resp = { 0 }; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + + ret = sxe2_txsched_ets_query(adapter, tc_cnt, &resp); + if (ret) + goto l_unlock; + + ret = sxe2_txsched_tc_node_update(adapter, &resp); + if (ret) + goto l_unlock; + + LOG_INFO_BDF("dcb sched tc node update success\n"); + +l_unlock: + mutex_unlock(&adapter->tx_sched_ctxt.lock); + return ret; +} + +s32 sxe2_txsched_qset_node_move(struct sxe2_adapter *adapter, + struct sxe2_adapter *new_adapter, + struct aux_rdma_qset_params *dqset, + u16 *new_teid, u8 is_aa) + +{ + s32 ret; + u8 old_tc; + u8 new_tc; + struct sxe2_txsched_node *node; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + goto l_end; + + LOG_INFO("sched rdma node move user_prio %d pf %d(teid %d, qset %d) to pf %d.\n", + dqset->user_pri, adapter->pf_idx, dqset->teid, dqset->qset_id, + new_adapter->pf_idx); + + mutex_lock(&sched_ctxt->lock); + node = sxe2_txsched_find_node_by_teid(sched_ctxt->root, dqset->teid); + mutex_unlock(&sched_ctxt->lock); + if (!node) { + *new_teid = dqset->teid; + LOG_INFO_BDF("sched qset node unexit, dont move\n"); + goto l_end; + } + + old_tc = sxe2_rdma_aux_get_qset_tc(adapter, dqset); + ret = sxe2_txsched_qset_node_del(adapter, dqset, old_tc); + if (ret) { + LOG_ERROR_BDF("rdma qset move node del failed, ret=%d\n", ret); + return ret; + } + + ret = sxe2_txsched_rdma_vsi_cfg(new_adapter->vsi_ctxt.main_vsi, is_aa); + if (ret) { + LOG_ERROR_BDF("rdma move vsi cfg failed, ret=%d\n", ret); + return ret; + } + + new_tc = sxe2_rdma_aux_get_qset_tc(new_adapter, dqset); + ret = sxe2_txsched_qset_node_add(new_adapter, + new_adapter->vsi_ctxt.main_vsi, dqset, new_tc); + if (ret) { + LOG_ERROR_BDF("rdma qset move node add failed, ret=%d\n", ret); + return ret; + } + + *new_teid = dqset->teid; + + LOG_INFO_BDF("sched rdma node move user_prio %d " + "pf %d(teid %d, qset %d) to pf %d(teid %d) success.\n", + dqset->user_pri, adapter->pf_idx, dqset->teid, dqset->qset_id, + new_adapter->pf_idx, *new_teid); +l_end: + return 0; +} + +s32 sxe2_txsched_qset_node_add(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, 
+ struct aux_rdma_qset_params *qset, u8 tc) +{ + s32 ret; + u8 q_layer; + struct sxe2_cmd_params cmd = {}; + struct sxe2_txsched_node *parent; + struct sxe2_fwc_add_qset_req add_req = {}; + struct sxe2_fwc_add_qset_req *req = &add_req; + struct sxe2_fwc_cfg_txq_resp resp; + struct sxe2_txsched_node_param node_param; + struct sxe2_txsched_context *sched_ctxt = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + q_layer = sxe2_txsched_sw_q_layer_get(); + + mutex_lock(&sched_ctxt->lock); + if (sched_ctxt->state != SXE2_TX_SCHED_STATE_READY) { + ret = -EIO; + goto l_unlock; + } + + if (vsi->type != SXE2_VSI_T_VF) { + parent = sxe2_txsched_get_free_qparent(adapter, vsi, tc, + SXE2_TXSCHED_NODE_OWNER_RDMA); + } else { + parent = sxe2_txsched_get_free_qparent(adapter, vsi, tc, + SXE2_TXSCHED_NODE_OWNER_LAN); + } + if (!parent) { + ret = -EINVAL; + goto l_unlock; + } + + req->leaf.port = adapter->port_idx; + req->leaf.tc = tc; + req->leaf.txq_idx_in_dev = qset->qset_id; + req->leaf.node.parent_teid = parent->info.node_teid; + req->leaf.node.data.hw_layer = q_layer + 1; + req->leaf.node.data.status = SXE2_NODE_STATUS_ENABLE; + req->leaf.node.data.arb_mode = SXE2_NODE_ARB_MODE_BPS; + req->leaf.node.data.rl_type = SXE2_NODE_RL_TYPE_EIR; + req->leaf.node.data.cir.bw = SXE2_TXSCHED_DFLT_BW; + req->leaf.node.data.cir.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + req->leaf.node.data.cir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->leaf.node.data.srlPir.bw = SXE2_TXSCHED_DFLT_BW; + req->leaf.node.data.srlPir.weight = SXE2_TXSCHED_ARB_CREDIT_DFLT; + req->leaf.node.data.srlPir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req->leaf.node.data.prio = 0; + req->leaf.node.data.adj_lvl = sxe2_txsch_node_adj_lvl_get(); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_QSET_LEAF_ADD, req, + sizeof(*req), &resp, sizeof(resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("tx sched txq add failed, ret=%d\n", ret); + ret = -EIO; + goto l_unlock; + } + + node_param.tc = tc; + node_param.sw_layer = q_layer; + node_param.node_teid = resp.node_teid; + node_param.vsi_idx_in_dev = vsi->idx_in_dev; + node_param.node_silbing_idx = resp.sibling_idx; + node_param.parent_teid = parent->info.node_teid; + node_param.txq_idx_in_dev = req->leaf.txq_idx_in_dev; + ret = sxe2_txsched_sw_node_add(adapter, &node_param); + if (ret) { + LOG_ERROR_BDF("tx sched qset add failed, ret=%d\n", ret); + goto l_unlock; + } + qset->teid = resp.node_teid; + + LOG_INFO_BDF("qset node add success,pf_id:%d, user_pri:%d, " + "tc:%d,teid:%#x,parent teid:%#x\n", + adapter->pf_idx, qset->user_pri, tc, + qset->teid, node_param.parent_teid); + +l_unlock: + mutex_unlock(&sched_ctxt->lock); + + return ret; +} + +s32 sxe2_txsched_qset_node_del(struct sxe2_adapter *adapter, + struct aux_rdma_qset_params *qset, u8 tc) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_txsched_node *node; + struct sxe2_fwc_del_qset_req req; + struct sxe2_txsched_context *ctx = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + mutex_lock(&ctx->lock); + node = sxe2_txsched_find_node_by_teid(ctx->root, qset->teid); + if (!node) { + LOG_DEBUG_BDF("sched qset node unexit, dont need del\n"); + goto l_end; + } + + req.leaf.tc = tc; + req.leaf.port = adapter->port_idx; + req.leaf.node_teid = node->info.node_teid; + req.leaf.parent_teid = node->info.parent_teid; + req.leaf.sibling_idx = (u16)node->info.sibling_idx; + req.leaf.txq_idx_in_dev = qset->qset_id; + 
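+
+	/* A FW failure on the delete below is logged but swallowed (ret is
+	 * reset to 0), and the SW node is removed unconditionally to keep
+	 * both views in sync.
+	 */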
sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_QSET_LEAF_DEL, &req, + sizeof(req), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("sched qset del failed, ret=%d\n", ret); + ret = 0; + } + + LOG_DEBUG_BDF("sched qset node del success, pfid:%d, user_pri:%d, " + "teid:%#x,parent teid:%#x\n", + adapter->pf_idx, qset->user_pri, qset->teid, node->info.parent_teid); + + sxe2_txsched_sw_node_del(adapter, node); + +l_end: + mutex_unlock(&ctx->lock); + return ret; +} + +STATIC s32 sxe2_txsched_lan_child_tree_node_del(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node, + u16 not_del_root_teid, + u8 owner) +{ + u8 i; + s32 ret = 0; + u8 q_layer = sxe2_txsched_sw_q_layer_get(); + + if (!node) + return 0; + + i = node->child_cnt; + while (i) { + i--; + ret = sxe2_txsched_lan_child_tree_node_del(adapter, + node->child[i], + not_del_root_teid, owner); + if (ret) { + LOG_ERROR_BDF("sched hw node del(child) failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->child[i]->info.node_teid, + node->child[i]->parent->info.node_teid, + node->child[i]->info.sibling_idx); + ret = 0; + goto l_end; + } + } + + if (node->info.data.hw_layer > SXE2_TXSCHED_HW_LAYER_TC && + node->owner == owner && + node->info.node_teid != not_del_root_teid) { + if (node->info.data.hw_layer != (q_layer + 1)) { + ret = sxe2_tx_sched_hw_node_del(adapter, + node->parent->info.node_teid, + (u16)node->info.sibling_idx, 1, + &node->info.node_teid); + if (ret) { + LOG_ERROR_BDF("sched hw node del failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d\n", + node->info.node_teid, + node->parent->info.node_teid, + node->info.sibling_idx); + ret = 0; + } + } else { + ret = sxe2_tx_sched_hw_lan_leaf_node_del(adapter, + node->parent->info.node_teid, + (u16)node->info.sibling_idx, + node->info.node_teid, + node->txq_idx_in_dev); + if (ret) { + LOG_ERROR_BDF("sched hw leaf node del failed, teid=%#x, \t" + "parent_teid=%#x, sibling_idx=%d idx=%#x\n", + node->info.node_teid, + node->parent->info.node_teid, + node->info.sibling_idx, + node->txq_idx_in_dev); + ret = 0; + } + } + + if (!ret) + sxe2_txsched_sw_node_del(adapter, node); + } + +l_end: + return ret; +} + +s32 sxe2_txsched_rdma_vsi_cfg(struct sxe2_vsi *vsi, u8 is_aa) +{ + s32 ret; + struct sxe2_txsched_node *vsi_node; + u32 i, qset[SXE2_MAX_TRAFFIC_CLASS]; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + if (vsi->type == SXE2_VSI_T_DPDK_PF || vsi->type == SXE2_VSI_T_DPDK_VF) + return 0; + + if (is_aa) { + sxe2_for_each_tc(i) { + qset[i] = SXE2_AA_MODE_QSET_NUM; + } + } else { + sxe2_for_each_tc(i) { + qset[i] = IEEE_8021QAZ_MAX_TCS; + } + } + + mutex_lock(&adapter->tx_sched_ctxt.lock); + if (adapter->tx_sched_ctxt.state != SXE2_TX_SCHED_STATE_READY) { + mutex_unlock(&adapter->tx_sched_ctxt.lock); + LOG_ERROR_BDF("sched dont ready, state=%d\n", + adapter->tx_sched_ctxt.state); + return -EIO; + } + + ret = sxe2_txsched_vsi_topo_build(adapter, vsi, qset, + SXE2_TXSCHED_NODE_OWNER_RDMA, &vsi_node); + if (ret) { + LOG_ERROR_BDF("txsched rdma vsi build failed, ret=%d\n", ret); + } else { + sxe2_txsch_vsi_node_map(adapter, vsi, + SXE2_TXSCHED_NODE_OWNER_RDMA, vsi_node); + } + + mutex_unlock(&adapter->tx_sched_ctxt.lock); + + return ret; +} + +s32 sxe2_txsch_ucmd_root_vsi_cfg(struct sxe2_vsi *vsi, u16 *user_root_teid) +{ + s32 ret; + u32 q_cnt[SXE2_MAX_TRAFFIC_CLASS]; + struct sxe2_txsched_node *vsi_node; + u8 owner = SXE2_TXSCHED_NODE_OWNER_USER; + struct sxe2_adapter *adapter; + + 
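+	/* Report the invalid TEID by default so callers can detect failure
+	 * before the topology build has run.
+	 */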
*user_root_teid = SXE2_TXSCHED_TEID_INVALID; + + if (!vsi) { + LOG_ERROR("ucmd root add params invalid, vsi == NULL\n"); + return -EINVAL; + } + + adapter = vsi->adapter; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + LOG_INFO_BDF("txsch ucmd root add start, vsi_idx=%d\n", vsi->idx_in_dev); + + if (vsi->type != SXE2_VSI_T_DPDK_PF && vsi->type != SXE2_VSI_T_DPDK_VF) { + LOG_ERROR_BDF("Non-DPDK type vsi, vsi type=%d\n", vsi->type); + return -EINVAL; + } + + if (!(vsi->tc.tc_map & BIT(0))) { + LOG_ERROR_BDF("ucmd vsi build failed, tc0 not map\n"); + return -EINVAL; + } + + (void)memset(q_cnt, 0, sizeof(u32) * SXE2_MAX_TRAFFIC_CLASS); + if (vsi->type == SXE2_VSI_T_DPDK_PF) + q_cnt[0] = SXE2_TX_SCHED_VSI_MAX_TXQ_NUM; + else if (vsi->type == SXE2_VSI_T_DPDK_VF) + q_cnt[0] = vsi->tc.info[0].txq_cnt; + + mutex_lock(&adapter->tx_sched_ctxt.lock); + + if (adapter->tx_sched_ctxt.state != SXE2_TX_SCHED_STATE_READY) { + mutex_unlock(&adapter->tx_sched_ctxt.lock); + LOG_ERROR_BDF("sched dont ready, state=%d\n", + adapter->tx_sched_ctxt.state); + return -EIO; + } + + ret = sxe2_txsched_vsi_topo_build(adapter, vsi, q_cnt, owner, &vsi_node); + if (ret) { + LOG_ERROR_BDF("txsch ucmd root add failed, vsi_idx=%d, vsi_type=%d, ret=%d\n", + vsi->idx_in_dev, vsi->type, ret); + } else { + sxe2_txsch_vsi_node_map(adapter, vsi, owner, vsi_node); + if (vsi->type == SXE2_VSI_T_DPDK_PF) { + *user_root_teid = vsi_node->parent->info.node_teid; + adapter->tx_sched_ctxt.user_root_teid = *user_root_teid; + } else { + *user_root_teid = vsi_node->info.node_teid; + } + + LOG_INFO_BDF("txsch ucmd root add, vsi_idx=%d, vsi_teid=%#x, " + "owner=%d, parent_teid=%#x\n", + vsi->idx_in_dev, vsi_node->info.node_teid, + vsi_node->owner, vsi_node->parent->info.node_teid); + + LOG_INFO_BDF("txsch ucmd root add success, vsi_idx=%d, root_teid=%#x\n", + vsi->idx_in_dev, *user_root_teid); + } + + mutex_unlock(&adapter->tx_sched_ctxt.lock); + + return ret; +} + +s32 sxe2_txsch_ucmd_subtree_del(struct sxe2_adapter *adapter, + u16 vsi_idx, u16 node_teid, + bool del_root) +{ + s32 ret = 0; + u16 no_del_root_teid; + struct sxe2_vsi *vsi; + struct sxe2_txsched_node *node; + struct sxe2_txsched_context *ctxt = &(adapter->tx_sched_ctxt); + u8 q_layer = sxe2_txsched_sw_q_layer_get(); + + LOG_INFO_BDF("txsch ucmd root del start, vsi_idx=%d, teid=%#x, del_root=%d\n", + vsi_idx, node_teid, del_root); + + if (!sxe2_vsi_id_is_valid(adapter, vsi_idx)) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", vsi_idx); + return -EINVAL; + } + + mutex_lock(&ctxt->lock); + + node = sxe2_txsched_find_node_by_teid(ctxt->root, node_teid); + if (!node) { + LOG_INFO_BDF("txsch ucmd root node unexit, vsi_idx[%d], teid=%#x\n", + vsi_idx, node_teid); + ret = -EIO; + goto l_unlock; + } + + no_del_root_teid = del_root ? 
SXE2_TXSCHED_TEID_INVALID : node_teid; + + if (sxe2_txsched_is_leaf_node_present(node, q_layer)) { + ret = sxe2_txsched_lan_child_tree_node_del(adapter, node, + no_del_root_teid, + SXE2_TXSCHED_NODE_OWNER_USER); + } else { + ret = sxe2_txsched_node_del(adapter, node, + no_del_root_teid, SXE2_TXSCHED_NODE_OWNER_USER); + } + + if (ret) { + LOG_DEBUG_BDF("usr txsch del node failed, vsi_idx=%d, subtree root_teid=%#x\n", + vsi_idx, node_teid); + goto l_unlock; + } + + vsi = sxe2_vsi_get_by_idx(adapter, vsi_idx); + if (!vsi) + goto l_unlock; + + sxe2_txsch_vsi_node_unmap(vsi, SXE2_TXSCHED_NODE_OWNER_USER); + + if (vsi->type == SXE2_VSI_T_DPDK_PF && del_root) + adapter->tx_sched_ctxt.user_root_teid = SXE2_TXSCHED_TEID_INVALID; + +l_unlock: + mutex_unlock(&ctxt->lock); + return ret; +} + +STATIC struct +sxe2_txsched_node *sxe2_txsched_ucmd_node_create(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, + struct sxe2_txsched_ucmd_node_params *ucmd) +{ + s32 ret = 0; + u16 num_nodes_added; + struct sxe2_txsched_node *parent; + struct sxe2_txsched_node *first_node = NULL; + struct sxe2_txsch_add_nodes_req nodes_param; + struct sxe2_txsched_context *ctx = sxe2_txsched_ctxt_get(adapter); + + parent = sxe2_txsched_find_node_by_teid(ctx->root, ucmd->parent_teid); + if (!parent) { + LOG_ERROR("ucmd node add: parent node unexit, parent_teid=%#x\n", + ucmd->parent_teid); + return NULL; + } + + if (parent->child_cnt >= SXE2_TXSCHED_NODE_CHILD_MAX) { + LOG_ERROR_BDF("ucmd node add: parent children cnt is max, parent_teid = %#x\n", + ucmd->parent_teid); + return NULL; + } + + nodes_param.num = 1; + nodes_param.owner = SXE2_TXSCHED_NODE_OWNER_USER; + nodes_param.parent_teid = parent->info.node_teid; + nodes_param.prio = ucmd->priority; + nodes_param.sw_layer = sxe2_txsch_teid2hwl(parent->info.node_teid); + nodes_param.tc = 0; + nodes_param.weight = ucmd->weight; + nodes_param.adj_lvl = ucmd->adj_lvl; + ret = sxe2_txsched_nodes_add(adapter, parent, + &nodes_param, &first_node, &num_nodes_added); + if (ret) { + first_node = NULL; + LOG_ERROR_BDF("ucmd node create failed, vsi_idx=%d, parent_teid=%#x\n", + vsi->idx_in_dev, ucmd->parent_teid); + } else { + LOG_INFO_BDF("ucmd node create success, vsi_idx=%d, parent_teid = %#x, teid=%#x\n", + vsi->idx_in_dev, ucmd->parent_teid, first_node->info.node_teid); + } + + return first_node; +} + +static bool sxe2_txsch_ucmd_add_node_is_valid(struct sxe2_vsi *vsi, + struct sxe2_txsched_ucmd_node_params *params) +{ + struct sxe2_adapter *adapter; + + if (!vsi) { + LOG_ERROR("invalid vsi=NULL\n"); + return false; + } + + adapter = vsi->adapter; + if (!params) { + LOG_ERROR_BDF("invalid params = NULL\n"); + return false; + } + + if (params->committed < SXE2_TXSCHED_MIN_BW || + (params->committed > SXE2_TXSCHED_MAX_BW && + params->committed != SXE2_TXSCHED_DFLT_BW)) { + LOG_ERROR_BDF("cir err, bw must > 500 Kbps, < %u Kbps, usrBw=%u\n", + SXE2_TXSCHED_MAX_BW, params->committed); + return false; + } + + if (params->peak < SXE2_TXSCHED_MIN_BW || + (params->peak > SXE2_TXSCHED_MAX_BW && + params->peak != SXE2_TXSCHED_DFLT_BW)) { + LOG_ERROR_BDF("eir err, bw must > 500 Kbps, < %u Kbps, usrBw=%u\n", + SXE2_TXSCHED_MAX_BW, params->peak); + return false; + } + + if (params->priority >= SXE2_TC_MAX_CNT) { + LOG_ERROR_BDF("prio err, priority must < 8, usr_prio=%u\n", + params->priority); + return false; + } + + return true; +} + +s32 sxe2_txsched_ucmd_node_add(struct sxe2_vsi *vsi, + struct sxe2_txsched_ucmd_node_params *node_params) +{ + s32 ret = 0; + struct sxe2_adapter *adapter; + 
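+	/* Flow: validate params, create the node under the tree lock, then
+	 * apply the requested CIR and EIR limits on the new node.
+	 */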
struct sxe2_txsched_context *ctx; + struct sxe2_txsched_node *node = NULL; + + if (!sxe2_txsch_ucmd_add_node_is_valid(vsi, node_params)) { + LOG_ERROR("ucmd add node param invalid\n"); + return -EINVAL; + } + + adapter = vsi->adapter; + ctx = sxe2_txsched_ctxt_get(adapter); + + LOG_INFO_BDF("ucmd add node, params parent_teid=%#x, node_teid=%#x, " + "committed=%u, peak=%u, prio=%d, weight=%d\n", + node_params->parent_teid, node_params->node_teid, + node_params->committed, node_params->peak, + node_params->priority, node_params->weight); + + mutex_lock(&ctx->lock); + + node = sxe2_txsched_ucmd_node_create(adapter, vsi, node_params); + if (!node) { + ret = -EIO; + node_params->node_teid = SXE2_TXSCHED_TEID_INVALID; + LOG_ERROR_BDF("ucmd add node failed, parent_teid=%d\n", + node_params->parent_teid); + goto l_unlock; + } + + node_params->node_teid = node->info.node_teid; + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, SXE2_NODE_RL_TYPE_CIR, + node_params->committed); + if (ret) { + LOG_ERROR_BDF("ucmd add node cir failed, teid=%#x, parent_teid=%#x, cir=%u\n", + node->info.node_teid, + node_params->parent_teid, + node_params->committed); + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, SXE2_NODE_RL_TYPE_EIR, + node_params->peak); + if (ret) { + LOG_ERROR_BDF("ucmd add node cir failed, teid=%#x, parent_teid=%#x, eir=%u\n", + node->info.node_teid, + node_params->parent_teid, + node_params->peak); + } + +l_unlock: + mutex_unlock(&ctx->lock); + return ret; +} + +STATIC s32 sxe2_txsch_ucmd_tm_qnode_create(struct sxe2_vsi *vsi, + struct sxe2_txsch_ucmd_qnode_params *ucmd, + u16 *node_teid) +{ + s32 ret; + u8 q_layer; + struct sxe2_cmd_params cmd = {}; + struct sxe2_txsched_node *parent; + struct sxe2_fwc_cfg_txq_req req; + struct sxe2_fwc_cfg_txq_resp resp; + struct sxe2_queue *txq = vsi->txqs.q[ucmd->queue_id]; + struct sxe2_txsched_node_param node_param; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_txsched_context *ctx = &adapter->tx_sched_ctxt; + + if (!sxe2_txsched_support_chk(adapter)) + return 0; + + if (ctx->state != SXE2_TX_SCHED_STATE_READY) { + LOG_ERROR_BDF("txsch ucmd tm qnode add : txsch not ready\n"); + return -EIO; + } + + parent = sxe2_txsched_find_node_by_teid(ctx->root, ucmd->parent_teid); + if (!parent) { + LOG_ERROR_BDF("txsch ucmd tm qnode add: parent node is invalid parent_teid=%#x\n", + ucmd->parent_teid); + return -EINVAL; + } + + if ((parent->child_cnt + 1) > SXE2_TXSCHED_NODE_CHILD_MAX) { + LOG_ERROR_BDF("txsch ucmd tm qnode add: vsi[%u] parent node are not enough slot, " + "parent_teid=%#x, parent->child_cnt=%u, need_node=1\n", + vsi->idx_in_dev, parent->info.node_teid, + parent->child_cnt); + + return -ENOSPC; + } + + q_layer = sxe2_txsched_sw_q_layer_get(); + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.leaf.tc = 0; + req.leaf.port = adapter->port_idx; + req.leaf.txq_idx_in_dev = txq->idx_in_pf + + adapter->q_ctxt.txq_base_idx_in_dev; + req.leaf.node.parent_teid = parent->info.node_teid; + req.leaf.node.data.hw_layer = q_layer + 1; + req.leaf.node.data.prio = ucmd->priority; + req.leaf.node.data.status = SXE2_NODE_STATUS_ENABLE; + req.leaf.node.data.arb_mode = SXE2_NODE_ARB_MODE_BPS; + req.leaf.node.data.rl_type = SXE2_NODE_RL_TYPE_EIR; + req.leaf.node.data.cir.bw = ucmd->committed; + req.leaf.node.data.cir.weight = ucmd->weight; + req.leaf.node.data.cir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req.leaf.node.data.srlPir.bw = ucmd->peak; + req.leaf.node.data.srlPir.weight = ucmd->weight; + 
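+	/* Bandwidth, weight and priority come from the user request; the
+	 * rate-limit profile id stays at the driver default.
+	 */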
req.leaf.node.data.srlPir.prof_id = SXE2_TXSCHED_DFLT_RL_PROF_ID; + req.leaf.node.data.adj_lvl = ucmd->adj_lvl; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_TX_SCHED_QUEUE_LEAF_ADD, &req, + sizeof(req), &resp, sizeof(resp)); + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("txsch ucmd tm hw qnode add failed, vsi[%d] parent_teid=%#x, " + "parent->child_cnt=%u, ret=%d\n", + vsi->idx_in_dev, parent->info.node_teid, + parent->child_cnt, ret); + return -EIO; + } + + node_param.tc = 0; + node_param.owner = SXE2_TXSCHED_NODE_OWNER_USER; + node_param.sw_layer = q_layer; + node_param.node_teid = resp.node_teid; + node_param.vsi_idx_in_dev = vsi->idx_in_dev; + node_param.txq_idx_in_vsi = txq->idx_in_vsi; + node_param.node_silbing_idx = resp.sibling_idx; + node_param.parent_teid = parent->info.node_teid; + node_param.txq_idx_in_dev = req.leaf.txq_idx_in_dev; + ret = sxe2_txsched_sw_node_add(adapter, &node_param); + if (ret) { + LOG_ERROR_BDF("txsch ucmd tm sw qnode add failed, vsi[%d] parent_teid=%#x, " + "parent->child_cnt=%u, ret=%d\n", + vsi->idx_in_dev, parent->info.node_teid, + parent->child_cnt, ret); + return ret; + } + txq->txq_teid = resp.node_teid; + *node_teid = resp.node_teid; + + LOG_DEBUG_BDF("txq node add success,teid:%#x,parent teid:%#x\n", + txq->txq_teid, node_param.parent_teid); + + return ret; +} + +static bool sxe2_txsch_ucmd_add_txq_node_is_valid(struct sxe2_vsi *vsi, + struct sxe2_txsch_ucmd_qnode_params *params) +{ + struct sxe2_adapter *adapter; + + if (!vsi) { + LOG_ERROR("invalid vsi=NULL\n"); + return false; + } + + adapter = vsi->adapter; + if (!params) { + LOG_ERROR_BDF("invalid params = NULL\n"); + return false; + } + + if (params->committed < SXE2_TXSCHED_MIN_BW || + (params->committed > SXE2_TXSCHED_MAX_BW && + params->committed != SXE2_TXSCHED_DFLT_BW)) { + LOG_ERROR_BDF("cir err, bw must > 500 Kbps, < %u Kbps, usrBw=%u\n", + SXE2_TXSCHED_MAX_BW, params->committed); + return false; + } + + if (params->peak < SXE2_TXSCHED_MIN_BW || + (params->peak > SXE2_TXSCHED_MAX_BW && + params->peak != SXE2_TXSCHED_DFLT_BW)) { + LOG_ERROR_BDF("eir err, bw must > 500 Kbps, < %u Kbps, usrBw=%u\n", + SXE2_TXSCHED_MAX_BW, params->peak); + return false; + } + + if (params->priority >= SXE2_TC_MAX_CNT) { + LOG_ERROR_BDF("prio err, priority must < 8, usr_prio=%u\n", + params->priority); + return false; + } + + if (params->queue_id > vsi->txqs.q_cnt) { + LOG_ERROR_BDF("txq id:%u invalid, max:%u vsi_id:%d\n", + params->queue_id, vsi->txqs.q_cnt, vsi->idx_in_dev); + return false; + } + + if (params->adj_lvl > SXE2_TXSCH_NODE_ADJ_LVL_MAX) { + LOG_ERROR_BDF("adj lvl:%u invalid, max:%u vsi_id:%d\n", + params->adj_lvl, SXE2_TXSCH_NODE_ADJ_LVL_MAX, + vsi->idx_in_dev); + return false; + } + + return true; +} + +s32 sxe2_txsched_ucmd_qnode_add(struct sxe2_vsi *vsi, + struct sxe2_txsch_ucmd_qnode_params *node_params) +{ + s32 ret = 0; + struct sxe2_adapter *adapter; + struct sxe2_txsched_context *ctx; + struct sxe2_txsched_node *node = NULL; + u16 node_teid; + + if (!sxe2_txsch_ucmd_add_txq_node_is_valid(vsi, node_params)) { + LOG_ERROR("ucmd add txq node param invalid\n"); + return -EINVAL; + } + + adapter = vsi->adapter; + ctx = sxe2_txsched_ctxt_get(adapter); + + LOG_INFO_BDF("ucmd add tm queue node, params parent_teid=%#x, " + "node_teid=%#x committed=%u, peak=%u, prio=%d, weight=%u, qidx=%u\n", + node_params->parent_teid, node_params->node_teid, + node_params->committed, node_params->peak, + node_params->priority, node_params->weight, + node_params->queue_id); + + 
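+	/*
+	 * Same two-phase pattern as sxe2_txsched_ucmd_node_add(): create the
+	 * leaf via firmware under ctx->lock, then program the CIR/EIR rate
+	 * limits on the new node. A minimal sketch of a hypothetical caller
+	 * (variable names and bandwidth values assumed, units are Kbps):
+	 *
+	 *	struct sxe2_txsch_ucmd_qnode_params p = {
+	 *		.parent_teid = qg_teid,	// assumed queue-group teid
+	 *		.committed = 100000,	// 100 Mbps CIR
+	 *		.peak = 200000,		// 200 Mbps EIR
+	 *		.priority = 0,
+	 *		.weight = 1,
+	 *		.queue_id = 0,
+	 *		.adj_lvl = SXE2_TXSCH_NODE_ADJ_LVL_DFLT,
+	 *	};
+	 *	ret = sxe2_txsched_ucmd_qnode_add(vsi, &p);
+	 */
+	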
mutex_lock(&ctx->lock); + + ret = sxe2_txsch_ucmd_tm_qnode_create(vsi, node_params, &node_teid); + if (ret) { + node_params->node_teid = SXE2_TXSCHED_TEID_INVALID; + LOG_ERROR_BDF("txsch ucmd tm qnode add failed\n"); + goto l_unlock; + } + + node_params->node_teid = node_teid; + + node = sxe2_txsched_find_node_by_teid(ctx->root, node_teid); + if (!node) { + ret = -EIO; + LOG_ERROR_BDF("txsch ucmd tm qnode add : " + "find node failed, parent_teid=%#x, teid=%#x\n", + node_params->parent_teid, node_params->node_teid); + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, SXE2_NODE_RL_TYPE_CIR, + node_params->committed); + if (ret) { + LOG_ERROR_BDF("txsch ucmd tm qnode add : cir cfg failed, " + "parent_teid=%#x, teid=%#x, cir=%u\n", + node_params->parent_teid, + node_params->node_teid, + node_params->committed); + goto l_unlock; + } + + ret = sxe2_txsched_node_bw_lmt_cfg(vsi, node, SXE2_NODE_RL_TYPE_EIR, + node_params->peak); + if (ret) { + LOG_ERROR_BDF("txsch ucmd tm qnode add : eir cfg failed, " + "parent_teid=%#x, teid=%#x, eir=%u\n", + node_params->parent_teid, + node_params->node_teid, + node_params->peak); + } + +l_unlock: + mutex_unlock(&ctx->lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.h new file mode 100644 index 0000000000000000000000000000000000000000..c0e0e3948f54f9f8f1141f625ce85a77adac6391 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_txsched.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_txsched.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_TXSCHED_H__ +#define __SXE2_TXSCHED_H__ +#include "sxe2_cmd.h" + +struct sxe2_adapter; +struct sxe2_vsi; + +#define SXE2_TX_SCHED_NODE_PER_VSI_MAX 5 + +#define SXE2_TXSCHED_SW_PORT_LAYER 0 +#define SXE2_TXSCHED_SW_TC_LAYER 1 +#define SXE2_TXSCHED_SW_VEB_LAYER 2 +#define SXE2_TXSCHED_SW_VSIG_LAYER 3 +#define SXE2_TXSCHED_SW_VSI_LAYER 4 +#define SXE2_TXSCHED_SW_QG_LAYER 5 +#define SXE2_TXSCHED_SW_Q_LAYER 6 + +#define SXE2_TX_SCHED_STATE_INIT 0x0 +#define SXE2_TX_SCHED_STATE_READY 0x1 + +#define SXE2_TX_SCHED_VSI_MAX_TXQ_NUM 256 + +#define SXE2_TXSCH_VF_VSIG_AGG_MAX (48) + +#define SXE2_TXSCH_NODE_PRIO_DLFT 0 +#define SXE2_TXSCH_NODE_PRIO_HIGH 7 + +#define SXE2_TXSCH_NODE_ADJ_LVL_DFLT 3 +#define SXE2_TXSCH_NODE_ADJ_LVL_MAX 3 + +enum sxe2_node_type { + SXE2_TXSCHD_NODE_TYPE_UNKNOW = 0, + + SXE2_TXSCHD_NODE_TYPE_PORT, + + SXE2_TXSCHD_NODE_TYPE_TC, + + SXE2_TXSCHD_VEB_TYPE_PF, + SXE2_TXSCHD_VEB_TYPE_MACVLAN_ESW, + SXE2_TXSCHD_VEB_TYPE_VF, + + SXE2_TXSCHD_VSIG_TYPE_PF_AGG, + SXE2_TXSCHD_VSIG_TYPE_AGG, + SXE2_TXSCHD_VSIG_TYPE_GEN, + SXE2_TXSCHD_VSIG_TYPE_VF_AGG, + SXE2_TXSCHED_VSIG_TYPE_USER_PF, + + SXE2_TXSCHD_VSI_TYPE_PF, + SXE2_TXSCHD_VSI_TYPE_PF_RDMA, + SXE2_TXSCHD_VSI_TYPE_PF_CHANNEL, + SXE2_TXSCHD_VSI_TYPE_PF_LOOPBACK, + SXE2_TXSCHD_VSI_TYPE_PF_CTRL, + SXE2_TXSCHD_VSI_TYPE_MACVLAN_ESW, + SXE2_TXSCHD_VSI_TYPE_VF, + SXE2_TXSCHD_VSI_TYPE_USER_PF, + SXE2_TXSCHD_VSI_TYPE_USER_VF, + + SXE2_TXSCHD_QS_TYPE_RDMA, + SXE2_TXSCHD_QS_TYPE_LAN, +}; + +enum sxe2_txsch_vsi_type { + FUSION_VF2VSI_NODE_VF_KVSI = 0, + FUSION_VF2VSI_MODE_VF_UVSI, + FUSION_VF2VSIG_MODE_VF_KVSI, + FUSION_VF2VSIG_MODE_VF_UVSI, + OTHER_MODE_KVSI, + OTHER_MODE_UVSI, + OTHER_MODE_UNKOWN, +}; + +struct sxe2_txsch_add_nodes_req { + u8 tc; + u8 prio; + u8 sw_layer; + u8 adj_lvl; + u16 num; + u16 weight; + u16 
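+	/* teid of the existing parent node the new sibling batch attaches to */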
parent_teid; + enum sxe2_txsched_node_owner owner; +}; + +struct sxe2_txsched_vf_tm_info { + u32 committed; + u32 peak; + u8 priority; + u16 weight; +}; + +struct sxe2_txsched_add_node_info { + u8 num; + enum sxe2_node_type type; + struct sxe2_txsched_node *node; +}; + +struct sxe2_txsched_node { + struct sxe2_txsched_node *parent; + struct sxe2_txsched_node *group; + struct sxe2_txsched_node *sibling; + struct sxe2_txsched_node * + *child; + struct sxe2_txsched_node_info info; + enum sxe2_node_type node_type; + u16 vf_idx_in_pf; + u16 vsi_idx_in_dev; + u16 txq_idx_in_vsi; + u16 txq_idx_in_dev; + u8 in_use; + u8 child_cnt; + u8 tc; + u8 owner; + u8 same_node_num_pre_tc; +}; + +struct sxe2_txsched_context { + struct sxe2_txsched_node *root; + struct sxe2_fwc_txsched_cap_resp cap; + struct mutex lock; + u8 state; + u8 sw_entry_point_layer; + u16 user_root_teid; + struct sxe2_txsched_node *sib_head[SXE2_MAX_TRAFFIC_CLASS][SXE2_TXSCHED_LAYER_MAX]; +}; + +struct sxe2_txsched_node_param { + u32 tc; + u32 owner; + u16 vsi_idx_in_dev; + u16 parent_teid; + u16 node_teid; + u16 txq_idx_in_dev; + u16 txq_idx_in_vsi; + u8 sw_layer; + u8 node_silbing_idx; +}; + +bool sxe2_txsch_is_vf_vsi_agg_mode(struct sxe2_adapter *adapter); + +bool sxe2_txsched_support_chk(struct sxe2_adapter *adapter); + +s32 sxe2_txsched_txq_node_add(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, struct sxe2_queue *txq, + enum sxe2_txsched_node_owner owner, + struct sxe2_fwc_cfg_txq_req *req); + +s32 sxe2_txsched_txq_node_del(struct sxe2_adapter *adapter, + struct sxe2_queue *txq); + +void sxe2_txsched_dflt_topo_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_txsched_dflt_topo_init(struct sxe2_adapter *adapter); + +s32 sxe2_txsched_init(struct sxe2_adapter *adapter); + +void sxe2_txsched_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_txsched_q_bw_lmt_cfg(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + u8 rl_type, u32 bw); + +void sxe2_txsched_vsi_q_ctxt_free(struct sxe2_vsi *vsi); + +s32 sxe2_txsched_lan_vsi_rm(struct sxe2_vsi *vsi); + +s32 sxe2_txsched_rdma_vsi_rm(struct sxe2_vsi *vsi); + +bool sxe2_txsched_qset_left(struct sxe2_adapter *adapter, u16 vsi_idx); + +s32 sxe2_txsched_lan_vsi_cfg(struct sxe2_vsi *vsi); + +void sxe2_txsched_tree_clean(struct sxe2_adapter *adapter); + +void sxe2_txsched_tree_dump(struct sxe2_adapter *adapter); + +void sxe2_txsched_sw_tree_dump(struct sxe2_adapter *adapter); + +s32 sxe2_txsched_vf_bw_lmt_cfg(struct sxe2_adapter *adapter, + struct sxe2_vf_node *vf_node, u8 rl_type, u32 bw); + +s32 sxe2_txsched_ets_update(struct sxe2_adapter *adapter, u8 tc_cnt); + +s32 sxe2_txsched_dflt_tc_node_add(struct sxe2_adapter *adapter, + struct sxe2_txsched_node_info *tc0_info); + +s32 sxe2_txsched_qset_node_add(struct sxe2_adapter *adapter, + struct sxe2_vsi *vsi, + struct aux_rdma_qset_params *qset, u8 tc); +s32 sxe2_txsched_qset_node_del(struct sxe2_adapter *adapter, + struct aux_rdma_qset_params *qset, u8 tc); +s32 sxe2_txsched_rdma_vsi_cfg(struct sxe2_vsi *vsi, u8 is_aa); + +struct sxe2_txsched_node * +sxe2_txsched_find_node_by_teid(struct sxe2_txsched_node *start_node, u16 teid); + +s32 sxe2_txsched_qset_node_move(struct sxe2_adapter *adapter, + struct sxe2_adapter *new_adapter, + struct aux_rdma_qset_params *dqset, + u16 *new_teid, u8 is_aa); + +s32 sxe2_txsched_node_bw_lmt_cfg(struct sxe2_vsi *vsi, + struct sxe2_txsched_node *node, u8 rl_type, + u32 bw); + +s32 sxe2_txsched_nodes_add_tm(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *parent, u8 tc, + u8 sw_layer, u16 sibling_num, + 
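+			      /* out: first node created and the count the
+			       * firmware actually added
+			       */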
struct sxe2_txsched_node **first_node, + u16 *num_nodes_added, + struct sxe2_txsched_vf_tm_info *tm_info, + u16 *node_teid); + +void sxe2_txsched_sw_subtree_dump(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node); + +void sxe2_txsched_hw_subtree_dump(struct sxe2_adapter *adapter, + struct sxe2_txsched_node *node); + +void sxe2_txsched_vf_tree_clean(struct sxe2_adapter *adapter); + +struct sxe2_txsched_node * +sxe2_txsched_vsi_first_node_get(struct sxe2_txsched_context *ctxt, u8 tc, + u16 vsi_idx, u8 owner); + +s32 sxe2_txsch_ucmd_root_vsi_cfg(struct sxe2_vsi *vsi, u16 *vsi_teid); + +s32 sxe2_txsched_tc_max_bw_lmt_cfg(struct sxe2_vsi *vsi, u8 tc, u32 max_tx_rate); + +s32 sxe2_txsched_ucmd_vsig_node_del(struct sxe2_vsi *vsi); + +s32 sxe2_txsched_ucmd_vsi_node_del(struct sxe2_vsi *vsi); + +s32 sxe2_txsch_ucmd_subtree_del(struct sxe2_adapter *adapter, + u16 vsi_idx, u16 node_teid, + bool del_root); + +struct sxe2_txsched_ucmd_node_params { + u16 parent_teid; + u16 node_teid; + u32 committed; + u32 peak; + u8 priority; + u8 reserve; + u16 weight; + u8 adj_lvl; +}; + +struct sxe2_txsch_ucmd_qnode_params { + u16 parent_teid; + u16 node_teid; + u32 committed; + u32 peak; + u8 priority; + u8 reserve; + u16 weight; + u32 queue_id; + u8 adj_lvl; +}; + +s32 sxe2_txsched_ucmd_node_add(struct sxe2_vsi *vsi, + struct sxe2_txsched_ucmd_node_params *node_params); + +s32 sxe2_txsched_ucmd_qnode_add(struct sxe2_vsi *vsi, + struct sxe2_txsch_ucmd_qnode_params *node_params); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.c new file mode 100644 index 0000000000000000000000000000000000000000..2d0d0a35a27f65118bce3e35571c969282e5532c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
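+ *
+ * Tracks UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) port offload per VSI and
+ * reference-counts each port across VSIs, so a firmware entry is
+ * programmed once and removed only when its last user releases it.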
+ * + * @file: sxe2_udp_tunnel.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2.h" +#include "sxe2_hw.h" +#include "sxe2_log.h" +#include "sxe2_cmd.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_vsi.h" +#include "sxe2_udp_tunnel.h" +#include "sxe2_com_ioctl.h" +#include "sxe2_netdev.h" + +static s32 sxe2_fwc_udp_tunnel_port_add(struct sxe2_adapter *adapter, + struct sxe2_udp_tunnel_cfg *tunnel_config) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_udp_tunnel_ref_add_req tunnel_cfg_fwc_req; + + tunnel_cfg_fwc_req.type = tunnel_config->protocol; + tunnel_cfg_fwc_req.port = cpu_to_le16(tunnel_config->dev_port); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_UDPTUNNEL_ADD, &tunnel_cfg_fwc_req, + sizeof(struct sxe2_fwc_udp_tunnel_ref_add_req), NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret != 0) + LOG_ERROR("Failed to add func 0 tunnel port, ret=%d", ret); + + return ret; +} + +static s32 sxe2_fwc_udp_tunnel_port_del(struct sxe2_adapter *adapter, + struct sxe2_udp_tunnel_cfg *tunnel_config) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_udp_tunnel_ref_delete_req tunnel_cfg_fwc_req; + + tunnel_cfg_fwc_req.type = tunnel_config->protocol; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_UDPTUNNEL_DEL, &tunnel_cfg_fwc_req, + sizeof(struct sxe2_fwc_udp_tunnel_ref_delete_req), NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret != 0) + LOG_ERROR("Failed to delete func 0 tunnel port, ret=%d", ret); + + return ret; +} + +static s32 sxe2_fwc_udp_tunnel_port_get(struct sxe2_adapter *adapter, + struct sxe2_udp_tunnel_cfg *tunnel_config) +{ + s32 ret = 0; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_udp_tunnel_ref_get_req tunnel_cfg_fwc_req; + struct sxe2_fwc_udp_tunnel_ref_get_resp tunnel_cfg_fwc_resp; + + tunnel_cfg_fwc_req.type = tunnel_config->protocol; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_UDPTUNNEL_GET, &tunnel_cfg_fwc_req, + sizeof(tunnel_cfg_fwc_req), + &tunnel_cfg_fwc_resp, sizeof(tunnel_cfg_fwc_resp)); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret != 0) { + LOG_ERROR("Failed to get func 0 tunnel port, ret=%d", ret); + goto l_end; + } + + tunnel_config->fw_port = le16_to_cpu(tunnel_cfg_fwc_resp.port); + tunnel_config->fw_status = tunnel_cfg_fwc_resp.enable; + tunnel_config->fw_dst_en = tunnel_cfg_fwc_resp.dst; + tunnel_config->fw_src_en = tunnel_cfg_fwc_resp.src; + tunnel_config->fw_used = tunnel_cfg_fwc_resp.used; + +l_end: + return ret; +} + +static s32 sxe2_udp_tunnel_port_add_fw(struct sxe2_adapter *ad, + enum sxe2_udp_tunnel_protocol tunnel_proto, + u16 udp_port, + struct sxe2_udp_tunnel_cfg *tunnel_config) +{ + s32 ret = 0; + + tunnel_config->protocol = tunnel_proto; + ret = sxe2_fwc_udp_tunnel_port_get(ad, tunnel_config); + if (ret) + goto l_end; + + if (tunnel_config->fw_used == SXE2_UDP_TUNNEL_ENABLE && + tunnel_config->fw_port != udp_port) { + LOG_ERROR("Hardware already configured with [type %d, udp_port %d]\n", + tunnel_proto, tunnel_config->fw_port); + ret = -EINVAL; + goto l_end; + } + + tunnel_config->dev_port = udp_port; + ret = sxe2_fwc_udp_tunnel_port_add(ad, tunnel_config); + if (ret) { + tunnel_config->dev_port = 0; + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_udp_tunnel_port_add_common(struct sxe2_vsi *vsi, + enum sxe2_udp_tunnel_protocol tunnel_proto, + u16 udp_port) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_udp_tunnel_cfg *tunnel_config = NULL, *tunnel_config_tmp = NULL; + struct sxe2_vsi *rule_vsi = 
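+	/* Port sharing: if another VSI already enabled this tunnel type,
+	 * its firmware entry is reused (only a matching port is accepted);
+	 * rule_vsi walks the candidates during the vsi_map scan below.
+	 */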
NULL; + bool need_add_fw = true; + s32 ret = 0; + u32 pos = 0; + + mutex_lock(&adapter->udp_tunnel_ctxt.lock); + tunnel_config = &vsi->udp_tunnel.cfgs[tunnel_proto]; + if (tunnel_config->dev_status == SXE2_UDP_TUNNEL_ENABLE) { + if (udp_port == tunnel_config->dev_port && + tunnel_config->dev_ref_cnt < 0xFFFFU) { + need_add_fw = false; + } else { + LOG_ERROR_BDF("Udp port %u is invalid\n", udp_port); + ret = -EINVAL; + goto l_end; + } + } else { + while (pos < SXE2_MAX_VSI_NUM) { + pos = (u16)find_next_bit(adapter->udp_tunnel_ctxt.vsi_map, + SXE2_MAX_VSI_NUM, pos + 1); + if (pos < SXE2_MAX_VSI_NUM && pos != vsi->id_in_pf) { + rule_vsi = adapter->vsi_ctxt.vsi[pos]; + tunnel_config_tmp = + &rule_vsi->udp_tunnel.cfgs[tunnel_proto]; + if (tunnel_config_tmp->dev_status == + SXE2_UDP_TUNNEL_ENABLE) { + need_add_fw = false; + if (udp_port != tunnel_config_tmp->dev_port) { + ret = -EINVAL; + goto l_end; + } + break; + } + } + } + } + + if (need_add_fw) { + ret = sxe2_udp_tunnel_port_add_fw(adapter, tunnel_proto, udp_port, + tunnel_config); + if (ret) { + LOG_ERROR("Add udp proto port %u to fw failed!\n", udp_port); + goto l_end; + } + } + + tunnel_config->dev_status = SXE2_UDP_TUNNEL_ENABLE; + tunnel_config->dev_port = udp_port; + tunnel_config->dev_ref_cnt++; + +l_end: + mutex_unlock(&adapter->udp_tunnel_ctxt.lock); + return ret; +} + +STATIC s32 sxe2_udp_tunnel_port_del_common(struct sxe2_vsi *vsi, + enum sxe2_udp_tunnel_protocol tunnel_proto, + u16 udp_port, bool do_clear) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_udp_tunnel_cfg *tunnel_config, *tunnel_config_tmp; + struct sxe2_vsi *rule_vsi = NULL; + bool exist_in_cur_vsi = false; + bool exist_in_other_vsi = false; + bool need_rm_fw = false; + s32 ret = 0; + u16 pos = 0; + + tunnel_config = &vsi->udp_tunnel.cfgs[tunnel_proto]; + if (tunnel_config->dev_status == SXE2_UDP_TUNNEL_ENABLE && + udp_port == tunnel_config->dev_port) { + tunnel_config->dev_ref_cnt--; + if (tunnel_config->dev_ref_cnt > 0) { + exist_in_cur_vsi = true; + } else { + while (pos < SXE2_MAX_VSI_NUM) { + pos = (u16)find_next_bit(adapter->udp_tunnel_ctxt.vsi_map, + SXE2_MAX_VSI_NUM, pos + 1); + if (pos < SXE2_MAX_VSI_NUM && pos != vsi->id_in_pf) { + rule_vsi = adapter->vsi_ctxt.vsi[pos]; + tunnel_config_tmp = + &rule_vsi->udp_tunnel.cfgs + [tunnel_proto]; + if (tunnel_config_tmp->dev_status == + SXE2_UDP_TUNNEL_ENABLE && + udp_port == tunnel_config_tmp->dev_port) { + exist_in_other_vsi = true; + break; + } + } + } + } + } else { + ret = -EINVAL; + goto l_end; + } + + if ((!exist_in_other_vsi && (!exist_in_cur_vsi || do_clear)) || + (do_clear && vsi == adapter->vsi_ctxt.main_vsi)) { + need_rm_fw = true; + } + + if (need_rm_fw) { + tunnel_config->dev_port = udp_port; + ret = sxe2_fwc_udp_tunnel_port_del(adapter, tunnel_config); + if (ret) + goto l_end; + + tunnel_config->dev_status = SXE2_UDP_TUNNEL_DISABLE; + tunnel_config->dev_ref_cnt = 0; + } + +l_end: + return ret; +} + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + +STATIC enum sxe2_udp_tunnel_protocol +sxe2_udp_tunnel_type_to_priv(enum udp_parsable_tunnel_type type) +{ + static enum sxe2_udp_tunnel_protocol sxe2_udp_proto_map[] = { + [UDP_TUNNEL_TYPE_VXLAN] = SXE2_UDP_TUNNEL_PROTOCOL_VXLAN, + [UDP_TUNNEL_TYPE_GENEVE] = SXE2_UDP_TUNNEL_PROTOCOL_GENEVE, + [UDP_TUNNEL_TYPE_VXLAN_GPE] = SXE2_UDP_TUNNEL_PROTOCOL_VXLAN_GPE, + }; + + return sxe2_udp_proto_map[type]; +} + +s32 sxe2_udp_tunnel_set_port(struct net_device *netdev, u32 table_idx, u32 idx, + struct udp_tunnel_info *ti) +{ + s32 ret = 0; + struct 
sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + enum sxe2_udp_tunnel_protocol type = 0; + u8 filter_index = 0; + + type = sxe2_udp_tunnel_type_to_priv(ti->type); + ret = sxe2_udp_tunnel_port_add_common(adapter->vsi_ctxt.main_vsi, type, ti->port); + if (ret != 0) { + LOG_ERROR("Set 0 tunnel port failed [ret=%d]\n", ret); + goto l_end; + } + + udp_tunnel_nic_set_port_priv(netdev, table_idx, idx, filter_index); +l_end: + return ret; +} + +s32 sxe2_udp_tunnel_unset_port(struct net_device *netdev, u32 table_idx, u32 idx, + struct udp_tunnel_info *ti) +{ + s32 ret = 0; + struct sxe2_netdev_priv *priv = netdev_priv(netdev); + struct sxe2_adapter *adapter = priv->vsi->adapter; + enum sxe2_udp_tunnel_protocol type = 0; + + type = sxe2_udp_tunnel_type_to_priv(ti->type); + ret = sxe2_udp_tunnel_port_del_common(adapter->vsi_ctxt.main_vsi, type, ti->port, + false); + if (ret != 0) { + LOG_ERROR("Del 0 tunnel port failed [ret=%d]\n", ret); + goto l_end; + } + +l_end: + return ret; +} +#endif + +s32 sxe2_com_udptunnel_handler(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + struct sxe2_drv_udp_tunnel_req *req = NULL; + struct sxe2_drv_udp_tunnel_resp resp = {}; + struct sxe2_udp_tunnel_cfg tunnel_config = {}; + struct sxe2_vsi *vsi = NULL; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, cmd_buf->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("failed to get vsi[%u]\n", cmd_buf->vsi_id); + ret = -EINVAL; + mutex_unlock(&adapter->vsi_ctxt.lock); + goto l_end; + } + mutex_unlock(&adapter->vsi_ctxt.lock); + + req = (struct sxe2_drv_udp_tunnel_req *)sxe2_com_req_data_copy_to_kernel(cmd_buf, + obj); + if (!req) { + LOG_ERROR_BDF("copy_from_user failed, len=%lu\n", + sizeof(struct sxe2_drv_udp_tunnel_req)); + ret = -EFAULT; + goto l_end; + } + + switch (cmd_buf->opcode) { + case SXE2_DRV_CMD_UDPTUNNEL_ADD: + ret = sxe2_udp_tunnel_port_add_common(vsi, req->type, req->port); + if (ret) { + LOG_ERROR_BDF("Udp tunnel port add failed, [ret=%d]\n", ret); + goto l_end; + } + break; + case SXE2_DRV_CMD_UDPTUNNEL_DEL: + ret = sxe2_udp_tunnel_port_del_common(vsi, req->type, req->port, false); + if (ret) { + LOG_ERROR_BDF("Udp tunnel port del failed, [ret=%d]\n", ret); + goto l_end; + } + break; + case SXE2_DRV_CMD_UDPTUNNEL_GET: + tunnel_config.protocol = req->type; + ret = sxe2_fwc_udp_tunnel_port_get(adapter, &tunnel_config); + if (ret) { + LOG_ERROR_BDF("Udp tunnel port get failed, [ret=%d]\n", ret); + goto l_end; + } + resp.type = tunnel_config.protocol; + resp.port = tunnel_config.fw_port; + resp.src = tunnel_config.fw_src_en; + resp.dst = tunnel_config.fw_dst_en; + resp.enable = tunnel_config.fw_status; + resp.fw_used = tunnel_config.fw_used; + + if (sxe2_com_resp_copy_to_user(cmd_buf, &resp, sizeof(resp), obj) != 0) { + LOG_ERROR_BDF("copy_to_user failed, len=%lu\n", sizeof(resp)); + ret = -EFAULT; + goto l_end; + } + break; + default: + LOG_ERROR_BDF("Invalid opcode: %d\n", cmd_buf->opcode); + ret = -EINVAL; + } + +l_end: + kfree(req); + return ret; +} + +STATIC s32 sxe2_udp_tunnel_port_clear_by_vsi(struct sxe2_vsi *vsi) +{ + struct sxe2_udp_tunnel_cfg *tunnel_config = NULL; + struct sxe2_adapter *adapter = vsi->adapter; + u8 tunnel_proto = 0; + s32 ret = 0; + + for (tunnel_proto = 0; tunnel_proto < SXE2_UDP_TUNNEL_MAX; tunnel_proto++) { + tunnel_config = &vsi->udp_tunnel.cfgs[tunnel_proto]; + if (tunnel_config->dev_status != SXE2_UDP_TUNNEL_ENABLE) + continue; + + ret = 
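+		/* do_clear=true on VSI teardown: remove the firmware entry
+		 * even while this VSI still holds references, as long as no
+		 * other VSI shares the port (or this is the main VSI).
+		 */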
sxe2_udp_tunnel_port_del_common(vsi, tunnel_config->protocol, + tunnel_config->dev_port, true); + if (ret != 0) { + LOG_ERROR_BDF("Clear udp tunnel %d port %d failed, ret=%d\n", + tunnel_config->protocol, tunnel_config->dev_port, + ret); + } + } + + return ret; +} + +void sxe2_udptunnel_vsi_init(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vsi_udp_tunnel *udp_tunnel = &vsi->udp_tunnel; + + memset(udp_tunnel, 0, sizeof(struct sxe2_vsi_udp_tunnel)); + set_bit(vsi->id_in_pf, adapter->udp_tunnel_ctxt.vsi_map); +} + +void sxe2_udptunnel_vsi_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vsi_udp_tunnel *udp_tunnel = &vsi->udp_tunnel; + + (void)sxe2_udp_tunnel_port_clear_by_vsi(vsi); + + memset(udp_tunnel, 0, sizeof(struct sxe2_vsi_udp_tunnel)); + clear_bit(vsi->id_in_pf, adapter->udp_tunnel_ctxt.vsi_map); +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.h new file mode 100644 index 0000000000000000000000000000000000000000..43b315364b2c6b037783303bd99b7d8cffaa21fa --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_udp_tunnel.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_udp_tunnel.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_UDP_TUNNEL_H__ +#define __SXE2_UDP_TUNNEL_H__ + +#include +#include +#include +#include +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_cdev.h" + +struct sxe2_adapter; +struct sxe2_vsi; + +enum sxe2_udp_tunnel_status { + SXE2_UDP_TUNNEL_DISABLE = 0x0, + SXE2_UDP_TUNNEL_ENABLE, +}; + +struct sxe2_udp_tunnel_cfg { + u8 protocol; + u8 dev_status; + u16 dev_port; + u16 dev_ref_cnt; + u8 flags; + + u16 fw_port; + u8 fw_status; + u8 fw_dst_en; + u8 fw_src_en; + u8 fw_used; +}; + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +s32 sxe2_udp_tunnel_set_port(struct net_device *netdev, u32 table_idx, u32 idx, + struct udp_tunnel_info *ti); + +s32 sxe2_udp_tunnel_unset_port(struct net_device *netdev, u32 table_idx, u32 idx, + struct udp_tunnel_info *ti); +#endif + +s32 sxe2_com_udptunnel_handler(struct sxe2_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +void sxe2_udptunnel_vsi_init(struct sxe2_vsi *vsi); + +void sxe2_udptunnel_vsi_deinit(struct sxe2_vsi *vsi); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.c new file mode 100644 index 0000000000000000000000000000000000000000..18e26fbce9937317bec261f159881bce3e38e98a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
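+ *
+ * Ethtool-driven firmware flashing: fetch the image file, then for each
+ * region run PREPARE -> OPEN -> FLASH (one command per 2 KiB fragment)
+ * -> CLOSE, and finish the whole session with END.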
+ * + * @file: sxe2_upgrade.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_ethtool.h" +#include "sxe2_upgrade.h" +#include "sxe2_netdev.h" +#include "sxe2_msg.h" +#include "sxe2_vsi.h" + +STATIC void sxe2_upgd_calc_check_sum(u32 *check_sum, u8 *data, u32 data_len) +{ + u32 per_len = sizeof(u16); + + SXE2_BUG_ON(!data); + + while (data_len >= per_len) { + *check_sum += *((u16 *)data); + data += per_len; + data_len -= per_len; + } + + *check_sum = ~(*check_sum); +} + +STATIC s32 sxe2_upgd_do_check_sum(u32 pack_check_sum, u8 *data, u32 data_len) +{ + s32 ret = 0; + u32 check_sum = 0; + + sxe2_upgd_calc_check_sum(&check_sum, data, data_len); + + if (pack_check_sum != check_sum) { + ret = -EINVAL; + LOG_ERROR("upgrade check_sum check failed: check_sum:%u, " + "expect:%u\n", + check_sum, pack_check_sum); + } + + return ret; +} + +STATIC s32 sxe2_upgd_package_check(u8 *pack_data, u32 pack_len) +{ + s32 ret = 0; + struct sxe2_pkg_header *pack_header = NULL; + u8 *data_pos = NULL; + u32 hdr_len = 0; + + SXE2_BUG_ON(!pack_data); + + if (pack_len <= sizeof(struct sxe2_pkg_header)) { + ret = -EINVAL; + LOG_INFO("pack_len %d less than package header:%zu\n", pack_len, + sizeof(struct sxe2_pkg_header)); + goto l_end; + } + + data_pos = pack_data; + pack_header = (struct sxe2_pkg_header *)data_pos; + + if (pack_header->magic != SXE2_PACK_DATA_BEGIN_NUM) { + LOG_INFO("magic(%d) failed\n", pack_header->magic); + ret = -EINVAL; + goto l_end; + } + + hdr_len = sizeof(struct sxe2_pkg_header); + ret = sxe2_upgd_do_check_sum(pack_header->pkg_check_sum, data_pos + hdr_len, + pack_len - hdr_len); + if (ret) { + LOG_INFO("sxe2_upgd_do_check_sum failed\n"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_region_image_check(u8 *pack_data, u32 region_len, u32 pack_len) +{ + s32 ret = 0; + struct sxe2_fw_header_with_sign *fw_hdr = NULL; + u32 hdr_len = 0; + u8 *data_pos = NULL; + u32 hdr_chk_len = 0; + u32 magic_offset = 0; + + SXE2_BUG_ON(!pack_data); + + data_pos = pack_data; + fw_hdr = (struct sxe2_fw_header_with_sign *)data_pos; + + if (region_len > pack_len || + fw_hdr->fw_header.image_len > + (pack_len - sizeof(struct sxe2_fw_header_with_sign))) { + ret = -EINVAL; + LOG_INFO("region_len:%u > pack_len:%u\n", region_len, pack_len); + goto l_end; + } + + if (fw_hdr->fw_header.magic != SXE2_DATABEGIN_NUM) { + LOG_INFO("image magic(%d) check failed\n", fw_hdr->fw_header.magic); + goto l_end; + } + + data_pos = pack_data; + hdr_len = sizeof(struct sxe2_fw_header_with_sign); + magic_offset = hdr_len - sizeof(struct sxe2_region_header) + + sizeof(fw_hdr->fw_header.magic); + hdr_chk_len = hdr_len - magic_offset - + sizeof(fw_hdr->fw_header.check_sum_header); + ret = sxe2_upgd_do_check_sum(fw_hdr->fw_header.check_sum_header, + data_pos + magic_offset, hdr_chk_len); + if (ret) { + LOG_INFO("sxe2_upgd_do_check_sum header failed\n"); + goto l_end; + } + + data_pos = pack_data; + ret = sxe2_upgd_do_check_sum(fw_hdr->fw_header.check_sum_file, + data_pos + hdr_len, + fw_hdr->fw_header.image_len); + if (ret) { + LOG_INFO("sxe2_upgd_do_check_sum file failed\n"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2_upgd_fw_arr_get(struct sxe2_adapter *adapter, u8 *pkg_data, + u32 pkg_len, u32 fw_type, + struct sxe2_upgrade_fw_array *fw_arr) +{ + s32 ret = 0; + struct sxe2_pkg_header *pkg_header; + struct sxe2_fw_header_with_sign *fw_header; + struct sxe2_region_header *region_header; + u8 *data_pos = NULL; + u32 index = 0; + u32 offset = 
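+	/*
+	 * Package layout as parsed below: one sxe2_pkg_header, followed by
+	 * fw_count images, each an sxe2_fw_header_with_sign plus image_len
+	 * payload bytes; offset walks that sequence with bounds checks.
+	 */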
0;
+	u32 fw_len = 0;
+
+	if (fw_type == ETHTOOL_FLASH_ALL_REGIONS || fw_type > SXE2_INVAL_U16) {
+		pkg_header = (struct sxe2_pkg_header *)pkg_data;
+
+		ret = sxe2_upgd_package_check(pkg_data, pkg_len);
+		if (ret) {
+			LOG_INFO_BDF("sxe2_upgd_package_check failed\n");
+			goto l_end;
+		}
+
+		fw_arr->fw_cnt = pkg_header->fw_count;
+		offset += sizeof(struct sxe2_pkg_header);
+		for (index = 0; index < fw_arr->fw_cnt; index++) {
+			if (offset >= pkg_len ||
+			    offset + sizeof(struct sxe2_fw_header_with_sign) >=
+				    pkg_len) {
+				ret = -EINVAL;
+				LOG_INFO_BDF("offset:%u beyond pkg_len:%u, sum:%lu, "
+					     "would access invalid memory\n",
+					     offset, pkg_len,
+					     offset + sizeof(struct sxe2_fw_header_with_sign));
+				goto l_end;
+			}
+
+			data_pos = pkg_data + offset;
+			fw_header = (struct sxe2_fw_header_with_sign *)data_pos;
+			region_header = &(fw_header->fw_header);
+			fw_len = region_header->image_len +
+				 (u32)sizeof(struct sxe2_fw_header_with_sign);
+
+			if (offset + fw_len > pkg_len) {
+				ret = -EINVAL;
+				LOG_INFO_BDF("offset + fw_len beyond pkg_len, "
+					     "would access invalid memory\n");
+				goto l_end;
+			}
+
+			ret = sxe2_region_image_check(data_pos, fw_len, pkg_len);
+			if (ret) {
+				LOG_INFO_BDF("sxe2_region_image_check failed\n");
+				goto l_end;
+			}
+
+			fw_arr->fw_arr[index].offset = offset;
+			fw_arr->fw_arr[index].image_len = fw_len;
+			fw_arr->fw_arr[index].fw_type =
+				(u32)SXE2_FWHEADER_IMAGETYPE(region_header);
+
+			offset += fw_len;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_upgd_64bit_set(u64 *value, u32 bit)
+{
+	s32 ret = 0;
+
+	LOG_DEBUG("bit=%d\n", bit);
+
+	if (bit >= SXE2_BIT_MAP_64) {
+		ret = -EINVAL;
+		LOG_DEBUG("upgrade u64 bit set failed. bit=%d >= 64\n", bit);
+		goto l_end;
+	} else {
+		SXE2_SET_BIT64(*value, bit);
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2_upgd_prep_info_fill(struct sxe2_upgrade_prepare_cmd *prepare_info,
+				    struct sxe2_upgrade_fw_array *fw_array, u64 uuid,
+				    bool is_pack, struct sxe2_pkg_header *pkg_hdr)
+{
+	s32 ret = 0;
+	u32 fw_index = 0;
+
+	prepare_info->fw_type_cnt = fw_array->fw_cnt;
+	prepare_info->uuid = uuid;
+	prepare_info->is_pkg = is_pack;
+
+	for (fw_index = 0; fw_index < fw_array->fw_cnt; fw_index++) {
+		ret = sxe2_upgd_64bit_set(&prepare_info->fw_type_bitmap,
+					  fw_array->fw_arr[fw_index].fw_type);
+		if (ret) {
+			LOG_INFO("sxe2_upgd_64bit_set failed, imageType=%d\n",
+				 fw_array->fw_arr[fw_index].fw_type);
+			goto l_end;
+		}
+	}
+
+	(void)memcpy(&prepare_info->pkg_hdr_info, pkg_hdr, sizeof(*pkg_hdr));
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_upgrade_prepare(struct sxe2_adapter *adapter,
+			 struct sxe2_upgrade_fw_array *fw_arr, u64 uuid,
+			 struct sxe2_pkg_header *pkg_hdr)
+{
+	s32 ret;
+	struct sxe2_upgrade_prepare_cmd prep_info = {};
+	struct sxe2_cmd_params cmd = {};
+
+	LOG_DEBUG_BDF("fw_cnt:%d\n", fw_arr->fw_cnt);
+
+	ret = sxe2_upgd_prep_info_fill(&prep_info, fw_arr, uuid, true, pkg_hdr);
+	if (ret) {
+		LOG_INFO_BDF("sxe2_upgd_prep_info_fill failed\n");
+		goto l_end;
+	}
+
+	sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FW_DOWNLOAD_PRE, (void *)&prep_info,
+				  sizeof(prep_info), NULL, 0);
+
+	ret = sxe2_cmd_fw_exec(adapter, &cmd);
+	if (ret) {
+		LOG_ERROR_BDF("upgrade prepare cmd failed, ret=%d\n", ret);
+		ret = -EIO;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC void sxe2_upgd_open_info_fill(struct sxe2_upgrade_open_cmd *open_info,
+				     u32 frag_num, u32 pack_len, u32 fw_type,
+				     u64 uuid)
+{
+	open_info->dev_type = SXE2_ETH_UPGRADE_DEV_TYPE_CTRL;
+	open_info->fw_type = fw_type;
+
+	open_info->uuid = uuid;
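+	/*
+	 * The image is streamed in SXE2_FRAG_LEN (2048-byte) fragments:
+	 * frag_num = DIV_ROUND_UP(pack_len, 2048). For example, a
+	 * pack_len of 5000 yields three fragments of 2048, 2048 and 904
+	 * bytes; only the last one goes out with symbol_more == 0.
+	 */
+	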
open_info->frag_num = frag_num; + open_info->frag_len = SXE2_FRAG_LEN; + open_info->fw_len = pack_len; + open_info->force = false; + open_info->no_reset = false; + open_info->forcehcb = false; + open_info->ispacket = false; + open_info->resetnow = false; + open_info->forceclose = false; + open_info->all = false; + open_info->backup = false; + open_info->no_sign_chk = false; + open_info->no_ver_chk = true; + open_info->is_fw_head = false; +} + +s32 sxe2_upgrade_open(struct sxe2_adapter *adapter, u32 frag_num, u32 pack_len, + u32 fw_type, u64 uuid) +{ + s32 ret = 0; + struct sxe2_upgrade_open_cmd *open_info = NULL; + struct sxe2_cmd_params cmd = {}; + + open_info = kzalloc(sizeof(*open_info), GFP_KERNEL); + if (!open_info) { + ret = -ENOMEM; + LOG_ERROR_BDF("upgrade pkg no memory, ret=%d\n", ret); + (void)open_info; + goto l_end; + } + + sxe2_upgd_open_info_fill(open_info, frag_num, pack_len, fw_type, uuid); + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FW_DOWNLOAD_OPEN, (void *)open_info, + sizeof(*open_info), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("upgrade pkg failed, ret=%d\n", ret); + ret = -EIO; + } + + kfree(open_info); +l_end: + return ret; +} + +STATIC u32 sxe2_upgradefrag_num_get(u32 pack_len, u32 frag_len) +{ + u32 frag_num = 0; + + frag_num = DIV_ROUND_UP(pack_len, frag_len); + + return frag_num; +} + +STATIC s32 sxe2_upgd_pkg_div_frag(u8 *pack_data, u32 pack_len, u32 frag_id, + u8 *frag_data) +{ + s32 ret = 0; + u32 frag_num = 0; + u32 frag_len = SXE2_FRAG_LEN; + u32 offset = frag_id * SXE2_FRAG_LEN; + + SXE2_BUG_ON(NULL == pack_data || NULL == frag_data); + + frag_num = sxe2_upgradefrag_num_get(pack_len, SXE2_FRAG_LEN); + + LOG_DEBUG("frag_num = %d\n", frag_num); + + if (frag_num - 1 == frag_id) { + frag_len = pack_len - frag_id * SXE2_FRAG_LEN; + LOG_DEBUG("last frag_len = %d\n", frag_len); + } + + (void)memcpy(frag_data, &pack_data[offset], frag_len); + + return ret; +} + +STATIC s32 sxe2_upgd_frag_head_init(struct sxe2_frag_head *frag_head, u32 frag_id, + u8 *frag_data, u32 pack_len, u64 uuid) +{ + s32 ret = 0; + u32 frag_num = 0; + u32 check_sum = 0; + + SXE2_BUG_ON(NULL == frag_head || NULL == frag_data); + + frag_num = sxe2_upgradefrag_num_get(pack_len, SXE2_FRAG_LEN); + + frag_head->frag_sid = frag_id; + frag_head->uuid = uuid; + frag_head->frag_len = SXE2_FRAG_LEN; + frag_head->version = SXE2_UPGRADE_PROTOCAL_VERSION; + frag_head->symbol_enable = SXE2_FRAG_ENABLE; + frag_head->symbol_more = (frag_num - 1 == frag_id) ? 
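+					/* last fragment -> 0, more follow -> 1 */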
0 : 1; + + if (frag_head->symbol_more == 0) + frag_head->frag_len = pack_len - frag_id * SXE2_FRAG_LEN; + + sxe2_upgd_calc_check_sum(&check_sum, frag_data, frag_head->frag_len); + + frag_head->checksum = check_sum; + + return ret; +} + +s32 sxe2_upgrade_flash(struct sxe2_adapter *adapter, + struct sxe2_update_flash_param *upgd_flash_obj) +{ + s32 ret = 0; + struct sxe2_upgrade_flash_cmd *download_info; + struct sxe2_cmd_params cmd = {}; + + download_info = (struct sxe2_upgrade_flash_cmd *)upgd_flash_obj->raw_data; + memset(download_info, 0, sizeof(struct sxe2_upgrade_flash_cmd)); + + ret = sxe2_upgd_pkg_div_frag( + upgd_flash_obj->pack_data, upgd_flash_obj->pack_len, + upgd_flash_obj->frag_index, download_info->raw_data); + if (ret) { + LOG_INFO_BDF("sxe2_upgd_pkg_div_frag failed, pack_len = %d, " + "frag_index =%d\n", + upgd_flash_obj->pack_len, upgd_flash_obj->frag_index); + goto l_end; + } + + ret = sxe2_upgd_frag_head_init( + &download_info->frag_head, upgd_flash_obj->frag_index, + download_info->raw_data, upgd_flash_obj->pack_len, + upgd_flash_obj->uuid); + if (ret) { + LOG_INFO_BDF("sxe2_upgd_frag_head_init failed, pack_len = %d, " + "frag_index =%d\n", + upgd_flash_obj->pack_len, upgd_flash_obj->frag_index); + goto l_end; + } + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FW_DOWNLOAD_FLASH, + (void *)download_info, + sizeof(struct sxe2_upgrade_flash_cmd), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("upgrade pkg failed, ret=%d\n", ret); + ret = -EIO; + } + +l_end: + return ret; +} + +s32 sxe2_upgrade_close(struct sxe2_adapter *adapter, u64 uuid, u32 err) +{ + s32 ret = 0; + struct sxe2_upgrade_close_cmd close_info = {}; + struct sxe2_cmd_params cmd = {}; + + close_info.err_code = err; + close_info.reset_now = false; + close_info.uuid = uuid; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FW_DOWNLOAD_CLOSE, + (void *)&close_info, sizeof(close_info), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("upgrade pkg failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +s32 sxe2_upgrade_end(struct sxe2_adapter *adapter, u32 err, u32 fw_type, u64 uuid) +{ + s32 ret = 0; + struct sxe2_upgrade_end_cmd end_info = {}; + struct sxe2_cmd_params cmd = {}; + + end_info.uuid = uuid; + end_info.fw_type = SXE2_UPDATE_FWTYPE_FW_PACKAGE; + end_info.err_code = err; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_FW_DOWNLOAD_END, (void *)&end_info, + sizeof(end_info), NULL, 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_ERROR_BDF("upgrade pkg failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_upgrade_data_trans(struct sxe2_adapter *adapter, u8 *pack_data, + u32 pack_len, u32 fw_type, u64 uuid) +{ + s32 ret = 0; + u32 err = 0; + u32 frag_num = 0; + u32 frag_index = 0; + struct sxe2_update_flash_param upgd_flash_obj = {}; + + upgd_flash_obj.raw_data = + kzalloc(sizeof(struct sxe2_upgrade_flash_cmd), GFP_KERNEL); + if (!upgd_flash_obj.raw_data) { + ret = -ENOMEM; + LOG_INFO_BDF("sxe2_upgrade_flash memory not enough, 2k is needed."); + goto l_end; + } + + frag_num = sxe2_upgradefrag_num_get(pack_len, SXE2_FRAG_LEN); + + ret = sxe2_upgrade_open(adapter, frag_num, pack_len, fw_type, uuid); + if (ret) { + LOG_INFO_BDF("sxe2_upgrade_open failed,ret = [%d]\n", ret); + goto l_close; + } + + upgd_flash_obj.uuid = uuid; + upgd_flash_obj.pack_len = pack_len; + upgd_flash_obj.frag_num = frag_num; + upgd_flash_obj.fw_type = fw_type; + upgd_flash_obj.pack_data = pack_data; + + for (frag_index = 0; 
frag_index < frag_num; frag_index++) { + upgd_flash_obj.frag_index = frag_index; + ret = sxe2_upgrade_flash(adapter, &upgd_flash_obj); + if (ret) { + LOG_INFO_BDF("sxe2_upgrade_flash frag[%d] failed\n", + frag_index); + goto l_close; + } + } + +l_close: + err = (u32)ret; + ret = sxe2_upgrade_close(adapter, uuid, err); + if (ret && !err) { + LOG_INFO_BDF("sxe2_upgrade_close failed, ret = [%d],err = [%d]", ret, + err); + goto l_end; + } else { + ret = (s32)err; + } + +l_end: + kfree(upgd_flash_obj.raw_data); + return ret; +} + +STATIC s32 sxe2_upgd_pkg_header_info_get(struct sxe2_adapter *adapter, u32 fw_type, + struct sxe2_pkg_header *pkg_hdr, + u8 *pack_data, u32 pack_len) +{ + s32 ret = 0; + + if (pack_len <= sizeof(struct sxe2_pkg_header)) { + ret = -EINVAL; + LOG_INFO_BDF("pack_len[%d] <= pack_header[%zd]\n", pack_len, + sizeof(struct sxe2_pkg_header)); + goto l_end; + } + (void)memcpy(pkg_hdr, pack_data, sizeof(*pkg_hdr)); + +l_end: + return ret; +} + +STATIC s32 sxe2_upgrade_image(struct sxe2_adapter *adapter, u8 *pkg_data, + u32 pkg_len, u32 install_type, u64 uuid) +{ + s32 ret = 0; + u32 err = 0; + u8 *data_pos = NULL; + u8 *fw_data = NULL; + u32 index = 0; + u32 fw_len = 0; + u32 region_type = 0; + struct sxe2_upgrade_fw_array *fw_arr = NULL; + struct sxe2_pkg_header pkg_hdr = {}; + + fw_arr = kzalloc(sizeof(*fw_arr), GFP_KERNEL); + if (!fw_arr) { + ret = -ENOMEM; + goto l_out; + } + + data_pos = pkg_data; + ret = sxe2_upgd_fw_arr_get(adapter, data_pos, pkg_len, install_type, fw_arr); + if (ret) { + LOG_INFO_BDF("sxe2_upgd_fw_arr_get failed\n"); + goto final; + } + + data_pos = pkg_data; + ret = sxe2_upgd_pkg_header_info_get(adapter, install_type, &pkg_hdr, + data_pos, pkg_len); + if (ret) { + LOG_INFO_BDF("sxe2_upgd_pkg_header_info_get failed(%d)\n", ret); + goto final; + } + + ret = sxe2_upgrade_prepare(adapter, fw_arr, uuid, &pkg_hdr); + if (ret) { + LOG_INFO_BDF("sxe2_upgrade_prepare failed(%d)\n", ret); + goto l_end; + } + + data_pos = pkg_data; + for (index = 0; index < fw_arr->fw_cnt; index++) { + fw_data = data_pos + fw_arr->fw_arr[index].offset; + fw_len = fw_arr->fw_arr[index].image_len; + region_type = fw_arr->fw_arr[index].fw_type; + + ret = sxe2_upgrade_data_trans(adapter, fw_data, fw_len, region_type, + uuid); + if (ret) { + LOG_INFO_BDF("sxe2_upgrade_data_trans failed(%d)\n", ret); + goto l_end; + } + } + +l_end: + err = (u32)ret; + ret = sxe2_upgrade_end(adapter, err, install_type, uuid); + + if (ret && !err) { + LOG_INFO_BDF("sxe2_upgrade_end failed"); + goto final; + } else { + ret = (s32)err; + } + +final: + kfree(fw_arr); + +l_out: + return ret; +} + +s32 sxe2_flash_package_from_file(struct net_device *dev, const char *file_name, + u32 install_type) +{ + s32 ret; + struct sxe2_netdev_priv *priv = netdev_priv(dev); + struct sxe2_vsi *vsi = priv->vsi; + struct sxe2_adapter *adapter = vsi->adapter; + const struct firmware *fw; + u64 uuid; + + get_random_bytes(&uuid, sizeof(uuid)); + + ret = request_firmware_direct(&fw, file_name, &adapter->pdev->dev); + if (ret) { + LOG_INFO_BDF("pkg error %d requesting file: %s\n", ret, file_name); + goto l_end; + } + + dev_hold(dev); + rtnl_unlock(); + + ret = sxe2_upgrade_image(adapter, (u8 *)fw->data, fw->size, install_type, + uuid); + if (ret) + LOG_ERROR_BDF("sxe2 update image failed, ret = %d\n", ret); + else + LOG_DEV_INFO("sxe2 update image done!\n"); + + release_firmware(fw); + + rtnl_lock(); + dev_put(dev); + +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.h 
b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.h new file mode 100644 index 0000000000000000000000000000000000000000..be46a24ad1a9707336008ba54c3e2791bcc418cb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_upgrade.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_upgrade.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_UPGRADE_H__ +#define __SXE2_UPGRADE_H__ + +#include "sxe2.h" +#include "sxe2_netdev.h" +#include "sxe2_common.h" +#include "sxe2_log.h" + +#define SXE2_MAX_UPDATE_FWTYPE (64) +#define SXE2_UPGRADE_PKGS_NAME_LEN (256) +#define SXE2_FRAG_LEN (2048) +#define SXE2_FW_VENDOR_LEN (8U) +#define SXE2_FW_SIGN_LEN (64U) +#define SXE2_FW_PKEY_LEN (68U) +#define SXE2_BIT_MAP_64 (64) + +#define SXE2_PAD_1_K (1024U) +#define SXE2_PACK_DATA_BEGIN_NUM (0x327f68cd) +#define SXE2_DATABEGIN_NUM (0x327f68ab) + +#define SXE2_UPGRADE_PROTOCAL_VERSION (0x00000001) +#define SXE2_FRAG_ENABLE (1) +#define SXE2_ETH_UPGRADE_DEV_TYPE_CTRL (1) + +#define SXE2_FWHEADER_IMAGETYPE(fw_header) ((fw_header)->image_type_append) +#define SXE2_SET_BIT64(x, y) ((x) |= ((u64)1 << (y))) +#define SXE2_UPDATE_FWTYPE_FW_PACKAGE (34) + +struct sxe2_update_flash_param { + u64 uuid; + u32 frag_index; + u32 pack_len; + u32 frag_num; + u32 fw_type; + u8 *pack_data; + u8 *raw_data; +}; + +struct sxe2_upgd_image_info { + __le32 offset; + __le32 image_len; + __le32 fw_type; +}; + +struct sxe2_upgrade_fw_array { + __le32 fw_cnt; + struct sxe2_upgd_image_info + fw_arr[SXE2_MAX_UPDATE_FWTYPE]; +}; + +struct sxe2_pkg_header { + __le32 magic; + __le32 fw_count; + __le32 pack_time; + __le32 pack_len; + __le32 pkg_check_sum; + __le32 pkg_version; + s8 pkg_name[SXE2_UPGRADE_PKGS_NAME_LEN]; + u8 reserved[4]; +}; + +struct sxe2_region_header { + __le32 magic; + u8 vendor[SXE2_FW_VENDOR_LEN]; + __le32 timestamp; + __le32 image_len; + __le32 image_type_append; + u8 signature[SXE2_FW_SIGN_LEN]; + u8 publickey[SXE2_FW_PKEY_LEN]; + __le32 check_sum_file; + __le32 image_type; + __le32 image_format; + __le32 entry_point; + __le32 load_addr; + __le32 reserved2; + __le32 image_version; + u8 reserved[68]; + __le32 check_sum_header; +}; + +struct sxe2_fw_header_with_sign { + __le32 header_over_sign[16]; + struct sxe2_region_header fw_header; +}; + +struct sxe2_upgrade_prepare_cmd { + __le64 uuid; + __le32 fw_type_cnt; + u8 pad[4]; + __le64 fw_type_bitmap; + bool is_pkg; + u8 pad2[4]; + struct sxe2_pkg_header pkg_hdr_info; + u8 pad3[4]; +}; + +struct sxe2_upgrade_open_cmd { + __le32 dev_type; + __le32 fw_type; + u8 pad1[SXE2_PAD_1_K]; + __le32 pad2[2]; + u64 uuid; + __le32 frag_num; + __le32 frag_len; + __le32 fw_len; + __le32 no_sign_chk : 1; + __le32 no_ver_chk : 1; + __le32 force : 1; + __le32 all : 1; + __le32 backup : 1; + __le32 is_fw_head : 1; + __le32 no_reset : 1; + __le32 forcehcb : 1; + __le32 resetnow : 1; + __le32 forceclose : 1; + __le32 ispacket : 1; + __le32 reserved : 21; +}; + +struct sxe2_frag_head { + __le64 uuid; + __le32 version; + __le32 frag_sid; + __le32 frag_len; + __le32 checksum; + __le32 symbol_enable : 1; + __le32 symbol_more : 1; + __le32 symbol_reserve : 6; + __le32 reserved : 24; + u8 pad[4]; +}; + +struct sxe2_upgrade_flash_cmd { + struct sxe2_frag_head frag_head; + u8 raw_data[SXE2_FRAG_LEN]; +}; + +struct sxe2_upgrade_close_cmd { + __le64 uuid; + __le32 err_code; + __le32 reset_now : 1; + __le32 reserved : 31; +}; + +struct sxe2_upgrade_end_cmd { + 
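+	/* random session id generated when flashing starts; must match the
+	 * uuid given to prepare/open/flash/close
+	 */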
__le64 uuid; + __le32 err_code; + __le32 fw_type; +}; + +s32 sxe2_flash_package_from_file(struct net_device *dev, const char *filename, + __le32 install_type); + +s32 sxe2_upgrade_prepare(struct sxe2_adapter *adapter, + struct sxe2_upgrade_fw_array *fw_arr, u64 uuid, + struct sxe2_pkg_header *pkg_hdr); + +s32 sxe2_upgrade_open(struct sxe2_adapter *adapter, u32 frag_num, u32 pack_len, + u32 fw_type, u64 uuid); + +s32 sxe2_upgrade_flash(struct sxe2_adapter *adapter, + struct sxe2_update_flash_param *upgd_flash_obj); + +s32 sxe2_upgrade_close(struct sxe2_adapter *adapter, u64 uuid, u32 err); + +s32 sxe2_upgrade_end(struct sxe2_adapter *adapter, u32 err, u32 fw_type, + u64 uuid); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.c new file mode 100644 index 0000000000000000000000000000000000000000..cf244c9b6145c5b64714648aedf01753c5b845dd --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.c @@ -0,0 +1,3376 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_vsi.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_tx.h" +#include "sxe2_rx.h" +#include "sxe2_vsi.h" +#include "sxe2_log.h" +#include "sxe2_dcb.h" +#include "sxe2_common.h" +#include "sxe2_netdev.h" +#include "sxe2_txsched.h" +#include "sxe2_rss.h" +#include "sxe2_sriov.h" +#include "sxe2_eswitch.h" +#include "sxe2_switch.h" +#include "sxe2_xsk.h" +#include "sxe2_linkchg.h" +#include "sxe2_txsched.h" +#include "sxe2_com_vlan.h" +#ifndef NOT_HAVE_MINMAX_H +#include +#endif +#include "sxe2_acl.h" +#include "sxe2_com_ioctl.h" + +#define SXE2_IRQ_NAME_STR_LEN (IFNAMSIZ + 32) + +struct sxe2_vsi *sxe2_vsi_create(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *vsi_create); + +bool sxe2_vsi_id_is_valid(struct sxe2_adapter *adapter, u16 vsi_id) +{ + if ((vsi_id >= + (adapter->vsi_ctxt.max_cnt + adapter->vsi_ctxt.base_idx_in_dev)) || + (vsi_id < adapter->vsi_ctxt.base_idx_in_dev)) { + return false; + } else { + return true; + } +} + +STATIC void sxe2_mac_addr_clear(struct sxe2_vsi *vsi) +{ + struct sxe2_addr_node *node; + struct sxe2_addr_node *tmp; + + list_for_each_entry_safe(node, tmp, &vsi->mac_filter.mac_addr_list, list) + { + sxe2_switch_mac_node_del_and_free(node); + } +} + +u16 sxe2_vsi_get(struct sxe2_vsi_context *vsi_ctxt) +{ + u16 next = SXE2_INVAL_U16; + struct sxe2_vsi **array = vsi_ctxt->vsi; + u16 size = vsi_ctxt->max_cnt; + u16 curr = vsi_ctxt->next_vsi_id; + + for (; curr < size; curr++) { + if (!array[curr]) { + next = curr; + vsi_ctxt->next_vsi_id = curr + 1; + break; + } + } + + if (next != SXE2_INVAL_U16) + vsi_ctxt->cnt++; + + return next; +} + +void sxe2_vsi_put(struct sxe2_vsi_context *vsi_ctxt, u16 vsi_id) +{ + if (vsi_id > vsi_ctxt->max_cnt - 1) { + LOG_ERROR("invali vsi id:%d max cnt:%d\n", vsi_id, + vsi_ctxt->max_cnt); + return; + } + + vsi_ctxt->vsi[vsi_id] = NULL; + + if (vsi_id < vsi_ctxt->next_vsi_id) + vsi_ctxt->next_vsi_id = vsi_id; + + vsi_ctxt->cnt--; +} + +STATIC s32 sxe2_vsi_qs_stats_num_set(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + switch (vsi->type) { + case SXE2_VSI_T_PF: + case SXE2_VSI_T_DPDK_PF: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = (u16)SXE2_VSI_TXRX_Q_MAX_CNT; + break; + + case SXE2_VSI_T_LB: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = 1; + break; + + case SXE2_VSI_T_VF: + case 
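+	/* kernel and DPDK VFs share the per-RSS-queue stats sizing */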
SXE2_VSI_T_DPDK_VF: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = SXE2_VF_RSS_Q_NUM; + break; + + case SXE2_VSI_T_MACVLAN: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = SXE2_DFLT_TXQ_VMDQ_VSI; + break; + + case SXE2_VSI_T_CTRL: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = 1; + break; + case SXE2_VSI_T_ESW: + case SXE2_VSI_T_DPDK_ESW: + vsi->vsi_qs_stats.vsi_qs_stats_maxcnt = SXE2_VF_NUM; + break; + default: + LOG_DEV_WARN("unknown vsi type: %d\n", vsi->type); + ret = -EINVAL; + goto l_end; + } + + LOG_DEBUG_BDF("vsi id:%u type:%u set qs stats num: %d.\n", vsi->id_in_pf, + vsi->type, vsi->vsi_qs_stats.vsi_qs_stats_maxcnt); + +l_end: + return ret; +} + +STATIC struct sxe2_vsi *sxe2_vsi_init(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *vsi_create) +{ + struct sxe2_vsi *vsi = NULL; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (adapter->vsi_ctxt.cnt >= adapter->vsi_ctxt.max_cnt) { + LOG_DEV_ERR("alloc vsi failed: Reach the limit of max_vsi %d.\n", + adapter->vsi_ctxt.max_cnt); + goto l_end; + } + + vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); + if (!vsi) { + LOG_DEV_ERR("alloc vsi struct failed.\n"); + goto l_end; + } + + vsi->idx_in_dev = SXE2_INVAL_U16; + vsi->src_prune.vsi_id_u = SXE2_VSI_ID_INVALID; + vsi->src_prune.vsi_id_k = SXE2_VSI_ID_INVALID; + vsi->type = vsi_create->type; + vsi->vf_node = vsi_create->vf; + vsi->adapter = adapter; + + if ((vsi_create->type == SXE2_VSI_T_VF) || + (vsi_create->type == SXE2_VSI_T_DPDK_VF)) { + vsi->txqs.q_cnt = vsi_create->txq_cnt; + vsi->rxqs.q_cnt = vsi_create->rxq_cnt; + vsi->irqs.cnt = vsi_create->irq_cnt; + vsi->id_in_pf = (u16)(vsi_create->vsi_id - + adapter->vsi_ctxt.base_idx_in_dev); + vsi->txqs.base_idx_in_feature = vsi_create->txq_base_idx; + vsi->rxqs.base_idx_in_feature = vsi_create->rxq_base_idx; + vsi->irqs.base_idx_in_feature = vsi_create->irq_base_idx; + } + + if (sxe2_vsi_qs_stats_num_set(vsi)) + goto l_err; + + set_bit(SXE2_VSI_S_DOWN, vsi->state); + set_bit(SXE2_VSI_S_CLOSE, vsi->state); + + if (vsi_create->type != SXE2_VSI_T_VF && + vsi_create->type != SXE2_VSI_T_DPDK_VF) { + vsi->id_in_pf = sxe2_vsi_get(&adapter->vsi_ctxt); + if (vsi->id_in_pf == SXE2_INVAL_U16) { + LOG_DEV_ERR("No Free Vsis.\n"); + goto l_err; + } + } + + vsi->idx_in_dev = vsi->id_in_pf + adapter->vsi_ctxt.base_idx_in_dev; + + adapter->vsi_ctxt.vsi[vsi->id_in_pf] = vsi; + + INIT_LIST_HEAD(&vsi->mac_filter.mac_addr_list); + mutex_init(&vsi->mac_filter.sync_lock); + + return vsi; + +l_err: + devm_kfree(dev, vsi); + vsi = NULL; + +l_end: + return vsi; +} + +STATIC void sxe2_vsi_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter; + struct device *dev; + u16 idx_in_pf = vsi->id_in_pf; + + sxe2_vsi_fltr_clean(vsi); + + adapter = vsi->adapter; + dev = SXE2_ADAPTER_TO_DEV(adapter); + + sxe2_mac_addr_clear(vsi); + + if (vsi->type != SXE2_VSI_T_VF && vsi->type != SXE2_VSI_T_DPDK_VF) + sxe2_vsi_put(&adapter->vsi_ctxt, vsi->id_in_pf); + + devm_kfree(dev, vsi); + + if (idx_in_pf < adapter->vsi_ctxt.max_cnt) + adapter->vsi_ctxt.vsi[idx_in_pf] = NULL; +} + +STATIC s32 sxe2_vsi_queues_num_set(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + switch (vsi->type) { + case SXE2_VSI_T_PF: + if (vsi->txqs.req_q_cnt) + vsi->txqs.q_cnt = vsi->txqs.req_q_cnt; + else + vsi->txqs.q_cnt = adapter->q_ctxt.txq_layout.lan; + + if (vsi->rxqs.req_q_cnt) + vsi->rxqs.q_cnt = vsi->rxqs.req_q_cnt; + else + vsi->rxqs.q_cnt = adapter->q_ctxt.rxq_layout.lan; + break; + case SXE2_VSI_T_LB: + vsi->txqs.q_cnt = 1; + vsi->rxqs.q_cnt 
= 1; + break; + + case SXE2_VSI_T_VF: + case SXE2_VSI_T_DPDK_VF: + LOG_DEBUG_BDF("vsi id:%u type:%u queue cnt %d %d already set.\n", + vsi->id_in_pf, vsi->type, vsi->txqs.q_cnt, + vsi->rxqs.q_cnt); + break; + + case SXE2_VSI_T_MACVLAN: + vsi->txqs.q_cnt = SXE2_DFLT_TXQ_VMDQ_VSI; + vsi->rxqs.q_cnt = SXE2_DFLT_RXQ_VMDQ_VSI; + break; + + case SXE2_VSI_T_CTRL: + vsi->txqs.q_cnt = 1; + vsi->rxqs.q_cnt = 1; + break; + case SXE2_VSI_T_ESW: + vsi->txqs.q_cnt = sxe2_vf_num_get(adapter); + vsi->rxqs.q_cnt = vsi->txqs.q_cnt; + break; + case SXE2_VSI_T_DPDK_PF: + vsi->txqs.q_cnt = adapter->q_ctxt.txq_layout.dpdk; + vsi->rxqs.q_cnt = adapter->q_ctxt.rxq_layout.dpdk; + break; + case SXE2_VSI_T_DPDK_ESW: + vsi->txqs.q_cnt = sxe2_vf_num_get(adapter); + vsi->rxqs.q_cnt = vsi->txqs.q_cnt; + break; + default: + LOG_DEV_WARN("unknown vsi type: %d\n", vsi->type); + ret = -EINVAL; + goto l_end; + } + + vsi->txqs.q_alloc = vsi->txqs.q_cnt; + vsi->rxqs.q_alloc = vsi->rxqs.q_cnt; + + if (vsi->txqs.q_cnt == 0 || vsi->rxqs.q_cnt == 0) { + LOG_DEV_ERR("vsi set queues num failed: txq alloced %d, rxq alloced " + "%d.\n", + vsi->txqs.q_cnt, vsi->rxqs.q_cnt); + ret = -ENOMEM; + } + + LOG_DEBUG_BDF("vsi id:%u type:%u set queues num: txq alloced %d, rxq " + "alloced %d.\n", + vsi->id_in_pf, vsi->type, vsi->txqs.q_cnt, vsi->rxqs.q_cnt); + +l_end: + return ret; +} + +STATIC s32 sxe2_vsi_irqs_num_set(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + u16 max_q_cnt = 0; + + switch (vsi->type) { + case SXE2_VSI_T_PF: + max_q_cnt = (u16)max(vsi->txqs.q_cnt, vsi->rxqs.q_cnt); + vsi->irqs.cnt = (u16)min(adapter->irq_ctxt.irq_layout.lan, + max_q_cnt); + break; + case SXE2_VSI_T_DPDK_PF: + vsi->irqs.cnt = adapter->irq_ctxt.irq_layout.dpdk; + break; + case SXE2_VSI_T_LB: + vsi->irqs.cnt = SXE2_LB_RXQ_MSIX_CNT; + break; + case SXE2_VSI_T_VF: + case SXE2_VSI_T_DPDK_VF: + LOG_DEBUG_BDF("vsi id:%u type:%u irq cnt %d already set.\n", + vsi->id_in_pf, vsi->type, vsi->irqs.cnt); + break; + case SXE2_VSI_T_MACVLAN: + vsi->irqs.cnt = SXE2_DFLT_VEC_VMDQ_VSI; + break; + case SXE2_VSI_T_CTRL: + vsi->irqs.cnt = SXE2_FNAV_MSIX_CNT; + break; + case SXE2_VSI_T_ESW: + vsi->irqs.cnt = SXE2_ESWITCH_MSIX_CNT; + break; + case SXE2_VSI_T_DPDK_ESW: + vsi->irqs.cnt = SXE2_DPDK_ESWITCH_MSIX_CNT; + break; + default: + LOG_DEV_WARN("unknown vsi type: %d, qcnt: %d\n", vsi->type, + max_q_cnt); + ret = -EINVAL; + goto l_end; + } + + if (vsi->irqs.cnt == 0) { + LOG_DEV_ERR("vsi set irqs num failed: irq cnt %d.\n", vsi->irqs.cnt); + ret = -ENOMEM; + } + +l_end: + return ret; +} + +STATIC void sxe2_vsi_queues_free(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 i; + + if (vsi->af_xdp_zc_qps) { + bitmap_free(vsi->af_xdp_zc_qps); + vsi->af_xdp_zc_qps = NULL; + } + + if (vsi->rxqs.q) { + sxe2_for_each_vsi_alloc_rxq(vsi, i) + { + if (vsi->rxqs.q[i]) { + kfree_rcu(vsi->rxqs.q[i], rcu); + WRITE_ONCE(vsi->rxqs.q[i], NULL); + } + } + devm_kfree(dev, vsi->rxqs.q); + vsi->rxqs.q = NULL; + } + + if (vsi->txqs.q) { + sxe2_for_each_vsi_alloc_txq(vsi, i) + { + if (vsi->txqs.q[i]) { + kfree_rcu(vsi->txqs.q[i], rcu); + WRITE_ONCE(vsi->txqs.q[i], NULL); + } + } + devm_kfree(dev, vsi->txqs.q); + vsi->txqs.q = NULL; + } + + if (vsi->origin_txqs) { + devm_kfree(dev, vsi->origin_txqs); + vsi->origin_txqs = NULL; + } + + if (vsi->xdp_rings.q) { + for (i = 0; i < vsi->num_xdp_txq; i++) { + if (vsi->xdp_rings.q[i]) { + kfree_rcu(vsi->xdp_rings.q[i], rcu); + 
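+				/* kfree_rcu() defers the free past in-flight
+				 * readers; clear the published slot right away
+				 */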
WRITE_ONCE(vsi->xdp_rings.q[i], NULL); + } + } + + devm_kfree(dev, vsi->xdp_rings.q); + vsi->xdp_rings.q = NULL; + } +} + +STATIC void sxe2_vsi_irqs_data_free(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 i; + + if (vsi->irqs.irq_data) { + sxe2_for_each_vsi_irq(vsi, i) + { + if (vsi->irqs.irq_data[i]) { + devm_kfree(dev, vsi->irqs.irq_data[i]); + vsi->irqs.irq_data[i] = NULL; + } + } + devm_kfree(dev, vsi->irqs.irq_data); + vsi->irqs.irq_data = NULL; + } +} + +STATIC s32 sxe2_vsi_queues_alloc(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_queue *q; + u16 i; + + vsi->txqs.q = devm_kcalloc(dev, vsi->txqs.q_alloc, sizeof(*vsi->txqs.q), + GFP_KERNEL); + if (!vsi->txqs.q) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc txqs failed, count: %d, size: %zu.\n", + vsi->txqs.q_alloc, sizeof(*vsi->txqs.q)); + goto l_failed; + } + + vsi->rxqs.q = devm_kcalloc(dev, vsi->rxqs.q_alloc, sizeof(*vsi->rxqs.q), + GFP_KERNEL); + if (!vsi->rxqs.q) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc rxqs failed, count: %d, size: %zu.\n", + vsi->rxqs.q_alloc, sizeof(*vsi->rxqs.q)); + goto l_failed; + } + + sxe2_for_each_vsi_alloc_txq(vsi, i) + { + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) { + ret = -ENOMEM; + goto l_failed; + } + q->vsi = vsi; + q->idx_in_vsi = i; + q->idx_in_pf = SXE2_Q_IDX_INVAL; + q->dev = dev; + q->depth = vsi->txqs.depth; + u64_stats_init(&q->syncp); + WRITE_ONCE(vsi->txqs.q[i], q); + } + + sxe2_for_each_vsi_alloc_rxq(vsi, i) + { + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) { + ret = -ENOMEM; + goto l_failed; + } + q->vsi = vsi; + q->idx_in_vsi = i; + q->idx_in_pf = SXE2_Q_IDX_INVAL; + q->dev = dev; + q->depth = vsi->rxqs.depth; + u64_stats_init(&q->syncp); + WRITE_ONCE(vsi->rxqs.q[i], q); + } + vsi->af_xdp_zc_qps = bitmap_zalloc( + (unsigned int)max(vsi->rxqs.q_cnt, vsi->txqs.q_cnt), + GFP_KERNEL); + if (!vsi->af_xdp_zc_qps) { + ret = -ENOMEM; + goto l_failed; + } + + return ret; + +l_failed: + sxe2_vsi_queues_free(vsi); + return ret; +} + +STATIC s32 sxe2_vsi_irq_data_alloc(struct sxe2_vsi *vsi, u16 idx) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_irq_data *irq_data; + + irq_data = devm_kzalloc(dev, sizeof(*irq_data), GFP_KERNEL); + if (!irq_data) { + LOG_DEV_ERR("irq_data alloc failed.\n"); + ret = -ENOMEM; + goto l_end; + } + + irq_data->vsi = vsi; + irq_data->idx_in_vsi = idx; + irq_data->idx_in_pf = SXE2_IRQ_IDX_INVAL; + + vsi->irqs.irq_data[idx] = irq_data; + +l_end: + return ret; +} + +STATIC s32 sxe2_vsi_irqs_data_alloc(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 i; + + vsi->irqs.irq_data = devm_kcalloc(dev, vsi->irqs.cnt, + sizeof(*vsi->irqs.irq_data), GFP_KERNEL); + if (!vsi->irqs.irq_data) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc irq_data failed, count: %d, size: %zu.\n", + vsi->irqs.cnt, sizeof(*vsi->irqs.irq_data)); + goto l_end; + } + + sxe2_for_each_vsi_irq(vsi, i) + { + ret = sxe2_vsi_irq_data_alloc(vsi, i); + if (ret) + goto l_failed; + } + + return ret; + +l_failed: + sxe2_vsi_irqs_data_free(vsi); +l_end: + return ret; +} + +STATIC void sxe2_vsi_rxqs_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + struct sxe2_fwc_vsi_q_info *q_info = &info->props.rxq_info; + struct sxe2_vsi_queues *qs 
= &vsi->rxqs; + + q_info->cnt = cpu_to_le16(qs->q_cnt); + q_info->base_idx = cpu_to_le16(qs->q[0]->idx_in_pf); + + info->props.rxq_valid = 1; +} + +STATIC void sxe2_vsi_txqs_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + struct sxe2_fwc_vsi_q_info *q_info = &info->props.txq_info; + struct sxe2_vsi_queues *qs = &vsi->txqs; + + q_info->cnt = cpu_to_le16(qs->q_cnt); + q_info->base_idx = cpu_to_le16(qs->q[0]->idx_in_pf); +} + +STATIC void sxe2_vsi_base_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + info->vsi_id = cpu_to_le16(vsi->idx_in_dev); + + switch (vsi->type) { + case SXE2_VSI_T_PF: + case SXE2_VSI_T_LB: + case SXE2_VSI_T_CTRL: + case SXE2_VSI_T_DPDK_PF: + info->type = SXE2_VSI_HW_T_PF; + break; + case SXE2_VSI_T_VF: + case SXE2_VSI_T_DPDK_VF: + info->type = SXE2_VSI_HW_T_VF; + info->vf_id = cpu_to_le16(vsi->vf_node->vf_idx); + break; + case SXE2_VSI_T_MACVLAN: + case SXE2_VSI_T_ESW: + case SXE2_VSI_T_DPDK_ESW: + info->type = SXE2_VSI_HW_T_VMDQ2; + break; + default: + break; + } +} + +static void sxe2_vsi_tc_rxqs_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + u32 i; + + sxe2_for_each_tc(i) + { + if (vsi->type == SXE2_VSI_T_VF) { + info->props.rxq_info.tc_q_map[i].offset = + vsi->tc.info[0].rxq_offset; + info->props.rxq_info.tc_q_map[i].pow = + (u16)order_base_2(vsi->tc.info[0].rxq_cnt); + } else { + info->props.rxq_info.tc_q_map[i].offset = + vsi->tc.info[i].rxq_offset; + info->props.rxq_info.tc_q_map[i].pow = + (u16)order_base_2(vsi->tc.info[i].rxq_cnt); + } + } +} + +static void sxe2_vsi_fnav_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + if (vsi->type != SXE2_VSI_T_PF && vsi->type != SXE2_VSI_T_VF && + vsi->type != SXE2_VSI_T_CTRL && vsi->type != SXE2_VSI_T_DPDK_PF && + vsi->type != SXE2_VSI_T_DPDK_VF) { + return; + } + info->props.fnav_info.gsize = vsi->fnav.space_gsize; + info->props.fnav_info.bsize = vsi->fnav.space_bsize; + info->props.fnav_info.fnav_enable = 1; + info->props.fnav_info.auto_evict = 0; + info->props.fnav_info.prog_enable = 1; +} + +STATIC void sxe2_vsi_info_trans(struct sxe2_vsi *vsi, + struct sxe2_fwc_vsi_crud_info *info) +{ + sxe2_vsi_base_info_trans(vsi, info); + + sxe2_vsi_rxqs_info_trans(vsi, info); + + sxe2_vsi_txqs_info_trans(vsi, info); + + sxe2_vsi_tc_rxqs_info_trans(vsi, info); + + sxe2_vsi_fnav_info_trans(vsi, info); +} + +STATIC s32 sxe2_fwc_vsi_cfg(struct sxe2_adapter *adapter, + struct sxe2_fwc_vsi_crud_info *info) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_CFG, info, sizeof(*info), NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("add vsi failed, ret=%d\n", ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_vsi_hw_decfg(struct sxe2_adapter *adapter, u16 vsi_id) +{ + s32 ret; + struct sxe2_cmd_params cmd = {}; + struct sxe2_fwc_vsi_crud_info info = {}; + + info.vsi_id = cpu_to_le16(vsi_id); + info.is_clear = true; + + sxe2_cmd_params_dflt_fill(&cmd, SXE2_CMD_VSI_CFG, &info, sizeof(info), NULL, + 0); + + ret = sxe2_cmd_fw_exec(adapter, &cmd); + if (ret) { + LOG_DEV_ERR("del vsi(%d in device) failed, ret=%d\n", vsi_id, ret); + ret = -EIO; + } + + return ret; +} + +STATIC s32 sxe2_vsi_hw_cfg(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_fwc_vsi_crud_info info = {}; + struct sxe2_adapter *adapter = vsi->adapter; + + sxe2_vsi_info_trans(vsi, &info); + + ret = sxe2_fwc_vsi_cfg(adapter, &info); + if (ret) + goto l_end; + +l_end: + return 
ret;
+}
+
+void sxe2_queue_add(struct sxe2_queue *queue, struct sxe2_list *head)
+{
+	struct sxe2_adapter *adapter = queue->vsi->adapter;
+
+	if (queue->next) {
+		LOG_WARN_BDF("queue[%d][%d] next pointer is not NULL\n",
+			     queue->idx_in_vsi, queue->idx_in_pf);
+	}
+
+	queue->next = head->next;
+	head->next = queue;
+	head->cnt++;
+}
+
+STATIC void sxe2_map_txq_to_irq(struct sxe2_vsi *vsi, u16 cnt, u16 q_idx,
+				u16 irq_idx)
+{
+	struct sxe2_queue *queue;
+	struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[irq_idx];
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	irq_data->tx.list.cnt = 0;
+	irq_data->tx.list.next = NULL;
+
+	while (cnt) {
+		queue = vsi->txqs.q[q_idx];
+		queue->irq_data = irq_data;
+		sxe2_queue_add(queue, &irq_data->tx.list);
+		LOG_INFO_BDF("map txq=%d to irq=%d\n", q_idx, irq_idx);
+		cnt--;
+		q_idx++;
+	}
+}
+
+STATIC void sxe2_map_rxq_to_irq(struct sxe2_vsi *vsi, u16 cnt, u16 q_idx,
+				u16 irq_idx)
+{
+	struct sxe2_queue *queue;
+	struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[irq_idx];
+
+	irq_data->rx.list.cnt = 0;
+	irq_data->rx.list.next = NULL;
+
+	while (cnt) {
+		queue = vsi->rxqs.q[q_idx];
+		queue->irq_data = irq_data;
+		sxe2_queue_add(queue, &irq_data->rx.list);
+		cnt--;
+		q_idx++;
+	}
+}
+
+void sxe2_vsi_queues_irqs_map(struct sxe2_vsi *vsi)
+{
+	u16 irq_cnt = vsi->irqs.cnt;
+	u16 txq_remain = vsi->txqs.q_cnt;
+	u16 rxq_remain = vsi->rxqs.q_cnt;
+	u16 i;
+	u16 txq_cnt, rxq_cnt, txq_idx = 0, rxq_idx = 0;
+
+	sxe2_for_each_vsi_irq(vsi, i)
+	{
+		/* spread the remaining queues evenly over the remaining
+		 * vectors: vector i takes ceil(remain / (irq_cnt - i)),
+		 * e.g. 10 rxqs on 4 vectors map as 3/3/2/2.
+		 */
+		txq_cnt = (u16)DIV_ROUND_UP(txq_remain, irq_cnt - i);
+		rxq_cnt = (u16)DIV_ROUND_UP(rxq_remain, irq_cnt - i);
+
+		sxe2_map_txq_to_irq(vsi, txq_cnt, txq_idx, i);
+		sxe2_map_rxq_to_irq(vsi, rxq_cnt, rxq_idx, i);
+
+		txq_idx += txq_cnt;
+		rxq_idx += rxq_cnt;
+		txq_remain -= txq_cnt;
+		rxq_remain -= rxq_cnt;
+	}
+}
+
+void sxe2_vsi_queues_irqs_unmap(struct sxe2_vsi *vsi)
+{
+	struct sxe2_irq_data *irq_data;
+	struct sxe2_queue *queue;
+	u16 i;
+
+	if (vsi->irqs.cnt == 0 || !vsi->irqs.irq_data)
+		return;
+
+	sxe2_for_each_vsi_irq(vsi, i)
+	{
+		irq_data = vsi->irqs.irq_data[i];
+
+		sxe2_for_each_queue(queue, irq_data->tx.list)
+		{
+			queue->irq_data = NULL;
+			queue->next = NULL;
+			irq_data->tx.list.cnt--;
+		}
+		irq_data->tx.list.next = NULL;
+		irq_data->tx.list.cnt = 0;
+
+		sxe2_for_each_queue(queue, irq_data->rx.list)
+		{
+			queue->irq_data = NULL;
+			queue->next = NULL;
+			irq_data->rx.list.cnt--;
+		}
+		irq_data->rx.list.next = NULL;
+		irq_data->rx.list.cnt = 0;
+	}
+}
+
+STATIC void sxe2_vsi_queues_cfg(struct sxe2_vsi *vsi)
+{
+	if (!vsi->txqs.depth)
+		vsi->txqs.depth = SXE2_DFLT_NUM_TX_DESC;
+
+	if (!vsi->rxqs.depth)
+		vsi->rxqs.depth = SXE2_DFLT_NUM_RX_DESC;
+}
+
+STATIC void sxe2_vsi_dcb_tc_cfg(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	struct sxe2_dcbx_cfg *dcbxcfg = &adapter->dcb_ctxt.local_dcbx_cfg;
+
+	if (!test_bit(SXE2_FLAG_DCB_ENABLE, adapter->flags)) {
+		vsi->tc.tc_cnt = 1;
+		vsi->tc.tc_map = SXE2_VSI_DFLT_TC;
+	}
+
+	switch (vsi->type) {
+	case SXE2_VSI_T_PF:
+		vsi->tc.tc_cnt = sxe2_dcb_tc_cnt_get(dcbxcfg);
+		vsi->tc.tc_map = sxe2_dcb_tc_bitmap_get(dcbxcfg);
+		break;
+
+	case SXE2_VSI_T_CTRL:
+	case SXE2_VSI_T_LB:
+	default:
+		vsi->tc.tc_cnt = 1;
+		vsi->tc.tc_map = SXE2_VSI_DFLT_TC;
+		break;
+	}
+}
+
+void sxe2_vsi_tc_cfg(struct sxe2_vsi *vsi)
+{
+	u16 i, txqs_per_tc, rxqs_per_tc;
+	u16 rx_offset = 0, tx_offset = 0, txq_cnt = 0, rxq_cnt = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	sxe2_vsi_dcb_tc_cfg(vsi);
+
+	LOG_INFO_BDF("vsi tc cfg, tc_cnt=%d, tc_map=%x\n",
+		     vsi->tc.tc_cnt, vsi->tc.tc_map);
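+	/*
+	 * Per-TC queue split: each enabled TC receives q_alloc / tc_cnt
+	 * queues, clamped to at least one; with more than one TC the count
+	 * is rounded down to a power of two so the tc_q_map "pow" field
+	 * (order_base_2()) describes it exactly, e.g. 10 txqs over 3 TCs
+	 * give 10 / 3 = 3 -> rounddown_pow_of_two(3) = 2 queues per TC.
+	 */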
+
+	txqs_per_tc = vsi->txqs.q_alloc / vsi->tc.tc_cnt;
+	rxqs_per_tc = vsi->rxqs.q_alloc / vsi->tc.tc_cnt;
+
+	if (!txqs_per_tc)
+		txqs_per_tc = 1;
+	if (!rxqs_per_tc)
+		rxqs_per_tc = 1;
+
+	if (vsi->tc.tc_cnt > 1) {
+		txqs_per_tc = rounddown_pow_of_two(txqs_per_tc);
+		rxqs_per_tc = rounddown_pow_of_two(rxqs_per_tc);
+	}
+
+	sxe2_for_each_tc(i)
+	{
+		if (!(vsi->tc.tc_map & BIT(i))) {
+			vsi->tc.info[i].txq_offset = 0;
+			vsi->tc.info[i].rxq_offset = 0;
+			vsi->tc.info[i].rxq_cnt = 1;
+			vsi->tc.info[i].txq_cnt = 1;
+			LOG_INFO_BDF("vsi tc[%d] q cfg, txq_cnt=%d, txq_offset=%d\n",
+				     i, vsi->tc.info[i].txq_cnt,
+				     vsi->tc.info[i].txq_offset);
+			continue;
+		}
+
+		vsi->tc.info[i].txq_offset = tx_offset;
+		vsi->tc.info[i].rxq_offset = rx_offset;
+		vsi->tc.info[i].txq_cnt = txqs_per_tc;
+		vsi->tc.info[i].rxq_cnt = rxqs_per_tc;
+
+		rx_offset += rxqs_per_tc;
+		tx_offset += txqs_per_tc;
+		LOG_INFO_BDF("vsi tc[%d] q cfg, txq_cnt=%d, txq_offset=%d rxq_offset=%d\n",
+			     i, vsi->tc.info[i].txq_cnt, vsi->tc.info[i].txq_offset,
+			     vsi->tc.info[i].rxq_offset);
+	}
+
+	if (rx_offset)
+		rxq_cnt = rx_offset;
+	else
+		rxq_cnt = rxqs_per_tc;
+
+	if (tx_offset)
+		txq_cnt = tx_offset;
+	else
+		txq_cnt = txqs_per_tc;
+
+	if (rxq_cnt > vsi->rxqs.q_alloc) {
+		LOG_DEV_ERR("Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+			    rxq_cnt, vsi->rxqs.q_alloc);
+		return;
+	}
+
+	if (txq_cnt > vsi->txqs.q_alloc) {
+		LOG_DEV_ERR("Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+			    txq_cnt, vsi->txqs.q_alloc);
+		return;
+	}
+
+	vsi->txqs.q_cnt = txq_cnt;
+	vsi->rxqs.q_cnt = rxq_cnt;
+
+	LOG_INFO_BDF("vsi txq_cnt=%d, rxq_cnt=%d\n", txq_cnt, rxq_cnt);
+}
+
+STATIC void sxe2_vsi_irq_cfg(struct sxe2_vsi *vsi, u16 idx)
+{
+	struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[idx];
+
+	irq_data->tx.itr_idx = SXE2_TX_ITR_IDX;
+	irq_data->tx.itr_setting = SXE2_TX_DFLT_ITR;
+	irq_data->rx.itr_idx = SXE2_RX_ITR_IDX;
+	irq_data->rx.itr_setting = SXE2_RX_DFLT_ITR;
+	if (vsi->type == SXE2_VSI_T_CTRL) {
+		irq_data->tx.itr_mode = SXE2_ITR_STATIC;
+		irq_data->rx.itr_mode = SXE2_ITR_STATIC;
+	} else {
+		irq_data->tx.itr_mode = SXE2_ITR_DYNAMIC;
+		irq_data->rx.itr_mode = SXE2_ITR_DYNAMIC;
+	}
+
+	switch (vsi->type) {
+	case SXE2_VSI_T_PF:
+	case SXE2_VSI_T_MACVLAN:
+		vsi->irqs.proc = sxe2_msix_ring_irq_handler;
+		break;
+	case SXE2_VSI_T_CTRL:
+		vsi->irqs.proc = sxe2_msix_ctrl_vsi_handler;
+		break;
+	case SXE2_VSI_T_ESW:
+		vsi->irqs.proc = sxe2_eswitch_msix_ring_irq_handler;
+		break;
+	case SXE2_VSI_T_LB:
+		vsi->irqs.proc = sxe2_msix_lb_rx_irq_handler;
+		break;
+	default:
+		break;
+	}
+
+	if (cpu_online(idx))
+		cpumask_set_cpu(idx, &irq_data->affinity_mask);
+}
+
+STATIC void sxe2_vsi_irqs_cfg(struct sxe2_vsi *vsi)
+{
+	u16 i;
+
+	sxe2_vsi_queues_irqs_map(vsi);
+
+	sxe2_for_each_vsi_irq(vsi, i)
+	{
+		sxe2_vsi_irq_cfg(vsi, i);
+	}
+
+	if (vsi->netdev)
+		sxe2_napi_add(vsi);
+}
+
+STATIC s32 sxe2_vsi_irqs_get(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+	struct sxe2_adapter *adapter = vsi->adapter;
+	u16 i;
+	s32 offset;
+
+	if (vsi->type == SXE2_VSI_T_VF ||
+	    vsi->type == SXE2_VSI_T_DPDK_VF) {
+		vsi->irqs.base_idx_in_pf = vsi->vf_node->irq_base_idx +
+					   vsi->irqs.base_idx_in_feature;
+	} else {
+		offset = sxe2_irq_offset_get(adapter, vsi->irqs.cnt, vsi->type);
+		if (offset < 0) {
+			ret = offset;
+			LOG_DEV_ERR("vsi get %d irqs failed.\n", vsi->irqs.cnt);
+			goto l_end;
+		}
+
+		vsi->irqs.base_idx_in_pf = (u16)offset;
+	}
+
sxe2_for_each_vsi_irq(vsi, i) vsi->irqs.irq_data[i]->idx_in_pf = + vsi->irqs.base_idx_in_pf + i; + +l_end: + return ret; +} + +STATIC void sxe2_vsi_irqs_put(struct sxe2_vsi *vsi) +{ + u16 i; + struct sxe2_adapter *adapter = vsi->adapter; + unsigned long *map = adapter->irq_ctxt.map; + unsigned long size = adapter->irq_ctxt.avail_cnt; + struct sxe2_irq_layout *irq_layout = &adapter->irq_ctxt.irq_layout; + + sxe2_for_each_vsi_irq(vsi, i) + { + vsi->irqs.irq_data[i]->idx_in_pf = SXE2_IRQ_IDX_INVAL; + } + + if (vsi->type == SXE2_VSI_T_MACVLAN) { + if (vsi->irqs.base_idx_in_pf + vsi->irqs.cnt > size) { + LOG_ERROR_BDF("put %d irq from index %d failed, size=%lu.\n", + vsi->irqs.cnt, vsi->irqs.base_idx_in_pf, size); + SXE2_BUG(); + return; + } + mutex_lock(&adapter->irq_ctxt.lock); + bitmap_clear(map, vsi->irqs.base_idx_in_pf, vsi->irqs.cnt); + irq_layout->macvlan--; + mutex_unlock(&adapter->irq_ctxt.lock); + } + + vsi->irqs.base_idx_in_pf = SXE2_IRQ_IDX_INVAL; +} + +STATIC s32 sxe2_vsi_queues_init(struct sxe2_vsi *vsi) +{ + s32 ret; + + ret = sxe2_vsi_queues_num_set(vsi); + if (ret) + goto l_end; + + sxe2_vsi_queues_cfg(vsi); + + sxe2_vsi_tc_cfg(vsi); + + ret = sxe2_vsi_queues_alloc(vsi); + if (ret) + goto l_end; + + ret = sxe2_vsi_txrx_queues_get(vsi); + if (ret) + goto l_failed; + + sxe2_vsi_netdev_tc_cfg(vsi, vsi->tc.tc_map); + + return ret; + +l_failed: + sxe2_vsi_queues_free(vsi); +l_end: + return ret; +} + +STATIC void sxe2_vsi_queues_deinit(struct sxe2_vsi *vsi) +{ + if (!vsi->rxqs.q && !vsi->txqs.q) + return; + + sxe2_vsi_txrx_queues_put(vsi); + sxe2_vsi_queues_free(vsi); +} + +STATIC s32 sxe2_vsi_irqs_init(struct sxe2_vsi *vsi) +{ + s32 ret; + + ret = sxe2_vsi_irqs_num_set(vsi); + if (ret) + goto l_end; + + ret = sxe2_vsi_irqs_data_alloc(vsi); + if (ret) + goto l_end; + + ret = sxe2_vsi_irqs_get(vsi); + if (ret) + goto l_failed; + + if (vsi->type != SXE2_VSI_T_VF && vsi->type != SXE2_VSI_T_DPDK_VF && + vsi->type != SXE2_VSI_T_DPDK_PF) { + sxe2_vsi_irqs_cfg(vsi); + } + + return ret; + +l_failed: + sxe2_vsi_irqs_data_free(vsi); + vsi->irqs.cnt = 0; +l_end: + return ret; +} + +STATIC void sxe2_vsi_irqs_deinit(struct sxe2_vsi *vsi) +{ + if (vsi->irqs.cnt == 0 || !vsi->irqs.irq_data) + return; + + sxe2_vsi_queues_irqs_unmap(vsi); + + sxe2_napi_del(vsi); + + sxe2_vsi_irqs_put(vsi); + sxe2_vsi_irqs_data_free(vsi); +} + +bool sxe2_vsi_rxft_support_get(struct sxe2_vsi *vsi) +{ + switch (vsi->type) { + case SXE2_VSI_T_PF: + case SXE2_VSI_T_VF: + case SXE2_VSI_T_DPDK_PF: + case SXE2_VSI_T_DPDK_VF: + return true; + default: + break; + } + + return false; +} + +STATIC s32 sxe2_vsi_rss_init(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + bool new_lut = false; + bool new_hkey = false; + + if (!sxe2_vsi_rxft_support_get(vsi)) { + LOG_INFO_BDF("unsupport vsi type: %u\n", vsi->type); + return 0; + } + + sxe2_rss_ctxt_init(vsi); + + if (vsi->type != SXE2_VSI_T_PF) + return 0; + + ret = sxe2_fwc_rss_hash_ctrl_set(vsi); + if (ret != 0) { + LOG_ERROR_BDF("sxe2_rss_hash_ctrl_set failed, ret: %d\n", ret); + return ret; + } + + if (!vsi->rss_ctxt.lut) { + vsi->rss_ctxt.lut = devm_kzalloc(dev, vsi->rss_ctxt.lut_size, + GFP_KERNEL); + if (!vsi->rss_ctxt.lut) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_end; + } + new_lut = true; + sxe2_rss_fill_lut(vsi->rss_ctxt.lut, vsi->rss_ctxt.lut_size, + vsi->rss_ctxt.queue_size); + } + ret = sxe2_fwc_rss_lut_set(vsi, vsi->rss_ctxt.lut, vsi->rss_ctxt.lut_size); + if 
(ret != 0) { + LOG_ERROR_BDF("sxe2_rss_lut_set failed, ret: %d, lut: %p, lut_size: " + "%u\n", + ret, vsi->rss_ctxt.lut, vsi->rss_ctxt.lut_size); + goto l_lut_free; + } + + if (!vsi->rss_ctxt.hkey) { + vsi->rss_ctxt.hkey = devm_kzalloc(dev, SXE2_RSS_HASH_KEY_SIZE, + GFP_KERNEL); + if (!vsi->rss_ctxt.hkey) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_lut_free; + } + new_hkey = true; + netdev_rss_key_fill((void *)vsi->rss_ctxt.hkey, + SXE2_RSS_HASH_KEY_SIZE); + } + ret = sxe2_fwc_rss_hkey_set(vsi, vsi->rss_ctxt.hkey); + if (ret != 0) { + LOG_ERROR_BDF("sxe2_fwc_rss_hkey_set failed, ret: %d, key: %p\n", + ret, vsi->rss_ctxt.hkey); + goto l_key_free; + } + + (void)sxe2_rss_default_flow_set(vsi); + + goto l_end; + +l_key_free: + if (new_hkey) { + devm_kfree(dev, vsi->rss_ctxt.hkey); + vsi->rss_ctxt.hkey = NULL; + } +l_lut_free: + if (new_lut) { + devm_kfree(dev, vsi->rss_ctxt.lut); + vsi->rss_ctxt.lut = NULL; + } +l_end: + return ret; +} + +STATIC void sxe2_vsi_rss_deinit(struct sxe2_vsi *vsi) +{ + u8 *lut = NULL; + u8 *hash_key = NULL; + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + + if (!sxe2_vsi_rxft_support_get(vsi)) { + LOG_INFO_BDF("unsupport vsi type: %u\n", vsi->type); + return; + } + + if (vsi->rss_ctxt.lut) { + devm_kfree(dev, vsi->rss_ctxt.lut); + vsi->rss_ctxt.lut = NULL; + } + + if (vsi->rss_ctxt.hkey) { + devm_kfree(dev, vsi->rss_ctxt.hkey); + vsi->rss_ctxt.hkey = NULL; + } + + sxe2_rss_vsi_flow_clean(vsi); + + lut = kzalloc(vsi->rss_ctxt.lut_size, GFP_KERNEL); + if (!lut) { + LOG_ERROR_BDF("no memory for lut!\n"); + goto hkey_clean; + } + ret = sxe2_fwc_rss_lut_set(vsi, lut, vsi->rss_ctxt.lut_size); + if (ret != 0) { + LOG_ERROR_BDF("sxe2_rss_lut_set failed, ret: %d, lut: %p, lut_size: " + "%u\n", + ret, lut, vsi->rss_ctxt.lut_size); + } + kfree(lut); + +hkey_clean: + hash_key = kzalloc(SXE2_RSS_HASH_KEY_SIZE, GFP_KERNEL); + if (!hash_key) { + ret = -ENOMEM; + LOG_ERROR_BDF("no memory for hkey!\n"); + goto l_end; + } + ret = sxe2_fwc_rss_hkey_set(vsi, hash_key); + if (ret != 0) { + LOG_ERROR_BDF("sxe2_fwc_rss_hkey_set failed, ret: %d, key: %p\n", + ret, hash_key); + } + kfree(hash_key); + +l_end: + LOG_INFO_BDF("sxe2 vsi rss deinit done, id=%u type=%u!\n", vsi->id_in_pf, + vsi->type); +} + +STATIC void sxe2_vsi_qs_stats_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_qs_stats *vsi_qs_stat = &vsi->vsi_qs_stats; + + kfree(vsi_qs_stat->txqs_stats); + vsi_qs_stat->txqs_stats = NULL; + + kfree(vsi_qs_stat->rxqs_stats); + vsi_qs_stat->rxqs_stats = NULL; +} + +STATIC s32 sxe2_vsi_qs_stats_init(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_qs_stats *vsi_qs_stats; + struct sxe2_adapter *adapter = vsi->adapter; + u16 i; + + vsi_qs_stats = &vsi->vsi_qs_stats; + + if (!vsi_qs_stats->txqs_stats) { + vsi_qs_stats->txqs_stats = kcalloc(vsi_qs_stats->vsi_qs_stats_maxcnt, + sizeof(*vsi_qs_stats->txqs_stats), + GFP_KERNEL); + if (!vsi_qs_stats->txqs_stats) { + LOG_ERROR_BDF("alloc txqs stats failed, count: %d, size: " + "%zu.\n", + vsi_qs_stats->vsi_qs_stats_maxcnt, + sizeof(*vsi_qs_stats->txqs_stats)); + goto err_out; + } + + sxe2_for_each_vsi_q_maxcnt(vsi, i) + { + u64_stats_init(&vsi_qs_stats->txqs_stats[i].syncp); + } + } + + if (!vsi_qs_stats->rxqs_stats) { + vsi_qs_stats->rxqs_stats = kcalloc(vsi_qs_stats->vsi_qs_stats_maxcnt, + sizeof(*vsi_qs_stats->rxqs_stats), + GFP_KERNEL); + if (!vsi_qs_stats->rxqs_stats) { + LOG_ERROR_BDF("alloc rxqs stats failed, count: %d, size: " + "%zu.\n", + 
vsi_qs_stats->vsi_qs_stats_maxcnt, + sizeof(*vsi_qs_stats->rxqs_stats)); + goto err_out; + } + + sxe2_for_each_vsi_q_maxcnt(vsi, i) + { + u64_stats_init(&vsi_qs_stats->rxqs_stats[i].syncp); + } + } + + sxe2_for_each_vsi_alloc_txq(vsi, i) + { + struct sxe2_queue *txq = vsi->txqs.q[i]; + + txq->stats = &vsi_qs_stats->txqs_stats[i]; + } + + sxe2_for_each_vsi_alloc_rxq(vsi, i) + { + struct sxe2_queue *rxq = vsi->rxqs.q[i]; + + rxq->stats = &vsi_qs_stats->rxqs_stats[i]; + } + + return 0; + +err_out: + sxe2_vsi_qs_stats_deinit(vsi); + return -ENOMEM; +} + +STATIC void sxe2_vsi_fnav_init(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + u16 g_val = adapter->caps_ctxt.fnav_space_gsize; + u16 b_val = adapter->caps_ctxt.fnav_space_bsize; + + if (!sxe2_vsi_rxft_support_get(vsi)) { + LOG_INFO_BDF("unsupport vsi type: %u\n", vsi->type); + return; + } + + memset(&vsi->fnav, 0, sizeof(vsi->fnav)); + + if (vsi->type == SXE2_VSI_T_PF || vsi->type == SXE2_VSI_T_DPDK_PF) { + vsi->fnav.space_gsize = g_val; + vsi->fnav.space_bsize = b_val; + } else if (vsi->type == SXE2_VSI_T_VF || vsi->type == SXE2_VSI_T_DPDK_VF) { + vsi->fnav.space_gsize = 0; + vsi->fnav.space_bsize = b_val; + } + + mutex_init(&vsi->fnav.flow_cfg_lock); + INIT_LIST_HEAD(&vsi->fnav.filter_list); + INIT_LIST_HEAD(&vsi->fnav.flow_cfg_list); +} + +STATIC void sxe2_vsi_fnav_deinit(struct sxe2_vsi *vsi) +{ + struct sxe2_vsi_fnav *vsi_fnav = &vsi->fnav; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!sxe2_vsi_rxft_support_get(vsi)) + return; + + sxe2_fnav_clean_by_vsi(vsi, true); + + sxe2_fnav_stats_free_by_vsi(vsi); + sxe2_fnav_filter_free_by_vsi(vsi); + sxe2_fnav_flow_cfg_free(vsi); + + mutex_destroy(&vsi_fnav->flow_cfg_lock); + + LOG_INFO_BDF("sxe2 fnav deinit done, id=%u type=%u!\n", vsi->id_in_pf, + vsi->type); +} + +static struct sxe2_vsi * +sxe2_vsi_create_unlock(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *vsi_create) +{ + struct sxe2_vsi *vsi; + + vsi = sxe2_vsi_init(adapter, vsi_create); + if (!vsi) + goto l_end; + + if (sxe2_vsi_queues_init(vsi) != 0) + goto l_queue_init_failed; + + if (sxe2_vsi_qs_stats_init(vsi)) + goto l_queues_stats_init_failed; + + if (sxe2_vsi_irqs_init(vsi) != 0) + goto l_irq_init_failed; + + sxe2_vsi_fnav_init(vsi); + + sxe2_vsi_acl_init(vsi); + sxe2_udptunnel_vsi_init(vsi); + + if (sxe2_vsi_hw_cfg(vsi) != 0) + goto l_vsi_setup_failed; + + if (sxe2_txsched_lan_vsi_cfg(vsi) != 0) + goto l_vsi_sched_failed; + + if (vsi_create->type != SXE2_VSI_T_LB) { + if (sxe2_vsi_rss_init(vsi) != 0) { + LOG_DEBUG_BDF("sxe2_rss_init failed!"); + goto l_vsi_update_config; + } + } + + (void)mutex_lock(&adapter->switch_ctxt.evb_mode_lock); + if ((adapter->switch_ctxt.evb_mode == BRIDGE_MODE_VEB || + vsi_create->type == SXE2_VSI_T_LB) && + sxe2_vsi_loopback_control(adapter, vsi->idx_in_dev, true)) { + (void)mutex_unlock(&adapter->switch_ctxt.evb_mode_lock); + goto l_vsi_rss_deinit; + } + (void)mutex_unlock(&adapter->switch_ctxt.evb_mode_lock); + + if (!sxe2_eswitch_is_offload(adapter) && + (vsi_create->type == SXE2_VSI_T_PF || + vsi_create->type == SXE2_VSI_T_VF || + vsi_create->type == SXE2_VSI_T_DPDK_PF || + vsi_create->type == SXE2_VSI_T_DPDK_VF)) { + if (!sxe2_is_safe_mode(adapter)) { + if (sxe2_etype_fltr_init(vsi)) { + LOG_ERROR("etype filter config failed.\n"); + goto l_vsi_rss_deinit; + } + } + + if (sxe2_src_vsi_prune_control(adapter, vsi->idx_in_dev, true)) + goto l_vsi_rss_deinit; + + if (sxe2_srcvsi_rule_add(vsi)) { + LOG_ERROR("vsi[%u][%u] srcvsi rule add failed.\n", + 
vsi->id_in_pf, vsi->idx_in_dev); + goto l_vsi_rss_deinit; + } + + if (vsi_create->type == SXE2_VSI_T_VF || + vsi_create->type == SXE2_VSI_T_DPDK_VF) { + if (sxe2_vsi_spoofchk_control( + adapter, vsi->idx_in_dev, + vsi_create->vf->prop.spoofchk)) { + LOG_ERROR("vsi[%u][%u] spoofchk set %s failed.\n", + vsi->id_in_pf, vsi->idx_in_dev, + vsi_create->vf->prop.spoofchk ? "on" + : "off"); + goto l_vsi_rss_deinit; + } + + if (sxe2_mac_spoofchk_rule_add(adapter, vsi->idx_in_dev)) { + LOG_ERROR("vsi[%u][%u] spoofchk rule add failed.\n", + vsi->id_in_pf, vsi->idx_in_dev); + goto l_vsi_rss_deinit; + } + } + } + + return vsi; + +l_vsi_rss_deinit: + sxe2_vsi_rss_deinit(vsi); +l_vsi_update_config: + (void)sxe2_txsched_lan_vsi_rm(vsi); +l_vsi_sched_failed: + sxe2_vsi_fltr_remove(adapter, vsi->idx_in_dev); + (void)sxe2_vsi_hw_decfg(adapter, vsi->idx_in_dev); +l_vsi_setup_failed: + sxe2_vsi_irqs_deinit(vsi); +l_irq_init_failed: + sxe2_vsi_qs_stats_deinit(vsi); +l_queues_stats_init_failed: + sxe2_vsi_queues_deinit(vsi); +l_queue_init_failed: + sxe2_vsi_deinit(vsi); + +l_end: + return NULL; +} + +struct sxe2_vsi *sxe2_vsi_create(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *vsi_create) +{ + struct sxe2_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_create_unlock(adapter, vsi_create); + mutex_unlock(&adapter->vsi_ctxt.lock); + + return vsi; +} + +STATIC void sxe2_vsi_irqs_coalesce_deinit(struct sxe2_vsi *vsi) +{ + kfree(vsi->irqs.coalesce); + vsi->irqs.coalesce = NULL; +} + +void sxe2_vsi_destroy_unlock(struct sxe2_vsi *vsi) +{ + if (vsi->type == SXE2_VSI_T_ESW) + (void)sxe2_esw_vsi_disable_unlock(vsi); + else + (void)sxe2_vsi_disable_unlock(vsi); + + (void)sxe2_txsched_lan_vsi_rm(vsi); + + sxe2_vsi_fltr_remove(vsi->adapter, vsi->idx_in_dev); + + sxe2_vsi_fnav_deinit(vsi); + + sxe2_vsi_rss_deinit(vsi); + + sxe2_vsi_acl_deinit(vsi); + + (void)sxe2_user_vlan_destroy(vsi); + + sxe2_udptunnel_vsi_deinit(vsi); + + (void)sxe2_vsi_hw_decfg(vsi->adapter, vsi->idx_in_dev); + + sxe2_txsched_vsi_q_ctxt_free(vsi); + + sxe2_vsi_irqs_deinit(vsi); + sxe2_vsi_qs_stats_deinit(vsi); +#ifdef HAVE_XDP_SUPPORT + sxe2_vsi_xdp_qs_stats_deinit(vsi); +#endif + sxe2_vsi_queues_deinit(vsi); + + sxe2_vsi_irqs_coalesce_deinit(vsi); + + sxe2_vsi_deinit(vsi); +} + +void sxe2_vsi_destroy(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->vsi_ctxt.lock); + + sxe2_vsi_destroy_unlock(vsi); + + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +s32 sxe2_main_vsi_create(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi_cfg_params vsi_create = {}; + + vsi_create.type = SXE2_VSI_T_PF; + + adapter->vsi_ctxt.main_vsi = sxe2_vsi_create(adapter, &vsi_create); + if (!adapter->vsi_ctxt.main_vsi) + ret = -EIO; + + return ret; +} + +static s32 sxe2_ctrl_vsi_create(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi_cfg_params vsi_create = {}; + + vsi_create.type = SXE2_VSI_T_CTRL; + + adapter->vsi_ctxt.ctrl_vsi = sxe2_vsi_create(adapter, &vsi_create); + if (!adapter->vsi_ctxt.ctrl_vsi) + ret = -EIO; + + return ret; +} +static void sxe2_ctrl_vsi_destroy(struct sxe2_adapter *adapter) +{ + if (adapter->vsi_ctxt.ctrl_vsi) { + sxe2_vsi_destroy(adapter->vsi_ctxt.ctrl_vsi); + adapter->vsi_ctxt.ctrl_vsi = NULL; + } +} + +s32 sxe2_ctrl_vsi_init(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + + ret = sxe2_ctrl_vsi_create(adapter); + if (ret) { + LOG_ERROR_BDF("ctrl vsi create failed, ret:%d\n", ret); + goto l_out; + } + + ret = 
sxe2_vsi_open(adapter->vsi_ctxt.ctrl_vsi); + if (ret) { + LOG_ERROR_BDF("ctrl vsi open failed, ret:%d\n", ret); + goto l_vsi_open_failed; + } + + return 0; + +l_vsi_open_failed: + sxe2_ctrl_vsi_destroy(adapter); +l_out: + return ret; +} + +void sxe2_ctrl_vsi_deinit(struct sxe2_adapter *adapter) +{ + if (sxe2_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) { + LOG_WARN_BDF("dpdk only mode no need deinit ctrl vsi.\n"); + return; + } + + if (adapter->vsi_ctxt.ctrl_vsi) { + (void)sxe2_vsi_close(adapter->vsi_ctxt.ctrl_vsi); + sxe2_ctrl_vsi_destroy(adapter); + } +} + +void sxe2_vsi_destroy_all(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi *vsi; + u16 i; + + mutex_lock(&adapter->vsi_ctxt.lock); + + sxe2_for_each_vsi(&adapter->vsi_ctxt, i) + { + vsi = adapter->vsi_ctxt.vsi[i]; + if (!vsi) + continue; + sxe2_vsi_destroy_unlock(vsi); + } + adapter->vsi_ctxt.main_vsi = NULL; + + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +struct sxe2_vsi *sxe2_loopback_vsi_create(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi_cfg_params vsi_create = {}; + struct sxe2_vsi *vsi; + + vsi_create.type = SXE2_VSI_T_LB; + + vsi = sxe2_vsi_create_unlock(adapter, &vsi_create); + if (!vsi) + return NULL; + + return vsi; +} + +s32 sxe2_eswitch_vsi_create(struct sxe2_adapter *adapter) +{ + struct sxe2_vsi_cfg_params vsi_create = {}; + struct sxe2_vsi *vsi; + + vsi_create.type = SXE2_VSI_T_ESW; + vsi = sxe2_vsi_create(adapter, &vsi_create); + if (!vsi) { + LOG_ERROR_BDF("eswitch vsi create fail.\n"); + return -ENOMEM; + } + adapter->eswitch_ctxt.esw_vsi = vsi; + + LOG_INFO_BDF("eswitch vsi:%p [%u][%u].\n", vsi, vsi->id_in_pf, + vsi->idx_in_dev); + + adapter->eswitch_ctxt.user_esw_vsi = NULL; + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_KERNEL) { + vsi_create.type = SXE2_VSI_T_DPDK_ESW; + vsi = sxe2_vsi_create(adapter, &vsi_create); + if (!vsi) { + LOG_ERROR_BDF("user eswitch vsi create fail.\n"); + } else { + adapter->eswitch_ctxt.user_esw_vsi = vsi; + LOG_INFO_BDF("user eswitch vsi:%p [%u][%u].\n", vsi, + vsi->id_in_pf, vsi->idx_in_dev); + } + } + + if (sxe2_com_mode_get(adapter) != SXE2_COM_MODULE_DPDK) { + adapter->eswitch_ctxt.uplink_vsi = adapter->vsi_ctxt.main_vsi; + LOG_INFO_BDF("uplink_vsi:%p [%u][%u].\n", + adapter->eswitch_ctxt.uplink_vsi, + adapter->eswitch_ctxt.uplink_vsi->id_in_pf, + adapter->eswitch_ctxt.uplink_vsi->idx_in_dev); + } else { + adapter->eswitch_ctxt.uplink_vsi = NULL; + } + + return 0; +} + +STATIC void sxe2_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct sxe2_irq_data *irq_data = + container_of(notify, struct sxe2_irq_data, affinity_notify); + + cpumask_copy(&irq_data->affinity_mask, mask); +} + +STATIC void sxe2_irq_affinity_release(struct kref __always_unused *ref) +{ +} + +STATIC void sxe2_vsi_get_q_idx(struct sxe2_vsi *vsi, u16 irq_idx, u16 *txq, u16 *rxq) +{ + u16 txq_per_irq, txq_remainder, rxq_per_irq, rxq_remainder; + + txq_per_irq = vsi->txqs.q_cnt / vsi->irqs.cnt; + rxq_per_irq = vsi->rxqs.q_cnt / vsi->irqs.cnt; + txq_remainder = vsi->txqs.q_cnt % vsi->irqs.cnt; + rxq_remainder = vsi->rxqs.q_cnt % vsi->irqs.cnt; + + *txq = (u16)((txq_per_irq * irq_idx) + + (irq_idx < txq_remainder ? irq_idx : txq_remainder)); + *rxq = (u16)((rxq_per_irq * irq_idx) + + (irq_idx < rxq_remainder ? 
irq_idx : rxq_remainder)); +} + +STATIC s32 sxe2_vsi_irq_request(struct sxe2_vsi *vsi, s8 *base_name, u16 idx) +{ + s32 ret = 0; + struct sxe2_irq_data *irq_data; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 rx_idx, tx_idx; + unsigned int irq_num; + + irq_data = vsi->irqs.irq_data[idx]; + irq_num = adapter->irq_ctxt.msix_entries[vsi->irqs.base_idx_in_pf + idx] + .vector; + + sxe2_vsi_get_q_idx(vsi, idx, &tx_idx, &rx_idx); + + if (SXE2_IRQ_HAS_TXQ(irq_data) && SXE2_IRQ_HAS_RXQ(irq_data)) { + if (irq_data->rx.list.cnt == 1) + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "TxRx", rx_idx); + else + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d-%d", base_name, "TxRx", rx_idx, + rx_idx + irq_data->rx.list.cnt - 1); + } else if (SXE2_IRQ_HAS_TXQ(irq_data)) { + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "Tx", tx_idx); + } else if (SXE2_IRQ_HAS_RXQ(irq_data)) { + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "Rx", rx_idx); + } else { + goto l_end; + } + + ret = devm_request_irq(dev, irq_num, vsi->irqs.proc, 0, irq_data->name, + irq_data); + if (ret) { + LOG_DEV_ERR("MSI-X devm_request_irq failed, result: %d\n", ret); + goto l_end; + } + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { + irq_data->affinity_notify.notify = sxe2_irq_affinity_notify; + irq_data->affinity_notify.release = sxe2_irq_affinity_release; + (void)irq_set_affinity_notifier(irq_num, &irq_data->affinity_notify); + } + +l_end: + return ret; +} + +s32 sxe2_vsi_irqs_request(struct sxe2_vsi *vsi) +{ + s32 ret; + s8 base_name[SXE2_IRQ_NAME_STR_LEN]; + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u16 i; + u32 irq_num; + struct sxe2_irq_data *irq_data; + + if (vsi->type == SXE2_VSI_T_CTRL) { + (void)snprintf(base_name, sizeof(base_name) - 1, "%s-%s:ctrl", + dev_driver_string(dev), dev_name(dev)); + } else if (vsi->type == SXE2_VSI_T_ESW) { + (void)snprintf(base_name, sizeof(base_name) - 1, "%s-%s:eswitch", + dev_driver_string(dev), dev_name(dev)); + } else if (vsi->type == SXE2_VSI_T_LB) { + (void)snprintf(base_name, sizeof(base_name) - 1, "%s-lbtest", + dev_driver_string(dev)); + } else { + (void)snprintf(base_name, sizeof(base_name) - 1, "%s-%s", + dev_driver_string(dev), vsi->netdev->name); + } + + sxe2_for_each_vsi_irq(vsi, i) + { + ret = sxe2_vsi_irq_request(vsi, base_name, i); + if (ret) + goto l_end; + } + + ret = sxe2_cpu_rx_rmap_set(vsi); + if (ret) { + LOG_DEV_ERR("failed to setup CPU RMAP on vsi %u: %d\n", + vsi->id_in_pf, ret); + goto l_end; + } + + sxe2_for_each_vsi_irq(vsi, i) + { + irq_data = vsi->irqs.irq_data[i]; + irq_num = adapter->irq_ctxt + .msix_entries[vsi->irqs.base_idx_in_pf + i] + .vector; + (void)irq_set_affinity_hint(irq_num, &irq_data->affinity_mask); + } + + return 0; + +l_end: + while (i) { + i--; + irq_num = adapter->irq_ctxt + .msix_entries[vsi->irqs.base_idx_in_pf + i] + .vector; + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + (void)irq_set_affinity_notifier(irq_num, NULL); + + (void)irq_set_affinity_hint(irq_num, NULL); + devm_free_irq(dev, irq_num, vsi->irqs.irq_data[i]); + } + return ret; +} + +STATIC void sxe2_vsi_irq_free(struct sxe2_vsi *vsi, u16 idx) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[idx]; + u32 irq_num = 
adapter->irq_ctxt.msix_entries[vsi->irqs.base_idx_in_pf + idx] + .vector; + + if (!irq_data || !(SXE2_IRQ_HAS_TXQ(irq_data) || SXE2_IRQ_HAS_RXQ(irq_data))) + return; + + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + (void)irq_set_affinity_notifier(irq_num, NULL); + + (void)irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + devm_free_irq(dev, irq_num, irq_data); +} + +STATIC void sxe2_vsi_irqs_free(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_cpu_rx_rmap_free(vsi); + + sxe2_for_each_vsi_irq(vsi, i) + { + sxe2_vsi_irq_free(vsi, i); + } +} + +void sxe2_irq_txqs_cause_setup(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + struct sxe2_queue *queue; + + sxe2_for_each_queue(queue, irq_data->tx.list) + { + sxe2_hw_txq_irq_cause_setup(hw, queue->idx_in_pf, + irq_data->tx.itr_idx, + irq_data->idx_in_pf); + sxe2_flush(hw); + } +} + +void sxe2_irq_txqs_cause_clear(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + struct sxe2_queue *queue; + + sxe2_for_each_queue(queue, irq_data->tx.list) + { + sxe2_hw_txq_irq_cause_clear(hw, queue->idx_in_pf); + sxe2_flush(hw); + } +} + +void sxe2_irq_rxqs_cause_setup(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + struct sxe2_queue *queue; + + sxe2_for_each_queue(queue, irq_data->rx.list) + { + sxe2_hw_rxq_irq_cause_setup(hw, queue->idx_in_pf, + irq_data->rx.itr_idx, + irq_data->idx_in_pf); + sxe2_flush(hw); + } +} + +void sxe2_irq_rxqs_cause_clear(struct sxe2_irq_data *irq_data) +{ + struct sxe2_hw *hw = &irq_data->vsi->adapter->hw; + struct sxe2_queue *queue; + + sxe2_for_each_queue(queue, irq_data->rx.list) + { + sxe2_hw_rxq_irq_cause_clear(hw, queue->idx_in_pf); + sxe2_flush(hw); + } +} + +STATIC void sxe2_vsi_irq_setup(struct sxe2_vsi *vsi, u16 idx) +{ + struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[idx]; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_vf_node *vf; + u16 i; + + if (vsi->type != SXE2_VSI_T_VF) + sxe2_irq_itr_init(irq_data); + + sxe2_irq_rate_limit_init(irq_data); + + if (vsi->type == SXE2_VSI_T_ESW) { + sxe2_for_each_vsi_rxq(vsi, i) + { + vf = SXE2_VF_NODE(adapter, i); + if (!vf) { + LOG_WARN_BDF("vf:%u has freed.\n", i); + continue; + } + + sxe2_irq_txqs_cause_setup(vf->repr->irq_data); + + sxe2_irq_rxqs_cause_setup(vf->repr->irq_data); + } + } else { + sxe2_irq_txqs_cause_setup(irq_data); + + sxe2_irq_rxqs_cause_setup(irq_data); + } + + if (vsi->netdev && + (SXE2_IRQ_HAS_TXQ(irq_data) || SXE2_IRQ_HAS_RXQ(irq_data))) { + napi_enable(&irq_data->napi); + } + + if (vsi->type == SXE2_VSI_T_ESW) { + sxe2_for_each_vf(adapter, idx) + { + vf = SXE2_VF_NODE(adapter, idx); + napi_enable(&vf->repr->irq_data->napi); + } + } + + if (vsi->type != SXE2_VSI_T_VF) { + sxe2_hw_irq_enable(&irq_data->vsi->adapter->hw, irq_data->idx_in_pf); + sxe2_hw_irq_trigger(&irq_data->vsi->adapter->hw, + irq_data->idx_in_pf); + } +} + +STATIC void sxe2_vsi_irq_release(struct sxe2_vsi *vsi, u16 idx) +{ + struct sxe2_irq_data *irq_data = vsi->irqs.irq_data[idx]; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_queue *queue; + struct sxe2_vf_node *vf; + u16 i; + + if (vsi->type == SXE2_VSI_T_ESW) { + sxe2_for_each_vsi_rxq(vsi, i) + { + vf = SXE2_VF_NODE(adapter, i); + if (!vf) { + LOG_WARN_BDF("vf:%u has freed.\n", i); + continue; + } + + sxe2_for_each_queue(queue, vf->repr->irq_data->tx.list) + { + sxe2_hw_txq_irq_cause_clear(hw, queue->idx_in_pf); + } + sxe2_for_each_queue(queue, 
vf->repr->irq_data->rx.list) + { + sxe2_hw_rxq_irq_cause_clear(hw, queue->idx_in_pf); + } + } + } else { + sxe2_for_each_queue(queue, irq_data->tx.list) + { + sxe2_hw_txq_irq_cause_clear(hw, queue->idx_in_pf); + } + sxe2_for_each_queue(queue, irq_data->rx.list) + { + sxe2_hw_rxq_irq_cause_clear(hw, queue->idx_in_pf); + } + } + + if (SXE2_IRQ_HAS_TXQ(irq_data)) + sxe2_itr_set(irq_data, &irq_data->tx, 0); + + if (SXE2_IRQ_HAS_RXQ(irq_data)) + sxe2_itr_set(irq_data, &irq_data->rx, 0); + + sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf, 0); + sxe2_flush(hw); +} + +void sxe2_vsi_irqs_setup(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_for_each_vsi_irq(vsi, i) + { + sxe2_vsi_irq_setup(vsi, i); + } +} + +void sxe2_vsi_irqs_release(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_for_each_vsi_irq(vsi, i) + { + sxe2_vsi_irq_release(vsi, i); + } +} + +s32 sxe2_vsi_irqs_configure(struct sxe2_vsi *vsi) +{ + s32 ret; + + ret = sxe2_vsi_irqs_request(vsi); + if (ret) + goto l_end; + + sxe2_vsi_irqs_setup(vsi); + +l_end: + return ret; +} + +void sxe2_vsi_irqs_clear_free(struct sxe2_vsi *vsi) +{ + sxe2_vsi_irqs_release(vsi); + sxe2_vsi_irqs_free(vsi); +} + +s32 sxe2_vsi_check(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + ret = sxe2_hw_txqs_disable_check(vsi); + if (ret) { + LOG_ERROR_BDF("vsi[%d] type %d check txqs disable failed.\n", + vsi->id_in_pf, vsi->type); + } + + return ret; +} + +static inline void sxe2_vsi_carrier_on(struct sxe2_vsi *vsi) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct net_device *netdev = vsi->netdev; + + if (netdev) { + mutex_lock(&adapter->link_ctxt.link_status_lock); + if (sxe2_get_pf_link_status(adapter)) { + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + LOG_DEV_INFO("nic link is up\n"); + } + mutex_unlock(&adapter->link_ctxt.link_status_lock); + } +} + +s32 sxe2_vsi_open(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!test_bit(SXE2_VSI_S_CLOSE, vsi->state)) { + LOG_WARN_BDF("vsi opened already.\n"); + return 0; + } + + ret = sxe2_vsi_check(vsi); + if (ret) { + LOG_ERROR_BDF("vsi[%u][%u] type:%u check failed ret:%d.\n", + vsi->id_in_pf, vsi->idx_in_dev, vsi->type, ret); + SXE2_BUG_ON(ret); + } + + ret = sxe2_tx_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("vsi[%u][%u] type:%u open: tx config err, ret=%d\n", + vsi->id_in_pf, vsi->idx_in_dev, vsi->type, ret); + goto l_end; + } + + if (sxe2_xdp_is_enable(vsi)) { + ret = sxe2_xdp_tx_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("vsi[%u][%u] type:%u open: tx config err, " + "ret=%d\n", + vsi->id_in_pf, vsi->idx_in_dev, vsi->type, + ret); + goto l_xdp_fail; + } + } + + ret = sxe2_rx_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("vsi[%u][%u] type:%u open: rx config err, ret=%d\n", + vsi->id_in_pf, vsi->idx_in_dev, vsi->type, ret); + goto l_rx_fail; + } + + ret = sxe2_vsi_irqs_configure(vsi); + if (ret) + goto l_irq_fail; + + if (vsi->netdev && vsi->type == SXE2_VSI_T_PF) + sxe2_vsi_carrier_on(vsi); + + clear_bit(SXE2_VSI_S_DOWN, vsi->state); + clear_bit(SXE2_VSI_S_CLOSE, vsi->state); + + return 0; + +l_irq_fail: + (void)sxe2_rxqs_stop(vsi); + sxe2_rx_rings_res_free(vsi); +l_rx_fail: + if (sxe2_xdp_is_enable(vsi)) + (void)sxe2_xdp_txqs_stop(vsi); +l_xdp_fail: + (void)sxe2_txqs_stop(vsi); + sxe2_tx_rings_res_free(vsi); +l_end: + return ret; +} + +STATIC void sxe2_vsi_irq_disable(struct sxe2_vsi *vsi, u16 idx) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + struct sxe2_queue *queue; + struct sxe2_irq_data 
*irq_data = vsi->irqs.irq_data[idx]; + u16 vf_idx; + struct sxe2_vf_node *vf; + + sxe2_for_each_queue(queue, irq_data->tx.list) + { + sxe2_hw_txq_irq_cause_switch(hw, queue->idx_in_pf, false); + } + + sxe2_for_each_queue(queue, irq_data->rx.list) + { + sxe2_hw_rxq_irq_cause_switch(hw, queue->idx_in_pf, false); + } + + if (vsi->type != SXE2_VSI_T_VF) + synchronize_irq(adapter->irq_ctxt.msix_entries[irq_data->idx_in_pf] + .vector); + + if (vsi->type == SXE2_VSI_T_ESW) { + sxe2_for_each_vf(adapter, vf_idx) + { + vf = SXE2_VF_NODE(adapter, vf_idx); + if (!vf->repr->dst) + continue; + napi_disable(&vf->repr->irq_data->napi); + } + } else { + if (vsi->netdev) { + if (irq_data->rx.list.next || irq_data->tx.list.next) + napi_disable(&irq_data->napi); + + cancel_work_sync(&irq_data->tx.dim.work); + cancel_work_sync(&irq_data->rx.dim.work); + } + } + + sxe2_hw_irq_disable(hw, irq_data->idx_in_pf); + sxe2_flush(hw); +} + +void sxe2_vsi_irqs_disable(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_for_each_vsi_irq(vsi, i) + { + sxe2_vsi_irq_disable(vsi, i); + } +} + +void sxe2_napi_add(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_for_each_vsi_irq(vsi, i) + { + netif_napi_add(vsi->netdev, &vsi->irqs.irq_data[i]->napi, + sxe2_napi_poll, NAPI_POLL_WEIGHT); + } + set_bit(SXE2_VSI_S_NAPI_ADDED, vsi->state); +} + +void sxe2_napi_del(struct sxe2_vsi *vsi) +{ + u16 i; + + if (!test_bit(SXE2_VSI_S_NAPI_ADDED, vsi->state)) + return; + + sxe2_for_each_vsi_irq(vsi, i) + { + netif_napi_del(&vsi->irqs.irq_data[i]->napi); + } + + clear_bit(SXE2_VSI_S_NAPI_ADDED, vsi->state); +} + +STATIC void sxe2_vsi_txq_clean(struct sxe2_vsi *vsi) +{ + u32 i; + + sxe2_for_each_vsi_txq(vsi, i) + { + sxe2_tx_ring_clean(vsi->txqs.q[i]); + } + + if (sxe2_xdp_is_enable(vsi)) { + for (i = 0; i < vsi->num_xdp_txq; i++) + sxe2_tx_ring_clean(vsi->xdp_rings.q[i]); + } +} + +void sxe2_vsi_rxq_clean(struct sxe2_vsi *vsi) +{ + u16 i; + + sxe2_for_each_vsi_rxq(vsi, i) + { + sxe2_rx_ring_clean(vsi->rxqs.q[i]); + } +} + +s32 sxe2_vsi_down(struct sxe2_vsi *vsi) +{ + s32 ret; + struct sxe2_adapter *adapter = vsi->adapter; + bool need_reset = false; + + if (vsi->netdev && vsi->type == SXE2_VSI_T_PF) { + netif_carrier_off(vsi->netdev); + netif_tx_disable(vsi->netdev); + } else if (vsi->type == SXE2_VSI_T_ESW) { + sxe2_eswitch_txqs_stop(adapter); + } + + ret = sxe2_txqs_stop(vsi); + if (ret) { + need_reset = true; + LOG_ERROR_BDF("stop tx queues failed, vsi %d error %d\n", + vsi->idx_in_dev, ret); + } + + if (sxe2_xdp_is_enable(vsi)) { + ret = sxe2_xdp_txqs_stop(vsi); + if (ret) { + need_reset = true; + LOG_ERROR_BDF("failed stop xdp rings, vsi %d error %d\n", + vsi->id_in_pf, ret); + } + } + + ret = sxe2_rxqs_stop(vsi); + if (ret) { + need_reset = true; + LOG_ERROR_BDF("stop rx queues failed, vsi %d error %d\n", + vsi->idx_in_dev, ret); + } + + sxe2_vsi_irqs_disable(vsi); + + if (need_reset) + sxe2_trigger_and_wait_resetting(adapter); + + sxe2_vsi_txq_clean(vsi); + + sxe2_vsi_rxq_clean(vsi); + + return ret; +} + +s32 sxe2_vsi_up(struct sxe2_vsi *vsi) +{ + s32 ret; + s32 rc; + struct sxe2_adapter *adapter = vsi->adapter; + + ret = sxe2_tx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("tx hw configure failed, ret=%d\n", ret); + goto l_end; + } + + if (sxe2_xdp_is_enable(vsi)) { + ret = sxe2_xdp_tx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("xdp tx hw configure failed, ret=%d\n", ret); + goto l_xdp_err; + } + } + + ret = sxe2_rx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("vsi:%u type:%u rx queue cfg failed.(err:%d)\n", + vsi->idx_in_dev, vsi->type, ret); + goto 
l_rx_err; + } + + sxe2_vsi_irqs_setup(vsi); + + clear_bit(SXE2_VSI_S_DOWN, vsi->state); + + if (vsi->netdev && vsi->type == SXE2_VSI_T_PF) + sxe2_vsi_carrier_on(vsi); + + return 0; + +l_rx_err: + if (sxe2_xdp_is_enable(vsi)) { + rc = sxe2_xdp_txqs_stop(vsi); + if (rc) + LOG_ERROR_BDF("failed stop xdp rings, vsi %d error %d\n", + vsi->id_in_pf, rc); + } + +l_xdp_err: + rc = sxe2_txqs_stop(vsi); + if (rc) { + LOG_ERROR_BDF("stop tx queues failed, vsi %d error %d\n", + vsi->idx_in_dev, rc); + } + +l_end: + return ret; +} + +s32 sxe2_vsi_down_up_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + if (!test_and_set_bit(SXE2_VSI_S_DOWN, vsi->state)) { + ret = sxe2_vsi_down(vsi); + if (ret) { + LOG_DEV_ERR("sxe2_vsi_down err %d\n", ret); + goto l_end; + } + + ret = sxe2_vsi_up(vsi); + if (ret) { + LOG_DEV_ERR("sxe2_vsi_up err %d\n", ret); + goto l_end; + } + } + +l_end: + + return ret; +} + +s32 sxe2_vsi_down_up(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2_vsi_down_up_unlock(vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_vsi_close(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (!test_and_set_bit(SXE2_VSI_S_CLOSE, vsi->state)) { + if (!test_and_set_bit(SXE2_VSI_S_DOWN, vsi->state)) + ret = sxe2_vsi_down(vsi); + + sxe2_vsi_irqs_clear_free(vsi); + + sxe2_tx_rings_res_free(vsi); + + sxe2_rx_rings_res_free(vsi); + } + + return ret; +} + +static s32 __sxe2_vsi_disable_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + return ret; + + ret = sxe2_vsi_close(vsi); + + set_bit(SXE2_VSI_S_DISABLE, vsi->state); + + return ret; +} + +s32 sxe2_vsi_disable_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + return ret; + + switch (vsi->type) { + case SXE2_VSI_T_PF: + ret = sxe2_main_vsi_disable_unlock(vsi); + break; + case SXE2_VSI_T_CTRL: + ret = sxe2_ctrl_vsi_disable_unlock(vsi); + break; + case SXE2_VSI_T_MACVLAN: + ret = sxe2_macvlan_vsi_disable(vsi); + break; + default: + break; + } + + set_bit(SXE2_VSI_S_DISABLE, vsi->state); + + return ret; +} + +s32 sxe2_main_vsi_disable_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (vsi->type != SXE2_VSI_T_PF) + return -EINVAL; + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + return ret; + + if (vsi->netdev) + ret = sxe2_vsi_close(vsi); + + set_bit(SXE2_VSI_S_DISABLE, vsi->state); + + return ret; +} + +s32 sxe2_ctrl_vsi_disable_unlock(struct sxe2_vsi *vsi) +{ + if (vsi->type != SXE2_VSI_T_CTRL) + return -EINVAL; + + return __sxe2_vsi_disable_unlock(vsi); +} + +s32 sxe2_macvlan_vsi_disable(struct sxe2_vsi *vsi) +{ + if (vsi->type != SXE2_VSI_T_MACVLAN) + return -EINVAL; + + return __sxe2_vsi_disable_unlock(vsi); +} + +s32 sxe2_esw_vsi_disable_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (vsi->type != SXE2_VSI_T_ESW) + return ret; + + if (test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + return ret; + + ret = sxe2_vsi_close(vsi); + + set_bit(SXE2_VSI_S_DISABLE, vsi->state); + + return ret; +} + +static s32 __sxe2_vsi_enable_unlock(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (!test_bit(SXE2_VSI_S_DISABLE, vsi->state)) + return 0; + + ret = sxe2_vsi_open(vsi); + if (!ret) + clear_bit(SXE2_VSI_S_DISABLE, vsi->state); + + return ret; +} + +s32 sxe2_main_vsi_open(struct sxe2_vsi *vsi) +{ + s32 ret = 0; + + if (vsi->type != SXE2_VSI_T_PF) + return -EINVAL; + + if (vsi->netdev && 
netif_running(vsi->netdev))
+		ret = sxe2_vsi_open(vsi);
+
+	return ret;
+}
+
+s32 sxe2_main_vsi_enable_unlock(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+
+	if (vsi->type != SXE2_VSI_T_PF)
+		return -EINVAL;
+
+	if (!test_bit(SXE2_VSI_S_DISABLE, vsi->state))
+		return 0;
+
+	if (vsi->netdev && netif_running(vsi->netdev)) {
+		ret = sxe2_vsi_open(vsi);
+		if (ret)
+			return ret;
+	}
+
+	clear_bit(SXE2_VSI_S_DISABLE, vsi->state);
+
+	return ret;
+}
+
+s32 sxe2_ctrl_vsi_enable_unlock(struct sxe2_vsi *vsi)
+{
+	if (vsi->type != SXE2_VSI_T_CTRL)
+		return -EINVAL;
+
+	return __sxe2_vsi_enable_unlock(vsi);
+}
+
+s32 sxe2_macvlan_vsi_enable_unlock(struct sxe2_vsi *vsi)
+{
+	if (vsi->type != SXE2_VSI_T_MACVLAN)
+		return -EINVAL;
+
+	if (test_bit(SXE2_VSI_S_MACVLAN_DEL, vsi->state))
+		return 0;
+
+	return __sxe2_vsi_enable_unlock(vsi);
+}
+
+s32 sxe2_esw_vsi_enable_unlock(struct sxe2_vsi *vsi)
+{
+	if (vsi->type != SXE2_VSI_T_ESW)
+		return -EINVAL;
+	return __sxe2_vsi_enable_unlock(vsi);
+}
+
+s32 sxe2_vsi_enable_unlock(struct sxe2_vsi *vsi)
+{
+	s32 ret = 0;
+
+	if (!test_bit(SXE2_VSI_S_DISABLE, vsi->state))
+		return 0;
+
+	switch (vsi->type) {
+	case SXE2_VSI_T_PF:
+		ret = sxe2_main_vsi_enable_unlock(vsi);
+		break;
+	case SXE2_VSI_T_CTRL:
+		ret = sxe2_ctrl_vsi_enable_unlock(vsi);
+		break;
+	case SXE2_VSI_T_MACVLAN:
+		ret = sxe2_macvlan_vsi_enable_unlock(vsi);
+		break;
+	case SXE2_VSI_T_ESW:
+		ret = sxe2_esw_vsi_enable_unlock(vsi);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+s32 sxe2_vsi_disable_all(struct sxe2_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+	u16 i;
+
+	rtnl_lock();
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi)
+			continue;
+		if (sxe2_vsi_disable_unlock(vsi))
+			ret = -EIO;
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	rtnl_unlock();
+
+	return ret;
+}
+
+s32 sxe2_vsi_enable_by_type(struct sxe2_adapter *adapter, enum sxe2_vsi_type type)
+{
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+	u16 i;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi || vsi->type != type)
+			continue;
+		if (sxe2_vsi_enable_unlock(vsi))
+			ret = -EIO;
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	return ret;
+}
+
+struct sxe2_vsi_coalesce {
+	u8 tx_valid;
+	u8 rx_valid;
+	u16 rate_limit;
+	u16 tx_itr;
+	u16 rx_itr;
+	u16 tx_itr_mode;
+	u16 rx_itr_mode;
+};
+
+STATIC void sxe2_vsi_coalesce_store(struct sxe2_vsi *vsi,
+				    struct sxe2_vsi_coalesce *coalesce)
+{
+	s32 idx;
+	struct sxe2_irq_data *irq_data;
+
+	if (!vsi->irqs.irq_data)
+		return;
+
+	sxe2_for_each_vsi_irq(vsi, idx)
+	{
+		irq_data = vsi->irqs.irq_data[idx];
+		coalesce[idx].tx_itr = irq_data->tx.itr_setting;
+		coalesce[idx].rx_itr = irq_data->rx.itr_setting;
+		coalesce[idx].rate_limit = irq_data->rate_limit;
+		coalesce[idx].tx_itr_mode = irq_data->tx.itr_mode;
+		coalesce[idx].rx_itr_mode = irq_data->rx.itr_mode;
+
+		if (SXE2_IRQ_HAS_TXQ(irq_data))
+			coalesce[idx].tx_valid = true;
+		if (SXE2_IRQ_HAS_RXQ(irq_data))
+			coalesce[idx].rx_valid = true;
+	}
+}
+
+STATIC void sxe2_vsi_coalesce_set(struct sxe2_vsi *vsi,
+				  struct sxe2_vsi_coalesce *coalesce,
+				  u16 old_irq_cnt)
+{
+	s32 i;
+	u16 default_coalesce_tx = coalesce[0].tx_itr;
+	u16 default_coalesce_rx = coalesce[0].rx_itr;
+	u16 default_tx_itr_mode = coalesce[0].tx_itr_mode;
+	u16 default_rx_itr_mode = coalesce[0].rx_itr_mode;
+	u16 default_rate_limit = coalesce[0].rate_limit;
+	struct sxe2_irq_data *irq_data;
+	struct sxe2_hw *hw = &vsi->adapter->hw;
+
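+	/*
+	 * Vectors that existed before the rebuild get their stored ITR and
+	 * rate limit back; a vector without a valid stored sample, and any
+	 * vector beyond old_irq_cnt, inherits vector 0's settings as the
+	 * default.
+	 */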
+	for (i = 0; i < old_irq_cnt && i < vsi->irqs.cnt; i++) {
+		irq_data = vsi->irqs.irq_data[i];
+		if (SXE2_IRQ_HAS_TXQ(irq_data) && coalesce[i].tx_valid) {
+			irq_data->tx.itr_mode = coalesce[i].tx_itr_mode;
+			irq_data->tx.itr_setting = coalesce[i].tx_itr;
+			sxe2_itr_set(irq_data, &irq_data->tx, coalesce[i].tx_itr);
+		} else if (SXE2_IRQ_HAS_TXQ(irq_data)) {
+			irq_data->tx.itr_mode = default_tx_itr_mode;
+			irq_data->tx.itr_setting = default_coalesce_tx;
+			sxe2_itr_set(irq_data, &irq_data->tx, default_coalesce_tx);
+		}
+		if (SXE2_IRQ_HAS_RXQ(irq_data) && coalesce[i].rx_valid) {
+			irq_data->rx.itr_mode = coalesce[i].rx_itr_mode;
+			irq_data->rx.itr_setting = coalesce[i].rx_itr;
+			sxe2_itr_set(irq_data, &irq_data->rx, coalesce[i].rx_itr);
+		} else if (SXE2_IRQ_HAS_RXQ(irq_data)) {
+			irq_data->rx.itr_mode = default_rx_itr_mode;
+			irq_data->rx.itr_setting = default_coalesce_rx;
+			sxe2_itr_set(irq_data, &irq_data->rx, default_coalesce_rx);
+		}
+		irq_data->rate_limit = coalesce[i].rate_limit;
+		sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf,
+					   irq_data->rate_limit);
+	}
+	for (; i < vsi->irqs.cnt; i++) {
+		irq_data = vsi->irqs.irq_data[i];
+
+		irq_data->tx.itr_setting = default_coalesce_tx;
+		sxe2_itr_set(irq_data, &irq_data->tx, default_coalesce_tx);
+
+		irq_data->rx.itr_setting = default_coalesce_rx;
+		sxe2_itr_set(irq_data, &irq_data->rx, default_coalesce_rx);
+
+		irq_data->rate_limit = default_rate_limit;
+		sxe2_hw_irq_rate_limit_set(hw, irq_data->idx_in_pf,
+					   irq_data->rate_limit);
+	}
+}
+
+#ifdef HAVE_XDP_SUPPORT
+STATIC s32 sxe2_vsi_xdp_qs_stats_realloc(struct sxe2_vsi *vsi)
+{
+	struct sxe2_adapter *adapter = vsi->adapter;
+	u16 req_txq = (u16)vsi->num_xdp_txq;
+	struct sxe2_queue_stats **xdp_stats;
+	struct sxe2_vsi_qs_stats *vsi_qs_stats;
+	u16 i;
+
+	vsi_qs_stats = &vsi->vsi_qs_stats;
+	if (req_txq < vsi->xdp_rings.q_cnt) {
+		for (i = req_txq; i < vsi->xdp_rings.q_cnt; i++) {
+			kfree(vsi_qs_stats->xdp_stats[i]);
+			WRITE_ONCE(vsi_qs_stats->xdp_stats[i], NULL);
+		}
+	}
+
+	xdp_stats = vsi_qs_stats->xdp_stats;
+#ifdef SXE2_TEST
+	vsi_qs_stats->xdp_stats = (struct sxe2_queue_stats **)SXE2_REALLOC(
+		vsi_qs_stats->xdp_stats, req_txq,
+		sizeof(*vsi_qs_stats->xdp_stats), GFP_KERNEL | __GFP_ZERO,
+		vsi->xdp_rings.q_cnt);
+#else
+	vsi_qs_stats->xdp_stats = (struct sxe2_queue_stats **)SXE2_REALLOC(
+		vsi_qs_stats->xdp_stats, req_txq,
+		sizeof(*vsi_qs_stats->xdp_stats), GFP_KERNEL | __GFP_ZERO);
+#endif
+	if (!vsi_qs_stats->xdp_stats) {
+		LOG_ERROR_BDF("alloc txqs stats failed, count: %d, size: %zu.\n",
+			      vsi->xdp_rings.q_cnt,
+			      sizeof(*vsi_qs_stats->xdp_stats));
+		vsi_qs_stats->xdp_stats = xdp_stats;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+#endif
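+
+/*
+ * Re-apply the firmware VSI context, Tx scheduler, RSS and source-prune
+ * state after a rebuild; on a partial failure the hardware VSI context is
+ * cleared again via l_defcfg and the error is returned to the caller.
+ */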
+s32 sxe2_vsi_recfg(struct sxe2_vsi *vsi)
+{
+	s32 ret;
+	struct sxe2_adapter *adapter = vsi->adapter;
+
+	ret = sxe2_vsi_hw_cfg(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%d hw cfg failed %d\n", vsi->id_in_pf, ret);
+		goto l_end;
+	}
+
+	ret = sxe2_txsched_lan_vsi_cfg(vsi);
+	if (ret)
+		goto l_defcfg;
+
+	ret = sxe2_vsi_rss_init(vsi);
+	if (ret)
+		goto l_defcfg;
+
+	if (vsi->type == SXE2_VSI_T_PF) {
+		ret = sxe2_rss_replay_hash_cfg(&vsi->adapter->rss_flow_ctxt,
+					       vsi->id_in_pf);
+		if (ret)
+			goto l_defcfg;
+	}
+
+	(void)mutex_lock(&adapter->switch_ctxt.evb_mode_lock);
+	if (adapter->switch_ctxt.evb_mode == BRIDGE_MODE_VEB ||
+	    vsi->type == SXE2_VSI_T_LB) {
+		ret = sxe2_vsi_loopback_control(adapter, vsi->idx_in_dev, true);
+		if (ret) {
+			(void)mutex_unlock(&adapter->switch_ctxt.evb_mode_lock);
+			goto l_defcfg;
+		}
+	}
+	(void)mutex_unlock(&adapter->switch_ctxt.evb_mode_lock);
+
+	if (vsi->type == SXE2_VSI_T_PF || vsi->type == SXE2_VSI_T_VF) {
+		ret = sxe2_src_vsi_prune_control(adapter, vsi->idx_in_dev, true);
+		if (ret)
+			goto l_defcfg;
+	}
+
+	return ret;
+l_defcfg:
+	(void)sxe2_vsi_hw_decfg(adapter, vsi->idx_in_dev);
+
+l_end:
+	return ret;
+}
+
+s32 sxe2_vsi_rebuild(struct sxe2_vsi *vsi, bool init)
+{
+	s32 ret;
+	u16 old_irq_cnt = vsi->irqs.cnt;
+
+	if (!vsi->irqs.coalesce) {
+		vsi->irqs.coalesce =
+			kcalloc(old_irq_cnt, sizeof(*vsi->irqs.coalesce),
+				GFP_KERNEL);
+		if (!vsi->irqs.coalesce)
+			return -ENOMEM;
+
+		sxe2_vsi_coalesce_store(vsi, vsi->irqs.coalesce);
+	}
+
+	(void)sxe2_txsched_lan_vsi_rm(vsi);
+
+	sxe2_vsi_irqs_deinit(vsi);
+
+#ifdef HAVE_XDP_SUPPORT
+	if (sxe2_xdp_is_enable(vsi) && vsi->type == SXE2_VSI_T_PF)
+		(void)sxe2_destroy_xdp_rings(vsi, true);
+#endif
+
+	sxe2_vsi_queues_deinit(vsi);
+
+	ret = sxe2_vsi_queues_init(vsi);
+	if (ret)
+		goto l_queue_init_failed;
+
+	ret = sxe2_vsi_qs_stats_init(vsi);
+	if (ret)
+		goto l_queue_init_failed;
+
+	ret = sxe2_vsi_irqs_init(vsi);
+	if (ret)
+		goto l_queue_init_failed;
+
+	ret = sxe2_vsi_recfg(vsi);
+	if (ret)
+		goto l_queue_init_failed;
+
+#ifdef HAVE_XDP_SUPPORT
+	if (sxe2_xdp_is_enable(vsi) && vsi->type == SXE2_VSI_T_PF) {
+		sxe2_xdp_queue_cnt_set(vsi, vsi->rxqs.q_cnt);
+
+		(void)sxe2_vsi_xdp_qs_stats_realloc(vsi);
+		ret = sxe2_prepare_xdp_rings(vsi, vsi->xdp_prog);
+		if (ret)
+			goto l_queue_init_failed;
+	}
+#endif
+
+	sxe2_vsi_coalesce_set(vsi, vsi->irqs.coalesce, old_irq_cnt);
+	kfree(vsi->irqs.coalesce);
+	vsi->irqs.coalesce = NULL;
+
+	return 0;
+
+l_queue_init_failed:
+	(void)sxe2_vsi_disable_unlock(vsi);
+
+	return ret;
+}
+
+s32 sxe2_vsi_rebuild_by_type(struct sxe2_adapter *adapter, enum sxe2_vsi_type type,
+			     bool init)
+{
+	s32 ret = 0;
+	struct sxe2_vsi *vsi;
+	u16 i;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi || vsi->type != type)
+			continue;
+
+		if (vsi->type == SXE2_VSI_T_PF) {
+			ret = sxe2_switch_fltr_restore_prepare(adapter);
+			if (ret) {
+				LOG_DEV_ERR("adapter %d switch filter restore prepare failed, ret %d\n",
+					    adapter->pf_idx, ret);
+				break;
+			}
+		}
+
+		ret = sxe2_vsi_rebuild(vsi, init);
+		if (ret)
+			break;
+
+		ret = sxe2_vsi_l2_fltr_restore(vsi);
+		if (ret)
+			break;
+
+		LOG_DEBUG_BDF("pf_idx[%u] vsi_id_in_pf[%u] vsi_idx_in_dev[%u].\n",
+			      adapter->pf_idx, vsi->id_in_pf, vsi->idx_in_dev);
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+	return ret;
+}
+
+struct sxe2_vsi *sxe2_macvlan_vsi_create(struct sxe2_adapter *adapter)
+{
+	struct sxe2_vsi_cfg_params vsi_create = {};
+	struct sxe2_vsi *vsi;
+
+	vsi_create.type = SXE2_VSI_T_MACVLAN;
+
+	vsi = sxe2_vsi_create_unlock(adapter, &vsi_create);
+	if (!vsi)
+		return NULL;
+
+	return vsi;
+}
+
+void sxe2_vsi_id_in_dev_clear(struct sxe2_adapter *adapter)
+{
+	struct sxe2_vsi *vsi;
+	u16 i;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	sxe2_for_each_vsi(&adapter->vsi_ctxt, i)
+	{
+		vsi = adapter->vsi_ctxt.vsi[i];
+		if (!vsi)
+			continue;
+
+		vsi->idx_in_dev = SXE2_VSI_ID_INVALID;
+	}
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+}
+
+struct sxe2_addr_node *sxe2_mac_addr_find(struct sxe2_vsi *vsi, const u8 *macaddr)
+{
+	struct sxe2_addr_node *node = NULL;
+
+	list_for_each_entry(node, &vsi->mac_filter.mac_addr_list, list)
+	{
+		if (ether_addr_equal(macaddr, node->mac_addr))
+			return node;
+	}
+	return
NULL; +} + +int sxe2_mac_addr_add(struct sxe2_vsi *vsi, const u8 *addr, + enum sxe2_mac_owner owner) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_addr_node *node = NULL; + struct sxe2_mac_filter *mac_filter = &vsi->mac_filter; + struct sxe2_vsi *user_pf_vsi = NULL; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&switch_ctxt->mac_addr_lock); + + node = sxe2_mac_addr_find(vsi, addr); + if (!node) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + LOG_ERROR_BDF("create list node for mac:%pM failed.\n", + addr); + ret = -ENOMEM; + goto l_end; + } + + user_pf_vsi = sxe2_vsi_get_by_type_unlock(adapter, + SXE2_VSI_T_DPDK_PF); + if (user_pf_vsi && is_unicast_ether_addr(addr)) { + if (!sxe2_mac_addr_find(user_pf_vsi, addr)) { + ret = sxe2_mac_rule_add(vsi, addr); + if (ret == -EEXIST) { + LOG_WARN_BDF("mac filter exist, addr %pM\n", + addr); + ret = 0; + } else if (ret) { + kfree(node); + goto l_end; + } + } + } else { + ret = sxe2_mac_rule_add(vsi, addr); + if (ret == -EEXIST) { + LOG_WARN_BDF("mac filter exist, addr %pM\n", addr); + ret = 0; + } else if (ret) { + kfree(node); + goto l_end; + } + } + + ether_addr_copy(node->mac_addr, addr); + list_add_tail(&node->list, &mac_filter->mac_addr_list); + } + + set_bit((s32)owner, &node->usage); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +int sxe2_mac_addr_del(struct sxe2_vsi *vsi, const u8 *addr, + enum sxe2_mac_owner owner) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_addr_node *node = NULL; + s32 owner_nr = (s32)owner; + struct sxe2_vsi *user_pf_vsi = NULL; + struct sxe2_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&switch_ctxt->mac_addr_lock); + + node = sxe2_mac_addr_find(vsi, addr); + if (!node) { + LOG_WARN_BDF("mac filter not exist, addr %pM\n", addr); + goto l_end; + } + + if (!test_bit(owner_nr, &node->usage)) { + LOG_WARN_BDF("mac not belong to owner %d, addr %pM, usage %lx\n", + owner_nr, addr, node->usage); + goto l_end; + } + clear_bit(owner_nr, &node->usage); + + if (node->usage) + goto l_end; + + user_pf_vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (user_pf_vsi && is_unicast_ether_addr(addr)) { + if (!sxe2_mac_addr_find(user_pf_vsi, addr)) { + ret = sxe2_mac_rule_del(adapter, vsi->idx_in_dev, addr); + if (ret == -ENOENT) { + LOG_WARN_BDF("mac filter not exist, addr %pM\n", + addr); + ret = 0; + } else if (ret) { + LOG_ERROR_BDF("del mac %pM failed err:%d\n", addr, + ret); + set_bit(owner_nr, &node->usage); + goto l_end; + } + } + } else { + ret = sxe2_mac_rule_del(adapter, vsi->idx_in_dev, addr); + if (ret == -ENOENT) { + LOG_WARN_BDF("mac filter not exist, addr %pM\n", addr); + ret = 0; + } else if (ret) { + LOG_ERROR_BDF("del mac %pM failed err:%d\n", addr, ret); + set_bit(owner_nr, &node->usage); + goto l_end; + } + } + + sxe2_switch_mac_node_del_and_free(node); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +struct sxe2_vsi *sxe2_vsi_get_by_idx(struct sxe2_adapter *adapter, u16 idx_in_dev) +{ + struct sxe2_vsi *vsi = NULL; + s32 i = 0; + bool found = false; + + sxe2_for_each_vsi(&adapter->vsi_ctxt, i) + { + vsi = adapter->vsi_ctxt.vsi[i]; + if (!vsi) + continue; + if (vsi->idx_in_dev == idx_in_dev) { + found = true; + break; + } + } + + if (!found) + vsi = NULL; + + return vsi; +} + +struct sxe2_vsi *sxe2_vsi_get_by_type_unlock(struct sxe2_adapter *adapter, + enum sxe2_vsi_type target_type) +{ + struct sxe2_vsi *vsi; 
+ u16 i; + + lockdep_assert_held(&adapter->vsi_ctxt.lock); + + sxe2_for_each_vsi(&adapter->vsi_ctxt, i) + { + vsi = adapter->vsi_ctxt.vsi[i]; + + if (!vsi) + continue; + + if (vsi->type != target_type) + continue; + + if (target_type == SXE2_VSI_T_VF || + target_type == SXE2_VSI_T_DPDK_VF) { + continue; + } + + return vsi; + } + + return NULL; +} + +s32 sxe2_dpdk_vsi_create(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *params, + struct sxe2_fwc_vsi_crud_resp *resp) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + vsi = sxe2_vsi_create(adapter, params); + if (!vsi) { + LOG_ERROR_BDF("dpdk pf vsi create fail.\n"); + return -ENOMEM; + } + + resp->vsi_id = vsi->idx_in_dev; + + LOG_INFO_BDF("dpdk pf vsi create success vsi_id_in_pf:%d " + "vsi_id_in_dev:%d.\n", + vsi->id_in_pf, vsi->idx_in_dev); + + return ret; +} + +s32 sxe2_dpdk_vsi_destroy(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *params) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, params->vsi_id); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", params->vsi_id); + ret = -EINVAL; + goto l_unlock; + } + + (void)sxe2_user_l2_feature_clean(adapter, vsi->idx_in_dev); + + sxe2_vsi_destroy_unlock(vsi); + LOG_INFO_BDF("dpdk vsi_in_dev:%d destroy done.\n", params->vsi_id); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static s32 sxe2_dpdk_pfvsi_resource_release(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_PF); + if (!vsi) { + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_ERROR_BDF("dpdk vsi null.\n"); + ret = -EINVAL; + return ret; + } + + if (sxe2_txqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u txqs disable failed.\n", vsi->idx_in_dev); + + if (sxe2_rxqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u rxqs disable failed.\n", vsi->idx_in_dev); + + sxe2_vsi_irqs_disable(vsi); + + LOG_INFO_BDF("dpdk vsi id_in_pf:%d id_in_dev:%d destroy done.\n", + vsi->id_in_pf, vsi->idx_in_dev); + + (void)sxe2_txsch_ucmd_subtree_del(adapter, vsi->idx_in_dev, + adapter->tx_sched_ctxt.user_root_teid, + true); + + (void)sxe2_user_l2_feature_clean(adapter, vsi->idx_in_dev); + + mutex_unlock(&adapter->vsi_ctxt.lock); + + sxe2_vsi_destroy(vsi); + + return ret; +} + +static s32 sxe2_dpdk_repr_recover(struct sxe2_adapter *adapter) +{ + u16 idx; + struct sxe2_vf_node *vf_node = NULL; + s32 ret = 0; + + if (test_bit(SXE2_FLAG_SWITCHDEV_ENABLE, adapter->flags)) { + sxe2_for_each_vf(adapter, idx) + { + (void)mutex_lock(SXE2_VF_NODE_LOCK(adapter, idx)); + vf_node = sxe2_vf_node_get(adapter, idx); + ret = sxe2_eswitch_ucmd_repr_cfg(vf_node, false); + if (ret) { + (void)mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + break; + } + (void)mutex_unlock(SXE2_VF_NODE_LOCK(adapter, idx)); + } + } + return ret; +} + +static s32 sxe2_dpdk_reprvsi_resource_release(struct sxe2_adapter *adapter) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + ret = sxe2_dpdk_repr_recover(adapter); + if (ret) + LOG_ERROR_BDF("dpdk reprvsi resource release failed.\n"); + + mutex_lock(&adapter->vsi_ctxt.lock); + + vsi = sxe2_vsi_get_by_type_unlock(adapter, SXE2_VSI_T_DPDK_ESW); + if (!vsi) { + LOG_ERROR_BDF("dpdk vsi null.\n"); + mutex_unlock(&adapter->vsi_ctxt.lock); + ret = -EINVAL; + return ret; + } + if (sxe2_txqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u txqs disable failed.\n", vsi->idx_in_dev); + if (sxe2_rxqs_stop(vsi)) + LOG_DEV_ERR("vsi:%u rxqs disable failed.\n", 
vsi->idx_in_dev); + + sxe2_vsi_irqs_disable(vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +s32 sxe2_dpdk_resource_release(void *pf_adapter, struct sxe2_obj *obj) +{ + s32 ret = 0; + struct sxe2_adapter *adapter = pf_adapter; + + ret = sxe2_dpdk_pfvsi_resource_release(adapter); + if (ret) + LOG_ERROR_BDF("dpdk pfvsi resource release failed.\n"); + + ret = sxe2_dpdk_reprvsi_resource_release(adapter); + if (ret) + LOG_ERROR_BDF("dpdk reprvsi resource release failed.\n"); + + ret = sxe2_dpdk_ipsec_resource_release(adapter, obj); + if (ret) + LOG_ERROR_BDF("dpdk ipsec resource release failed.\n"); + + ret = sxe2_dpdk_q_map_resource_release(adapter, obj); + if (ret) + LOG_ERROR_BDF("dpdk q_map resource release failed.\n"); + + return ret; +} + +s32 sxe2_user_vsi_info_get(struct sxe2_adapter *adapter, u16 vsi_id, + struct sxe2_fwc_func_caps *caps) +{ + s32 ret = 0; + struct sxe2_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = sxe2_vsi_get_by_idx(adapter, vsi_id); + if (!vsi) { + LOG_ERROR_BDF("invalid vsi id:%d.\n", vsi_id); + ret = -EINVAL; + goto l_end; + } + + caps->tx_caps.cnt = vsi->txqs.q_cnt; + if (vsi->type == SXE2_VSI_T_DPDK_ESW) { + caps->tx_caps.base_idx = adapter->q_ctxt.txq_layout.dpdk_esw_offset; + LOG_DEBUG_BDF("dpdk esw vsi queue_in_pf:%d\n", + caps->tx_caps.base_idx); + } else { + caps->tx_caps.base_idx = vsi->txqs.base_idx_in_feature; + LOG_DEBUG_BDF("vsi queue_in_pf:%d\n", caps->tx_caps.base_idx); + } + + caps->msix_caps.cnt = vsi->irqs.cnt; + caps->msix_caps.base_idx = vsi->irqs.base_idx_in_pf; + +l_end: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.h new file mode 100644 index 0000000000000000000000000000000000000000..c52b6cee1e897770a1cf3b890cd69e8e6ecd156f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_vsi.h @@ -0,0 +1,526 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_vsi.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_VSI_H__ +#define __SXE2_VSI_H__ + +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_irq.h" +#include "sxe2_cmd.h" +#include "sxe2_flow.h" +#include "sxe2_mbx_public.h" +#include "sxe2_udp_tunnel.h" + +#ifdef SXE2_TEST +#define STATIC +#else +#define STATIC static +#endif + +struct sxe2_vf_node; + +#define SXE2_VSI_DFLT_TC BIT(0) +#define SXE2_VSI_ID_INVALID (0xFFFF) + +#define sxe2_for_each_vsi(vsi_ctxt, i) \ + for ((i) = 0; (i) < (vsi_ctxt)->max_cnt; \ + (i)++) + +#define sxe2_for_each_vsi_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->txqs.q_cnt; \ + (i)++) + +#define sxe2_for_each_vsi_rxq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->rxqs.q_cnt; \ + (i)++) + +#define sxe2_for_each_vsi_alloc_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->txqs.q_alloc; \ + (i)++) + +#define sxe2_for_each_vsi_alloc_rxq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->rxqs.q_alloc; \ + (i)++) + +#define sxe2_for_each_vsi_irq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->irqs.cnt; (i)++) + +#define sxe2_for_each_tc(i) \ + for ((i) = 0; (i) < IEEE_8021QAZ_MAX_TCS; (i)++) + +#define sxe2_for_each_vsi_q_maxcnt(vsi, i) \ + for ((i) = 0; (i) < (vsi)->vsi_qs_stats.vsi_qs_stats_maxcnt; (i)++) + +#ifdef SXE2_TEST +#define SXE2_REALLOC(p, new_n, new_size, gfp, old_n) \ + sxe2_krealloc_array(p, new_n, new_size, gfp, old_n) +#else +#define SXE2_REALLOC(p, new_n, new_size, gfp) \ + krealloc_array(p, new_n, new_size, gfp) +#endif + +enum sxe2_vsi_type { + SXE2_VSI_T_PF = 0, + SXE2_VSI_T_VF, + SXE2_VSI_T_CTRL, + SXE2_VSI_T_LB, + SXE2_VSI_T_MACVLAN, + SXE2_VSI_T_ESW, + SXE2_VSI_T_RDMA, + SXE2_VSI_T_DPDK_PF, + SXE2_VSI_T_DPDK_VF, + SXE2_VSI_T_DPDK_ESW, + SXE2_VSI_T_NR, +}; + +enum sxe2_vsi_state { + SXE2_VSI_S_DOWN = 0, + SXE2_VSI_S_NEEDS_RESTART, + SXE2_VSI_S_NETDEV_ALLOCED, + SXE2_VSI_S_MAC_FLTR_CHANGED, + SXE2_VSI_S_DISABLE, + SXE2_VSI_S_CLOSE, + SXE2_VSI_S_NAPI_ADDED, + SXE2_VSI_S_MACVLAN_DEL, + SXE2_VSI_S_MAX, +}; + +struct sxe2_vsi_irqs { + u16 cnt; + u16 base_idx_in_pf; + u16 base_idx_in_feature; + struct sxe2_irq_data **irq_data; + irqreturn_t (*proc)(int irq, void *data); + struct sxe2_vsi_coalesce + *coalesce; +}; + +struct sxe2_txsched_q_bw_info { + u8 rl_type; + u32 cir_bw; + u32 pir_bw; +}; + +struct sxe2_vsi_txsched_queue { + u32 teid; + u16 idx_in_dev; + struct sxe2_txsched_q_bw_info bw_info; +}; + +struct sxe2_vsi_txsched { + u16 q_cnt[IEEE_8021QAZ_MAX_TCS]; + u16 vsi_teid; + u16 vsi_node_cnt; + struct sxe2_txsched_node *node; + struct sxe2_txsched_q_bw_info vsi_bw_info; + struct sxe2_vsi_txsched_queue *q[IEEE_8021QAZ_MAX_TCS]; +}; + +struct sxe2_vsi_queues { + u16 base_idx_in_feature; + u16 q_cnt; + u16 q_alloc; + u16 depth; + u16 rx_buf_len; + u16 max_frame; + struct sxe2_queue **q; + u16 req_q_cnt; +}; + +struct sxe2_tc_info { + u16 rxq_offset; + u16 txq_offset; + u16 txq_cnt; + u16 rxq_cnt; +}; + +struct sxe2_vsi_tc { + u8 tc_cnt; + u8 tc_map; + u16 rxq_pow; + struct sxe2_tc_info info[IEEE_8021QAZ_MAX_TCS]; +}; + +struct sxe2_vsi_rss { + u16 lut_size; + u16 queue_size; + u8 *lut; + u8 *hkey; + u8 lut_type; + u8 hash_type; + u8 global_lut_id; +}; + +#define SXE2_FNAV_MAX_FILTERS (16 * 1024) + +struct sxe2_vsi_fnav { + struct mutex flow_cfg_lock; + u16 space_gsize; + u16 space_bsize; + struct list_head flow_cfg_list; + + DECLARE_BITMAP(flow_ids, SXE2_FNAV_MAX_FILTERS); + struct list_head filter_list; + u32 filter_cnt; +}; + +#define SXE2_ACL_MAX_FILTERS (2048) +struct sxe2_vsi_acl { + struct mutex 
flow_cfg_lock; + struct list_head flow_cfg_list; + + DECLARE_BITMAP(filter_ids, SXE2_ACL_MAX_FILTERS); + struct list_head filter_list; + u32 filter_cnt; +}; + +struct sxe2_vsi_udp_tunnel { +#define SXE2_UDP_TUNNEL_MAX_PROTO (13) + struct sxe2_udp_tunnel_cfg cfgs[SXE2_UDP_TUNNEL_MAX_PROTO]; +}; + +struct sxe2_vsi_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_unnecessary_inner; + u64 rx_lro_packets; + u64 rx_lro_bytes; + u64 rx_vlan_strip; + u64 rx_pkts_sw_drop; + u64 rx_buff_alloc_err; + u64 rx_pg_alloc_fail; + u64 rx_csum_err; + u64 rx_lro_count; + u64 rx_page_alloc; + u64 rx_non_eop_descs; + u64 rx_xdp_pkts; + u64 rx_xdp_bytes; + u64 rx_xdp_pass; + u64 rx_xdp_drop; + u64 rx_xdp_unknown; + u64 rx_xdp_redirect; + u64 rx_xdp_redirect_fail; + u64 rx_xdp_tx_xmit; + u64 rx_xdp_tx_xmit_fail; + u64 rx_xsk_drop; + u64 rx_xsk_redirect; + u64 rx_xsk_redirect_fail; + u64 rx_xsk_packets; + u64 rx_xsk_bytes; + u64 rx_xsk_pass; + u64 rx_xsk_unknown; + u64 rx_xsk_tx_xmit; + u64 rx_xsk_tx_xmit_fail; + u64 rx_pa_err; + + u64 tx_packets; + u64 tx_bytes; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_vlan_insert; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_dropped; + u64 tx_xmit_more; + u64 tx_linearize; + u64 tx_busy; + u64 tx_restart; + u64 tx_tso_linearize_chk; +}; + +struct sxe2_vsi_qs_stats { + struct sxe2_queue_stats *txqs_stats; + struct sxe2_queue_stats *rxqs_stats; + struct sxe2_queue_stats **xdp_stats; + u16 vsi_qs_stats_maxcnt; +}; + +struct sxe2_vsi_stats { + struct sxe2_vsi_hw_stats vsi_hw_stats; + struct sxe2_vsi_hw_stats parse_vsi_hw_stats; + struct sxe2_vsi_sw_stats vsi_sw_stats; +}; + +enum sxe2_vsi_flags { + SXE2_VSI_FLAG_LRO_ENABLE = 0, + SXE2_VSI_FLAG_RXFCS_ENABLE = 1, + SXE2_VSI_FLAG_FC_ON, + SXE2_VSI_FLAGS_NBITS +}; + +enum sxe2_mac_owner { + SXE2_MAC_OWNER_NETDEV = 0, + SXE2_MAC_OWNER_UC_MC, + SXE2_MAC_OWNER_ROCE, +}; + +struct sxe2_addr_node { + struct list_head list; + u8 mac_addr[ETH_ALEN]; + unsigned long usage; +}; + +struct sxe2_mac_filter { + struct list_head mac_addr_list; + struct list_head tmp_sync_list; + struct list_head tmp_unsync_list; + struct mutex sync_lock; +}; + +struct sxe2_vsi_cfg_params { + enum sxe2_vsi_type type; + struct sxe2_vf_node *vf; + u16 txq_base_idx; + u16 txq_cnt; + u16 rxq_base_idx; + u16 rxq_cnt; + u16 irq_base_idx; + u16 irq_cnt; + u16 vsi_id; +}; + +struct sxe2_user_vlan_offload_cfg { + u8 outer_insert; + u8 outer_strip; + u8 inner_insert; + u8 inner_strip; +}; + +struct sxe2_vlan { + u16 tpid; + u16 vid; + u8 prio; + u8 rsv[3]; +}; + +struct sxe2_vsi_user_vlan_info { + struct sxe2_vlan port_vlan; + u8 port_vlan_exsit; + u8 rsv[3]; + struct sxe2_user_vlan_offload_cfg vlan_offload; +}; +struct sxe2_vsilist_prune_info { + u16 vsi_id_u; + u16 vsi_id_k; +}; + +struct sxe2_vsi { + struct sxe2_adapter *adapter; + struct net_device *netdev; + u16 id_in_pf; + u16 idx_in_dev; + u8 is_from_pool; + enum sxe2_vsi_type type; + DECLARE_BITMAP(state, SXE2_VSI_S_MAX); + struct sxe2_vsi_irqs irqs; + struct sxe2_vsi_queues txqs; + struct sxe2_vsi_txsched txsched; + struct sxe2_vsi_queues rxqs; + struct sxe2_queue * + *origin_txqs; + struct sxe2_vsi_tc tc; + struct sxe2_mac_filter mac_filter; + struct sxe2_vsi_rss rss_ctxt; + struct sxe2_vsi_fnav fnav; + struct sxe2_vsi_acl acl; + struct sxe2_vf_node *vf_node; + struct sxe2_vsi_qs_stats vsi_qs_stats; + struct sxe2_vsi_stats vsi_stats; + struct sxe2_vsi_user_vlan_info user_vlan; + struct 
sxe2_vsi_udp_tunnel udp_tunnel; + struct sxe2_vsilist_prune_info src_prune; + + struct bpf_prog *xdp_prog; + struct sxe2_vsi_queues xdp_rings; + unsigned long *af_xdp_zc_qps; + DECLARE_BITMAP(flags, SXE2_VSI_FLAGS_NBITS); + u16 num_xdp_txq; +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef HAVE_AF_XDP_NETDEV_UMEM + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; +#endif +#endif +}; + +struct sxe2_vsi_context { + struct sxe2_vsi **vsi; + u16 next_vsi_id; + u16 cnt; + u16 max_cnt; + u16 base_idx_in_dev; + struct mutex lock; + struct sxe2_vsi *main_vsi; + struct sxe2_vsi *ctrl_vsi; +}; + +s32 sxe2_main_vsi_create(struct sxe2_adapter *adapter); + +s32 sxe2_ctrl_vsi_init(struct sxe2_adapter *adapter); + +void sxe2_ctrl_vsi_deinit(struct sxe2_adapter *adapter); + +struct sxe2_vsi *sxe2_loopback_vsi_create(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_recfg(struct sxe2_vsi *vsi); + +void sxe2_vsi_destroy(struct sxe2_vsi *vsi); + +void sxe2_vsi_destroy_unlock(struct sxe2_vsi *vsi); + +void sxe2_vsi_destroy_all(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_open(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_up(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_close(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_down(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_down_up_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_down_up(struct sxe2_vsi *vsi); + +void sxe2_napi_add(struct sxe2_vsi *vsi); + +void sxe2_napi_del(struct sxe2_vsi *vsi); + +void sxe2_vsi_rxq_clean(struct sxe2_vsi *vsi); + +void sxe2_vsi_tc_cfg(struct sxe2_vsi *vsi); + +void sxe2_vsi_queues_irqs_map(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_disable_all(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_disable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_esw_vsi_disable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_enable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_rebuild_by_type(struct sxe2_adapter *adapter, + enum sxe2_vsi_type type, bool init); + +s32 sxe2_vsi_enable_by_type(struct sxe2_adapter *adapter, + enum sxe2_vsi_type type); + +void sxe2_vsis_irqs_deinit(struct sxe2_adapter *adapter); + +s32 sxe2_vsis_irqs_init(struct sxe2_adapter *adapter); + +s32 __sxe2_vf_vsi_create(struct sxe2_vf_node *vf_node); + +void sxe2_queue_add(struct sxe2_queue *queue, struct sxe2_list *head); + +void sxe2_vsi_queues_irqs_unmap(struct sxe2_vsi *vsi); + +void sxe2_vsi_irqs_setup(struct sxe2_vsi *vsi); + +void sxe2_vsi_irqs_release(struct sxe2_vsi *vsi); + +struct sxe2_vsi *sxe2_macvlan_vsi_create(struct sxe2_adapter *adapter); + +s32 sxe2_eswitch_vsi_create(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_rebuild(struct sxe2_vsi *vsi, bool init); + +void sxe2_vsi_id_in_dev_clear(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_enable_unlock(struct sxe2_vsi *vsi); +s32 sxe2_rdma_vsi_create(struct sxe2_adapter *adapter); +void sxe2_rdma_vsi_destroy(struct sxe2_adapter *adapter); + +s32 sxe2_vsi_queues_get(struct sxe2_vsi *vsi, u8 q_type); + +void sxe2_irq_rxqs_cause_setup(struct sxe2_irq_data *irq_data); + +void sxe2_irq_txqs_cause_setup(struct sxe2_irq_data *irq_data); + +int sxe2_mac_addr_add(struct sxe2_vsi *vsi, const u8 *addr, + enum sxe2_mac_owner owner); + +int sxe2_mac_addr_del(struct sxe2_vsi *vsi, const u8 *addr, + enum sxe2_mac_owner owner); + +s32 sxe2_main_vsi_disable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_ctrl_vsi_disable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_macvlan_vsi_disable(struct sxe2_vsi *vsi); + +s32 sxe2_main_vsi_open(struct sxe2_vsi *vsi); + +s32 sxe2_main_vsi_enable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_ctrl_vsi_enable_unlock(struct sxe2_vsi 
*vsi); + +s32 sxe2_macvlan_vsi_enable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_esw_vsi_enable_unlock(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_irqs_request(struct sxe2_vsi *vsi); + +void sxe2_irq_txqs_cause_clear(struct sxe2_irq_data *irq_data); + +void sxe2_irq_rxqs_cause_clear(struct sxe2_irq_data *irq_data); + +void sxe2_vsi_irqs_disable(struct sxe2_vsi *vsi); + +void sxe2_vsi_irqs_clear_free(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_irqs_configure(struct sxe2_vsi *vsi); + +s32 sxe2_vsi_check(struct sxe2_vsi *vsi); + +struct sxe2_vsi *sxe2_vsi_get_by_idx(struct sxe2_adapter *adapter, + u16 idx_in_dev); + +struct sxe2_addr_node *sxe2_mac_addr_find(struct sxe2_vsi *vsi, + const u8 *macaddr); + +struct sxe2_vsi *sxe2_vsi_get_by_type_unlock(struct sxe2_adapter *adapter, + enum sxe2_vsi_type target_type); + +u16 sxe2_vsi_get(struct sxe2_vsi_context *vsi_ctxt); + +void sxe2_vsi_put(struct sxe2_vsi_context *vsi_ctxt, u16 vsi_id); + +struct sxe2_vsi *sxe2_vsi_create(struct sxe2_adapter *adapter, + struct sxe2_vsi_cfg_params *vsi_create); + +void sxe2_vsi_destroy(struct sxe2_vsi *vsi); + +bool sxe2_vsi_id_is_valid(struct sxe2_adapter *adapter, u16 vsi_id); + +s32 sxe2_dpdk_resource_release(void *adapter, struct sxe2_obj *obj); + +s32 sxe2_dpdk_vsi_create(struct sxe2_adapter *adapter, struct sxe2_vsi_cfg_params *params, + struct sxe2_fwc_vsi_crud_resp *resp); + +s32 sxe2_dpdk_vsi_destroy(struct sxe2_adapter *adapter, struct sxe2_vsi_cfg_params *params); + +bool sxe2_vsi_rxft_support_get(struct sxe2_vsi *vsi); + +s32 sxe2_user_vsi_info_get(struct sxe2_adapter *adapter, u16 vsi_id, + struct sxe2_fwc_func_caps *caps); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.c b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.c new file mode 100644 index 0000000000000000000000000000000000000000..6b08e99d2518b7de80b4a9cbb67771b97025e8d3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.c @@ -0,0 +1,1105 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_xsk.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif +#endif + +#include "sxe2_compat.h" +#include "sxe2_common.h" +#include "sxe2.h" +#include "sxe2_log.h" +#include "sxe2_tx.h" +#include "sxe2_rx.h" +#include "sxe2_xsk.h" +#include "sxe2_netdev.h" + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +STATIC void sxe2_qp_reset_stats(struct sxe2_vsi *vsi, u16 q_idx) +{ + memset(vsi->rxqs.q[q_idx]->stats, 0, sizeof(*vsi->rxqs.q[q_idx]->stats)); + memset(vsi->txqs.q[q_idx]->stats, 0, sizeof(*vsi->txqs.q[q_idx]->stats)); + if (sxe2_xdp_is_enable(vsi)) + memset(vsi->xdp_rings.q[q_idx]->stats, 0, + sizeof(*vsi->xdp_rings.q[q_idx]->stats)); +} + +STATIC void sxe2_qp_clean_rings(struct sxe2_vsi *vsi, u16 q_idx) +{ + sxe2_tx_ring_clean(vsi->txqs.q[q_idx]); + if (sxe2_xdp_is_enable(vsi)) { + synchronize_rcu(); + sxe2_tx_ring_clean(vsi->xdp_rings.q[q_idx]); + } + sxe2_rx_ring_clean(vsi->rxqs.q[q_idx]); +} + +STATIC void sxe2_qvec_toggle_napi(struct sxe2_vsi *vsi, + struct sxe2_irq_data *q_vector, bool enable) +{ + if (!vsi->netdev || !q_vector) + return; + + if (enable) + napi_enable(&q_vector->napi); + else + napi_disable(&q_vector->napi); +} + +STATIC void sxe2_qvec_dis_irq(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + struct sxe2_queue *rxq, struct sxe2_irq_data *q_vector) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + + sxe2_hw_txq_irq_cause_switch(hw, txq->idx_in_pf, false); + sxe2_hw_rxq_irq_cause_switch(hw, rxq->idx_in_pf, false); + + if (q_vector) { + synchronize_irq(adapter->irq_ctxt.msix_entries[q_vector->idx_in_pf] + .vector); + sxe2_hw_irq_disable(hw, q_vector->idx_in_pf); + sxe2_flush(hw); + } +} + +STATIC void sxe2_qvec_cfg_msix(struct sxe2_vsi *vsi, struct sxe2_queue *txq, + struct sxe2_queue *rxq, + struct sxe2_irq_data *q_vector) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + + sxe2_irq_itr_init(q_vector); + + sxe2_hw_txq_irq_cause_setup(hw, txq->idx_in_pf, q_vector->tx.itr_idx, + q_vector->idx_in_pf); + + sxe2_hw_rxq_irq_cause_setup(hw, rxq->idx_in_pf, q_vector->rx.itr_idx, + q_vector->idx_in_pf); + + sxe2_flush(hw); +} + +STATIC s32 sxe2_qp_dis(struct sxe2_vsi *vsi, u16 q_idx) +{ + struct sxe2_queue *txq, *rxq; + struct sxe2_irq_data *q_vector; + struct sxe2_adapter *adapter = vsi->adapter; + + s32 err; + + if (q_idx >= vsi->txqs.q_cnt || q_idx >= vsi->rxqs.q_cnt) + return -EINVAL; + + txq = vsi->txqs.q[q_idx]; + rxq = vsi->rxqs.q[q_idx]; + q_vector = rxq->irq_data; + + netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); + + err = sxe2_rxq_ctrl_set(adapter, rxq, false, true); + if (err) + LOG_DEV_INFO("sxe2 stop rx error = %d\n", err); + + err = sxe2_txq_stop(vsi, txq); + if (err) { + LOG_DEV_INFO("sxe2 stop tx error = %d\n", err); + return err; + } + + if (sxe2_xdp_is_enable(vsi)) { + struct sxe2_queue *xdp_ring = vsi->xdp_rings.q[q_idx]; + + err = sxe2_txq_stop(vsi, xdp_ring); + if (err) { + LOG_DEV_INFO("sxe2 stop xdp tx error = %d\n", err); + return err; + } + } + + sxe2_qvec_dis_irq(vsi, txq, rxq, q_vector); + + sxe2_qvec_toggle_napi(vsi, q_vector, false); + sxe2_qp_clean_rings(vsi, q_idx); + sxe2_qp_reset_stats(vsi, q_idx); + + return 0; +} + +static void sxe2_qvec_ena_irq(struct sxe2_vsi *vsi, struct sxe2_irq_data *q_vector) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct sxe2_hw *hw = &adapter->hw; + + if (vsi) + if 
(test_bit(SXE2_VSI_S_DOWN, vsi->state)) + return; + + sxe2_hw_irq_enable(hw, q_vector->idx_in_pf); + sxe2_hw_irq_trigger(hw, q_vector->idx_in_pf); + + sxe2_flush(hw); +} + +STATIC void sxe2_xsk_remove_pool(struct sxe2_vsi *vsi, u16 q_idx) +{ + struct sxe2_queue *rxq; + struct sxe2_queue *xdp_ring; + struct sxe2_adapter *adapter = vsi->adapter; + + if (q_idx >= vsi->txqs.q_cnt || q_idx >= vsi->rxqs.q_cnt) { + LOG_WARN_BDF("queue(%d) is illegal.\n", q_idx); + return; + } + + rxq = vsi->rxqs.q[q_idx]; + rxq->xsk_pool = NULL; + if (sxe2_xdp_is_enable(vsi)) { + xdp_ring = vsi->xdp_rings.q[q_idx]; + xdp_ring->xsk_pool = NULL; + } +} + +STATIC s32 sxe2_qp_ena(struct sxe2_vsi *vsi, u16 q_idx, bool pool_present) +{ + struct sxe2_queue *txq, *rxq; + struct sxe2_irq_data *q_vector; + s32 err; + + if (q_idx >= vsi->txqs.q_cnt || q_idx >= vsi->rxqs.q_cnt) + return -EINVAL; + + txq = vsi->txqs.q[q_idx]; + rxq = vsi->rxqs.q[q_idx]; + q_vector = rxq->irq_data; + + err = sxe2_hw_txq_configure(vsi, txq); + if (err) + goto end; + +#ifdef HAVE_XDP_SUPPORT + if (sxe2_xdp_is_enable(vsi)) { + struct sxe2_queue *xdp_ring = vsi->xdp_rings.q[q_idx]; + + err = sxe2_hw_txq_configure(vsi, xdp_ring); + if (err) + goto end; + sxe2_set_ring_xdp(xdp_ring); + xdp_ring->xsk_pool = sxe2_xsk_pool(xdp_ring); + } +#endif + + err = sxe2_vsi_cfg_rxq(rxq); + if (err) + goto end; + + sxe2_qvec_cfg_msix(vsi, txq, rxq, q_vector); + + err = sxe2_rxq_ctrl_set(vsi->adapter, rxq, true, true); + if (err) + goto end; + + sxe2_qvec_toggle_napi(vsi, q_vector, true); + sxe2_qvec_ena_irq(vsi, q_vector); + + netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); +end: + if (!pool_present) + sxe2_xsk_remove_pool(vsi, q_idx); + + return err; +} + +#ifndef HAVE_AF_XDP_NETDEV_UMEM +STATIC int sxe2_xsk_alloc_umems(struct sxe2_vsi *vsi) +{ + if (vsi->xsk_umems) + return 0; + + vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems), + GFP_KERNEL); + + if (!vsi->xsk_umems) { + vsi->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +STATIC void sxe2_xsk_remove_umem(struct sxe2_vsi *vsi, u16 qid) +{ + vsi->xsk_umems[qid] = NULL; + vsi->num_xsk_umems_used--; + + if (vsi->num_xsk_umems_used == 0) { + kfree(vsi->xsk_umems); + vsi->xsk_umems = NULL; + vsi->num_xsk_umems = 0; + } +} +#endif + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +bool sxe2_alloc_rx_bufs_zc(struct sxe2_queue *rxq, u16 count) +#else +static bool sxe2_alloc_rx_bufs_zc(struct sxe2_queue *rxq, u16 count, + bool (*alloc)(struct sxe2_queue *, + struct sxe2_rx_buf *)) +#endif +{ + union sxe2_rx_desc *rx_desc; + u16 ntu = rxq->next_to_use; + struct sxe2_rx_buf *rx_buf; + bool ok = true; +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma_addr_t dma; +#endif + + if (!count) + return true; + + rx_desc = SXE2_RX_DESC(rxq, ntu); + rx_buf = &rxq->rx_buf[ntu]; + + do { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + rx_buf->xdp = xsk_buff_alloc(rxq->xsk_pool); + if (!rx_buf->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(rx_buf->xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); +#else + if (!alloc(rxq, rx_buf)) { + ok = false; + break; + } + + dma_sync_single_range_for_device(rxq->dev, rx_buf->dma, 0, + rxq->rx_buf_len, DMA_BIDIRECTIONAL); + + rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma); +#endif + rx_desc->wb.status0_err = 0; + rx_desc++; + rx_buf++; + ntu++; + if (unlikely(ntu == rxq->depth)) { + rx_desc = SXE2_RX_DESC(rxq, 0); + rx_buf = rxq->rx_buf; + ntu = 0; + } + } while (--count); + + if (rxq->next_to_use != ntu) { + rx_desc->wb.status0_err = 0; + 
sxe2_rxq_tail_update(rxq, ntu); + } + + return ok; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +static s32 sxe2_xsk_umem_dma_map(struct sxe2_vsi *vsi, struct xdp_umem *umem) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 i; + + for (i = 0; i < umem->npgs; i++) { + dma_addr_t dma = dma_map_page_attrs( + dev, umem->pgs[i], 0, PAGE_SIZE, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (dma_mapping_error(dev, dma)) { + LOG_DEV_DEBUG("XSK UMEM DMA mapping error on page num %d/n", + i); + goto out_unmap; + } + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (; i > 0; i--) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_WEAK_ORDERING); + umem->pages[i].dma = 0; + } + + return -EFAULT; +} + +static void sxe2_xsk_umem_dma_unmap(struct sxe2_vsi *vsi, struct xdp_umem *umem) +{ + struct sxe2_adapter *adapter = vsi->adapter; + struct device *dev = SXE2_ADAPTER_TO_DEV(adapter); + u32 i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_WEAK_ORDERING); + + umem->pages[i].dma = 0; + } +} + +void sxe2_zca_free(struct zero_copy_allocator *alloc, unsigned long handle_addr) +{ + struct sxe2_queue *rxq; + struct sxe2_rx_buf *rx_buf; + struct xdp_umem *umem; + u64 hr, mask; + u16 nta; + + rxq = container_of(alloc, struct sxe2_queue, zca); + umem = rxq->xsk_pool; + hr = umem->headroom + XDP_PACKET_HEADROOM; + +#ifndef HAVE_XDP_UMEM_PROPS + mask = umem->chunk_mask; +#else + mask = umem->props.chunk_mask; +#endif + + nta = rxq->next_to_alloc; + rx_buf = &rxq->rx_buf[nta]; + + nta++; + rxq->next_to_alloc = (nta < rxq->depth) ? 
nta : 0; + + handle_addr &= mask; + + rx_buf->dma = xdp_umem_get_dma(umem, handle_addr); + rx_buf->dma += hr; + + rx_buf->addr = xdp_umem_get_data(umem, handle_addr); + rx_buf->addr += hr; + + rx_buf->handle = (u64)handle_addr + umem->headroom; +} + +static __always_inline bool sxe2_alloc_buf_fast_zc(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf) +{ + struct xdp_umem *umem = rxq->xsk_pool; + void *addr = rx_buf->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rxq->stats->rx_stats.rx_pg_alloc_fail++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + rx_buf->dma = xdp_umem_get_dma(umem, handle); + rx_buf->dma += hr; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += hr; + + rx_buf->handle = handle + umem->headroom; + + xsk_umem_release_addr(umem); + return true; +} + +static __always_inline bool sxe2_alloc_buf_slow_zc(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf) +{ + struct xdp_umem *umem = rxq->xsk_pool; + u64 handle, headroom; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rxq->stats->rx_stats.rx_pg_alloc_fail++; + return false; + } + + handle &= umem->chunk_mask; + headroom = umem->headroom + XDP_PACKET_HEADROOM; + + rx_buf->dma = xdp_umem_get_dma(umem, handle); + rx_buf->dma += headroom; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += headroom; + + rx_buf->handle = handle + umem->headroom; + + xsk_umem_release_addr_rq(umem); + return true; +} + +static bool sxe2_alloc_rx_bufs_fast_zc(struct sxe2_queue *rxq, u16 count) +{ + return sxe2_alloc_rx_bufs_zc(rxq, count, sxe2_alloc_buf_fast_zc); +} + +bool sxe2_alloc_rx_bufs_slow_zc(struct sxe2_queue *rxq, u16 count) +{ + return sxe2_alloc_rx_bufs_zc(rxq, count, sxe2_alloc_buf_slow_zc); +} + +static struct sxe2_rx_buf *sxe2_get_rx_buf_zc(struct sxe2_queue *rxq, int size) +{ + struct sxe2_rx_buf *rx_buf; + + rx_buf = &rxq->rx_buf[rxq->next_to_clean]; + + dma_sync_single_range_for_cpu(rxq->dev, rx_buf->dma, 0, size, + DMA_BIDIRECTIONAL); + + return rx_buf; +} + +static void sxe2_reuse_rx_buf_zc(struct sxe2_queue *rxq, struct sxe2_rx_buf *old_buf) +{ +#ifdef HAVE_XDP_UMEM_PROPS + unsigned long mask = (unsigned long)rxq->xsk_pool->props.chunk_mask; +#else + unsigned long mask = (unsigned long)rxq->xsk_pool->chunk_mask; +#endif + u64 hr = rxq->xsk_pool->headroom + XDP_PACKET_HEADROOM; + u16 nta = rxq->next_to_alloc; + struct sxe2_rx_buf *new_buf; + + new_buf = &rxq->rx_buf[nta++]; + rxq->next_to_alloc = (nta < rxq->depth) ? 
nta : 0; + + new_buf->dma = old_buf->dma & mask; + new_buf->dma += hr; + + new_buf->addr = (void *)((unsigned long)old_buf->addr & mask); + new_buf->addr += hr; + + new_buf->handle = old_buf->handle & mask; + new_buf->handle += rxq->xsk_pool->headroom; + + old_buf->addr = NULL; +} +#endif + +STATIC s32 sxe2_xsk_pool_disable(struct sxe2_vsi *vsi, u16 qid) +{ +#ifdef HAVE_AF_XDP_NETDEV_UMEM +#ifdef HAVE_NETDEV_BPF_XSK_POOL + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); +#else + struct xdp_umem *pool = xsk_get_pool_from_qid(vsi->netdev, qid); +#endif +#else + struct xdp_umem *pool; + + if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems) + return -EINVAL; + + pool = vsi->xsk_umems[qid]; +#endif + + if (!pool) + return -EINVAL; + + clear_bit(qid, vsi->af_xdp_zc_qps); + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_pool_dma_unmap(pool, SXE2_RX_DMA_ATTR); +#else + sxe2_xsk_umem_dma_unmap(vsi, pool); +#endif + +#ifndef HAVE_AF_XDP_NETDEV_UMEM + sxe2_xsk_remove_umem(vsi, qid); +#endif + + return 0; +} + +STATIC s32 +#ifdef HAVE_NETDEV_BPF_XSK_POOL +sxe2_xsk_pool_enable(struct sxe2_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) +#else +sxe2_xsk_pool_enable(struct sxe2_vsi *vsi, struct xdp_umem *pool, u16 qid) +#endif +{ +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_umem_fq_reuse *reuseq; +#endif + s32 err; + + if (vsi->type != SXE2_VSI_T_PF) + return -EINVAL; + +#ifndef HAVE_AF_XDP_NETDEV_UMEM + if (!vsi->num_xsk_umems) + vsi->num_xsk_umems = min_t(u16, vsi->rxqs.q_cnt, vsi->txqs.q_cnt); + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + + err = sxe2_xsk_alloc_umems(vsi); + if (err) + return err; + + if (vsi->xsk_umems && vsi->xsk_umems[qid]) + return -EBUSY; + + vsi->xsk_umems[qid] = pool; + vsi->num_xsk_umems_used++; +#endif + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + err = xsk_pool_dma_map(pool, SXE2_ADAPTER_TO_DEV(vsi->adapter), + SXE2_RX_DMA_ATTR); +#else + reuseq = xsk_reuseq_prepare(vsi->rxqs.q[0]->depth); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(pool, reuseq)); + err = sxe2_xsk_umem_dma_map(vsi, pool); +#endif + if (err) + return err; + + set_bit(qid, vsi->af_xdp_zc_qps); + + return 0; +} + +#ifdef HAVE_NETDEV_BPF_XSK_POOL +s32 sxe2_xsk_pool_setup(struct sxe2_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) +#else +s32 sxe2_xsk_umem_setup(struct sxe2_vsi *vsi, struct xdp_umem *pool, u16 qid) +#endif +{ + struct sxe2_adapter *adapter = vsi->adapter; + bool if_running, pool_present = !!pool; + s32 ret = 0, pool_failure = 0; + struct net_device *netdev = vsi->netdev; + + if (qid >= vsi->rxqs.q_cnt || qid >= vsi->txqs.q_cnt) { + LOG_DEV_ERR("please use queue id in scope of combined queues " + "count.\n"); + pool_failure = -EINVAL; + goto failure; + } + + if (sxe2_xdp_is_enable(vsi) && qid >= vsi->num_xdp_txq) { + LOG_DEV_ERR("please use queue id in scope of xdp queues count.\n"); + pool_failure = -EINVAL; + goto failure; + } + + if_running = netif_running(netdev) && sxe2_xdp_is_enable(vsi); + + if (if_running) { + ret = sxe2_qp_dis(vsi, qid); + if (ret) { + LOG_NETDEV_ERR("sxe2_qp_dis error = %d\n", ret); + if (pool_present) + goto xsk_pool_if_up; + } + } + + pool_failure = pool_present ? 
sxe2_xsk_pool_enable(vsi, pool, qid) + : sxe2_xsk_pool_disable(vsi, qid); + +xsk_pool_if_up: + if (if_running) { + ret = sxe2_qp_ena(vsi, qid, pool_present); + if (!ret && pool_present) + napi_schedule(&vsi->xdp_rings.q[qid]->irq_data->napi); + else if (ret) + LOG_NETDEV_ERR("sxe2_qp_ena error = %d\n", ret); + } + +failure: + if (pool_failure) { + LOG_NETDEV_ERR("Could not %sable buffer pool, error = %d\n", + pool_present ? "en" : "dis", pool_failure); + return pool_failure; + } + + return ret; +} + +STATIC bool sxe2_xmit_zc(struct sxe2_queue *xdp_ring, s32 budget) +{ + union sxe2_tx_data_desc *tx_desc = NULL; + bool work_done = true; +#ifdef XSK_UMEM_RETURNS_XDP_DESC + struct xdp_desc desc; +#endif + dma_addr_t dma; +#ifndef XSK_UMEM_RETURNS_XDP_DESC + u32 len; +#endif + struct sxe2_tx_offload_info offload; + + while (likely(budget-- > 0)) { + struct sxe2_tx_buf *tx_buf; + + if (unlikely(!SXE2_DESC_UNUSED(xdp_ring))) { + xdp_ring->stats->tx_stats.tx_busy++; + work_done = false; + break; + } + + tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; + +#ifdef XSK_UMEM_RETURNS_XDP_DESC + if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) + break; +#endif + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc.len); +#else + dma = xdp_umem_get_dma(xdp_ring->xsk_pool, desc.addr); + dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, + DMA_BIDIRECTIONAL); +#endif + tx_buf->bytecount = (u32)desc.len; + + offload.adapter = xdp_ring->vsi->adapter; + offload.data_desc_cmd = SXE2_TXDD_CMD_EOP | SXE2_TXDD_CMD_RS; + offload.data_desc_offset = 0; + offload.data_desc_l2tag1 = 0; + + tx_desc = SXE2_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->read.buf_addr = cpu_to_le64(dma); + tx_desc->read.cmd_type_offset_bsz = +#ifdef XSK_UMEM_RETURNS_XDP_DESC + sxe2_tx_data_desc_qword1_setup(&offload, + (u32)desc.len); +#else + sxe2_tx_data_desc_qword1_setup(&offload, len); +#endif + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->depth) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + sxe2_xdp_ring_update_tail(xdp_ring); + xsk_tx_release(xdp_ring->xsk_pool); + } + + return budget > 0 && work_done; +} + +STATIC void sxe2_clean_xdp_tx_buf(struct sxe2_queue *xdp_ring, + struct sxe2_tx_buf *tx_buf) +{ + xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); + dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); +} + +bool sxe2_txq_irq_clean_zc(struct sxe2_queue *xdp_ring, s32 budget) +{ + s32 ntc = xdp_ring->next_to_clean; + union sxe2_tx_data_desc *tx_desc; + struct sxe2_tx_buf *tx_buf; + u32 xsk_frames = 0; + bool xmit_done; + struct sxe2_queue_stats queue_stats = {}; + + tx_desc = SXE2_TX_DESC(xdp_ring, ntc); + tx_buf = &xdp_ring->tx_buf[ntc]; + ntc -= xdp_ring->depth; + + do { + if (!(tx_desc->wb.dd & cpu_to_le64(SXE2_TX_DESC_DTYPE_DESC_DONE))) + break; + + queue_stats.bytes += tx_buf->bytecount; + queue_stats.packets++; + + if (tx_buf->raw_buf) { + sxe2_clean_xdp_tx_buf(xdp_ring, tx_buf); + tx_buf->raw_buf = NULL; + } else { + xsk_frames++; + } + + tx_desc->read.cmd_type_offset_bsz = 0; + tx_buf++; + tx_desc++; + ntc++; + + if (unlikely(!ntc)) { + ntc -= xdp_ring->depth; + tx_buf = xdp_ring->tx_buf; + tx_desc = SXE2_TX_DESC(xdp_ring, 0); + } + + prefetch(tx_desc); + + } while (likely(--budget)); + + ntc += xdp_ring->depth; + xdp_ring->next_to_clean = (u16)ntc; + + if (xsk_frames) + 
xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); + +#ifdef HAVE_NDO_XSK_WAKEUP + if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) + (void)xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); +#endif + + sxe2_tx_pkt_stats_update(xdp_ring, &queue_stats); + xmit_done = sxe2_xmit_zc(xdp_ring, NAPI_POLL_WEIGHT); + + return budget > 0 && xmit_done; +} + +STATIC void sxe2_bump_ntc(struct sxe2_queue *rxq) +{ + u16 ntc = rxq->next_to_clean + 1; + + ntc = (ntc < rxq->depth) ? ntc : 0; + rxq->next_to_clean = ntc; + prefetch(SXE2_RX_DESC(rxq, ntc)); +} + +STATIC struct sk_buff *sxe2_construct_skb_zc(struct sxe2_queue *rxq, + struct sxe2_rx_buf *rx_buf) +{ + s32 metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta; + s32 datasize = rx_buf->xdp->data_end - rx_buf->xdp->data; + s32 datasize_hard = rx_buf->xdp->data_end - rx_buf->xdp->data_hard_start; + struct sk_buff *skb; + + if (datasize < 0 || datasize_hard < 0) + return NULL; + + skb = __napi_alloc_skb(&rxq->irq_data->napi, (u32)datasize_hard, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start); + memcpy(__skb_put(skb, (u32)datasize), rx_buf->xdp->data, (size_t)datasize); + if (metasize) + skb_metadata_set(skb, (u8)metasize); + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buff_free(rx_buf->xdp); + rx_buf->xdp = NULL; +#else + sxe2_reuse_rx_buf_zc(rxq, rx_buf); +#endif + + return skb; +} + +#ifdef HAVE_XDP_SUPPORT +STATIC s32 sxe2_run_xdp_zc(struct sxe2_queue *rxq, struct xdp_buff *xdp) +{ + s32 err, result; + struct bpf_prog *xdp_prog; + struct sxe2_queue *xdp_ring; + u32 act; + u64 rx_bytes = (u64)(xdp->data_end - xdp->data); + struct sxe2_rxq_xdp_stats *xdp_stats = &rxq->stats->rx_stats.xdp_stats; + struct sxe2_adapter *adapter = rxq->vsi->adapter; + u16 tx_qid; + + xdp_stats->rx_xsk_packets++; + xdp_stats->rx_xsk_bytes += rx_bytes; + + xdp_prog = READ_ONCE(rxq->xdp_prog); + + act = bpf_prog_run_xdp(xdp_prog, xdp); + LOG_DEBUG_BDF("bpf_prog_run_xdp ret:%d\n", act); + + if (likely(act == XDP_REDIRECT)) { + err = xdp_do_redirect(rxq->netdev, xdp, xdp_prog); + result = !err ? 
SXE2_XDP_REDIR : SXE2_XDP_CONSUMED; + if (err) + xdp_stats->rx_xsk_redirect_fail++; + else + xdp_stats->rx_xsk_redirect++; + + goto l_end; + } + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + xdp->handle += xdp->data - xdp->data_hard_start; +#endif + + switch (act) { + case XDP_PASS: + xdp_stats->rx_xsk_pass++; + result = SXE2_XDP_PASS; + break; + case XDP_TX: + tx_qid = rxq->idx_in_vsi; + if (tx_qid >= rxq->vsi->num_xdp_txq) + tx_qid = (u16)(tx_qid % rxq->vsi->num_xdp_txq); + + xdp_ring = rxq->vsi->xdp_rings.q[tx_qid]; + result = sxe2_xmit_xdp_buff(xdp, xdp_ring); + if (result == SXE2_XDP_TX) + xdp_stats->rx_xsk_tx_xmit++; + else + xdp_stats->rx_xsk_tx_xmit_fail++; + break; + + default: + bpf_warn_invalid_xdp_action(rxq->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + xdp_stats->rx_xsk_unknown++; + trace_xdp_exception(rxq->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = SXE2_XDP_CONSUMED; + xdp_stats->rx_xsk_drop++; + break; + } + +l_end: + return result; +} +#endif + +s32 sxe2_rx_irq_clean_zc(struct sxe2_queue *rxq, s32 budget) +{ + u32 total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = SXE2_DESC_UNUSED(rxq); + u32 xdp_xmit = 0; + bool failure = false; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +#ifdef HAVE_XDP_SUPPORT + struct xdp_buff xdp; + + xdp.rxq = &rxq->xdp_rxq; +#endif +#endif + + while (likely(total_rx_packets < (u32)budget)) { + union sxe2_rx_desc *rx_desc; + u32 size = 0; + s32 xdp_res = 0; + struct sxe2_rx_buf *rx_buf; + struct sk_buff *skb; + u16 rx_ptype; + + rx_desc = SXE2_RX_DESC(rxq, rxq->next_to_clean); + + if (!sxe2_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2_RX_DESC_STATUS0_DD))) + break; + + dma_rmb(); + + size = le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) & + SXE2_RX_DESC_PKT_LEN_MASK; + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#ifdef HAVE_XDP_SUPPORT + rx_buf = &rxq->rx_buf[rxq->next_to_clean]; + rx_buf->xdp->data_end = rx_buf->xdp->data + size; +#ifdef XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM + xsk_buff_dma_sync_for_cpu(rx_buf->xdp); +#else + xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rxq->xsk_pool); +#endif + xdp_res = sxe2_run_xdp_zc(rxq, rx_buf->xdp); +#endif +#else + rx_buf = sxe2_get_rx_buf_zc(rxq, size); + if (!rx_buf->addr) + break; + +#ifdef HAVE_XDP_SUPPORT + xdp.data = rx_buf->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = (u8 *)xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = (u8 *)xdp.data + size; + xdp.handle = rx_buf->handle; + + xdp_res = sxe2_run_xdp_zc(rxq, &xdp); +#endif +#endif + if (xdp_res) { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + if (xdp_res & (SXE2_XDP_TX | SXE2_XDP_REDIR)) { + xdp_xmit |= xdp_res; + } else { + xsk_buff_free(rx_buf->xdp); + rx_buf->xdp = NULL; + } +#else + if (xdp_res & (SXE2_XDP_TX | SXE2_XDP_REDIR)) { + xdp_xmit |= xdp_res; + rx_buf->addr = NULL; + } else { + sxe2_reuse_rx_buf_zc(rxq, rx_buf); + } +#endif + + total_rx_bytes += size; + total_rx_packets++; + cleaned_count++; + + sxe2_bump_ntc(rxq); + continue; + } + + skb = sxe2_construct_skb_zc(rxq, rx_buf); + if (!skb) { + rxq->stats->rx_stats.rx_buff_alloc_err++; + break; + } + + cleaned_count++; + sxe2_bump_ntc(rxq); + + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + total_rx_bytes += skb->len; + + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_status1) & + SXE2_RX_DESC_PTYPE_MASK; + + sxe2_skb_field_fill(rxq, rx_desc, skb, rx_ptype); + + (void)napi_gro_receive(&rxq->irq_data->napi, skb); + + total_rx_packets++; + } + + if (cleaned_count >= SXE2_RX_BUF_WRITE) +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + failure = 
!sxe2_alloc_rx_bufs_zc(rxq, cleaned_count); +#else + failure = !sxe2_alloc_rx_bufs_fast_zc(rxq, cleaned_count); +#endif + + sxe2_xdp_tail_update(rxq, xdp_xmit); + + sxe2_rxq_stats_update(rxq, total_rx_packets, total_rx_bytes); + +#ifdef HAVE_NDO_XSK_WAKEUP + if (xsk_uses_need_wakeup(rxq->xsk_pool)) { + if (failure || rxq->next_to_clean == rxq->next_to_use) + xsk_set_rx_need_wakeup(rxq->xsk_pool); + else + xsk_clear_rx_need_wakeup(rxq->xsk_pool); + + return (s32)total_rx_packets; + } +#endif + + return failure ? budget : (s32)total_rx_packets; +} + +void sxe2_xsk_clean_rx_ring(struct sxe2_queue *rx_ring) +{ + u16 i; + + for (i = 0; i < rx_ring->depth; i++) { + struct sxe2_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + + if (!rx_buf->xdp) + continue; + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_umem_fq_reuse(rx_ring->xsk_pool, rx_buf->handle); +#endif +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buff_free(rx_buf->xdp); +#endif + rx_buf->xdp = NULL; + } +} + +void sxe2_xsk_clean_xdp_ring(struct sxe2_queue *xdp_ring) +{ + u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; + u32 xsk_frames = 0; + + while (ntc != ntu) { + struct sxe2_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; + + if (tx_buf->raw_buf) + sxe2_clean_xdp_tx_buf(xdp_ring, tx_buf); + else + xsk_frames++; + + tx_buf->raw_buf = NULL; + + ntc++; + if (ntc >= xdp_ring->depth) + ntc = 0; + } + + if (xsk_frames) + xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); +} + +#ifdef HAVE_NETDEV_BPF_XSK_POOL +struct xsk_buff_pool *sxe2_xsk_pool(struct sxe2_queue *ring) +#else +struct xdp_umem *sxe2_xsk_pool(struct sxe2_queue *ring) +#endif +{ + struct sxe2_vsi *vsi = ring->vsi; + u16 qid = ring->idx_in_vsi; +#ifndef HAVE_AF_XDP_NETDEV_UMEM + struct xdp_umem **umems = vsi->xsk_umems; +#endif + + if (sxe2_queue_is_xdp(ring)) + qid -= vsi->num_xdp_txq; + + if (!sxe2_xdp_is_enable(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) + return NULL; + +#ifndef HAVE_AF_XDP_NETDEV_UMEM + if (qid >= vsi->num_xsk_umems || !umems || !umems[qid]) + return NULL; + + return umems[qid]; +#else + return xsk_get_pool_from_qid(vsi->netdev, qid); +#endif +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.h b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.h new file mode 100644 index 0000000000000000000000000000000000000000..7e04336bda881f0dc3c8c4545a2f46a47a59d3c6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2/sxe2pf/sxe2_xsk.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_xsk.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_XSK_H__ +#define __SXE2_XSK_H__ + +#include "sxe2_compat.h" + +#ifdef HAVE_XDP_SUPPORT +#ifdef HAVE_XDP_BUFF_IN_XDP_H +#include +#else +#include +#endif +#endif +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#include +#endif +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +#endif +#include +#ifdef HAVE_XDP_SUPPORT +#include +#endif + +#include "sxe2.h" + +#define SXE2_RX_BUF_WRITE 16 + +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +void sxe2_zca_free(struct zero_copy_allocator *alloc, + unsigned long handle_addr); + +bool sxe2_alloc_rx_bufs_slow_zc(struct sxe2_queue *rxq, u16 count); +#else +bool sxe2_alloc_rx_bufs_zc(struct sxe2_queue *rxq, u16 count); +#endif + +#ifdef HAVE_NETDEV_BPF_XSK_POOL +s32 sxe2_xsk_pool_setup(struct sxe2_vsi *vsi, struct xsk_buff_pool *pool, + u16 qid); +#else +s32 sxe2_xsk_umem_setup(struct sxe2_vsi *vsi, struct xdp_umem *pool, u16 qid); +#endif +#endif + +s32 sxe2_rx_irq_clean_zc(struct sxe2_queue *rxq, s32 budget); + +bool sxe2_txq_irq_clean_zc(struct sxe2_queue *txq, s32 napi_budget); + +void sxe2_xsk_clean_xdp_ring(struct sxe2_queue *xdp_ring); + +void sxe2_xsk_clean_rx_ring(struct sxe2_queue *rx_ring); + +static inline void sxe2_set_ring_xdp(struct sxe2_queue *q) +{ +#ifdef HAVE_XDP_SUPPORT + set_bit(SXE2_TX_FLAGS_Q_XDP, &q->flags); +#endif +} + +static inline bool sxe2_xdp_is_enable(struct sxe2_vsi *vsi) +{ +#ifdef HAVE_XDP_SUPPORT + return !!READ_ONCE(vsi->xdp_prog); +#else + return false; +#endif +} + +static inline void sxe2_xdp_queue_cnt_set(struct sxe2_vsi *vsi, u16 count) +{ + vsi->num_xdp_txq = count; + if (vsi->num_xdp_txq > SXE2_XDP_MAX_CNT) + vsi->num_xdp_txq = SXE2_XDP_MAX_CNT; +} + +static inline bool sxe2_queue_is_xdp(struct sxe2_queue *q) +{ + return (bool)test_bit(SXE2_TX_FLAGS_Q_XDP, &q->flags); +} + +#ifdef HAVE_NETDEV_BPF_XSK_POOL +struct xsk_buff_pool *sxe2_xsk_pool(struct sxe2_queue *ring); +#else +struct xdp_umem *sxe2_xsk_pool(struct sxe2_queue *ring); +#endif + +#ifdef HAVE_NDO_XSK_WAKEUP +s32 sxe2_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); +#else +s32 sxe2_xsk_async_xmit(struct net_device *netdev, u32 queue_id); +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/Kconfig b/drivers/net/ethernet/linkdata/sxe2vf/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..728aba54db693c31b997d0737be6e95ffd00d7f4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# sxe2vf network device configuration +# + +config SXE2_VF + tristate "sxe2vf PCI Express adapters support" + depends on (X86 || ARM64) && PCI + select MDIO + select PHYLIB + select PTP_1588_CLOCK + default m + help + This driver supports the sxe2vf PCI Express family of adapters. + + To compile this driver as a module, choose M here. The module + will be called sxe2vf. 
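+ +# Out-of-tree build sketch (illustrative; it assumes the standalone Makefile +# that follows and the default module name sxe2vf.ko): +# make -C /lib/modules/$(uname -r)/build M=$PWD modules +# insmod ./sxe2vf.ko 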
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/Makefile b/drivers/net/ethernet/linkdata/sxe2vf/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9c97a2a5234e16e26856282d0fcc76429e788105 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/Makefile @@ -0,0 +1,88 @@ +# SPDX-License-Identifier: GPL-2.0 +CONFIG_MODULE_SIG=n + +# Makefile path under the current directory +MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST))) +# Current directory +CURDIR :=$(shell dirname $(MAKEPATH)) +KDIR := /lib/modules/$(shell uname -r)/build +MT_BUILDMODE?=asic + +ifneq ($(KERNELRELEASE),) +# Compilation +CONFIG_SXE2 ?= m +obj-$(CONFIG_SXE2) += sxe2vf.o +sxe2vf-objs += $(patsubst %.c, sxe2vf/%.o, $(notdir $(wildcard $(CURDIR)/sxe2vf/*.c))) +sxe2vf-objs += $(patsubst %.c, sxe2vf/com_parse/%.o, $(notdir $(wildcard $(CURDIR)/sxe2vf/com_parse/*.c))) +sxe2vf-objs += $(patsubst %.c, common/mbx/%.o, $(notdir $(wildcard $(CURDIR)/common/mbx/*.c))) +sxe2vf-objs += $(patsubst %.c, common/sxe2vf/%.o, $(notdir $(wildcard $(CURDIR)/common/sxe2vf/*.c))) +sxe2vf-objs += $(patsubst %.c, base/log/%.o, $(notdir $(wildcard $(CURDIR)/base/log/*.c))) +sxe2vf-objs += $(patsubst %.c, base/compat/%.o, $(notdir $(wildcard $(CURDIR)/base/compat/*.c))) +sxe2vf-objs += $(patsubst %.c, base/ioctl/%.o, $(notdir $(wildcard $(CURDIR)/base/ioctl/*.c))) + +# Generate the linux kernel version code +ifneq ($(wildcard $(CURDIR)/vercode_build.sh),) + KER_DIR=$(srctree) + SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR)) + ifneq ($(SPECIFIC_LINUX),) + ccflags-y += -DSPECIFIC_LINUX + ccflags-y += -D$(SPECIFIC_LINUX) + endif +endif + +# Whether auxiliary device drivers are to be compiled +ifeq ($(SPECIFIC_LINUX),BCLINUX_21_10U4) +obj-$(CONFIG_SXE2) += sxe2_auxiliary.o +sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c))) +else +CHECK_AUX_BUS ?= $(CURDIR)/check_aux_support +NEED_AUX_BUS := $(shell bash ${CHECK_AUX_BUS} --ksrc="$(KDIR)" >/dev/null 2>&1; echo $$?) 
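+# NEED_AUX_BUS captures the check script's exit status; status 2 is assumed to +# mean the running kernel lacks auxiliary bus support, in which case the +# bundled sxe2_auxiliary module is built in the branch below. 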
+$(info "NEED_AUX_BUS = $(NEED_AUX_BUS)") +ifeq ($(NEED_AUX_BUS), 2) +obj-$(CONFIG_SXE2) += sxe2_auxiliary.o +sxe2_auxiliary-objs += $(patsubst %.c, base/auxiliary/%.o, $(notdir $(wildcard $(CURDIR)/base/auxiliary/*.c))) +endif +endif + +#添加编译选项和编译宏 +ccflags-y += -Werror -Wmaybe-uninitialized -frecord-gcc-switches +ccflags-y += -I$(CURDIR)/sxe2vf +ccflags-y += -I$(CURDIR)/sxe2vf/com_parse +ccflags-y += -I$(CURDIR)/common/mbx +ccflags-y += -I$(CURDIR)/common/sxe2vf +ccflags-y += -I$(CURDIR)/common/ioctl +ccflags-y += -I$(CURDIR)/common/sxe2flow +ccflags-y += -I$(CURDIR)/include +ccflags-y += -I$(CURDIR)/base/compat +ccflags-y += -I$(CURDIR)/base/auxiliary +ccflags-y += -I$(CURDIR)/base/log +ccflags-y += -I$(CURDIR)/base/trace +ccflags-y += -I$(CURDIR)/base/cdev +ccflags-y += -I$(CURDIR)/base/ioctl +ccflags-y += -DSXE2_HOST_DRIVER +ccflags-y += -DSXE2_MBX_SUPPORT +ccflags-y += -DCUR_DIR=$(CURDIR) +# ccflags-y += -DSXE2_DRIVER_TRACE +ccflags-y += -DSXE2_CFG_DEBUG + +#默认支持fpga平台,支持不同的硬件平台 +ifeq ($(MT_BUILDMODE),asic) + ccflags-y += -DSXE2_HARDWARE_ASIC +else #ifeq ($(MT_BUILDMODE),emu) + ccflags-y += -DSXE2_HARDWARE_EMU +endif + +else # KERNELRELEASE +#内核树路径 +KDIR := /lib/modules/$(shell uname -r)/build + +all: + $(info "HWVERSION = $(VER)" ) + @$(MAKE) -C $(KDIR) M=$(CURDIR) modules + +clean: + @rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt + @rm -rf ./sxe2vf/*.o ./sxe2vf/.*.cmd + @rm -rf ./base/log/*.o ./common/mbx/*.o ./common/sxe2vf/*.o ./base/auxiliary/*.o ./base/compat/*.o + +endif # KERNELRELEASE diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/Makefile b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4a05f84853fc90187b0412209b2407ccdd1cf570 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/Makefile @@ -0,0 +1,13 @@ +################################################################################ +# 专用于辅助设备驱动的编译 +################################################################################ +obj-m := sxe2_auxiliary.o + +KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +all: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules + +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary.c b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary.c new file mode 100644 index 0000000000000000000000000000000000000000..34570f1768ba6bbe30fc0bf8a8e5d222a9c06ab2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: auxiliary.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include "auxiliary_bus.h" + +static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, + const struct auxiliary_device *auxdev) +{ + for (; id->name[0]; id++) { + const char *p = strrchr(dev_name(&auxdev->dev), '.'); + size_t match_size; + + if (!p) + continue; + match_size = p - dev_name(&auxdev->dev); + + if (strlen(id->name) == match_size && + !strncmp(dev_name(&auxdev->dev), id->name, match_size)) + return id; + } + return NULL; +} + +static int auxiliary_match(struct device *dev, struct device_driver *drv) +{ + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv); + + return !!auxiliary_match_id(auxdrv->id_table, auxdev); +} + +static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const char *name, *p; + + name = dev_name(dev); + p = strrchr(name, '.'); + + return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX, + (int)(p - name), name); +} + +static const struct dev_pm_ops auxiliary_dev_pm_ops = { + SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) +}; + +static int auxiliary_bus_probe(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + int ret; + + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + if (auxdrv->probe) { + ret = auxdrv->probe(auxdev, + auxiliary_match_id(auxdrv->id_table, + auxdev)); + if (ret) + dev_pm_domain_detach(dev, true); + } else { + ret = 0; + } + } + + return ret; +} + +static int auxiliary_bus_remove(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + + if (auxdrv->remove) + auxdrv->remove(auxdev); + dev_pm_domain_detach(dev, true); + + return 0; +} + +static void auxiliary_bus_shutdown(struct device *dev) +{ + struct auxiliary_driver *auxdrv = NULL; + struct auxiliary_device *auxdev; + + if (dev->driver) { + auxdrv = to_auxiliary_drv(dev->driver); + auxdev = to_auxiliary_dev(dev); + } + + if (auxdrv && auxdrv->shutdown) + auxdrv->shutdown(auxdev); +} + +static struct bus_type auxiliary_bus_type = { + .name = "sxe2_auxiliary", + .probe = auxiliary_bus_probe, + .remove = auxiliary_bus_remove, + .shutdown = auxiliary_bus_shutdown, + .match = auxiliary_match, + .uevent = auxiliary_uevent, + .pm = &auxiliary_dev_pm_ops, +}; + +int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + + if (!dev->parent) { + pr_err("auxiliary_device has a NULL dev->parent\n"); + return -EINVAL; + } + + if (!auxdev->name) { + pr_err("auxiliary_device has a NULL name\n"); + return -EINVAL; + } + + dev->bus = &auxiliary_bus_type; + device_initialize(&auxdev->dev); + return 0; +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_device_init); + +int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname) +{ + struct device *dev = &auxdev->dev; + int ret; + + if (!modname) { + dev_err(dev, "auxiliary device modname is NULL\n"); + return -EINVAL; + } + + ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id); + if (ret) { + 
dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret) + dev_err(dev, "adding auxiliary device failed!: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_device_add); + +#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, + const void *data)) +#else +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + void *data, + int (*match)(struct device *dev, + void *data)) +#endif +{ + struct device *dev; + + dev = bus_find_device(&auxiliary_bus_type, start, data, match); + if (!dev) + return NULL; + + return to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_find_device); + +int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + int ret; + + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + if (!auxdrv->driver.name) + return -ENOMEM; + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + ret = driver_register(&auxdrv->driver); + if (ret) + kfree(auxdrv->driver.name); + + return ret; +} +EXPORT_SYMBOL_GPL(__sxe2_auxiliary_driver_register); + +void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(sxe2_auxiliary_driver_unregister); + +static int __init auxiliary_bus_init(void) +{ + return bus_register(&auxiliary_bus_type); +} + +static void __exit auxiliary_bus_exit(void) +{ + bus_unregister(&auxiliary_bus_type); +} + +module_init(auxiliary_bus_init); +module_exit(auxiliary_bus_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Auxiliary Bus Standalone"); +MODULE_AUTHOR("linux.tucana@Stars Micro System.com"); diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_bus.h b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_bus.h new file mode 100644 index 0000000000000000000000000000000000000000..b33cdae94dc7bfb0aaa294714546a6c0a6b518b7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_bus.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: auxiliary_bus.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#include +#include +#include +#include "auxiliary_compat.h" + +#ifndef HAVE_AUXILIARY_DEVICE_ID +#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_MODULE_PREFIX "sxe2_auxiliary:" +struct auxiliary_device_id { + char name[AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; +#endif + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *auxdev); + void (*shutdown)(struct auxiliary_device *auxdev); + int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); + int (*resume)(struct auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; +}; + +static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev) +{ + return container_of(dev, struct auxiliary_device, dev); +} + +static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct auxiliary_driver, driver); +} + +int sxe2_auxiliary_device_init(struct auxiliary_device *auxdev); +#define auxiliary_device_init(auxdev) sxe2_auxiliary_device_init(auxdev) + +int __sxe2_auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); + +#define auxiliary_device_add(auxdev) __sxe2_auxiliary_device_add(auxdev, KBUILD_MODNAME) + +static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) +{ + put_device(&auxdev->dev); +} + +static inline void auxiliary_device_delete(struct auxiliary_device *auxdev) +{ + device_del(&auxdev->dev); +} + +int __sxe2_auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, + const char *modname); +#define auxiliary_driver_register(auxdrv) \ + __sxe2_auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void sxe2_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); + +#define auxiliary_driver_unregister(auxdrv) sxe2_auxiliary_driver_unregister(auxdrv) + +#define module_auxiliary_driver(__auxiliary_driver) \ + module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) + +#ifndef NO_NEED_AUXILIARY_FIND_DEVICE_CONST_DATA +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, + const void *data)); +#else +struct auxiliary_device *sxe2_auxiliary_find_device(struct device *start, + void *data, + int (*match)(struct device *dev, + void *data)); +#endif + +#define auxiliary_find_device sxe2_auxiliary_find_device + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_compat.h b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..fb588676c19b21e3d4acbe4f750cbf5435906520 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/auxiliary/auxiliary_compat.h @@ -0,0 +1,44 @@ +#ifndef __AUXILIARY_COMPAT_H__ +#define __AUXILIARY_COMPAT_H__ + +#include +#include + +#include "sxe2_compat_gcc.h" + +#include "sxe2_compat_inc.h" + +#include "sxe2_compat.h" + +#ifdef NEED_BUS_FIND_DEVICE_CONST_DATA +struct _kc_bus_find_device_custom_data { + const void *real_data; + int (*real_match)(struct device *dev, const void *data); +}; + +static inline int 
_kc_bus_find_device_wrapped_match(struct device *dev, void *data) +{ + struct _kc_bus_find_device_custom_data *custom_data = data; + + return custom_data->real_match(dev, custom_data->real_data); +} + +static inline struct device * +_kc_bus_find_device(struct bus_type *type, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct _kc_bus_find_device_custom_data custom_data = {}; + + custom_data.real_data = data; + custom_data.real_match = match; + + return bus_find_device(type, start, &custom_data, + _kc_bus_find_device_wrapped_match); +} + +#define bus_find_device(type, start, data, match) \ + _kc_bus_find_device(type, start, data, match) +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/cdev/sxe2_cdev.h b/drivers/net/ethernet/linkdata/sxe2vf/base/cdev/sxe2_cdev.h new file mode 100644 index 0000000000000000000000000000000000000000..31ed4bb402520e38ae026774651f32f4a15c9113 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/cdev/sxe2_cdev.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cdev.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CDEV_H__ +#define __SXE2_CDEV_H__ + +#include + +struct sxe2_cdev_info { + struct cdev cdev; + dev_t dev_no; + struct device *device; + struct semaphore cdev_sem; +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.c b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..6d4b404ea0b6af8d5113bd7ded65c848562e0b9b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_log.h" +#include "sxe2_compat.h" + +#ifdef NEED_DEFINE_ETHTOOL_SPRINTF +void ethtool_sprintf_compat(u8 **data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(*data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + *data += ETH_GSTRING_LEN; +} +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.h b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..d4ecdd7868fb210aa7ecc9efa6e5bc42eb2a4803 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat.h @@ -0,0 +1,739 @@ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_compat.h + * @author: Linkdata + * @date: 2026.04.18 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_H__ +#define __SXE2_COMPAT_H__ + +#include "sxe2_compat_gcc.h" +#include +#include +#include "sxe2_compat_flow_offload.h" + +#include +#include +#include +#include + +#define HAVE_MACSEC_SUPPORT +#define SUPPORT_ETHTOOL_GET_RMON_STATS +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NETDEV_BPF_XSK_POOL +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_RXQ +#define HAVE_AF_XDP_ZC_SUPPORT +#define XSK_UMEM_RETURNS_XDP_DESC +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_AF_XDP_NETDEV_UMEM +#define SUPPORTED_COALESCE_PARAMS +#define IPSEC_HAVE_REAL_DEV +#define SUPPORTED_FLASH_UPDATE_PARAMS +#define HAVE_NDO_ETH_IOCTL +#define HAVE_FLOW_BLOCK_API +#define HAVE_FLOW_INDR_BLOCK_API +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_FLOW_OFFLOAD_H +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_INDIR_BLOCK +#define HAVE_TC_FLOWER_ENC +#define HAVE_VXLAN_TYPE +#define HAVE_GENEVE_TYPE +#define HAVE_GRETAP_TYPE +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#define HAVE_FLOW_DISSECTOR_KEY_CVLAN +#define HAVE_PTP_CLOCK_INFO_GETTIMEX64 +#define HAVE_XDP_BUFF_IN_XDP_H +#define HAVE_SCTP +#define HAVE_METADATA_PORT_INFO +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_NDO_OFFLOAD_STATS +#define HAVE_XDP_SUPPORT +#define HAVE_NETDEV_MIN_MAX_MTU +#define HAVE_NDO_SELECT_QUEUE_SB_DEV +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_NETDEV_SB_DEV +#define HAVE_ETHTOOL_FLOW_RSS + +#define NEED_INCLUDE_NETDEV_RX_QUEUE_H +#define NEED_NAPI_ALLOC_SKB +#define RANDOM_ETHER_ADDR_RENAME +#undef HAVE_ETHTOOL_RXFH_PARAM +#define SUPPORT_MACSEC_INFO_STRUCT +#define SUPPORTED_ETHTOOL_EEPROM_BY_PAGE +#define SXE2_KERNEL_MATCHED +#define VM_FLAGS_ONLY_READ +#define IOMMU_MAP_6_PARAMS +#define PIN_USER_PAGE_6_PARAMS +#define NOT_SUPP_IOMMU_CAPABLE + +#ifdef RANDOM_ETHER_ADDR_RENAME +#define random_ether_addr eth_random_addr +#endif + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#endif + +#ifdef XDP_BUFF_TO_FRAME_RENAME +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif + +#ifdef NEED_XSK_UMEM_RELEASE_RENAME +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif + +#ifdef NEED_DEFINE_PCI_AER_CLEAR_NONFATAL_STATUS +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#ifdef NEED_FLOW_CLS_OFFLOAD +struct tc_cls_flower_offload; +#define flow_cls_offload tc_cls_flower_offload +struct tc_block_offload; +#define flow_block_offload tc_block_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#endif + +#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#endif 
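+
+/*
+ * Note on the pattern used throughout this header: each NEED_ / HAVE_ flag
+ * is expected to be emitted at build time by the kernel-version probing in
+ * vercode_build.sh, and a renamed upstream symbol is then mapped back to
+ * the one spelling the common code uses. Illustrative sketch only --
+ * NEED_FOO_RENAME, foo_old and foo_new are hypothetical names, not part of
+ * this driver:
+ *
+ *	#ifdef NEED_FOO_RENAME
+ *	#define foo_new foo_old
+ *	#endif
+ */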
+
+#ifdef NEED_PCI_AER_CLEAR_NONFATAL_STATUS
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#endif
+
+#ifdef NEED_FLOW_BLOCK_BINDER_TYPE
+#define FLOW_BLOCK_BINDER_TYPE_UNSPEC TCF_BLOCK_BINDER_TYPE_UNSPEC
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
+#define FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS \
+	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS
+#endif
+
+#ifdef NEED_FLOW_BLOCK_BIND
+#define FLOW_BLOCK_BIND TC_BLOCK_BIND
+#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND
+#endif
+
+#ifdef NEED_FLOW_INDR_BLOCK_CB_REGISTER
+#define __flow_indr_block_cb_register __tc_indr_block_cb_register
+#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister
+#endif
+
+#ifdef NEED_XDP_CONVERT_BUFF_TO_FRAME
+#define xdp_convert_buff_to_frame convert_to_xdp_frame
+#endif
+
+#ifndef NETIF_NAPI_ADD_API_NEED_4_PARAMS
+static inline void
+netif_napi_add_compat(struct net_device *dev, struct napi_struct *napi,
+		      int (*poll)(struct napi_struct *, int), int weight)
+{
+	netif_napi_add(dev, napi, poll);
+}
+
+#ifdef netif_napi_add
+#undef netif_napi_add
+#endif
+#define netif_napi_add(dev, napi, poll, weight) netif_napi_add_compat(dev, napi, poll, weight)
+#endif
+
+#ifdef BPF_WARN_INVALID_XDP_ACTION_NEED_1_PARAM
+static inline void
+bpf_warn_invalid_xdp_action_compat(struct net_device *dev, struct bpf_prog *prog, u32 act)
+{
+	bpf_warn_invalid_xdp_action(act);
+}
+
+#ifdef bpf_warn_invalid_xdp_action
+#undef bpf_warn_invalid_xdp_action
+#endif
+#define bpf_warn_invalid_xdp_action(dev, prog, act) \
+	bpf_warn_invalid_xdp_action_compat(dev, prog, act)
+#endif
+
+#ifndef CLASS_CREATE_API_NEED_2_PARAMS
+static inline struct class *class_create_compat(struct module *owner, const char *devname)
+{
+	return class_create(devname);
+}
+
+#ifdef class_create
+#undef class_create
+#endif
+#define class_create(owner, devname) class_create_compat(owner, devname)
+#endif
+
+#ifdef XDP_RXQ_INFO_REG_NEED_3_PARAMS
+static inline int
+xdp_rxq_info_reg_compat(struct xdp_rxq_info *xdp_rxq, struct net_device *dev,
+			u32 queue_index, unsigned int __always_unused napi_id)
+{
+	return xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
+}
+
+#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \
+	xdp_rxq_info_reg_compat(xdp_rxq, dev, queue_index, napi_id)
+#endif
+
+#ifdef DEVLINK_ALLOC_NEED_2_PARAMS
+static inline struct devlink *
+devlink_alloc_compat(const struct devlink_ops *ops, size_t priv_size,
+		     struct device * __always_unused dev)
+{
+	return devlink_alloc(ops, priv_size);
+}
+
+#define devlink_alloc(ops, priv_size, dev) devlink_alloc_compat(ops, priv_size, dev)
+#endif
+
+#ifdef XDP_DO_FLUSH_MAP_DROP
+#define xdp_do_flush_map xdp_do_flush
+#endif
+
+#ifdef HAVE_STRSCPY
+#define SXE2_STRCPY strscpy
+#else
+#define SXE2_STRCPY strlcpy
+#endif
+
+#ifdef NEED_NAPI_ALLOC_SKB
+#define __napi_alloc_skb(napi, len, gfp_mask) napi_alloc_skb(napi, len)
+#endif
+
+#ifdef NEED_NAPI_BUILD_SKB
+#define napi_build_skb(data, frag_size) build_skb(data, frag_size)
+#endif
+
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef BITS_PER_TYPE
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#endif
+
+#ifdef NEED_DEFINE_BITS_TO_U32
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#endif
+
+#ifdef NEED_XSK_UMEM_GET_RX_FRAME_SIZE
+#ifdef HAVE_AF_XDP_ZC_SUPPORT
+#ifndef xsk_umem_get_rx_frame_size
+static inline u32
+xsk_umem_get_rx_frame_size_compat(struct xdp_umem *umem)
+{
+	return
umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size xsk_umem_get_rx_frame_size_compat +#endif +#endif +#endif + +#ifdef NEED_DEFINE_ETHTOOL_SPRINTF +#ifndef ethtool_sprintf +void ethtool_sprintf_compat(u8 **data, const char *fmt, ...); +#define ethtool_sprintf ethtool_sprintf_compat +#endif +#endif + +#ifdef NEED_DEFINE_KREALLOC_ARRAY +static inline void *__must_check krealloc_array(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) + return NULL; + + return krealloc(p, bytes, flags); +} +#endif + +#ifdef NEED_DEFINE_DEV_PAGE_IS_REUSABLE +static inline bool dev_page_is_reusable(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && + !page_is_pfmemalloc(page)); +} +#endif + +#ifdef NEED_DEFINE_NET_PREFETCH +static inline void net_prefetch(void *p) +{ + prefetch(p); +#if L1_CACHE_BYTES < 128 + prefetch((u8 *)p + L1_CACHE_BYTES); +#endif +} +#endif + +#ifdef NEED_DEFINE_XDP_PREPARE_BUFF +static __always_inline void +xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, + int headroom, int data_len, const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + + xdp->data_hard_start = hard_start; + xdp->data = data; + xdp->data_end = data + data_len; + xdp->data_meta = meta_valid ? data : data + 1; +} +#endif + +#ifdef NEED_DEFINE_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} +#endif + +#ifdef NEED_DEFINE_MUL_U64_U64_DIV_U64 +static inline u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) +{ + u64 res = 0, div, rem; + int shift; + + if (ilog2(a) + ilog2(b) > 62) { + div = div64_u64_rem(b, c, &rem); + res = div * a; + b = rem; + + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +#endif + +#ifdef NEED_DEFINE_FLEX_ARRAY_SIZE +#ifndef array_size +static inline size_t size_mul(size_t factor1, size_t factor2) +{ + size_t bytes; + + if (check_mul_overflow(factor1, factor2, &bytes)) + return SIZE_MAX; + + return bytes; +} + +#define array_size(a, b) size_mul(a, b) +#endif + +#undef flex_array_size +#define flex_array_size(p, member, count) ({ \ + typeof((p)->(member)) _local_member = (p)->(member); \ + typeof(count) _local_count = (count); \ + (void)_local_member; \ + array_size(_local_count, sizeof(*_local_member)) + __must_be_array(_local_member); \ +}) + +#endif + +#ifdef NEED_FLOW_BLOCK_CB_SETUP_SIMPLE +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +static inline int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head __always_unused *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} +#endif +#endif + +#ifdef NEED_DEFINE_KFREE_SENSITIVE +static inline void kfree_sensitive(const void *p) +{ + size_t ks; + void *mem = (void *)p; + + ks = ksize(mem); + if (ks) + memzero_explicit(mem, ks); + kfree(mem); +} +#endif + +#ifdef NEED_DEFINE_NETIF_IS_GENEVE +static inline bool netif_is_geneve(const struct net_device *dev) +{ + 
return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "geneve"); +} +#endif + +#ifdef NEED_DEFINE_SKB_FRAG_OFF +#define skb_frag_off(frag) ((frag)->page_offset) +#endif + +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif + +#ifdef NEED_PTP_SYSTEM_TIMESTAMP +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp *sts) { } + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp *sts) { } +#endif + +#ifdef NEED_PTP_SYSTEM_TIMESTAMP_INTERFACE +#include +static inline void +ptp_read_system_prets(struct ptp_system_timestamp *sts) { } + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp *sts) { } +#endif + +#ifdef HAVE_SKB_XMIT_MORE +#define netdev_xmit_more() (first_buf->skb->xmit_more) +#endif + +#ifdef NEED_NETDEV_TX_SENT_QUEUE +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} +#endif + +#ifdef ETH_GET_HEADLEN_NEED_2_PRAMS +static inline u32 eth_get_headlen_compat(const struct net_device __always_unused *dev, + void *data, unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) eth_get_headlen_compat(dev, data, len) +#endif + +#ifdef NEED_IDA_ALLOC_MIN_MAX_RANGE_FREE +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_simple_get(ida, 0, 0, gfp); +} + +static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) +{ + return ida_simple_get(ida, min, 0, gfp); +} + +static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, 0, max, gfp); +} + +static inline int +ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp) +{ + return ida_simple_get(ida, min, max, gfp); +} + +static inline void ida_free(struct ida *ida, unsigned int id) +{ + ida_simple_remove(ida, id); +} +#endif + +#ifndef struct_size +#define flex_array_size(p, member, count) ({ \ + typeof((p)->(member)) _local_member = (p)->(member); \ + typeof(count) _local_count = (count); \ + (void)_local_member; \ + array_size(_local_count, sizeof(*_local_member)) + __must_be_array(_local_member); \ +}) + +#endif + +#ifdef NEED_BITMAP_FROM_ARR32 +#if BITS_PER_LONG == 64 +static inline void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + bitmap[i / 2] = (unsigned long)buf[i]; + if (++i < halfwords) + bitmap[i / 2] |= ((unsigned long)buf[i]) << 32; + } + + if (nbits % BITS_PER_LONG) + bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); +} +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *)(bitmap), \ + (const unsigned long *)(buf), (nbits)) +#endif +#endif + +#ifdef NEED_BITMAP_TO_ARR32 +#if BITS_PER_LONG == 64 +static inline void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits) +{ + unsigned int i, halfwords; + + halfwords = DIV_ROUND_UP(nbits, 32); + for (i = 0; i < halfwords; i++) { + buf[i] = (u32)(bitmap[i / 2] & 
UINT_MAX); + if (++i < halfwords) + buf[i] = (u32)(bitmap[i / 2] >> 32); + } + + if (nbits % BITS_PER_LONG) + buf[halfwords - 1] &= (u32)(UINT_MAX >> ((-nbits) & 31)); +} +#else +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *)(buf), \ + (const unsigned long *)(bitmap), (nbits)) +#endif +#endif + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif + +#ifdef NEED_DEFINE_PAGE_FRAG_FREE +#define page_frag_free __free_page_frag +#endif + +#ifdef NEED_ASSIGN_BIT +static inline void assign_bit(long nr, unsigned long *addr, bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} +#endif + +#ifdef NEED_DMA_ATTRS +#include +static inline +dma_addr_t __kc_dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} + +#define dma_map_page_attrs __kc_dma_map_page_attrs + +static inline +void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} + +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + WARN_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif + +#ifdef FIXUP_USER_FAULT_5_PARAMS +static inline int +fixup_user_fault_compat(struct mm_struct *mm, + unsigned long address, unsigned int fault_flags, + bool *unlocked) +{ + return fixup_user_fault(NULL, mm, address, fault_flags, unlocked); +} + +#ifdef fixup_user_fault +#undef fixup_user_fault +#endif +#define fixup_user_fault(mm, address, fault_flags, unlocked) \ + fixup_user_fault_compat((mm), (address), (fault_flags), (unlocked)) +#endif + +#ifdef NOT_SUPP_MMAP_READ_LOCK +static inline void mmap_read_lock(struct mm_struct *mm) +{ + down_read(&mm->mmap_sem); +} + +static inline void mmap_read_unlock(struct mm_struct *mm) +{ + up_read(&mm->mmap_sem); +} +#endif + +#ifdef NOT_SUPP_UNTAGGED_ADDR +#include +#ifndef untagged_addr +#define untagged_addr(addr) (addr) +#endif +#endif + +#ifdef NOT_SUPP_VMA_LOOKUP +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + return find_vma_intersection(mm, addr, addr + 1); +} +#endif + +#ifdef PIN_USER_PAGE_8_PARAMS +static inline long pin_user_pages_remote_compat(struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked) +{ + return pin_user_pages_remote(NULL, mm, start, nr_pages, gup_flags, pages, vmas, locked); +} + +#ifdef pin_user_pages_remote +#undef pin_user_pages_remote +#endif +#define pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked) \ + pin_user_pages_remote_compat(mm, start, nr_pages, gup_flags, pages, vmas, locked) +#elif defined(PIN_USER_PAGE_6_PARAMS) +static inline long pin_user_pages_remote_compat(struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked) +{ + (void)vmas; + return pin_user_pages_remote(mm, start, nr_pages, 
gup_flags, pages, locked); +} + +#ifdef pin_user_pages_remote +#undef pin_user_pages_remote +#endif +#define pin_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked) \ + pin_user_pages_remote_compat(mm, start, nr_pages, gup_flags, pages, vmas, locked) +#endif + +#ifdef EVENTFD_SIGNAL_1PARAM +static inline int eventfd_signal_compat(struct eventfd_ctx *ctx, int n) +{ + (void)eventfd_signal(ctx); + + return n; +} + +#ifdef eventfd_signal +#undef eventfd_signal +#endif +#define eventfd_signal(ctx, n) \ + eventfd_signal_compat(ctx, n) +#endif + +#ifndef VM_FLAGS_ONLY_READ +static inline void vm_flags_set(struct vm_area_struct *vma, + vm_flags_t flags) +{ + vma->vm_flags |= flags; +} +#endif + +#ifdef IOMMU_MAP_6_PARAMS +static inline int iommu_map_compat(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL_ACCOUNT); +} + +#ifdef iommu_map +#undef iommu_map +#endif +#define iommu_map(domain, iova, paddr, size, prot) \ + iommu_map_compat(domain, iova, paddr, size, prot) +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.c b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.c new file mode 100644 index 0000000000000000000000000000000000000000..1a3875434eafdf882b595cde168f43bd21705b6a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat_dim.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2_compat_dim.h" +#ifdef NEED_COMPAT_DIM +#include +#include + +bool dim_on_top(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + return true; + case DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +void dim_turn(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + dim->tune_state = DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case DIM_GOING_LEFT: + dim->tune_state = DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +void dim_park_on_top(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = DIM_PARKING_ON_TOP; +} + +void dim_park_tired(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = DIM_PARKING_TIRED; +} + +void dim_calc_stats(struct dim_sample *start, const struct dim_sample *end, + struct dim_stats *curr_stats) +{ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr, + start->comp_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, + delta_us); + curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us); + if (curr_stats->epms != 0) + curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(curr_stats->cpms * 100, + curr_stats->epms); + else + curr_stats->cpe_ratio 
= 0; +} + +static int net_dim_step(struct dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return DIM_TOO_TIRED; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return DIM_STEPPED; +} + +static void net_dim_exit_parking(struct dim *dim) +{ + dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT; + net_dim_step(dim); +} + +static int net_dim_stats_compare(struct dim_stats *curr, + struct dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->epms) + return DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + return DIM_STATS_SAME; +} + +static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case DIM_GOING_RIGHT: + case DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, + &dim->prev_stats); + if (stats_res != DIM_STATS_BETTER) + dim_turn(dim); + + if (dim_on_top(dim)) { + dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case DIM_ON_EDGE: + dim_park_on_top(dim); + break; + case DIM_TOO_TIRED: + dim_park_tired(dim); + break; + } + + break; + } + + if (prev_state != DIM_PARKING_ON_TOP || + dim->tune_state != DIM_PARKING_ON_TOP) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +void net_dim(struct dim *dim, const struct dim_sample end_sample) +{ + struct dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < DIM_NEVENTS) + break; + dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + fallthrough; + case DIM_START_MEASURE: + dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr, + end_sample.byte_ctr, &dim->start_sample); + dim->state = DIM_MEASURE_IN_PROGRESS; + break; + case DIM_APPLY_NEW_PROFILE: + break; + } +} +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.h b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..8b08f951087b4e350aec5c200049e7f815f0a7b6 --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_dim.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat_dim.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_COMAPT_DIM_H_ +#define _SXE2_COMAPT_DIM_H_ + +#ifdef NEED_COMPAT_DIM +#include +#include +#include +#include +#include + +#define NET_DIM_PARAMS_NUM_PROFILES 5 + +#define IS_SIGNIFICANT_DIFF(val, ref) ({ \ + typeof(val) _val = (val); \ + typeof(ref) _ref = (ref); \ + (_ref != 0) ? ((((100UL * abs(_val - _ref))) / _ref) > 10) : 0; \ +}) + +#define DIM_NEVENTS 64 + +#define BIT_GAP(bits, end, start) ({ \ + typeof(bits) _bits = (bits); \ + typeof(end) _end = (end); \ + typeof(start) _start = (start); \ + (((_end) - (_start)) + _bits) & ((_bits) - 1); \ +}) + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +struct dim_stats { + int ppms; + int bpms; + int epms; + int cpms; + int cpe_ratio; +}; + +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum dim_tune_state { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +enum dim_stats_state { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +enum dim_step_result { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +enum dim_cq_period_mode { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +enum dim_state { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +bool dim_on_top(struct dim *dim); + +void dim_turn(struct dim *dim); + +void dim_park_on_top(struct dim *dim); + +void dim_park_tired(struct dim *dim); + +void dim_calc_stats(struct dim_sample *start, const struct dim_sample *end, + struct dim_stats *curr_stats); + +static inline void +dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +void net_dim(struct dim *dim, const struct dim_sample end_sample); +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_flow_offload.h b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_flow_offload.h new file mode 100644 index 0000000000000000000000000000000000000000..19d4bfd89013689a20303041baa8718089f80e44 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_flow_offload.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_compat_flow_offload.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2_COMPAT_FLOW_OFFLOAD_H__
+#define __SXE2_COMPAT_FLOW_OFFLOAD_H__
+
+#ifdef NEED_FLOW_MATCH
+#include <net/pkt_cls.h>
+#include <net/flow_dissector.h>
+
+struct flow_match {
+	struct flow_dissector *dissector;
+	void *mask;
+	void *key;
+};
+
+struct flow_match_basic {
+	struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+	struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+	struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+struct flow_match_vlan {
+	struct flow_dissector_key_vlan *key, *mask;
+};
+#endif
+
+struct flow_match_ipv4_addrs {
+	struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+	struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+#ifdef HAVE_FLOW_DISSECTOR_KEY_IP
+struct flow_match_ip {
+	struct flow_dissector_key_ip *key, *mask;
+};
+#endif
+
+struct flow_match_ports {
+	struct flow_dissector_key_ports *key, *mask;
+};
+
+#ifdef HAVE_TC_FLOWER_ENC
+struct flow_match_enc_keyid {
+	struct flow_dissector_key_keyid *key, *mask;
+};
+#endif
+
+struct flow_rule {
+	struct flow_match match;
+};
+
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+	return (struct flow_rule *)&tc_flow_cmd->dissector;
+}
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+				       enum flow_dissector_key_id key)
+{
+	return dissector_uses_key(rule->match.dissector, key);
+}
+
+#define FLOW_DISSECTOR_MATCH(_rule, _type, _out) do {				\
+	const struct flow_match *_m = &(_rule)->match;				\
+	struct flow_dissector *_d = (_m)->dissector;				\
+										\
+	(_out)->key = skb_flow_dissector_target(_d, (_type), (_m)->key);	\
+	(_out)->mask = skb_flow_dissector_target(_d, (_type), (_m)->mask);	\
+} while (0)
+
+static inline void
+flow_rule_match_basic(const struct flow_rule *rule,
+		      struct flow_match_basic *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+
+static inline void
+flow_rule_match_control(const struct flow_rule *rule,
+			struct flow_match_control *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+
+static inline void
+flow_rule_match_eth_addrs(const struct flow_rule *rule,
+			  struct flow_match_eth_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+
+#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS
+static inline void
+flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+#endif
+
+static inline void
+flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+			   struct flow_match_ipv4_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+
+static inline void
+flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+			   struct flow_match_ipv6_addrs *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+
+#ifdef HAVE_FLOW_DISSECTOR_KEY_IP
+static inline void
+flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out)
+{
+	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
+}
+#endif
+
+static inline void
+flow_rule_match_ports(const struct flow_rule *rule,
+		      struct flow_match_ports *out)
+{
+	FLOW_DISSECTOR_MATCH(rule,
FLOW_DISSECTOR_KEY_PORTS, out); +} + +#ifdef HAVE_TC_FLOWER_ENC +static inline void +flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void +flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_IP +#ifdef HAVE_FLOW_DISSECTOR_KEY_ENC_IP +static inline void +flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} +#endif +#endif + +static inline void +flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void +flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} +#endif + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +static inline bool __must_check __must_check_overflow(bool overflow) +{ + return unlikely(overflow); +} + +#define check_add_overflow(a, b, d) \ + __must_check_overflow(__builtin_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __must_check_overflow(__builtin_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __must_check_overflow(__builtin_mul_overflow(a, b, d)) +#endif + +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_gcc.h b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_gcc.h new file mode 100644 index 0000000000000000000000000000000000000000..992156294dccb2bfa209e61679c38bdeceb3b480 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_gcc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_compat_gcc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_GCC_H__ +#define __SXE2_COMPAT_GCC_H__ + +#ifndef SXE2_TEST +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) +#endif +#else +# define fallthrough do {} while (0) +#endif +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_inc.h b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_inc.h new file mode 100644 index 0000000000000000000000000000000000000000..2b2e034f0919a6b869702e154be197e31870e581 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/compat/sxe2_compat_inc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_compat_inc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COMPAT_INC_H__ +#define __SXE2_COMPAT_INC_H__ + +#include + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.c b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.c new file mode 100644 index 0000000000000000000000000000000000000000..7329824e40d0b01cb6e4210f3e8cc80acf581129 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.c @@ -0,0 +1,828 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_cdev.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_log.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_cdev.h" +#include "sxe2_com_ver_compat.h" + +#define SXE2_MAX_COM_CMDS (1) + +STATIC dev_t sxe2_com_cdev_major; +STATIC struct class *sxe2_com_cdev_class; +STATIC struct sxe2_com_dev_mgr sxe2_com_mgr; +STATIC struct mutex sxe2_com_minor_lock; +STATIC DEFINE_IDR(sxe2_com_minor_idr); + +static struct sxe2_com_dev_mgr *sxe2_com_dev_get(void) +{ + return &sxe2_com_mgr; +} + +STATIC s32 sxe2_com_handshake(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_cmd_common_hdr hdr = {}; + u32 arg_sz; + u32 dpdk_ver; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_HANDSHAKE); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_HANDSHAKE); + goto l_unlock; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&hdr, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + LOG_ERROR_BDF_COM("copy user arg failed ver: 0x%x arg_sz:%u.\n", com_ctxt->dpdk_ver, arg_sz); + goto l_unlock; + } + + if (SXE2_MK_VER_MAJOR(hdr.dpdk_ver) != SXE2_MK_VER_MAJOR(SXE2_COM_VER)) { + LOG_ERROR_BDF_COM("incompatible dpdk ver: 0x%x.\n", hdr.dpdk_ver); + ret = -EINVAL; + goto l_unlock; + } + + dpdk_ver = hdr.dpdk_ver; + + memset(&hdr, 0, sizeof(hdr)); + hdr.drv_ver = SXE2_COM_VER; + hdr.msg_len = arg_sz; + + if (SXE2_COM_IOMMU_SUPP(com_ctxt)) + hdr.cap |= (1 << SXE2_COM_CAP_IOMMU_MAP); + + if (copy_to_user((void __user *)arg, &hdr, arg_sz)) { + ret = -EFAULT; + LOG_ERROR_BDF_COM("copy user hdr failed ver: 0x%x.\n", com_ctxt->dpdk_ver); + goto l_unlock; + } + + com_ctxt->dpdk_ver = dpdk_ver; + com_ctxt->is_handshake = true; + +l_unlock: + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +static s32 sxe2_com_cmd_exec(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret; + struct sxe2_drv_cmd_params param = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_PASSTHROUGH); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_PASSTHROUGH); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) + return -EFAULT; + + if (!com_ctxt->ops || !com_ctxt->ops->cmd_exec) + return -EFAULT; + + ret = com_ctxt->ops->cmd_exec(com_ctxt->adapter, &com_ctxt->obj, ¶m); + + LOG_DEBUG_BDF_COM("result:%d\n", ret); + + return ret; +} + +STATIC long sxe2_com_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + long ret = 0; + struct sxe2_com_context *com_ctxt = (struct sxe2_com_context 
*)filep->private_data;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	if (filep == NULL || cmd == 0 || arg == 0 || com_ctxt == NULL) {
+		LOG_ERROR_BDF_COM("filep=%pK cmd=%u arg=%lu, com_ctxt=%pK\n", filep, cmd, arg, com_ctxt);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	LOG_DEBUG_BDF_COM("driver dpdk cmd=%x, arg=0x%lx\n", cmd, arg);
+
+	mutex_lock(&dev_mgr->lock);
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS) {
+		mutex_unlock(&dev_mgr->lock);
+		ret = -EACCES;
+		goto l_end;
+	}
+	mutex_unlock(&dev_mgr->lock);
+
+	if (!com_ctxt->is_handshake && cmd != SXE2_COM_CMD_HANDSHAKE) {
+		LOG_WARN_BDF_COM("please handshake first\n");
+		ret = -ERESTARTSYS;
+		goto l_end;
+	}
+
+	if (down_interruptible(&com_ctxt->cdev_info.cdev_sem)) {
+		LOG_WARN_BDF_COM("ioctl concurrency full\n");
+		ret = -ERESTARTSYS;
+		goto l_end;
+	}
+
+	switch (cmd) {
+	case SXE2_COM_CMD_HANDSHAKE:
+		ret = sxe2_com_handshake(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_IO_IRQS_REQ:
+		ret = sxe2_com_io_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_EVT_IRQ_REQ:
+		ret = sxe2_com_event_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_EVT_CAUSE_GET:
+		ret = sxe2_com_event_cause_get(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_RST_IRQ_REQ:
+		ret = sxe2_com_reset_irq_req(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_DMA_MAP:
+		ret = sxe2_com_dma_map(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_DMA_UNMAP:
+		ret = sxe2_com_dma_unmap(com_ctxt, arg);
+		break;
+	case SXE2_COM_CMD_PASSTHROUGH:
+		ret = sxe2_com_cmd_exec(com_ctxt, arg);
+		break;
+	default:
+		ret = -EINVAL;
+		LOG_ERROR_BDF_COM("unknown ioctl cmd, filep=%p, cmd=%u, arg=0x%8.8lx\n",
+				  filep, cmd, arg);
+		break;
+	}
+
+	up(&com_ctxt->cdev_info.cdev_sem);
+
+l_end:
+	if (ret) {
+		LOG_ERROR_BDF_COM("driver ioctl filep=%p, cmd=%x, arg=0x%lx end, ret:%ld\n",
+				  filep, cmd, arg, ret);
+	} else {
+		LOG_DEBUG_BDF_COM("driver ioctl filep=%p, cmd=%x, arg=0x%lx end, ret:%ld\n",
+				  filep, cmd, arg, ret);
+	}
+
+	return ret;
+}
+
+static s32 sxe2_com_add_vma(struct sxe2_com_context *com_ctxt, struct vm_area_struct *vma)
+{
+	struct sxe2_com_vma_device *mmap_vma;
+
+	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+	if (!mmap_vma)
+		return -ENOMEM;
+
+	mmap_vma->vma = vma;
+	list_add(&mmap_vma->vma_next, &com_ctxt->vma.vma_list);
+
+	return 0;
+}
+
+STATIC vm_fault_t __sxe2_com_mmap(struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_vma_device *mmap_vma;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+
+	list_for_each_entry(mmap_vma, &com_ctxt->vma.vma_list, vma_next) {
+		if (mmap_vma->vma == vma)
+			goto l_out;
+	}
+	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		ret = VM_FAULT_SIGBUS;
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		goto l_out;
+	}
+
+	if (sxe2_com_add_vma(com_ctxt, vma)) {
+		ret = VM_FAULT_OOM;
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	}
+
+l_out:
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+	return ret;
+}
+
+STATIC vm_fault_t __sxe2_com_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
+	struct page *new_page = NULL;
+	unsigned long pfn;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+
+	/* FAULT_FLAG_WRITE lives in vmf->flags, not in vma->vm_flags */
+	if (vmf->flags & FAULT_FLAG_WRITE) {
+		new_page = com_ctxt->vma.write_page;
+		LOG_WARN_BDF_COM("write fault at VA 0x%lx\n", vmf->address);
+	} else {
+		new_page = com_ctxt->vma.read_page;
+		LOG_WARN_BDF_COM("read fault at VA 0x%lx\n", vmf->address);
+	}
+
+	pfn = page_to_pfn(new_page);
+
+	ret = vmf_insert_pfn(vma, vmf->address, pfn);
+	if (ret & VM_FAULT_ERROR) {
+		LOG_ERROR_BDF_COM("failed to insert pfn into page tables\n");
+		goto l_out;
+	}
+
+	LOG_INFO_BDF_COM("successfully handled page fault\n");
+
+l_out:
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+	return ret;
+}
+
+static vm_fault_t sxe2_com_mmap_fault(struct vm_fault *vmf)
+{
+	vm_fault_t ret = VM_FAULT_SIGBUS;
+	struct vm_area_struct *vma = vmf->vma;
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS)
+		ret = __sxe2_com_mmap_fault(vmf);
+	else
+		ret = __sxe2_com_mmap(vma);
+
+	mutex_unlock(&dev_mgr->lock);
+
+	return ret;
+}
+
+static void sxe2_com_mmap_open(struct vm_area_struct *vma)
+{
+	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void sxe2_com_mmap_clear(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_vma_device *mmap_vma;
+	struct sxe2_com_vma_device *tmp;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+	list_for_each_entry_safe(mmap_vma, tmp, &com_ctxt->vma.vma_list, vma_next) {
+		struct vm_area_struct *vma = mmap_vma->vma;
+
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+
+		list_del(&mmap_vma->vma_next);
+		kfree(mmap_vma);
+	}
+
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+}
+
+static void sxe2_com_mmap_close(struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = vma->vm_private_data;
+	struct sxe2_com_vma_device *mmap_vma;
+
+	mutex_lock(&com_ctxt->vma.vma_lock);
+	list_for_each_entry(mmap_vma, &com_ctxt->vma.vma_list, vma_next) {
+		if (mmap_vma->vma == vma) {
+			list_del(&mmap_vma->vma_next);
+			kfree(mmap_vma);
+			break;
+		}
+	}
+	mutex_unlock(&com_ctxt->vma.vma_lock);
+}
+
+static const struct vm_operations_struct sxe2_com_mmap_ops = {
+	.open = sxe2_com_mmap_open,
+	.close = sxe2_com_mmap_close,
+	.fault = sxe2_com_mmap_fault,
+};
+
+STATIC s32 sxe2_com_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct sxe2_com_context *com_ctxt = (struct sxe2_com_context *)filp->private_data;
+	struct pci_dev *pdev = com_ctxt->pdev;
+	u32 bar_idx;
+	u64 phys_len, req_len, pgoff, req_start;
+
+	bar_idx = vma->vm_pgoff >> (SXE2_COM_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+	if (bar_idx >= DEVICE_COUNT_RESOURCE)
+		return -EINVAL;
+
+	if (vma->vm_end <= vma->vm_start) {
+		LOG_WARN_BDF_COM("end:%lu is not greater than start:%lu.\n", vma->vm_end, vma->vm_start);
+		return -EINVAL;
+	}
+	if ((vma->vm_flags & VM_SHARED) == 0)
+		return -EINVAL;
+
+	phys_len = PAGE_ALIGN(pci_resource_len(pdev, bar_idx));
+	req_len = vma->vm_end - vma->vm_start;
+	pgoff = vma->vm_pgoff & ((1U << (SXE2_COM_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (req_start + req_len > phys_len)
+		return -EINVAL;
+
+	vma->vm_private_data = com_ctxt;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_pgoff = (pci_resource_start(pdev, bar_idx) >> PAGE_SHIFT) + pgoff;
+
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+	vma->vm_ops = &sxe2_com_mmap_ops;
+
+	return 0;
+}
+
+STATIC void fill_page_with_ff(struct page *page)
+{
+	void *kaddr;
+
+	kaddr = kmap_local_page(page);
+	if (kaddr) {
+		memset(kaddr, 0xFF, PAGE_SIZE);
+		kunmap_local(kaddr);
+	}
+}
+
+STATIC int sxe2_com_bar_init(struct
sxe2_com_context *com_ctxt)
+{
+	int ret = 0;
+
+	com_ctxt->vma.read_page = dev_alloc_pages(0);
+	if (unlikely(!com_ctxt->vma.read_page)) {
+		LOG_ERROR_BDF_COM("failed to allocate read page for fault handler\n");
+		ret = -ENOMEM;
+		goto l_out;
+	}
+	fill_page_with_ff(com_ctxt->vma.read_page);
+
+	com_ctxt->vma.write_page = dev_alloc_pages(0);
+	if (unlikely(!com_ctxt->vma.write_page)) {
+		LOG_ERROR_BDF_COM("failed to allocate write page for fault handler\n");
+		ret = -ENOMEM;
+		goto l_read_page_failed;
+	}
+
+	return ret;
+l_read_page_failed:
+	__free_pages(com_ctxt->vma.read_page, 0);
+l_out:
+	return ret;
+}
+
+STATIC void sxe2_com_bar_deinit(struct sxe2_com_context *com_ctxt)
+{
+	__free_pages(com_ctxt->vma.read_page, 0);
+	__free_pages(com_ctxt->vma.write_page, 0);
+}
+
+STATIC s32 sxe2_com_open(struct inode *node, struct file *filep)
+{
+	s32 ret = 0;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+	struct sxe2_com_context *com_ctxt;
+
+	com_ctxt = container_of(node->i_cdev, struct sxe2_com_context, cdev_info.cdev);
+
+	filep->private_data = com_ctxt;
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (atomic_read(&com_ctxt->ref_count) != 0) {
+		LOG_DEV_WARN_COM("only allow open once\n");
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (com_ctxt->status == SXE2_COM_CDEV_STATUS_UNACCESS) {
+		ret = -EACCES;
+		goto l_unlock;
+	}
+
+	atomic_inc(&com_ctxt->ref_count);
+
+	com_ctxt->obj.func_type = com_ctxt->func_type;
+	com_ctxt->obj.drv_type = SXE2_DPDK_DRV;
+	com_ctxt->obj.pf_id = com_ctxt->pf_id;
+	com_ctxt->obj.vf_id = com_ctxt->vf_id;
+	com_ctxt->obj.drv_id = 0;
+
+l_unlock:
+	mutex_unlock(&dev_mgr->lock);
+	return ret;
+}
+
+STATIC void sxe2_com_ctxt_clear(struct sxe2_com_context *com_ctxt)
+{
+	mutex_lock(&com_ctxt->com_lock);
+	com_ctxt->dpdk_ver = SXE2_COM_INVAL_U32;
+	com_ctxt->is_handshake = false;
+	mutex_unlock(&com_ctxt->com_lock);
+}
+
+STATIC void sxe2_com_resource_clear(struct sxe2_com_context *com_ctxt)
+{
+	sxe2_com_mmap_clear(com_ctxt);
+
+	sxe2_com_irqs_clear(com_ctxt);
+
+	sxe2_com_dma_clear(com_ctxt);
+}
+
+STATIC void sxe2_com_clear(struct sxe2_com_context *com_ctxt)
+{
+	sxe2_com_ctxt_clear(com_ctxt);
+
+	if (com_ctxt->ops && com_ctxt->ops->release)
+		com_ctxt->ops->release(com_ctxt->adapter, &com_ctxt->obj);
+
+	(void)sxe2_com_irq_notifier_call_chain(com_ctxt, SXE2_COM_EC_RESET);
+
+	sxe2_com_resource_clear(com_ctxt);
+}
+
+STATIC s32 sxe2_com_close(struct inode *node, struct file *filep)
+{
+	struct sxe2_com_context *com_ctxt = (struct sxe2_com_context *)filep->private_data;
+	s32 ref_count = 0;
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	sxe2_com_clear(com_ctxt);
+
+	mutex_lock(&dev_mgr->lock);
+	ref_count = atomic_dec_return(&com_ctxt->ref_count);
+	if (ref_count == 0)
+		wake_up(&com_ctxt->waitq);
+	mutex_unlock(&dev_mgr->lock);
+
+	return 0;
+}
+
+void sxe2_com_disable(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	if (!com_ctxt->ops || !com_ctxt->ops->com_mode_get ||
+	    (com_ctxt->ops->com_mode_get(com_ctxt->adapter) == SXE2_COM_MODULE_KERNEL))
+		return;
+
+	mutex_lock(&dev_mgr->lock);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS;
+	sxe2_com_clear(com_ctxt);
+	mutex_unlock(&dev_mgr->lock);
+}
+
+void sxe2_com_enable(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_NORMAL;
+	mutex_unlock(&dev_mgr->lock);
+}
+
+static void sxe2_com_cdev_wait_clear(struct sxe2_com_context *com_ctxt)
+{
+	
s32 ret; + struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get(); + unsigned long cur_jiffies; + + mutex_lock(&dev_mgr->lock); + com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS; + mutex_unlock(&dev_mgr->lock); + + do { + cur_jiffies = msecs_to_jiffies(SXE2_COM_CMD_DFLT_TIMEOUT_MS); + ret = (s32)wait_event_timeout(com_ctxt->waitq, (atomic_read(&com_ctxt->ref_count) == 0), + (long)cur_jiffies); + if (!ret) { + LOG_INFO_BDF_COM("cdev wait ref count time out.\n"); + } else { + mutex_lock(&dev_mgr->lock); + if (atomic_read(&com_ctxt->ref_count) == 0) { + mutex_unlock(&dev_mgr->lock); + break; + } + mutex_unlock(&dev_mgr->lock); + } + } while (1); +} + +static s32 sxe2_com_minor_get(s32 *dev_minor) +{ + s32 ret = -ENOMEM; + + mutex_lock(&sxe2_com_minor_lock); + ret = idr_alloc(&sxe2_com_minor_idr, NULL, 0, (s32)SXE2_MAX_DEVICES_NUM, GFP_KERNEL); + if (ret >= 0) { + *dev_minor = ret; + ret = 0; + } + mutex_unlock(&sxe2_com_minor_lock); + return ret; +} + +static void sxe2_com_minor_free(s32 dev_minor) +{ + mutex_lock(&sxe2_com_minor_lock); + idr_remove(&sxe2_com_minor_idr, dev_minor); + mutex_unlock(&sxe2_com_minor_lock); +} + +const struct file_operations sxe2_com_cdev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sxe2_com_ioctl, + .mmap = sxe2_com_mmap, + .open = sxe2_com_open, + .release = sxe2_com_close, +}; + +STATIC s32 sxe2_com_cdev_create(struct sxe2_com_context *com_ctxt) +{ + s32 ret; + s32 dev_major, dev_minor; + struct pci_dev *pdev = com_ctxt->pdev; + struct sxe2_cdev_info *cdev_info = NULL; + + ret = sxe2_com_minor_get(&dev_minor); + if (ret) { + LOG_ERROR_BDF_COM("cdev minor get failed, ret=%d\n", ret); + ret = -ENOMEM; + goto l_get_minor_failed; + } + + cdev_info = &com_ctxt->cdev_info; + dev_major = (s32)MAJOR(sxe2_com_cdev_major); + cdev_info->dev_no = (dev_t)MKDEV(dev_major, dev_minor); + cdev_init(&cdev_info->cdev, &sxe2_com_cdev_fops); + cdev_info->cdev.owner = THIS_MODULE; + + LOG_INFO_BDF_COM("cdev_add: dev_major: %d, dev_minor: %d.\n", dev_major, dev_minor); + + ret = cdev_add(&cdev_info->cdev, cdev_info->dev_no, 1); + if (ret) { + LOG_ERROR_BDF_COM("failed to add cdev dev_no=%ld, ret=%d\n", (unsigned long)cdev_info->dev_no, ret); + goto l_add_cdev_failed; + } + + cdev_info->device = device_create(sxe2_com_cdev_class, NULL, cdev_info->dev_no, NULL, + SXE2_COM_CHRDEV_NAME "-%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus), + pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (IS_ERR(cdev_info->device)) { + ret = (s32)PTR_ERR(cdev_info->device); + LOG_ERROR_BDF_COM("failed to create device, dev_no=%ld\n", (unsigned long)cdev_info->dev_no); + goto l_create_dev_failed; + } + + LOG_INFO_BDF_COM("create char dev[%p] dev_no[major:minor=%u:%u] on pci_dev[%p] belongs to class dev[%p] success\n", + &cdev_info->cdev, dev_major, dev_minor, pdev, cdev_info->device); + + return 0; + +l_create_dev_failed: + cdev_del(&cdev_info->cdev); +l_add_cdev_failed: + sxe2_com_minor_free(dev_minor); +l_get_minor_failed: + return ret; +} + +STATIC void sxe2_com_cdev_delete(struct sxe2_com_context *com_ctxt) +{ + s32 dev_minor; + struct sxe2_cdev_info *cdev_info = &com_ctxt->cdev_info; + + dev_minor = (s32)MINOR(cdev_info->dev_no); + + sxe2_com_cdev_wait_clear(com_ctxt); + + LOG_INFO_BDF_COM("delete char dev[%p], dev_no[major:minor=%u:%u]\n", &cdev_info->cdev, MAJOR(cdev_info->dev_no), + dev_minor); + + device_destroy(sxe2_com_cdev_class, cdev_info->dev_no); + cdev_del(&cdev_info->cdev); + sxe2_com_minor_free(dev_minor); +} + +STATIC s32 sxe2_com_ctxt_init_once(struct 
sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops)
+{
+	const char *device_name;
+	u32 device_len;
+	size_t copy_result;
+
+	com_ctxt->adapter = adapter;
+	com_ctxt->ops = ops;
+
+	com_ctxt->ops->com_ctxt_fill(adapter);
+
+	com_ctxt->com_log_param.pdev = com_ctxt->pdev;
+	com_ctxt->dma_dev.dev = &(com_ctxt->pdev->dev);
+	atomic_set(&com_ctxt->ref_count, 0);
+	com_ctxt->dpdk_mode = ops->com_mode_get(adapter);
+	com_ctxt->status = SXE2_COM_CDEV_STATUS_UNACCESS;
+
+	device_name = dev_name(&com_ctxt->pdev->dev);
+	device_len = (u32)(strlen(device_name) + 1);
+	copy_result =
+		SXE2_STRCPY(com_ctxt->com_log_param.dev_name, device_name, min_t(u32, device_len, SXE2_COM_DEV_NAME));
+
+	sema_init(&com_ctxt->cdev_info.cdev_sem, SXE2_MAX_COM_CMDS);
+	init_waitqueue_head(&com_ctxt->waitq);
+	mutex_init(&com_ctxt->vma.vma_lock);
+	INIT_LIST_HEAD(&com_ctxt->vma.vma_list);
+	mutex_init(&com_ctxt->dma_dev.lock);
+	INIT_LIST_HEAD(&com_ctxt->dma_dev.buffer_list);
+	INIT_LIST_HEAD(&com_ctxt->list);
+	mutex_init(&com_ctxt->irqs.lock);
+
+	mutex_init(&com_ctxt->com_lock);
+
+	sxe2_com_ctxt_clear(com_ctxt);
+
+	(void)copy_result;
+	return 0;
+}
+
+STATIC void sxe2_com_ctxt_deinit_once(struct sxe2_com_context *com_ctxt)
+{
+	com_ctxt->adapter = NULL;
+	com_ctxt->pdev = NULL;
+	com_ctxt->ops = NULL;
+	com_ctxt->dma_dev.dev = NULL;
+	com_ctxt->dpdk_ver = SXE2_COM_INVAL_U32;
+	memset(&com_ctxt->com_log_param, 0, sizeof(com_ctxt->com_log_param));
+
+	mutex_destroy(&com_ctxt->com_lock);
+	mutex_destroy(&com_ctxt->vma.vma_lock);
+	mutex_destroy(&com_ctxt->dma_dev.lock);
+	mutex_destroy(&com_ctxt->irqs.lock);
+}
+
+s32 sxe2_com_init(struct sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops)
+{
+	s32 ret;
+
+	if (!com_ctxt || !ops || !ops->com_ctxt_fill || !ops->com_mode_get || !adapter) {
+		LOG_DEV_ERR_COM("invalid sxe2_com_init params, com_ctxt: %pK, adapter:%pK, ops: %pK\n",
+				com_ctxt, adapter, ops);
+		return -EINVAL;
+	}
+
+	if (ops->com_mode_get(adapter) == SXE2_COM_MODULE_KERNEL)
+		return 0;
+
+	ret = sxe2_com_ctxt_init_once(com_ctxt, adapter, ops);
+	if (ret)
+		goto l_end;
+
+	ret = sxe2_com_irqs_init(com_ctxt);
+	if (ret) {
+		LOG_DEV_ERR_COM("dpdk irqs init failed, ret: %d\n", ret);
+		goto l_com_deinit;
+	}
+
+	ret = sxe2_com_bar_init(com_ctxt);
+	if (ret) {
+		LOG_DEV_ERR_COM("dpdk bar init failed, ret: %d\n", ret);
+		goto l_irqs_deinit;
+	}
+
+	ret = sxe2_com_cdev_create(com_ctxt);
+	if (ret != 0) {
+		LOG_DEV_ERR_COM("eth dpdk cdev create failed, ret=%d\n", ret);
+		goto l_bar_deinit;
+	}
+
+	sxe2_com_enable(com_ctxt);
+
+	return 0;
+
+l_bar_deinit:
+	sxe2_com_bar_deinit(com_ctxt);
+l_irqs_deinit:
+	sxe2_com_irqs_deinit(com_ctxt);
+l_com_deinit:
+	sxe2_com_ctxt_deinit_once(com_ctxt);
+l_end:
+	return ret;
+}
+
+void sxe2_com_deinit(struct sxe2_com_context *com_ctxt)
+{
+	if (!com_ctxt->adapter || !com_ctxt->ops || !com_ctxt->ops->com_mode_get ||
+	    (com_ctxt->ops->com_mode_get(com_ctxt->adapter) == SXE2_COM_MODULE_KERNEL))
+		return;
+
+	sxe2_com_disable(com_ctxt);
+	sxe2_com_cdev_delete(com_ctxt);
+	sxe2_com_bar_deinit(com_ctxt);
+	sxe2_com_irqs_deinit(com_ctxt);
+	sxe2_com_ctxt_deinit_once(com_ctxt);
+}
+
+s32 sxe2_com_adapter_register(enum sxe2_func_type func_type)
+{
+	s32 ret;
+
+	memset(&sxe2_com_mgr, 0, sizeof(sxe2_com_mgr));
+	mutex_init(&sxe2_com_mgr.lock);
+
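+	/*
+	 * Descriptive note (inferred from the code): one dynamic major backs
+	 * every sxe2 char device; each probed function later takes a minor
+	 * from sxe2_com_minor_idr, and PF and VF builds register under
+	 * distinct chrdev region and class names.
+	 */
+	ret = alloc_chrdev_region(&sxe2_com_cdev_major, 0, SXE2_MAX_DEVICES_NUM,
+				  func_type == SXE2_PF ? SXE2_COM_CHRDEV_NAME : SXE2VF_COM_CHRDEV_NAME);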
+	if (ret) {
+		LOG_ERROR("alloc cdev number failed: %d\n", ret);
+		goto l_alloc_cdev_failed;
+	}
+
+	sxe2_com_cdev_class =
+		class_create(THIS_MODULE, func_type == SXE2_PF ? SXE2_COM_CHRDEV_NAME : SXE2VF_COM_CHRDEV_NAME);
+	if (IS_ERR(sxe2_com_cdev_class)) {
+		ret = (s32)PTR_ERR(sxe2_com_cdev_class);
+		LOG_ERROR("create cdev class failed: %d\n", ret);
+		goto l_create_class_failed;
+	}
+
+	mutex_init(&sxe2_com_minor_lock);
+
+	return 0;
+
+l_create_class_failed:
+	unregister_chrdev_region(sxe2_com_cdev_major, SXE2_MAX_DEVICES_NUM);
+l_alloc_cdev_failed:
+	return ret;
+}
+
+void sxe2_com_adapter_unregister(void)
+{
+	class_destroy(sxe2_com_cdev_class);
+	unregister_chrdev_region(sxe2_com_cdev_major, SXE2_MAX_DEVICES_NUM);
+	idr_destroy(&sxe2_com_minor_idr);
+
+	mutex_destroy(&sxe2_com_minor_lock);
+	mutex_destroy(&sxe2_com_mgr.lock);
+}
+
+void sxe2_com_info_print(struct sxe2_com_context *com_ctxt)
+{
+	struct sxe2_com_dev_mgr *dev_mgr = sxe2_com_dev_get();
+
+	mutex_lock(&dev_mgr->lock);
+
+	if (!com_ctxt->adapter)
+		goto l_unlock;
+
+	LOG_DEV_INFO_COM("com cdev status: %s\n", com_ctxt->status == 0 ? "Unaccess" : "Normal");
+
+	sxe2_com_dma_print(com_ctxt);
+
+l_unlock:
+	mutex_unlock(&dev_mgr->lock);
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.h b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.h
new file mode 100644
index 0000000000000000000000000000000000000000..baab0998f6bf7ec0b61759700c7cf8e694aff788
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_cdev.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_com_cdev.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_COM_CDEV_H__
+#define __SXE2_COM_CDEV_H__
+
+#include
+#include
+#include
+
+#include "sxe2_log.h"
+#include "sxe2_cdev.h"
+#include "sxe2_com_dma.h"
+#include "sxe2_com_irq.h"
+#include "sxe2_com_ver_compat.h"
+
+#define SXE2_COM_DEV_MGR_DATA_SIZE (128)
+#define SXE2_MAX_IOCTL_CMDS (1)
+#define SXE2_COM_CHRDEV_NAME "sxe2-dpdk"
+#define SXE2VF_COM_CHRDEV_NAME "sxe2vf-dpdk"
+#define SXE2_MAX_DEVICES_NUM BIT(MINORBITS)
+#define SXE2_COM_CMD_DFLT_TIMEOUT (30)
+#define SXE2_COM_CMD_DFLT_TIMEOUT_MS (30000)
+#define SXE2_COM_DEV_MGR_DATA_CNT (1)
+
+#define SXE2_COM_DEV_NAME 16
+
+struct com_log_param {
+	char dev_name[SXE2_COM_DEV_NAME];
+	struct pci_dev *pdev;
+};
+
+#define LOG_ERROR_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_ERROR_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_WARN_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_WARN_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_INFO_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_INFO_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEBUG_BDF_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEBUG_BDF(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_ERR_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEV_ERR(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_WARN_COM(fmt, ...) \
+	{ \
+		struct com_log_param *adapter = &com_ctxt->com_log_param; \
+		LOG_DEV_WARN(fmt, ##__VA_ARGS__); \
+	}
+
+#define LOG_DEV_INFO_COM(fmt, ...)
\ + { \ + struct com_log_param *adapter = &com_ctxt->com_log_param; \ + LOG_DEV_INFO(fmt, ##__VA_ARGS__); \ + } + +#define LOG_DEV_DEBUG_COM(fmt, ...) \ + { \ + struct com_log_param *adapter = &com_ctxt->com_log_param; \ + LOG_DEV_DEBUG(fmt, ##__VA_ARGS__); \ + } + +enum sxe2_com_dev_status { + SXE2_COM_CDEV_STATUS_UNACCESS = 0, + SXE2_COM_CDEV_STATUS_NORMAL, +}; + +enum sxe2_com_module { + SXE2_COM_MODULE_KERNEL = 0, + SXE2_COM_MODULE_DPDK, + SXE2_COM_MODULE_MIXED, + SXE2_COM_MODULE_RDMA, + SXE2_COM_MODULE_UNDEFINED, + SXE2_COM_MODULE_INVAL, +}; + +enum sxe2_func_type { + SXE2_PF = 0, + SXE2_VF, +}; + +enum sxe2_drv_type { + SXE2_KERNEL_DRV = 0, + SXE2_DPDK_DRV, +}; + +struct sxe2_obj { + u32 func_type : 2; + u32 resv : 2; + u32 pf_id : 4; + u32 vf_id : 12; + u32 resv1 : 4; + u32 drv_type : 2; + u32 drv_id : 6; +}; + +struct sxe2_com_vma_mgr { + struct mutex vma_lock; + struct list_head vma_list; + struct page *read_page; + struct page *write_page; + u8 reserved[3]; +}; + +struct sxe2_com_ops { + void (*com_ctxt_fill)(void *adapter); + s32 (*cmd_exec)(void *adapter, struct sxe2_obj *obj, struct sxe2_drv_cmd_params *param); + s32 (*get_irq_num)(void *adapter); + s32 (*get_vector)(void *adapter, u16 irq_id_in_com); + s32 (*release)(void *adapter, struct sxe2_obj *obj); + s32 (*com_mode_get)(void *adapter); +}; + +struct sxe2_com_context { + void *adapter; + struct pci_dev *pdev; + struct sxe2_com_ops *ops; + struct com_log_param com_log_param; + u8 dpdk_mode; + atomic_t ref_count; + struct list_head list; + wait_queue_head_t waitq; + enum sxe2_com_dev_status status; + struct sxe2_cdev_info cdev_info; + enum sxe2_func_type func_type; + u16 pf_id; + u16 vf_id; + bool is_handshake; + u32 dpdk_ver; + struct sxe2_com_irqs_ctxt irqs; + struct sxe2_com_vma_mgr vma; + struct sxe2_com_dma_dev dma_dev; + struct sxe2_obj obj; + struct mutex com_lock; +}; + +struct sxe2_com_dev_mgr { + struct mutex lock; +}; + +struct sxe2_com_vma_device { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +#define SXE2_DRV_MSG_INFO_SIZE (sizeof(struct drv_msg_info)) +#define SXE2_COM_DRV_REQ_MSG_HDR(param) ((struct drv_msg_info *)(param)->req_buff) + +#define SXE2_DRV_MSG_MAX_SIZE (8192) +#define SXE2_DRV_MSG_MAGIC_CODE (0x56781234) +#define SXE2_MOD_DRV (1) +#define SXE2_SUB_MOD_DEV (1) + +#define MODULE_ID_SHIFT (24) +#define SUB_MODULE_ID_SHIFT (16) +#define ERROR_INDEX_MASK (0xFFFF0000) +#define SXE2_MAKE_ERR_CODE_INDEX(module, sub_module) \ + ((((u32)((module) << MODULE_ID_SHIFT)) | ((u32)((sub_module) << SUB_MODULE_ID_SHIFT))) & \ + ERROR_INDEX_MASK) + +#define SXE2_COM_MODE_NAME_SIZE 64 +#define SXE2_COM_KERNEL_MODE_NAME "kernel" +#define SXE2_COM_DPDK_MODE_NAME "dpdk" +#define SXE2_COM_RDMA_MODE_NAME "rdma" +#define SXE2_COM_MIXED_MODE_NAME "mixed" +#define SXE2_COM_UNDEFINED_MODE_NAME "undefined" + +void sxe2_com_deinit(struct sxe2_com_context *com_ctxt); + +s32 sxe2_com_init(struct sxe2_com_context *com_ctxt, void *adapter, struct sxe2_com_ops *ops); + +s32 sxe2_com_adapter_register(enum sxe2_func_type func_type); + +void sxe2_com_adapter_unregister(void); + +s32 sxe2_com_mode_get(void *adapter); + +void sxe2_com_disable(struct sxe2_com_context *com_ctxt); + +void sxe2_com_enable(struct sxe2_com_context *com_ctxt); + +void sxe2_com_info_print(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.c b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.c new file mode 100644 index 
0000000000000000000000000000000000000000..b65bc92b00ff7a43843894848961f118eb333f89 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_dma.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_com_dma.h" +#include "sxe2_compat.h" + +struct sxe2_com_batch { + struct page **pages; + struct page *fallback_page; + int capacity; + int size; + int offset; +}; + +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int put_pfn(unsigned long pfn, int prot) +{ + if (!is_invalid_reserved_pfn(pfn)) { + struct page *page = pfn_to_page(pfn); +#ifdef NOT_SUPP_UNPIN_USER_PAGE + if (prot & IOMMU_WRITE) + SetPageDirty(page); + put_page(page); +#else + unpin_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE); +#endif + return 1; + } + return 0; +} + +#define SXE2_COM_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void sxe2_com_batch_init(struct sxe2_com_batch *batch) +{ + batch->size = 0; + batch->offset = 0; + + batch->pages = (struct page **)__get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = SXE2_COM_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void sxe2_com_batch_unpin(struct sxe2_com_batch *batch, struct sxe2_com_dma *dma) +{ + while (batch->size) { + unsigned long pfn = page_to_pfn(batch->pages[batch->offset]); + + put_pfn(pfn, dma->prot); + batch->offset++; + batch->size--; + } +} + +static void sxe2_com_batch_fini(struct sxe2_com_batch *batch) +{ + if (batch->capacity == SXE2_COM_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + +static int follow_fault_pfn(struct vm_area_struct *vma, + struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ +#ifdef NOT_SUPP_FOLLOW_PTE + (void)mm; + (void)write_fault; + *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; + return 0; +#else + pte_t *ptep; + spinlock_t *ptl; + bool unlocked; + int ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) { + unlocked = false; + ret = fixup_user_fault(mm, vaddr, + FAULT_FLAG_REMOTE | (write_fault ? 
FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl); + if (ret) + return ret; + } + + if (write_fault && !pte_write(*ptep)) + ret = -EFAULT; + else + *pfn = pte_pfn(*ptep); + + pte_unmap_unlock(ptep, ptl); + return ret; +#endif +} + +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) +{ + struct vm_area_struct *vma; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + mmap_read_lock(mm); + +#ifndef NOT_SUPP_PIN_USER_PAGE + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, pages, NULL, NULL); +#elif !defined(NOT_SUPP_FOLL_LONGTERM) + ret = get_user_pages(vaddr, npages, flags | FOLL_LONGTERM, pages, NULL); +#elif !defined(NOT_SUPP_GET_USER_PAGES_LONGTERM) + ret = get_user_pages_longterm(vaddr, npages, flags, pages, NULL); +#else + ret = get_user_pages_fast(vaddr, npages, !!(prot & IOMMU_WRITE), pages); +#endif + + if (ret > 0) { + *pfn = page_to_pfn(pages[0]); + goto done; + } + + vaddr = untagged_addr(vaddr); + +retry: + vma = vma_lookup(mm, vaddr); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } + } +done: + mmap_read_unlock(mm); + return ret; +} + +static long sxe2_com_pin_pages_remote(struct sxe2_com_dma *dma, unsigned long vaddr, long npage, + unsigned long *pfn_base, struct sxe2_com_batch *batch) +{ + unsigned long pfn = 0; + struct mm_struct *mm = current->mm; + long ret = 0; + long pinned = 0; + bool rsvd = 0; + dma_addr_t iova = vaddr - dma->vaddr + dma->iova; + + if (!mm) + return -ENODEV; + + if (batch->size) { + *pfn_base = page_to_pfn(batch->pages[batch->offset]); + pfn = *pfn_base; + rsvd = is_invalid_reserved_pfn(*pfn_base); + } else { + *pfn_base = 0; + } + + while (npage) { + if (!batch->size) { + long req_pages = min_t(long, npage, batch->capacity); + + ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot, &pfn, batch->pages); + if (ret < 0) + goto unpin_out; + + batch->size = ret; + batch->offset = 0; + + if (!*pfn_base) { + *pfn_base = pfn; + rsvd = is_invalid_reserved_pfn(*pfn_base); + } + } + + while (true) { + if (pfn != *pfn_base + pinned || rsvd != is_invalid_reserved_pfn(pfn)) + goto unpin_out; + + pinned++; + npage--; + vaddr += PAGE_SIZE; + iova += PAGE_SIZE; + batch->offset++; + batch->size--; + + if (!batch->size) + break; + + pfn = page_to_pfn(batch->pages[batch->offset]); + } + } + +unpin_out: + if (batch->size == 1 && !batch->offset) { + put_pfn(pfn, dma->prot); + batch->size = 0; + } + + if (ret < 0) { + if (pinned && !rsvd) { + for (pfn = *pfn_base; pinned; pfn++, pinned--) + put_pfn(pfn, dma->prot); + } + sxe2_com_batch_unpin(batch, dma); + + return ret; + } + + return pinned; +} + +static int sxe2_com_iommu_map(struct sxe2_com_context *com_ctxt, + dma_addr_t iova, unsigned long pfn, long npage, + int prot) +{ +#ifdef NOT_SUPP_IOMMU_CAPABLE + prot |= IOMMU_CACHE; +#else + if (iommu_capable(SXE2_COM_TO_BUS(com_ctxt), IOMMU_CAP_CACHE_COHERENCY)) + prot |= IOMMU_CACHE; +#endif + + return iommu_map(SXE2_COM_TO_DOMAIN(com_ctxt), + iova, (phys_addr_t)pfn << PAGE_SHIFT, npage << PAGE_SHIFT, prot); +} + +static long sxe2_com_unpin_pages_remote(struct sxe2_com_dma *dma, + dma_addr_t iova, unsigned long pfn, long npage) +{ + long unlocked 
= 0; + long i; + + for (i = 0; i < npage; i++, iova += PAGE_SIZE) { + if (put_pfn(pfn++, dma->prot)) + unlocked++; + } + + return unlocked; +} + +static size_t unmap_unpin(struct sxe2_com_context *com_ctxt, + struct sxe2_com_dma *dma, dma_addr_t *iova, size_t len, + phys_addr_t phys, long *unlocked) +{ + size_t unmapped = iommu_unmap(SXE2_COM_TO_DOMAIN(com_ctxt), *iova, len); + + if (unmapped) { + *unlocked += sxe2_com_unpin_pages_remote(dma, *iova, phys >> PAGE_SHIFT, + unmapped >> PAGE_SHIFT); + *iova += unmapped; + cond_resched(); + } + return unmapped; +} + +static long sxe2_com_unmap_unpin(struct sxe2_com_context *com_ctxt, struct sxe2_com_dma *dma) +{ + dma_addr_t iova = dma->iova, end = dma->iova + dma->size; + long unlocked = 0; + + if (!dma->size) + return 0; + + while (iova < end) { + size_t unmapped; + size_t len = PAGE_SIZE; + phys_addr_t phys; + + phys = iommu_iova_to_phys(SXE2_COM_TO_DOMAIN(com_ctxt), iova); + if (WARN_ON(!phys)) { + iova += PAGE_SIZE; + continue; + } + + unmapped = unmap_unpin(com_ctxt, dma, &iova, len, phys, &unlocked); + if (WARN_ON(!unmapped)) + break; + } + + return unlocked; +} + +static int sxe2_com_pin_map_dma(struct sxe2_com_context *com_ctxt, + struct sxe2_com_dma *dma, size_t map_size) +{ + dma_addr_t iova = dma->iova; + unsigned long vaddr = dma->vaddr; + struct sxe2_com_batch batch; + size_t size = map_size; + long npage; + unsigned long pfn; + int ret = 0; + + sxe2_com_batch_init(&batch); + + while (size) { + npage = sxe2_com_pin_pages_remote(dma, vaddr + dma->size, + size >> PAGE_SHIFT, &pfn, &batch); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; + break; + } + + ret = sxe2_com_iommu_map(com_ctxt, iova + dma->size, pfn, npage, dma->prot); + if (ret) { + sxe2_com_unpin_pages_remote(dma, iova + dma->size, pfn, npage); + sxe2_com_batch_unpin(&batch, dma); + break; + } + + size -= npage << PAGE_SHIFT; + dma->size += npage << PAGE_SHIFT; + } + + sxe2_com_batch_fini(&batch); + + return ret; +} + +STATIC struct sxe2_com_dma *sxe2_com_dma_find_unlock(struct sxe2_com_context *com_ctxt, + dma_addr_t start, u32 size) +{ + struct sxe2_com_dma *dma; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + list_for_each_entry(dma, &dma_dev->buffer_list, list) { + if ((start + size > dma->iova) && (start < dma->iova + dma->size)) + return dma; + } + + return NULL; +} + +static s32 sxe2_com_dma_alloc(struct sxe2_com_context *com_ctxt, + struct sxe2_ioctl_iommu_dma_map *map) +{ + int ret = 0; + struct sxe2_com_dma *dma = NULL; + dma_addr_t iova = map->iova; + unsigned long vaddr = map->vaddr; + size_t size = map->size; + size_t pgsize; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + if (!SXE2_COM_IOMMU_SUPP(com_ctxt)) + return -EOPNOTSUPP; + + if (map->size != size || map->vaddr != vaddr || map->iova != iova) { + ret = -EINVAL; + goto l_out; + } + + pgsize = (size_t)1 << __ffs(PAGE_MASK); + if (!size || (size | iova | vaddr) & (pgsize - 1)) { + ret = -EINVAL; + goto l_out; + } + + if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { + ret = -EINVAL; + goto l_out; + } + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) { + ret = -ENOMEM; + goto l_out; + } + + dma->vaddr = map->vaddr; + dma->iova = iova; + dma->prot = IOMMU_WRITE | IOMMU_READ; + + mutex_lock(&dma_dev->lock); + if (sxe2_com_dma_find_unlock(com_ctxt, iova, size)) { + ret = -EEXIST; + goto l_free; + } + + ret = sxe2_com_pin_map_dma(com_ctxt, dma, size); + if (ret) + goto l_free; + + list_add(&dma->list, &dma_dev->buffer_list); + + ret = 0; + goto l_unlock; + 
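+
+	/* l_free is reached only before the descriptor is linked on
+	 * buffer_list; note that if sxe2_com_pin_map_dma() failed part-way,
+	 * ranges pinned and mapped by its earlier iterations are not
+	 * unwound here.
+	 */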
+l_free: + kfree(dma); +l_unlock: + mutex_unlock(&dma_dev->lock); +l_out: + LOG_INFO_BDF_COM("vaddr: 0x%lx, iova: 0x%llx, size: %zu, ret:%d\n", vaddr, iova, size, + ret); + return ret; +} + +static int sxe2_com_dma_free(struct sxe2_com_context *com_ctxt, dma_addr_t iova, u8 free_all) +{ + int ret = free_all ? 0 : -ENOENT; + struct sxe2_com_dma *dma; + struct sxe2_com_dma *tmp; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + mutex_lock(&dma_dev->lock); + list_for_each_entry_safe(dma, tmp, &dma_dev->buffer_list, list) { + if (free_all) { + list_del(&dma->list); + sxe2_com_unmap_unpin(com_ctxt, dma); + kfree(dma); + continue; + } + + if (dma->iova == iova) { + list_del(&dma->list); + sxe2_com_unmap_unpin(com_ctxt, dma); + kfree(dma); + ret = 0; + break; + } + } + + mutex_unlock(&dma_dev->lock); + LOG_INFO_BDF_COM("iova: 0x%llx, free_all %d, ret:%d\n", iova, free_all, ret); + return ret; +} + +s32 sxe2_com_dma_map(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_iommu_dma_map map = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_MAP); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", + com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_MAP); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&map, (void __user *)arg, arg_sz)) + return -EFAULT; + + ret = sxe2_com_dma_alloc(com_ctxt, &map); + + return ret; +} + +s32 sxe2_com_dma_unmap(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_iommu_dma_unmap map = {}; + u32 arg_sz; + + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_UNMAP); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", + com_ctxt->dpdk_ver, SXE2_DEVICE_DMA_UNMAP); + return ret; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(&map, (void __user *)arg, arg_sz)) + return -EFAULT; + + return sxe2_com_dma_free(com_ctxt, map.iova, false); +} + +s32 sxe2_com_dma_clear(struct sxe2_com_context *com_ctxt) +{ + return sxe2_com_dma_free(com_ctxt, 0xFF, true); +} + +void sxe2_com_dma_print(struct sxe2_com_context *com_ctxt) +{ + struct sxe2_com_dma *dma; + struct sxe2_com_dma_dev *dma_dev = &com_ctxt->dma_dev; + + mutex_lock(&dma_dev->lock); + LOG_DEV_INFO_COM("dma map list:\n"); + list_for_each_entry(dma, &dma_dev->buffer_list, list) { + LOG_DEV_INFO_COM("\tdma vaddr: 0x%lx, iova:0x%llx, size:%zu\n", + dma->vaddr, dma->iova, dma->size); + } + mutex_unlock(&dma_dev->lock); +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.h b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.h new file mode 100644 index 0000000000000000000000000000000000000000..d753cc74365304b5053334871975308b7c1840d1 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_dma.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_dma.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_COM_DMA_H__ +#define __SXE2_COM_DMA_H__ + +#include + +struct sxe2_com_context; + +#define SXE2_COM_TO_DOMAIN(com_ctxt) (iommu_get_domain_for_dev((com_ctxt)->dma_dev.dev)) +#define SXE2_COM_IOMMU_SUPP(com_ctxt) \ + (SXE2_COM_TO_DOMAIN(com_ctxt) && SXE2_COM_TO_DOMAIN(com_ctxt)->type & __IOMMU_DOMAIN_PAGING) +#define SXE2_COM_TO_BUS(com_ctxt) (((com_ctxt)->dma_dev.dev)->bus) + +struct sxe2_com_dma { + struct list_head list; + + unsigned long vaddr; + size_t size; + int prot; + dma_addr_t iova; +}; + +struct sxe2_com_dma_dev { + struct device *dev; + struct mutex lock; + struct list_head buffer_list; +}; + +s32 sxe2_com_dma_map(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_dma_unmap(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_dma_clear(struct sxe2_com_context *com_ctxt); + +void sxe2_com_dma_print(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.c b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..23d60eb6a691814d9504deeff3c93fac7ac606dc --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_com_irq.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_com_irq.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_compat.h" + +STATIC irqreturn_t sxe2_com_io_irq_handler(int irq, void *dev) +{ + struct sxe2_com_irq_entry *entry = dev; + + if (likely(entry->trigger)) + eventfd_signal(entry->trigger, 1); + + return IRQ_HANDLED; +} + +static s32 sxe2_com_io_set_trigger(struct sxe2_com_context *com_ctxt, int index, int fd, irq_handler_t handler) +{ + struct sxe2_com_irq_entry *entry = &com_ctxt->irqs.entry[index]; + struct eventfd_ctx *trigger; + int ret; + + if (entry->trigger) { + synchronize_irq(entry->vector); + free_irq(entry->vector, entry); + kfree(entry->name); + eventfd_ctx_put(entry->trigger); + entry->trigger = NULL; + } + + if (fd < 0) + return 0; + + entry->name = kasprintf(GFP_KERNEL, "sxe2-com-irq[%d](%s)", entry->vector, com_ctxt->com_log_param.dev_name); + if (!entry->name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + kfree(entry->name); + return PTR_ERR(trigger); + } + + entry->trigger = trigger; + + ret = request_irq(entry->vector, handler, 0, entry->name, entry); + if (ret) { + kfree(entry->name); + eventfd_ctx_put(trigger); + entry->trigger = NULL; + return ret; + } + + return 0; +} + +STATIC s32 sxe2_com_io_irq_init(struct sxe2_com_context *com_ctxt) +{ + s32 ret; + u32 num_irq; + u32 i; + + if (!com_ctxt->ops || !com_ctxt->ops->get_irq_num || !com_ctxt->ops->get_vector) + return -EFAULT; + + ret = com_ctxt->ops->get_irq_num(com_ctxt->adapter); + if (ret <= 0) { + LOG_ERROR_BDF_COM("get irq num failed: %d\n", ret); + return -ENOMEM; + } + num_irq = ret; + + com_ctxt->irqs.entry = kcalloc(num_irq, sizeof(*com_ctxt->irqs.entry), GFP_KERNEL); + if (!com_ctxt->irqs.entry) + return -ENOMEM; + + for (i = 0; i < num_irq; i++) { + com_ctxt->irqs.entry[i].vector = com_ctxt->ops->get_vector(com_ctxt->adapter, i); + if 
(com_ctxt->irqs.entry[i].vector < 0) + goto l_err; + } + com_ctxt->irqs.num_irqs = num_irq; + + return 0; +l_err: + kfree(com_ctxt->irqs.entry); + com_ctxt->irqs.entry = NULL; + return -EINVAL; +} + +STATIC void sxe2_com_io_irq_clear(struct sxe2_com_context *com_ctxt) +{ + int i; + + for (i = 0; i < com_ctxt->irqs.num_irqs; i++) + sxe2_com_io_set_trigger(com_ctxt, i, -1, NULL); +} + +STATIC void sxe2_com_io_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_io_irq_clear(com_ctxt); + + com_ctxt->irqs.num_irqs = 0; + kfree(com_ctxt->irqs.entry); +} + +s32 sxe2_com_io_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_irq_set param = {}; + u32 arg_sz; + u32 i; + s32 *fd; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_IO_IRQS_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("get arg sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_IO_IRQS_REQ); + ret = -EINVAL; + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if (param.cnt == 0 || param.cnt > com_ctxt->irqs.num_irqs || + param.base_irq_in_com + param.cnt - 1 > com_ctxt->irqs.num_irqs - 1 || param.event_fd == NULL) { + ret = -EINVAL; + goto l_end; + } + + fd = memdup_user((void __user *)(param.event_fd), param.cnt * sizeof(*param.event_fd)); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto l_end; + } + + for (i = 0; i < param.cnt; i++) { + ret = sxe2_com_io_set_trigger(com_ctxt, i + param.base_irq_in_com, fd[i], sxe2_com_io_irq_handler); + if (ret) + goto l_roll_back; + } + + goto l_free; + +l_roll_back: + for (i = 0; i < param.cnt; i++) + (void)sxe2_com_io_set_trigger(com_ctxt, i + param.base_irq_in_com, -1, NULL); +l_free: + kfree(fd); +l_end: + LOG_INFO_BDF_COM("cnt:%u:%u, base_irq_in_com: %u, ret: %d.\n", param.cnt, + com_ctxt->irqs.num_irqs, param.base_irq_in_com, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +STATIC s32 sxe2_com_irq_notifier_register(struct sxe2_com_context *com_ctxt, struct sxe2_nb *irq_nb) +{ + return atomic_notifier_chain_register(&com_ctxt->irqs.irq_nh, &irq_nb->nb); +} +STATIC s32 sxe2_com_irq_notifier_unregister(struct sxe2_com_context *com_ctxt, struct sxe2_nb *irq_nb) +{ + return atomic_notifier_chain_unregister(&com_ctxt->irqs.irq_nh, &irq_nb->nb); +} +s32 sxe2_com_irq_notifier_call_chain(struct sxe2_com_context *com_ctxt, enum sxe2_com_event_cause ec) +{ + SXE2_BUG_ON(ec >= SXE2_COM_EC_MAX); + if (ec >= SXE2_COM_EC_MAX) + return -EFAULT; + + return atomic_notifier_call_chain(&com_ctxt->irqs.irq_nh, ec, NULL); +} + +STATIC u64 sxe2_com_event_cause_rc(struct sxe2_com_context *com_ctxt) +{ + u64 ec; + unsigned long flags; + + spin_lock_irqsave(&com_ctxt->irqs.evt_lock, flags); + ec = com_ctxt->irqs.evt_cause; + com_ctxt->irqs.evt_cause = 0; + spin_unlock_irqrestore(&com_ctxt->irqs.evt_lock, flags); + + return ec; +} + +STATIC s32 sxe2_com_event_nb_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct sxe2_com_irqs_ctxt *irqs = sxe2_nb_cof(nb, struct sxe2_com_irqs_ctxt, evt_nb); + enum sxe2_com_event_cause ec = (enum sxe2_com_event_cause)event; + unsigned long flags; + + SXE2_BUG_ON(ec >= SXE2_COM_EC_MAX); + if (ec >= SXE2_COM_EC_MAX) + return NOTIFY_BAD; + + if (ec == SXE2_COM_EC_RESET) + return NOTIFY_DONE; + + spin_lock_irqsave(&irqs->evt_lock, flags); + if (!test_bit(ec, (unsigned long *)&irqs->evt_sub_map)) { + spin_unlock_irqrestore(&irqs->evt_lock, flags); + 
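+		/* event not in the user-supplied subscription mask; drop it */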
return NOTIFY_DONE; + } + + set_bit(ec, (unsigned long *)&irqs->evt_cause); + spin_unlock_irqrestore(&irqs->evt_lock, flags); + + eventfd_signal(irqs->evt_trigger, 1); + + return NOTIFY_OK; +} + +STATIC void sxe2_com_event_irq_init(struct sxe2_com_context *com_ctxt) +{ + spin_lock_init(&com_ctxt->irqs.evt_lock); + com_ctxt->irqs.evt_sub_map = 0; + com_ctxt->irqs.evt_trigger = NULL; + + SXE2_NB_INIT(&com_ctxt->irqs.evt_nb, sxe2_com_event_nb_call, 0); +} + +STATIC void sxe2_com_event_irq_clear(struct sxe2_com_context *com_ctxt) +{ + com_ctxt->irqs.evt_sub_map = 0; + + if (com_ctxt->irqs.evt_trigger) { + eventfd_ctx_put(com_ctxt->irqs.evt_trigger); + com_ctxt->irqs.evt_trigger = NULL; + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.evt_nb); + } +} + +STATIC void sxe2_com_event_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_event_irq_clear(com_ctxt); + SXE2_NB_INIT(&com_ctxt->irqs.evt_nb, NULL, 0); +} + +s32 sxe2_com_event_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_other_evt_set param = {}; + u32 arg_sz; + struct eventfd_ctx *trigger; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_EVT_IRQ_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_EVT_IRQ_REQ); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if ((param.eventfd >= 0 && com_ctxt->irqs.evt_trigger) || (param.eventfd < 0 && !com_ctxt->irqs.evt_trigger) || + (param.eventfd >= 0 && param.filter_table == 0)) { + ret = -EINVAL; + goto l_end; + } + + if (param.eventfd < 0) { + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.evt_nb); + eventfd_ctx_put(com_ctxt->irqs.evt_trigger); + com_ctxt->irqs.evt_trigger = NULL; + com_ctxt->irqs.evt_sub_map = 0; + goto l_end; + } + + trigger = eventfd_ctx_fdget(param.eventfd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto l_end; + } + com_ctxt->irqs.evt_trigger = trigger; + com_ctxt->irqs.evt_sub_map = param.filter_table; + sxe2_com_irq_notifier_register(com_ctxt, &com_ctxt->irqs.evt_nb); + +l_end: + LOG_INFO_BDF_COM("eventfd:%d, filter_table: %llu, ret: %d.\n", param.eventfd, + param.filter_table, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +s32 sxe2_com_event_cause_get(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_other_evt_get ec = {}; + u32 arg_sz; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_EVT_CAUSE_GET); + if (ret < 0) { + LOG_ERROR_BDF_COM("get arg sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + SXE2_DEVICE_EVT_CAUSE_GET); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + ec.evt_cause = sxe2_com_event_cause_rc(com_ctxt); + + if (copy_to_user((void __user *)arg, &ec, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + LOG_INFO_BDF_COM("ec:%llu, ret: %d.\n", ec.evt_cause, ret); + +l_end: + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +s32 sxe2_com_reset_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg) +{ + s32 ret = 0; + struct sxe2_ioctl_reset_sub_set param = {}; + u32 arg_sz; + struct eventfd_ctx *trigger; + + mutex_lock(&com_ctxt->com_lock); + ret = sxe2_com_get_arg_sz(com_ctxt->dpdk_ver, SXE2_DEVICE_RST_IRQ_REQ); + if (ret < 0) { + LOG_ERROR_BDF_COM("sxe2_com_get_arg_sz failed, ver: %d, cmd:%d\n", com_ctxt->dpdk_ver, + 
SXE2_DEVICE_RST_IRQ_REQ); + goto l_end; + } + + arg_sz = ret; + ret = 0; + + if (copy_from_user(¶m, (void __user *)arg, arg_sz)) { + ret = -EFAULT; + goto l_end; + } + + if ((param.eventfd >= 0 && com_ctxt->irqs.rst_trigger) || (param.eventfd < 0 && !com_ctxt->irqs.rst_trigger)) { + ret = -EINVAL; + goto l_end; + } + + if (param.eventfd < 0) { + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.rst_nb); + eventfd_ctx_put(com_ctxt->irqs.rst_trigger); + com_ctxt->irqs.rst_trigger = NULL; + goto l_end; + } + + trigger = eventfd_ctx_fdget(param.eventfd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto l_end; + } + com_ctxt->irqs.rst_trigger = trigger; + sxe2_com_irq_notifier_register(com_ctxt, &com_ctxt->irqs.rst_nb); + +l_end: + LOG_INFO_BDF_COM("eventfd:%d, ret: %d.\n", param.eventfd, ret); + mutex_unlock(&com_ctxt->com_lock); + return ret; +} + +static s32 sxe2_com_reset_nb_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct sxe2_com_irqs_ctxt *irqs = sxe2_nb_cof(nb, struct sxe2_com_irqs_ctxt, rst_nb); + enum sxe2_com_event_cause ec = (enum sxe2_com_event_cause)event; + + if (ec != SXE2_COM_EC_RESET) + return NOTIFY_DONE; + + eventfd_signal(irqs->rst_trigger, 1); + + return NOTIFY_OK; +} + +STATIC void sxe2_com_reset_irq_init(struct sxe2_com_context *com_ctxt) +{ + com_ctxt->irqs.rst_trigger = NULL; + SXE2_NB_INIT(&com_ctxt->irqs.rst_nb, sxe2_com_reset_nb_call, 0); +} +STATIC void sxe2_com_reset_irq_clear(struct sxe2_com_context *com_ctxt) +{ + if (com_ctxt->irqs.rst_trigger) { + eventfd_ctx_put(com_ctxt->irqs.rst_trigger); + com_ctxt->irqs.rst_trigger = NULL; + sxe2_com_irq_notifier_unregister(com_ctxt, &com_ctxt->irqs.rst_nb); + } +} + +STATIC void sxe2_com_reset_irq_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_reset_irq_clear(com_ctxt); + + SXE2_NB_INIT(&com_ctxt->irqs.rst_nb, NULL, 0); +} + +s32 sxe2_com_irqs_init(struct sxe2_com_context *com_ctxt) +{ + (void)sxe2_com_io_irq_init(com_ctxt); + + sxe2_com_event_irq_init(com_ctxt); + + sxe2_com_reset_irq_init(com_ctxt); + + return 0; +} + +void sxe2_com_irqs_clear(struct sxe2_com_context *com_ctxt) +{ + mutex_lock(&com_ctxt->com_lock); + + sxe2_com_reset_irq_clear(com_ctxt); + sxe2_com_event_irq_clear(com_ctxt); + sxe2_com_io_irq_clear(com_ctxt); + mutex_unlock(&com_ctxt->com_lock); +} + +void sxe2_com_irqs_deinit(struct sxe2_com_context *com_ctxt) +{ + sxe2_com_reset_irq_deinit(com_ctxt); + sxe2_com_event_irq_deinit(com_ctxt); + sxe2_com_io_irq_deinit(com_ctxt); +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.h b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..df2273da83f853fac3a8276d50ed9fd6907cdac7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_irq.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_irq.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_COM_IRQ_H__ +#define __SXE2_COM_IRQ_H__ + +#include + +#include "sxe2_ioctl_chnl.h" + +struct sxe2_com_context; + +struct sxe2_nb { + struct notifier_block nb; + u32 event_type; + void *priv; +}; + +#define sxe2_nb_cof(ptr, type, member) \ + (container_of(container_of(ptr, struct sxe2_nb, nb), type, member)) + +#define sxe2_nb_priv(ptr) (((struct sxe2_nb *)(container_of(ptr, struct sxe2_nb, nb)))->priv) + +#define SXE2_NB_INIT(name, handler, event) \ +do { \ + typeof(name) _name = (name); \ + _name->nb.notifier_call = handler; \ + _name->event_type = event; \ +} while (0) + +struct sxe2_com_irq_entry { + s32 vector; + char *name; + struct eventfd_ctx *trigger; +}; + +struct sxe2_com_irqs_ctxt { + struct mutex lock; + struct atomic_notifier_head irq_nh; + u32 num_irqs; + struct sxe2_com_irq_entry *entry; + spinlock_t evt_lock; + struct eventfd_ctx *evt_trigger; + u64 evt_cause; + u64 evt_sub_map; + struct sxe2_nb evt_nb; + + struct eventfd_ctx *rst_trigger; + struct sxe2_nb rst_nb; +}; + +s32 sxe2_com_irq_notifier_call_chain(struct sxe2_com_context *com_ctxt, + enum sxe2_com_event_cause ec); + +s32 sxe2_com_io_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_event_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); +s32 sxe2_com_reset_irq_req(struct sxe2_com_context *com_ctxt, unsigned long arg); + +s32 sxe2_com_event_cause_get(struct sxe2_com_context *com_ctxt, unsigned long arg); + +void sxe2_com_irqs_deinit(struct sxe2_com_context *com_ctxt); + +s32 sxe2_com_irqs_init(struct sxe2_com_context *com_ctxt); +void sxe2_com_irqs_clear(struct sxe2_com_context *com_ctxt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.c b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.c new file mode 100644 index 0000000000000000000000000000000000000000..2f693541e1e77b183373a9b9590563ffbf60f1f9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_com_ver_compat.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_com_cdev.h" +#include "sxe2_com_ver_compat.h" + +#define SXE2_COM_VER_ARG_SZ_END { SXE2_COM_INVAL_U32, SXE2_COM_INVAL_U32 } + +struct sxe2_com_ver_arg_sz io_irq_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_irq_set, event_fd) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz other_evt_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_other_evt_set, filter_table) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz reset_irqs_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_reset_sub_set, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz evt_cause_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_other_evt_get, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz dma_map_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_iommu_dma_map, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz dma_unmap_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_ioctl_iommu_dma_unmap, iova) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_ver_arg_sz cmd_send_sz[] = { + { SXE2_COM_VER, offsetofend(struct sxe2_drv_cmd_params, resv) }, + SXE2_COM_VER_ARG_SZ_END, +}; + +struct sxe2_com_cmd_arg_sz g_cmd_arg_sz[] = { + [SXE2_DEVICE_IO_IRQS_REQ] = { io_irq_sz }, + [SXE2_DEVICE_EVT_IRQ_REQ] = { other_evt_sz }, + [SXE2_DEVICE_RST_IRQ_REQ] = { reset_irqs_sz }, + [SXE2_DEVICE_DMA_MAP] = { dma_map_sz }, + [SXE2_DEVICE_DMA_UNMAP] = { dma_unmap_sz }, + [SXE2_DEVICE_PASSTHROUGH] = { cmd_send_sz }, + [SXE2_DEVICE_EVT_CAUSE_GET] = { evt_cause_sz }, + [SXE2_DEVICE_MAX] = {}, +}; + +s32 sxe2_com_get_arg_sz(u32 ver, u32 cmd) +{ + struct sxe2_com_ver_arg_sz *ver_arg_sz; + u32 minor_ver = SXE2_MK_VER_MINOR(ver); + + if (cmd == SXE2_DEVICE_HANDSHAKE) + return sizeof(struct sxe2_ioctl_cmd_common_hdr); + + if (cmd >= SXE2_DEVICE_MAX) + return -EINVAL; + + if (ver == SXE2_COM_INVAL_U32) + return -EINVAL; + + ver_arg_sz = g_cmd_arg_sz[cmd].ver_arg_sz; + + if (ver_arg_sz->arg_size == 0) + return ver_arg_sz->arg_size; + + while (ver_arg_sz->ver != SXE2_COM_INVAL_U32) { + if (SXE2_MK_VER_MINOR(ver_arg_sz->ver) <= minor_ver) + break; + + ver_arg_sz++; + } + + return ver_arg_sz->arg_size == SXE2_COM_INVAL_U32 ? -EINVAL : ver_arg_sz->arg_size; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.h b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5657807ff99613e66fdb6c0041f8f41f1db0ff4b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/ioctl/sxe2_com_ver_compat.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2_com_ver_compat.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2_COM_VER_COMPAT_H__
+#define __SXE2_COM_VER_COMPAT_H__
+
+#include "sxe2_ioctl_chnl.h"
+
+struct sxe2_com_cmd_arg_sz {
+	struct sxe2_com_ver_arg_sz *ver_arg_sz;
+};
+
+struct sxe2_com_ver_arg_sz {
+	u32 ver;
+	u32 arg_size;
+};
+
+s32 sxe2_com_get_arg_sz(u32 ver, u32 cmd);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.c b/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.c
new file mode 100644
index 0000000000000000000000000000000000000000..c7c959c4d691b02a48bf6c6013a1a478006412bc
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.c
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_log.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "sxe2_log.h"
+#include "sxe2_compat.h"
+
+#if (defined SXE2_CFG_DEBUG && defined __KERNEL__) || (defined SXE2_DRIVER_TRACE)
+
+int time_for_file_name(char *buff, int buf_len)
+{
+	struct timespec64 tv;
+	struct tm td;
+
+	ktime_get_real_ts64(&tv);
+	time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td);
+
+	return snprintf(buff, buf_len, "%04ld-%02d-%02d_%02d:%02d:%02d",
+			td.tm_year + 1900, td.tm_mon + 1, td.tm_mday,
+			td.tm_hour, td.tm_min, td.tm_sec);
+}
+
+int sxe2_file_write(struct file *file, char *buf, int len)
+{
+	int ret = 0;
+
+	void *journal;
+
+	if (!file)
+		return 0;
+
+	journal = current->journal_info;
+	current->journal_info = NULL;
+
+	do {
+#ifdef KERNEL_WRITE_POS_LOFF
+		ret = kernel_write(file, buf, len, file->f_pos);
+#else
+		ret = kernel_write(file, buf, len, &file->f_pos);
+#endif
+	} while (ret == -EINTR);
+
+	if (ret >= 0)
+		fsnotify_modify(file);
+
+	current->journal_info = journal;
+
+	return ret;
+}
+#endif
+
+#if !defined SXE2_CFG_RELEASE || !defined __KERNEL__
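+/*
+ * Bounds-checked vsnprintf wrapper: callers pass "PER_CPU_PAGE_SIZE - len"
+ * style sizes, so a zero or negative remainder is treated as "no room"
+ * instead of being handed to vsnprintf as a huge unsigned value.
+ */
+static s32 sxe2_snprintf(char *buf, size_t size, const char *fmt, ...)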
+{ + va_list args; + long size_check = (long)size; + s32 len = 0; + + if (size_check <= 0) + return len; + + va_start(args, fmt); + len = vsnprintf(buf, size, fmt, args); + va_end(args); + + return len; +} +#endif + +#if defined SXE2_CFG_DEBUG && defined __KERNEL__ +#define FILE_NAME_SIZE 128 +#define SXE2_KLOG_OUT_WAIT (5 * HZ) +#define SWITCH_FILE +#define LOG_PATH_LEN 100 +#define DRV_LOG_FILE_SIZE_MIN_MB 10 +#define DRV_LOG_FILE_SIZE_MAX_MB 200 + +struct sxe2_debug_t g_sxe2_debug; +char g_log_path_str[LOG_PATH_LEN] = {0}; +char g_log_path_bin[LOG_PATH_LEN] = {0}; + +static char g_log_path[80] = {0}; +module_param_string(g_log_path, g_log_path, 80, 0644); +MODULE_PARM_DESC(g_log_path, + "the path host driver will be saved(<80 chars) Default: /var/log"); + +static u32 g_log_file_size = 200; +module_param(g_log_file_size, uint, 0644); +MODULE_PARM_DESC(g_log_file_size, + "single driver log file size(10MB ~ 200MB), Default: 200, Unit: MB"); + +static u32 g_log_space_size; +module_param(g_log_space_size, uint, 0644); +MODULE_PARM_DESC(g_log_space_size, + "the space allowed host driver log to be store,\t" + "Default: 0(unlimited), Unit: MB"); + +static u32 g_log_tty; +module_param(g_log_tty, uint, 0644); +MODULE_PARM_DESC(g_log_tty, + "allow driver log(ERROR, WARN, INFO) output to tty console,\t" + "Default: 0(not allowed)"); + +u32 g_sxe2_dmesg_level = LOGLEVEL_INFO; +module_param_named(dmesg_level, g_sxe2_dmesg_level, uint, 0644); +MODULE_PARM_DESC(dmesg_level, + "modify sxe2 dmesg log level. Default: INFO(release):INFO(debug)"); + +static inline int time_for_log(char *buff, int buf_len) +{ + struct timespec64 tv; + struct tm td; + + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &td); + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, + td.tm_mon + 1, td.tm_mday, td.tm_hour, + td.tm_min, td.tm_sec, tv.tv_nsec * 1000); +} + +static inline char *sxe2_stack_top(void) +{ + unsigned long *ptr = (unsigned long *)(task_thread_info(current) + 1); + + return (char *)(ptr + 1); +} + +static inline struct sxe2_thread_local_t *sxe2_thread_local_get(struct sxe2_thread_key_t *key) +{ + return (struct sxe2_thread_local_t *)(sxe2_stack_top() + key->offset); +} + +void sxe2_thread_key_create(int size, struct sxe2_thread_key_t *key) +{ + key->offset = g_sxe2_debug.key_offset; + g_sxe2_debug.key_offset += sizeof(struct sxe2_thread_local_t) + size; +} + +void *sxe2_thread_get_specific(struct sxe2_thread_key_t *key) +{ + struct sxe2_thread_local_t *local = sxe2_thread_local_get(key); + + if (local->magic != DEBUG_TRACE_MAGIC) + return NULL; + + return (void *)local->data; +} + +void sxe2_thread_clear_specific(struct sxe2_thread_key_t *key) +{ + struct sxe2_thread_local_t *local = sxe2_thread_local_get(key); + + local->magic = 0; +} + +int sxe2_filter_file_add(char *name) +{ + struct debug_file_t *file = NULL; + + file = kmalloc(sizeof(*file), GFP_ATOMIC); + if (!file) { + sxe2_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", PAGE_SIZE); + return -ENOMEM; + } + strscpy(file->name, name, sizeof(file->name)); + INIT_LIST_HEAD(&file->list); + + list_add_rcu(&file->list, &g_sxe2_debug.filter_file); + return 0; +} + +void sxe2_filter_file_del(char *filename) +{ + struct debug_file_t *file = NULL; + + list_for_each_entry_rcu(file, &g_sxe2_debug.filter_file, list) { + if (!strcmp(file->name, filename)) { + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + return; + } + } +} + +void sxe2_log_level_modify(u32 level) +{ + 
sxe2_level_set(level);
+}
+
+STATIC char *sxe2_log_path_query(void)
+{
+#ifndef __cplusplus
+	return g_log_path;
+#else
+	return NULL;
+#endif
+}
+
+STATIC u32 sxe2_log_space_size_query(void)
+{
+	return g_log_space_size;
+}
+
+STATIC u32 sxe2_log_file_size_query(void)
+{
+	return g_log_file_size;
+}
+
+STATIC void sxe2_log_file_size_modify(u32 size)
+{
+	g_log_file_size = size;
+}
+
+STATIC u32 sxe2_log_tty_query(void)
+{
+	return g_log_tty;
+}
+
+#ifndef SXE2_CFG_RELEASE
+static inline int sxe2_filter_file_print(const char *filename)
+{
+	struct debug_file_t *file;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(file, &g_sxe2_debug.filter_file, list) {
+		if (!strcmp(file->name, filename)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+static inline int sxe2_filter_func_print(const char *name)
+{
+	struct debug_func_t *func;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(func, &g_sxe2_debug.filter_func, list) {
+		if (!strcmp(func->name, name)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+#endif
+void sxe2_filter_file_clear(void)
+{
+	struct debug_file_t *file = NULL;
+
+	do {
+		file = list_first_or_null_rcu(&g_sxe2_debug.filter_file,
+					      struct debug_file_t,
+					      list);
+		if (file) {
+			list_del_rcu(&file->list);
+			synchronize_rcu();
+			kfree(file);
+		}
+	} while (file);
+}
+
+int sxe2_filter_func_add(char *name)
+{
+	struct debug_func_t *func = NULL;
+
+	func = kmalloc(sizeof(*func), GFP_ATOMIC);
+	if (!func) {
+		sxe2_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", PAGE_SIZE);
+		return -ENOMEM;
+	}
+	strscpy(func->name, name, sizeof(func->name));
+	INIT_LIST_HEAD(&func->list);
+
+	list_add_rcu(&func->list, &g_sxe2_debug.filter_func);
+	return 0;
+}
+
+void sxe2_filter_func_del(char *name)
+{
+	struct debug_func_t *func = NULL;
+
+	list_for_each_entry_rcu(func, &g_sxe2_debug.filter_func, list) {
+		if (!strcmp(func->name, name)) {
+			list_del_rcu(&func->list);
+			synchronize_rcu();
+			kfree(func);
+			return;
+		}
+	}
+}
+
+void sxe2_filter_func_clear(void)
+{
+	struct debug_func_t *func = NULL;
+
+	do {
+		func = list_first_or_null_rcu(&g_sxe2_debug.filter_func,
+					      struct debug_func_t,
+					      list);
+		if (func) {
+			list_del_rcu(&func->list);
+			synchronize_rcu();
+			kfree(func);
+		}
+	} while (func);
+}
+
+static void sxe2_file_close(struct file **file)
+{
+	filp_close(*file, NULL);
+	*file = NULL;
+}
+
+static int sxe2_file_open(struct sxe2_log_t *log, struct file **pp_file)
+{
+	struct file *file;
+	int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE;
+	int flags_rewrite = O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC;
+	int err = 0;
+	int len = 0;
+	char filename[FILE_NAME_SIZE];
+
+#ifdef SWITCH_FILE
+	memset(filename, 0, FILE_NAME_SIZE);
+	len += snprintf(filename, FILE_NAME_SIZE, "%s", log->file_path);
+	if (log->file_num == 0) {
+		time_for_file_name(filename + len, FILE_NAME_SIZE - len);
+	} else {
+		snprintf(filename + len, FILE_NAME_SIZE - len, "%04d", log->index++);
+		log->index = log->index % log->file_num;
+	}
+
+	if (log->file_num == 1 && log->file) {
+		sxe2_file_close(&log->file);
+		log->file_pos = 0;
+	}
+#else
+	memset(filename, 0, FILE_NAME_SIZE);
+	strscpy(filename, path, FILE_NAME_SIZE);
+#endif
+	if (log->file_num == 0) {
+		file = filp_open(filename, flags_new, 0666);
+	} else {
+		file = filp_open(filename, flags_rewrite, 0666);
+		if (IS_ERR(file)) {
+			err = (int)PTR_ERR(file);
+			if (err == -ENOENT)
+				file = filp_open(filename, flags_new, 0666);
+		}
+	}
+	if (IS_ERR(file)) {
+		err = (int)PTR_ERR(file);
sxe2_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", filename, err); + goto l_out; + } + mapping_set_gfp_mask(file->f_path.dentry->d_inode->i_mapping, GFP_NOFS); + + sxe2_print(KERN_INFO, NULL, "redirect file %s\n", filename); + + *pp_file = file; + +l_out: + return err; +} + +static void sxe2_file_sync(struct file *file) +{ + struct address_space *mapping; + void *journal; + int ret = 0; + int err; + + (void)ret; + (void)err; + + if (!file || !file->f_op || !file->f_op->fsync) + goto l_end; + + journal = current->journal_info; + current->journal_info = NULL; + + mapping = file->f_mapping; + + ret = filemap_fdatawrite(mapping); + + err = file->f_op->fsync(file, 0, file->f_mapping->host->i_size, 1); + + current->journal_info = journal; + +l_end: + return; +} + +static void sxe2_klog_in(struct sxe2_log_t *log, char *buf, unsigned int *length) +{ + int begin = 0; + int end = 0; + int free_size; + unsigned long flags; + unsigned int len = 0; + + spin_lock_irqsave(&log->lock, flags); + len = *length; + if (len == 0) { + spin_unlock_irqrestore(&log->lock, flags); + goto l_out; + } + len = min((unsigned int)PER_CPU_PAGE_SIZE, len); + + if (log->head > log->tail) { + sxe2_print(KERN_WARNING, NULL, "FAILURE: log head exceeds log tail\n"); + SXE2_BUG_NO_SYNC(); + } + + free_size = log->buf_size - (log->tail - log->head); + + if (free_size <= len) { + log->is_drop = 1; + *length = 0; + spin_unlock_irqrestore(&log->lock, flags); + goto l_out; + } + + begin = log->tail % log->buf_size; + end = (log->tail + len) % log->buf_size; + + if (begin < end) { + memcpy(log->buf + begin, buf, len); + } else { + memcpy(log->buf + begin, buf, log->buf_size - begin); + memcpy(log->buf, buf + log->buf_size - begin, end); + } + + log->tail = log->tail + len; + *length = 0; + spin_unlock_irqrestore(&log->lock, flags); + +l_out: + return; +} + +static void sxe2_klog_out(struct sxe2_log_t *log) +{ + int len = 0; + int rc = 0; + long long tail; + int begin; + int end; + int schedule_count_th = 0; + const int max_loop = 4096; + +#ifdef SWITCH_FILE + struct file *file = NULL; +#endif + + if (!log->file) { + rc = sxe2_file_open(log, &log->file); + if (!log->file) + return; + + log->file_pos = 0; + } + + do { + tail = log->tail; + begin = log->head % log->buf_size; + end = tail % log->buf_size; + len = 0; + rc = 0; + + schedule_count_th++; + if (schedule_count_th >= max_loop) { + schedule_count_th = 0; + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + } + + if (log->is_drop) { + rc = sxe2_file_write(log->file, DEBUG_DROP_LOG_STRING, + strlen(DEBUG_DROP_LOG_STRING)); + if (rc < 0) + break; + + log->is_drop = 0; + } + + if (begin < end) { + rc = sxe2_file_write(log->file, log->buf + begin, end - begin); + if (rc > 0) + len += rc; + } else if (begin > end) { + rc = sxe2_file_write(log->file, log->buf + begin, log->buf_size - begin); + if (rc > 0) { + len += rc; + rc = sxe2_file_write(log->file, log->buf, end); + if (rc > 0) + len += rc; + } + } + log->head += len; + log->file_pos += len; + + LOG_BUG_ON(log->head > log->tail, "FAILURE: log head exceeds log tail\n"); + } while (log->head != log->tail && rc > 0); + + if (rc < 0) { + sxe2_print(KERN_ERR, NULL, "write file %s error %d\n", log->file_path, rc); + return; + } + +#ifdef SWITCH_FILE + if (log->file_pos >= log->file_size) { + rc = sxe2_file_open(log, &file); + if (rc >= 0 && log->file && log->file_num != 1) { + sxe2_file_close(&log->file); + log->file = file; + log->file_pos = 0; + } + } +#endif +} + +static int sxe2_klog_flush(void *arg) +{ + int i; + + 
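+	/*
+	 * Flusher kthread: loops until kthread_should_stop(), waking every
+	 * SXE2_KLOG_OUT_WAIT jiffies (or sooner via wake_up_process() from
+	 * sxe2_log_string()) to drain each ring buffer to its log file.
+	 */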
while (!kthread_should_stop()) { + schedule_timeout_interruptible(SXE2_KLOG_OUT_WAIT); + + for (i = 0; i < ARRAY_SIZE(g_sxe2_debug.log); i++) + sxe2_klog_out(&g_sxe2_debug.log[i]); + } + return 0; +} + +static int sxe2_klog_init(struct sxe2_log_t *log, + long long buf_size, + char *file_path, + long long file_size, + u32 file_num) +{ + int rc = 0; + + memset(log, 0, sizeof(*log)); + spin_lock_init(&log->lock); + + log->buf = vmalloc(buf_size + PER_CPU_PAGE_SIZE); + if (!log->buf) { + rc = -ENOMEM; + goto l_end; + } + + log->file = NULL; + log->head = 0; + log->tail = 0; + log->buf_size = buf_size; + + log->file_path = file_path; + log->file_pos = 0; + log->file_size = file_size; + log->file_num = file_num; + log->index = 0; +l_end: + return rc; +} + +static void sxe2_klog_exit(struct sxe2_log_t *log) +{ + if (log->buf) + vfree(log->buf); + + if (log->file) + sxe2_file_close(&log->file); +} + +static inline char *sxe2_file_name_locale(char *file) +{ + char *p_slash = strrchr(file, '/'); + + return (!p_slash) ? file : (p_slash + 1); +} + +void sxe2_level_set(int level) +{ + g_sxe2_debug.level = level; +} + +s32 sxe2_level_get(void) +{ + return (s32)g_sxe2_debug.level; +} + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_debug.status = status; +} + +s32 sxe2_bin_status_get(void) +{ + return (s32)g_sxe2_debug.status; +} + +void sxe2_log_string(enum debug_level_e level, + const char *dev_name, + const char *file, + const char *func, + int line, + const char *fmt, ...) +{ + struct sxe2_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + const char *name = dev_name ? dev_name : ""; + + va_list args; + + if (level > g_sxe2_debug.level) { +#ifndef SXE2_CFG_RELEASE + if (!sxe2_filter_file_print(file) && + !sxe2_filter_func_print(func)) { + return; + } +#else + return; +#endif + } + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + len = ctxt->len; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%s", + sxe2_debug_level_name(level)); + len += time_for_log(buf + len, PER_CPU_PAGE_SIZE - len); + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "[%d][%d][%s]%s:%4d:%s:", + raw_smp_processor_id(), current->pid, + name, + sxe2_file_name_locale((char *)file), line, func); + + if (len < PER_CPU_PAGE_SIZE) { + va_start(args, fmt); + len += vsnprintf(buf + len, + PER_CPU_PAGE_SIZE - len, + fmt, + args); + va_end(args); + } + + if (len >= PER_CPU_PAGE_SIZE) { + g_sxe2_debug.log[DEBUG_TYPE_STRING].is_drop = 1; + len = PER_CPU_PAGE_SIZE; + } + ctxt->len = len; + + if (!in_interrupt()) + local_irq_restore(flags); + + if (sxe2_log_tty_query()) { + if (buf[0] == 'I' || buf[0] == 'W') + pr_warn_ratelimited("%s", buf + LOG_INFO_PREFIX_LEN); + else if (buf[0] == 'E') + pr_warn_ratelimited("%s", buf + LOG_ERROR_PREFIX_LEN); + } + + sxe2_klog_in(&g_sxe2_debug.log[DEBUG_TYPE_STRING], ctxt->buff, &ctxt->len); + + wake_up_process(g_sxe2_debug.task); +} + +void sxe2_log_binary(const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + struct sxe2_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + unsigned long flags = 0; + u32 i = 0; + u32 j = 0; + u32 max; + u32 mod; + + if (sxe2_bin_status_get() != true) + return; + + max = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + if (!in_interrupt()) + local_irq_save(flags); + + ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + 
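+	/*
+	 * Editor's note: as in sxe2_log_string() above, formatting is staged
+	 * in a lock-free per-CPU page set (PAGE_ORDER = 2, i.e. four pages
+	 * per CPU); the get_cpu()/put_cpu() pair above only pins the CPU long
+	 * enough to pick the buffer, and the local_irq_save() taken in
+	 * process context keeps an interrupt on the same CPU from reusing the
+	 * buffer mid-format.  Only the final sxe2_klog_in() copy takes
+	 * log->lock.
+	 */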
len = ctxt->len; + + len += time_for_log(buf + len, PER_CPU_PAGE_SIZE - len); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "[%d] %s %s():%d %s size:%d\n", + current->pid, sxe2_file_name_locale((char *)file), func, + line, str, size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < max; i++) { + j = i * LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + if (len >= PER_CPU_PAGE_SIZE) { + g_sxe2_debug.log[DEBUG_TYPE_BINARY].is_drop = 1; + len = PER_CPU_PAGE_SIZE; + } + + ctxt->len = len; + if (!in_interrupt()) + local_irq_restore(flags); + + sxe2_klog_in(&g_sxe2_debug.log[DEBUG_TYPE_BINARY], ctxt->buff, &ctxt->len); + + wake_up_process(g_sxe2_debug.task); +} + +void sxe2_log_sync(void) +{ + sxe2_file_sync(g_sxe2_debug.log[DEBUG_TYPE_STRING].file); + sxe2_file_sync(g_sxe2_debug.log[DEBUG_TYPE_BINARY].file); +} + +static void sxe2_log_file_prefix_add(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, BINARY_FILE_PREFIX); + } +} + +static void sxe2_log_file_prefix_add_default(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, BINARY_FILE_PREFIX); + } +} + +static void sxe2_log_file_path_set(bool is_vf) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", VF_LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", VF_BINARY_FILE_PATH); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", BINARY_FILE_PATH); + } +} + +static int sxe2_log_path_init(bool is_vf) +{ + int rc = 0; + u32 file_num = 0; + char *log_path_p = NULL; + u32 log_path_len = 0; + u32 input_log_space = sxe2_log_space_size_query(); + u32 input_log_file_size = sxe2_log_file_size_query(); + unsigned int log_file_size = 0; + struct sxe2_log_t *log_bin = &g_sxe2_debug.log[DEBUG_TYPE_BINARY]; + struct sxe2_log_t *log_str = &g_sxe2_debug.log[DEBUG_TYPE_STRING]; + + log_path_p = sxe2_log_path_query(); + log_path_len = strlen(log_path_p); + if (log_path_p && log_path_p[0] == '/') 
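+	/*
+	 * Editor's note: only an absolute (leading '/') log_path is honoured;
+	 * anything else falls back to the /var/log defaults.  For a
+	 * hypothetical log_path of "/tmp/sxe2/", the PF string log prefix
+	 * becomes "/tmp/sxe2/sxe2.log." and sxe2_file_open() later appends
+	 * either a timestamp (unbounded space) or a %04d rotation index.
+	 */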
{
+		if (log_path_p[log_path_len - 1] == '/')
+			sxe2_log_file_prefix_add(is_vf, log_path_p);
+		else
+			sxe2_log_file_prefix_add_default(is_vf, log_path_p);
+	} else {
+		sxe2_log_file_path_set(is_vf);
+	}
+	if (input_log_file_size < DRV_LOG_FILE_SIZE_MIN_MB ||
+	    input_log_file_size > DRV_LOG_FILE_SIZE_MAX_MB) {
+		sxe2_log_file_size_modify(LOG_FILE_SIZE >> MEGABYTE);
+		input_log_file_size = LOG_FILE_SIZE >> MEGABYTE;
+	}
+	if (input_log_space && input_log_space < input_log_file_size) {
+		sxe2_log_file_size_modify(input_log_space);
+		input_log_file_size = input_log_space;
+	}
+	log_file_size = input_log_file_size << MEGABYTE;
+
+	if (input_log_space) {
+		file_num = input_log_space / input_log_file_size;
+		if (file_num == 0) {
+			sxe2_print(KERN_ERR, NULL, "filenum should not be 0\n");
+			SXE2_BUG();
+		}
+	} else {
+		file_num = 0;
+	}
+
+	rc = sxe2_klog_init(log_str,
+			    BUF_SIZE,
+			    g_log_path_str,
+			    log_file_size,
+			    file_num);
+	if (rc < 0)
+		goto l_end;
+
+	rc = sxe2_klog_init(log_bin,
+			    BUF_SIZE,
+			    g_log_path_bin,
+			    BINARY_FILE_SIZE,
+			    0);
+	if (rc < 0)
+		goto l_free_string;
+
+	sxe2_print(KERN_INFO, NULL, "sxe2 debug init logpath[%s] strlogsize[%uM] filenum[%u]\n",
+		   g_log_path_str, (log_file_size >> MEGABYTE), log_str->file_num);
+	rc = 0;
+	return rc;
+l_free_string:
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_STRING]);
+l_end:
+	return rc;
+}
+
+int sxe2_log_init(bool is_vf)
+{
+	struct task_struct *task = NULL;
+	struct sxe2_ctxt_t *ctxt = NULL;
+	int rc = 0;
+	int i;
+	int nid;
+
+	INIT_LIST_HEAD(&g_sxe2_debug.filter_file);
+	INIT_LIST_HEAD(&g_sxe2_debug.filter_func);
+
+#ifdef SXE2_CFG_RELEASE
+	g_sxe2_debug.level = LEVEL_INFO;
+	g_sxe2_debug.status = false;
+#else
+	g_sxe2_debug.level = LEVEL_DEBUG;
+	g_sxe2_debug.status = true;
+#endif
+
+	g_sxe2_debug.ctxt = alloc_percpu(struct sxe2_ctxt_t);
+	if (!g_sxe2_debug.ctxt) {
+		rc = -ENOMEM;
+		sxe2_print(KERN_ERR, NULL, "alloc percpu failed\n");
+		goto l_end;
+	}
+
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		memset(ctxt, 0, sizeof(*ctxt));
+	}
+
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		nid = cpu_to_node(i);
+
+		ctxt->page = alloc_pages_node(nid, GFP_ATOMIC, PAGE_ORDER);
+		if (!ctxt->page) {
+			rc = -ENOMEM;
+			sxe2_print(KERN_ERR, NULL, "alloc_pages_node size %lu failed\n",
+				   PER_CPU_PAGE_SIZE);
+			goto l_free_cpu_buff;
+		}
+		ctxt->buff = page_address(ctxt->page);
+	}
+	rc = sxe2_log_path_init(is_vf);
+	if (rc < 0)
+		goto l_free_cpu_buff;
+
+	task = kthread_create(sxe2_klog_flush, NULL, "sxe2_klog_flush");
+	if (IS_ERR(task)) {
+		rc = (int)PTR_ERR(task);
+		sxe2_print(KERN_ERR, NULL, "Create kernel thread, err: %d\n", rc);
+		goto l_free_binary;
+	}
+	wake_up_process(task);
+	g_sxe2_debug.task = task;
+	rc = 0;
+l_end:
+	return rc;
+
+l_free_binary:
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_BINARY]);
+	sxe2_klog_exit(&g_sxe2_debug.log[DEBUG_TYPE_STRING]);
+
+l_free_cpu_buff:
+	for_each_possible_cpu(i) {
+		ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+		if (ctxt && ctxt->page)
+			__free_pages(ctxt->page, PAGE_ORDER);
+	}
+	free_percpu(g_sxe2_debug.ctxt);
+	goto l_end;
+}
+
+void sxe2_log_exit(void)
+{
+	int i = 0;
+	struct sxe2_ctxt_t *ctxt;
+
+	if (!g_sxe2_debug.task)
+		return;
+
+	kthread_stop(g_sxe2_debug.task);
+
+	for (i = 0; i < ARRAY_SIZE(g_sxe2_debug.log); i++)
+		sxe2_klog_exit(&g_sxe2_debug.log[i]);
+
+	if (g_sxe2_debug.ctxt) {
+		for_each_possible_cpu(i) {
+			ctxt = per_cpu_ptr(g_sxe2_debug.ctxt, i);
+			if (ctxt && ctxt->page)
+				__free_pages(ctxt->page, PAGE_ORDER);
+		}
+
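+		/*
+		 * Editor's note, a minimal consumer sketch (hypothetical
+		 * module, not part of this patch):
+		 *
+		 *	static int __init demo_init(void)
+		 *	{
+		 *		return sxe2_log_init(false);	// false: PF log names
+		 *	}
+		 *
+		 *	static void __exit demo_exit(void)
+		 *	{
+		 *		sxe2_log_sync();
+		 *		sxe2_log_exit();
+		 *	}
+		 *
+		 * kthread_stop() above does not return until sxe2_klog_flush()
+		 * has exited, so the rings and files below are torn down only
+		 * after the last flush.
+		 */
+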
free_percpu(g_sxe2_debug.ctxt); + g_sxe2_debug.ctxt = NULL; + } +} + +#elif defined SXE2_CFG_RELEASE && defined __KERNEL__ +u32 g_sxe2_dmesg_level = LOGLEVEL_INFO; + +#elif !defined SXE2_CFG_RELEASE + +s32 g_sxe2_log_level = LEVEL_INFO; +s32 g_sxe2_bin_status; +char *test_bin_buf; + +s32 sxe2_log_init(bool is_vf) +{ + return 0; +} + +void sxe2_level_set(s32 level) +{ + g_sxe2_log_level = level; +} + +s32 sxe2_level_get(void) +{ + return g_sxe2_log_level; +} + +void sxe2_bin_status_set(bool status) +{ + g_sxe2_bin_status = status; +} + +s32 sxe2_bin_status_get(void) +{ + return g_sxe2_bin_status; +} + +void sxe2_log_sync(void) +{ +} + +void sxe2_log_exit(void) +{ + if (test_bin_buf) + free(test_bin_buf); +} + +void sxe2_log_binary(const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + u32 i = 0; + u32 j = 0; + u32 iMax; + u32 mod; + char *buf = NULL; + int len = 0; + + if (sxe2_bin_status_get() != true) + return; + + buf = zalloc(PER_CPU_PAGE_SIZE); + test_bin_buf = buf; + + iMax = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "%s size:%d\n", str, size); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (i = 0; i < iMax; i++) { + j = i * LINE_TOTAL; + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + if (mod) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + + for (; j < size; j++) { + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + + len += sxe2_snprintf(buf + len, PER_CPU_PAGE_SIZE - len, "%c", '\n'); + if (len >= PER_CPU_PAGE_SIZE) + goto l_end; + } + +l_end: + printf("buf:%s", buf); +} + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.h b/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.h new file mode 100644 index 0000000000000000000000000000000000000000..657b163da14264f978f874f2837a2e0e4e812734 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/base/log/sxe2_log.h @@ -0,0 +1,616 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
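+ *
+ * (Editor's note: this header fronts three compile-time variants of one
+ * logging API, mirroring the #elif blocks at the end of the log .c file
+ * above: the full file-backed kernel logger, a release build that keeps
+ * only the g_sxe2_dmesg_level gate, and a printf()-based userspace stub
+ * for SXE2_TEST builds.)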
+ *
+ * @file: sxe2_log.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef _SXE2_LOG_H_
+#define _SXE2_LOG_H_
+
+#ifdef SXE2_TEST
+#define STATIC
+#define pr_err printf
+#else
+#define STATIC static
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SXE2_HOST(ins) ((ins)->host->host_no)
+
+#define LOG_INFO_PREFIX_LEN 32
+#define LOG_ERROR_PREFIX_LEN 33
+#define MEGABYTE 20
+
+enum debug_level_e {
+	LEVEL_ERROR,
+	LEVEL_WARN,
+	LEVEL_INFO,
+	LEVEL_DEBUG,
+};
+
+static inline const char *sxe2_debug_level_name(enum debug_level_e lv)
+{
+	static const char * const level[] = {
+		[LEVEL_ERROR] = "ERROR",
+		[LEVEL_WARN] = "WARN",
+		[LEVEL_INFO] = "INFO",
+		[LEVEL_DEBUG] = "DEBUG",
+	};
+
+	return level[lv];
+}
+
+#ifdef __KERNEL__
+
+#define PRINT_DEBUG KERN_DEBUG
+#define PRINT_INFO KERN_INFO
+#define PRINT_WARN KERN_WARNING
+#define PRINT_ERR KERN_ERR
+
+#define sxe2_print(level, bdf, fmt, ...) do { \
+	if (!strcmp(level, KERN_DEBUG)) { \
+		pr_debug("[SXE2]%s():%d %s:" fmt, __func__, \
+			 __LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_INFO)) { \
+		pr_info("[SXE2]%s():%d %s:" fmt, __func__, \
+			__LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_WARNING)) { \
+		pr_warn("[SXE2]%s():%d %s:" fmt, __func__, \
+			__LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} else if (!strcmp(level, KERN_ERR)) { \
+		pr_err("[SXE2]%s():%d %s:" fmt, __func__, \
+		       __LINE__, bdf ? bdf : "", ##__VA_ARGS__); \
+	} \
+} while (0)
+
+#else
+
+#define PRINT_DEBUG LEVEL_DEBUG
+#define PRINT_INFO LEVEL_INFO
+#define PRINT_WARN LEVEL_WARN
+#define PRINT_ERR LEVEL_ERROR
+
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/time.h>
+
+#define __percpu
+
+static inline U64 get_now_ms(void)
+{
+	struct timeval tv;
+	U64 timestamp = 0;
+
+	gettimeofday(&tv, NULL);
+	timestamp = tv.tv_sec * 1000 + tv.tv_usec / 1000;
+	return timestamp;
+}
+
+#define filename_printf(x) (strrchr((x), '/') ? strrchr((x), '/') + 1 : (x))
+
+#define sxe2_print(level, bdf, fmt, ...) do { \
+	if (level <= sxe2_level_get()) { \
+		if (level == LEVEL_DEBUG) { \
+			(void)printf("DEBUG:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_INFO) { \
+			(void)printf("INFO:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_WARN) { \
+			(void)printf("WARN:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} else if (level == LEVEL_ERROR) { \
+			(void)printf("ERROR:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), \
+				     filename_printf(__FILE__), \
+				     __func__, __LINE__, pthread_self(), \
+				     bdf ? bdf : "", ##__VA_ARGS__); \
+		} \
+	} \
+} while (0)
+
+#endif
+
+#define LOG_BUG_ON(cond, fmt, ...) \
do { \ + if ((cond)) { \ + LOG_ERROR(fmt, ##__VA_ARGS__); \ + LOG_SYNC(); \ + BUG(); \ + } \ +} while (0) + +#define DEBUG_TRACE_MAGIC 0x456789 +#define BUF_SIZE (1024LL << 10) + +#define PAGE_ORDER 2 +#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << 2)) + +#define LOG_FILE_SIZE (200LL << 20) +#define BINARY_FILE_SIZE (200LL << 20) + +#define VF_LOG_FILE_PATH "/var/log/sxe2vf.log" +#define VF_LOG_FILE_PREFIX "sxe2vf.log" +#define VF_BINARY_FILE_PATH "/var/log/sxe2vf.bin" +#define VF_BINARY_FILE_PREFIX "sxe2vf.bin" + +#define LOG_FILE_PATH "/var/log/sxe2.log" +#define LOG_FILE_PREFIX "sxe2.log" +#define BINARY_FILE_PATH "/var/log/sxe2.bin" +#define BINARY_FILE_PREFIX "sxe2.bin" + +#define DEBUG_DROP_LOG_STRING "\nwarnning:drop some logs\n\n" + +enum { + DEBUG_TYPE_STRING, + DEBUG_TYPE_BINARY, + DEBUG_TYPE_NR, +}; + +struct debug_func_t { + struct list_head list; + char name[64]; +}; + +struct debug_file_t { + struct list_head list; + char name[64]; +}; + +struct sxe2_log_t { + struct { + char *buf; + int buf_size; + long long head; + long long tail; + spinlock_t lock; + unsigned char is_drop; + }; + struct { + char *file_path; + struct file *file; + long long file_pos; + long long file_size; + unsigned int file_num; + unsigned int index; + }; +}; + +struct sxe2_thread_local_t { + s32 magic; + char data[]; +}; + +struct sxe2_ctxt_t { + struct page *page; + void *buff; + unsigned int len; +}; + +struct sxe2_thread_key_t { + s32 offset; +}; + +struct sxe2_debug_t { + enum debug_level_e level; + bool status; + u16 key_offset; + struct sxe2_ctxt_t __percpu *ctxt; + struct list_head filter_func; + struct list_head filter_file; + struct task_struct *task; + struct sxe2_log_t log[DEBUG_TYPE_NR]; +}; + +void sxe2_level_set(int level); +s32 sxe2_level_get(void); + +void sxe2_bin_status_set(bool status); +s32 sxe2_bin_status_get(void); + +int sxe2_log_init(bool is_vf); +void sxe2_log_exit(void); + +void sxe2_log_string(enum debug_level_e level, + const char *dev_name, const char *file, const char *func, + int line, const char *fmt, ...); + +void sxe2_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str); + +#ifndef SXE2_CFG_RELEASE +#define DATA_DUMP(ptr, size, str) \ + sxe2_log_binary(__FILE__, __func__, __LINE__, (u8 *)ptr, 0, size, str) +#else +#define DATA_DUMP(ptr, size, str) +#endif +void sxe2_log_sync(void); + +void sxe2_thread_key_create(int size, struct sxe2_thread_key_t *key); + +void *sxe2_thread_get_specific(struct sxe2_thread_key_t *key); + +void sxe2_thread_clear_specific(struct sxe2_thread_key_t *key); + +int sxe2_filter_file_add(char *name); + +void sxe2_filter_file_del(char *filename); + +void sxe2_log_level_modify(u32 level); + +void sxe2_filter_file_clear(void); + +int sxe2_filter_func_add(char *name); + +void sxe2_filter_func_del(char *name); + +void sxe2_filter_func_clear(void); + +#ifdef SXE2_DRIVER_TRACE +int time_for_file_name(char *buff, int buf_len); +int sxe2_file_write(struct file *file, char *buf, int len); +#endif + +#if defined __KERNEL__ +extern u32 g_sxe2_dmesg_level; + +#if defined SXE2_CFG_DEBUG +int time_for_file_name(char *buff, int buf_len); +int sxe2_file_write(struct file *file, char *buf, int len); + +#define WRITE_LOG(level, bdf, fmt, ...) \ + sxe2_log_string(level, bdf, __FILE__, __func__, __LINE__, fmt, ##__VA_ARGS__) +#define LOG_SYNC() sxe2_log_sync() +#define LOG_DEBUG_BDF(fmt, ...) WRITE_LOG(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) 
WRITE_LOG(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) WRITE_LOG(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) WRITE_LOG(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) +#else +#define UNUSED(x) ((void)(x)) +#define WRITE_LOG(level, bdf, fmt, ...) do {\ + UNUSED(level); \ + UNUSED(bdf); \ + UNUSED(fmt); \ +} while (0) +#define LOG_SYNC() +#define LOG_DEBUG_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) + +#define LOG_INFO_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#define LOG_WARN_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#define LOG_ERROR_BDF(fmt, ...) do {\ + UNUSED(adapter); \ + UNUSED(fmt); \ +} while (0) +#endif + +#define FL_PR_DEBUG(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + pr_debug(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_INFO(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + pr_info(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_WARN(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + pr_warn(fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_PR_ERR(fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + pr_err(fmt, ##__VA_ARGS__); } \ +} while (0) + +#define FL_DEV_DBG(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + dev_dbg(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_INFO(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + dev_info(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_WARN(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + dev_warn(dev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_DEV_ERR(dev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + dev_err(dev, fmt, ##__VA_ARGS__); } \ +} while (0) + +#define FL_NETDEV_DBG(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_DEBUG) {\ + netdev_dbg(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_INFO(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_INFO) {\ + netdev_info(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_WARN(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_WARNING) {\ + netdev_warn(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) +#define FL_NETDEV_ERR(netdev, fmt, ...) do {\ + if (g_sxe2_dmesg_level >= LOGLEVEL_ERR) {\ + netdev_err(netdev, fmt, ##__VA_ARGS__); } \ +} while (0) + +#define LOG_DEBUG(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) + +#define LOG_INFO(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__) + +#define LOG_WARN(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) + +#define LOG_ERROR(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) +#define LOG_INFO_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__) + +#define LOG_WARN_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) + +#define LOG_ERROR_IRQ(fmt, ...) \ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_DEBUG(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_INFO_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_INFO, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_INFO(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_WARN_D(fmt, ...) 
do {\ + WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_WARN(fmt, ##__VA_ARGS__);\ +} while (0) +#define LOG_ERROR_D(fmt, ...) do {\ + WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__);\ + FL_PR_ERR(fmt, ##__VA_ARGS__);\ +} while (0) + +#define LOG_DEV_DEBUG(format, arg...) do {\ + FL_DEV_DBG(&adapter->pdev->dev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_INFO(format, arg...) do {\ + FL_DEV_INFO(&adapter->pdev->dev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_WARN(format, arg...) do {\ + FL_DEV_WARN(&adapter->pdev->dev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ +} while (0) + +#define LOG_DEV_ERR(format, arg...) do {\ + FL_DEV_ERR(&adapter->pdev->dev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_DEBUG(msglvl, format, arg...) do {\ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_INFO(msglvl, format, arg...) do {\ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_WARN(msglvl, format, arg...) do {\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ +} while (0) + +#define LOG_MSG_ERR(msglvl, format, arg...) do {\ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ +} while (0) + +#define LOG_PR_DEBUG(format, arg...) FL_PR_DEBUG("sxe2: "format, ## arg) +#define LOG_PR_INFO(format, arg...) FL_PR_INFO("sxe2: "format, ## arg) +#define LOG_PR_WARN(format, arg...) FL_PR_WARN("sxe2: "format, ## arg) +#define LOG_PR_ERR(format, arg...) FL_PR_ERR("sxe2: "format, ## arg) +#define LOG_PRVF_DEBUG(format, arg...) FL_PR_DEBUG("sxe2vf: "format, ## arg) +#define LOG_PRVF_INFO(format, arg...) FL_PR_INFO("sxe2vf: "format, ## arg) +#define LOG_PRVF_WARN(format, arg...) FL_PR_WARN("sxe2vf: "format, ## arg) +#define LOG_PRVF_ERR(format, arg...) FL_PR_ERR("sxe2vf: "format, ## arg) + +#define LOG_NETDEV_DEBUG(format, arg...) do {\ + FL_NETDEV_DBG(netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_INFO(format, arg...) do {\ + FL_NETDEV_INFO(netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_WARN(format, arg...) do {\ + FL_NETDEV_WARN(netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_ERR(format, arg...) do {\ + FL_NETDEV_ERR(netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg); \ + (void)netdev; \ +} while (0) + +#else + +#define LOG_DEBUG(fmt, ...) sxe2_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) sxe2_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) sxe2_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) sxe2_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_IRQ(fmt, ...) sxe2_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO_IRQ(fmt, ...) sxe2_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN_IRQ(fmt, ...) sxe2_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR_IRQ(fmt, ...) sxe2_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) 
sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_DEV_DEBUG(fmt, ...) \ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_INFO(fmt, ...) \ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_WARN(fmt, ...) \ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_ERR(fmt, ...) \ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_MSG_DEBUG(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_INFO(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_WARN(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) +#define LOG_MSG_ERR(msglvl, fmt, ...) do {\ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)msglvl; \ +} while (0) + +#define LOG_NETDEV_DEBUG(fmt, ...) do {\ + sxe2_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_INFO(fmt, ...) do {\ + sxe2_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_WARN(fmt, ...) do {\ + sxe2_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_NETDEV_ERR(fmt, ...) do {\ + sxe2_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__); \ + (void)netdev; \ +} while (0) + +#define LOG_DEBUG_D(fmt, ...) UNUSED(fmt) +#define LOG_INFO_D(fmt, ...) UNUSED(fmt) +#define LOG_WARN_D(fmt, ...) UNUSED(fmt) +#define LOG_ERROR_D(fmt, ...) UNUSED(fmt) + +#define LOG_PR_DEBUG(fmt, ...) \ + sxe2_print(PRINT_DEBUG, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_INFO(fmt, ...) \ + sxe2_print(PRINT_INFO, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_WARN(fmt, ...) \ + sxe2_print(PRINT_WARN, "sxe2", fmt, ##__VA_ARGS__) + +#define LOG_PR_ERR(fmt, ...) \ + sxe2_print(PRINT_ERR, "sxe2", fmt, ##__VA_ARGS__) +#define LOG_PRVF_DEBUG(fmt, ...) \ + sxe2_print(PRINT_DEBUG, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_INFO(fmt, ...) \ + sxe2_print(PRINT_INFO, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_WARN(fmt, ...) \ + sxe2_print(PRINT_WARN, "sxe2vf", fmt, ##__VA_ARGS__) + +#define LOG_PRVF_ERR(fmt, ...) 
\
+	sxe2_print(PRINT_ERR, "sxe2vf", fmt, ##__VA_ARGS__)
+
+#define LOG_SYNC()
+
+#endif
+
+#if defined SXE2_CFG_RELEASE
+#define SXE2_BUG_ON(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+} while (0)
+
+#define SXE2_BUG() {pr_err("trigger bug on test.\n"); }
+#define SXE2_BUG_ON_NO_SYNC(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+} while (0)
+
+#define SXE2_BUG_NO_SYNC()
+#else
+#define SXE2_BUG_ON(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_SYNC(); \
+	} \
+	BUG_ON(cond); \
+} while (0)
+
+#define SXE2_BUG() do { \
+	LOG_SYNC(); \
+	BUG(); \
+} while (0)
+
+#define SXE2_BUG_ON_NO_SYNC(cond) do { \
+	if ((cond)) { \
+		pr_err("BUG_ON's condition(%s) has been triggered\n", #cond); \
+		LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \
+	} \
+	BUG_ON(cond); \
+} while (0)
+
+#define SXE2_BUG_NO_SYNC() \
+	BUG()
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2_trace.h b/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2_trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..f73ce8aa35c2defba716cb00f2747d1a14923f9e
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2_trace.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#if !IS_ENABLED(CONFIG_TRACEPOINTS) || defined(__CHECKER__) || !defined(SXE2_DRIVER_TRACE)
+#if !defined(_SXE2_TRACE_H_)
+#define _SXE2_TRACE_H_
+
+#define sxe2_trace(trace_name, args...)
+#define sxe2_trace_enabled(trace_name) ((void)"" #trace_name, 0)
+#endif
+#else
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sxe2
+
+#if !defined(_SXE2_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SXE2_TRACE_H_
+
+#include "sxe2_irq.h"
+#include "sxe2_queue.h"
+
+#include <linux/tracepoint.h>
+
+#define _SXE2_TRACE_NAME(trace_name) (trace_##sxe2##_##trace_name)
+#define SXE2_TRACE_NAME(trace_name) _SXE2_TRACE_NAME(trace_name)
+
+#define sxe2_trace(trace_name, args...) \
SXE2_TRACE_NAME(trace_name)(args) + +DECLARE_EVENT_CLASS(sxe2_irq_rxclean, TP_PROTO(struct sxe2_irq_data *irq_data, int total_clean), + TP_ARGS(irq_data, total_clean), + TP_STRUCT__entry(__string(irqname, irq_data->name) __field(int, total_clean)), + TP_fast_assign(__assign_str(irqname, irq_data->name); + __entry->total_clean = total_clean;), + TP_printk("irqname: %s total_clean: %d", __get_str(irqname), + __entry->total_clean)); +#define DEFINE_IRQ_RXCLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2_irq_rxclean, name, \ + TP_PROTO(struct sxe2_irq_data *irq_data, int total_clean), \ + TP_ARGS(irq_data, total_clean)) +DEFINE_IRQ_RXCLEAN_EVENT(sxe2_irq_rxclean_begin); +DEFINE_IRQ_RXCLEAN_EVENT(sxe2_irq_rxclean_end); + +TRACE_EVENT(sxe2_rxq_clean_begin, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi;), + TP_printk("rxq idx in vsi: %u", __entry->idx_in_vsi)); +TRACE_EVENT(sxe2_rxq_clean_end, TP_PROTO(struct sxe2_queue *rxq, s32 clean), TP_ARGS(rxq, clean), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(s32, clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; __entry->clean = clean), + TP_printk("rxq idx in vsi: %u, clean: %d", __entry->idx_in_vsi, __entry->clean)); + +DECLARE_EVENT_CLASS(sxe2_pkt_clean, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(u16, next_to_clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; + __entry->next_to_clean = rxq->next_to_clean;), + TP_printk("idx_in_vsi: %u next_to_clean: %u", __entry->idx_in_vsi, + __entry->next_to_clean)); +#define DEFINE_PKT_CLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2_pkt_clean, name, TP_PROTO(struct sxe2_queue *rxq), TP_ARGS(rxq)) +DEFINE_PKT_CLEAN_EVENT(sxe2_rx_pkt_clean_begin); +DEFINE_PKT_CLEAN_EVENT(sxe2_rx_pkt_clean_end); + +DECLARE_EVENT_CLASS(sxe2_xmit_template, TP_PROTO(struct sxe2_queue *queue, struct sk_buff *skb), + TP_ARGS(queue, skb), + TP_STRUCT__entry(__field(void *, queue) __field(void *, skb) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; __entry->skb = skb; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s skb: %pK queue: %pK", __get_str(devname), __entry->skb, + __entry->queue)); + +#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2_xmit_template, name, \ + TP_PROTO(struct sxe2_queue *queue, struct sk_buff *skb), TP_ARGS(queue, skb)) + +DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2_queue_xmit); +DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2_queue_xmit_drop); + +union sxe2_tx_data_desc; +struct sxe2_tx_buf; +DECLARE_EVENT_CLASS(sxe2_tx_clean_template, + TP_PROTO(struct sxe2_queue *txq, + union sxe2_tx_data_desc *tx_desc, + struct sxe2_tx_buf *tx_buf), + TP_ARGS(txq, tx_desc, tx_buf), + TP_STRUCT__entry(__field(void *, txq) + __field(void *, tx_desc) + __field(void *, tx_buf) + __string(devname, txq->netdev->name)), + TP_fast_assign(__entry->txq = txq; + __entry->tx_desc = tx_desc; + __entry->tx_buf = tx_buf; + __assign_str(devname, txq->netdev->name);), + TP_printk("netdev: %s txq_idx: %u txq: %p tx_desc: %p tx_buf %p", __get_str(devname), + ((struct sxe2_queue *)__entry->txq)->idx_in_vsi, __entry->txq, + __entry->tx_desc, __entry->tx_buf) +); + +#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ +DEFINE_EVENT(sxe2_tx_clean_template, name, \ + TP_PROTO(struct sxe2_queue *txq, \ + union sxe2_tx_data_desc *tx_desc, \ + struct sxe2_tx_buf *tx_buf), \ + TP_ARGS(txq, tx_desc, tx_buf)) + 
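+/*
+ * Editor's note: each DEFINE_EVENT in this header materialises a
+ * trace_sxe2_<name>() symbol, and the sxe2_trace() wrapper above pastes the
+ * prefix back on, so a hypothetical datapath call site reads:
+ *
+ *	sxe2_trace(queue_xmit, txq, skb);
+ *
+ * which expands to trace_sxe2_queue_xmit(txq, skb) when CONFIG_TRACEPOINTS
+ * and SXE2_DRIVER_TRACE are both enabled, and to nothing otherwise.
+ */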
+DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq);
+DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq_unmap);
+DEFINE_TX_TEMPLATE_OP_EVENT(sxe2_clean_tx_irq_unmap_eop);
+
+DECLARE_EVENT_CLASS(sxe2_txts_template, TP_PROTO(struct sk_buff *skb, int idx), TP_ARGS(skb, idx),
+	TP_STRUCT__entry(__field(void *, skb) __field(int, idx)),
+	TP_fast_assign(__entry->skb = skb; __entry->idx = idx;),
+	TP_printk("skb %pK idx %d", __entry->skb, __entry->idx));
+
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+	DEFINE_EVENT(sxe2_txts_template, name, TP_PROTO(struct sk_buff *skb, int idx), \
+		TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_request);
+DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_hw_request);
+DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_hw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(sxe2_txts_complete);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH CUR_DIR "/base/trace"
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sxe2_trace
+#include <trace/define_trace.h>
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2vf_trace.h b/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2vf_trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1a589bc4d1f0c6550b5fdc79da4e7499ae6eef3
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/base/trace/sxe2vf_trace.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2vf_trace.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#if !IS_ENABLED(CONFIG_TRACEPOINTS) || defined(__CHECKER__) || !defined(SXE2_DRIVER_TRACE)
+#if !defined(_SXE2VF_TRACE_H_)
+#define _SXE2VF_TRACE_H_
+
+#define sxe2vf_trace(trace_name, args...)
+#define sxe2vf_trace_enabled(trace_name) ((void)"" #trace_name, 0)
+#endif
+#else
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sxe2vf
+
+#if !defined(_SXE2VF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SXE2VF_TRACE_H_
+
+#include "sxe2vf_irq.h"
+#include "sxe2vf_queue.h"
+#include <linux/tracepoint.h>
+
+#define _SXE2VF_TRACE_NAME(trace_name) (trace_##sxe2vf##_##trace_name)
+#define SXE2VF_TRACE_NAME(trace_name) _SXE2VF_TRACE_NAME(trace_name)
+
+#define sxe2vf_trace(trace_name, args...) \
SXE2VF_TRACE_NAME(trace_name)(args) + +DECLARE_EVENT_CLASS(sxe2vf_irq_rxclean, + TP_PROTO(struct sxe2vf_irq_data *irq_data, int total_clean), + TP_ARGS(irq_data, total_clean), + TP_STRUCT__entry(__string(irqname, irq_data->name) __field(int, total_clean)), + TP_fast_assign(__assign_str(irqname, irq_data->name); + __entry->total_clean = total_clean;), + TP_printk("irqname: %s total_clean: %d", __get_str(irqname), + __entry->total_clean)); +#define DEFINE_IRQ_RXCLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2vf_irq_rxclean, name, \ + TP_PROTO(struct sxe2vf_irq_data *irq_data, int total_clean), \ + TP_ARGS(irq_data, total_clean)) +DEFINE_IRQ_RXCLEAN_EVENT(sxe2vf_irq_rxclean_begin); +DEFINE_IRQ_RXCLEAN_EVENT(sxe2vf_irq_rxclean_end); + +TRACE_EVENT(sxe2vf_rxq_clean_begin, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi;), + TP_printk("rxq idx in vsi: %u", __entry->idx_in_vsi)); +TRACE_EVENT(sxe2vf_rxq_clean_end, TP_PROTO(struct sxe2vf_queue *rxq, s32 clean), + TP_ARGS(rxq, clean), TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(s32, clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; __entry->clean = clean), + TP_printk("rxq idx in vsi: %u, clean: %d", __entry->idx_in_vsi, __entry->clean)); + +DECLARE_EVENT_CLASS(sxe2vf_pkt_clean, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq), + TP_STRUCT__entry(__field(u16, idx_in_vsi) __field(u16, next_to_clean)), + TP_fast_assign(__entry->idx_in_vsi = rxq->idx_in_vsi; + __entry->next_to_clean = rxq->next_to_clean;), + TP_printk("idx_in_vsi: %u next_to_clean: %u", __entry->idx_in_vsi, + __entry->next_to_clean)); +#define DEFINE_PKT_CLEAN_EVENT(name) \ + DEFINE_EVENT(sxe2vf_pkt_clean, name, TP_PROTO(struct sxe2vf_queue *rxq), TP_ARGS(rxq)) +DEFINE_PKT_CLEAN_EVENT(sxe2vf_rx_pkt_clean_begin); +DEFINE_PKT_CLEAN_EVENT(sxe2vf_rx_pkt_clean_end); + +union sxe2vf_tx_data_desc; +struct sxe2vf_tx_buf; +DECLARE_EVENT_CLASS(sxe2vf_tx_template, + TP_PROTO(struct sxe2vf_queue *queue, union sxe2vf_tx_data_desc *desc, + struct sxe2vf_tx_buf *buf), + TP_ARGS(queue, desc, buf), + TP_STRUCT__entry(__field(void *, queue) __field(void *, desc) + __field(void *, buf) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; + __entry->desc = desc; __entry->buf = buf; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s queue: %pK desc: %pK buf %pK", __get_str(devname), + __entry->queue, __entry->desc, __entry->buf)); + +#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2vf_tx_template, name, \ + TP_PROTO(struct sxe2vf_queue *queue, union sxe2vf_tx_data_desc *desc, \ + struct sxe2vf_tx_buf *buf), \ + TP_ARGS(queue, desc, buf)) + +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean_unmap); +DEFINE_TX_TEMPLATE_OP_EVENT(sxe2vf_txq_irq_clean_unmap_eop); + +DECLARE_EVENT_CLASS(sxe2vf_xmit_template, + TP_PROTO(struct sxe2vf_queue *queue, struct sk_buff *skb), + TP_ARGS(queue, skb), + TP_STRUCT__entry(__field(void *, queue) __field(void *, skb) + __string(devname, queue->netdev->name)), + TP_fast_assign(__entry->queue = queue; __entry->skb = skb; + __assign_str(devname, queue->netdev->name);), + TP_printk("netdev: %s skb: %pK queue: %pK", __get_str(devname), __entry->skb, + __entry->queue)); + +#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \ + DEFINE_EVENT(sxe2vf_xmit_template, name, \ + TP_PROTO(struct sxe2vf_queue *queue, struct sk_buff *skb), TP_ARGS(queue, skb)) + 
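+/*
+ * Editor's note: like sxe2_trace.h, this header is meant to be read twice:
+ * once normally, and once with TRACE_HEADER_MULTI_READ set by
+ * <trace/define_trace.h>, which re-includes it via TRACE_INCLUDE_PATH /
+ * TRACE_INCLUDE_FILE (see the footer) to emit the tracepoint bodies.
+ * Exactly one .c file in the module must define CREATE_TRACE_POINTS before
+ * including it.
+ */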
+DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2vf_queue_xmit); +DEFINE_XMIT_TEMPLATE_OP_EVENT(sxe2vf_queue_xmit_drop); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH CUR_DIR "/base/trace" +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE sxe2vf_trace +#include +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/check_aux_support b/drivers/net/ethernet/linkdata/sxe2vf/check_aux_support new file mode 100644 index 0000000000000000000000000000000000000000..d08339f69c1026bc43dcecaa2641d1e9a932a927 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/check_aux_support @@ -0,0 +1,120 @@ +#!/bin/bash + +msg() +{ + echo -e $1 +} + +exit_builtin_auxiliary_enabled() { exit 0; } +exit_kconfig_invalid() { exit 1; } +exit_need_oot_auxiliary() { exit 2; } +exit_not_found_failure() { exit 3; } + +find_aux_bus_inc() +{ + aux_bus_inc=$(find -L ${ksrc} -name "auxiliary_bus.h") + msg "auxiliary_bus.h location: ${aux_bus_inc}" +} + +LINUX_INCLUDE_DIR="include/linux" + +find_config_file() +{ + file_locations=(${ksrc}/include/generated/autoconf.h \ + ${ksrc}/include/linux/autoconf.h \ + /boot/bmlinux.autoconf.h) + + for file in "${file_locations[@]}"; do + if [ -f ${file} ]; then + kconfig=${file} + break + fi + done + + if [ -z ${kconfig} ]; then + msg "Kernel config file not found at any of the expected locations." + fi +} + +get_config_auxiliary_bus() +{ + # CONFIG_AUXILIARY_BUS=0 corresponds to CONFIG_AUXILIARY_BUS=n + # CONFIG_AUXILIARY_BUS=1 corresponds to CONFIG_AUXILIARY_BUS=y + # CONFIG_AUXILIARY_BUS= corresponds to CONFIG_AUXILIARY_BUS not available in the kernel + CONFIG_AUXILIARY_BUS=$(grep CONFIG_AUXILIARY_BUS ${kconfig} | awk -F" " '{print $3}') + msg "CONFIG_AUXILIARY_BUS=${CONFIG_AUXILIARY_BUS}" +} + +ksrc="" +verbose=0 + +options=$(getopt -o "k:vh" --long ksrc:,verbose,help -- "$@") +eval set -- "$options" +while :; do + case $1 in + -k|--ksrc) ksrc=$2; shift;; + -v|--verbose) verbose=1 ;; + -h|--help) usage && exit 0;; + --) shift; break;; + esac + shift +done + +if [ $verbose == 1 ]; then + set -x +fi + +set -x +find_config_file + +if [ ! -z $kconfig ]; then + # if we found the kernel .config file then exit the script based on various + # conditions that depend on the CONFIG_AUXILIARY_BUS string being found + get_config_auxiliary_bus + + if [ -z "$CONFIG_AUXILIARY_BUS" ]; then + msg "CONFIG_AUXILIARY_BUS not found in ${kconfig}." + # CONFIG_AUXILIARY_BUS string was not found, so OOT auxiliary is needed + exit_need_oot_auxiliary + elif [ "$CONFIG_AUXILIARY_BUS" = "1" ]; then + msg "CONFIG_AUXILIARY_BUS=y in ${kconfig}." + # CONFIG_AUXILIARY_BUS=y, so OOT auxiliary is not needed + exit_builtin_auxiliary_enabled + else + msg "" + msg "kernel $build_kernel supports auxiliary bus, but CONFIG_AUXILIARY_BUS" + msg "is not set in ${kconfig}. Rebuild your kernel with" + msg "CONFIG_AUXILIARY_BUS=y" + msg "" + # CONFIG_AUXILIARY_BUS is not "=y", but the string was found, so report + # the failure so it can be used to fail build/install + exit_kconfig_invalid + fi +else + if [ ! -d ${ksrc}/${LINUX_INCLUDE_DIR} ] && [ ! 
-d ${ksrc}/source/${LINUX_INCLUDE_DIR} ]; then + echo "${ksrc}/${LINUX_INCLUDE_DIR} and ${ksrc}/source/${LINUX_INCLUDE_DIR} do not exist" + exit_not_found_failure + fi + + # We didn't find a kernel .config file, so check to see if auxiliary_bus.h + # is found in the kernel source include directory + find_aux_bus_inc + + if [ -f "$aux_bus_inc" ]; then + # AUXILIARY_MODULE_PREFIX is defined only in out-of-tree auxiliary bus + if [ $(grep -c AUXILIARY_MODULE_PREFIX $aux_bus_inc) -eq 0 ]; then + msg "in-tree auxiliary_bus.h found at ${ksrc}/${LINUX_INCLUDE_DIR}" + # If auxiliary_bus.h is included at ${ksrc} and it isn't our OOT version, then + # don't build OOT auxiliary as part of the driver makefile + exit_builtin_auxiliary_enabled + else + msg "OOT auxiliary_bus.h found at ${ksrc}/${LINUX_INCLUDE_DIR}" + # If auxiliary bus is included at ${ksrc} and it is our OOT version, then + # build OOT auxiliary as part of the driver makefile + exit_need_oot_auxiliary + fi + else + msg "auxiliary_bus.h not found at ${ksrc}/${LINUX_INCLUDE_DIR}" + exit_need_oot_auxiliary + fi +fi diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_drv_cmd.h b/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_drv_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..cf5c2fa964bef4ff7456ddd4622613c676dadbb5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_drv_cmd.h @@ -0,0 +1,893 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_drv_cmd.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_DRV_CMD_H__ +#define __SXE2_DRV_CMD_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_type.h" +#include "sxe2_cmd.h" +#include "sxe2_flow_public.h" + +#define SXE2_DPDK_RESOURCE_INSUFFICIENT +#endif + +#ifdef SXE2_LINUX_DRIVER +#ifdef __KERNEL__ +#include +#include +#endif +#endif + +#ifdef __KERNEL__ +#define SXE2_ATTRIBUTE __aligned(4) +#else +#define SXE2_ATTRIBUTE +#pragma pack(4) +#endif + +#define SXE2_DRV_CMD_MODULE_S (16) +#define SXE2_MK_DRV_CMD(module, cmd) ((module) << SXE2_DRV_CMD_MODULE_S | (cmd)) + +#define SXE2_DEV_CAPS_OFFLOAD_L2 BIT(0) +#define SXE2_DEV_CAPS_OFFLOAD_VLAN BIT(1) +#define SXE2_DEV_CAPS_OFFLOAD_RSS BIT(2) +#define SXE2_DEV_CAPS_OFFLOAD_IPSEC BIT(3) +#define SXE2_DEV_CAPS_OFFLOAD_FNAV BIT(4) +#define SXE2_DEV_CAPS_OFFLOAD_TM BIT(5) +#define SXE2_DEV_CAPS_OFFLOAD_PTP BIT(6) +#define SXE2_DEV_CAPS_OFFLOAD_Q_MAP BIT(7) +#define SXE2_DEV_CAPS_OFFLOAD_FC_STATE BIT(8) + +#define SXE2_TXQ_STATS_MAP_MAX_NUM 16 +#define SXE2_RXQ_STATS_MAP_MAX_NUM 4 +#define SXE2_RXQ_MAP_Q_MAX_NUM 256 + +#define SXE2_STAT_MAP_INVALID_QID 0xFFFF + +#define SXE2_SCHED_MODE_DEFAULT 0 +#define SXE2_SCHED_MODE_TM 1 +#define SXE2_SCHED_MODE_HIGH_PERFORMANCE 2 +#define SXE2_SCHED_MODE_INVALID 3 + +#define SXE2_SRCVSI_PRUNE_MAX_NUM 2 + +#define SXE2_PTYPE_UNKNOWN BIT(0) +#define SXE2_PTYPE_L2_ETHER BIT(1) +#define SXE2_PTYPE_L3_IPV4 BIT(2) +#define SXE2_PTYPE_L3_IPV6 BIT(4) +#define SXE2_PTYPE_L4_TCP BIT(6) +#define SXE2_PTYPE_L4_UDP BIT(7) +#define SXE2_PTYPE_L4_SCTP BIT(8) +#define SXE2_PTYPE_INNER_L2_ETHER BIT(9) +#define SXE2_PTYPE_INNER_L3_IPV4 BIT(10) +#define SXE2_PTYPE_INNER_L3_IPV6 BIT(12) +#define SXE2_PTYPE_INNER_L4_TCP BIT(14) +#define SXE2_PTYPE_INNER_L4_UDP BIT(15) +#define SXE2_PTYPE_INNER_L4_SCTP BIT(16) +#define SXE2_PTYPE_TUNNEL_GRENAT BIT(17) + +#define SXE2_PTYPE_L2_MASK (SXE2_PTYPE_L2_ETHER) +#define SXE2_PTYPE_L3_MASK (SXE2_PTYPE_L3_IPV4 | 
SXE2_PTYPE_L3_IPV6) +#define SXE2_PTYPE_L4_MASK (SXE2_PTYPE_L4_TCP | SXE2_PTYPE_L4_UDP | \ + SXE2_PTYPE_L4_SCTP) +#define SXE2_PTYPE_INNER_L2_MASK (SXE2_PTYPE_INNER_L2_ETHER) +#define SXE2_PTYPE_INNER_L3_MASK (SXE2_PTYPE_INNER_L3_IPV4 | \ + SXE2_PTYPE_INNER_L3_IPV6) +#define SXE2_PTYPE_INNER_L4_MASK (SXE2_PTYPE_INNER_L4_TCP | \ + SXE2_PTYPE_INNER_L4_UDP | \ + SXE2_PTYPE_INNER_L4_SCTP) +#define SXE2_PTYPE_TUNNEL_MASK (SXE2_PTYPE_TUNNEL_GRENAT) + +enum sxe2_dev_type { + SXE2_DEV_T_PF = 0, + SXE2_DEV_T_VF, + SXE2_DEV_T_PF_BOND, + SXE2_DEV_T_MAX, +}; + +struct sxe2_drv_queue_caps { + __le16 queues_cnt; + __le16 base_idx_in_pf; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_msix_caps { + __le16 msix_vectors_cnt; + __le16 base_idx_in_func; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rss_hash_caps { + __le16 hash_key_size; + __le16 lut_key_size; +} SXE2_ATTRIBUTE; + +enum sxe2_vf_vsi_valid { + SXE2_VF_VSI_BOTH = 0, + SXE2_VF_VSI_ONLY_DPDK, + SXE2_VF_VSI_ONLY_KERNEL, + SXE2_VF_VSI_MAX, +}; + +struct sxe2_drv_vsi_caps { + __le16 func_id; + __le16 dpdk_vsi_id; + __le16 kernel_vsi_id; + __le16 vsi_type; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_representor_caps { + __le16 cnt_repr_vf; + u8 rsv[2]; + struct sxe2_drv_vsi_caps repr_vf_id[256]; +} SXE2_ATTRIBUTE; + +enum sxe2_phys_port_name_type { + SXE2_PHYS_PORT_NAME_TYPE_NOTSET = 0, + SXE2_PHYS_PORT_NAME_TYPE_LEGACY, + SXE2_PHYS_PORT_NAME_TYPE_UPLINK, + SXE2_PHYS_PORT_NAME_TYPE_PFVF, + SXE2_PHYS_PORT_NAME_TYPE_UNKNOWN, +}; + +struct sxe2_switchdev_info { + u8 is_switchdev; + u8 master; + u8 representor; + u8 port_name_type; + __le32 ctrl_num; + __le32 pf_num; + __le32 vf_num; + __le32 mpesw_owner; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_uplink_info { + u8 pf_id; + u8 is_set; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_repr_info { + u8 pf_id; + u8 is_set; + u8 rsv[2]; + __le16 cp_vsi_id; + __le16 repr_pf_id; + __le16 repr_vf_id; + __le16 repr_q_id; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_mode_info { + u8 pf_id; + u8 is_switchdev; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_switchdev_cpvsi_info { + __le16 cp_vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_txsch_caps { + u8 layer_cap; + u8 tm_mid_node_num; + u8 prio_num; + u8 rev; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_caps_resp { + struct sxe2_drv_queue_caps queue_caps; + struct sxe2_drv_msix_caps msix_caps; + struct sxe2_drv_rss_hash_caps rss_hash_caps; + struct sxe2_drv_vsi_caps vsi_caps; + struct sxe2_txsch_caps txsch_caps; + struct sxe2_drv_representor_caps repr_caps; + u8 port_idx; + u8 pf_idx; + u8 dev_type; + u8 rev; + __le32 cap_flags; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_info_resp { + __le64 dsn; + __le16 vsi_id; + u8 rsv[2]; + u8 mac_addr[ETH_ALEN]; + u8 rsv2[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_dev_fw_info_resp { + u8 main_version_id; + u8 sub_version_id; + u8 fix_version_id; + u8 build_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rxq_ctxt { + __le64 dma_addr; + __le32 max_lro_size; + __le32 split_type_mask; + __le16 hdr_len; + __le16 buf_len; + __le16 depth; + __le16 queue_id; + u8 lro_en; + u8 keep_crc_en; + u8 split_en; + u8 desc_size; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rxq_cfg_req { + __le16 q_cnt; + __le16 vsi_id; + __le16 max_frame_size; + u8 rsv[2]; + struct sxe2_drv_rxq_ctxt cfg[]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_txq_ctxt { + __le64 dma_addr; + __le32 sched_mode; + __le16 queue_id; + __le16 depth; + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_txq_cfg_req { + __le16 q_cnt; + __le16 vsi_id; + struct sxe2_drv_txq_ctxt cfg[]; +} 
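+/*
+ * Editor's note: requests that end in a flexible array, such as cfg[] here,
+ * are sized per entry count by the sender; a hypothetical kernel-side
+ * sketch:
+ *
+ *	struct sxe2_drv_txq_cfg_req *req;
+ *
+ *	req = kzalloc(struct_size(req, cfg, q_cnt), GFP_KERNEL);
+ *	if (req)
+ *		req->q_cnt = cpu_to_le16(q_cnt);
+ *
+ * All multi-byte fields use __le* types, so the wire format stays
+ * little-endian regardless of host endianness.
+ */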
SXE2_ATTRIBUTE; + +struct sxe2_drv_q_switch_req { + __le16 q_idx; + __le16 vsi_id; + u8 is_enable; + u8 sched_mode; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_queue_irq_bind_req { + __le16 q_idx; + __le16 msix_idx; + u8 itr_idx; + u8 bind; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_create_req_resp { + __le16 vsi_id; + __le16 vsi_type; + struct sxe2_drv_queue_caps used_queues; + struct sxe2_drv_msix_caps used_msix; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_free_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_info_get_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_info_get_resp { + __le16 vsi_id; + __le16 vsi_type; + struct sxe2_drv_queue_caps used_queues; + struct sxe2_drv_msix_caps used_msix; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_udp_tunnel_req { + u8 type; + u8 rsv; + __le16 port; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_udp_tunnel_resp { + u8 type; + u8 enable; + u8 dst; + u8 src; + u16 port; + u8 fw_used; + u8 rsv; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_rx_map_req { + __le16 queue_id; + u8 pool_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_tx_map_req { + __le16 queue_id; + u8 pool_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vlan_cfg_query_resp { + __le16 vsi_id; + u8 port_vlan_exist; + u8 is_switchdev; + __le16 tpid; + __le16 vid; + u8 outer_insert; + u8 outer_strip; + u8 inner_insert; + u8 inner_strip; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vlan_offload_cfg_req { + __le16 vsi_id; + __le16 tpid; + u8 outer_insert; + u8 outer_strip; + u8 inner_insert; + u8 inner_strip; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_port_vlan_cfg_req { + __le16 vsi_id; + __le16 tpid; + __le16 vid; + u8 prio; + u8 rsv; +} SXE2_ATTRIBUTE; + +enum sxe2_mac_filter_type { + SXE2_MAC_FILTER_TYPE_UC = 0, + SXE2_MAC_FILTER_TYPE_MC, + SXE2_MAC_FILTER_TYPE_MAX, +}; + +struct sxe2_mac_filter_cfg_req { + __le16 vsi_id; + u8 addr[ETH_ALEN]; + u8 type; + u8 is_add; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +enum sxe2_promisc_filter_type { + SXE2_PROMISC_FILTER_TYPE_PROMISC = 0, + SXE2_PROMISC_FILTER_TYPE_ALLMULTI, + SXE2_PROMISC_FILTER_TYPE_MAX, +}; + +struct sxe2_promisc_filter_cfg_req { + __le16 vsi_id; + u8 type; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_srcvsi_ext_cfg_req { + __le16 vsi_id; + __le16 srcvsi_list[SXE2_SRCVSI_PRUNE_MAX_NUM]; + u8 srcvsi_cnt; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_vlan_filter_cfg_req { + __le16 vsi_id; + __le16 vlan_id; + __le16 tpid_id; + u8 prio; + u8 is_add; +} SXE2_ATTRIBUTE; + +struct sxe2_vlan_filter_switch_req { + __le16 vsi_id; + u8 is_oper_enable; + u8 rsv; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_key_req { + __le16 vsi_id; + __le16 key_size; + u8 key[]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_lut_req { + __le16 vsi_id; + __le16 lut_size; + u8 lut[]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_func_req { + __le16 vsi_id; + u8 func; + u8 rsv[1]; +} SXE2_ATTRIBUTE; + +struct sxe2_rss_hf_req { + __le16 vsi_id; + u8 rsv[2]; + __le32 headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + __le32 hash_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + __le32 hdr_type; + u8 symm; + u8 rsv1[3]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_capa_resq { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; + __le16 ip_id_cnt; + __le16 udp_group_cnt; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_add_req { + __le32 mode; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_add_resp { + __le16 index; +} SXE2_ATTRIBUTE; + +struct 
sxe2_drv_ipsec_rxsa_add_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 is_over_sdn; + u8 sdn_group_id; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_rxsa_add_resp { + u8 ip_id; + u8 udp_group_id; + __le16 sa_idx; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_txsa_del_req { + __le16 sa_idx; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_ipsec_rxsa_del_req { + u8 ip_id; + u8 group_id; + __le16 sa_idx; + __le32 spi; + bool func_type; + u8 func_id; + u8 drv_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_link_info_resp { + __le32 speed; + u8 status; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_filter_req { + __le32 flow_id; + struct sxe2_flow_meta meta; + enum sxe2_flow_engine_type engine_type; + struct sxe2_flow_pattern pattern_outer; + struct sxe2_flow_pattern pattern_inner; + struct sxe2_flow_action action; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_filter_resp { + enum sxe2_flow_engine_type engine_type; + __le32 flow_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_get_stat_id_req { + u8 need_update; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_get_stat_id_resp { + __le32 stat_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_free_stat_id_req { + __le32 stat_id; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_query_stat_req { + __le32 stat_id; + __le32 stat_ctrl; + __le32 is_clear; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_flow_fnav_query_stat_resp { + __le32 stat_index; + __le64 stat_hits; + __le64 stat_bytes; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_sw_stats { + __le64 rx_packets; + __le64 rx_bytes; + __le64 tx_packets; + __le64 tx_bytes; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_stats_req { + __le16 vsi_id; + u8 rsv[2]; + struct sxe2_drv_vsi_sw_stats sw_stats; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_stats_resp { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_mac_stats_resp { + __le64 rx_out_of_buffer; + __le64 rx_qblock_drop; + __le64 tx_frame_good; + __le64 rx_frame_good; + __le64 rx_crc_errors; + __le64 tx_bytes_good; + __le64 rx_bytes_good; + __le64 tx_multicast_good; + __le64 tx_broadcast_good; + __le64 rx_multicast_good; + __le64 rx_broadcast_good; + __le64 rx_len_errors; + __le64 rx_out_of_range_errors; + __le64 rx_oversize_pkts_phy; + __le64 rx_symbol_err; + __le64 rx_pause_frame; + __le64 tx_pause_frame; + __le64 rx_discards_phy; + __le64 rx_discards_ips_phy; + __le64 tx_dropped_link_down; + __le64 rx_undersize_good; + __le64 rx_runt_error; + __le64 tx_bytes_good_bad; + __le64 tx_frame_good_bad; + __le64 rx_jabbers; + __le64 rx_size_64; + __le64 rx_size_65_127; + __le64 rx_size_128_255; + __le64 rx_size_256_511; + __le64 rx_size_512_1023; + __le64 rx_size_1024_1522; + __le64 rx_size_1523_max; + __le64 rx_pcs_symbol_err_phy; + __le64 rx_corrected_bits_phy; + __le64 rx_err_lane_0_phy; + __le64 rx_err_lane_1_phy; + __le64 rx_err_lane_2_phy; + __le64 rx_err_lane_3_phy; + __le64 rx_prio_buf_discard[SXE2_MAX_USER_PRIORITY]; + __le64 rx_illegal_bytes; + __le64 rx_oversize_good; + __le64 tx_unicast; + __le64 tx_broadcast; + __le64 tx_multicast; + __le64 tx_vlan_packet_good; + __le64 tx_size_64; + __le64 tx_size_65_127; + 
__le64 tx_size_128_255; + __le64 tx_size_256_511; + __le64 tx_size_512_1023; + __le64 tx_size_1024_1522; + __le64 tx_size_1523_max; + __le64 tx_underflow_error; + __le64 rx_byte_good_bad; + __le64 rx_frame_good_bad; + __le64 rx_unicast_good; + __le64 rx_vlan_packets; + __le64 prio_xoff_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xoff_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_2_xoff[SXE2_MAX_USER_PRIORITY]; +} SXE2_ATTRIBUTE; + +enum sxe2_fc_type { + SXE2_FC_T_DIS = 0, + SXE2_FC_T_LFC, + SXE2_FC_T_PFC, + + SXE2_FC_T_UNKNOW = 255, +}; + +struct sxe2_drv_vsi_fc_get_req { + __le16 vsi_id; + u8 rsv[2]; +} SXE2_ATTRIBUTE; + +struct sxe2_drv_vsi_fc_get_resp { + u8 fc_enable; + u8 rsv[3]; +} SXE2_ATTRIBUTE; + +struct sxe2_tm_res { + __le16 teid; +}; + +struct sxe2_tm_info { + __le32 committed; + __le32 peak; + u8 priority; + u8 reserve; + __le16 weight; +}; + +struct sxe2_tm_add_mid_msg { + __le16 parent_teid; + u8 adj_lvl; + struct sxe2_tm_info info; +}; + +struct sxe2_tm_add_queue_msg { + __le16 parent_teid; + __le16 queue_id; + u8 adj_lvl; + struct sxe2_tm_info info; +}; + +struct sxe2_stats_txq_map_pool { + __le16 queue_id_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + u8 curr_map_idx; +}; + +struct sxe2_stats_hw_txq_map_pool { + __le16 txq_id; +}; + +struct sxe2_stats_hw_txq_map { + struct sxe2_stats_hw_txq_map_pool hw_txq_map_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + u8 curr_map_idx; +}; + +struct sxe2_stats_rxq_map_pool { + u8 pool_id; + __le16 queue_id_pool[SXE2_RXQ_MAP_Q_MAX_NUM]; + __le16 curr_map_idx; +}; + +struct sxe2_stats_txq_map { + struct sxe2_stats_txq_map_pool txq_map_pool[SXE2_TXQ_STATS_MAP_MAX_NUM]; + struct sxe2_stats_hw_txq_map hw_txq_map; +}; + +struct sxe2_stats_rxq_map { + struct sxe2_stats_rxq_map_pool rxq_map_pool[SXE2_RXQ_STATS_MAP_MAX_NUM]; +}; + +struct sxe2_txq_map_info { + __le32 txq_lan_pkt_cnt; + __le32 txq_lan_byte_cnt; +}; + +struct sxe2_rxq_map_info { + __le64 rxq_lan_in_pkt_cnt; + __le64 rxq_lan_in_byte_cnt; + + __le64 rxq_fd_in_pkt_cnt; + + __le64 rxq_mng_in_pkt_cnt; + __le64 rxq_mng_in_byte_cnt; + __le64 rxq_mng_out_pkt_cnt; +}; + +struct sxe2_queue_map_info { + struct sxe2_rxq_map_info + rxq_stats_map_info[SXE2_RXQ_STATS_MAP_MAX_NUM]; + struct sxe2_txq_map_info + txq_stats_map_info[SXE2_TXQ_STATS_MAP_MAX_NUM]; +}; + +struct sxe2_stats_map { + struct sxe2_stats_txq_map txq_map; + struct sxe2_stats_rxq_map rxq_map; + + struct sxe2_queue_map_info q_info; +}; + +struct sxe2_drv_sfp_req { + u8 is_wr; + u8 is_qsfp; + __le16 bus_addr; + __le16 page_cnt; + __le16 offset; + __le16 data_len; + __le16 rvd; + u8 data[]; +}; + +struct sxe2_drv_sfp_resp { + u8 is_wr; + u8 is_qsfp; + __le16 data_len; + u8 data[]; +}; + +enum sxe2_drv_cmd_module { + SXE2_DRV_CMD_MODULE_HANDSHAKE = 0, + SXE2_DRV_CMD_MODULE_DEV = 1, + SXE2_DRV_CMD_MODULE_VSI = 2, + SXE2_DRV_CMD_MODULE_QUEUE = 3, + SXE2_DRV_CMD_MODULE_STATS = 4, + SXE2_DRV_CMD_MODULE_SUBSCRIBE = 5, + SXE2_DRV_CMD_MODULE_RSS = 6, + SXE2_DRV_CMD_MODULE_FLOW = 7, + SXE2_DRV_CMD_MODULE_TM = 8, + SXE2_DRV_CMD_MODULE_IPSEC = 9, + SXE2_DRV_CMD_MODULE_PTP = 10, + + SXE2_DRV_CMD_MODULE_VLAN = 11, + SXE2_DRV_CMD_MODULE_RDMA = 12, + SXE2_DRV_CMD_MODULE_LINK = 13, + SXE2_DRV_CMD_MODULE_MACADDR = 14, + SXE2_DRV_CMD_MODULE_PROMISC = 15, + + SXE2_DRV_CMD_MODULE_LED = 16, + SXE2_DEV_CMD_MODULE_OPT = 17, + SXE2_DEV_CMD_MODULE_SWITCH = 18, + SXE2_DRV_CMD_MODULE_ACL = 19, + SXE2_DRV_CMD_MODULE_UDPTUNEEL = 20, + SXE2_DRV_CMD_MODULE_QUEUE_MAP = 21, + + 
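+ /*
+  * The opcodes in sxe2_drv_cmd_code below are derived from these
+  * module IDs via SXE2_MK_DRV_CMD(module, index). The macro itself
+  * is not visible in this hunk; it presumably packs the module ID
+  * into the upper bits of the opcode so each module owns a private
+  * index space, e.g. (purely illustrative, assuming an 8-bit index
+  * field):
+  *
+  *	#define SXE2_MK_DRV_CMD(module, idx)	(((module) << 8) | (idx))
+  *
+  * which would make SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_RSS, 1)
+  * expand to (6 << 8) | 1 == 0x601.
+  */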
SXE2_DRV_CMD_MODULE_SCHED = 22, + + SXE2_DRV_CMD_MODULE_IRQ = 23, + + SXE2_DRV_CMD_MODULE_OPT = 24, +}; + +enum sxe2_drv_cmd_code { + SXE2_DRV_CMD_HANDSHAKE_ENABLE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_HANDSHAKE, 1), + SXE2_DRV_CMD_HANDSHAKE_DISABLE, + + SXE2_DRV_CMD_DEV_GET_CAPS = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_DEV, 1), + SXE2_DRV_CMD_DEV_GET_INFO, + SXE2_DRV_CMD_DEV_GET_FW_INFO, + SXE2_DRV_CMD_DEV_RESET, + SXE2_DRV_CMD_DEV_GET_SWITCHDEV_INFO, + + SXE2_DRV_CMD_VSI_CREATE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_VSI, 1), + SXE2_DRV_CMD_VSI_FREE, + SXE2_DRV_CMD_VSI_INFO_GET, + SXE2_DRV_CMD_VSI_SRCVSI_PRUNE, + SXE2_DRV_CMD_VSI_FC_GET, + + SXE2_DRV_CMD_RX_MAP_SET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_QUEUE_MAP, 1), + SXE2_DRV_CMD_TX_MAP_SET, + SXE2_DRV_CMD_TX_RX_MAP_GET, + SXE2_DRV_CMD_TX_RX_MAP_RESET, + SXE2_DRV_CMD_TX_RX_MAP_INFO_CLEAR, + + SXE2_DRV_CMD_SCHED_ROOT_TREE_ALLOC = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_SCHED, 1), + SXE2_DRV_CMD_SCHED_ROOT_TREE_RELEASE, + SXE2_DRV_CMD_SCHED_ROOT_CHILDREN_DELETE, + SXE2_DRV_CMD_SCHED_TM_ADD_MID_NODE, + SXE2_DRV_CMD_SCHED_TM_ADD_QUEUE_NODE, + + SXE2_DRV_CMD_RXQ_CFG_ENABLE = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_QUEUE, 1), + SXE2_DRV_CMD_TXQ_CFG_ENABLE, + SXE2_DRV_CMD_RXQ_DISABLE, + SXE2_DRV_CMD_TXQ_DISABLE, + + SXE2_DRV_CMD_VSI_STATS_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_STATS, 1), + SXE2_DRV_CMD_VSI_STATS_CLEAR, + SXE2_DRV_CMD_MAC_STATS_GET, + SXE2_DRV_CMD_MAC_STATS_CLEAR, + + SXE2_DRV_CMD_RSS_KEY_SET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_RSS, 1), + SXE2_DRV_CMD_RSS_LUT_SET, + SXE2_DRV_CMD_RSS_FUNC_SET, + SXE2_DRV_CMD_RSS_HF_ADD, + SXE2_DRV_CMD_RSS_HF_DEL, + SXE2_DRV_CMD_RSS_HF_CLEAR, + + SXE2_DRV_CMD_FLOW_FILTER_ADD = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_FLOW, 1), + SXE2_DRV_CMD_FLOW_FILTER_DEL, + SXE2_DRV_CMD_FLOW_FILTER_CLEAR, + SXE2_DRV_CMD_FLOW_FNAV_STAT_ALLOC, + SXE2_DRV_CMD_FLOW_FNAV_STAT_FREE, + SXE2_DRV_CMD_FLOW_FNAV_STAT_QUERY, + + SXE2_DRV_CMD_DEL_TM_ROOT = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_TM, 1), + SXE2_DRV_CMD_ADD_TM_ROOT, + SXE2_DRV_CMD_ADD_TM_NODE, + SXE2_DRV_CMD_ADD_TM_QUEUE, + + SXE2_DRV_CMD_GET_PTP_CLOCK = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_PTP, 1), + + SXE2_DRV_CMD_VLAN_FILTER_ADD_DEL = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_VLAN, 1), + SXE2_DRV_CMD_VLAN_FILTER_SWITCH, + SXE2_DRV_CMD_VLAN_OFFLOAD_CFG, + SXE2_DRV_CMD_VLAN_PORTVLAN_CFG, + SXE2_DRV_CMD_VLAN_CFG_QUERY, + + SXE2_DRV_CMD_RDMA_DUMP_PCAP = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_RDMA, 1), + + SXE2_DRV_CMD_LINK_STATUS_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_LINK, 1), + + SXE2_DRV_CMD_MAC_ADDR_UC = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_MACADDR, 1), + SXE2_DRV_CMD_MAC_ADDR_MC, + + SXE2_DRV_CMD_PROMISC_CFG = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_PROMISC, 1), + SXE2_DRV_CMD_ALLMULTI_CFG, + + SXE2_DRV_CMD_LED_CTRL = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_LED, 1), + + SXE2_DRV_CMD_OPT_EEP = + SXE2_MK_DRV_CMD(SXE2_DEV_CMD_MODULE_OPT, 1), + + SXE2_DRV_CMD_SWITCH = + SXE2_MK_DRV_CMD(SXE2_DEV_CMD_MODULE_SWITCH, 1), + SXE2_DRV_CMD_SWITCH_UPLINK, + SXE2_DRV_CMD_SWITCH_REPR, + SXE2_DRV_CMD_SWITCH_MODE, + SXE2_DRV_CMD_SWITCH_CPVSI, + + SXE2_DRV_CMD_UDPTUNNEL_ADD = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_UDPTUNEEL, 1), + SXE2_DRV_CMD_UDPTUNNEL_DEL, + SXE2_DRV_CMD_UDPTUNNEL_GET, + + SXE2_DRV_CMD_IPSEC_CAP_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_IPSEC, 1), + SXE2_DRV_CMD_IPSEC_TXSA_ADD, + SXE2_DRV_CMD_IPSEC_RXSA_ADD, + SXE2_DRV_CMD_IPSEC_TXSA_DEL, + SXE2_DRV_CMD_IPSEC_RXSA_DEL, + SXE2_DRV_CMD_IPSEC_RESOURCE_CLEAR, + + SXE2_DRV_CMD_EVT_IRQ_BAND_RXQ = + 
SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_IRQ, 1), + + SXE2_DRV_CMD_OPT_EEP_GET = + SXE2_MK_DRV_CMD(SXE2_DRV_CMD_MODULE_OPT, 1), + +}; + +#ifndef __KERNEL__ +#pragma pack() +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_ioctl_chnl.h b/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_ioctl_chnl.h new file mode 100644 index 0000000000000000000000000000000000000000..07118eecdbf2da3a777eec846e61b9a3ce7a9a85 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/ioctl/sxe2_ioctl_chnl.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ioctl_chnl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_IOCTL_CHNL_H__ +#define __SXE2_IOCTL_CHNL_H__ + +#ifdef SXE2_DPDK_DRIVER + +#include +#if (RTE_VERSION_NUM(22, 0, 0, 0) <= RTE_VERSION) +#include +#else +#include +#include +#endif + +#include "sxe2_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#ifdef __KERNEL__ +#include +#include +#endif +#endif + +#include "sxe2_internal_ver.h" + +#define SXE2_COM_INVAL_U32 0xFFFFFFFF + +#define SXE2_COM_PCI_OFFSET_SHIFT 40 + +#define SXE2_COM_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << SXE2_COM_PCI_OFFSET_SHIFT) +#define SXE2_COM_PCI_OFFSET_MASK (((u64)(1) << SXE2_COM_PCI_OFFSET_SHIFT) - 1) +#define SXE2_COM_PCI_OFFSET_GEN(index, off) ((((u64)(index)) << SXE2_COM_PCI_OFFSET_SHIFT) | \ + (((u64)(off)) & SXE2_COM_PCI_OFFSET_MASK)) + +#define SXE2_DRV_TRACE_ID_COUNT_MASK 0x003FFFFFFFFFFFFFLLU + +#define SXE2_DRV_CMD_DFLT_TIMEOUT (30) + +#define SXE2_COM_VER_MAJOR 1 +#define SXE2_COM_VER_MINOR 0 +#define SXE2_COM_VER SXE2_MK_VER(SXE2_COM_VER_MAJOR, SXE2_COM_VER_MINOR) + +enum SXE2_COM_CMD { + SXE2_DEVICE_HANDSHAKE = 1, + SXE2_DEVICE_IO_IRQS_REQ, + SXE2_DEVICE_EVT_IRQ_REQ, + SXE2_DEVICE_RST_IRQ_REQ, + SXE2_DEVICE_EVT_CAUSE_GET, + SXE2_DEVICE_DMA_MAP, + SXE2_DEVICE_DMA_UNMAP, + SXE2_DEVICE_PASSTHROUGH, + SXE2_DEVICE_MAX, +}; + +#define SXE2_CMD_TYPE 'S' + +#define SXE2_COM_CMD_HANDSHAKE _IO(SXE2_CMD_TYPE, SXE2_DEVICE_HANDSHAKE) +#define SXE2_COM_CMD_IO_IRQS_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_IO_IRQS_REQ) +#define SXE2_COM_CMD_EVT_IRQ_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_EVT_IRQ_REQ) +#define SXE2_COM_CMD_RST_IRQ_REQ _IO(SXE2_CMD_TYPE, SXE2_DEVICE_RST_IRQ_REQ) +#define SXE2_COM_CMD_EVT_CAUSE_GET _IO(SXE2_CMD_TYPE, SXE2_DEVICE_EVT_CAUSE_GET) +#define SXE2_COM_CMD_DMA_MAP _IO(SXE2_CMD_TYPE, SXE2_DEVICE_DMA_MAP) +#define SXE2_COM_CMD_DMA_UNMAP _IO(SXE2_CMD_TYPE, SXE2_DEVICE_DMA_UNMAP) +#define SXE2_COM_CMD_PASSTHROUGH _IO(SXE2_CMD_TYPE, SXE2_DEVICE_PASSTHROUGH) + +enum sxe2_com_cap { + SXE2_COM_CAP_IOMMU_MAP = 0, +}; + +struct sxe2_ioctl_cmd_common_hdr { + u32 dpdk_ver; + u32 drv_ver; + u32 msg_len; + u32 cap; + u8 reserved[32]; +}; + +struct sxe2_drv_cmd_params { + u64 trace_id; + u32 timeout; + u32 opcode; + u16 vsi_id; + u16 repr_id; + u32 req_len; + u32 resp_len; + void *req_data; + void *resp_data; + u8 resv[32]; +}; + +struct sxe2_ioctl_irq_set { + u32 cnt; + u8 resv[4]; + u32 base_irq_in_com; + s32 *event_fd; +}; + +enum sxe2_com_event_cause { + SXE2_COM_EC_LINK_CHG = 0, + SXE2_COM_SW_MODE_LEGACY, + SXE2_COM_SW_MODE_SWITCHDEV, + SXE2_COM_FC_ST_CHANGE, + + SXE2_COM_EC_RESET = 62, + SXE2_COM_EC_MAX = 63, +}; + +struct sxe2_ioctl_other_evt_set { + s32 eventfd; + u8 resv[4]; + u64 filter_table; +}; + +struct sxe2_ioctl_other_evt_get { + u64 evt_cause; + u8 resv[8]; +}; + +struct sxe2_ioctl_reset_sub_set { + s32 eventfd; + u8 resv[4]; +}; + +struct 
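+/*
+ * Illustrative user-space usage (hypothetical, not part of this
+ * patch): the commands above are plain _IO() numbers, so the payload
+ * layout is a driver-side convention rather than being encoded in
+ * the ioctl itself. A caller might open the char device and issue
+ * the handshake as:
+ *
+ *	struct sxe2_ioctl_cmd_common_hdr hdr = { .drv_ver = SXE2_COM_VER };
+ *
+ *	ret = ioctl(fd, SXE2_COM_CMD_HANDSHAKE, &hdr);
+ */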
sxe2_ioctl_iommu_dma_map { + u64 vaddr; + u64 iova; + u64 size; + u8 resv[4]; +}; + +struct sxe2_ioctl_iommu_dma_unmap { + u64 iova; +}; + +union sxe2_drv_trace_info { + u64 id; + struct { + u64 count : 54; + u64 cpu_id : 10; + } sxe2_drv_trace_id_param; +}; +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/mbx/sxe2_mbx_public.h b/drivers/net/ethernet/linkdata/sxe2vf/common/mbx/sxe2_mbx_public.h new file mode 100644 index 0000000000000000000000000000000000000000..b3a59a9b723ecdd1a1ded92d484b58ce9202aa6c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/mbx/sxe2_mbx_public.h @@ -0,0 +1,837 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_mbx_public.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_VF_PUBLIC_H__ +#define __SXE2_VF_PUBLIC_H__ + +#include "sxe2_spec.h" +#ifdef __KERNEL__ +#include "sxe2_compat.h" +#endif + +#include "sxe2_host_regs.h" +#include "sxe2_flow_public.h" + +#pragma pack(1) + +#define SXE2_VF_VERSION_MAJOR 1 +#define SXE2_VF_VERSION_MINOR 1 + +#define SXE2_VF_MAX_VSI_CNT 4 + +#define SXE2_VF_VLAN_STATUS_INVALID (0xFF) + +#define SXE2_VF_ETH_Q_NUM 16 +#define SXE2_VF_DPDK_Q_NUM 16 + +#define SXE2_VF_DRV_TO_HW (0x3) +#define SXE2_VF_VF_TO_PF (0x0801) +#define SXE2_VF_PF_TO_VF (0x0802) + +#define SXE2_VF_MBX_MAGIC (0xFEFEEFEF) + +#define SXE2_VF_PROMISC BIT_ULL(0) +#define SXE2_VF_PROMISC_MULTICAST BIT_ULL(1) +#define SXE2_VF_VLAN_FILTER BIT_ULL(2) + +#define SXE2_VF_OFFLOAD_L2 BIT(0) +#define SXE2_VF_OFFLOAD_VLAN BIT(1) +#define SXE2_VF_OFFLOAD_RSS BIT(2) +#define SXE2_VF_OFFLOAD_IPSEC BIT(3) +#define SXE2_VF_OFFLOAD_FNAV BIT(4) +#define SXE2_VF_OFFLOAD_TM BIT(5) +#define SXE2_VF_OFFLOAD_PTP BIT(6) + +#define SXE2_IPSEC_DIR_TX (0) +#define SXE2_IPSEC_DIR_RX (1) +#define SXE2_IPSEC_INVAILID_SA_IDX (0xFFFF) + +enum sxe2vf_vsi_type { + SXE2VF_VSI_TYPE_ETH = 0, + SXE2VF_VSI_TYPE_DPDK, + SXE2VF_VSI_TYPE_NR, +}; + +#define SXE2_VF_VSI_CNT_USED SXE2VF_VSI_TYPE_NR + +enum sxe2_vf_opcode { + SXE2_VF_UNKOWN = 0, + SXE2_VF_RESET_REQUEST = 0x1, + SXE2_VF_VERSION_MATCH = 0x2, + SXE2_VF_HW_RES_GET = 0x3, + SXE2_VF_IRQ_MAP = 0x4, + SXE2_VF_QUEUES_DISABLE = 0x5, + SXE2_VF_RXQ_CFG_AND_ENABLE = 0x6, + SXE2_VF_TXQ_CFG_AND_ENABLE = 0x7, + SXE2_VF_MAC_ADDR_ADD = 0x8, + SXE2_VF_MAC_ADDR_DEL = 0x9, + SXE2_VF_VLAN_ADD = 0xa, + SXE2_VF_VLAN_DEL = 0xb, + SXE2_VF_STATS_GET = 0xc, + SXE2_VF_LINK_UPDATE_NOTIFY = 0xd, + SXE2_VF_PROMISC_CFG = 0xe, + SXE2_VF_VLAN_CAPS_GET = 0xf, + SXE2_VF_VLAN_OFFLOAD_CFG = 0x10, + SXE2_VF_VLAN_FILTER_CFG = 0x11, + SXE2_VF_LINK_STATUS_GET = 0x12, + SXE2_VF_RESET_NOTIFY = 0x13, + SXE2_VF_RDMA = 0x14, + SXE2_VF_QV_MAP = 0x15, + SXE2_VF_QV_UNMAP = 0x16, + SXE2_VF_RDMA_MGR_CMD = 0x17, + + SXE2_VF_GET_RSS_KEY = 0x18, + SXE2_VF_GET_RSS_LUT = 0x19, + SXE2_VF_SET_RSS_KEY = 0x1a, + SXE2_VF_SET_RSS_LUT = 0x1b, + SXE2_VF_ADD_RSS_CFG = 0x1c, + + SXE2_VF_DEL_RSS_CFG = 0x1d, + SXE2_VF_CLEAR_RSS_CFG = 0x1e, + SXE2_VF_SET_RSS_HASH_CTRL = 0X1f, + + SXE2_VF_FNAV_FILTER_ADD = 0x20, + SXE2_VF_FNAV_FILTER_DEL = 0x21, + SXE2_VF_FNAV_FILTER_CLEAR = 0X22, + SXE2_VF_FNAV_ALLOC_STAT = 0X23, + SXE2_VF_FNAV_FREE_STAT = 0X24, + SXE2_VF_FNAV_QUERY_STAT = 0x25, + + SXE2_VF_STATS_CLEAR = 0x26, + SXE2_VF_RXQ_DISABLE = 0x27, + SXE2_VF_TXQ_DISABLE = 0x28, + + SXE2_VF_GET_PTP_CLOCK = 0x29, + SXE2_VF_IPSEC_SA_ADD = 0x2a, + SXE2_VF_IPSEC_SA_CLEAR = 0x2b, + SXE2_VF_IPSEC_GET_CAPA = 0x2c, + + SXE2_VF_RDMA_DUMP_PCAP = 0x2d, + + SXE2_VF_IRQ_UNMAP = 0x2e, + + 
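+ /*
+  * Illustrative sketch: on probe a VF would negotiate the mailbox
+  * protocol with SXE2_VF_VERSION_MATCH before using other opcodes.
+  * Assuming a hypothetical sxe2vf_mbx_send() helper:
+  *
+  *	struct sxe2_vf_ver_msg ver = {
+  *		.major = cpu_to_le16(SXE2_VF_VERSION_MAJOR),
+  *		.minor = cpu_to_le16(SXE2_VF_VERSION_MINOR),
+  *	};
+  *
+  *	ret = sxe2vf_mbx_send(hw, SXE2_VF_VERSION_MATCH,
+  *			      &ver, sizeof(ver));
+  */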
SXE2_VF_ADD_DEFAULT_RSS_CFG = 0x2f, + SXE2_VF_REPLAY_RSS_CFG = 0x30, + SXE2_VF_STATS_PUSH = 0x31, + SXE2_VF_GET_ETHTOOL_INFO = 0x32, + SXE2_VF_FNAV_MATCH_CLEAR = 0x33, + SXE2_VF_VSI_CFG = 0x34, + SXE2_VF_USER_DRIVER_RELEASE = 0x35, + + SXE2_VF_MAC_ADDR_UPDATE = 0x36, + SXE2_VF_PROMISC_UPDATE = 0x37, + SXE2_VF_USER_VLAN_PROCESS = 0x38, + + SXE2_VF_ACL_FILTER_ADD = 0x39, + SXE2_VF_ACL_FILTER_DEL = 0x3a, + SXE2_VF_ACL_FILTER_CLEAR = 0x3b, + + SXE2_VF_PASSTHROUGH_USER_VF_DATA = 0x3c, + + SXE2_VF_DRV_MODE_SET = 0x3d, + SXE2_VF_DRV_MODE_GET = 0x3e, + + SXE2_VF_OPCODE_NR, + + SXE2_VF_MBX_DISABLE = 0xFFFF, +}; + +enum sxe2_vf_err_code { + SXE2_VF_ERR_SUCCESS = 0, + SXE2_VF_ERR_PARAM = 1024, + SXE2_VF_ERR_NO_MEMORY, + SXE2_VF_ERR_HANDLE_ERROR, + SXE2_VF_ERR_CQP_COMPL_ERROR, + SXE2_VF_ERR_INVALID_VF_ID, + SXE2_VF_ERR_ADMIN_QUEUE_ERROR, + SXE2_VF_ERR_NOT_SUPPORTED, + SXE2_VF_ERR_PF_STATUS_ABNORMAL, + SXE2_VF_ERR_VF_STATUS_ABNORMAL, +}; + +enum sxe2_vf_msg_type { + SXE2VF_MSG_TYPE_PF_TO_VF = 3, + SXE2VF_MSG_TYPE_VF_TO_PF, + SXE2VF_MSG_TYPE_DRV_TO_HW, + SXE2VF_MSG_TYPE_PF_REPLY_VF, +}; + +#define SXE2VF_CMD_HDR_SIZE sizeof(struct sxe2vf_cmd_hdr) + +#define SXE2VF_MBX_MSG_HDR_SIZE \ + sizeof(struct sxe2vf_mbx_msg_hdr) + +#define SXE2VF_MBX_RAW_MSG_MAX_SPEC (4096) + +#define SXE2VF_MBX_RAW_MSG_OFFSET (SXE2VF_CMD_HDR_SIZE + SXE2VF_MBX_MSG_HDR_SIZE) + +#define SXE2VF_MBX_FULL_HDR_SIZE SXE2VF_MBX_RAW_MSG_OFFSET + +#define SXE2VF_MBX_RAW_MSG_MAX_SIZE \ + (SXE2VF_MBX_RAW_MSG_MAX_SPEC - SXE2VF_MBX_RAW_MSG_OFFSET) + +#define SXE2VF_MBX_DATA_OFFSET(buf) \ + ((((struct sxe2_cmd_hdr *)(buf))->hdr_len) + \ + (SXE2_MBX_MSG_HDR_PTR((struct sxe2_cmd_hdr *)buf)->data_offset)) + +#define SXE2_FNAV_MAX_NUM_PROTO_HDRS (9) +#define SXE2_FNAV_MAX_NUM_ACTIONS (3) +#define SXE2_FNAV_IPV6_ADDR_LEN_TO_U32 (4) +#define SXE2_FNAV_ETH_ADDR_LEN (6) +#define SXE2_VF_FNAV_INVALID_LOC (0xFFFF) +#define SXE2_VF_FNAV_INVALID_FLOW_ID (0xFFFF) +#define SXE2_VF_FNAV_INVALID_STAT_IDX (0xFFFF) + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F + +struct sxe2vf_cmd_hdr { + __le32 magic_code; + __le16 in_len; + __le16 out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 resv[28]; + u8 body[]; +}; + +struct sxe2vf_mbx_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 recv[14]; + u8 body[]; +}; + +enum sxe2_driver_type { + SXE2_DRIVER_TYPE_VF = 0, +}; + +struct sxe2_vf_vfres_msg_req { + u8 driver_type; + u8 support_sw_stats; + u8 reserve[2]; +}; + +struct sxe2_vf_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_vf_ver_msg { + __le16 major; + __le16 minor; +}; + +struct sxe2_vf_rxq_ctxt { + u8 lro_status; + u8 keep_crc_en; + __le16 queue_id; + __le16 depth; + __le16 buf_len; + __le64 dma_addr; +}; + +struct sxe2_vf_rxq_msg { + __le16 q_cnt; + __le16 vsi_id; + __le16 max_frame_size; + struct sxe2_vf_rxq_ctxt ctxt[]; +}; + +struct sxe2_vf_vsi_sw_stats { + __le64 rx_packets; + __le64 rx_bytes; + __le64 tx_packets; + __le64 tx_bytes; +}; + +struct sxe2_vf_sw_stats { + __le16 vsi_id; + struct sxe2_vf_vsi_sw_stats sw_stats; + __le16 fnav_stats_idx; +}; + +struct sxe2_vf_vsi_res { + __le16 vsi_id; +}; + +struct sxe2_vf_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 
rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; +}; + +struct sxe2_vf_hw_stats_rsp { + struct sxe2_vf_vsi_hw_stats hw_stats; + __le64 fnav_match; +}; + +struct sxe2_fw_ver_msg { + u8 main_version_id; + u8 sub_version_id; + u8 fix_version_id; + u8 build_id; +}; + +struct sxe2_vf_txsch_caps { + u8 layer_cap; + u8 tm_mid_node_num; + u8 prio_num; +}; + +struct sxe2_vf_rxft_caps { + __le16 rss_lut_type; + __le16 rss_key_size; + __le16 rss_lut_size; + __le16 fnav_space_gsize; + __le16 fnav_space_bsize; +}; + +struct sxe2_vf_vfres_msg { + __le16 num_vsis; + __le16 max_vectors; + __le16 q_cnt; + __le16 itr_gran; + u8 addr[ETH_ALEN]; + __le16 max_vlan_cnt; + u8 port_vlan_exsit; + u8 is_switchdev; + u8 pf_cnt; + u8 parent_pfid; + __le16 vf_id_in_dev; + struct sxe2_vf_rxft_caps rxft_cap; + struct sxe2_vf_vsi_res vsi_res[SXE2_VF_MAX_VSI_CNT]; + struct sxe2_vf_txsch_caps vf_txsch_cap; + struct sxe2_fw_ver_msg fw_ver; + __le32 cap_flags; + u8 tm_layers; + u8 parent_portid; + u8 mode; +}; + +struct sxe2_vf_irq_map { + __le16 irq_id; + __le16 txq_map; + __le16 rxq_map; + __le16 rxitr_idx; + __le16 txitr_idx; +}; + +struct sxe2_vf_irq_map_msg { + __le16 num_irqs; + __le16 vsi_id; + struct sxe2_vf_irq_map irq_maps[]; +}; + +struct sxe2_vf_irq_unmap_msg { + __le16 vsi_id; +}; + +enum { + SXE2_VF_MAC_TYPE_P = 0, + SXE2_VF_MAC_TYPE_C, +}; + +struct sxe2_vf_addr { + u8 addr[ETH_ALEN]; + u8 type; +}; + +struct sxe2_vf_addr_msg { + bool is_user; + __le16 vsi_id; + __le16 addr_cnt; + struct sxe2_vf_addr elem[]; +}; + +struct sxe2_vf_addr_update_msg { + bool to_user; + __le16 vsi_id; + u8 addr[ETH_ALEN]; +}; + +struct sxe2_vf_promisc_update_msg { + bool to_user; + bool is_promisc; + __le16 vsi_id; +}; + +struct sxe2_vf_link_msg { + __le32 speed; + u8 status; +}; + +struct sxe2_vf_txq_stop_msg { + __le16 q_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_txq_ctxt { + __le16 vsi_id; + __le16 queue_id; + __le16 depth; + __le64 dma_addr; + __le32 sched_mode; +}; + +struct sxe2_vf_txq_ctxt_msg { + __le16 q_cnt; + __le16 vsi_id; + struct sxe2_vf_txq_ctxt ctxs[]; +}; + +struct sxe2_vf_qps_dis_msg { + __le16 qps_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_q_stop_msg { + __le16 vsi_id; + __le16 q_idx; +}; + +struct sxe2_vf_promisc_msg { + bool is_user; + __le16 vsi_id; + u8 resv[2]; + __le32 flags; +}; + +struct sxe2_vf_vlan_caps { + u8 port_vlan_exsit; + __le16 max_cnt; +}; + +struct sxe2_vf_vlan_offload_cfg { + u8 stag_strip_enable; + u8 ctag_strip_enable; + u8 stag_insert_enable; + u8 ctag_insert_enable; +}; + +struct sxe2_vf_vlan_filter_cfg { + bool is_user; + u8 ctag_filter_enable; + u8 stag_filter_enable; +}; + +struct sxe2_vf_vlan { + __le16 vid; + __le16 tpid; +}; + +struct sxe2_vf_vlan_filter_msg { + __le16 vsi_id; + __le16 vlan_cnt; + struct sxe2_vf_vlan elem[]; +}; + +struct sxe2_vf_user_vlan_msg { + bool is_add; + __le16 vsi_id; + struct sxe2_vf_vlan vlan; +}; + +struct sxe2_vf_user_vlan_fltr_msg { + bool is_en; + __le16 vsi_id; +}; + +struct sxe2_vf_rss_hash_ctrl { + u8 hash_func; +}; + +struct sxe2_vf_rss_hash_msg { + __le32 headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + __le32 hash_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + __le32 hdr_type; + u8 symm; +}; + +enum sxe2_fnav_flow_type { + SXE2_FNAV_FLOW_TYPE_NONE = SXE2_FLOW_TYPE_NONE, + SXE2_FNAV_FLOW_TYPE_FRAG_IPV4 = SXE2_FLOW_MAC_IPV4_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_ETH = SXE2_FLOW_MAC_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_OTHER = SXE2_FLOW_MAC_IPV4_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_UDP = 
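+ /*
+  * The SXE2_FNAV_FLOW_TYPE_* values alias the shared sxe2_flow_type
+  * IDs so fnav filter messages reuse one flow-type space; the
+  * ARFS-only entries are placed past SXE2_FLOW_TYPE_MAX so they
+  * cannot collide with it.
+  */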
SXE2_FLOW_MAC_IPV4_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_TCP = SXE2_FLOW_MAC_IPV4_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV4_SCTP = SXE2_FLOW_MAC_IPV4_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_FRAG_IPV6 = SXE2_FLOW_MAC_IPV6_FRAG_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_OTHER = SXE2_FLOW_MAC_IPV6_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_UDP = SXE2_FLOW_MAC_IPV6_UDP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_TCP = SXE2_FLOW_MAC_IPV6_TCP_PAY, + SXE2_FNAV_FLOW_TYPE_IPV6_SCTP = SXE2_FLOW_MAC_IPV6_SCTP_PAY, + + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_TCP = SXE2_FLOW_TYPE_MAX, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV4_UDP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_TCP, + SXE2_FNAV_FLOW_TYPE_ARFS_IPV6_UDP, + SXE2_FNAV_FLOW_TYPE_MAX, +}; + +enum sxe2_fnav_act_type { + SXE2_FNAV_ACTION_DROP = 0, + SXE2_FNAV_ACTION_TC_REDIRECT, + SXE2_FNAV_ACTION_PASSTHRU, + SXE2_FNAV_ACTION_QUEUE, + SXE2_FNAV_ACTION_Q_REGION, + SXE2_FNAV_ACTION_MARK, + SXE2_FNAV_ACTION_COUNT, +}; + +enum sxe2_fnav_tunnel_level { + SXE2_FNAV_TUNNEL_OUTER, + SXE2_FNAV_TUNNEL_INNER, + SXE2_FNAV_TUNNEL_ANY, +}; + +enum sxe2_fnav_tunnel_flag_type { + SXE2_FNAV_TUN_FLAG_NO_TUNNEL, + SXE2_FNAV_TUN_FLAG_TUNNEL, + SXE2_FNAV_TUN_FLAG_ANY, +}; + +struct sxe2_fnav_comm_eth { + u8 dst[SXE2_FNAV_ETH_ADDR_LEN]; + u8 src[SXE2_FNAV_ETH_ADDR_LEN]; + __be16 etype; +}; + +struct sxe2_fnav_comm_vlan { + __be16 vlan_vid; + __be16 vlan_tci; + __be16 vlan_type; +}; + +struct sxe2_fnav_comm_ipv4 { + __be32 saddr; + __be32 daddr; + u8 tos; + u8 ttl; + u8 proto; +}; + +struct sxe2_fnav_comm_ipv6 { + __be32 dst_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + __be32 src_ip[SXE2_FNAV_IPV6_ADDR_LEN_TO_U32]; + u8 tc; + u8 proto; + u8 hlim; +}; + +struct sxe2_fnav_comm_l4 { + __be16 dst_port; + __be16 src_port; +}; + +struct sxe2_fnav_comm_vxlan { + __be32 vni; +}; + +struct sxe2_fnav_comm_geneve { + __be32 vni; +}; + +struct sxe2_fnav_comm_gtpu { + __be32 teid; +}; + +struct sxe2_fnav_comm_gre { + __be32 tni; +}; + +struct sxe2_fnav_comm_proto_hdr { + u8 tunnel_level; + u8 type; + __le32 flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + union { + struct sxe2_fnav_comm_eth eth; + struct sxe2_fnav_comm_vlan vlan; + struct sxe2_fnav_comm_ipv4 ipv4; + struct sxe2_fnav_comm_ipv6 ipv6; + struct sxe2_fnav_comm_l4 l4; + struct sxe2_fnav_comm_vxlan vxlan; + struct sxe2_fnav_comm_geneve geneve; + struct sxe2_fnav_comm_gtpu gtpu; + struct sxe2_fnav_comm_gre gre; + }; +}; + +struct sxe2_fnav_comm_action_queue { + __le16 q_index; +}; + +struct sxe2_fnav_comm_action_queue_region { + __le16 q_index; + u8 region; +}; + +struct sxe2_fnav_comm_action_mark { + __le32 mark_id; +}; + +struct sxe2_fnav_comm_action_count { + __le32 stat_index; + __le32 stat_ctrl; +}; + +struct sxe2_fnav_comm_action { + __le32 type; + union { + struct sxe2_fnav_comm_action_queue act_queue; + struct sxe2_fnav_comm_action_queue_region act_q_region; + struct sxe2_fnav_comm_action_mark act_mark; + struct sxe2_fnav_comm_action_count act_count; + }; +}; + +struct sxe2_fnav_comm_user_data { + u8 has_flex_filed; + u8 resv[3]; + __le16 flex_offset; + __be16 flex_word; +}; + +struct sxe2_fnav_comm_full_msg { + __le32 filter_loc; + __le32 flow_type; + __le32 tunn_flag; + u8 action_cnt; + u8 proto_cnt; + u8 rsv[2]; + struct sxe2_fnav_comm_action action[SXE2_FNAV_MAX_NUM_ACTIONS]; + struct sxe2_fnav_comm_proto_hdr proto_hdr[SXE2_FNAV_MAX_NUM_PROTO_HDRS]; + struct sxe2_fnav_comm_user_data usr_data; +}; + +struct sxe2_vf_fnav_filter_del_msg { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_add_filter_resp { + __le32 flow_id; +}; + +struct sxe2_vf_fnav_stat_alloc_req_msg { + u8 need_update; +}; + +struct 
sxe2_vf_fnav_stat_msg { + __le16 stat_index; +}; + +struct sxe2_vf_fnav_stat_query_req_msg { + __le16 stat_index; + __le32 stat_ctrl; + __le32 is_clear; +}; + +struct sxe2_vf_fnav_stat_query_resp_msg { + __le16 stat_index; + __le64 stat_hits; + __le64 stat_bytes; +}; + +struct sxe2_vf_qv_info { + __le32 v_idx; + __le16 ceq_idx; + __le16 aeq_idx; + u8 itr_idx; + u8 pad[3]; +}; + +struct sxe2_vf_qv_map_msg { + __le32 num_vectors; + struct sxe2_vf_qv_info qv_info[]; +}; + +struct sxe2_vf_rdma_mgr_cmd_msg { + __le32 opcode; + __le32 msg_len; + __le32 resv_len; + u8 msg[]; +}; + +struct sxe2_vf_tm_res { + __le16 teid; +}; + +struct sxe2_vf_tm_info { + __le32 committed; + __le32 peak; + u8 priority; + u8 reserve; + __le16 weight; +}; + +struct sxe2_vf_tm_add_root_msg { + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_node_msg { + __le16 parent_teid; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_tm_add_queue_msg { + __le16 parent_teid; + __le16 queue_id; + struct sxe2_vf_tm_info info; +}; + +struct sxe2_vf_ptp_clock_res { + __le32 clock_ns; + __le64 clock_s; +}; + +struct sxe2_mbx_obj { + __le32 func_type : 2; + __le32 resv : 2; + __le32 pf_id : 4; + __le32 vf_id : 12; + __le32 resv1 : 4; + __le32 drv_type : 2; + __le32 drv_id : 6; +}; + +struct sxe2_com_user_data_passthrough_req { + struct sxe2_mbx_obj obj; + u32 opcode; + u16 func_id; + u16 vsi_id; + u32 req_len; + u32 resp_len; + u32 buff_len; + u8 cmd_buff[]; +}; + +struct sxe2_com_user_data_passthrough_resp { + u32 buff_len; + u8 cmd_buff[]; +}; + +#define SXE2_MBX_IPSEC_IPV6 BIT(0) +#define SXE2_MBX_IPSEC_SM4 BIT(1) +#define SXE2_MBX_IPSEC_AUTH BIT(2) +#define SXE2_MBX_IPSEC_KEY_LEN (32) +#define SCBGE_MBX_IPSEC_IPV4_LEN (4) +#define SCBGE_MBX_IPSEC_IPV6_LEN (16) + +struct sxe2_vf_ipsec_sa_add_msg { + __le32 spi; + u8 dir; + u8 mode; + u8 rsvd[2]; + __le32 addr[SCBGE_MBX_IPSEC_IPV6_LEN / 4]; + u8 enc_key[SXE2_MBX_IPSEC_KEY_LEN]; + u8 auth_key[SXE2_MBX_IPSEC_KEY_LEN]; + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_add_resp { + __le32 sa_idx; +}; + +struct sxe2_vf_ipsec_sa_del_msg { + u8 dir; + u8 rsvd[3]; + __le32 sa_idx; +}; + +struct sxe2vf_get_capa_response { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; +}; + +struct sxe2vf_acl_filter_del_req { + __le32 filter_id; +}; + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021Q SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_8021AD SXE2_VSI_L2TAGSTXVALID_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_VLAN SXE2_VSI_L2TAGSTXVALID_ID_VLAN + +#define SXE2_DPDK_OFFLOAD_OUTER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q SXE2_VSI_TSR_ID_OUT_VLAN1 +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD SXE2_VSI_TSR_ID_STAG +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1 SXE2_VSI_TSR_ID_OUT_VLAN2 + +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_QINQ1 SXE2_VSI_L2TAGSTXVALID_ID_VLAN +#define SXE2_DPDK_OFFLOAD_INNER_INSERT_ENABLE SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID + +#define SXE2_DPDK_OFFLOAD_INNER_STRIP_QINQ1 SXE2_VSI_TSR_ID_VLAN + +#define SXE2_DPDK_OFFLOAD_FIELD (0X0F) +#define SXE2_DPDK_OFFLOAD_TAGID_FIELD (0X07) + +#define SXE2_DPDK_OFFLOAD_OUTER_STRIP_MASK (SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021Q | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_8021AD | \ + SXE2_DPDK_OFFLOAD_OUTER_STRIP_QINQ1) +#define SXE2_DPDK_OFFLOAD_STRIP_OFFSET SXE2_VSI_TSR_SHOW_TAG_S + +#define SXE2_DPDK_OFFLOAD_INSERT_ENABLE (BIT(3)) + +struct sxe2_dpdk_portvlan_cfg { + u16 vf_idx; + u16 tpid; + 
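+ /*
+  * vid and prio below complete the 802.1Q tag (12-bit VLAN ID,
+  * 3-bit PCP). Note the host-endian u16 here, unlike the __le16
+  * mailbox structures above; this struct is presumably consumed
+  * locally by the DPDK side rather than serialized to firmware.
+  */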
u16 vid; + u8 prio; + u8 rsv; +}; + +struct sxe2vf_rdma_dump_pcap_msg { + u8 mac[ETH_ALEN]; + u8 rsvd[2]; + bool is_add; + u8 rsvd1[3]; +}; + +struct sxe2_vf_vsi_cfg { + bool is_clear; + __le16 txq_base_idx; + __le16 txq_cnt; + __le16 rxq_base_idx; + __le16 rxq_cnt; + __le16 irq_base_idx; + __le16 irq_cnt; + __le16 vsi_id; +}; + +struct sxe2_vf_user_driver_release { + u8 func_id; + u8 drv_id; +}; + +#pragma pack() +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2flow/sxe2_flow_public.h b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2flow/sxe2_flow_public.h new file mode 100644 index 0000000000000000000000000000000000000000..dc15aebd9163b7e42331d58f8f2a3b555746b7e2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2flow/sxe2_flow_public.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_flow_public.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_FLOW_PUBLIC_H__ +#define __SXE2_FLOW_PUBLIC_H__ + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_osal.h" +#endif +enum sxe2_flow_type { + SXE2_FLOW_TYPE_NONE = 0, + SXE2_FLOW_MAC_PAY = 1, + SXE2_FLOW_MAC_IPV4_FRAG_PAY = 22, + SXE2_FLOW_MAC_IPV4_PAY = 23, + SXE2_FLOW_MAC_IPV4_UDP_PAY = 24, + SXE2_FLOW_MAC_IPV4_TCP_PAY = 26, + SXE2_FLOW_MAC_IPV4_SCTP_PAY = 27, + SXE2_FLOW_MAC_IPV4_IPV4_FRAG_PAY = 29, + SXE2_FLOW_MAC_IPV4_IPV4_PAY = 30, + SXE2_FLOW_MAC_IPV4_IPV4_UDP_PAY = 31, + SXE2_FLOW_MAC_IPV4_IPV4_TCP_PAY = 33, + SXE2_FLOW_MAC_IPV4_IPV4_SCTP_PAY = 34, + SXE2_FLOW_MAC_IPV4_IPV6_FRAG_PAY = 36, + SXE2_FLOW_MAC_IPV4_IPV6_PAY = 37, + SXE2_FLOW_MAC_IPV4_IPV6_UDP_PAY = 38, + SXE2_FLOW_MAC_IPV4_IPV6_TCP_PAY = 40, + SXE2_FLOW_MAC_IPV4_IPV6_SCTP_PAY = 41, + SXE2_FLOW_MAC_IPV4_GRE_PAY = 43, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_FRAG_PAY = 44, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_PAY = 45, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_UDP_PAY = 46, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_TCP_PAY = 48, + SXE2_FLOW_MAC_IPV4_GRE_IPV4_SCTP_PAY = 49, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_FRAG_PAY = 51, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_PAY = 52, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_UDP_PAY = 53, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_TCP_PAY = 55, + SXE2_FLOW_MAC_IPV4_GRE_IPV6_SCTP_PAY = 56, + SXE2_FLOW_MAC_IPV4_GRE_MAC_PAY = 58, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_FRAG_PAY = 59, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_PAY = 60, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_UDP_PAY = 61, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_TCP_PAY = 63, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV4_SCTP_PAY = 64, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_FRAG_PAY = 66, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_PAY = 67, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_UDP_PAY = 68, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_TCP_PAY = 70, + SXE2_FLOW_MAC_IPV4_GRE_MAC_IPV6_SCTP_PAY = 71, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_PAY = 73, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_FRAG_PAY = 74, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_PAY = 75, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_UDP_PAY = 76, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_TCP_PAY = 78, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV4_SCTP_PAY = 79, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_FRAG_PAY = 81, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_PAY = 82, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_UDP_PAY = 83, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_TCP_PAY = 85, + SXE2_FLOW_MAC_IPV4_GRE_MAC_VLAN_IPV6_SCTP_PAY = 86, + SXE2_FLOW_MAC_IPV6_FRAG_PAY = 88, + SXE2_FLOW_MAC_IPV6_PAY = 89, + SXE2_FLOW_MAC_IPV6_UDP_PAY = 90, + SXE2_FLOW_MAC_IPV6_TCP_PAY = 92, + SXE2_FLOW_MAC_IPV6_SCTP_PAY = 93, + SXE2_FLOW_MAC_IPV6_IPV4_FRAG_PAY = 95, + 
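+ /*
+  * Naming convention: each enumerator spells the header stack from
+  * the outermost MAC inwards, with _PAY marking where the payload
+  * begins; gaps in the numbering are presumably reserved for header
+  * stacks this device does not expose.
+  */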
SXE2_FLOW_MAC_IPV6_IPV4_PAY = 96, + SXE2_FLOW_MAC_IPV6_IPV4_UDP_PAY = 97, + SXE2_FLOW_MAC_IPV6_IPV4_TCP_PAY = 99, + SXE2_FLOW_MAC_IPV6_IPV4_SCTP_PAY = 100, + SXE2_FLOW_MAC_IPV6_IPV6_FRAG_PAY = 102, + SXE2_FLOW_MAC_IPV6_IPV6_PAY = 103, + SXE2_FLOW_MAC_IPV6_IPV6_UDP_PAY = 104, + SXE2_FLOW_MAC_IPV6_IPV6_TCP_PAY = 106, + SXE2_FLOW_MAC_IPV6_IPV6_SCTP_PAY = 107, + SXE2_FLOW_MAC_IPV6_GRE_PAY = 109, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_FRAG_PAY = 110, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_PAY = 111, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_UDP_PAY = 112, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_TCP_PAY = 114, + SXE2_FLOW_MAC_IPV6_GRE_IPV4_SCTP_PAY = 115, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_FRAG_PAY = 117, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_PAY = 118, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_UDP_PAY = 119, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_TCP_PAY = 121, + SXE2_FLOW_MAC_IPV6_GRE_IPV6_SCTP_PAY = 122, + SXE2_FLOW_MAC_IPV6_GRE_MAC_PAY = 124, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_FRAG_PAY = 125, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_PAY = 126, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_UDP_PAY = 127, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_TCP_PAY = 129, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV4_SCTP_PAY = 130, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_FRAG_PAY = 132, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_PAY = 133, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_UDP_PAY = 134, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_TCP_PAY = 136, + SXE2_FLOW_MAC_IPV6_GRE_MAC_IPV6_SCTP_PAY = 137, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_PAY = 139, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_FRAG_PAY = 140, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_PAY = 141, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_UDP_PAY = 142, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_TCP_PAY = 144, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV4_SCTP_PAY = 145, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_FRAG_PAY = 147, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_PAY = 148, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_UDP_PAY = 149, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_TCP_PAY = 151, + SXE2_FLOW_MAC_IPV6_GRE_MAC_VLAN_IPV6_SCTP_PAY = 152, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_PAY = 329, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_PAY = 330, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_FRAG_PAY = 331, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_PAY = 332, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_UDP_PAY = 333, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_TCP_PAY = 334, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV4_SCTP_PAY = 335, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_FRAG_PAY = 336, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_PAY = 337, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_UDP_PAY = 338, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_TCP_PAY = 339, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV4_SCTP_PAY = 340, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_FRAG_PAY = 341, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_PAY = 342, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_UDP_PAY = 343, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_TCP_PAY = 344, + SXE2_FLOW_MAC_IPV4_UDP_GTPU_IPV6_SCTP_PAY = 345, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_FRAG_PAY = 346, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_PAY = 347, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_UDP_PAY = 348, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_TCP_PAY = 349, + SXE2_FLOW_MAC_IPV6_UDP_GTPU_IPV6_SCTP_PAY = 350, + SXE2_FLOW_MAC_IPV6_MAC_PAY = 820, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_FRAG_PAY = 821, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_PAY = 822, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_UDP_PAY = 823, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_TCP_PAY = 824, + SXE2_FLOW_MAC_IPV6_MAC_IPV4_SCTP_PAY = 825, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_FRAG_PAY = 827, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_PAY = 828, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_UDP_PAY = 829, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_TCP_PAY = 830, + SXE2_FLOW_MAC_IPV6_MAC_IPV6_SCTP_PAY = 831, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_PAY = 835, + 
SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_FRAG_PAY = 836, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_PAY = 837, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_UDP_PAY = 838, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_TCP_PAY = 839, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV4_SCTP_PAY = 840, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_FRAG_PAY = 842, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_PAY = 843, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_UDP_PAY = 844, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_TCP_PAY = 845, + SXE2_FLOW_MAC_IPV6_MAC_VLAN_IPV6_SCTP_PAY = 846, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_PAY = 878, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_FRAG_PAY = 877, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_PAY = 876, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_UDP_PAY = 879, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_TCP_PAY = 880, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV4_SCTP_PAY = 875, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_FRAG_PAY = 871, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_PAY = 870, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_UDP_PAY = 872, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_TCP_PAY = 873, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_IPV6_SCTP_PAY = 869, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_PAY = 891, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_FRAG_PAY = 890, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_PAY = 889, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_UDP_PAY = 892, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_TCP_PAY = 893, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV4_SCTP_PAY = 888, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_FRAG_PAY = 884, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_PAY = 883, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_UDP_PAY = 885, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_TCP_PAY = 886, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_IPV6_SCTP_PAY = 882, + SXE2_FLOW_MAC_IPV6_UDP_GRE_PAY = 904, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_FRAG_PAY = 903, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_PAY = 902, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_UDP_PAY = 905, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_TCP_PAY = 906, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV4_SCTP_PAY = 901, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_FRAG_PAY = 897, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_PAY = 896, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_UDP_PAY = 898, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_TCP_PAY = 899, + SXE2_FLOW_MAC_IPV6_UDP_GRE_IPV6_SCTP_PAY = 895, + SXE2_FLOW_MAC_IPV4_UDP_GRE_PAY = 917, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_FRAG_PAY = 916, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_PAY = 915, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_UDP_PAY = 918, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_TCP_PAY = 919, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV4_SCTP_PAY = 914, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_FRAG_PAY = 910, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_PAY = 909, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_UDP_PAY = 911, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_TCP_PAY = 912, + SXE2_FLOW_MAC_IPV4_UDP_GRE_IPV6_SCTP_PAY = 908, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_PAY = 930, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 929, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 928, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 931, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 932, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 927, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 923, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 922, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 924, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 925, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 921, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_PAY = 943, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_FRAG_PAY = 942, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_PAY = 941, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_UDP_PAY = 944, + 
SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_TCP_PAY = 945, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV4_SCTP_PAY = 940, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_FRAG_PAY = 936, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_PAY = 935, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_UDP_PAY = 937, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_TCP_PAY = 938, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_VLAN_IPV6_SCTP_PAY = 934, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_PAY = 956, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 955, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_PAY = 954, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 957, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 958, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 953, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 949, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_PAY = 948, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 950, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 951, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 947, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_PAY = 969, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_FRAG_PAY = 968, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_PAY = 967, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_UDP_PAY = 970, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_TCP_PAY = 971, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV4_SCTP_PAY = 966, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_FRAG_PAY = 962, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_PAY = 961, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_UDP_PAY = 963, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_TCP_PAY = 964, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_VLAN_IPV6_SCTP_PAY = 960, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_PAY = 982, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 981, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_PAY = 980, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_UDP_PAY = 983, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_TCP_PAY = 984, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 979, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 975, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_PAY = 974, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_UDP_PAY = 976, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_TCP_PAY = 977, + SXE2_FLOW_MAC_IPV6_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 973, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_PAY = 995, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_FRAG_PAY = 994, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_PAY = 993, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_UDP_PAY = 996, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_TCP_PAY = 997, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV4_SCTP_PAY = 992, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_FRAG_PAY = 988, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_PAY = 987, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_UDP_PAY = 989, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_TCP_PAY = 990, + SXE2_FLOW_MAC_IPV4_UDP_VXGEN_MAC_IPV6_SCTP_PAY = 986, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_PAY = 1008, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_FRAG_PAY = 1007, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_PAY = 1006, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_UDP_PAY = 1009, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_TCP_PAY = 1010, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV4_SCTP_PAY = 1005, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_FRAG_PAY = 1001, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_PAY = 1000, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_UDP_PAY = 1002, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_TCP_PAY = 1003, + SXE2_FLOW_MAC_IPV6_UDP_GRE_MAC_IPV6_SCTP_PAY = 999, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_PAY = 1021, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_FRAG_PAY = 1020, + 
SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_PAY = 1019, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_UDP_PAY = 1022, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_TCP_PAY = 1023, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV4_SCTP_PAY = 1018, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_FRAG_PAY = 1014, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_PAY = 1013, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_UDP_PAY = 1015, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_TCP_PAY = 1016, + SXE2_FLOW_MAC_IPV4_UDP_GRE_MAC_IPV6_SCTP_PAY = 1012, + SXE2_FLOW_TYPE_MAX = 2048, +}; + +enum sxe2_rss_cfg_hdr_type { + SXE2_RSS_OUTER_HEADERS, + SXE2_RSS_INNER_HEADERS, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GRE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_VXLAN, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GENEVE, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV4_UDP_GTPU, + SXE2_RSS_INNER_HEADERS_WITH_OUTER_IPV6_UDP_GTPU, + SXE2_RSS_ANY_HEADERS +}; + +enum sxe2_flow_hdr { + SXE2_FLOW_HDR_ETH = 0, + SXE2_FLOW_HDR_VLAN, + SXE2_FLOW_HDR_QINQ, + SXE2_FLOW_HDR_IPV4, + SXE2_FLOW_HDR_IPV6, + SXE2_FLOW_HDR_ICMP = 5, + SXE2_FLOW_HDR_TCP, + SXE2_FLOW_HDR_UDP, + SXE2_FLOW_HDR_SCTP, + SXE2_FLOW_HDR_GRE, + SXE2_FLOW_HDR_VXLAN = 10, + SXE2_FLOW_HDR_GENEVE, + SXE2_FLOW_HDR_GTPU, + SXE2_FLOW_HDR_IPV_FRAG, + SXE2_FLOW_HDR_IPV_OTHER, + SXE2_FLOW_HDR_ETH_NON_IP = 15, + SXE2_FLOW_HDR_MAX = 128, +}; + +enum sxe2_flow_fld_id { + SXE2_FLOW_FLD_ID_ETH_DA = 0, + SXE2_FLOW_FLD_ID_ETH_SA, + SXE2_FLOW_FLD_ID_S_TCI, + SXE2_FLOW_FLD_ID_C_TCI, + SXE2_FLOW_FLD_ID_S_TPID, + SXE2_FLOW_FLD_ID_C_TPID = 5, + SXE2_FLOW_FLD_ID_S_VID, + SXE2_FLOW_FLD_ID_C_VID, + SXE2_FLOW_FLD_ID_ETH_TYPE, + SXE2_FLOW_FLD_ID_IPV4_TOS, + SXE2_FLOW_FLD_ID_IPV6_DSCP = 10, + SXE2_FLOW_FLD_ID_IPV4_TTL, + SXE2_FLOW_FLD_ID_IPV4_PROT, + SXE2_FLOW_FLD_ID_IPV6_TTL, + SXE2_FLOW_FLD_ID_IPV6_PROT, + SXE2_FLOW_FLD_ID_IPV4_SA = 15, + SXE2_FLOW_FLD_ID_IPV4_DA, + SXE2_FLOW_FLD_ID_IPV6_SA, + SXE2_FLOW_FLD_ID_IPV6_DA, + SXE2_FLOW_FLD_ID_IPV4_CHKSUM, + SXE2_FLOW_FLD_ID_IPV4_ID = 20, + SXE2_FLOW_FLD_ID_IPV6_ID, + SXE2_FLOW_FLD_ID_IPV6_PRE32_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE32_DA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE48_DA = 25, + SXE2_FLOW_FLD_ID_IPV6_PRE64_SA, + SXE2_FLOW_FLD_ID_IPV6_PRE64_DA, + SXE2_FLOW_FLD_ID_TCP_SRC_PORT, + SXE2_FLOW_FLD_ID_TCP_DST_PORT, + SXE2_FLOW_FLD_ID_UDP_SRC_PORT = 30, + SXE2_FLOW_FLD_ID_UDP_DST_PORT, + SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, + SXE2_FLOW_FLD_ID_SCTP_DST_PORT, + SXE2_FLOW_FLD_ID_TCP_FLAGS, + SXE2_FLOW_FLD_ID_TCP_CHKSUM = 35, + SXE2_FLOW_FLD_ID_UDP_CHKSUM, + SXE2_FLOW_FLD_ID_SCTP_CHKSUM, + SXE2_FLOW_FLD_ID_VXLAN_VNI, + SXE2_FLOW_FLD_ID_GENEVE_VNI, + SXE2_FLOW_FLD_ID_GTPU_TEID = 40, + SXE2_FLOW_FLD_ID_NVGRE_TNI, + + SXE2_FLOW_FLD_ID_MAX = 128, +}; + +struct sxe2_ether_hdr { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be16 ether_type; +}; + +struct sxe2_vlan_hdr { + __be16 type; + __be16 vlan; +}; + +struct sxe2_ipv4_hdr { + u8 ver_ihl; + u8 tos; + __be16 tot_len; + __be16 id; + __be16 frag_off; + u8 ttl; + u8 protocol; + __be16 check; + __be32 saddr; + __be32 daddr; +}; + +#define SXE2_IPV6_ADDR_LENGTH (16) +#define SXE2_IPV6_TC_SHIFT (20) +#define SXE2_IPV6_TC_MASK (0xFF) +struct sxe2_ipv6_hdr { + __be32 pri_ver_flow; + __be16 payload_len; + u8 nexthdr; + u8 
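+ /*
+  * pri_ver_flow packs version (bits 31:28), traffic class (27:20)
+  * and flow label (19:0); SXE2_IPV6_TC_SHIFT/SXE2_IPV6_TC_MASK above
+  * extract the traffic class, e.g.:
+  *
+  *	tc = (be32_to_cpu(pri_ver_flow) >> SXE2_IPV6_TC_SHIFT) &
+  *	     SXE2_IPV6_TC_MASK;
+  */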
hop_limit; + union { + u8 saddr[16]; + __be16 saddr16[8]; + __be32 saddr32[4]; + }; + union { + u8 daddr[16]; + __be16 daddr16[8]; + __be32 daddr32[4]; + }; +}; + +struct sxe2_tcp_hdr { + __be16 source; + __be16 dest; + __be32 seq; + __be32 ack_seq; + __be16 flag; + __be16 window; + __be16 check; + __be16 urg_ptr; +}; + +struct sxe2_udp_hdr { + __be16 source; + __be16 dest; + __be16 len; + __be16 check; +}; + +struct sxe2_sctp_hdr { + __be16 src_port; + __be16 dst_port; +}; + +struct sxe2_nvgre_hdr { + __be16 flags; + __be16 protocol; + __be32 tni; +}; + +struct sxe2_geneve_hdr { + __be16 flags; + __be16 protocol; + __be32 vni; +}; + +struct sxe2_gtpu_hdr { + u8 flag; + u8 msg_type; + __be16 msg_len; + __be32 teid; +}; + +struct sxe2_vxlan_hdr { + u8 flag; + u8 resvd0; + u8 resvd1; + u8 protocol; + __be32 vni; +}; + +enum sxe2_flow_act_type { + SXE2_FLOW_ACTION_DROP = 0, + SXE2_FLOW_ACTION_TC_REDIRECT, + SXE2_FLOW_ACTION_TO_VSI, + SXE2_FLOW_ACTION_TO_VSI_LIST, + SXE2_FLOW_ACTION_PASSTHRU, + SXE2_FLOW_ACTION_QUEUE, + SXE2_FLOW_ACTION_Q_REGION, + SXE2_FLOW_ACTION_MARK, + SXE2_FLOW_ACTION_COUNT, + SXE2_FLOW_ACTION_RSS, + SXE2_FLOW_ACTION_MAX = 32, +}; + +enum sxe2_rss_hash_key_func { + SXE2_RSS_HASH_FUNC_TOEPLITZ = 0, + SXE2_RSS_HASH_FUNC_SYM_TOEPLITZ = 1, + SXE2_RSS_HASH_FUNC_XOR = 2, + SXE2_RSS_HASH_FUNC_JEKINS = 3 +}; + +struct sxe2_flow_action_rss { + DECLARE_BITMAP(hdr_out, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hdr_in, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(fld, SXE2_FLOW_FLD_ID_MAX); + u8 is_inner; + u8 func; + u8 hdr_type; +}; + +struct sxe2_flow_action_queue { + u16 vsi_index; + u16 q_index; +}; + +struct sxe2_flow_action_queue_region { + u16 vsi_index; + u16 q_index; + u8 region; +}; + +struct sxe2_flow_action_passthru { + u16 vsi_index; +}; + +struct sxe2_flow_action_mark { + u32 mark_id; +}; + +#define SXE2_VSI_MAX (2048) +struct sxe2_flow_action_vsi { + u16 vsi_index; +}; + +struct sxe2_flow_action_vsi_list { + DECLARE_BITMAP(vsi_list_map, SXE2_VSI_MAX); + u16 vsi_cnt; +}; + +enum sxe2_fnav_stat_ctrl_type { + SXE2_FNAV_STAT_ENA_NONE = 0, + SXE2_FNAV_STAT_ENA_PKTS, + SXE2_FNAV_STAT_ENA_BYTES, + SXE2_FNAV_STAT_ENA_ALL, +}; + +struct sxe2_flow_action_count { + u32 user_id; + u32 driver_id; + u32 stat_index; + u32 stat_ctrl; +}; + +enum sxe2_flow_engine_type { + SXE2_FLOW_ENGINE_ACL, + SXE2_FLOW_ENGINE_SWITCH, + SXE2_FLOW_ENGINE_FNAV, + SXE2_FLOW_ENGINE_RSS, + SXE2_FLOW_ENGINE_MAX, +}; + +struct sxe2_flow_item { + struct sxe2_ether_hdr eth; + struct sxe2_vlan_hdr vlan; + struct sxe2_vlan_hdr qinq; + struct sxe2_ipv4_hdr ipv4; + struct sxe2_ipv6_hdr ipv6; + struct sxe2_udp_hdr udp; + struct sxe2_tcp_hdr tcp; + struct sxe2_sctp_hdr sctp; + struct sxe2_gtpu_hdr gtpu; + struct sxe2_vxlan_hdr vxlan; + struct sxe2_nvgre_hdr nvgre; + struct sxe2_geneve_hdr geneve; +}; + +enum sxe2_flow_sw_direct_type { + SXE2_FLOW_SW_DIRECT_TX, + SXE2_FLOW_SW_DIRECT_RX, + SXE2_FLOW_SW_DIRECT_MAX, +}; + +enum sxe2_flow_sw_pattern_type { + SXE2_FLOW_SW_PATTERN_ONLY, + SXE2_FLOW_SW_PATTERN_LAST, + SXE2_FLOW_SW_PATTERN_FIRST, + SXE2_FLOW_SW_PATTERN_MAX, +}; + +enum sxe2_flow_tunnel_type { + SXE2_FLOW_TUNNEL_TYPE_NONE, + SXE2_FLOW_TUNNEL_TYPE_PARENT, + SXE2_FLOW_TUNNEL_TYPE_VXLAN, + SXE2_FLOW_TUNNEL_TYPE_GTPU, + SXE2_FLOW_TUNNEL_TYPE_GENEVE, + SXE2_FLOW_TUNNEL_TYPE_GRE, + SXE2_FLOW_TUNNEL_TYPE_IPIP, +}; + +struct sxe2_flow_meta { + u8 switch_pattern_dup_allow; + u8 switch_src_direct; + u16 flow_src_vsi; + u16 flow_rule_vsi; + u32 flow_prio; + u16 flow_type; + u8 tunnel_type; + u8 rsv; +}; + +struct sxe2_flow_pattern 
{ + DECLARE_BITMAP(hdrs, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(map_spec, SXE2_FLOW_FLD_ID_MAX); + DECLARE_BITMAP(map_mask, SXE2_FLOW_FLD_ID_MAX); + struct sxe2_flow_item item_spec; + struct sxe2_flow_item item_mask; + u64 rss_type_allow; +}; + +struct sxe2_flow_action { + DECLARE_BITMAP(act_types, SXE2_FLOW_ACTION_MAX); + struct sxe2_flow_action_rss rss; + struct sxe2_flow_action_queue queue; + struct sxe2_flow_action_queue_region q_region; + struct sxe2_flow_action_passthru passthru; + struct sxe2_flow_action_vsi vsi; + struct sxe2_flow_action_vsi_list vsi_list; + struct sxe2_flow_action_mark mark; + struct sxe2_flow_action_count count; +}; +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.c b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.c new file mode 100644 index 0000000000000000000000000000000000000000..c7cec16cd9fbe338b8c0291f099772544376c642 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.c @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_hw.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) +#include "sxe2vf.h" +#else +#include +#endif + +#include "sxe2vf_hw.h" +#include "sxe2vf_regs.h" +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) +#else +#include "sxe2_log.h" +#endif + +#ifdef SXE2_CFG_DEBUG +extern int vf_reg_log; +#endif + +u32 sxe2vf_reg_read(struct sxe2vf_hw *hw, u32 reg) +{ + u32 value; + u8 __iomem *base_addr = hw->reg_base_addr; +#ifdef SXE2_CFG_DEBUG + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) + value = rte_read32(base_addr + reg); +#else + value = hw->reg_read(base_addr + reg); +#endif + +#ifdef SXE2_CFG_DEBUG + if (vf_reg_log) + LOG_DEBUG_BDF("reg: 0x%x, read value: 0x%x\n", reg, value); +#endif + + return value; +} + +void sxe2vf_reg_write(struct sxe2vf_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *base_addr = hw->reg_base_addr; +#ifdef SXE2_CFG_DEBUG + struct sxe2vf_adapter *adapter = hw->adapter; +#endif +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) + rte_write32(value, base_addr + reg); +#else + hw->reg_write(value, base_addr + reg); +#endif + +#ifdef SXE2_CFG_DEBUG + if (vf_reg_log) + LOG_DEBUG_BDF("reg:0x%x write value:0x%x read value:0x%x.\n", reg, + value, hw->reg_read(base_addr + reg)); +#endif +} + +s32 sxe2vf_hw_mbx_txq_enable(struct sxe2vf_hw *hw, u16 depth, dma_addr_t addr) +{ + s32 ret = 0; + u32 value; + u32 old_tail; + u32 old_head; +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + + old_tail = sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_TAIL); + old_head = sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_HEAD); + if (old_tail >= old_head) { + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_TAIL, 0); + } else { + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_TAIL, 0); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_TAIL, old_tail); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_TAIL, 0); + } +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + LOG_INFO_BDF("mbx txq old tail:0x%x old head:0x%x.\n", old_tail, old_head); +#endif + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_HEAD, 0); + + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_BAL, lower_32_bits(addr)); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_BAH, upper_32_bits(addr)); + + value = (u32)(FIELD_PREP(SXE2VF_MBX_Q_LEN_M, depth) | + 
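+ /* depth lands in the LEN field; the ENA bit arms the ring */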
SXE2VF_MBX_Q_LEN_ENA_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_LEN, value); + + if (sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_BAL) != lower_32_bits(addr)) + ret = -SXE2VF_HW_ERR_IO; + return ret; +} + +void sxe2vf_hw_mbx_txq_disable(struct sxe2vf_hw *hw) +{ + u32 value; + + value = sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_LEN); + value &= ~(SXE2VF_MBX_Q_LEN_VFE_M | SXE2VF_MBX_Q_LEN_OVFL_M | + SXE2VF_MBX_Q_LEN_CRIT_M | SXE2VF_MBX_Q_LEN_ENA_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_LEN, value); + + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_BAL, 0); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_BAH, 0); +} + +u32 sxe2vf_hw_mbx_txq_h_read(struct sxe2vf_hw *hw) +{ + return sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_HEAD); +} + +bool sxe2vf_hw_mbx_txq_is_enable(struct sxe2vf_hw *hw) +{ + return !!(sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_LEN) & SXE2VF_MBX_Q_LEN_ENA_M); +} + +bool sxe2vf_hw_mbx_rxq_is_enable(struct sxe2vf_hw *hw) +{ + return !!(sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN) & SXE2VF_MBX_Q_LEN_ENA_M); +} + +void sxe2vf_hw_mbx_txq_fault_clear(struct sxe2vf_hw *hw, u32 *err) +{ + u32 value = sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_LEN); + + if (value & SXE2VF_MBX_Q_LEN_VFE_M) + *err |= SXE2VF_MBX_Q_LEN_VFE_M; + else if (value & SXE2VF_MBX_Q_LEN_OVFL_M) + *err |= SXE2VF_MBX_Q_LEN_OVFL_M; + else if (value & SXE2VF_MBX_Q_LEN_CRIT_M) + *err |= SXE2VF_MBX_Q_LEN_CRIT_M; + if (*err) { + value &= ~(SXE2VF_MBX_Q_LEN_VFE_M | SXE2VF_MBX_Q_LEN_OVFL_M | + SXE2VF_MBX_Q_LEN_CRIT_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_LEN, value); + } +} + +void sxe2vf_hw_mbx_txq_t_write(struct sxe2vf_hw *hw, u32 tail) +{ + sxe2vf_reg_write(hw, SXE2VF_MBX_TQ_TAIL, tail); +} + +u32 sxe2vf_hw_mbx_rxq_h_read(struct sxe2vf_hw *hw) +{ + return sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_HEAD); +} + +void sxe2vf_hw_mbx_rxq_t_write(struct sxe2vf_hw *hw, u32 tail) +{ + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_TAIL, tail); +} + +s32 sxe2vf_hw_mbx_rxq_enable(struct sxe2vf_hw *hw, u16 depth, dma_addr_t addr) +{ + s32 ret = 0; + u32 value; + + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_HEAD, 0); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_TAIL, 0); + + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_BAL, lower_32_bits(addr)); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_BAH, upper_32_bits(addr)); + + value = FIELD_PREP(SXE2VF_MBX_Q_LEN_M, depth) | SXE2VF_MBX_Q_LEN_ENA_M | + (sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN) & SXE2VF_MBX_Q_LEN_VFE_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_LEN, value); + + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_TAIL, (u32)(depth - 1)); + + if (sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_BAL) != lower_32_bits(addr)) + ret = -SXE2VF_HW_ERR_IO; + + return ret; +} + +void sxe2vf_hw_mbx_rxq_disable(struct sxe2vf_hw *hw) +{ + u32 val; + + val = sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN); + val &= ~(SXE2VF_MBX_Q_LEN_OVFL_M | SXE2VF_MBX_Q_LEN_CRIT_M | + SXE2VF_MBX_Q_LEN_ENA_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_LEN, val); + + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_HEAD, 0); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_TAIL, 0); + + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_BAL, 0); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_BAH, 0); +} + +void sxe2vf_hw_irq_enable(struct sxe2vf_hw *hw, u16 irq_idx) +{ + u32 value = SXE2VF_DYN_CTL_INTENABLE | + (SXE2VF_ITR_IDX_NONE + << SXE2VF_DYN_CTL_ITR_IDX_SHIFT); + + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL(irq_idx), value); +} + +u32 sxe2vf_hw_irq_dyn_ctl_read(struct sxe2vf_hw *hw, u16 irq_idx) +{ + return sxe2vf_reg_read(hw, SXE2VF_DYN_CTL(irq_idx)); +} + +void sxe2vf_hw_irq_dyn_ctl(struct sxe2vf_hw *hw, u16 irq_idx, u32 value) +{ + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL(irq_idx), value); +} + +void sxe2vf_hw_msix_disable(struct 
sxe2vf_hw *hw, u16 irq_idx) +{ + sxe2vf_reg_write(hw, SXE2VF_BAR4_MSIX_CTL(irq_idx), + SXE2VF_BAR4_MSIX_DISABLE); +} + +void sxe2vf_hw_msix_enable(struct sxe2vf_hw *hw, u16 irq_idx) +{ + sxe2vf_reg_write(hw, SXE2VF_BAR4_MSIX_CTL(irq_idx), SXE2VF_BAR4_MSIX_ENABLE); +} + +void sxe2vf_hw_irq_clear_pba(struct sxe2vf_hw *hw, u16 irq_idx) +{ + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL(irq_idx), + (SXE2VF_ITR_IDX_NONE << SXE2VF_DYN_CTL_ITR_IDX_SHIFT) | + SXE2VF_DYN_CTL_CLEARPBA | + SXE2VF_DYN_CTL_INTENABLE_MSK); +} + +void sxe2vf_hw_irq_trigger(struct sxe2vf_hw *hw, u16 irq_idx) +{ + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL(irq_idx), + (SXE2VF_ITR_IDX_NONE << SXE2VF_DYN_CTL_ITR_IDX_SHIFT) | + SXE2VF_DYN_CTL_SWINT_TRIG | + SXE2VF_DYN_CTL_INTENABLE_MSK); +} + +void sxe2vf_hw_int_itr_set(struct sxe2vf_hw *hw, u16 itr_idx, u16 irq_idx, u32 itr) +{ + sxe2vf_reg_write(hw, SXE2VF_INT_ITR(itr_idx, irq_idx), itr); +} + +void sxe2vf_hw_event_irq_enable(struct sxe2vf_hw *hw) +{ + u32 value = SXE2VF_DYN_CTL_INTENABLE | + (SXE2VF_ITR_IDX_NONE + << SXE2VF_DYN_CTL_ITR_IDX_SHIFT); + + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL0, value); +} + +void sxe2vf_hw_event_irq_disable(struct sxe2vf_hw *hw) +{ + u32 value = (SXE2VF_ITR_IDX_NONE + << SXE2VF_DYN_CTL_ITR_IDX_SHIFT); + + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL0, value); +} + +void sxe2vf_hw_irq_disable(struct sxe2vf_hw *hw, u16 irq_idx) +{ + u32 value = (SXE2VF_ITR_IDX_NONE << SXE2VF_DYN_CTL_ITR_IDX_SHIFT); + + sxe2vf_reg_write(hw, SXE2VF_DYN_CTL(irq_idx), value); +} + +bool sxe2vf_hw_corer_check(struct sxe2vf_hw *hw) +{ + u32 val = 0; + bool ret = 0; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)hw->adapter; + (void)pci_read_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, &val); +#else + (void)rte_pci_read_config(hw->pci_device, &val, sizeof(val), + SXE2VF_PCIE_SYS_READY); +#endif + if (val == SXE2VF_REG_INVAL_VALUE) + return 0; + ret = !(val & SXE2VF_PCIE_SYS_READY_STOP_DROP_DONE); +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + if (ret) + LOG_INFO_BDF("core reset detected.\n"); +#endif + + return ret; +} + +bool sxe2vf_hw_corer_done(struct sxe2vf_hw *hw) +{ + u32 val; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)hw->adapter; + + (void)pci_read_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, &val); + if (val == SXE2VF_REG_INVAL_VALUE) + return 0; + +#else + (void)rte_pci_read_config(hw->pci_device, &val, sizeof(val), + SXE2VF_PCIE_SYS_READY); +#endif + + return val & SXE2VF_PCIE_SYS_READY_R5; +} + +void sxe2vf_hw_corer_stop_drop(struct sxe2vf_hw *hw) +{ + u32 val; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)hw->adapter; + (void)pci_read_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, &val); + (void)pci_write_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, + (val | SXE2VF_PCIE_SYS_READY_STOP_DROP)); +#else + (void)rte_pci_read_config(hw->pci_device, &val, sizeof(val), + SXE2VF_PCIE_SYS_READY); + val = val | SXE2VF_PCIE_SYS_READY_STOP_DROP; + (void)rte_pci_write_config(hw->pci_device, &val, sizeof(val), + SXE2VF_PCIE_SYS_READY); +#endif +} + +s32 sxe2vf_hw_corer_stop_drop_done(struct sxe2vf_hw *hw) +{ + u32 val; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)hw->adapter; + + (void)pci_read_config_dword(adapter->pdev, 
SXE2VF_PCIE_SYS_READY, &val); + if (val == SXE2VF_REG_INVAL_VALUE) + return 0; + +#else + (void)rte_pci_read_config(hw->pci_device, &val, sizeof(val), + SXE2VF_PCIE_SYS_READY); +#endif + return val & SXE2VF_PCIE_SYS_READY_STOP_DROP_DONE; +} + +bool sxe2vf_hw_vfr_is_checked(struct sxe2vf_hw *hw) +{ + u32 val; + bool ret = false; +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + + val = sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN); + if (val == SXE2VF_REG_INVAL_VALUE) + return ret; + + ret = !!(val & SXE2VF_MBX_Q_LEN_VFE_M); +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + if (ret) { + LOG_DEBUG_BDF("vf hw reset detected\n"); + sxe2vf_hw_vfr_clear(&adapter->hw); + } +#endif + + return ret; +} + +void sxe2vf_hw_vfr_clear(struct sxe2vf_hw *hw) +{ + u32 val; +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + val = sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN) & (~SXE2VF_MBX_Q_LEN_VFE_M); + sxe2vf_reg_write(hw, SXE2VF_MBX_RQ_LEN, val); +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + LOG_INFO_BDF("vfr status cleared.\n"); +#endif +} + +bool sxe2vf_hw_vfr_is_complete(struct sxe2vf_hw *hw) +{ + u32 val; + bool done; +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + val = sxe2vf_reg_read(hw, SXE2VF_VF_VRC_VFGEN_RSTAT); + + if (val != SXE2VF_REG_INVAL_VALUE) + done = !!(val & SXE2VF_VF_VRC_VFGEN_VFRSTAT_COMPLETE); + else + done = false; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + if (done) + LOG_INFO_BDF("vfr is complete.\n"); +#endif + return done; +} + +bool sxe2vf_hw_vf_is_active(struct sxe2vf_hw *hw) +{ + u32 val; + bool active; +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; +#endif + val = sxe2vf_reg_read(hw, SXE2VF_VF_VRC_VFGEN_RSTAT); + + if (val != SXE2VF_REG_INVAL_VALUE) + active = !!(val & SXE2VF_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE); + else + active = false; + +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + if (active) + LOG_INFO_BDF("vf is active.\n"); +#endif + return active; +} + +void sxe2vf_hw_mbx_regs_dump(struct sxe2vf_hw *hw) +{ +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2vf_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("RXQH:0x%x RXQT:0x%x RXQLEN:0x%x base addr high:0x%x\n" + "base addr low:0x%x TXQH:0x%x TXQT:0x%x\n" + "TXQLEN:0x%x base addr high:0x%x base addr low:0x%x.\n", + sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_HEAD), + sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_TAIL), + sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN), + sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_BAH), + sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_BAL), + sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_HEAD), + sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_TAIL), + sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_LEN), + sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_BAH), + sxe2vf_reg_read(hw, SXE2VF_MBX_TQ_BAL)); +#else + (void)hw; +#endif +} + +u32 sxe2vf_hw_rxq_tail_read(struct sxe2vf_hw *hw, u16 queue_id) +{ + return sxe2vf_reg_read(hw, SXE2VF_RXQ_TAIL(queue_id)); +} + +void sxe2vf_hw_rxq_tail_write(struct sxe2vf_hw *hw, u16 queue_id, u32 value) +{ + sxe2vf_reg_write(hw, SXE2VF_RXQ_TAIL(queue_id), value); +} + +u32 sxe2vf_hw_txq_tail_read(struct sxe2vf_hw *hw, u16 queue_id) +{ + return sxe2vf_reg_read(hw, SXE2VF_TXQ_TAIL(queue_id)); +} + +void sxe2vf_hw_txq_tail_write(struct sxe2vf_hw *hw, u16 queue_id, u32 value) +{ + 
sxe2vf_reg_write(hw, SXE2VF_TXQ_TAIL(queue_id), value); +} + +void __iomem *sxe2vf_reg_addr_get(struct sxe2vf_hw *hw, u64 reg) +{ + return (void __iomem *)(hw->reg_base_addr + reg); +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.h b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.h new file mode 100644 index 0000000000000000000000000000000000000000..238e3f5d3da583826798b53c91fca77fcd54196d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_hw.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_hw.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) +#include <rte_version.h> +#if (RTE_VERSION_NUM(22, 0, 0, 0) <= RTE_VERSION) +#include <bus_pci_driver.h> +#else +#include <rte_bus_pci.h> +#endif +#include "sxe2_osal.h" +#include "sxe2_type.h" +#include "sxe2_common.h" +#else +#include <linux/types.h> +#include <linux/io.h> +#include <linux/pci.h> +#include "sxe2_mbx_public.h" +#endif + +#ifndef __SXE2VF_HW_H__ +#define __SXE2VF_HW_H__ + +#define SXE2VF_REG_INVAL_VALUE 0xFFFFFFFF + +enum sxe2vf_hw_err_code { + SXE2VF_HW_ERR_SUCCESS = 0, + SXE2VF_HW_ERR_FAULT, + SXE2VF_HW_ERR_TIMEDOUT, + SXE2VF_HW_ERR_IO, +}; + +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) +struct sxe2vf_hw; +typedef struct rte_pci_device *(*sxe2vf_get_pci_device)(struct sxe2vf_hw *hw); +#endif + +struct sxe2vf_hw { + u8 __iomem *reg_base_addr; + + u32 (*reg_read)(void __iomem *reg); + void (*reg_write)(u32 value, void __iomem *reg); +#if !defined(SXE2_DPDK_VF_DRIVER) && !defined(SXE2_DPDK_DRIVER) + struct sxe2_fw_ver_msg fw_ver; + struct sxe2vf_adapter *adapter; +#endif +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) + struct rte_pci_device *pci_device; +#endif +}; + +#if defined(SXE2_DPDK_VF_DRIVER) || defined(SXE2_DPDK_DRIVER) +#define __SXE2_INTERNAL __rte_internal +#else +#define __SXE2_INTERNAL +#endif + +u32 sxe2vf_reg_read(struct sxe2vf_hw *hw, u32 reg); + +void sxe2vf_reg_write(struct sxe2vf_hw *hw, u32 reg, u32 value); +__SXE2_INTERNAL s32 sxe2vf_hw_mbx_txq_enable(struct sxe2vf_hw *hw, u16 depth, dma_addr_t addr); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_txq_disable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL s32 sxe2vf_hw_mbx_rxq_enable(struct sxe2vf_hw *hw, u16 depth, dma_addr_t addr); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_rxq_disable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL u32 sxe2vf_hw_mbx_txq_h_read(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_txq_t_write(struct sxe2vf_hw *hw, u32 tail); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_txq_fault_clear(struct sxe2vf_hw *hw, u32 *err); + +__SXE2_INTERNAL u32 sxe2vf_hw_mbx_rxq_h_read(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_rxq_t_write(struct sxe2vf_hw *hw, u32 tail); + +__SXE2_INTERNAL void sxe2vf_hw_irq_enable(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL void sxe2vf_hw_irq_dyn_ctl(struct sxe2vf_hw *hw, u16 irq_idx, u32 value); + +__SXE2_INTERNAL void sxe2vf_hw_event_irq_enable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_event_irq_disable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_irq_disable(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL bool sxe2vf_hw_mbx_txq_is_enable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL bool sxe2vf_hw_mbx_rxq_is_enable(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_int_itr_set(struct sxe2vf_hw *hw, u16 itr_idx, u16 irq_idx, u32 itr); + +__SXE2_INTERNAL bool 
sxe2vf_hw_corer_check(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL bool sxe2vf_hw_corer_done(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_corer_stop_drop(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL s32 sxe2vf_hw_corer_stop_drop_done(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL bool sxe2vf_hw_vfr_is_checked(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL bool sxe2vf_hw_vfr_is_complete(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL bool sxe2vf_hw_vf_is_active(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_irq_trigger(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL void sxe2vf_hw_vfr_clear(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL void sxe2vf_hw_mbx_regs_dump(struct sxe2vf_hw *hw); + +__SXE2_INTERNAL u32 sxe2vf_hw_rxq_tail_read(struct sxe2vf_hw *hw, u16 queue_id); + +__SXE2_INTERNAL void sxe2vf_hw_rxq_tail_write(struct sxe2vf_hw *hw, u16 queue_id, u32 value); + +__SXE2_INTERNAL u32 sxe2vf_hw_txq_tail_read(struct sxe2vf_hw *hw, u16 queue_id); + +__SXE2_INTERNAL void sxe2vf_hw_txq_tail_write(struct sxe2vf_hw *hw, u16 queue_id, u32 value); + +__SXE2_INTERNAL void __iomem *sxe2vf_reg_addr_get(struct sxe2vf_hw *hw, u64 reg); + +__SXE2_INTERNAL void sxe2vf_hw_msix_disable(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL void sxe2vf_hw_msix_enable(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL void sxe2vf_hw_irq_clear_pba(struct sxe2vf_hw *hw, u16 irq_idx); + +__SXE2_INTERNAL u32 sxe2vf_hw_irq_dyn_ctl_read(struct sxe2vf_hw *hw, u16 irq_idx); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_regs.h b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..98ec6b59ac53726287cdd050cf982216587fb625 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/common/sxe2vf/sxe2vf_regs.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_regs.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_REGS_H__ +#define __SXE2VF_REGS_H__ + +#define SXE2VF_MBX_Q_LEN_M 0x3FF +#define SXE2VF_MBX_Q_LEN_VFE_M BIT(28) +#define SXE2VF_MBX_Q_LEN_OVFL_M BIT(29) +#define SXE2VF_MBX_Q_LEN_CRIT_M BIT(30) +#define SXE2VF_MBX_Q_LEN_ENA_M BIT(31) + +#define SXE2VF_MBX_RQ_HEAD (0x00008000) +#define SXE2VF_MBX_RQ_TAIL (0x00008400) +#define SXE2VF_MBX_RQ_LEN (0x00007C00) +#define SXE2VF_MBX_RQ_BAH (0x00007800) +#define SXE2VF_MBX_RQ_BAL (0x00007400) + +#define SXE2VF_MBX_TQ_HEAD (0x00006C00) +#define SXE2VF_MBX_TQ_TAIL (0x00007000) +#define SXE2VF_MBX_TQ_LEN (0x00006800) +#define SXE2VF_MBX_TQ_BAH (0x00006400) +#define SXE2VF_MBX_TQ_BAL (0x00006000) + +#define SXE2VF_RXQ_TAIL(_QRX) (0x2000 + ((_QRX) * 4)) +#define SXE2VF_TXQ_TAIL(_QRX) (0x1000 + ((_QRX) * 4)) + +#define SXE2VF_INT_BASE 0x00002800 + +#define SXE2VF_DYN_CTL0 (SXE2VF_INT_BASE + 0x0) +#define SXE2VF_DYN_CTL(_idx) (SXE2VF_INT_BASE + 0x4 + ((_idx) * 4)) +#define SXE2VF_VF_DYN_CTL(_idx) (SXE2VF_INT_BASE + ((_idx) * 4)) + +#define SXE2VF_BAR4_MSIX_BASE 0 +#define SXE2VF_BAR4_MSIX_CTL(_idx) (SXE2VF_BAR4_MSIX_BASE + 0xC + ((_idx) * 0x10)) +#define SXE2VF_BAR4_MSIX_ENABLE 0 +#define SXE2VF_BAR4_MSIX_DISABLE 1 + +#define SXE2VF_DYN_CTL_INTENABLE BIT(0) +#define SXE2VF_DYN_CTL_CLEARPBA BIT(1) +#define SXE2VF_DYN_CTL_SWINT_TRIG BIT(2) +#define SXE2VF_DYN_CTL_SW_ITR_IDX_ENABLE BIT(24) +#define SXE2VF_DYN_CTL_INTENABLE_MSK BIT(31) + +#define SXE2VF_DYN_CTL_ITR_IDX_SHIFT 3 + +enum sxe2vf_itr_idx { + SXE2VF_ITR_IDX_0 = 0, + SXE2VF_ITR_IDX_1, + SXE2VF_ITR_IDX_2, + SXE2VF_ITR_IDX_NONE, +}; + +#define SXE2VF_INT_ITR(_i, _irq_idx) ((0x00002800 + 65 * 0x4) + 0x4 + \ +(_i) * 0x104 + ((_irq_idx) * 4)) +#define SXE2VF_VF_INT_ITR(_itr_idx, _irq_idx) (0x00002800 + \ +(0x104 * (_itr_idx)) + ((_irq_idx) * 4)) +#define SXE2VF_PFG_INT_CTL_ITR_GRAN_0 (2) + +#define SXE2VF_PCIE_SYS_READY 0x38c +#define SXE2VF_PCIE_SYS_READY_CORER_ASSERT BIT(0) +#define SXE2VF_PCIE_SYS_READY_STOP_DROP_DONE BIT(2) +#define SXE2VF_PCIE_SYS_READY_R5 BIT(3) +#define SXE2VF_PCIE_SYS_READY_STOP_DROP BIT(16) + +#define SXE2VF_PCIE_DEV_CTRL_DEV_STATUS 0x78 +#define SXE2VF_PCIE_DEV_CTRL_DEV_STATUS_TRANS_PENDING BIT(21) + +#define SXE2VF_VF_VRC_VFGEN_RSTAT (0x5800) +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT GENMASK(1, 0) +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT_VFR (0) +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT_COMPLETE (BIT(0)) +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE (BIT(1)) + +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT_FORVF_VFR (1) +#define SXE2VF_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK (BIT(10)) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/ps3_cfg_lanData.h b/drivers/net/ethernet/linkdata/sxe2vf/include/ps3_cfg_lanData.h new file mode 100644 index 0000000000000000000000000000000000000000..5f11c0be920f29909dc8ff567045c850c14e7ee4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/ps3_cfg_lanData.h @@ -0,0 +1,657 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: ps3_cfg_lanData.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __PS3_CFG_DP_INFO_H__ +#define __PS3_CFG_DP_INFO_H__ + +typedef struct PS3CfgDpHeader { + U32 magic ; + U32 version ; + U32 segment_cnt; + U32 reserved0[16]; +}__attribute__((packed))PS3CfgDpHeader_s; + +typedef struct PS3CfgDpInfo { + U32 magic ; + U16 cnt ; + U16 elem_size; + U8 type ; +}__attribute__((packed))PS3CfgDpInfo_s; + +typedef struct PS3CfgDpSwptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpSwptgentry_s; + +typedef struct PS3CfgDpSwtcam { + U32 data_l ; + U32 data_h :8; + U32 data_rsv :24; + U32 mask_l ; + U32 mask_h :8; + U32 valid :1; + U32 mask_rsv :23; +}__attribute__((packed))PS3CfgDpSwtcam_s; + +typedef struct PS3CfgDpSwextractor { + U32 protocol_id0 :8; + U32 offset0 :9; + U32 enable0 :1; + U32 rsv0 :14; + U32 protocol_id1 :8; + U32 offset1 :9; + U32 enable1 :1; + U32 rsv1 :14; + U32 protocol_id2 :8; + U32 offset2 :9; + U32 enable2 :1; + U32 rsv2 :14; + U32 protocol_id3 :8; + U32 offset3 :9; + U32 enable3 :1; + U32 rsv3 :14; + U32 protocol_id4 :8; + U32 offset4 :9; + U32 enable4 :1; + U32 rsv4 :14; + U32 protocol_id5 :8; + U32 offset5 :9; + U32 enable5 :1; + U32 rsv5 :14; + U32 protocol_id6 :8; + U32 offset6 :9; + U32 enable6 :1; + U32 rsv6 :14; + U32 protocol_id7 :8; + U32 offset7 :9; + U32 enable7 :1; + U32 rsv7 :14; + U32 protocol_id8 :8; + U32 offset8 :9; + U32 enable8 :1; + U32 rsv8 :14; + U32 protocol_id9 :8; + U32 offset9 :9; + U32 enable9 :1; + U32 rsv9 :14; + U32 protocol_id10 :8; + U32 offset10 :9; + U32 enable10 :1; + U32 rsv10 :14; + U32 protocol_id11 :8; + U32 offset11 :9; + U32 enable11 :1; + U32 rsv11 :14; + U32 protocol_id12 :8; + U32 offset12 :9; + U32 enable12 :1; + U32 rsv12 :14; + U32 protocol_id13 :8; + U32 offset13 :9; + U32 enable13 :1; + U32 rsv13 :14; + U32 protocol_id14 :8; + U32 offset14 :9; + U32 enable14 :1; + U32 rsv14 :14; + U32 protocol_id15 :8; + U32 offset15 :9; + U32 enable15 :1; + U32 rsv15 :14; + U32 protocol_id16 :8; + U32 offset16 :9; + U32 enable16 :1; + U32 rsv16 :14; + U32 protocol_id17 :8; + U32 offset17 :9; + U32 enable17 :1; + U32 rsv17 :14; + U32 protocol_id18 :8; + U32 offset18 :9; + U32 enable18 :1; + U32 rsv18 :14; + U32 protocol_id19 :8; + U32 offset19 :9; + U32 enable19 :1; + U32 rsv19 :14; + U32 protocol_id20 :8; + U32 offset20 :9; + U32 enable20 :1; + U32 rsv20 :14; + U32 protocol_id21 :8; + U32 offset21 :9; + U32 enable21 :1; + U32 rsv21 :14; + U32 protocol_id22 :8; + U32 offset22 :9; + U32 enable22 :1; + U32 rsv22 :14; + U32 protocol_id23 :8; + U32 offset23 :9; + U32 enable23 :1; + U32 rsv23 :14; + U32 protocol_id24 :8; + U32 offset24 :9; + U32 enable24 :1; + U32 rsv24 :14; + U32 protocol_id25 :8; + U32 offset25 :9; + U32 enable25 :1; + U32 rsv25 :14; + U32 protocol_id26 :8; + U32 offset26 :9; + U32 enable26 :1; + U32 rsv26 :14; + U32 protocol_id27 :8; + U32 offset27 :9; + U32 enable27 :1; + U32 rsv27 :14; + U32 protocol_id28 :8; + U32 offset28 :9; + U32 enable28 :1; + U32 rsv28 :14; + U32 protocol_id29 :8; + U32 offset29 :9; + U32 enable29 :1; + U32 rsv29 :14; + U32 protocol_id30 :8; + U32 offset30 :9; + U32 enable30 :1; + U32 rsv30 :14; + U32 protocol_id31 :8; + U32 offset31 :9; + U32 enable31 :1; + U32 rsv31 :14; + U32 protocol_id32 :8; + U32 offset32 :9; + U32 enable32 :1; + U32 rsv32 :14; + U32 protocol_id33 :8; + U32 offset33 :9; + U32 enable33 :1; + U32 rsv33 :14; + U32 protocol_id34 :8; + U32 offset34 :9; + U32 enable34 :1; + U32 rsv34 :14; + 
U32 protocol_id35 :8; + U32 offset35 :9; + U32 enable35 :1; + U32 rsv35 :14; + U32 protocol_id36 :8; + U32 offset36 :9; + U32 enable36 :1; + U32 rsv36 :14; + U32 protocol_id37 :8; + U32 offset37 :9; + U32 enable37 :1; + U32 rsv37 :14; + U32 protocol_id38 :8; + U32 offset38 :9; + U32 enable38 :1; + U32 rsv38 :14; + U32 protocol_id39 :8; + U32 offset39 :9; + U32 enable39 :1; + U32 rsv39 :14; + U32 protocol_id40 :8; + U32 offset40 :9; + U32 enable40 :1; + U32 rsv40 :14; + U32 protocol_id41 :8; + U32 offset41 :9; + U32 enable41 :1; + U32 rsv41 :14; + U32 protocol_id42 :8; + U32 offset42 :9; + U32 enable42 :1; + U32 rsv42 :14; + U32 protocol_id43 :8; + U32 offset43 :9; + U32 enable43 :1; + U32 rsv43 :14; + U32 protocol_id44 :8; + U32 offset44 :9; + U32 enable44 :1; + U32 rsv44 :14; + U32 protocol_id45 :8; + U32 offset45 :9; + U32 enable45 :1; + U32 rsv45 :14; + U32 protocol_id46 :8; + U32 offset46 :9; + U32 enable46 :1; + U32 rsv46 :14; + U32 protocol_id47 :8; + U32 offset47 :9; + U32 enable47 :1; + U32 rsv47 :14; +}__attribute__((packed))PS3CfgDpSwextractor_s; + +typedef struct PS3CfgDpSwmap { + U32 map0 :8; + U32 map1 :8; + U32 map2 :8; + U32 map3 :8; +}__attribute__((packed))PS3CfgDpSwmap_s; + +typedef struct PS3CfgDpSwrcp { + U8 rid :6; + U8 rcp_rsv0 :1; + U8 is_root :1; + U8 lookup_index0 :7; + U8 lookup_index0_valid :1; + U8 lookup_index1 :7; + U8 lookup_index1_valid :1; + U8 lookup_index2 :7; + U8 lookup_index2_valid :1; + U8 lookup_index3 :7; + U8 lookup_index3_valid :1; + U8 lookup_index4 :7; + U8 lookup_index4_valid :1; + U8 join_priority; + U8 priority :3; + U8 need_pass_l2 :1; + U8 allow_pass_l2 :1; + U8 inverse_action :1; + U8 prune_idx :2; + U32 default_action :19; + U32 rcp_rsv1 :4; + U32 default_action_valid:1; + U32 rcp_rsv2 :8; + U32 fv4_bitmask :16; + U32 fv3_bitmask :16; + U32 fv2_bitmask :16; + U32 fv1_bitmask :16; + U32 fv0_bitmask :16; + U32 rcp_rsv3 :16; +}__attribute__((packed))PS3CfgDpSwrcp_s; + +typedef struct PS3CfgDpSwprofilercpbitmap { + U32 recipe_high; + U32 recipe_low; +}__attribute__((packed))PS3CfgDpSwprofilercpbitmap_s; + +typedef struct PS3CfgDpSwvsigentry { + U32 vsig0 :12; + U32 vsig_rsv0 :4; + U32 vsig1 :12; + U32 vsig_rsv1 :4; + U32 vsig2 :12; + U32 vsig_rsv2 :4; + U32 vsig3 :12; + U32 vsig_rsv3 :4; +}__attribute__((packed))PS3CfgDpSwvsigentry_s; + +typedef struct PS3CfgDpRssptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpRssptgentry_s; + +typedef struct PS3CfgDpRssvsigentry { + U32 vsig0 :12; + U32 vsig_rsv0 :4; + U32 vsig1 :12; + U32 vsig_rsv1 :4; + U32 vsig2 :12; + U32 vsig_rsv2 :4; + U32 vsig3 :12; + U32 vsig_rsv3 :4; +}__attribute__((packed))PS3CfgDpRssvsigentry_s; + +typedef struct PS3CfgDpRsstcam { + U32 data_l ; + U32 data_h :8; + U32 data_rsv :24; + U32 mask_l ; + U32 mask_h :8; + U32 valid :1; + U32 mask_rsv :23; +}__attribute__((packed))PS3CfgDpRsstcam_s; + +typedef struct PS3CfgDpRssextractor { + U32 protocol_id0 :8; + U32 offset0 :9; + U32 enable0 :1; + U32 rsv0 :14; + U32 protocol_id1 :8; + U32 offset1 :9; + U32 enable1 :1; + U32 rsv1 :14; + U32 protocol_id2 :8; + U32 offset2 :9; + U32 enable2 :1; + U32 rsv2 :14; + U32 protocol_id3 :8; + U32 offset3 :9; + U32 enable3 :1; + U32 rsv3 :14; + U32 protocol_id4 :8; + U32 offset4 :9; + U32 enable4 :1; + U32 rsv4 :14; + U32 protocol_id5 :8; + U32 offset5 :9; + U32 enable5 :1; + U32 rsv5 :14; + U32 protocol_id6 :8; + U32 offset6 :9; + U32 enable6 :1; + U32 rsv6 :14; + U32 protocol_id7 :8; + U32 offset7 :9; + U32 enable7 :1; + U32 rsv7 :14; + 
U32 protocol_id8 :8; + U32 offset8 :9; + U32 enable8 :1; + U32 rsv8 :14; + U32 protocol_id9 :8; + U32 offset9 :9; + U32 enable9 :1; + U32 rsv9 :14; + U32 protocol_id10 :8; + U32 offset10 :9; + U32 enable10 :1; + U32 rsv10 :14; + U32 protocol_id11 :8; + U32 offset11 :9; + U32 enable11 :1; + U32 rsv11 :14; + U32 protocol_id12 :8; + U32 offset12 :9; + U32 enable12 :1; + U32 rsv12 :14; + U32 protocol_id13 :8; + U32 offset13 :9; + U32 enable13 :1; + U32 rsv13 :14; + U32 protocol_id14 :8; + U32 offset14 :9; + U32 enable14 :1; + U32 rsv14 :14; + U32 protocol_id15 :8; + U32 offset15 :9; + U32 enable15 :1; + U32 rsv15 :14; + U32 protocol_id16 :8; + U32 offset16 :9; + U32 enable16 :1; + U32 rsv16 :14; + U32 protocol_id17 :8; + U32 offset17 :9; + U32 enable17 :1; + U32 rsv17 :14; + U32 protocol_id18 :8; + U32 offset18 :9; + U32 enable18 :1; + U32 rsv18 :14; + U32 protocol_id19 :8; + U32 offset19 :9; + U32 enable19 :1; + U32 rsv19 :14; + U32 protocol_id20 :8; + U32 offset20 :9; + U32 enable20 :1; + U32 rsv20 :14; + U32 protocol_id21 :8; + U32 offset21 :9; + U32 enable21 :1; + U32 rsv21 :14; + U32 protocol_id22 :8; + U32 offset22 :9; + U32 enable22 :1; + U32 rsv22 :14; + U32 protocol_id23 :8; + U32 offset23 :9; + U32 enable23 :1; + U32 rsv23 :14; +}__attribute__((packed))PS3CfgDpRssextractor_s; + +typedef struct PS3CfgDpRssmap { + U32 rssmap0 :7; + U32 rssrsv0 :1; + U32 rssmap1 :7; + U32 rssrsv1 :1; + U32 rssmap2 :7; + U32 rssrsv2 :1; + U32 rssmap3 :7; + U32 rssrsv3 :1; +}__attribute__((packed))PS3CfgDpRssmap_s; + +typedef struct PS3CfgDpRssipset { + U32 fwWordIndex0 :5; + U32 ipSetRsv0 :2; + U32 fwWordIndexEn0 :1; + U32 fwWordIndex1 :5; + U32 ipSetRsv1 :2; + U32 fwWordIndexEn1 :1; + U32 fwWordIndex2 :5; + U32 ipSetRsv2 :2; + U32 fwWordIndexEn2 :1; + U32 fwWordIndex3 :5; + U32 ipSetRsv3 :2; + U32 fwWordIndexEn3 :1; + U32 fwWordIndex4 :5; + U32 ipSetRsv4 :2; + U32 fwWordIndexEn4 :1; + U32 fwWordIndex5 :5; + U32 ipSetRsv5 :2; + U32 fwWordIndexEn5 :1; + U32 fwWordIndex6 :5; + U32 ipSetRsv6 :2; + U32 fwWordIndexEn6 :1; + U32 fwWordIndex7 :5; + U32 ipSetRsv7 :2; + U32 fwWordIndexEn7 :1; + U32 fwWordIndex8 :5; + U32 ipSetRsv8 :2; + U32 fwWordIndexEn8 :1; + U32 fwWordIndex9 :5; + U32 ipSetRsv9 :2; + U32 fwWordIndexEn9 :1; + U32 fwWordIndex10 :5; + U32 ipSetRsv10 :2; + U32 fwWordIndexEn10 :1; + U32 fwWordIndex11 :5; + U32 ipSetRsv11 :2; + U32 fwWordIndexEn11 :1; + U32 fwWordIndex12 :5; + U32 ipSetRsv12 :2; + U32 fwWordIndexEn12 :1; + U32 fwWordIndex13 :5; + U32 ipSetRsv13 :2; + U32 fwWordIndexEn13 :1; + U32 fwWordIndex14 :5; + U32 ipSetRsv14 :2; + U32 fwWordIndexEn14 :1; + U32 fwWordIndex15 :5; + U32 ipSetRsv15 :2; + U32 fwWordIndexEn15 :1; + U32 fwWordIndex16 :5; + U32 ipSetRsv16 :2; + U32 fwWordIndexEn16 :1; + U32 fwWordIndex17 :5; + U32 ipSetRsv17 :2; + U32 fwWordIndexEn17 :1; + U32 fwWordIndex18 :5; + U32 ipSetRsv18 :2; + U32 fwWordIndexEn18 :1; + U32 fwWordIndex19 :5; + U32 ipSetRsv19 :2; + U32 fwWordIndexEn19 :1; + U32 fwWordIndex20 :5; + U32 ipSetRsv20 :2; + U32 fwWordIndexEn20 :1; + U32 fwWordIndex21 :5; + U32 ipSetRsv21 :2; + U32 fwWordIndexEn21 :1; + U32 fwWordIndex22 :5; + U32 ipSetRsv22 :2; + U32 fwWordIndexEn22 :1; + U32 fwWordIndex23 :5; + U32 ipSetRsv23 :2; + U32 fwWordIndexEn23 :1; +}__attribute__((packed))PS3CfgDpRssipset_s; + +typedef struct PS3CfgDpFnavptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpFnavptgentry_s; + +typedef struct PS3CfgDpFnavmask { + U32 val :16; + U32 rsv :16; + U32 fldbit_l; + U32 fldbit_h; 
+}__attribute__((packed))PS3CfgDpFnavmask_s; + +typedef struct PS3CfgDpRssmask { + U32 val :16; + U32 rsv :16; + U32 fldbit_l; + U32 fldbit_h; +}__attribute__((packed))PS3CfgDpRssmask_s; + +typedef struct PS3CfgDpAclptgentry { + U32 ptg0 :8; + U32 ptg1 :8; + U32 ptg2 :8; + U32 ptg3 :8; +}__attribute__((packed))PS3CfgDpAclptgentry_s; + +typedef struct PS3CfgDpPagtpuexhdr { + U32 hdrLen :8; + U32 hdrrsv :24; +}__attribute__((packed))PS3CfgDpPagtpuexhdr_s; + +typedef struct PS3CfgDpPkghdr { + struct PS3CfgDpHeader dpHeader; +}__attribute__((packed))PS3CfgDpPkghdr_s; + +typedef struct PS3CfgDpSwptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwptgentry dpSwptgentry[256]; +}__attribute__((packed))PS3CfgDpSwptgcfg_s; + +typedef struct PS3CfgDpSwtcamcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwtcam dpSwtcam[1]; +}__attribute__((packed))PS3CfgDpSwtcamcfg_s; + +typedef struct PS3CfgDpSwextractorcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwextractor dpSwextractor[3]; +}__attribute__((packed))PS3CfgDpSwextractorcfg_s; + +typedef struct PS3CfgDpSwmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwmap dpSwmap[1]; +}__attribute__((packed))PS3CfgDpSwmapcfg_s; + +typedef struct PS3CfgDpSwrcpcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwrcp dpSwrcp[9]; +}__attribute__((packed))PS3CfgDpSwrcpcfg_s; + +typedef struct PS3CfgDpSwprofilercpbitmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwprofilercpbitmap dpSwprofilercpbitmap[3]; +}__attribute__((packed))PS3CfgDpSwprofilercpbitmapcfg_s; + +typedef struct PS3CfgDpSwvsigcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpSwvsigentry dpSwvsigentry[1]; +}__attribute__((packed))PS3CfgDpSwvsigcfg_s; + +typedef struct PS3CfgDpRssptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssptgentry dpRssptgentry[1]; +}__attribute__((packed))PS3CfgDpRssptgcfg_s; + +typedef struct PS3CfgDpRssvsigcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssvsigentry dpRssvsigentry[1]; +}__attribute__((packed))PS3CfgDpRssvsigcfg_s; + +typedef struct PS3CfgDpRsstcamcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRsstcam dpRsstcam[1]; +}__attribute__((packed))PS3CfgDpRsstcamcfg_s; + +typedef struct PS3CfgDpRssextractorcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssextractor dpRssextractor[1]; +}__attribute__((packed))PS3CfgDpRssextractorcfg_s; + +typedef struct PS3CfgDpRssmapcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssmap dpRssmap[1]; +}__attribute__((packed))PS3CfgDpRssmapcfg_s; + +typedef struct PS3CfgDpRssipsetcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssipset dpRssipset[1]; +}__attribute__((packed))PS3CfgDpRssipsetcfg_s; + +typedef struct PS3CfgDpFnavptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpFnavptgentry dpFnavptgentry[1]; +}__attribute__((packed))PS3CfgDpFnavptgcfg_s; + +typedef struct PS3CfgDpFnavmaskcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpFnavmask dpFnavmask[1]; +}__attribute__((packed))PS3CfgDpFnavmaskcfg_s; + +typedef struct PS3CfgDpRssmaskcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpRssmask dpRssmask[1]; +}__attribute__((packed))PS3CfgDpRssmaskcfg_s; + +typedef struct PS3CfgDpAclptgcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpAclptgentry dpAclptgentry[1]; +}__attribute__((packed))PS3CfgDpAclptgcfg_s; + +typedef struct PS3CfgDpPagtpuexhdrcfg { + struct PS3CfgDpInfo dpInfo; + struct PS3CfgDpPagtpuexhdr dpPagtpuexhdr[1]; +}__attribute__((packed))PS3CfgDpPagtpuexhdrcfg_s; + +typedef struct PS3CfgDpArea { + struct PS3CfgDpPkghdr dpPkghdr; + 
struct PS3CfgDpSwptgcfg dpSwptgcfg; + struct PS3CfgDpSwtcamcfg dpSwtcamcfg; + struct PS3CfgDpSwextractorcfg dpSwextractorcfg; + struct PS3CfgDpSwmapcfg dpSwmapcfg; + struct PS3CfgDpSwrcpcfg dpSwrcpcfg; + struct PS3CfgDpSwprofilercpbitmapcfg dpSwprofilercpbitmapcfg; + struct PS3CfgDpSwvsigcfg dpSwvsigcfg; + struct PS3CfgDpRssptgcfg dpRssptgcfg; + struct PS3CfgDpRssvsigcfg dpRssvsigcfg; + struct PS3CfgDpRsstcamcfg dpRsstcamcfg; + struct PS3CfgDpRssextractorcfg dpRssextractorcfg; + struct PS3CfgDpRssmapcfg dpRssmapcfg; + struct PS3CfgDpRssipsetcfg dpRssipsetcfg; + struct PS3CfgDpFnavptgcfg dpFnavptgcfg; + struct PS3CfgDpFnavmaskcfg dpFnavmaskcfg; + struct PS3CfgDpRssmaskcfg dpRssmaskcfg; + struct PS3CfgDpAclptgcfg dpAclptgcfg; + struct PS3CfgDpPagtpuexhdrcfg dpPagtpuexhdrcfg; +}__attribute__((aligned(8)))PS3CfgDpArea_s; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cli_drv_msg.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cli_drv_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..c035fcf6e33089b34aa6af2fe5d45e3ae2704c92 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cli_drv_msg.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_cli_drv_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CLI_DRV_MSG_H__ +#define __SXE2_CLI_DRV_MSG_H__ + +#include <linux/types.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +#define SXE2_DRV_MSG_MAX_SIZE (8192) +#define SXE2_DRV_MSG_MAGIC_CODE (0x56781234) +#define SXE2_MAX_NETDEV_NAME_SIZE (128) + +#define SXE2_CLI_DRV_SUCCESS (0) +#define SXE2_MOD_DRV (1) +#define SXE2_SUB_MOD_DEV (1) + +#define MODULE_ID_SHIFT (24) +#define SUB_MODULE_ID_SHIFT (16) +#define ERROR_INDEX_MASK (0xFFFF0000) +#define SXE2_MAKE_ERR_CODE_INDEX(module, sub_module) \ + ((((u32)((module) << MODULE_ID_SHIFT)) | ((u32)((sub_module) << SUB_MODULE_ID_SHIFT))) & \ + ERROR_INDEX_MASK) + +enum sxe2_priv_drv_err_code { + SXE2_ERR_DRV_DEV = SXE2_MAKE_ERR_CODE_INDEX(SXE2_MOD_DRV, SXE2_SUB_MOD_DEV), + SXE2_ERR_DRV_DEV_PARAMS_INVAL, + SXE2_ERR_DRV_DEV_NULL_PTR, + SXE2_ERR_DRV_DEV_NOT_FOUND, + SXE2_ERR_DRV_DEV_NOT_SUPPORT, + SXE2_ERR_DRV_DEV_NO_MEM, + SXE2_ERR_DRV_DEV_FAULT, + SXE2_ERR_DRV_DEV_MAGIC_INVAL, +}; + +enum sxe2_cli_drv_cmd_opcode { + SXE2_CLI_CMD_GET_NETDEV_NAME = 0, + SXE2_CLI_CMD_MAX = 0xFFFF, +}; + +struct drv_msg_info { + u32 magic; + u32 opcode; + u32 error; + u32 timeout; + u32 runver; + u32 req_length; + u32 ack_length; + u16 hdr_len; + u8 reserved[2]; + u64 trace_id; + u8 pad[8]; + u8 body[]; +}; + +struct sxe2_cli_drv_get_pname_rsp_msg { + char netdev_name[SXE2_MAX_NETDEV_NAME_SIZE]; +}; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cmd.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cmd.h new file mode 100644 index 0000000000000000000000000000000000000000..be85edad72d0e2b575cee3fa0144f69e405e877e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_cmd.h @@ -0,0 +1,3687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_cmd.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_CMD_H__ +#define __SXE2_CMD_H__ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#if defined(SXE2_SUPPORT_UEFI) || defined(SXE2_SUPPORT_IPXE) +#include "sxe2_uefi_type.h" +#include "sxe2_uefi_def.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include <linux/types.h> +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "rte_os.h" +#include "sxe2_type.h" +#include "sxe2_common.h" +#include "sxe2_osal.h" +#endif + +#ifndef SXE2_DRIVER_SIM +#include "sxe2_spec.h" +#endif + +#pragma pack(4) + +#define SXE2_VSI_MAX_CNT (768) + +#define SXE2_INVAL_U8 (0xFF) +#define SXE2_INVAL_U16 (0xFFFF) +#define SXE2_INVAL_U32 (0xFFFFFFFF) +#define SXE2_VF_ID_INVAL (0xFFFF) + +#define SXE2_CMD_MAGIC (0xFEFEEFEF) + +#define SXE2_FW_COMP_MAJOR_VER (1) +#define SXE2_FW_COMP_MINOR_VER (1) +#define SXE2_FW_COMP_VER \ + (SXE2_FW_COMP_MAJOR_VER << 16 | \ + SXE2_FW_COMP_MINOR_VER) + +#define SXE2_CMD_LARGE_BUF_SIZE (512) +#define SXE2_CMD_MAX_BUF \ + (2 * 4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE \ + (SXE2_CMD_MAX_BUF - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_MAX_BUF_MBX \ + (4096) +#define SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX \ + (SXE2_CMD_MAX_BUF_MBX - SXE2_CMD_HDR_SIZE) +#define SXE2_DRV_CMD_MAX_MSG_SIZE_MBX \ + (SXE2_CMD_MAX_TRANSMIT_DATA_SIZE_MBX - \ + SXE2_DRV_MSG_HDR_SIZE) + +#define SXE2_CMD_DD BIT(0) +#define SXE2_CMD_COMPLETE BIT(1) +#define SXE2_CMD_ERROR BIT(2) +#define SXE2_CMD_LARGE_BUF BIT(9) +#define SXE2_CMD_READ BIT(10) +#define SXE2_CMD_BUF BIT(12) +#define SXE2_CMD_NO_INTR BIT(13) + +#define SXE2_CMD_DONE (SXE2_CMD_DD | SXE2_CMD_COMPLETE | SXE2_CMD_ERROR) + +#define SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT \ + (16) +#define SXE2_TC_MAX_CNT (8) +#define SXE2_TXQ_CTXT_LEN (24) + +#define SXE2_CMD_HDR_SIZE sizeof(struct sxe2_cmd_hdr) +#define SXE2_DRV_MSG_HDR_SIZE \ + sizeof(struct sxe2_drv_msg_hdr) + +#define SXE2_MBX_DATA_PTR(type, msg_hdr_ptr) \ + ((type *)((u8 *)(msg_hdr_ptr) + (msg_hdr_ptr)->data_offset)) + +#define SXE2_MBX_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2vf_mbx_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_DRV_MSG_HDR_PTR(cmd_hdr_ptr) \ + ((struct sxe2_drv_msg_hdr *)((u8 *)(cmd_hdr_ptr) + (cmd_hdr_ptr)->hdr_len)) + +#define SXE2_CMD_MODULE_S (8) +#define SXE2_MK_CMD(module, cmd) ((module) << SXE2_CMD_MODULE_S | (cmd)) + +#define SXE2_TCAM_KEY_VALUE_LEN (5) +#define SXE2_TCAM_KEY_LEN (2 * SXE2_TCAM_KEY_VALUE_LEN) + +#define SXE2_FULLKEY_DWORD_CNT (3) +#define SXE2_PACKET_INFO_DWORD_CNT (20) +#define SXE2_SWITCH_FV_CNT (48) +#define SXE2_PACKET_MAX_RECIPES (32) + +#define SXE2_MAX_NUM_RECIPES (64) +#define SXE2_MAX_NUM_RECIPES_PER_PROFILE (32) +#define SXE2_MAX_NUM_ROOT_RECIPES_PER_PROFILE (24) + +#if defined(SXE2_TEST) +#define SXE2_MAX_NUM_RECIPES_VER_I_O (64) +#else +#define SXE2_MAX_NUM_RECIPES_VER_I_O (24) +#endif + +#define SXE2_MAX_NUM_PROFILES (256) +#define SXE2_NUM_WORDS_RECIPE (4) +#define SXE2_MAX_REPLY_RECIPE (4) +#define SXE2_MAX_CHAIN_RECIPE (SXE2_MAX_REPLY_RECIPE + 1) +#define SXE2_MAX_CHAIN_WORDS (SXE2_NUM_WORDS_RECIPE * \ + SXE2_MAX_REPLY_RECIPE) +#define SXE2_VSI_LIST_DAT_LEN DIV_ROUND_UP(SXE2_VSI_MAX_CNT, \ + (BITS_PER_BYTE * sizeof(u32))) + +#define SXE2_ACTION_PRIORITY_HIGH (7) + +#define SXE2_CMD_SWITCH_RULE_FLAG_COMPLEX BIT(0) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_PRUNE BIT(1) +#define SXE2_CMD_SWITCH_VSI_FLAG_LIST_INC BIT(2) + +#define 
SXE2_SINGLE_ACT_LB_ENABLE BIT(16) +#define SXE2_SINGLE_ACT_LAN_ENABLE BIT(15) + +#define SXE2_SINGLE_ACT_VSI_TYPE_S (17) +#define SXE2_SINGLE_ACT_VSI_FORWARD (0x0 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_VSI_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_S (5) +#define SXE2_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << SXE2_SINGLE_ACT_VSI_LIST_ID_S) +#define SXE2_SINGLE_ACT_VSI_LIST BIT(4) +#define SXE2_SINGLE_ACT_VALID_BIT BIT(1) +#define SXE2_SINGLE_ACT_DROP BIT(0) + +#define SXE2_SINGLE_ACT_TO_Q (0x1 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_Q_INDEX_S (4) +#define SXE2_SINGLE_ACT_Q_INDEX_M (0x7FF << SXE2_SINGLE_ACT_Q_INDEX_S) +#define SXE2_SINGLE_ACT_Q_REGION_S (1) +#define SXE2_SINGLE_ACT_Q_REGION_M (0x7 << SXE2_SINGLE_ACT_Q_REGION_S) +#define SXE2_SINGLE_ACT_Q_PRIORITY BIT(0) + +#define SXE2_SINGLE_ACT_PRUNE (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_EGRESS BIT(3) +#define SXE2_SINGLE_ACT_INGRESS BIT(2) +#define SXE2_SINGLE_ACT_PRUNET BIT(1) + +#define SXE2_SINGLE_ACT_MIRROR (0x3 << SXE2_SINGLE_ACT_VSI_TYPE_S) + +#define SXE2_SINGLE_ACT_POINTER (0x2 << SXE2_SINGLE_ACT_VSI_TYPE_S) +#define SXE2_SINGLE_ACT_TO_LARGE BIT(0) +#define SXE2_SINGLE_ACT_HASFWD BIT(1) + +#define SXE2_MAC_NUM (4) + +#define SXE2_RSS_FV_CNT (24) + +#define SXE2_RSS_FV_TRACE_CNT (12) + +#define SXE2_OG_BUF_SIZE (4096) +#define SXE2_FV_CNT_MAX SXE2_SWITCH_FV_CNT +#define SXE2_FNAV_INPUT_CNT (30) + +#define SXE2_BFD_FV_CNT_MAX (32) + +#define SXE2_RXFT_PPE_INFO_REG_CNT (20) + +#define SXE2_FV_DIRECTION_OFFSET (10) +#define SXE2_FV_DIRECTION_MASK BIT(SXE2_FV_DIRECTION_OFFSET) +#define SXE2_FV_DIRECTION_TX (0) +#define SXE2_FV_DIRECTION_RX (1) + +#define SXE2_FV_CAST_OFFSET (0) +#define SXE2_FV_CAST_UNI (0) +#define SXE2_FV_CAST_MULTI (1) +#define SXE2_FV_CAST_BROAD (2) + +#define SXE2_FV_PKT_SRC_OFFSET (10) +#define SXE2_FV_PKT_SRC_MASK (0x3 << SXE2_FV_PKT_SRC_OFFSET) +#define SXE2_FV_PKT_SRC_TX (0x3) +#define SXE2_FV_PKT_SRC_RX (0x0) + +#define SXE2_FV_VSI_NUM_OFFSET (0) +#define SXE2_FV_VSI_NUM_MASK (0x3ff << SXE2_FV_VSI_NUM_OFFSET) + +#define SXE2_FV_PKT_TO_RDMA_OFFSET (8) +#define SXE2_FV_PKT_TO_RDMA_MASK (0x1 << SXE2_FV_PKT_TO_RDMA_OFFSET) +#define SXE2_FV_PKT_TO_RDMA (1) +#define SXE2_FV_PKT_TO_RDMA_NO (0) + +#define SXE2_SWITCH_RECIPE_PRIO_7 (7) +#define SXE2_SWITCH_RECIPE_PRIO_6 (6) + +#define SXE2_LLDP_FRAME_MAX_SIZE (1500) +#define SXE2_MAX_TRAFFIC_CLASS (8) +#define SXE2_MAX_USER_PRIORITY (8) +#define SXE2_DCBX_MAX_APPS (64) +#define SXE2_DSCP_MAX_NUM (64) + +#define SXE2_DSCP_OUI (0xFFFFFFU) +#define SXE2_DSCP_SUBTYPE_DSCP2UP (0x41U) +#define SXE2_DSCP_SUBTYPE_ENFORCE (0x42U) +#define SXE2_DSCP_SUBTYPE_TCBW (0x43U) +#define SXE2_DSCP_SUBTYPE_PFC (0x44U) +#define SXE2_DSCP_IPV6_OFFSET (80) +#define SXE2_DSCP_IPV4_UNTAG_OFFSET (64) +#define SXE2_DSCP_IPV6_UNTAG_OFFSET (144) + +#define SXE2_CMD_VSI_STATS_MAX_CNT (16) + +#define SXE2_SERIAL_NUM_LEN (20) + +#define SXE2_MDD_TYPE_TX (1) +#define SXE2_MDD_TYPE_RX (2) + +#define SXE2_FNAV_DEFAULT_MASK_CNT (6) + +#define SXE2_RSS_CORE_LUT_SIZE (32) + +#define SXE2_LARGE_ACTION_COUNT_IN_GROUP (4) +#define SXE2_FLM_VENDOR_LEN 16 +#define SXE2_FLM_VENDOR_PN_LEN 16 +#define SXE2_HOST_FLM_VENDOR_LEN 32 +#define SXE2_HOST_FLM_VENDOR_PN_LEN 32 + +#define SXE2_LLDP_FW_AGENT_DISABLE 0 +#define SXE2_LLDP_FW_AGENT_ENABLE 1 + +enum sxe2_txq_quanta_prof_cfg { + SXE2_TXQ_QUANTA_PROF_DEFAULT = 0, + SXE2_TXQ_QUANTA_PROF_SIMPLE, + SXE2_TXQ_QUANTA_PROF_COMPLEX, +}; + +enum 
sxe2_cmd_type { + SXE2_CMD_TYPE_CLI = 0, + SXE2_CMD_TYPE_DRV_TO_FW, + SXE2_CMD_TYPE_FW_NOTIFY, + SXE2_CMD_TYPE_PF_TO_VF, + SXE2_CMD_TYPE_VF_TO_PF, + SXE2_CMD_TYPE_DRV_TO_HW, + SXE2_CMD_TYPE_PF_REPLY_VF, +}; + +enum sxe2_cmd_module { + SXE2_CMD_MODULE_HANDSHAKE = 0, + SXE2_CMD_MODULE_CAPS = 1, + SXE2_CMD_MODULE_VSI = 2, + SXE2_CMD_MODULE_QUEUE = 3, + SXE2_CMD_MODULE_CFG = 4, + SXE2_CMD_MODULE_SWITCH = 5, + SXE2_CMD_MODULE_RULE = 6, + SXE2_CMD_MODULE_EVENT = 7, + SXE2_CMD_MODULE_MBX = 8, + SXE2_CMD_MODULE_TXSCHED = 9, + SXE2_CMD_MODULE_STATS = 11, + SXE2_CMD_MODULE_OPT = 12, + SXE2_CMD_MODULE_RSS = 13, + SXE2_CMD_MODULE_LED = 14, + SXE2_CMD_MODULE_OG = 15, + SXE2_CMD_MODULE_RDMA = 16, + SXE2_CMD_MODULE_IPSEC = 17, + SXE2_CMD_MODULE_FNAV = 18, + SXE2_CMD_MODULE_PXE = 19, + SXE2_CMD_MODULE_DCB = 20, + SXE2_CMD_MODULE_LLDP = 21, + SXE2_CMD_MODULE_PTP = 22, + SXE2_CMD_MODULE_MACADDR = 23, + SXE2_CMD_MODULE_MACSEC = 24, + SXE2_CMD_MODULE_UPGRADE = 25, + SXE2_CMD_MODULE_ETHTOOL = 26, + SXE2_CMD_MODULE_FLM = 27, + SXE2_CMD_MODULE_SFP = 28, + SXE2_CMD_MODULE_RWREG = 29, + SXE2_CMD_MODULE_UDPTUNEEL = 30, + SXE2_CMD_MODULE_NCD = 31, + SXE2_CMD_MODULE_BFD = 32, + SXE2_CMD_MODULE_NCD_UDF = 33, + SXE2_CMD_MODULE_QUEUE_STATS_MAP = 34, + SXE2_CMD_MODULE_ACL = 35, +}; + +enum sxe2_drv_cmd_opcode { + + SXE2_CMD_Q_HANDSHAKE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 1), + SXE2_CMD_Q_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_HANDSHAKE, 3), + + SXE2_CMD_DEV_CAPS = SXE2_MK_CMD(SXE2_CMD_MODULE_CAPS, 1), + SXE2_CMD_FUNC_CAPS, + SXE2_CMD_PF_CFG_CLEAR, + SXE2_CMD_PF_SRIOV_SET, + SXE2_CMD_PF_DDP_REF_CLR, + SXE2_CMD_PHY_PORT_INFO_GET, + SXE2_CMD_PF_SERIAL_GET, + SXE2_CMD_DRV_MODE_GET, + SXE2_CMD_DRV_MODE_SET, + + SXE2_CMD_VSI_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_VSI, 1), + SXE2_CMD_UPDATE_VSI, + SXE2_CMD_FREE_VSI, + SXE2_CMD_VSI_VLAN_FILTER, + SXE2_CMD_VSI_LOOPBACK, + SXE2_CMD_VSI_SPOOFCHK, + SXE2_CMD_VSI_SRC_PRUNE, + SXE2_CMD_VSI_MDD_CHECK, + SXE2_CMD_VSI_VF_QUEUE_SET, + SXE2_CMD_VSI_VF_QUEUE_CLEAR, + + SXE2_CMD_TXQ_CFG_AND_ENABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 1), + SXE2_CMD_RXQ_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 2), + SXE2_CMD_RX_FB = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 3), + SXE2_CMD_TXQ_DISABLE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 4), + SXE2_CMD_TXQ_STATE = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE, 5), + + SXE2_CMD_CFG_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_CFG, 1), + SXE2_CMD_CFG_UPDATE, + SXE2_CMD_DP_DLD_PRE, + SXE2_CMD_DP_DLD_PROC, + SXE2_CMD_DP_DLD_DONE, + SXE2_CMD_DP_DLD_STATE, + + SXE2_CMD_SWITCH_RULE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_SWITCH, 1), + SXE2_CMD_SWITCH_RULE_DEL, + SXE2_CMD_SWITCH_RULE_UPDATE, + SXE2_CMD_SWITCH_RULE_GET, + SXE2_CMD_SWITCH_VSI_LIST_ADD, + SXE2_CMD_SWITCH_VSI_LIST_DEL, + SXE2_CMD_SWITCH_VSI_LIST_GET, + SXE2_CMD_SWITCH_VSI_LIST_UPDATE, + SXE2_CMD_SWITCH_LARGE_ACTION_CFG, + SXE2_CMD_SWITCH_RULE_CPX_ADD, + SXE2_CMD_SWITCH_RULE_CPX_DEL, + SXE2_CMD_SWITCH_RULE_CPX_UPDATE, + SXE2_CMD_SWITCH_RULE_CPX_GET, + SXE2_CMD_SWITCH_TRACE_TRIGGER, + SXE2_CMD_SWITCH_TRACE_RECORDER, + SXE2_CMD_HW_DFX_SHOW, + SXE2_CMD_SWITCH_RECIPE_GET, + SXE2_CMD_SWITCH_PROFILE_RECIPE_MAP_GET, + SXE2_CMD_SWITCH_SHARE_ID_GET, + SXE2_CMD_SWITCH_DFX_IRQ, + + SXE2_CMD_PARSE_RULE = SXE2_MK_CMD(SXE2_CMD_MODULE_RULE, 1), + SXE2_CMD_UDP_TUNNEL_PORT, + + SXE2_CMD_EVENT_SUBSCRIBE = SXE2_MK_CMD(SXE2_CMD_MODULE_EVENT, 1), + SXE2_CMD_EVENT_UNSUBSCRIBE, + SXE2_CMD_EVENT_FW_LOG_ACK, + + SXE2_CMD_MBX_TO_PF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 1), + SXE2_CMD_MBX_TO_VF = + SXE2_MK_CMD(SXE2_CMD_MODULE_MBX, 2), + + SXE2_CMD_TXSCHED_CAP_QUERY 
= SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 1), + SXE2_CMD_TXSCHED_DFLT_TOPO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 2), + SXE2_CMD_TX_SCHED_NODE_INFO_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 3), + SXE2_CMD_TX_SCHED_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 4), + SXE2_CMD_TX_SCHED_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 5), + SXE2_CMD_TX_SCHED_NODE_SUSPEND = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 6), + SXE2_CMD_TX_SCHED_NODE_RESUME = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 7), + SXE2_CMD_TX_SCHED_LEAF_NODE_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 10), + SXE2_CMD_TX_SCHED_LEAF_NODE_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 11), + SXE2_CMD_TX_SCHED_NODE_RL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 12), + SXE2_CMD_TX_SCHED_Q_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 13), + SXE2_CMD_TX_SCHED_Q_STOP = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 14), + SXE2_CMD_TX_SCHED_ETS_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 15), + SXE2_CMD_TX_SCHED_LEAF_NODE_MOVE = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 16), + SXE2_CMD_TX_SCHED_QSET_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 17), + SXE2_CMD_TX_SCHED_QSET_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 18), + SXE2_CMD_TX_SCHED_PRIO_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 19), + SXE2_CMD_TX_SCHED_WEIGHT_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 20), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 21), + SXE2_CMD_TX_SCHED_QUEUE_LEAF_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 22), + SXE2_CMD_TX_SCHED_NODE_SRL_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 23), + SXE2_CMD_TX_SCHED_PROFILE_RL_PRE_QUERY = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 24), + SXE2_CMD_TX_SCHED_PROFILE_SRL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 25), + SXE2_CMD_TX_SCHED_PROFILE_SRL_DEL = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 26), + SXE2_CMD_TX_SCHED_PROFILE_SRL_UPD = SXE2_MK_CMD(SXE2_CMD_MODULE_TXSCHED, 27), + + SXE2_CMD_GET_PF_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 1), + SXE2_CMD_GET_VSI_STATS = SXE2_MK_CMD(SXE2_CMD_MODULE_STATS, 2), + SXE2_CMD_GET_PPE_DFX, + + SXE2_CMD_OPT_EEP = SXE2_MK_CMD(SXE2_CMD_MODULE_OPT, 1), + + SXE2_CMD_RSS_VSI_HCTRL_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_RSS, 1), + SXE2_CMD_RSS_LUT_SET, + SXE2_CMD_RSS_LUT_GET, + SXE2_CMD_RSS_HKEY_SET, + SXE2_CMD_RSS_HKEY_GET, + SXE2_CMD_RSS_SYMM_FV_SET, + SXE2_CMD_RSS_TRACE_TRIGGER, + SXE2_CMD_RSS_TRACE_RECORDER, + + SXE2_CMD_LED_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_LED, 1), + + SXE2_CMD_OG_CFG_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_OG, 1), + SXE2_CMD_OG_TCAM_ENTRY_ALLOC, + SXE2_CMD_OG_TCAM_ENTRY_FREE, + SXE2_CMD_OG_TCAM_ENTRY_BATCH, + SXE2_CMD_OG_PROF_ID_ALLOC, + SXE2_CMD_OG_PROF_ID_FREE, + SXE2_CMD_OG_MASK_SEL_UPDATE, + + SXE2_CMD_RDMA_QP_ATTACH_MC = SXE2_MK_CMD(SXE2_CMD_MODULE_RDMA, 1), + SXE2_CMD_RDMA_QP_DETACH_MC, + SXE2_CMD_RDMA_QET_BIND_TC, + SXE2_CMD_RDMA_PF_FUNC_TABLE_INIT, + SXE2_CMD_RDMA_DESTROY_CC_QP, + SXE2_CMD_RDMA_GET_CC_QP_DFX, + SXE2_CMD_RDMA_NOTIFY_STATUS, + + SXE2_CMD_IPSEC_GET_CAPA = SXE2_MK_CMD(SXE2_CMD_MODULE_IPSEC, 1), + SXE2_CMD_IPSEC_TXSA_ADD, + SXE2_CMD_IPSEC_TXSA_DEL, + SXE2_CMD_IPSEC_TXSA_SET, + SXE2_CMD_IPSEC_RXSA_ADD, + SXE2_CMD_IPSEC_RXSA_DEL, + SXE2_CMD_IPSEC_RXSA_SET, + SXE2_CMD_IPSEC_STATS_SHOW, + SXE2_CMD_IPSEC_TXRX_SWITCH, + SXE2_CMD_IPSEC_DRV_CLEAR, + + SXE2_CMD_FNAV_FILTER_UPDATE = SXE2_MK_CMD(SXE2_CMD_MODULE_FNAV, 1), + SXE2_CMD_FNAV_TRACE_TRIGGER, + SXE2_CMD_FNAV_TRACE_RECORDER, + SXE2_CMD_FNAV_HW_STS, + SXE2_CMD_FNAV_HW_CLEAR, + SXE2_CMD_RXFT_PPE_INFO, + SXE2_CMD_VF_FNAV_FILTER_CLEAR, + SXE2_CMD_FNAV_STATS_GET, + SXE2_CMD_FNAV_DFLT_COMP_QIDX_SET, + 
SXE2_CMD_FNAV_SPACE_CNT_GET, + SXE2_CMD_FNAV_MATCH_GET_BATCH, + + SXE2_PXE_CTRL = SXE2_MK_CMD(SXE2_CMD_MODULE_PXE, 1), + SXE2_UEFI_PRIV_DATA_SET, + SXE2_UEFI_PRIV_DATA_GET, + SXE2_UEFI_SOCINFO_GET, + + SXE2_CMD_QOS_MODE_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_DCB, 1), + SXE2_CMD_QOS_MODE_GET, + SXE2_CMD_LINK_FLOW_CONTROL_GET, + SXE2_CMD_LINK_FLOW_CONTROL_SET, + + SXE2_CMD_LLDP_MIB_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_LLDP, 1), + SXE2_CMD_LLDP_MIB_GET, + SXE2_CMD_LLDP_MIB_NOTIFY, + SXE2_CMD_LLDP_DCBX_FW_AGENT_SET, + SXE2_CMD_LLDP_DCBX_FW_AGENT_GET, + SXE2_CMD_LLDP_FW_STATS, + SXE2_CMD_LLDP_REMOTE_MIBS_INFO, + SXE2_CMD_LLDP_REMOTE_MIBS_DUMP, + SXE2_CMD_LLDP_FW_AGENT_SET, + SXE2_CMD_LLDP_FW_AGENT_GET, + + SXE2_CMD_PTP_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_PTP, 1), + SXE2_CMD_PTP_RX_MODE_SET, + SXE2_CMD_PTP_SEM_CLEAN, + + SXE2_CMD_MAC_ADDR_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_MACADDR, 1), + SXE2_CMD_MAC_ADDR_SET, + SXE2_CMD_MAC_MTU_SET, + + SXE2_CMD_MACSEC_TXSC_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_MACSEC, 1), + SXE2_CMD_MACSEC_TXSA_CFG, + SXE2_CMD_MACSEC_RXSC_CFG, + SXE2_CMD_MACSEC_RXSA_CFG, + SXE2_CMD_MACSEC_FIX_CFG, + + SXE2_CMD_FW_DOWNLOAD = SXE2_MK_CMD(SXE2_CMD_MODULE_UPGRADE, 1), + SXE2_CMD_FW_DOWNLOAD_PRE, + SXE2_CMD_FW_DOWNLOAD_OPEN, + SXE2_CMD_FW_DOWNLOAD_FLASH, + SXE2_CMD_FW_DOWNLOAD_CLOSE, + SXE2_CMD_FW_DOWNLOAD_END, + + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_QUEUE_STATS_MAP, 1), + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_POOL_SET, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_GET, + SXE2_CMD_TXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXLAN_QUEUE_STATS_MAP_INFO_CLEAR, + SXE2_CMD_RXQUEUE_STATS_MAP_RES_REL, + SXE2_CMD_TXQUEUE_STATS_MAP_RES_REL, + + SXE2_CMD_ETHTOOL_LOOPBACK_SET = SXE2_MK_CMD(SXE2_CMD_MODULE_ETHTOOL, 1), + + SXE2_CMD_FLM_INIT = SXE2_MK_CMD(SXE2_CMD_MODULE_FLM, 1), + SXE2_CMD_FLM_LINK_UP, + SXE2_CMD_FLM_LINK_DOWN, + SXE2_CMD_FLM_FEC_GET, + SXE2_CMD_FLM_FEC_SET, + SXE2_CMD_FLM_AN_SET, + SXE2_CMD_FLM_LINK_INFO_SET, + SXE2_CMD_FLM_LINK_INFO_GET, + SXE2_CMD_FLM_LINK_STATUS_SET, + SXE2_CMD_FLM_LINK_STATUS_SYNC, + SXE2_CMD_TEST_LINK_STATUS, + SXE2_CMD_FLM_LINK_UP_DOWN_SET, + + SXE2_CMD_SFP_WHITE_CFG = SXE2_MK_CMD(SXE2_CMD_MODULE_SFP, 1), + SXE2_CMD_SFP_TX_FAULT_CFG, + SXE2_CMD_SFP_SET_FEC_CFG, + SXE2_CMD_SFP_GET_FEC_CFG, + SXE2_CMD_SFP_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_GET_LINKINFO_CFG, + SXE2_CMD_ETHTOOL_SET_LINKINFO_CFG, + SXE2_CMD_PERSIST_GET_LINK_CFG, + SXE2_CMD_SUPPORT_SPEED_GET_CFG, + SXE2_CMD_CURRENT_SPEED_STATUS_GET_CFG, + SXE2_CMD_CURRENT_SPEED_GET_CFG, + SXE2_CMD_SFP_SET_LINK_CFG, + SXE2_CMD_GET_LINKST_CFG, + SXE2_CMD_GET_VENDOR_INFO_CHECK_WARNING, + SXE2_CMD_GET_OPT_DATA_INFO, + + SXE2_CMD_CLI_READ_REG = SXE2_MK_CMD(SXE2_CMD_MODULE_RWREG, 1), + SXE2_CMD_CLI_WRITE_REG, + + SXE2_CMD_UDPTUNNEL_ADD = SXE2_MK_CMD(SXE2_CMD_MODULE_UDPTUNEEL, 1), + SXE2_CMD_UDPTUNNEL_DEL, + SXE2_CMD_UDPTUNNEL_GET, + + SXE2_CMD_BFD_INTRQ_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_BFD, 1), + SXE2_CMD_BFD_CAPA_GET, + SXE2_CMD_BFD_CFG_SET, + SXE2_CMD_BFD_SESS_CFG_SET, + SXE2_CMD_BFD_SESS_STATE_GET, + SXE2_CMD_BFD_PACK_PROF_SET, + SXE2_CMD_BFD_FLOW_RULE_SET, + SXE2_CMD_BFD_KEYLEN_SET, + + SXE2_CMD_NCD_CORE_NUM = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD, 1), + SXE2_CMD_NCD_CORE_FS_QUEUE_SET, + SXE2_CMD_NCD_CORE_FS_QUEUE_GET, + + SXE2_CMD_NCD_UDF_CAPA_GET = SXE2_MK_CMD(SXE2_CMD_MODULE_NCD_UDF, 1), + + SXE2_CMD_NCD_SWITCH_TAG_EN, + 
SXE2_CMD_NCD_SWITCH_TAG_SET, + SXE2_CMD_NCD_SWITCH_TAG_GET, + SXE2_CMD_NCD_TXLEN_ADJ_SET, + SXE2_CMD_NCD_TXLEN_ADJ_GET, + + SXE2_CMD_NCD_SDF_EN, + SXE2_CMD_NCD_SDF_SET, + SXE2_CMD_NCD_SDF_GET, + + SXE2_CMD_NCD_SDN_UDP_ADD, + SXE2_CMD_NCD_SDN_UDP_DEL, + SXE2_CMD_NCD_SDN_UDP_QUERY, + SXE2_CMD_NCD_SDN_ADD, + SXE2_CMD_NCD_SDN_DEL, + SXE2_CMD_NCD_SDN_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_QUERY, + SXE2_CMD_NCD_SDN_IPSEC_UDP_ADD, + SXE2_CMD_NCD_SDN_IPSEC_UDP_DEL, + + SXE2_CMD_NCD_APP_PORT_SET, + + SXE2_CMD_NCD_PKT_PRI_SET, + + SXE2_CMD_ACL_LUT_ALLOC = SXE2_MK_CMD(SXE2_CMD_MODULE_ACL, 1), + SXE2_CMD_ACL_LUT_DEALLOC, + SXE2_CMD_ACL_PROF_SEL_BASE_SET, + SXE2_CMD_ACL_SCEN_ALLOC, + SXE2_CMD_ACL_SCEN_DEALLOC, + SXE2_CMD_ACL_LUT_ENTRY_SET, + SXE2_CMD_ACL_ACT_ENTRY_SET, + SXE2_CMD_ACL_TRACE_TRIGGER, + SXE2_CMD_ACL_TRACE_RECORDER, + SXE2_CMD_ACL_DFX_INFO_GET, + + SXE2_CMD_MAX = 0xFFFF, +}; + +enum sxe2_drv_event_code { + SXE2_EVENT_CODE_INVAL = 0, + SXE2_EVENT_CODE_AUTO_LOG, + SXE2_EVENT_CODE_MIB_NOTIFY, + SXE2_EVENT_CODE_SFP_WHITE_LIST, + SXE2_EVENT_CODE_SFP_TX_FAULT, + SXE2_EVENT_CODE_QSFP_TX_FAULT_COUNT, + SXE2_EVENT_CODE_LLDP_AGENT_NOTIFY, + + SXE2_EVENT_CODE_MAX, + SXE2_EVENT_CODE_ALL = 255, +}; + +enum sxe2_desc_err_code { + SXE2_CMD_DESC_ERR_NONE = 0, + SXE2_CMD_DESC_ERR_DES_ERR, + SXE2_CMD_DESC_ERR_BUF_ERR, + SXE2_CMD_DESC_ERR_BUF_NUM_ERR, + SXE2_CMD_DESC_ERR_SRC_BUSY, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK, + SXE2_CMD_DESC_ERR_DATA_LEN_LACK2, + SXE2_CMD_DESC_ERR_SESSION_BUFFER_OV, + SXE2_CMD_DESC_ERR_CMD_BUFFER_OV, + SXE2_CMD_DESC_ERR_IN_OUT_LEN_LACK, + SXE2_CMD_DESC_ERR_UNKNOW_OPCODE, + SXE2_CMD_DESC_ERR_UNKNOW_CMD_TYPE, + SXE2_CMD_DESC_ERR_ADMINQ_STATE, + SXE2_CMD_DESC_ERR_FIND_JOB, + SXE2_CMD_DESC_ERR_NONE_START, + SXE2_CMD_DESC_ERR_JOB_DELIVERY, + SXE2_CMD_DESC_ERR_PF_FLR, + SXE2_CMD_DESC_ERR_OVER_FLOW, + SXE2_CMD_DESC_ERR_SEQ_ERR, + SXE2_CMD_DESC_ERR_NR, +}; + +enum sxe2_cmd_drv_err_code { + SXE2_CMD_DRV_SUCCESS = 0, + SXE2_CMD_DRV_HW_OP_ERR = 1024, + + SXE2_CMD_DRV_NO_FREE_VSI, + SXE2_CMD_DUMP_LOG_FAILED, + + SXE2_CMD_DRV_RXQ_CFG_FAIL, + SXE2_CMD_DRV_TXQ_EN_FAIL, + SXE2_CMD_DRV_TXQ_DISA_FAIL, + + SXE2_CMD_DRV_PFR_FAILED, + SXE2_CMD_DRV_VFR_FAILED, + SXE2_CMD_DRV_PARAM_INVALID, + SXE2_CMD_DRV_HW_RETURN, + SXE2_CMD_DRV_HW_TIMEOUT, + SXE2_CMD_DRV_HW_MISMATCH, + SXE2_CMD_DRV_HW_NOSPC, + SXE2_CMD_DRV_HW_EXIST, + SXE2_CMD_DRV_HW_HID_EXIST, + SXE2_CMD_DRV_HW_NOENT, + SXE2_CMD_DRV_FW_NOMEM, + SXE2_CMD_DRV_HW_NO_RES, + SXE2_CMD_DRV_TLV_ERROR, + SXE2_CMD_DRV_DCB_ERROR, + SXE2_CMD_DRV_LINK_REBUILD_FAILED, + + SXE2_CMD_DRV_UNSUPPORT, + SXE2_CMD_DRV_TXSCHED_CFG_FAILED, + SXE2_CMD_DRV_TXSCHED_TIMEOUT, + SXE2_CMD_DRV_TXSCHED_TEID_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_CHILDIDX_ALLOC_FAILED, + SXE2_CMD_DRV_TXSCHED_ALLOC_FAILED, + + SXE2_CMD_DRV_UDP_TUNNEL_WRONG_PORT, + + SXE2_CMD_DRV_NCD_UNSUPPORT, + SXE2_CMD_DRV_BFD_INTQ_NOP, + SXE2_CMD_DRV_BFD_FLOW_NOSPC, + SXE2_CMD_DRV_BFD_FLOW_HT_COLLISION, + + SXE2_CMD_DRV_LINK_UPDATE_FAILED, + SXE2_OPT_DEV_BUSY, + +}; + +enum sxe2_fwc_mapping_mode { + SXE2_MAPPING_CONTIG = 0, + SXE2_MAPPING_SCATTER, +}; + +enum sxe2_fwc_vsi_type_hw { + SXE2_VSI_HW_T_VF = 0, + SXE2_VSI_HW_T_VMDQ2 = 1, + SXE2_VSI_HW_T_PF = 2, + SXE2_VSI_HW_T_MNG = 3, +}; + +enum sxe2_cmd_buffer_st { + SXE2_CMD_BUFFER_ST_NORMAL = (s16)0, + SXE2_CMD_BUFFER_ST_OVERFLOW, + SXE2_CMD_BUFFER_ST_SEQ_ERR, + SXE2_CMD_BUFFER_ST_NR, +}; + +enum sxe2_cmd_queue_stats_map_add { + SXE2_CMD_QUEUE_STATS_MAP_ADD_SUCCEED = 0, + SXE2_CMD_QUEUE_STATS_MAP_ADD_FAIL = 1, +}; + +enum sxe2_weight_type { + SXE2_UNKNOWN_TYPE = 0, + 
SXE2_CIR_WEIGHT, + SXE2_PIR_WEIGHT, +}; + +struct sxe2_cmd_desc { + __le16 flags; + __le16 opcode; + __le16 data_len; + __le16 ret; + u8 checksum; + u8 rsvd[3]; + __le32 custom1; + __le32 custom2; + __le32 custom3; + __le32 buf_addr_h; + __le32 buf_addr_l; +}; + +#define SXE2_CMD_HDR_MULTI_END BIT(6) +#define SXE2_CMD_HDR_MULTI_START BIT(7) +#define SXE2_CMD_HDR_MULTI_CMD_ID_MASK 0x3F +struct sxe2_cmd_hdr { + __le32 magic_code; + __le16 tran_in_len; + __le16 tran_out_len; + __le16 hdr_len; + u8 cmd_type; + u8 multi_packet; + + __le64 trace_id; + __le64 session_id; + __le32 ret; + __le32 timeout; + u8 no_resp; + u8 resv1; + __le16 cur_in_len; + u8 resv[24]; + u8 body[]; +}; + +struct sxe2_drv_msg_hdr { + __le32 op_code; + __le32 err_code; + __le32 data_offset; + __le32 data_len; + __le16 vf_id; + u8 mac_id; + u8 mac_id_valid; + u8 resv[12]; + u8 body[]; +}; + +struct sxe2_channel_handshake_req { + __le32 drv_ver; + u8 drv_mode; + u8 resv[3]; + __le64 timestamp; +}; + +struct sxe2_channel_handshake_resp { + __le32 fw_ver; +}; + +struct sxe2_fwc_serial_num_resp { + u8 serial_num[SXE2_SERIAL_NUM_LEN]; +}; + +struct sxe2_fwc_drv_mode_resp { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_drv_mode_req { + u8 drv_mode; + u8 reserve[3]; +}; + +struct sxe2_fwc_vf_caps { + __le16 cnt; + __le16 base_idx; + u8 sriov_cap; + u8 resv[27]; +}; + +struct sxe2_fwc_queue_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_msix_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_vsi_caps { + __le16 cnt; + __le16 base_idx; + u8 resv[28]; +}; + +struct sxe2_fwc_ppe_caps { + __le16 rss_lut_size; + __le16 fnav_space_bsize; + __le16 fnav_space_gsize; + __le16 fnav_counter_base; + __le16 fnav_counter_num; + __le16 bfd_sess_size; + __le16 rss_global_lut_base; + __le16 rss_global_lut_num; + u8 resv[16]; +}; + +struct sxe2_dev_common_caps { + u8 rdma_support; + u8 ipsec_support; + u8 macsec_support; + u8 rss_support; + u8 fnav_support; + u8 acl_support; + u8 switch_support; + u8 bfd_support; + u8 sdn_support; + u8 sdf_support; + u8 core_fs_support; + u8 switch_tag_support; + u8 mac_ts_support; + u8 resv[19]; +}; + +struct sxe2_fwc_dev_caps { + u8 pf_cnt; + u8 port_cnt; + __le16 vf_cnt; + struct sxe2_dev_common_caps dev_common_caps; + u8 pad[92]; +}; + +struct sxe2_common_caps { + u8 vmdq_support; + u8 ptp_owner; + u8 resv[30]; +}; + +struct sxe2_fwc_func_caps { + struct sxe2_fwc_vf_caps vf_caps; + struct sxe2_fwc_queue_caps tx_caps; + struct sxe2_fwc_queue_caps rx_caps; + struct sxe2_fwc_msix_caps msix_caps; + struct sxe2_fwc_vsi_caps vsi_caps; + struct sxe2_fwc_ppe_caps ppe_caps; + struct sxe2_common_caps common_caps; + u8 pf_idx; + u8 port_idx; + u8 mode; + u8 resv; +}; + +struct sxe2_fwc_sw_cfg_entry { + __le16 type; + __le16 idx; + __le16 sw_id; + __le16 pf_vf_id; + u8 resv[8]; +}; + +struct sxe2_fwc_phy_port_info { + u8 mac_to_phy_port[SXE2_MAC_NUM]; +}; + +struct sxe2_fwc_sw_cfg { + __le16 count; + __le16 remain; + struct sxe2_fwc_sw_cfg_entry caps_entry[]; +}; + +struct sxe2_fwc_tc_rxq_info { + __le16 pow; + __le16 offset; +}; + +struct sxe2_fwc_vsi_q_info { + u8 mapping_mode; + u8 resv[7]; + __le16 cnt; + u8 resv1[6]; + union { + __le16 base_idx; + __le16 q_id[SXE2_VSI_SCATTER_TXRX_Q_MAX_CNT]; + }; + + struct sxe2_fwc_tc_rxq_info tc_q_map[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_fnav_info { + u8 fnav_enable; + u8 auto_evict; + u8 prog_enable; + u8 rsv0[1]; + __le16 gsize; + __le16 bsize; +}; + +struct sxe2_fwc_vsi_props { + u8 rxq_valid; + u8 
rsv[31]; + struct sxe2_fwc_vsi_q_info rxq_info; + struct sxe2_fwc_vsi_q_info txq_info; + struct sxe2_fwc_vsi_fnav_info fnav_info; + +}; + +struct sxe2_fwc_vsi_crud_info { + __le16 vsi_id; + __le16 vf_id; + u8 type; + u8 is_clear; + u8 resv[10]; + struct sxe2_fwc_vsi_props props; +}; + +struct sxe2_fwc_vsi_crud_resp { + __le16 vsi_id; + u8 resv[14]; +}; + +struct sxe2_fwc_ena_txq_entry { + __le16 q_id; + u8 resv[2]; + u8 txq_ctxt[SXE2_TXQ_CTXT_LEN]; +}; + +struct sxe2_fwc_ena_txqs { + __le16 cnt; + u8 resv[14]; + struct sxe2_fwc_ena_txq_entry txq[]; +}; + +struct sxe2_fwc_dis_txqs { + __le16 cnt; + u8 resv[2]; + __le16 q_id[]; +}; + +#define SXE2_TXSCHED_PROFIDX_INVALID U16_MAX +#define SXE2_TXSCHED_TEID_INVALID 0x7FFF +#define SXE2_TXSHCED_HW_DEFT_LAYER 2 +#define SXE2_TXSCHED_NODE_CHILD_MAX 8 + +#define SXE2_TXSCHED_MIN_BW 500 +#define SXE2_TXSCHED_MAX_BW 100000000 +#define SXE2_TXSCHED_BW_50G 50000000 +#define SXE2_TXSCHED_BW_25G 25000000 +#define SXE2_TXSCHED_BW_10G 10000000 + +#define SXE2_TXSCHED_DFLT_BW 0xFFFFFFFF +#define SXE2_TXSCHED_CLK_FREQ 500000000 +#define SXE2_TXSCHED_ARB_CREDIT_TOTAL 32768 +#define SXE2_TXSCHED_ARB_CREDIT_UNIT 328 +#define SXE2_TXSCHED_ARB_CREDIT_DFLT SXE2_TXSCHED_ARB_CREDIT_TOTAL + +#define SXE2_NODE_RL_TYPE_CIR BIT(0) +#define SXE2_NODE_RL_TYPE_EIR BIT(1) +#define SXE2_NODE_RL_TYPE_SRL BIT(2) + +#define SXE2_NODE_ARB_MODE_BPS 0 +#define SXE2_NODE_ARB_MODE_PPS 1 + +#define SXE2_NODE_STATUS_ENABLE 0x0 +#define SXE2_NODE_STATUS_SUSPEND 0x1 + +#define SXE2_TXSCHED_DFLT_RL_PROF_ID 0 + +enum sxe2_txsched_node_owner { + SXE2_TXSCHED_NODE_OWNER_LAN = 0, + SXE2_TXSCHED_NODE_OWNER_RDMA, + SXE2_TXSCHED_NODE_OWNER_USER, + SXE2_TXSCHED_NODE_OWNER_UNKOWN, +}; + +enum sxe2_txsched_hw_layer { + SXE2_TXSCHED_HW_LAYER_UNDEFINED = 0, + SXE2_TXSCHED_HW_LAYER_PORT, + SXE2_TXSCHED_HW_LAYER_TC, + SXE2_TXSCHED_HW_LAYER_SW_ENTRY, + SXE2_TXSCHED_HW_LAYER_4, + SXE2_TXSCHED_HW_LAYER_5, + SXE2_TXSCHED_HW_LAYER_6, + SXE2_TXSCHED_HW_LAYER_7, +}; + +struct sxe2_txsched_generic_props { + u8 layer_max; + + __le32 clk_freq; +}; + +struct sxe2_txsched_layer_props { + u8 hw_layer; + __le16 max_rl_cir_prof; + __le16 max_rl_pir_prof; + __le16 max_rl_srl_prof; +}; + +struct sxe2_fwc_txsched_cap_resp { + struct sxe2_txsched_generic_props generic; + + struct sxe2_txsched_layer_props layer[SXE2_TXSCHED_HW_LAYER_7]; +}; + +struct scbge_txsched_node_bw { + __le32 bw; + __le32 prof_id; + __le16 weight; + __le16 rsv; +}; + +struct sxe2_txsched_node_props { + u8 prio; + u8 status; + u8 arb_mode; + u8 rl_type; + enum sxe2_txsched_hw_layer hw_layer; + struct scbge_txsched_node_bw cir; + struct scbge_txsched_node_bw srlPir; + u8 adj_lvl; + u8 rsv[3]; +}; + +struct sxe2_txsched_node_info { + __le16 parent_teid; + __le16 node_teid; + __le32 sibling_idx; + struct sxe2_txsched_node_props data; +}; + +struct sxe2_fwc_txsched_dflt_topo_resp { + struct sxe2_txsched_node_info node_info[SXE2_TXSHCED_HW_DEFT_LAYER]; +}; + +struct sxe2_txsched_topo_upd_hdr { + __le16 parent_teid; + __le16 node_num; + __le16 start_child_idx; + __le16 rsv; +}; + +struct sxe2_fwc_txsched_del_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_move_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + __le16 teid[]; +}; + +struct sxe2_fwc_txsched_query_node_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_txsched_query_node_resp { + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_txsched_pri_node_cfg_req { + __le16 parent_teid; + __le16 
node_teid; + u8 sibling_idx; + u8 prio; +}; + +struct sxe2_fwc_txsched_weight_node_cfg_req { + __le16 parent_teid; + __le16 node_teid; + u8 sibling_idx; + __le16 weight; + enum sxe2_weight_type type; +}; + +struct sxe2_fwc_txsched_add_nodes_req { + struct sxe2_txsched_topo_upd_hdr hdr; + struct sxe2_txsched_node_info node[]; +}; + +struct sxe2_fwc_txsched_add_nodes_resp { + __le32 add_node_num; + __le16 node_teid[SXE2_TXSCHED_NODE_CHILD_MAX]; + __le16 sibling_idx[SXE2_TXSCHED_NODE_CHILD_MAX]; +}; + +struct sxe2_fwc_txq_stats_map_pool_get_resp { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_pool_get_resp { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_pool_set_req { + u8 hw_index; + u32 cfg_info; +}; + +struct sxe2_fwc_rxq_stats_map_pool_set_req { + u8 hw_pool_idx; + u32 cfg_info; +}; + +struct sxe2_fwc_txq_stats_map_get_info_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_info_clear_req { + u8 hw_index; +}; + +struct sxe2_fwc_rxq_stats_map_info_clear_req { + u8 hw_pool_idx; +}; + +struct sxe2_fwc_txq_stats_map_get_info_resp { + u32 txq_lan_pkt_cnt; + u32 txq_lan_byte_cnt; +}; + +struct sxe2_fwc_rxq_stats_map_get_info_resp { + u64 rxq_lan_in_pkt_cnt; + u64 rxq_lan_in_byte_cnt; + + u64 rxq_fd_in_pkt_cnt; + + u64 rxq_mng_in_pkt_cnt; + u64 rxq_mng_in_byte_cnt; + u64 rxq_mng_out_pkt_cnt; +}; + +struct sxe2_fwc_rxlan_rxq_stats_map_get_info_resp { + u64 rxq_lan_out_pkt_cnt; + u64 rxq_lan_out_byte_cnt; +}; + +struct sxe2_txq_ctxt { + + __le16 q_idx_in_nic; + u8 rsv[2]; + + __le64 base_addr; + + __le16 cgd_idx; + __le16 vmvf_idx; + u8 port_idx; + u8 pf_idx; + u8 vmvf_type; + u8 tsyn_enable; + u8 alt_vlan; + u8 wb_mode; + u8 itr_notify_mode; + u8 legacy_enable; + u8 adv_sso; + u8 rsv1[3]; + + __le16 src_vsi; + __le16 cpuid; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; + u8 rsv2; + + __le16 q_idx_in_func; + u8 rd_desc_ro; + u8 wb_desc_ro; + __le32 qlen; + u8 ptp_en; + u8 rsv3[3]; + + u8 quanta_prof_idx; + + u8 is_tm; + u8 rsv4[2]; +}; + +struct sxe2_txsched_add_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + struct sxe2_txsched_node_info node; +}; + +struct sxe2_fwc_cfg_txq_req { + struct sxe2_txq_ctxt ctxt; + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_st_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; +}; + +struct sxe2_fwc_st_txq_resp { + u8 state; +}; + +struct sxe2_fwc_add_qset_req { + struct sxe2_txsched_add_leaf_req leaf; +}; + +struct sxe2_fwc_add_qset_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_fwc_cfg_txq_resp { + __le16 node_teid; + u8 sibling_idx; +}; + +struct sxe2_txsched_del_leaf_req { + u8 port; + u8 tc; + __le16 txq_idx_in_dev; + __le16 parent_teid; + __le16 sibling_idx; + __le16 node_teid; + __le16 rsv; +}; + +struct sxe2_txsched_rl_profile_pre_query_req { + u8 hw_layer; + u8 prof_type; + __le16 rsv; + u32 bw; +}; + +struct sxe2_txsched_rl_profile_pre_query_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_req { + u8 hw_layer; + u8 prof_type; + __le16 orig_prof_id; + + u32 bw; + __le16 teid; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_rl_resp { + u8 hw_layer; + u8 prof_type; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_req { + u8 hw_layer; + u8 rsv; + __le16 prof_id; + u32 bw; +}; + +struct sxe2_txsched_cfg_profile_srl_resp { + __le16 prof_id; + __le16 rsv; +}; + +struct sxe2_txsched_cfg_node_srl_req { + u8 hw_layer; + u8 attach; + __le16 prof_id; + __le16 teid; + 
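+/*
+ * Editorial note: sxe2_fwc_txsched_add_nodes_resp above returns at most
+ * SXE2_TXSCHED_NODE_CHILD_MAX teids, which suggests an add request is
+ * capped at that many nodes per call. A hedged caller-side sketch:
+ *
+ *	if (node_num > SXE2_TXSCHED_NODE_CHILD_MAX)
+ *		return -EINVAL;		// assumed per-request limit
+ *	len = struct_size(req, node, node_num);
+ */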
__le16 rsv; +}; + +struct sxe2_txsched_tc_node { + __le16 teid; + __le16 parent_teid; + __le16 silbing_idx; + __le16 rsv; +}; + +struct sxe2_txsched_ets_query_rep { + u8 tc_cnt; + u8 rsv[3]; +}; + +struct sxe2_txsched_ets_query_resp { + u8 tc_cnt; + struct sxe2_txsched_tc_node tc_node[SXE2_TC_MAX_CNT]; +}; + +struct sxe2_fwc_del_qset_req { + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_disable_txq_req { + __le16 txq_idx_in_func; + __le16 txq_idx_in_nic; + struct sxe2_txsched_del_leaf_req leaf; +}; + +struct sxe2_fwc_txsched_suspend_node_req { + u8 port; + u8 tc; + __le16 node_teid; + __le16 parent_teid; + u8 child_idx; +}; + +struct sxe2_fwc_txsched_resume_node_req { + u8 port; + u8 tc; + struct sxe2_txsched_node_info node; +}; + +enum sxe2QosMode { + SXE2_QOS_MODE_VLAN = 0, + SXE2_QOS_MODE_DSCP, +}; + +enum sxe2_block_id { + SXE2_HW_BLOCK_ID_SWITCH = 0x1, + SXE2_HW_BLOCK_ID_ACL, + SXE2_HW_BLOCK_ID_RSS, + SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_BFD = SXE2_HW_BLOCK_ID_FNAV, + SXE2_HW_BLOCK_ID_PE, + SXE2_HW_BLOCK_ID_MAX +}; + +enum sxe2_class_id { + SXE2_XLT0_CLASS_ID = 0x1, + SXE2_XLT2_CLASS_ID, + SXE2_EXTRACTOR_CLASS_ID, + SXE2_MAP_CLASS_ID, + SXE2_TCAM_CLASS_ID, + SXE2_RECIPE_CLASS_ID, +}; + +#define SXE2_CFG_ID(block_id, class_id) ((block_id) << 16 | (class_id)) +#define SXE2_CFG_CLASS_ID_MASK (0xFFFF) + +#define SXE2_CFG_GROUP_SIZE SXE2_DRV_CMD_MAX_MSG_SIZE + +enum { + SXE2_SWITCH_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_SWITCH, SXE2_XLT0_CLASS_ID), + SXE2_SWITCH_XLT2_CLASS_ID, + SXE2_SWITCH_EXTRACTOR_CLASS_ID, + SXE2_SWITCH_MAP_CLASS_ID, + SXE2_SWITCH_TCAM_CLASS_ID, + + SXE2_ACL_XLT0_CLASS_ID = SXE2_CFG_ID(SXE2_HW_BLOCK_ID_ACL, SXE2_XLT0_CLASS_ID), +}; + +enum sxe2_udp_tunnel_protocol { + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN = 0, + SXE2_UDP_TUNNEL_PROTOCOL_VXLAN_GPE, + SXE2_UDP_TUNNEL_PROTOCOL_GENEVE, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_C = 4, + SXE2_UDP_TUNNEL_PROTOCOL_GTP_U, + SXE2_UDP_TUNNEL_PROTOCOL_PFCP, + SXE2_UDP_TUNNEL_PROTOCOL_ECPRI, + SXE2_UDP_TUNNEL_PROTOCOL_MPLS, + SXE2_UDP_TUNNEL_PROTOCOL_NVGRE = 10, + SXE2_UDP_TUNNEL_PROTOCOL_L2TP, + SXE2_UDP_TUNNEL_PROTOCOL_TEREDO, + SXE2_UDP_TUNNEL_MAX, +}; + +struct sxe2_cfg_group_hdr { + __le16 class_cnt; + __le16 size; +}; + +struct sxe2_cfg_class { + __le32 class_id; + __le16 offset; + __le16 size; +}; + +struct sxe2_pipeline_group { + struct sxe2_cfg_group_hdr hdr; + struct sxe2_cfg_class class[]; +}; + +struct sxe2_es_fv { + u8 prot_id; + u8 rsv; + __le16 off; +}; + +struct sxe2_vsi_hw_stats { + __le64 rx_vsi_unicast_packets; + __le64 rx_vsi_bytes; + __le64 tx_vsi_unicast_packets; + __le64 tx_vsi_bytes; + __le64 rx_vsi_multicast_packets; + __le64 tx_vsi_multicast_packets; + __le64 rx_vsi_broadcast_packets; + __le64 tx_vsi_broadcast_packets; + __le64 rx_lan_engine_packets; +}; + +struct sxe2_pf_hw_stats { + + __le64 tx_frame_good; + __le64 rx_frame_good; + __le64 rx_crc_errors; + __le64 tx_bytes_good; + __le64 rx_bytes_good; + __le64 tx_multicast_good; + __le64 tx_broadcast_good; + __le64 rx_multicast_good; + __le64 rx_broadcast_good; + __le64 rx_len_errors; + __le64 rx_out_of_range_errors; + __le64 rx_symbol_err; + __le64 rx_pause_frame; + __le64 tx_pause_frame; + + __le64 rx_discards_phy; + __le64 tx_dropped_link_down; + __le64 tx_bytes_good_bad; + __le64 tx_frame_good_bad; + __le64 rx_size_64; + __le64 rx_size_65_127; + __le64 rx_size_128_255; + __le64 rx_size_256_511; + __le64 rx_size_512_1023; + __le64 rx_size_1024_1522; + __le64 rx_size_1523_max; + __le64 rx_illegal_bytes; + __le64 tx_unicast; + __le64 
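+/*
+ * Worked example (editorial): SXE2_CFG_ID() packs the block id into the
+ * upper 16 bits and the class id into the lower 16 bits, so
+ *
+ *	SXE2_SWITCH_XLT0_CLASS_ID
+ *		= SXE2_CFG_ID(SXE2_HW_BLOCK_ID_SWITCH, SXE2_XLT0_CLASS_ID)
+ *		= (0x1 << 16) | 0x1 = 0x00010001
+ *
+ * and because both the anonymous enum and enum sxe2_class_id count up in
+ * steps of one, SXE2_SWITCH_TCAM_CLASS_ID (0x00010005) still decodes as
+ *
+ *	class = id & SXE2_CFG_CLASS_ID_MASK;	// 0x0005
+ *	block = id >> 16;			// SXE2_HW_BLOCK_ID_SWITCH
+ */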
tx_broadcast; + __le64 tx_multicast; + __le64 tx_vlan_packet_good; + __le64 tx_size_64; + __le64 tx_size_65_127; + __le64 tx_size_128_255; + __le64 tx_size_256_511; + __le64 tx_size_512_1023; + __le64 tx_size_1024_1522; + __le64 tx_size_1523_max; + __le64 tx_underflow_error; + __le64 rx_byte_good_bad; + __le64 rx_frame_good_bad; + __le64 rx_unicast_good; + __le64 rx_vlan_packets; + __le64 prio_xoff_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_rx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xoff_tx[SXE2_MAX_USER_PRIORITY]; + __le64 prio_xon_2_xoff[SXE2_MAX_USER_PRIORITY]; + __le64 rx_pause; + __le64 tx_pause; + __le64 rx_undersize_good; + __le64 rx_runt_error; + __le64 rx_oversize_good; + __le64 rx_jabbers; + __le64 rx_oversize_pkts_phy; + + __le64 rx_out_of_buffer; + __le64 rx_qblock_drop; + __le64 rx_discards_ips_phy; + + __le64 rx_pcs_symbol_err_phy; + __le64 rx_corrected_bits_phy; + + __le64 rx_err_lane_0_phy; + __le64 rx_err_lane_1_phy; + __le64 rx_err_lane_2_phy; + __le64 rx_err_lane_3_phy; + __le64 rx_prio_buf_discard[8]; + __le64 fnav_match; + __le64 spoof_mac_packets; + __le64 spoof_vlan_packets; +}; + +#define SXE2_FNAV_INVALID_STATS_IDX (0xFFFF) + +struct sxe2_fwc_pf_stats_req { + __le16 fnav_stats_idx; +}; + +struct sxe2_fwc_pf_stats_resp { + struct sxe2_pf_hw_stats stats; +}; + +struct sxe2_fwc_vsi_stats { + struct sxe2_vsi_hw_stats stats; + __le16 vsi_id; +}; + +struct sxe2_fwc_vsi_stats_req { + __le16 vsi_cnt; + __le16 vsi_ids[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_vsi_stats_resp { + __le16 vsi_cnt; + struct sxe2_fwc_vsi_stats vsi_stats[SXE2_CMD_VSI_STATS_MAX_CNT]; +}; + +struct sxe2_fwc_pxe_req { + u8 ena; +}; + +#define SXE2_EVENT_SUBSCRIBE_MAX_COUNT 32 + +struct sxe2_fwc_event { + u8 count; + u8 rsv[3]; + __le16 code[SXE2_EVENT_SUBSCRIBE_MAX_COUNT]; +}; + +struct sxe2_fwc_fw_log_ack { + __le32 result; +}; + +enum sxe2_default_recipe_id { + SXE2_DEFAULT_RECIPE_MAC = 0, + SXE2_DEFAULT_RECIPE_VLAN, + SXE2_DEFAULT_RECIPE_TX_ETYPE, + SXE2_DEFAULT_RECIPE_RX_ETYPE, + SXE2_DEFAULT_RECIPE_ALLMULTI, + SXE2_DEFAULT_RECIPE_PROMISC, + SXE2_DEFAULT_RECIPE_SRCVSI, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK, + SXE2_DEFAULT_RECIPE_MAC_SPOOFCHK_EXT, + SXE2_DEFAULT_RECIPE_SRCVSI_EXT, + SXE2_DEFAULT_RECIPE_MAX +}; + +union sxe2_switch_full_key_dw0 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv0 : 16; + u32 rid : 6; + u32 rsvd0 : 9; + u32 is_root : 1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 is_root : 1; + u32 rsvd0 : 9; + u32 rid : 6; + u32 fv0 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw1 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv2 : 16; + u32 fv1 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv1 : 16; + u32 fv2 : 16; +#endif + } field; +}; + +union sxe2_switch_full_key_dw2 { + u32 val; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 fv4 : 16; + u32 fv3 : 16; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 fv3: 16; + u32 fv4: 16; +#endif + } field; +}; + +struct sxe2_fwc_switch_rule { + __le16 flag; + __le16 recipe_id; + __le32 act; + __le16 rule_id; + u8 recv[2]; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + u8 add_fkot; + u8 resv2[3]; +}; + +struct sxe2_fwc_switch_vsi_list { + __le16 flag; + __le16 vsi_list_id; + __le16 vsi_cnt; + __le16 vsi[]; +}; + +union sxe2_switch_large_action { + u32 val; + struct { + u32 rsv0 : 8; + u32 valid : 1; + u32 rsv1 : 2; + u32 list : 1; + u32 vsi_list : 10; + u32 fwd_vsi000 : 3; + u32 rsv2 : 8; + } reg; +}; + +struct 
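+/*
+ * Editorial sketch of union sxe2_switch_full_key_dw0 above when
+ * __LITTLE_ENDIAN_BITFIELD is in effect: fv0 occupies bits 0-15, rid
+ * bits 16-21 and is_root bit 31, so building the raw word by hand is
+ * equivalent to filling the bitfields:
+ *
+ *	u32 dw0 = fv0 | ((u32)(rid & 0x3F) << 16) | ((u32)is_root << 31);
+ */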
sxe2_fwc_switch_large_action { + union sxe2_switch_large_action action[SXE2_LARGE_ACTION_COUNT_IN_GROUP]; + __le32 idx; +}; + +struct sxe2_fwc_switch_recipe { + u8 rid :6; + u8 rcp_rsv0 :1; + u8 is_root :1; + u8 lookup_index0 :7; + u8 lookup_index0_valid :1; + u8 lookup_index1 :7; + u8 lookup_index1_valid :1; + u8 lookup_index2 :7; + u8 lookup_index2_valid :1; + u8 lookup_index3 :7; + u8 lookup_index3_valid :1; + u8 lookup_index4 :7; + u8 lookup_index4_valid :1; + u8 join_priority; + u8 priority :3; + u8 need_pass_l2 :1; + u8 allow_pass_l2 :1; + u8 inverse_action :1; + u8 prune_idx :2; + __le32 default_action :19; + __le32 rcp_rsv1 :4; + __le32 default_action_valid:1; + __le32 rcp_rsv2 :8; + __le32 fv4_bitmask :16; + __le32 fv3_bitmask :16; + __le32 fv2_bitmask :16; + __le32 fv1_bitmask :16; + __le32 fv0_bitmask :16; + __le32 rcp_rsv3 :16; + __le16 ref_cnt; +}; + +struct sxe2_fwc_switch_profile_recipe_map { + __le16 profile_id; + __le32 map[2]; +}; + +struct sxe2_fwc_switch_share_id { + __le32 usage; + __le32 share_id[SXE2_MAX_NUM_RECIPES]; + __le32 bitmap[SXE2_MAX_NUM_RECIPES]; +}; + +struct sxe2_fwc_switch_rule_resp { + __le16 index; + u8 resv1[2]; + __le32 act; + __le32 full_key[SXE2_FULLKEY_DWORD_CNT]; + __le16 ref_cnt; + u8 resv2[2]; +}; + +struct sxe2_fwc_switch_vsi_list_resp { + __le16 index; + u8 resv1[2]; + __le32 vsi[SXE2_VSI_LIST_DAT_LEN]; + u8 resv2[4]; +}; + +struct sxe2_fwc_switch_mac_info { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fwc_switch_mac_info_resp { + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2_fw_mtu_info { + __le32 mtu; + u8 is_set_hw; + u8 resv0; + __le16 resv1; +}; + +struct sxe2_fwc_switch_complex_rule { + __le16 flag; + __le32 act; + u8 priority; + u8 resv; + u8 add_fkot; + + __le16 word_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 profile_cnt; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_switch_complex_rule_resp { + __le32 act; + + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_value[SXE2_MAX_CHAIN_WORDS]; + + __le16 recipe_root; + __le16 recipe_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + + __le16 rule_root; + __le16 rule_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_og_trace_rcd { + u8 done; + u8 status; + u8 profile_id; + u8 resv; + __le16 fv[SXE2_SWITCH_FV_CNT]; +}; + +struct sxe2_recp_trace_rcd { + __le16 recipe_id; + u8 ht1_hit; + u8 ht2_hit; + u8 fkot_hit; + u8 kt_hit; + __le16 index; +}; + +struct sxe2_swe_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + struct sxe2_recp_trace_rcd recp[SXE2_PACKET_MAX_RECIPES]; +}; + +struct sxe2_rg_trace_rcd { + u8 done; + u8 status; + u8 resv[2]; + __le32 ppe_info[SXE2_PACKET_INFO_DWORD_CNT]; +}; + +struct sxe2_fwc_switch_trace_req { + u8 is_rx; + u8 resv[3]; +}; + +struct sxe2_fwc_switch_trace_resp { + struct sxe2_og_trace_rcd og; + struct sxe2_swe_trace_rcd swe; + struct sxe2_rg_trace_rcd rg; +}; + +struct sxe2_fwc_recipe_get_req { + __le16 recipe_id; + u8 resv[2]; +}; + +struct sxe2_fwc_recipe_get_resp { + u8 is_root; + u8 priority; + u8 is_inverse; + u8 resv; + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct 
sxe2_fwc_recipe_add_req { + u8 is_root; + u8 priority; + __le16 profile_cnt; + __le16 recipe_cnt; + __le16 lkup_index[SXE2_MAX_CHAIN_WORDS]; + __le16 lkup_mask[SXE2_MAX_CHAIN_WORDS]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +struct sxe2_fwc_recipe_add_resp { + __le16 recipe_cnt; + __le16 recipe_root; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; +}; + +struct sxe2_fwc_recipe_del_req { + __le16 recipe_cnt; + __le16 profile_cnt; + __le16 recipe_id[SXE2_MAX_CHAIN_RECIPE]; + __le16 profile_id[SXE2_MAX_NUM_PROFILES]; +}; + +enum sxe2_switch_dfx_stats_index { + SXE2_SW_DFX_PROFILE_ID_BYPASS = 0, + SXE2_SW_DFX_PROFILE_TCAM_HIT, + SXE2_SW_DFX_PROFILE_TCAM_MISS, + SXE2_SW_DFX_RX_FB_INPUT, + SXE2_SW_DFX_TX_PA_INPUT, + SXE2_SW_DFX_OG_PROCESS_RX, + SXE2_SW_DFX_OG_PROCESS_TX, + SXE2_SW_DFX_OUTPUT_TO_SWE, + SXE2_SW_DFX_OUTPUT_TO_RG, + SXE2_SW_DFX_MEMORY_HT1_IN, + SXE2_SW_DFX_MEMORY_HT1_OUT, + SXE2_SW_DFX_MEMORY_HT2_IN, + SXE2_SW_DFX_MEMORY_HT2_OUT, + SXE2_SW_DFX_MEMORY_KT_IN, + SXE2_SW_DFX_MEMORY_KT_OUT, + SXE2_SW_DFX_SWE_OG_IN, + SXE2_SW_DFX_SWE_TX_IN, + SXE2_SW_DFX_SWE_RX_IN, + SXE2_SW_DFX_SWE_OUTPUT_ACTION, + SXE2_SW_DFX_PIPE_HASH_MISS, + SXE2_SW_DFX_PIPE_HASH_HIT, + SXE2_SW_DFX_PIPE_KT_HIT, + SXE2_SW_DFX_PIPE_HI1_HIT, + SXE2_SW_DFX_PIPE_HI2_HIT, + SXE2_SW_DFX_PIPE_FKOT_HIT, + SXE2_SW_DFX_PIPE_HW_SEARCH_ERR, + + SXE2_SW_DFX_MAX, +}; + +struct sxe2_fwc_switch_dfx_stats { + __le32 stats[SXE2_SW_DFX_MAX]; +}; + +enum sxe2_ipsec_stats_index { + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC0, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC0, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC0, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC0, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC1, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC1, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC1, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC1, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC2, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC2, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC2, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC2, + + SXE2_IPSEC_STATS_TX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_SOP_PKT_CNT_MAC3, + 
SXE2_IPSEC_STATS_TX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_PKTID_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_OVER_2K_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_TX_DIS_DROP_PKT_CNT_MAC3, + + SXE2_IPSEC_STATS_RX_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NOT_SEC_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_TCAM_NOT_MATCH_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_LEN_ERR_PKT_CNT_MAC3, + SXE2_IPSEC_STATS_RX_OVER_2K_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_OK_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_ICV_FAIL_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_NO_BUFFER_DROP_PKTS_CNT_MAC3, + SXE2_IPSEC_STATS_RX_PKTID_DROP_PKTS_CNT_MAC3, + + SXE2_IPSEC_STATS_MAX, +}; + +struct sxe2_ipsec_stats { + __le64 stats[SXE2_IPSEC_STATS_MAX]; +}; + +struct sxe2_fwc_vsi_vlan_filter { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_src_prune { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_loopback { + __le16 vsi_hw_id; + u8 enable; + u8 resv; +}; + +struct sxe2_fwc_vsi_spoofchk { + __le16 vsi_hw_id; + u8 mac_enable; + u8 vlan_enable; +}; + +struct sxe2_fwc_switch_dfx_irq { + u8 enable; + u8 resv[3]; +}; + +struct sxe2_rss_vsi_hctrl { + __le16 vsi_hw_id; + u8 hash_type; + u8 resv; +}; + +struct sxe2_rss_lut_cfg { + __le16 vsi_hw_id; + u8 lut_type; + u8 global_lut_id; + __le16 lut_size; + u8 resv[2]; + u8 lut[]; +}; + +struct sxe2_rss_core_lut_info { + u8 lut[SXE2_RSS_CORE_LUT_SIZE]; +}; + +struct sxe2_rss_hkey_cfg { + __le16 vsi_hw_id; + u8 resv[2]; + u8 key[]; +}; + +struct sxe2_rss_symm_fv { + u8 fv_idx : 5; + u8 rsv : 2; + u8 valid : 1; +}; + +struct sxe2_rss_symm_fv_cfg { + __le16 prof_id; + u8 rsv[2]; + struct sxe2_rss_symm_fv fv[SXE2_RSS_FV_CNT]; +}; + +struct sxe2_rss_trace_recorder { + u8 trace_status0; + u8 rsv0[3]; + __le32 profile_id0; + __le32 fv[SXE2_RSS_FV_TRACE_CNT]; + u8 trace_status1; + u8 rsv1[3]; + __le32 hash1; + u8 trace_status2; + u8 rsv2[3]; + __le32 hash2; + u8 profile_id2; + u8 bad_profile; + __le16 q_index; + u8 thread_id; + u8 rsv3[1]; + __le16 vsi; +}; + +struct sxe2_fwc_xlt2_entry { + __le16 vsi_hw_idx; + __le16 vsig; +}; + +struct sxe2_fwc_tcam_entry { + __le16 addr; + u8 key[SXE2_TCAM_KEY_LEN]; + u8 prof_id; + u8 rsv; +}; + +struct sxe2_fwc_es_entry { + u8 prof_id; + u8 cnt; + struct sxe2_es_fv fv[SXE2_FV_CNT_MAX]; +}; + +struct sxe2_fwc_prof_section { + u8 type; + u8 rsv[1]; + __le16 offset; + __le16 size; +}; + +struct sxe2_fwc_prof_buf { + __le16 entry_cnt; + __le16 data_end; + struct sxe2_fwc_prof_section sect[]; +}; + +struct sxe2_fwc_prof_pkg { + u8 blk; + u8 buf[]; +}; + +struct sxe2_fwc_tcam_idx { + u8 blk; + u8 rsv; + __le16 tcam_idx; +}; + +enum sxe2_fwc_tcam_action { + SXE2_FWC_TCAM_ACTION_ADD, + SXE2_FWC_TCAM_ACTION_DEL, +}; + +struct sxe2_fwc_tcam_info { + u8 action; + __le16 tcam_idx; +}; + +struct sxe2_fwc_tcam_idx_batch { + u8 blk; + u16 tcam_cnt; + struct sxe2_fwc_tcam_info tcam_info[]; +}; + +struct sxe2_fwc_prof_id { + u8 blk; + u8 rsv; + __le16 prof_id; +}; + +struct sxe2_fwc_mask_idx { + u8 blk; + u8 rsv; + __le16 mask_idx; +}; + +struct sxe2_fwc_mask_val { + u8 blk; + u8 rsv; + __le16 mask_idx; + __le16 fv_idx; + __le16 mask; +}; + +struct sxe2_fwc_mask_sel { + u8 blk; + u8 rsv; + __le16 prof_id; + __le32 mask_sel; +}; + +struct sxe2_fwc_fnav_kt_entry { + u8 is_add; + u8 prof_id; + u8 fdid_prio; + u8 toq_prio; + u8 drop; + u8 stat_ena; + u8 to_queue; + u8 inputset[SXE2_FNAV_INPUT_CNT * 2]; + u8 fd_space; + __le16 ori_vsi; + __le16 vsi; + __le16 flow_id; + __le16 qindex; + __le16 stat_cnt; + u8 rsv1[2]; + __le32 fdid; +}; + +struct sxe2_fwc_fnav_kt_resp { + __le32 
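+/*
+ * Editorial note: enum sxe2_ipsec_stats_index above repeats the same
+ * fifteen counters (six TX plus nine RX) once per MAC, so an index can
+ * be computed rather than spelled out -- a hedged helper sketch:
+ *
+ *	// stat is one of the MAC0 enumerators, mac is 0..3
+ *	idx = stat + mac * (SXE2_IPSEC_STATS_TX_PKT_CNT_MAC1 -
+ *			    SXE2_IPSEC_STATS_TX_PKT_CNT_MAC0);	// stride 15
+ */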
gcnt_pf; + __le32 bcnt_global; +}; + +struct sxe2_fnav_hit_info { + + __le32 hit_flg : 4; + __le32 ck1 : 13; + __le32 ht_index1 : 11; + __le32 ck2_3_0 : 4; + + __le32 ck2_12_4 : 9; + __le32 ht_index2 : 11; + __le32 ht1_avl : 8; + __le32 ht2_avl_3_0 : 4; + + __le32 ht2_avl_7_4 : 4; + __le32 kt_index : 15; + __le32 entry_vld : 1; + __le32 qindex : 11; + __le32 stat_0 : 1; + + __le32 stat_13_1 : 13; + __le32 stat_ena : 2; + __le32 evict_ena : 1; + __le32 toqueue : 3; + __le32 toqueue_prio : 3; + __le32 ad_drop : 1; + __le32 fdid_8_0 : 9; + + __le32 fdid_31_9 : 23; + __le32 fdid_prio : 3; + __le32 flow_id_5_0 : 6; + + __le32 flow_id_15_6 : 10; + __le32 ad_fd_vsi : 10; + __le32 gl_space : 1; + __le32 pf_space : 1; + __le32 vsi_space : 1; + __le32 ad2 : 4; + __le32 inset_511_507 : 5; + + __le32 inset_506_475; + + __le32 inset_474_443; + + __le32 inset_442_411; + + __le32 inset_410_379; + + __le32 inset_378_347; + + __le32 inset_346_315; + + __le32 inset_314_283; + + __le32 inset_282_251; + + __le32 inset_250_219; + + __le32 inset_218_187; + + __le32 inset_186_155; + + __le32 inset_154_123; + + __le32 inset_122_91; + + __le32 inset_90_59; + + __le32 inset_58_27; + + __le32 inset_26_0 : 27; + __le32 profile_id_4_0 : 5; + + __le32 profile_id_6_5 : 2; + __le32 rsv0 : 1; + __le32 vsi : 10; + __le32 rsv1 : 6; + __le32 fail_sts : 3; + __le32 cmd : 4; + __le32 thread_id_5_0 : 6; + + __le32 thread_id_6 : 1; + __le32 pf : 3; + __le32 vf_vm : 10; + __le32 function_type : 2; + __le32 bypass_ft : 1; + __le32 pcmd : 2; + __le32 comp_report : 2; + __le32 fd_vsi : 10; + __le32 comp_queue : 1; + + __le32 not_enabled : 1; + __le32 bad_profile_id : 1; + __le32 drop : 1; + __le32 round_drop : 1; + __le32 round_cnt : 4; + __le32 rsv2 : 24; +}; + +struct sxe2_fnav_addition_info { + + __le32 fd_profile_id : 7; + __le32 hit_flg : 4; + __le32 rlt_sel : 2; + __le32 dst_vsi : 10; + __le32 rlt_queue_8_0 : 9; + + __le32 rlt_queue_10_9 : 2; + __le32 rlt_toqueue : 3; + __le32 rlt_toqueue_pri : 3; + __le32 drop : 1; + __le32 cmd : 4; + __le32 bypass_absq : 1; + __le32 fd_search_ena : 1; + __le32 pkt_id : 7; + __le32 deflt_qindx_pri : 3; + __le32 sa_toqueue_pri : 3; + __le32 rsv : 3; + __le32 search_rss_fs_hit : 1; + + __le32 bypass_rss : 1; + __le32 rsv1 : 31; +}; + +struct sxe2_fnav_trace_recorder { + u8 trace_status0; + u8 trace_status1; + struct sxe2_fnav_hit_info hit_info; + struct sxe2_fnav_addition_info addition_info; + u8 trace_status2; +}; + +struct sxe2_fnav_glspace_cnt { + __le32 bcnt; + __le32 gcnt; +}; + +#define SXE2_FNAV_MAX_COUNTER_BANK_NUM (2) + +enum sxe2_fnav_counter_bank_type { + SXE2_FNAV_COUNTER_BANK_0, + SXE2_FNAV_COUNTER_BANK_1, + SXE2_FNAV_COUNTER_BANK_ALL, + SXE2_FNAV_COUNTER_BANK_MAX, +}; + +struct sxe2_fwc_fnav_stats_req { + u8 is_clear; + u8 bank_type; + __le16 counter_idx; +}; + +struct sxe2_fwc_fnav_stats_resp { + __le64 stats[SXE2_FNAV_MAX_COUNTER_BANK_NUM]; +}; + +struct sxe2_fwc_fnav_match_req { + __le16 vsi_id; + __le16 stat_idx; +}; + +struct sxe2_fwc_fnav_match_req_batch { + u8 is_clear; + u8 bank_type; + __le16 stat_cnt; + struct sxe2_fwc_fnav_match_req match_req[]; +}; + +struct sxe2_fwc_fnav_match_rsp_batch { + __le16 stat_cnt; + __le64 fnav_match[]; +}; + +struct sxe2_fwc_fnav_dlft_compq_req { + __le16 vsi_idx_in_dev; + __le16 rxq_idx_in_func; +}; + +struct sxe2_fnav_vsispace_cnt { + __le32 bcnt; + __le32 gcnt; + __le16 vsi_id; +}; + +struct sxe2_fnav_space_cnt { + __le32 bcnt_global; + __le32 gcnt_global; + __le32 bcnt_pf; + __le32 gcnt_pf; + __le32 bcnt_vsi; + __le32 gcnt_vsi; + __le16 
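+/*
+ * Editorial sketch: several fields of struct sxe2_fnav_hit_info above
+ * are split across 32-bit words by the hardware layout and need to be
+ * reassembled by the reader, e.g.:
+ *
+ *	u32 fdid    = (hit->fdid_31_9 << 9) | hit->fdid_8_0;
+ *	u16 flow_id = (hit->flow_id_15_6 << 6) | hit->flow_id_5_0;
+ */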
vsi_id; +}; + +struct sxe2_vf_fnav_clear_ctxt { + __le16 vf_indev; + u8 io_close; +}; + +enum sxe2_rxft_dbg_ppe_info_type { + SXE2_RXFT_PPE_INFO_TX_IN, + SXE2_RXFT_PPE_INFO_TX_EX, + SXE2_RXFT_PPE_INFO_RX_IN, + SXE2_RXFT_PPE_INFO_RX_EX, + SXE2_RXFT_PPE_INFO_LP_IN, + SXE2_RXFT_PPE_INFO_LP_EX, + SXE2_RXFT_PPE_INFO_TYPE_MAX, +}; + +struct sxe2_rxft_ppe_info { + struct { + __le32 data[SXE2_RXFT_PPE_INFO_REG_CNT]; + } info[SXE2_RXFT_PPE_INFO_TYPE_MAX]; +}; + +struct sxe2_rxq_ctxt { + __le64 base_addr; + __le16 depth; + + __le16 dbuff_len; + __le16 hbuff_len; + u8 hsplit_type; + u8 desc_type; + u8 crc_strip; + u8 l2tag1_show; + u8 hsplit_0; + u8 hsplit_1; + u8 inner_vlan_strip; + + u8 lro_enable; + u8 cpuid; + __le16 max_frame_size; + __le16 lro_desc_max; + u8 relax_data; + u8 relax_wb_desc; + u8 relax_rd_desc; + + u8 tphrdesc_enable; + u8 tphwdesc_enable; + u8 tphdata_enable; + u8 tphhead_enable; + + u8 low_desc_waterline; + __le16 vfid; + u8 pfid; + u8 vfen; + __le16 vsi_id; + + u8 pref_enable; + __le16 head; +}; + +struct sxe2_fwc_cfg_rxq_req { + u8 pf_idx; + __le16 idx_in_dev; + struct sxe2_rxq_ctxt rxq_ctxt; +}; + +struct sxe2_fwc_local_mib_set { + __le16 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_local_mib_get { + u8 mib_len; + u8 mib_buffer[]; +}; + +struct sxe2_fwc_fw_agent { + u8 enable; + u8 resv[3]; +}; + +#ifndef FW_LLDP_STATE +#define FW_LLDP_STATE +enum sxe2LldpStatus { + sxe2_lldp_enabled_rx_tx = 0, + sxe2_lldp_enabled_tx_only, + sxe2_lldp_enabled_rx_only, + sxe2_lldp_disabled, +}; +#endif + +struct sxe2_fwc_lldp_fw_agent { + u8 status; + u8 resv[3]; +}; + +struct sxe2_fwc_notify_lldp_fw_agent { + u8 stats; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_stats { + u8 rx_state; + u8 tx_state; + u8 lldp_enable; + u8 admin_status; + __le32 tx_failed; + __le32 tx_frames_out_total; + __le32 tx_lldpdu_length_errors; + __le32 rx_ageouts_total; + __le32 rx_frames_discarded_total; + __le32 rx_frames_in_errors_total; + __le32 rx_frames_in_total; + __le32 rx_tlvs_discarded_total; + __le32 rx_tlvs_unrecognized_total; +}; + +struct sxe2_fwc_lldp_mibs_info { + u8 count; + u8 resv[3]; +}; + +struct sxe2_fwc_lldp_mibs_dump_req { + u8 index; + u8 resv[3]; +}; + +struct sxe2_lldp_mibs_tl { + __le16 offset; + __le16 length; +}; + +struct sxe2_lldp_mibs_ets { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioTable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[SXE2_MAX_TRAFFIC_CLASS]; + u8 tsatable[SXE2_MAX_TRAFFIC_CLASS]; +}; + +struct sxe2_lldp_mibs_pfc { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcena; +}; + +struct sxe2_lldp_mibs_app { + __le16 protId; + u8 priority; + u8 selector; +}; + +struct sxe2_fwc_lldp_mibs_dump_resp { + u8 index; + u8 resv1[3]; + u8 buffer[SXE2_LLDP_FRAME_MAX_SIZE]; + __le16 size; + u8 num_apps; + u8 resv2[3]; + struct sxe2_lldp_mibs_ets ets_cfg; + struct sxe2_lldp_mibs_ets ets_rec; + struct sxe2_lldp_mibs_pfc pfc_cfg; + struct sxe2_lldp_mibs_app app_cfg[SXE2_DCBX_MAX_APPS]; +}; + +enum sxe2FlowCtrlMode { + SXE2_FC_MODE_DISABLE, + SXE2_FC_MODE_LFC, + SXE2_FC_MODE_PFC, + SXE2_FC_MDDE_COUNT, +}; + +struct sxe2_fwc_lfc_info { + u8 rx_en; + u8 tx_en; + u8 tc_num; + u8 fc_mode; + __le32 port_size; + __le32 high_water[SXE2_MAX_TRAFFIC_CLASS]; + __le32 low_water[SXE2_MAX_TRAFFIC_CLASS]; + __le16 pause_time[SXE2_MAX_TRAFFIC_CLASS]; + u8 priority; + u8 resv1; +}; + +struct sxe2_mdd_vf_req { + __le16 vf_idx; + u8 q_mapping_mode; + u8 reserve; +}; + +struct sxe2_fwc_mdd_req { + __le16 vf_cnt; + u8 mdd_check; + u8 reserve; + struct sxe2_mdd_vf_req vfs[]; +}; + +struct sxe2_mdd_vf_resp { + 
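+/*
+ * Editorial sketch (hedged; the opcode name and send helper here are
+ * hypothetical): the LLDP MIB dump above looks index-based -- query the
+ * entry count first, then fetch entries one at a time:
+ *
+ *	...get struct sxe2_fwc_lldp_mibs_info info...
+ *	for (i = 0; i < info.count; i++) {
+ *		req.index = i;
+ *		sxe2_send_cmd(SXE2_OP_LLDP_MIBS_DUMP, &req, sizeof(req),
+ *			      &resp, sizeof(resp));
+ *	}
+ */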
__le16 vf_idx; + u8 mdd; + u8 reserve; +}; + +struct sxe2_fwc_mdd_resp { + __le32 vf_mdd_tx_event; + __le32 pf_mdd_tx_event; + + u8 vf_mdd_rx_event; + u8 pf_mdd_rx_event; + + __le16 mdd_vf_cnt; + struct sxe2_mdd_vf_resp mdd_vfs[]; +}; + +struct sxe2_fwc_ptp_filter_addr { + u8 filter_type; + __le32 ipv4; + __le32 ipv6[4]; + __le32 mac[2]; +}; + +struct sxe2_fwc_ptp_correction { + __le32 ingress_corr_nanosec; + __le32 ingress_corr_subnanosec; + __le32 egress_corr_nanosec; + __le32 egress_corr_subnanosec; + __le32 ingress_sync_corr; + __le32 egress_sync_corr; +}; + +struct sxe2_fwc_ptp_init_req { + u8 sample_type; + u8 threshold; + struct sxe2_fwc_ptp_filter_addr filter_addr; + struct sxe2_fwc_ptp_correction corr; +}; + +enum sxe2_rx_timestamp_mode { + SXE2_RX_TIMESTAMP_MODE_PTP = 0, + SXE2_RX_TIMESTAMP_MODE_ALL_1024, + SXE2_RX_TIMESTAMP_MODE_ALL_2048, + SXE2_RX_TIMESTAMP_MODE_ALL_4096, + SXE2_RX_TIMESTAMP_MODE_ALL_8192, + SXE2_RX_TIMESTAMP_MODE_ALL_16384, + SXE2_RX_TIMESTAMP_MODE_MAX, +}; + +struct sxe2_fwc_ptp_mode_set_req { + u8 mode; +}; + +#define SXE2_IPSEC_KEY_LEN (32) +#define SXE2_IPV6_ADDR_LEN (4) +struct sxe2_fwc_ipsec_txsa_add_req { + __le32 mode; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_set_req { + __le32 mode; + __le32 sa_index; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_txsa_add_resp { + __le16 index; +}; + +struct sxe2_fwc_ipsec_rxsa_add_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 is_over_sdn; + u8 sdn_group_id; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_set_req { + __le32 mode; + __le32 spi; + __le32 ipaddr[SXE2_IPV6_ADDR_LEN]; + __le32 sa_index; + __le32 udp_port; + u8 sport_en; + u8 dport_en; + u8 encrypt_keys[SXE2_IPSEC_KEY_LEN]; + u8 auth_keys[SXE2_IPSEC_KEY_LEN]; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_add_resp { + u8 ip_id; + u8 udp_group_id; + __le16 sa_idx; +}; + +struct sxe2_fwc_ipsec_txsa_del_req { + __le16 sa_idx; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_rxsa_del_req { + u8 ip_id; + u8 group_id; + __le16 sa_idx; + __le32 spi; + u8 func_type; + u8 func_id; + u8 drv_id; +}; + +struct sxe2_fwc_ipsec_switch_op_req { + u8 dir; + u8 op; + __le16 mac_id; +}; + +struct sxe2_fwc_ipsec_drv_clr_req { + u8 func_type; + u8 func_id; + u8 drv_id; + u8 rsv; +}; + +#define SXE2_IPSEC_WDRR_COUNT (4) +struct sxe2_fwc_ipsec_wdrr_req { + __le16 tx_wdrr[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_iqm[SXE2_IPSEC_WDRR_COUNT]; + __le16 rx_wdrr_oqm[SXE2_IPSEC_WDRR_COUNT]; +}; + +struct sxe2_fwc_ipsec_capa_resq { + __le16 tx_sa_cnt; + __le16 rx_sa_cnt; + __le16 ip_id_cnt; + __le16 udp_group_cnt; +}; + +#define MACSEC_PN_LEN_MAX (2) +#define MACSEC_SALT_COUNT (3) +#define MACSEC_KEY_LEN (4) + +enum sxe2_macsec_validate_mode { + SXE2_MACSEC_VALIDATE_DISABLED = 0, + SXE2_MACSEC_VALIDATE_CHECK = 1, + SXE2_MACSEC_VALIDATE_STRICT = 2, + SXE2_MACSEC_VALIDATE_END, +}; + +struct sxe2_fw_macsec_sa { + u8 active; + u8 an_value; + __le32 pn[MACSEC_PN_LEN_MAX]; + __le32 ssci; + __le32 salt[MACSEC_SALT_COUNT]; + __le32 key[MACSEC_KEY_LEN]; +}; + +struct sxe2_fw_macsec_txsc { + u8 active; + u8 xpn; + u8 aisci; + u8 es; + u8 encrypt; + __le64 
sci; +}; + +struct sxe2_fw_macsec_rxsc { + u8 active; + u8 xpn; + u8 protect; + u8 validate_mode; + __le64 sci; +}; + +struct sxe2_fwc_ddp_state { + u8 act_pfid; + u8 pad; + __le16 ver; + __le32 state; +}; + +struct flm_link_cap { + u32 speed; + u32 fecMode; + + u8 an; + u8 lscEn; +}; + +enum sxe2_fec_mode { + SXE2_ETHTOOL_FEC_NONE = 0, + SXE2_ETHTOOL_FEC_OFF = 1, + SXE2_ETHTOOL_FEC_BASER = 2, + SXE2_ETHTOOL_FEC_RS = 3, + SXE2_ETHTOOL_FEC_AUTO = 15, + SXE2_ETHTOOL_FEC_MAX, +}; + +enum flm_link_speed_fec { + FEC_MOD_UNNKOW = 0x0, + FEC_MOD_10G = 0x7, + FEC_MOD_50G = 0xC, + FEC_MOD_25G = 0xF, + FEC_MOD_100G = 0XD, +}; + +enum flm_link_speed_info { + FEC_MOD_SPEED_UNNKOW = 0x0, + FEC_MOD_SPEED_10G = 0x2, + FEC_MOD_SPEED_25G = 0x4, + FEC_MOD_SPEED_50G = 0x8, + FEC_MOD_SPEED_100G = 0X10, +}; + +enum sxe2_speed_mode { + SXE2_ETHTOOL_SPEED_UNKNOWN = 0, + SXE2_ETHTOOL_SPEED_10GB = 10000, + SXE2_ETHTOOL_SPEED_25GB = 25000, + SXE2_ETHTOOL_SPEED_50GB = 50000, + SXE2_ETHTOOL_SPEED_100GB = 100000, + SXE2_ETHTOOL_SPEED_AUTO = 200000, +}; + +struct flm_link_fec_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct configure_fc { + u8 rx_en; + u8 tx_en; + u8 resv[2]; +}; + +struct configure_an { + u32 port; + u32 speed; + u32 fec_mode; + u32 lt_en; + struct configure_fc fc_mode; + u32 an_en; +}; + +enum sxe2_fw_connect_mode { + SXE2_FW_CONNECT_MODE_DAC, + SXE2_FW_CONNECT_MODE_AOC, + SXE2_FW_CONNECT_MODE_TRANSCEIVER, + SXE2_FW_CONNECT_MODE_BACKPLANE, + SXE2_FW_CONNECT_MDDE_UNKNOW, +}; + +enum flm_link_speed { + FLM_FW_SPEED_10G = 0, + FLM_FW_SPEED_25G = 1, + FLM_FW_SPEED_50G = 2, + FLM_FW_SPEED_100G = 3, + FLM_FW_SPEED_AUTO = 15, + FLM_FW_SPEED_MAX +}; + +struct flm_link_ret { + u32 speed; + u32 module_type; + u32 link_status; + u32 fec_mode; + struct configure_fc fc_mode; +}; + +struct ethtool_flm_link_info { + s32 speed; + u32 link_status; +}; + +struct flm_link_info_pasist { + u8 speed; + u8 link_status; + u8 fec_mode; + u8 resv; + struct configure_fc fc_mode; +}; + +struct sxe2_fw_loop_back_config { + u8 enable; + u8 resv[3]; +}; + +struct flm_link_info { + u32 port_num; + u32 is_link_up; + u32 module_type; + u32 is_an_enable; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct flm_link_config { + u32 port_num; + u32 speed; + u32 fec; + u32 port; +}; + +struct flm_link_result { + u8 result; + u8 resv[3]; + u32 port; +}; + +struct flm_ethtool_get_link_req { + u32 port_num; +}; + +enum sxe2_support_fec_mode { + SXE2_SUPPORTR_FEC_NONE = 0, + SXE2_SUPPORTR_FEC_BASER = 1, + SXE2_SUPPORTR_FEC_RS = 2, + SXE2_SUPPORTR_FEC_AUTO = 15, + SXE2_SUPPORTR_FEC_MAX, +}; + +enum sxe2_advertis_fec_mode { + SXE2_ADVERTIS_FEC_NONE = 0, + SXE2_ADVERTIS_FEC_BASER = 1, + SXE2_ADVERTIS_FEC_RS = 2, + SXE2_ADVERTIS_FEC_AUTO = 15, + SXE2_ADVERTIS_FEC_MAX, +}; + +enum sxe2_support_speed_duplex_mode { + SXE2_SUPPORTR_SPEED_10G, + SXE2_SUPPORTR_SPEED_25G, + SXE2_SUPPORTR_SPEED_50G, + SXE2_SUPPORTR_SPEED_100G, + SXE2_SUPPORTR_SPEED_AUTO = 15, + SXE2_SUPPORTR_SPEED_MAX, +}; + +enum sxe2_support_duplex { + HALF_DUPLEX = 0, + FULL_DUPLEX = 1, + MAX_DUPLEX, +}; + +enum sxe2_duplex_type { + CURRENT_HALF_DUPLEX = 0, + CURRENT_FULL_DUPLEX = 1, + CURRENT_MAX_DUPLEX, +}; + +enum sxe2_support_media_type { + SXE2_MEDIA_UNKNOWN = 0, + SXE2_MEDIA_FIBER, + SXE2_MEDIA_BASET, + SXE2_MEDIA_BACKPLANE, + SXE2_MEDIA_DA, +}; + +enum sxe2_support_pause_frame { + SCGEB_EN_TX_LINK_PAUSE, + SCGEB_EN_RX_LINK_PAUSE, + SCGEB_EN_TX_RX_LINK_PAUSE, + SCGEB_DIS_EN_LINK_PAUSE, +}; + +enum sxe2_an_status { + SXE2_AN_ENABLE = 0, + 
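+/*
+ * Editorial sketch: enum sxe2_speed_mode above encodes speeds directly
+ * in Mb/s (the ethtool convention), so translating a firmware
+ * flm_link_speed presumably reduces to a small lookup table:
+ *
+ *	static const u32 fw_to_mbps[] = {
+ *		[FLM_FW_SPEED_10G]  = SXE2_ETHTOOL_SPEED_10GB,
+ *		[FLM_FW_SPEED_25G]  = SXE2_ETHTOOL_SPEED_25GB,
+ *		[FLM_FW_SPEED_50G]  = SXE2_ETHTOOL_SPEED_50GB,
+ *		[FLM_FW_SPEED_100G] = SXE2_ETHTOOL_SPEED_100GB,
+ *	};
+ */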
SXE2_AN_TRANSMIT_DISABLE = 1, + SXE2_AN_ABILITY_DETECT = 2, + SXE2_AN_ACKNOWLEDGE_DETECT = 3, + SXE2_AN_COMPLETE_ACKNOWLEDGE = 4, + SXE2_AN_NEXT_PAGE_WAIT = 5, + SXE2_AN_LINK_STATUS_CHECK = 6, + SXE2_AN_PARALLET_DETECT_FAULT = 7, + SXE2_AN_GOOD_CHECK = 8, + SXE2_AN_GOOD = 9, +}; + +struct sxe2_pause_publicity_ability { + u8 bit_pause; + u8 bit_asym; + u8 resv[2]; +}; + +struct sxe2_local_suppet_advertis_an_en { + u8 suppert_an; + u8 advertis_an; + u8 resv[2]; +}; + +struct sxe2_peer_suppet_an_en { + u8 suppert_an; + u8 resv[3]; +}; + +enum sxe2_current_media_type { + CURRENT_MEDIA_UNKNOWN = 0, + CURRENT_MEDIA_FIBER, + CURRENT_MEDIA_BASET, + CURRENT_MEDIA_BACKPLANE, + CURRENT_MEDIA_DA, +}; + +struct sxe2_current_an_en { + u8 current_an; + u8 resv[3]; +}; + +enum flm_fec_mode { + FLM_FEC_NONE = 0, + FLM_FEC_BSFEC = 1, + FLM_FEC_528 = 2, + FLM_FEC_544 = 3, + FLM_FEC_AUTO = 15, + FLM_FEC_MAX +}; + +enum flm_link_status { + FLM_PORT_DOWN = 0, + FLM_PORT_UP = 1, + FLM_PORT_MAX = 2 +}; + +struct fec_ability_supported { + u32 fec_br : 1; + u32 fec_528 : 1; + u32 fec_544 : 1; + u32 rec : 29; +}; + +struct spec_entry { + u8 spec_id; + u8 reserved[3]; + enum flm_link_speed speed; + s8 spec_name[16]; +}; + +struct optical_module { + char vendor[SXE2_FLM_VENDOR_LEN]; + char vendor_pn[SXE2_FLM_VENDOR_PN_LEN]; + u8 module_type; + u8 current_connection; +}; + +struct optical_warning_info { + u8 vendor[SXE2_HOST_FLM_VENDOR_LEN]; + u8 vendor_pn[SXE2_HOST_FLM_VENDOR_PN_LEN]; + bool is_warning; +}; + +struct __an_mode { + u32 pause; + u32 speed_ability_10Gkr; + u32 speed_ability_25Gkrcr; + u32 speed_ability_25Gkrcr_s; + u32 speed_ability_100Gcr4; + u32 speed_ability_100Gkr4; + u32 fec_ability_10g; + + u32 fec_en_10g; + u32 fec_bsfec_25g; + u32 fec_rs528_25g; + u8 Consortium_25g_50g_en; +}; + +struct __an_np_mode { + u32 speed_ability_25Gkr; + u32 speed_ability_25Gcr; + u32 speed_ability_50Gkr2; + u32 speed_ability_50Gcr2; + u32 fec_ability_rs528; + u32 fec_ability_bsfec; + u32 fec_en_rs528; + u32 fec_en_bsfec; +}; + +struct __an_orig_speed_fec { + u32 orig_speed; + u32 orig_fec; +}; + +struct sxe2_an_publicity { + + struct __an_mode an_mode; + + struct __an_np_mode an_np_mode; +}; + +struct support_speed_ability_mode { + u32 ability_speed_25Gkr; + u32 ability_speed_25Gcr; + u32 ability_speed_50Gkr2; + u32 ability_speed_50Gcr2; + u32 ability_speed_10Gkr; + u32 ability_speed_25Gkrcr; + u32 ability_speed_25Gkrcr_s; + u32 ability_speed_100Gcr4; + u32 ability_speed_100Gkr4; + u32 ability_speed_100Gsr4; +}; + +struct sxe2_fwc_link_state_resp { + u8 link; + u8 resv[3]; +}; + +struct flm_ethtool_get_link_resp { + u8 specs_list[32]; + u32 sxe2_ana_fsm; + struct optical_module optical_module; + struct configure_fc configed_pause_result; + struct configure_fc partner_pause_result; + struct sxe2_pause_publicity_ability local_pause; + + struct sxe2_local_suppet_advertis_an_en local_an_en; + enum flm_fec_mode local_fec_mode; + struct sxe2_pause_publicity_ability partner_pause; + struct sxe2_peer_suppet_an_en partner_an_en; + enum sxe2_duplex_type support_duplx; + enum sxe2_current_media_type current_media; + struct sxe2_current_an_en current_an_en; + struct fec_ability_supported advertis_fec; + struct fec_ability_supported partner_fec; + struct sxe2_an_publicity an_publicity; +}; + +struct sxe2_msg_ethtool_info { + struct flm_ethtool_get_link_resp cfg; + struct support_speed_ability_mode ability; + u8 usr_link_speed; +}; + +struct sxe2_fwc_udp_tunnel_ref_add_req { + u8 type; + __le16 port; +}; + +struct 
sxe2_fwc_udp_tunnel_ref_delete_req { + u8 type; + u8 clear; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_req { + u8 type; +}; + +struct sxe2_fwc_udp_tunnel_ref_get_resp { + u8 type; + u8 enable; + u8 dst; + u8 src; + __le16 port; + u8 used; + u8 rsvd; +}; + +struct sxe2_fw_ncd_core_num_config_req { + u8 core_num; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_core_pri_queue { + u8 core_id; + u8 pri; + __le16 queue_id; +}; + +struct sxe2_fw_ncd_switch_tag_req { + u8 loc; + u8 len; + u8 en; + u8 mac_id; +}; + +struct sxe2_fw_ncd_switch_tag_resp { + u8 loc; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_req { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_txlen_adj_resp { + u8 mac_id; + u8 len; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdf_req { + __le16 eth_type; + __le16 mask; + u8 en; + u8 resv; +}; + +struct sxe2_fw_ncd_sdf_resp { + __le16 eth_type; + __le16 mask; +}; + +struct sxe2_fw_ncd_sdn_udp_req_resp { + __le16 start_port; + __le16 end_port; + u8 ph_len; + u8 udp_grp_id; + u8 used_count; + u8 resv; +}; + +struct sxe2_fw_ncd_sdn_req_resp { + union { + __le32 ipv4_addr; + __le32 ipv6_addr[4]; + } dest_addr; +__le16 used_count; + u8 is_ipv4; + u8 udp_grp_id; + u8 ip_id; + u8 resv[3]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_req { + __le16 udp_port; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_query_resp { + u8 find; + u8 udp_grp_id; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_sdn_ipsec_udp_req { + __le32 port_bmp; + __le16 start_port; + u8 udp_grp_id; + u8 resv; +}; + +struct sxe2_fw_ncd_app_port_req { + u8 idx; + u8 is_tcp; + u8 sport_en; + u8 dport_en; +}; + +struct sxe2_fw_ncd_pkt_pri_req { + u8 idx; + u8 pri; + u8 resv[2]; +}; + +struct sxe2_fw_ncd_udf_capa_get_resp { + __le32 sdn_ip_addr; + __le32 sdn_udp_ipsec_bm; + __le16 sdn_udp_ipsec_sp; + __le16 sdn_udp_start_port; + __le16 sdn_udp_end_port; + __le16 sdf_eth_type; + __le16 sdf_mask; + u8 sdf_en; + u8 switch_tag_en; + u8 switch_tag_len; + u8 switch_tag_loc; + u8 txlen_adj_len[4]; + u8 sdn_udp_ph_len; + u8 sdn_ip_type; + u8 sdn_ip_udp_grp_id; + u8 spec_proto_port_type; + u8 spec_ptype_pri_level; + u8 resv; +}; + +#define BFD_INTQ_CMD_SRC_IRQ (0) +#define BFD_INTQ_CMD_SRC_POLLING (1) +#define BFD_INTQ_CMD_BUF_LEN (64) + +struct sxe2_fwc_bfd_config_set_req { + __le16 max_sess; + u8 scan_interval; + u8 bfd_en; +}; + +struct sxe2_fwc_bfd_intq_info_get_req { + u8 src; + u8 sess_num; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_intq_info_get_resp { + u8 buf_empty; + u8 valid_len; + u8 resv[2]; + struct { + __le16 sess_id; + u8 reason; + u8 sess_state; + } data[BFD_INTQ_CMD_BUF_LEN]; +}; + +struct sxe2_fwc_bfd_kt_entry { + u8 is_add; + u8 prof_id; + u8 rsv[2]; + u8 inputset[SXE2_BFD_FV_CNT_MAX * 2]; + __le16 vsi; + __le16 sess_id; + __le32 fdid; +}; + +struct sxe2_fwc_bfd_kt_entry_resp { + __le16 kt_index; + u8 resv[2]; +}; + +struct sxe2_fwc_bfd_sess_cfg_set_req { + __le16 sess_id; + u8 valid; + u8 sess_mode; + __le16 kt_index; + u8 mac_id; + u8 tc_id; + u8 tx_en; + u8 rx_en; + __le16 tx_interval; + __le32 rx_interval; + __le16 ppt_id; + u8 state; + u8 rsv; +}; + +struct sxe2_fwc_bfd_sess_state_get_req { + __le16 sess_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_bfd_sess_state_get_resp { + u8 state; + u8 aging_state; + __le16 prof_id; + __le16 rx_cnt; + __le16 tx_cnt; +}; + +struct sxe2_fwc_bfd_capability_get_resp { + __le16 max_sess; + __le16 max_package_profile; + __le16 min_scan_interval; + __le16 bfde_vsi; + __le16 max_pkt_buf; + __le16 max_edit_cmd; + __le16 max_csum_desc; + u8 rsv[2]; +}; + +#define 
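+/*
+ * Editorial sketch of draining the BFD event queue defined above
+ * (hedged; the send step is elided):
+ *
+ *	req.src = BFD_INTQ_CMD_SRC_POLLING;
+ *	...send req, receive struct sxe2_fwc_bfd_intq_info_get_resp resp...
+ *	if (!resp.buf_empty)
+ *		for (i = 0; i < resp.valid_len; i++)
+ *			handle_session(le16_to_cpu(resp.data[i].sess_id),
+ *				       resp.data[i].sess_state);
+ */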
SXE2_BYTES_PER_PKT_BUF_ENT (32) +#define SXE2_PKT_BUF_MAX_PER_PROF (16) +#define SXE2_PKT_BUF_SIZE_MAX \ + (SXE2_BYTES_PER_PKT_BUF_ENT * SXE2_PKT_BUF_MAX_PER_PROF) + +#define SXE2_BYTES_PER_EDIT_CMD (8) +#define SXE2_EDIT_CMD_PER_ENT (4) +#define SXE2_EDIT_CMD_MAX (16) +#define SXE2_EDIT_CMD_ENT_MAX (SXE2_EDIT_CMD_MAX / SXE2_EDIT_CMD_PER_ENT) +#define SXE2_BYTES_PER_EDIT_CMD_ENT (SXE2_BYTES_PER_EDIT_CMD * SXE2_EDIT_CMD_PER_ENT) +#define SXE2_EDIT_CMD_SIZE_MAX (SXE2_EDIT_CMD_MAX * SXE2_BYTES_PER_EDIT_CMD) + +struct sxe2_bfd_edit_cmd_req { + struct { + u8 code; + u8 size; + __le16 loc; + } instr[SXE2_EDIT_CMD_PER_ENT]; +}; + +struct sxe2_fwc_bfd_pack_prof_set_req { + __le16 prof_id; + u8 rsv[2]; + + struct { + __le16 pkt_addr; + u8 pkt_len; + u8 pkt_lbo; + u8 data[SXE2_PKT_BUF_SIZE_MAX]; + } pkt_buffer; + + struct { + __le16 edit_cmd_addr; + u8 edit_cmd_len; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_MAX]; + } edit_cmd; + + __le16 csum_desc_addr; + u8 csum_desc_len; + u8 rsv0; + struct { + __le16 start_addr; + __le16 csum_loc; + __le16 csum_len; + u8 result_negate; + u8 rsv1; + __le32 chk_data; + struct sxe2_bfd_edit_cmd_req cmd[SXE2_EDIT_CMD_PER_ENT]; + } csum_desc[2]; +}; + +struct sxe2_fwc_bfd_meta_key_len_req { + __le16 prof_id; + __le16 key_len; +}; + +struct sxe2_tx_fault_count_mib { + u64 tx_fault_count; +}; + +struct single_link_result { + u32 port_num; + u32 is_link_up; + u32 speed; + u32 fec; + struct configure_fc fc_mode; +}; + +struct configure_fixed { + u32 port; + u32 speed; + u32 fec_mode; + u32 port_mode; + u32 an_en; + u32 lt_en; + struct configure_fc fc_mode; + s32 pcsRet; +}; + +struct all_link_status { + u32 dut_status; + u32 sb_status; +}; + +struct sxe2_fwc_txpa_dfx { + u32 txpa_in_all; + u32 txpa_out_all; + u32 txpa_in_drop; + u32 txpa_out_drop; + u32 txpa_in_err; + u32 txpa_out_err; +}; + +struct sxe2_fwc_txfb_dfx { + u32 txfb_in_all; + u32 txfb_in_drop; + u32 txfb_out_all; + u32 txfb_out_drop; + u32 txfb_internal_drop; +}; + +struct sxe2_fwc_rxpa_dfx { + u32 rxpa_in_all; + u32 rxpa_out_all; + u32 rxpa_in_drop; + u32 rxpa_out_drop; + u32 rxpa_in_err; + u32 rxpa_out_err; +}; + +struct sxe2_fwc_rxfb_dfx { + u32 rxfb_tx_in_all; + u32 rxfb_rx_in_all; + u32 rxfb_tx_in_drop; + u32 rxfb_rx_in_drop; + u32 rxfb_out_all; + u32 rxfb_out_drop; + u32 rxfb_internal_drop; +}; + +struct sxe2_fwc_switch_dfx { + u32 tx_all; + u32 tx_drop; + u32 rx_all; + u32 rx_drop; +}; + +struct sxe2_fwc_rxft_dfx { + u32 tx_in_all; + u32 tx_in_drop; + u32 tx_out_all; + u32 tx_out_drop; + u32 rx_in_all; + u32 rx_in_drop; + u32 rx_out_all; + u32 rx_out_drop; + u32 lp_in_all; + u32 lp_in_drop; + u32 lp_out_all; + u32 lp_out_drop; +}; + +struct sxe2_fwc_ppe_dfx { + struct sxe2_fwc_txpa_dfx txpa[4]; + struct sxe2_fwc_txfb_dfx txfb; + struct sxe2_fwc_rxpa_dfx rxpa[4]; + struct sxe2_fwc_rxfb_dfx rxfb; + struct sxe2_fwc_switch_dfx sw; + struct sxe2_fwc_rxft_dfx rxft; +}; + +#define SXE2_ACL_LUT_ENTRY_WIDTH (5) +#define SXE2_ACL_ACTION_TCAM_CNT (16) +#define SXE2_ACL_ACTION_MEM_CNT (20) +#define SXE2_ACL_ACTION_NUM_PER_ENTRY (2) +#define SXE2_ACL_ACTION_TCAM_DEPTH (512) +#define SXE2_ACL_ACTION_MEM_DEPTH (512) + +struct sxe2_fwc_acl_lut_alloc_req { + __le16 width; + __le16 depth; + u8 act_pairs_per_entry; + + u8 concurr; + u8 num_dependent_alloc_ids; + __le16 alloc_ids[SXE2_ACL_ACTION_TCAM_CNT - 1]; +}; + +struct sxe2_fwc_acl_lut_alloc_resp { + + __le16 alloc_id; + + __le16 first_entry; + __le16 last_entry; + + u8 first_tcam; + u8 last_tcam; + + u8 act_mem[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct 
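+/*
+ * Editorial arithmetic check on the packet-buffer and edit-command
+ * constants above:
+ *
+ *	SXE2_PKT_BUF_SIZE_MAX       = 32 * 16 = 512 bytes
+ *	SXE2_EDIT_CMD_ENT_MAX       = 16 / 4  = 4 entries
+ *	SXE2_BYTES_PER_EDIT_CMD_ENT = 8 * 4   = 32 bytes
+ *	SXE2_EDIT_CMD_SIZE_MAX      = 16 * 8  = 128 bytes
+ *
+ * which a build-time guard could pin down, e.g.:
+ *
+ *	static_assert(SXE2_EDIT_CMD_SIZE_MAX ==
+ *		      SXE2_EDIT_CMD_ENT_MAX * SXE2_BYTES_PER_EDIT_CMD_ENT);
+ */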
sxe2_fwc_acl_lut_dealloc_req { + __le16 alloc_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_sel_base_req { + __le16 prof_id; + + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_entry_data { + struct { + u8 val[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; + u8 reserved[2]; + } entry_key, entry_key_invert; +}; + +struct sxe2_fwc_acl_lut_entry_set_req { + u8 tcam_idx; + __le16 entry_idx; + u8 rsv; + + struct sxe2_acl_entry_data data; +}; + +struct sxe2_acl_act_entry_data { + u8 prio; + u8 mdid; + __le16 value; +}; + +struct sxe2_fwc_acl_act_entry_set_req { + u8 act_mem_idx; + __le16 act_entry_idx; + u8 rsv; + + struct sxe2_acl_act_entry_data data[SXE2_ACL_ACTION_NUM_PER_ENTRY]; +}; + +struct sxe2_fwc_acl_scen_alloc_req { + struct { + u8 tcam_select[SXE2_ACL_LUT_ENTRY_WIDTH]; + u8 enable; +#define SXE2_ACL_ALLOC_SCEN_START_CMP BIT(0) +#define SXE2_ACL_ALLOC_SCEN_START_SET BIT(1) + u8 start_cmp_set; + u8 rsv; + } tcam_cfg[SXE2_ACL_ACTION_TCAM_CNT]; + +#define SXE2_ACL_ACT_MEM_EN BIT(4) + u8 act_mem_cfg[SXE2_ACL_ACTION_MEM_CNT]; +}; + +struct sxe2_fwc_acl_scen_alloc_resp { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_scen_dealloc_req { + __le16 scen_id; + u8 rsv[2]; +}; + +struct sxe2_fwc_acl_prof_querey_req { + __le16 prof_id; +}; + +struct sxe2_fwc_acl_prof_querey_resp { + u8 byte_selection[30]; + u8 word_selection[32]; + u8 dword_selection[15]; + u8 pf_scenario_num[8]; +}; + +struct sxe2_acl_hit_info { + + __le32 profile_id : 7; + __le32 rsv0 : 25; + + __le32 fv1 : 16; + __le32 fv0 : 16; + + __le32 fv3 : 16; + __le32 fv2 : 16; + + __le32 fv5 : 16; + __le32 fv4 : 16; + + __le32 fv7 : 16; + __le32 fv6 : 16; + + __le32 fv9 : 16; + __le32 fv8 : 16; + + __le32 fv11 : 16; + __le32 fv10 : 16; + + __le32 fv13 : 16; + __le32 fv12 : 16; + + __le32 fv15 : 16; + __le32 fv14 : 16; + + __le32 fv17 : 16; + __le32 fv16 : 16; + + __le32 fv19 : 16; + __le32 fv18 : 16; + + __le32 fv21 : 16; + __le32 fv20 : 16; + + __le32 fv23 : 16; + __le32 fv22 : 16; + + __le32 fv25 : 16; + __le32 fv24 : 16; + + __le32 fv27 : 16; + __le32 fv26 : 16; + + __le32 fv29 : 16; + __le32 fv28 : 16; + + __le32 fv31 : 16; + __le32 fv30 : 16; +}; + +struct sxe2_acl_dfx_info { + + __le32 og_inbuf_hdr_cnt; + __le32 og_inbuf_info_cnt; + __le32 og_proc_hdr_cnt; + __le32 og_proc_info_cnt; + __le32 og_to_engine_cnt; + __le32 og_in_rg_cnt; + __le32 og_out_rg_cnt; + __le32 sel_base_cnt; + __le32 key_gen_cnt; + __le32 key_gen_to_lkt_cnt; + __le32 act_mem_cnt; + __le32 osc_act_cnt; + __le32 osc_pkt_cnt; + __le32 acl_rxft_cnt; + __le32 acl_recv_drop_cnt; + __le32 acl_action_drop_cnt; + __le32 acl_vsi_disable_drop_cnt; + __le32 prfl_tcam_hit_cnt; + __le32 prfl_tcam_miss_cnt; + __le32 prfl_tcam_bypss_cnt; + __le32 act_tcam_hit_cnt[16]; + __le32 act_tcam_miss_cnt[16]; + + __le16 act_idx_first[16]; + __le16 act_idx_last[16]; + __le32 act_key_first_low[16]; + __le32 act_key_first_high[16]; + __le32 act_key_last_low[16]; + __le32 act_key_last_high[16]; + + __le64 key_first; + __le64 key_last; + + u8 first_prfl_id; + u8 last_prfl_id; + u8 first_scen_id; + u8 last_scen_id; + __le16 first_prfl_tcam_idx; + __le16 last_prfl_tcam_idx; + + __le16 first_cascade; + __le16 last_cascade; + __le16 first_stack; + __le16 last_stack; + __le16 first_tcam_en; + __le16 last_tcam_en; +}; + +struct sxe2_acl_trace_recorder { + u8 trace_status0; + u8 trace_status2; + u8 rsv[2]; + struct sxe2_acl_hit_info hit_info; +}; + +struct sxe2_vf_queue_info { + __le16 rxq_base; + __le16 rxq_cnt; + 
__le16 txq_base; + __le16 txq_cnt; +}; + +struct sxe2_fwc_vf_queue_info { + u8 pf_id; + u16 vf_cnt; + u8 rsv[1]; + struct sxe2_vf_queue_info queue_info[]; +}; + +#pragma pack() + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ddp_common.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ddp_common.h new file mode 100644 index 0000000000000000000000000000000000000000..1dae61f83a801c7a5be3062927650444eb00d5f8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ddp_common.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ddp_common.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DDP_COMMON_H_ +#define _SXE2_DDP_COMMON_H_ + +#ifdef SXE2_FW +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +#ifdef SXE2_DPDK_DRIVER +#include "sxe2_type.h" +#include "sxe2_common.h" +#endif + +#define SXE2_DDP_DRV_VER_MAJ 1 +#define SXE2_DDP_DRV_VER_MNR 0 + +#define SXE2_DDP_FW_VER_MAJ 1 +#define SXE2_DDP_FW_VER_MNR 0 + +enum sxe2_ddp_error { + + SXE2_DDP_PKG_SUCCESS = 0, + + SXE2_DDP_PKG_ALREADY_LOADED = 1, + + SXE2_DDP_PKG_SAME_VERSION_ALREADY_LOADED = 2, + + SXE2_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = 3, + + SXE2_DDP_PKG_COMPATIBLE_ALREADY_LOADED = 4, + + SXE2_DDP_PKG_FW_MISMATCH = 5, + + SXE2_DDP_PKG_INVALID_FILE = 6, + + SXE2_DDP_PKG_FILE_VERSION_TOO_HIGH = 7, + + SXE2_DDP_PKG_FILE_VERSION_TOO_LOW = 8, + + SXE2_DDP_PKG_NO_SEC_MANIFEST = 9, + + SXE2_DDP_PKG_MANIFEST_INVALID = 10, + + SXE2_DDP_PKG_BUFFER_INVALID = 11, + + SXE2_DDP_PKG_BUSY = 12, + + SXE2_DDP_PKG_ERR = 13, +}; + +enum sxe2_ddp_state { + SXE2_DDP_STATE_UNINIT, + SXE2_DDP_STATE_PROC, + SXE2_DDP_STATE_FINISH, + SXE2_DDP_STATE_ERROR, + SXE2_DDP_STATE_INVALID = 0xFFFFFFFF, +}; + +struct sxe2_ddp_pkg_ver { + __le16 major; + __le16 minor; +}; + +union sxe2_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct sxe2_device_id_entry { + union sxe2_device_id device; + union sxe2_device_id sub_device; +}; + +struct sxe2_pkg_hdr { + struct sxe2_ddp_pkg_ver pkg_drv_ver; + struct sxe2_ddp_pkg_ver pkg_fw_ver; + struct sxe2_device_id_entry dev_vend_id; + __le32 seg_count; + + __le32 seg_offset[]; +}; + +#define SEGMENT_SIGN_TYPE_NONE 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 +#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 + +struct sxe2_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_SXE2_DDP 0x00000010 +#define SEGMENT_TYPE_SXE2_RUN_TIME_CFG 0x00000020 + __le32 seg_id; + __le32 seg_type; + __le32 seg_size; +}; + +struct sxe2_buf { +#define SXE2_PKG_BUF_SIZE 4096 + u8 buf[SXE2_PKG_BUF_SIZE]; +}; + +struct sxe2_buf_table { + __le32 buf_count; + struct sxe2_buf buf_array[]; +}; + +struct sxe2_seg { + struct sxe2_generic_seg_hdr hdr; + u8 rsvd[8]; + struct sxe2_buf_table buf_table; +}; + +#define SXE2_MIN_S_OFF 12 +#define SXE2_MAX_S_OFF 4095 +#define SXE2_MIN_S_SZ 1 +#define SXE2_MAX_S_SZ 4084 +#define SXE2_MIN_CFG_SZ (sizeof(struct sxe2_pkg_hdr) + sizeof(struct sxe2_seg)) + +struct sxe2_section_entry { + __le16 type; + __le16 unit_size; + + __le16 offset; + __le16 size; +}; + +#define SXE2_MIN_SECT_COUNT 1 +#define SXE2_MAX_SECT_COUNT 512 +#define SXE2_MIN_SECT_DATA_END 12 +#define SXE2_MAX_SECT_DATA_END 4096 + +struct 
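+/*
+ * Editorial sketch (hedged) of walking the package layout defined above;
+ * seg_offset[] appears to hold byte offsets from the start of the image
+ * to each segment header:
+ *
+ *	for (i = 0; i < le32_to_cpu(hdr->seg_count); i++) {
+ *		struct sxe2_generic_seg_hdr *seg =
+ *			(void *)hdr + le32_to_cpu(hdr->seg_offset[i]);
+ *		if (le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SXE2_DDP)
+ *			...;	// the pipeline-configuration segment
+ *	}
+ */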
sxe2_buf_hdr { + __le16 section_count; + + __le16 data_end; + __le32 buf_idx; + + __le32 crc; + struct sxe2_section_entry section_entry[]; +}; + +enum sxe2_segment_type { + SXE2_SGM_BLK_DP = 0, + SXE2_SGM_BLK_MAX +}; + +enum sxe2_section_type { + SXE2_SECT_SWPTG_TYPE = 0, + SXE2_SECT_SWVSIG_TYPE, + SXE2_SECT_SWTCAM_TYPE, + SXE2_SECT_SWEXTRACTOR_TYPE, + SXE2_SECT_SWMAP_TYPE, + SXE2_SECT_SWRCP_TYPE, + SXE2_SECT_SWPROFILERCPBITMAP_TYPE, + SXE2_SECT_RSSPTG_TYPE, + SXE2_SECT_RSSVSIG_TYPE, + SXE2_SECT_RSSTCAM_TYPE, + SXE2_SECT_RSSEXTRACTOR_TYPE, + SXE2_SECT_RSSMAP_TYPE, + SXE2_SECT_RSSIPSET_TYPE, + SXE2_SECT_FNAVPTG_TYPE, + SXE2_SECT_FNAVMASK_TYPE, + SXE2_SECT_ACLPTG_TYPE = 16, + SXE2_SECT_TYPE_MAX, +}; +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_aux.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_aux.h new file mode 100644 index 0000000000000000000000000000000000000000..6de4bb332f48e4fce8853b5e3dcaba1fd25f6c8d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_aux.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_drv_aux.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef _SXE2_DRV_AUX_H_ +#define _SXE2_DRV_AUX_H_ + +#include +#include +#include +#include "sxe2_compat.h" +#include "sxe2_cmd.h" + +#ifdef NOT_SUPPORT_AUXILIARY_BUS +#include "auxiliary_bus.h" +#else +#include +#endif + +#define AUX_MAJOR_VER (1) +#define AUX_MINOR_VER (1) +#define SXE2_RDMA_INDEX (0x1) +#define AUX_RDMA_INVALID_PF_IDX (0xFF) +#define AUX_MAX_USER_PRIORITY (8) +#define AUX_MAX_APPS (64) +#define AUX_MAX_DSCP_MAPPING (64) +#define AUX_MAX_NUM_AUX (5) +#define SXE2_RDMA_VCHNL_Q_INVALID_IDX (0xFFFF) + +#define SXE2_RDMA_INVALID_PF 0xFF +#define SXE2_RDMA_PF0 BIT(0) +#define SXE2_RDMA_PF1 BIT(1) +#define SXE2_RDMA_BOTH_PF 0x3 + +#define SXE2_DRV_VER_STR_LEN 32 + +enum aux_rdma_opcode { + RDMA_MAC_RULE_ADD, + RDMA_MAC_RULE_DELETE, + RDMA_OPCODE_MAX +}; + +enum aux_reset_type { + AUX_PFR, + AUX_CORER, + AUX_GLOBR, +}; + +enum aux_function_type { + AUX_FUNCTION_TYPE_PF, + AUX_FUNCTION_TYPE_VF, +}; + +enum aux_rdma_gen { + AUX_RDMA_GEN_RESERVED = 0, + AUX_RDMA_GEN_1 = 1, + AUX_RDMA_GEN_2 = 2, + AUX_RDMA_GEN_3 = 3, +}; + +struct aux_rdma_caps { + u8 gen; +}; + +enum aux_event_type { + SXE2_EVENT_MTU_CHANGED, + SXE2_EVENT_NOTIFY_RESET, + SXE2_EVENT_VF_RESET, + SXE2_EVENT_AEQ_OVERFLOW, + SXE2_EVENT_FAILOVER, + SXE2_EVENT_TC_CHANGE, + SXE2_EVENT_MAX +}; + +struct aux_ver_info { + u16 major; + u16 minor; + u64 support; +}; + +struct aux_core_dev_info; + +struct aux_rdma_qset_params { + u16 teid; + u16 qset_id; + u16 vport_id; + u8 tc[2]; + u8 user_pri; + u8 qset_port; +}; + +struct aux_rdma_multi_qset_params { + u16 teid[2]; + u16 qset_id[2]; + u8 qset_port[2]; + u16 vport_id; + u8 tc[2]; + u8 num; + u8 rdma_port[2]; + u8 active_ports; + u8 user_pri; +}; + +struct aux_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +struct aux_dcb_app_info { + u8 priority; + u8 selector; + u16 prot_id; +}; + +struct aux_qos_params { + struct aux_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[AUX_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u32 num_apps; + u8 pfc_mode; + struct aux_dcb_app_info apps[AUX_MAX_APPS]; + u8 dscp_map[AUX_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +struct aux_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct aux_qvlist_info 
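+/*
+ * Editorial sketch: with the sxe2_auxiliary_device wrapper declared at
+ * the end of this header, an auxiliary driver's probe can recover the
+ * core-device info from the generic auxiliary_device in the usual
+ * container_of() way (the probe signature is the standard
+ * auxiliary-bus one):
+ *
+ *	static int sxe2rdma_probe(struct auxiliary_device *adev,
+ *				  const struct auxiliary_device_id *id)
+ *	{
+ *		struct sxe2_auxiliary_device *sxe2_adev =
+ *			container_of(adev, struct sxe2_auxiliary_device, adev);
+ *		struct aux_core_dev_info *cdev_info = sxe2_adev->cdev_info;
+ *		...
+ *	}
+ */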
{ + u32 num_vectors; + struct aux_qv_info qv_info[]; +}; + +struct aux_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +struct sxe2_core_ops { + int (*alloc_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*free_res)(struct aux_core_dev_info *cdev_info, struct aux_rdma_qset_params *qset); + int (*request_reset)(struct aux_core_dev_info *cdev_info, + enum aux_reset_type reset_type); + int (*update_vport_filter)(struct aux_core_dev_info *cdev_info, + u16 vport_id, bool enable); + int (*get_vf_info)(struct aux_core_dev_info *cdev_info, + u16 vf_id, struct aux_vf_port_info *vf_port_info); + int (*vc_send)(struct aux_core_dev_info *cdev_info, + u16 vf_id, u8 *msg, u16 len, u64 session_id); + int (*vc_send_sync)(struct aux_core_dev_info *cdev_info, u8 *msg, + u16 len, u8 *recv_msg, u16 recv_len); + int (*rdma_send_cmd)(struct aux_core_dev_info *cdev_info, + enum sxe2_drv_cmd_opcode opcode, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len); + int (*rdma_drv_config)(struct aux_core_dev_info *cdev_info, u8 opcode, u8 *msg); + int (*vc_queue_vec_map_unmap)(struct aux_core_dev_info *cdev_info, + struct aux_qvlist_info *qvl_info, bool map); + int (*alloc_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*free_multi_res)(struct aux_core_dev_info *cdev_info, + struct aux_rdma_multi_qset_params *qset); + int (*dump_pcap_cmd)(struct aux_core_dev_info *cdev_info, u8 *mac, bool is_add); + void (*notify_rdma_load)(struct aux_core_dev_info *cdev_info, bool loaded); + u32 (*rdma_get_link_speed)(struct aux_core_dev_info *cdev_info); +}; + +struct sxe2_rdma_event_info { + DECLARE_BITMAP(type, SXE2_EVENT_MAX); + u16 vf_id; + struct aux_qos_params port_qos; +}; + +struct aux_core_dev_info { + struct pci_dev *pdev; + struct auxiliary_device *adev; + u8 __iomem *hw_addr; + struct aux_ver_info ver; + char drv_ver[SXE2_DRV_VER_STR_LEN]; + enum aux_function_type ftype; + const struct sxe2_aux_ops *aux_ops; + struct sxe2_core_ops *ops; + int cdev_info_id; + u8 pf_id; + u8 pf_cnt; + u16 vfid_base; + u16 vport_id; + struct aux_qos_params qos_info[2]; + struct net_device *netdev; + struct msix_entry *msix_entries; + u32 msix_count; + struct aux_rdma_caps rdma_caps; + struct sxe2_adapter *adapter; + u8 bond_mode; + u8 rdma_pf_bitmap; + void *ext_ops; + void *ext_info; +}; + +struct sxe2_aux_ops { + void (*event_handler)(struct aux_core_dev_info *cdev_info, + struct sxe2_rdma_event_info *event); + int (*vc_receive)(struct aux_core_dev_info *cdev_info, + u32 vf_id, u8 *msg, u16 len, u64 session_id); +}; + +struct sxe2_auxiliary_device { + struct auxiliary_device adev; + struct aux_core_dev_info *cdev_info; +}; + +struct sxe2_auxiliary_drv { + struct auxiliary_driver adrv; + struct sxe2_aux_ops aux_ops; +}; + +void sxe2_rdma_aux_adev_release(struct device *dev); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_type.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_type.h new file mode 100644 index 0000000000000000000000000000000000000000..3131b33f57845130e0f08b5ef0d1b1e55ed155ba --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_drv_type.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2_drv_type.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_DRV_TYPEDEF_H__ +#define __SXE2_DRV_TYPEDEF_H__ + +#include "ps3_types.h" + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#endif + +typedef U8 u8; +typedef U16 u16; +typedef U32 u32; +typedef U64 u64; + +#ifndef SXE2_SUPPORT_IPXE +typedef S8 s8; +#endif + +typedef S16 s16; +typedef S32 s32; +typedef S64 s64; + +typedef U16 __le16; +typedef U32 __le32; +typedef U64 __le64; + +#ifndef true +#define true (1) +#endif + +#ifndef false +#define false (0) +#endif + +#ifndef bool +#define bool Ps3Bool_t +#endif + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_host_regs.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_host_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..165167d4f384dc89bb25b663de9f1b8a7664a0fe --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_host_regs.h @@ -0,0 +1,717 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_host_regs.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_HOST_REGS_H__ +#define __SXE2_HOST_REGS_H__ + +#define SXE2_BITS_MASK(m, s) ((m ## UL) << (s)) + +#define SXE2_RXQ_CTXT(_i, _QRX) (0x0050000 + ((_i) * 4 + (_QRX) * 0x20)) +#define SXE2_RXQ_HEAD(_QRX) (0x0060000 + ((_QRX) * 4)) +#define SXE2_RXQ_TAIL(_QRX) (0x0070000 + ((_QRX) * 4)) +#define SXE2_RXQ_CTRL(_QRX) (0x006d000 + ((_QRX) * 4)) +#define SXE2_RXQ_WB(_QRX) (0x006B000 + ((_QRX) * 4)) + +#define SXE2_RXQ_CTRL_STATUS_ACTIVE 0x00000004 +#define SXE2_RXQ_CTRL_ENABLED 0x00000001 +#define SXE2_RXQ_CTRL_CDE_ENABLE BIT(3) + +#define SXE2_PCIEPROC_BASE 0x002d6000 +#define SXE2_PF_INT_BASE 0x00260000 +#define SXE2_PF_INT_ALLOC (SXE2_PF_INT_BASE + 0x0000) +#define SXE2_PF_INT_ALLOC_FIRST 0x7FF +#define SXE2_PF_INT_ALLOC_LAST_S 12 +#define SXE2_PF_INT_ALLOC_LAST \ + (0x7FF << SXE2_PF_INT_ALLOC_LAST_S) +#define SXE2_PF_INT_ALLOC_VALID BIT(31) + +#define SXE2_PF_INT_OICR (SXE2_PF_INT_BASE + 0x0040) +#define SXE2_PF_INT_OICR_PCIE_TIMEOUT BIT(0) +#define SXE2_PF_INT_OICR_UR BIT(1) +#define SXE2_PF_INT_OICR_CA BIT(2) +#define SXE2_PF_INT_OICR_VFLR BIT(3) +#define SXE2_PF_INT_OICR_VFR_DONE BIT(4) +#define SXE2_PF_INT_OICR_LAN_TX_ERR BIT(5) +#define SXE2_PF_INT_OICR_BFDE BIT(6) +#define SXE2_PF_INT_OICR_LAN_RX_ERR BIT(7) +#define SXE2_PF_INT_OICR_ECC_ERR BIT(8) +#define SXE2_PF_INT_OICR_GPIO BIT(9) +#define SXE2_PF_INT_OICR_TSYN_TX BIT(11) +#define SXE2_PF_INT_OICR_TSYN_EVENT BIT(12) +#define SXE2_PF_INT_OICR_TSYN_TGT BIT(13) +#define SXE2_PF_INT_OICR_EXHAUST BIT(14) +#define SXE2_PF_INT_OICR_FW BIT(15) +#define SXE2_PF_INT_OICR_SWINT BIT(16) +#define SXE2_PF_INT_OICR_LINKSEC_CHG BIT(17) +#define SXE2_PF_INT_OICR_INT_CFG_ADDR_ERR BIT(18) +#define SXE2_PF_INT_OICR_INT_CFG_DATA_ERR BIT(19) +#define SXE2_PF_INT_OICR_INT_CFG_ADR_UNRANGE BIT(20) +#define SXE2_PF_INT_OICR_INT_RAM_CONFLICT BIT(21) +#define SXE2_PF_INT_OICR_GRST BIT(22) +#define SXE2_PF_INT_OICR_FWQ_INT BIT(29) +#define SXE2_PF_INT_OICR_FWQ_TOOL_INT BIT(30) +#define SXE2_PF_INT_OICR_MBXQ_INT BIT(31) + +#define SXE2_PF_INT_OICR_ENABLE (SXE2_PF_INT_BASE + 0x0020) + +#define SXE2_PF_INT_FW_EVENT (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_FW_ABNORMAL BIT(0) +#define SXE2_PF_INT_RDMA_AEQ_OVERFLOW BIT(1) +#define SXE2_PF_INT_CGMAC_LINK_CHG 
BIT(18) +#define SXE2_PF_INT_VFLR_DONE BIT(2) + +#define SXE2_PF_INT_OICR_CTL (SXE2_PF_INT_BASE + 0x0060) +#define SXE2_PF_INT_OICR_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_OICR_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_OICR_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_OICR_CTL_ITR_IDX_S) +#define SXE2_PF_INT_OICR_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_FWQ_CTL (SXE2_PF_INT_BASE + 0x00C0) +#define SXE2_PF_INT_FWQ_CTL_MSIX_IDX 0x7FFF +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_FWQ_CTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_FWQ_CTL_ITR_IDX_S) +#define SXE2_PF_INT_FWQ_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_MBX_CTL (SXE2_PF_INT_BASE + 0x00A0) +#define SXE2_PF_INT_MBX_CTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_MBX_CTL_ITR_IDX_S 11 +#define SXE2_PF_INT_MBX_CTL_ITR_IDX (0x3 << SXE2_PF_INT_MBX_CTL_ITR_IDX_S) +#define SXE2_PF_INT_MBX_CTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_GPIO_ENA (SXE2_PF_INT_BASE + 0x0100) +#define SXE2_PF_INT_GPIO_X_ENA(x) BIT(x) + +#define SXE2_PFG_INT_CTL (SXE2_PF_INT_BASE + 0x0120) +#define SXE2_PFG_INT_CTL_ITR_GRAN 0x7 +#define SXE2_PFG_INT_CTL_ITR_GRAN_0 (2) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN BIT(4) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN_0 (4) +#define SXE2_PFG_INT_CTL_CREDIT_GRAN_1 (8) + +#define SXE2_VFG_RAM_INIT_DONE \ + (SXE2_PF_INT_BASE + 0x0128) +#define SXE2_VFG_RAM_INIT_DONE_0 BIT(0) +#define SXE2_VFG_RAM_INIT_DONE_1 BIT(1) +#define SXE2_VFG_RAM_INIT_DONE_2 BIT(2) + +#define SXE2_LINK_REG_GET_10G_VALUE 4 +#define SXE2_LINK_REG_GET_25G_VALUE 1 +#define SXE2_LINK_REG_GET_50G_VALUE 2 +#define SXE2_LINK_REG_GET_100G_VALUE 3 + +#define SXE2_PORT0_CNT 0 +#define SXE2_PORT1_CNT 1 +#define SXE2_PORT2_CNT 2 +#define SXE2_PORT3_CNT 3 + +#define SXE2_LINK_STATUS_BASE (0x002ac200) +#define SXE2_LINK_STATUS_PORT0_POS 3 +#define SXE2_LINK_STATUS_PORT1_POS 11 +#define SXE2_LINK_STATUS_PORT2_POS 19 +#define SXE2_LINK_STATUS_PORT3_POS 27 +#define SXE2_LINK_STATUS_MASK 1 + +#define SXE2_LINK_SPEED_BASE (0x002ac200) +#define SXE2_LINK_SPEED_PORT0_POS 0 +#define SXE2_LINK_SPEED_PORT1_POS 8 +#define SXE2_LINK_SPEED_PORT2_POS 16 +#define SXE2_LINK_SPEED_PORT3_POS 24 +#define SXE2_LINK_SPEED_MASK 7 + +#define SXE2_PFVP_INT_ALLOC(vf_idx) (SXE2_PF_INT_BASE + 0x012C + ((vf_idx) * 4)) +#define SXE2_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PFVP_INT_ALLOC_LAST_S 12 +#define SXE2_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCI_PFVP_INT_ALLOC(vf_idx) (SXE2_PCIEPROC_BASE + 0x5800 + ((vf_idx) * 4)) +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_S 0 + +#define SXE2_PCI_PFVP_INT_ALLOC_FIRST_M (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_FIRST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_S 12 + +#define SXE2_PCI_PFVP_INT_ALLOC_LAST_M \ + (0x7FF << SXE2_PCI_PFVP_INT_ALLOC_LAST_S) +#define SXE2_PCI_PFVP_INT_ALLOC_VALID BIT(31) + +#define SXE2_PCIEPROC_INT2FUNC(_INT) (SXE2_PCIEPROC_BASE + 0xe000 + ((_INT) * 4)) +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_S 0 +#define SXE2_PCIEPROC_INT2FUNC_VF_NUM_M (0xFF << SXE2_PCIEPROC_INT2FUNC_VF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_S 12 +#define SXE2_PCIEPROC_INT2FUNC_PF_NUM_M (0x7 << SXE2_PCIEPROC_INT2FUNC_PF_NUM_S) +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_S 16 +#define SXE2_PCIEPROC_INT2FUNC_IS_PF_M BIT(16) + +#define SXE2_VSI_PF(vf_idx) (SXE2_PF_INT_BASE + 0x14000 + ((vf_idx) * 4)) +#define SXE2_VSI_PF_ID_S 0 +#define SXE2_VSI_PF_ID_M (0x7 << SXE2_VSI_PF_ID_S) +#define SXE2_VSI_PF_EN_M BIT(3) + +#define 
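/*
 * Illustrative decode of the packed link register above (not part of the
 * patch). Status and speed for all four ports share one 32-bit register;
 * hw_addr is assumed to be the ioremapped BAR.
 */
static bool example_port_link_up(u8 __iomem *hw_addr, u32 port, u32 *speed)
{
	static const u32 status_pos[] = {
		SXE2_LINK_STATUS_PORT0_POS, SXE2_LINK_STATUS_PORT1_POS,
		SXE2_LINK_STATUS_PORT2_POS, SXE2_LINK_STATUS_PORT3_POS,
	};
	static const u32 speed_pos[] = {
		SXE2_LINK_SPEED_PORT0_POS, SXE2_LINK_SPEED_PORT1_POS,
		SXE2_LINK_SPEED_PORT2_POS, SXE2_LINK_SPEED_PORT3_POS,
	};
	u32 val = readl(hw_addr + SXE2_LINK_STATUS_BASE);

	/* The speed code compares against SXE2_LINK_REG_GET_*_VALUE. */
	*speed = (val >> speed_pos[port]) & SXE2_LINK_SPEED_MASK;
	return (val >> status_pos[port]) & SXE2_LINK_STATUS_MASK;
}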
SXE2_MBX_CTL(_VSI) (0x0026692C + ((_VSI) * 4)) +#define SXE2_MBX_CTL_MSIX_INDX_S 0 +#define SXE2_MBX_CTL_MSIX_INDX_M (0x7FF << SXE2_MBX_CTL_MSIX_INDX_S) +#define SXE2_MBX_CTL_CAUSE_ENA_M BIT(30) + +#define SXE2_PF_INT_TQCTL(q_idx) (SXE2_PF_INT_BASE + 0x092C + 4 * (q_idx)) +#define SXE2_PF_INT_TQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_TQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_TQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_TQCTL_ITR_IDX_S) +#define SXE2_PF_INT_TQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RQCTL(q_idx) (SXE2_PF_INT_BASE + 0x292C + 4 * (q_idx)) +#define SXE2_PF_INT_RQCTL_MSIX_IDX 0x7FF +#define SXE2_PF_INT_RQCTL_ITR_IDX_S 11 +#define SXE2_PF_INT_RQCTL_ITR_IDX \ + (0x3 << SXE2_PF_INT_RQCTL_ITR_IDX_S) +#define SXE2_PF_INT_RQCTL_CAUSE_ENABLE BIT(30) + +#define SXE2_PF_INT_RATE(irq_idx) (SXE2_PF_INT_BASE + 0x7530 + 4 * (irq_idx)) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL (0x3F) +#define SXE2_PF_INT_RATE_CREDIT_INTERVAL_MAX \ + (0x3F) +#define SXE2_PF_INT_RATE_INTRL_ENABLE (BIT(6)) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT (7) +#define SXE2_PF_INT_RATE_CREDIT_MAX_VALUE \ + (0x3F << SXE2_PF_INT_RATE_CREDIT_MAX_VALUE_SHIFT) + +#define SXE2_VF_INT_ITR(itr_idx, irq_idx) \ + (SXE2_PF_INT_BASE + 0xB530 + 0x2000 * (itr_idx) + 4 * (irq_idx)) +#define SXE2_VF_INT_ITR_INTERVAL 0xFFF + +#define SXE2_VF_DYN_CTL(irq_idx) (SXE2_PF_INT_BASE + 0x9530 + 4 * (irq_idx)) +#define SXE2_VF_DYN_CTL_INTENABLE BIT(0) +#define SXE2_VF_DYN_CTL_CLEARPBA BIT(1) +#define SXE2_VF_DYN_CTL_SWINT_TRIG BIT(2) +#define SXE2_VF_DYN_CTL_ITR_IDX_S \ + 3 +#define SXE2_VF_DYN_CTL_ITR_IDX_M 0x3 +#define SXE2_VF_DYN_CTL_INTERVAL_S 5 +#define SXE2_VF_DYN_CTL_INTERVAL_M 0xFFF +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_ENABLE BIT(24) +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_S 25 +#define SXE2_VF_DYN_CTL_SW_ITR_IDX_M 0x3 + +#define SXE2_VF_DYN_CTL_INTENABLE_MSK \ + BIT(31) + +#define SXE2_BAR4_MSIX_BASE 0 +#define SXE2_BAR4_MSIX_CTL(_idx) (SXE2_BAR4_MSIX_BASE + 0xC + ((_idx) * 0x10)) +#define SXE2_BAR4_MSIX_ENABLE 0 +#define SXE2_BAR4_MSIX_DISABLE 1 + +#define SXE2_TXQ_LEGACY_DBLL(_DBQM) (0x1000 + ((_DBQM) * 4)) + +#define SXE2_TXQ_CONTEXT0(_pf_idx) (0x10040 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT1(_pf_idx) (0x10044 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT2(_pf_idx) (0x10048 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT3(_pf_idx) (0x1004C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT4(_pf_idx) (0x10050 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7(_pf_idx) (0x1005C + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CONTEXT7_HEAD_S 0 +#define SXE2_TXQ_CONTEXT7_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_HEAD_S) +#define SXE2_TXQ_CONTEXT7_READ_HEAD_S 16 +#define SXE2_TXQ_CONTEXT7_READ_HEAD_M SXE2_BITS_MASK(0xFFF, SXE2_TXQ_CONTEXT7_READ_HEAD_S) + +#define SXE2_TXQ_CTRL(_pf_idx) (0x10064 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_CTXT_CTRL(_pf_idx) (0x100C8 + ((_pf_idx) * 0x100)) +#define SXE2_TXQ_DIS_CNT(_pf_idx) (0x100D0 + ((_pf_idx) * 0x100)) + +#define SXE2_TXQ_CTXT_CTRL_USED_MASK 0x00000800 +#define SXE2_TXQ_CTRL_SW_EN_M BIT(0) +#define SXE2_TXQ_CTRL_HW_EN_M BIT(1) + +#define SXE2_TXQ_CTXT2_PROT_IDX_S 0 +#define SXE2_TXQ_CTXT2_PROT_IDX_M SXE2_BITS_MASK(0x7, 0) +#define SXE2_TXQ_CTXT2_CGD_IDX_S 4 +#define SXE2_TXQ_CTXT2_CGD_IDX_M SXE2_BITS_MASK(0x1F, 4) +#define SXE2_TXQ_CTXT2_PF_IDX_S 9 +#define SXE2_TXQ_CTXT2_PF_IDX_M SXE2_BITS_MASK(0x7, 9) +#define SXE2_TXQ_CTXT2_VMVF_IDX_S 12 +#define SXE2_TXQ_CTXT2_VMVF_IDX_M SXE2_BITS_MASK(0x3FF, 12) +#define SXE2_TXQ_CTXT2_VMVF_TYPE_S 23 +#define SXE2_TXQ_CTXT2_VMVF_TYPE_M 
SXE2_BITS_MASK(0x3, 23) +#define SXE2_TXQ_CTXT2_TSYN_ENA_S 25 +#define SXE2_TXQ_CTXT2_TSYN_ENA_M BIT(25) +#define SXE2_TXQ_CTXT2_ALT_VLAN_S 26 +#define SXE2_TXQ_CTXT2_ALT_VLAN_M BIT(26) +#define SXE2_TXQ_CTXT2_WB_MODE_S 27 +#define SXE2_TXQ_CTXT2_WB_MODE_M BIT(27) +#define SXE2_TXQ_CTXT2_ITR_WB_S 28 +#define SXE2_TXQ_CTXT2_ITR_WB_M BIT(28) +#define SXE2_TXQ_CTXT2_LEGACY_EN_S 29 +#define SXE2_TXQ_CTXT2_LEGACY_EN_M BIT(29) +#define SXE2_TXQ_CTXT2_SSO_EN_S 30 +#define SXE2_TXQ_CTXT2_SSO_EN_M BIT(30) + +#define SXE2_TXQ_CTXT3_SRC_VSI_S 0 +#define SXE2_TXQ_CTXT3_SRC_VSI_M SXE2_BITS_MASK(0x3FF, 0) +#define SXE2_TXQ_CTXT3_CPU_ID_S 12 +#define SXE2_TXQ_CTXT3_CPU_ID_M SXE2_BITS_MASK(0xFF, 12) +#define SXE2_TXQ_CTXT3_TPH_RDDESC_S 20 +#define SXE2_TXQ_CTXT3_TPH_RDDESC_M BIT(20) +#define SXE2_TXQ_CTXT3_TPH_RDDATA_S 21 +#define SXE2_TXQ_CTXT3_TPH_RDDATA_M BIT(21) +#define SXE2_TXQ_CTXT3_TPH_WRDESC_S 22 +#define SXE2_TXQ_CTXT3_TPH_WRDESC_M BIT(22) + +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_S 0 +#define SXE2_TXQ_CTXT3_QID_IN_FUNC_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_TXQ_CTXT3_RDDESC_RO_S 13 +#define SXE2_TXQ_CTXT3_RDDESC_RO_M BIT(13) +#define SXE2_TXQ_CTXT3_WRDESC_RO_S 14 +#define SXE2_TXQ_CTXT3_WRDESC_RO_M BIT(14) +#define SXE2_TXQ_CTXT3_RDDATA_RO_S 15 +#define SXE2_TXQ_CTXT3_RDDATA_RO_M BIT(15) +#define SXE2_TXQ_CTXT3_QLEN_S 16 +#define SXE2_TXQ_CTXT3_QLEN_M SXE2_BITS_MASK(0x1FFF, 16) + +#define SXE2_RX_BUF_CHAINED_MAX 10 +#define SXE2_RX_DESC_BASE_ADDR_UNIT 7 +#define SXE2_RX_HBUF_LEN_UNIT 6 +#define SXE2_RX_DBUF_LEN_UNIT 7 +#define SXE2_RX_DBUF_LEN_MASK (~0x7F) +#define SXE2_RX_HWTAIL_VALUE_MASK (~0x7) + +enum { + SXE2_RX_CTXT0 = 0, + SXE2_RX_CTXT1, + SXE2_RX_CTXT2, + SXE2_RX_CTXT3, + SXE2_RX_CTXT4, + SXE2_RX_CTXT_CNT, +}; + +#define SXE2_RX_CTXT_BASE_L_S 0 +#define SXE2_RX_CTXT_BASE_L_W 32 + +#define SXE2_RX_CTXT_BASE_H_S 0 +#define SXE2_RX_CTXT_BASE_H_W 25 +#define SXE2_RX_CTXT_DEPTH_L_S 25 +#define SXE2_RX_CTXT_DEPTH_L_W 7 + +#define SXE2_RX_CTXT_DEPTH_H_S 0 +#define SXE2_RX_CTXT_DEPTH_H_W 6 + +#define SXE2_RX_CTXT_DBUFF_S 6 +#define SXE2_RX_CTXT_DBUFF_W 7 + +#define SXE2_RX_CTXT_HBUFF_S 13 +#define SXE2_RX_CTXT_HBUFF_W 5 + +#define SXE2_RX_CTXT_HSPLT_TYPE_S 18 +#define SXE2_RX_CTXT_HSPLT_TYPE_W 2 + +#define SXE2_RX_CTXT_DESC_TYPE_S 20 +#define SXE2_RX_CTXT_DESC_TYPE_W 1 + +#define SXE2_RX_CTXT_CRC_S 21 +#define SXE2_RX_CTXT_CRC_W 1 + +#define SXE2_RX_CTXT_L2TAG_FLAG_S 23 +#define SXE2_RX_CTXT_L2TAG_FLAG_W 1 + +#define SXE2_RX_CTXT_HSPLT_0_S 24 +#define SXE2_RX_CTXT_HSPLT_0_W 4 + +#define SXE2_RX_CTXT_HSPLT_1_S 28 +#define SXE2_RX_CTXT_HSPLT_1_W 2 + +#define SXE2_RX_CTXT_INVALN_STP_S 31 +#define SXE2_RX_CTXT_INVALN_STP_W 1 + +#define SXE2_RX_CTXT_LRO_ENABLE_S 0 +#define SXE2_RX_CTXT_LRO_ENABLE_W 1 + +#define SXE2_RX_CTXT_CPUID_S 3 +#define SXE2_RX_CTXT_CPUID_W 8 + +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_S 11 +#define SXE2_RX_CTXT_MAX_FRAME_SIZE_W 14 + +#define SXE2_RX_CTXT_LRO_DESC_MAX_S 25 +#define SXE2_RX_CTXT_LRO_DESC_MAX_W 4 + +#define SXE2_RX_CTXT_RELAX_DATA_S 29 +#define SXE2_RX_CTXT_RELAX_DATA_W 1 + +#define SXE2_RX_CTXT_RELAX_WB_S 30 +#define SXE2_RX_CTXT_RELAX_WB_W 1 + +#define SXE2_RX_CTXT_RELAX_RD_S 31 +#define SXE2_RX_CTXT_RELAX_RD_W 1 + +#define SXE2_RX_CTXT_THPRDESC_ENABLE_S 1 +#define SXE2_RX_CTXT_THPRDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPWDESC_ENABLE_S 2 +#define SXE2_RX_CTXT_THPWDESC_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPRDATA_ENABLE_S 3 +#define SXE2_RX_CTXT_THPRDATA_ENABLE_W 1 + +#define SXE2_RX_CTXT_THPHEAD_ENABLE_S 4 +#define SXE2_RX_CTXT_THPHEAD_ENABLE_W 1 + +#define 
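/*
 * Illustrative use of the _S/_M shift-and-mask pairs above (not part of
 * the patch): composing the TXQ CONTEXT2 word for a PF-owned queue with
 * the legacy doorbell enabled. Field values are arbitrary.
 */
static u32 example_txq_context2(u32 pf_idx, u32 vmvf_idx)
{
	u32 ctxt2 = 0;

	ctxt2 |= (pf_idx << SXE2_TXQ_CTXT2_PF_IDX_S) & SXE2_TXQ_CTXT2_PF_IDX_M;
	ctxt2 |= (vmvf_idx << SXE2_TXQ_CTXT2_VMVF_IDX_S) &
		 SXE2_TXQ_CTXT2_VMVF_IDX_M;
	ctxt2 |= SXE2_TXQ_CTXT2_LEGACY_EN_M;
	return ctxt2;
}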
SXE2_RX_CTXT_LOW_DESC_LINE_S 6 +#define SXE2_RX_CTXT_LOW_DESC_LINE_W 3 + +#define SXE2_RX_CTXT_VF_ID_S 9 +#define SXE2_RX_CTXT_VF_ID_W 8 + +#define SXE2_RX_CTXT_PF_ID_S 17 +#define SXE2_RX_CTXT_PF_ID_W 3 + +#define SXE2_RX_CTXT_VF_ENABLE_S 20 +#define SXE2_RX_CTXT_VF_ENABLE_W 1 + +#define SXE2_RX_CTXT_VSI_ID_S 21 +#define SXE2_RX_CTXT_VSI_ID_W 10 + +#define SXE2_PF_CTRLQ_FW_BASE 0x00312000 +#define SXE2_PF_CTRLQ_FW_ATQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0000) +#define SXE2_PF_CTRLQ_FW_ARQBAL (SXE2_PF_CTRLQ_FW_BASE + 0x0080) +#define SXE2_PF_CTRLQ_FW_ATQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0100) +#define SXE2_PF_CTRLQ_FW_ARQBAH (SXE2_PF_CTRLQ_FW_BASE + 0x0180) +#define SXE2_PF_CTRLQ_FW_ATQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0200) +#define SXE2_PF_CTRLQ_FW_ARQLEN (SXE2_PF_CTRLQ_FW_BASE + 0x0280) +#define SXE2_PF_CTRLQ_FW_ATQH (SXE2_PF_CTRLQ_FW_BASE + 0x0300) +#define SXE2_PF_CTRLQ_FW_ARQH (SXE2_PF_CTRLQ_FW_BASE + 0x0380) +#define SXE2_PF_CTRLQ_FW_ATQT (SXE2_PF_CTRLQ_FW_BASE + 0x0400) +#define SXE2_PF_CTRLQ_FW_ARQT (SXE2_PF_CTRLQ_FW_BASE + 0x0480) + +#define SXE2_PF_CTRLQ_MBX_BASE 0x00316000 +#define SXE2_PF_CTRLQ_MBX_ATQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE100) +#define SXE2_PF_CTRLQ_MBX_ATQBAH (SXE2_PF_CTRLQ_MBX_BASE + 0xE180) +#define SXE2_PF_CTRLQ_MBX_ATQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE200) +#define SXE2_PF_CTRLQ_MBX_ATQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE280) +#define SXE2_PF_CTRLQ_MBX_ATQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE300) +#define SXE2_PF_CTRLQ_MBX_ARQBAL (SXE2_PF_CTRLQ_MBX_BASE + 0xE380) +#define SXE2_PF_CTRLQ_MBX_ARQBAH (SXE2_PF_CTRLQ_MBX_BASE + 0xE400) +#define SXE2_PF_CTRLQ_MBX_ARQLEN (SXE2_PF_CTRLQ_MBX_BASE + 0xE480) +#define SXE2_PF_CTRLQ_MBX_ARQH (SXE2_PF_CTRLQ_MBX_BASE + 0xE500) +#define SXE2_PF_CTRLQ_MBX_ARQT (SXE2_PF_CTRLQ_MBX_BASE + 0xE580) + +#define SXE2_CMD_REG_LEN_M 0x3FF +#define SXE2_CMD_REG_LEN_VFE_M BIT(28) +#define SXE2_CMD_REG_LEN_OVFL_M BIT(29) +#define SXE2_CMD_REG_LEN_CRIT_M BIT(30) +#define SXE2_CMD_REG_LEN_ENABLE_M BIT(31) + +#define SXE2_CMD_REG_HEAD_M 0x3FF + +#define SXE2_PF_CTRLQ_FW_HW_STS (SXE2_PF_CTRLQ_FW_BASE + 0x0500) +#define SXE2_PF_CTRLQ_FW_ATQ_IDLE_MASK BIT(0) +#define SXE2_PF_CTRLQ_FW_ARQ_IDLE_MASK BIT(1) + +#define SXE2_TOP_CFG_BASE 0x00292000 +#define SXE2_HW_VER (SXE2_TOP_CFG_BASE + 0x48c) +#define SXE2_HW_FPGA_VER_M SXE2_BITS_MASK(0xFFF, 0) + +#define SXE2_FW_VER (SXE2_TOP_CFG_BASE + 0x214) +#define SXE2_FW_VER_BUILD_M SXE2_BITS_MASK(0xFF, 0) +#define SXE2_FW_VER_FIX_M SXE2_BITS_MASK(0xFF, 8) +#define SXE2_FW_VER_SUB_M SXE2_BITS_MASK(0xFF, 16) +#define SXE2_FW_VER_MAIN_M SXE2_BITS_MASK(0xFF, 24) +#define SXE2_FW_VER_FIX_SHIFT (8) +#define SXE2_FW_VER_SUB_SHIFT (16) +#define SXE2_FW_VER_MAIN_SHIFT (24) + +#define SXE2_FW_COMP_VER_ADDR (SXE2_TOP_CFG_BASE + 0x20c) + +#define SXE2_STATUS SXE2_FW_VER + +#define SXE2_FW_STATE (SXE2_TOP_CFG_BASE + 0x210) + +#define SXE2_FW_HEARTBEAT (SXE2_TOP_CFG_BASE + 0x218) + +#define SXE2_FW_MISC (SXE2_TOP_CFG_BASE + 0x21c) +#define SXE2_FW_MISC_MODE_M SXE2_BITS_MASK(0xF, 0) +#define SXE2_FW_MISC_POP_M SXE2_BITS_MASK(0x80000000, 0) + +#define SXE2_TX_OE_BASE 0x00030000 +#define SXE2_RX_OE_BASE 0x00050000 + +#define SXE2_PFP_L2TAGSEN(_i) (SXE2_TX_OE_BASE + 0x00300 + ((_i) * 4)) +#define SXE2_VSI_L2TAGSTXVALID(_i) \ + (SXE2_TX_OE_BASE + 0x01000 + ((_i) * 4)) +#define SXE2_VSI_TIR0(_i) (SXE2_TX_OE_BASE + 0x01C00 + ((_i) * 4)) +#define SXE2_VSI_TIR1(_i) (SXE2_TX_OE_BASE + 0x02800 + ((_i) * 4)) +#define SXE2_VSI_TAR(_i) (SXE2_TX_OE_BASE + 0x04C00 + ((_i) * 4)) +#define SXE2_VSI_TSR(_i) (SXE2_RX_OE_BASE + 0x18000 + ((_i) * 4)) + +#define 
SXE2_STATS_TX_LAN_CONFIG(_i) (SXE2_TX_OE_BASE + 0x08300 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_PKT_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08340 + ((_i) * 4)) +#define SXE2_STATS_TX_LAN_BYTE_CNT_GET(_i) (SXE2_TX_OE_BASE + 0x08380 + ((_i) * 4)) + +#define SXE2_STATS_RX_CONFIG(_i) (SXE2_RX_OE_BASE + 0x230B0 + ((_i) * 4)) +#define SXE2_STATS_RX_LAN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230C0 + ((_i) * 8)) +#define SXE2_STATS_RX_LAN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23120 + ((_i) * 8)) +#define SXE2_STATS_RX_FD_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x230E0 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23100 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_IN_BYTE_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23140 + ((_i) * 8)) +#define SXE2_STATS_RX_MNG_OUT_PKT_CNT_GET(_i) (SXE2_RX_OE_BASE + 0x23160 + ((_i) * 8)) + +#define SXE2_L2TAG_ID_STAG 0 +#define SXE2_L2TAG_ID_OUT_VLAN1 1 +#define SXE2_L2TAG_ID_OUT_VLAN2 2 +#define SXE2_L2TAG_ID_VLAN 3 + +#define SXE2_PFP_L2TAGSEN_ALL_TAG 0xFF +#define SXE2_PFP_L2TAGSEN_DVM BIT(10) + +#define SXE2_VSI_TSR_STRIP_TAG_S 0 +#define SXE2_VSI_TSR_SHOW_TAG_S 4 + +#define SXE2_VSI_TSR_ID_STAG BIT(0) +#define SXE2_VSI_TSR_ID_OUT_VLAN1 BIT(1) +#define SXE2_VSI_TSR_ID_OUT_VLAN2 BIT(2) +#define SXE2_VSI_TSR_ID_VLAN BIT(3) + +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_S 0 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG1_VALID BIT(3) +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_S 4 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_ID_M 0x7 +#define SXE2_VSI_L2TAGSTXVALID_L2TAG2_VALID BIT(7) +#define SXE2_VSI_L2TAGSTXVALID_TIR0_ID_S 16 +#define SXE2_VSI_L2TAGSTXVALID_TIR0_VALID BIT(19) +#define SXE2_VSI_L2TAGSTXVALID_TIR1_ID_S 20 +#define SXE2_VSI_L2TAGSTXVALID_TIR1_VALID BIT(23) + +#define SXE2_VSI_L2TAGSTXVALID_ID_STAG 0 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN1 2 +#define SXE2_VSI_L2TAGSTXVALID_ID_OUT_VLAN2 3 +#define SXE2_VSI_L2TAGSTXVALID_ID_VLAN 4 + +#define SXE2_SWITCH_OG_BASE 0x00140000 +#define SXE2_SWITCH_SWE_BASE 0x00150000 +#define SXE2_SWITCH_RG_BASE 0x00160000 + +#define SXE2_VSI_RX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01074 + ((_i) * 4)) +#define SXE2_VSI_TX_SWITCH_CTRL(_i) (SXE2_SWITCH_RG_BASE + 0x01C74 + ((_i) * 4)) + +#define SXE2_VSI_RX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TX_SW_CTRL_LOOPBACK_EN BIT(1) +#define SXE2_VSI_TX_SW_CTRL_LAN_EN BIT(2) +#define SXE2_VSI_TX_SW_CTRL_MACAS_EN BIT(3) +#define SXE2_VSI_TX_SW_CTRL_VLAN_PRUNE BIT(9) + +#define SXE2_VSI_TAR_UNTAGGED_SHIFT (16) + +#define SXE2_PCIE_SYS_READY 0x38c +#define SXE2_PCIE_SYS_READY_CORER_ASSERT BIT(0) +#define SXE2_PCIE_SYS_READY_STOP_DROP_DONE BIT(2) +#define SXE2_PCIE_SYS_READY_R5 BIT(3) +#define SXE2_PCIE_SYS_READY_STOP_DROP BIT(16) + +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS 0x78 +#define SXE2_PCIE_DEV_CTRL_DEV_STATUS_TRANS_PENDING BIT(21) + +#define SXE2_TOP_CFG_CORE (SXE2_TOP_CFG_BASE + 0x0630) +#define SXE2_TOP_CFG_CORE_RST_CODE 0x09FBD586 + +#define SXE2_PFGEN_CTRL (0x00336000) +#define SXE2_PFGEN_CTRL_PFSWR BIT(0) + +#define SXE2_VFGEN_CTRL(_vf) (0x00337000 + ((_vf) * 4)) +#define SXE2_VFGEN_CTRL_VFSWR BIT(0) + +#define SXE2_VF_VRC_VFGEN_RSTAT(_vf) (0x00338000 + (_vf) * 4) +#define SXE2_VF_VRC_VFGEN_VFRSTAT (0x3) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VFR (0) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_COMPLETE (BIT(0)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_VF_ACTIVE (BIT(1)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_MASK \ + (BIT(2)) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF \ + (0x300) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_NO_VFR \ + (0) +#define 
SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_VFR (1) +#define SXE2_VF_VRC_VFGEN_VFRSTAT_FORVF_MASK \ + (BIT(10)) + +#define SXE2_GLGEN_VFLRSTAT(_reg) (0x0033A000 + ((_reg) * 4)) + +#define SXE2_ACCEPT_RULE_TAGGED_S 0 +#define SXE2_ACCEPT_RULE_UNTAGGED_S 16 + +#define SXE2_VF_RXQ_BASE(_VF) (0x000b0800 + ((_VF) * 4)) +#define SXE2_VF_RXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_RXQ_BASE_FIRST_Q_M (0x7FF << SXE2_VF_RXQ_BASE_FIRST_Q_S) +#define SXE2_VF_RXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_RXQ_BASE_Q_NUM_M (0x7FF << SXE2_VF_RXQ_BASE_Q_NUM_S) + +#define SXE2_VF_RXQ_MAPENA(_VF) (0x000b0400 + ((_VF) * 4)) +#define SXE2_VF_RXQ_MAPENA_M BIT(0) + +#define SXE2_VF_TXQ_BASE(_VF) (0x00040400 + ((_VF) * 4)) +#define SXE2_VF_TXQ_BASE_FIRST_Q_S 0 +#define SXE2_VF_TXQ_BASE_FIRST_Q_M (0x3FFF << SXE2_VF_TXQ_BASE_FIRST_Q_S) +#define SXE2_VF_TXQ_BASE_Q_NUM_S 16 +#define SXE2_VF_TXQ_BASE_Q_NUM_M (0xFF << SXE2_VF_TXQ_BASE_Q_NUM_S) + +#define SXE2_VF_TXQ_MAPENA(_VF) (0x00045000 + ((_VF) * 4)) +#define SXE2_VF_TXQ_MAPENA_M BIT(0) + +#define PRI_PTP_BASEADDR 0x2a8000 + +#define GLTSYN (PRI_PTP_BASEADDR + 0x0) +#define GLTSYN_ENA_M BIT(0) + +#define GLTSYN_CMD (PRI_PTP_BASEADDR + 0x4) +#define GLTSYN_CMD_INIT_TIME 0x01 +#define GLTSYN_CMD_INIT_INCVAL 0x02 +#define GLTSYN_CMD_ADJ_TIME 0x04 +#define GLTSYN_CMD_ADJ_TIME_AT_TIME 0x0C +#define GLTSYN_CMD_LATCHING_SHTIME 0x80 + +#define GLTSYN_SYNC (PRI_PTP_BASEADDR + 0x8) +#define GLTSYN_SYNC_PLUS_1NS 0x1 +#define GLTSYN_SYNC_MINUS_1NS 0x2 +#define GLTSYN_SYNC_EXEC 0x3 +#define GLTSYN_SYNC_GEN_PULSE 0x4 + +#define GLTSYN_SEM (PRI_PTP_BASEADDR + 0xC) +#define GLTSYN_SEM_BUSY_M BIT(0) + +#define GLTSYN_STAT (PRI_PTP_BASEADDR + 0x10) +#define GLTSYN_STAT_EVENT0_M BIT(0) +#define GLTSYN_STAT_EVENT1_M BIT(1) +#define GLTSYN_STAT_EVENT2_M BIT(2) + +#define GLTSYN_TIME_SUBNS (PRI_PTP_BASEADDR + 0x20) +#define GLTSYN_TIME_NS (PRI_PTP_BASEADDR + 0x24) +#define GLTSYN_TIME_S_H (PRI_PTP_BASEADDR + 0x28) +#define GLTSYN_TIME_S_L (PRI_PTP_BASEADDR + 0x2C) + +#define GLTSYN_SHTIME_SUBNS (PRI_PTP_BASEADDR + 0x30) +#define GLTSYN_SHTIME_NS (PRI_PTP_BASEADDR + 0x34) +#define GLTSYN_SHTIME_S_H (PRI_PTP_BASEADDR + 0x38) +#define GLTSYN_SHTIME_S_L (PRI_PTP_BASEADDR + 0x3C) + +#define GLTSYN_SHADJ_SUBNS (PRI_PTP_BASEADDR + 0x40) +#define GLTSYN_SHADJ_NS (PRI_PTP_BASEADDR + 0x44) + +#define GLTSYN_INCVAL_NS (PRI_PTP_BASEADDR + 0x50) +#define GLTSYN_INCVAL_SUBNS (PRI_PTP_BASEADDR + 0x54) + +#define GLTSYN_TGT_NS(_i) \ + (PRI_PTP_BASEADDR + 0x60 + ((_i) * 16)) +#define GLTSYN_TGT_S_H(_i) (PRI_PTP_BASEADDR + 0x64 + ((_i) * 16)) +#define GLTSYN_TGT_S_L(_i) (PRI_PTP_BASEADDR + 0x68 + ((_i) * 16)) + +#define GLTSYN_EVENT_NS(_i) \ + (PRI_PTP_BASEADDR + 0xA0 + ((_i) * 16)) + +#define GLTSYN_EVENT_S_H(_i) (PRI_PTP_BASEADDR + 0xA4 + ((_i) * 16)) +#define GLTSYN_EVENT_S_H_MASK (0xFFFF) + +#define GLTSYN_EVENT_S_L(_i) (PRI_PTP_BASEADDR + 0xA8 + ((_i) * 16)) + +#define GLTSYN_AUXOUT(_i) \ + (PRI_PTP_BASEADDR + 0xD0 + ((_i) * 4)) +#define GLTSYN_AUXOUT_OUT_ENA BIT(0) +#define GLTSYN_AUXOUT_OUT_MOD (0x03 << 1) +#define GLTSYN_AUXOUT_OUTLVL BIT(3) +#define GLTSYN_AUXOUT_INT_ENA BIT(4) +#define GLTSYN_AUXOUT_PULSEW (0x1fff << 3) + +#define GLTSYN_CLKO(_i) \ + (PRI_PTP_BASEADDR + 0xE0 + ((_i) * 4)) + +#define GLTSYN_AUXIN(_i) (PRI_PTP_BASEADDR + 0xF4 + ((_i) * 4)) +#define GLTSYN_AUXIN_RISING_EDGE BIT(0) +#define GLTSYN_AUXIN_FALLING_EDGE BIT(1) +#define GLTSYN_AUXIN_ENABLE BIT(4) + +#define CGMAC_CSR_BASE 0x2B4000 + +#define CGMAC_PORT_OFFSET 0x00004000 + +#define PFP_CGM_TX_TSMEM(_port, _i) \ + (CGMAC_CSR_BASE + 0x100 + \ + + 
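/*
 * A sketch of reading the latched PHC time through the GLTSYN block
 * above. Not part of the patch, and the exact latch/semaphore protocol
 * is an assumption: take the semaphore, request a shadow-time latch,
 * then read the SHTIME registers coherently.
 */
static u64 example_gltsyn_read_shtime(u8 __iomem *hw_addr)
{
	u32 ns, sec_lo, sec_hi;

	while (readl(hw_addr + GLTSYN_SEM) & GLTSYN_SEM_BUSY_M)
		cpu_relax();

	writel(GLTSYN_CMD_LATCHING_SHTIME, hw_addr + GLTSYN_CMD);
	writel(GLTSYN_SYNC_EXEC, hw_addr + GLTSYN_SYNC);

	ns = readl(hw_addr + GLTSYN_SHTIME_NS);
	sec_lo = readl(hw_addr + GLTSYN_SHTIME_S_L);
	sec_hi = readl(hw_addr + GLTSYN_SHTIME_S_H);
	writel(0, hw_addr + GLTSYN_SEM);	/* assumed release */

	return (((u64)sec_hi << 32) | sec_lo) * NSEC_PER_SEC + ns;
}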
CGMAC_PORT_OFFSET * (_port) + ((_i) * 4)) + +#define PFP_CGM_TX_TXHI(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x108 + ((_i) * 8)) +#define PFP_CGM_TX_TXLO(_port, _i) (CGMAC_CSR_BASE + CGMAC_PORT_OFFSET * (_port) + 0x10C + ((_i) * 8)) + +#define CGMAC_CSR_MAC0_OFFSET 0x2B4000 +#define CGMAC_CSR_MAC_OFFSET(_i) (CGMAC_CSR_MAC0_OFFSET + ((_i) * 0x4000)) + +#define PFP_CGM_MAC_TX_TSMEM(_phy, _i) \ + (CGMAC_CSR_MAC_OFFSET(_phy) + 0x100 + \ + ((_i) * 4)) + +#define PFP_CGM_MAC_TX_TXHI(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x108 + ((_i) * 8)) +#define PFP_CGM_MAC_TX_TXLO(_phy, _i) (CGMAC_CSR_MAC_OFFSET(_phy) + 0x10C + ((_i) * 8)) + +#define SXE2_VF_GLINT_CEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_S 11 +#define SXE2_VF_GLINT_CEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_GLINT_CEQCTL_CAUSE_ENA_M BIT(30) +#define SXE2_VF_GLINT_CEQCTL(_INT) (0x0026492C + ((_INT) * 4)) + +#define SXE2_VF_PFINT_AEQCTL_MSIX_INDX_M SXE2_BITS_MASK(0x7FF, 0) +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_S 11 +#define SXE2_VF_VPINT_AEQCTL_ITR_INDX_M SXE2_BITS_MASK(0x3, 11) +#define SXE2_VF_VPINT_AEQCTL_CAUSE_ENA_M BIT(30) +#define SXE2_VF_VPINT_AEQCTL(_VF) (0x0026052c + ((_VF) * 4)) + +#define SXE2_IPSEC_TX_BASE (0x2A0000) +#define SXE2_IPSEC_RX_BASE (0x2A2000) + +#define SXE2_IPSEC_RX_IPSIDX_ADDR (SXE2_IPSEC_RX_BASE + 0x0084) +#define SXE2_IPSEC_RX_IPSIDX_RST (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_VBI_SHIFT (18) +#define SXE2_IPSEC_RX_IPSIDX_VBI_MASK (0x00040000) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_SHIFT (17) +#define SXE2_IPSEC_RX_IPSIDX_SWRITE_MASK (0x00020000) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_SHIFT (4) +#define SXE2_IPSEC_RX_IPSIDX_SA_IDX_MASK (0x0000fff0) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_SHIFT (2) +#define SXE2_IPSEC_RX_IPSIDX_TABLE_MASK (0x0000000c) + +#define SXE2_IPSEC_RX_IPSIPID_ADDR (SXE2_IPSEC_RX_BASE + 0x0088) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSIPID_IP_ID_X_MASK (0x000000ff) + +#define SXE2_IPSEC_RX_IPSSPI0_ADDR (SXE2_IPSEC_RX_BASE + 0x008c) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_SHIFT (0) +#define SXE2_IPSEC_RX_IPSSPI0_SPI_X_MASK (0xffffffff) + +#define SXE2_IPSEC_RX_IPSSPI1_ADDR (SXE2_IPSEC_RX_BASE + 0x0090) +#define SXE2_IPSEC_RX_IPSSPI1_SPI_Y_MASK (0xffffffff) + +#define SXE2_PAUSE_STATS_BASE(port) (0x002b2000 + (port) * 0x4000) +#define SXE2_TXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0894) +#define SXE2_TXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0a18) +#define SXE2_TXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a20 + 8 * (pri))) +#define SXE2_TXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0a60 + 8 * (pri))) +#define SXE2_TXPFCXONTOXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0aa0 + 8 * (pri))) +#define SXE2_RXPAUSEXONFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0988) +#define SXE2_RXPAUSEXOFFFRAMES_LO(port) (SXE2_PAUSE_STATS_BASE(port) + 0x0b28) +#define SXE2_RXPFCXONFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b30 + 8 * (pri))) +#define SXE2_RXPFCXOFFFRAMES_LO(port, pri) (SXE2_PAUSE_STATS_BASE(port) + \ + (0x0b70 + 8 * (pri))) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_internal_ver.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_internal_ver.h new file mode 100644 index 0000000000000000000000000000000000000000..b786433c5fb477b368408c4bac3e6281588cea77 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_internal_ver.h @@ -0,0 +1,39 
@@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_internal_ver.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_INTERNAL_VER_H__ +#define __SXE2_INTERNAL_VER_H__ + +#define SXE2_VER_MAJOR_OFFSET (16) +#define SXE2_MK_VER(major, minor) \ + ((major) << SXE2_VER_MAJOR_OFFSET | (minor)) +#define SXE2_MK_VER_MAJOR(ver) (((ver) >> SXE2_VER_MAJOR_OFFSET) & 0xff) +#define SXE2_MK_VER_MINOR(ver) ((ver) & 0xff) + +#define SXE2_ITR_VER_MAJOR_V100 1 +#define SXE2_ITR_VER_MAJOR_V200 2 + +#define SXE2_ITR_VER_MAJOR 1 +#define SXE2_ITR_VER_MINOR 1 +#define SXE2_ITR_VER SXE2_MK_VER(SXE2_ITR_VER_MAJOR, SXE2_ITR_VER_MINOR) + +#define SXE2_CTRL_VER_IS_V100(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V100) +#define SXE2_CTRL_VER_IS_V200(ver) (SXE2_MK_VER_MAJOR(ver) == SXE2_ITR_VER_MAJOR_V200) + +#define SXE2LIB_ITR_VER_MAJOR 1 +#define SXE2LIB_ITR_VER_MINOR 1 +#define SXE2LIB_ITR_VER SXE2_MK_VER(SXE2LIB_ITR_VER_MAJOR, SXE2LIB_ITR_VER_MINOR) + +#define SXE2_DRV_CLI_VER_MAJOR 1 +#define SXE2_DRV_CLI_VER_MINOR 1 +#define SXE2_DRV_CLI_VER \ + SXE2_MK_VER(SXE2_DRV_CLI_VER_MAJOR, SXE2_DRV_CLI_VER_MINOR) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ioctl.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ioctl.h new file mode 100644 index 0000000000000000000000000000000000000000..defd0b7ab3250490d5591a88920bbf6da70eaccb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_ioctl.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_ioctl.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_IOCTL_H__ +#define __SXE2_IOCTL_H__ + +#include "sxe2_internal_ver.h" + +struct sxe2_ioctl_sync_cmd { + u32 ver; + u32 resv; + u64 trace_id; + u32 timeout; + u8 resv1[4]; + void *in_data; + u32 in_len; + u8 resv2[4]; + void *out_data; + u32 out_len; + u8 resv3[4]; +}; + +#define SXE2_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct sxe2_ioctl_sync_cmd) +#define SXE2_CMD_IOCTL_SYNC_DRV_CMD _IOWR('M', 2, struct sxe2_ioctl_sync_cmd) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_misc.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_misc.h new file mode 100644 index 0000000000000000000000000000000000000000..9a9124fc7a74fe99f5752733fb99da84edadeeaa --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
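/*
 * Illustrative user-space invocation of the synchronous command ioctl
 * defined above (not part of the patch). "/dev/sxe2" is a placeholder;
 * the real character-device name comes from the cdev registration, which
 * is outside this header. Assumes the kernel-style fixed-width types are
 * visible to the caller.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int example_sync_cmd(void *req, u32 req_len, void *resp, u32 resp_len)
{
	struct sxe2_ioctl_sync_cmd cmd;
	int fd, ret;

	fd = open("/dev/sxe2", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.ver = SXE2_DRV_CLI_VER;	/* from sxe2_internal_ver.h */
	cmd.timeout = 1000;		/* unit assumed to be ms */
	cmd.in_data = req;
	cmd.in_len = req_len;
	cmd.out_data = resp;
	cmd.out_len = resp_len;

	ret = ioctl(fd, SXE2_CMD_IOCTL_SYNC_CMD, &cmd);
	close(fd);
	return ret;
}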
+ * + * @file: sxe2_misc.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_MISC_H__ +#define __SXE2_MISC_H__ + +#define REG_FW_MISC_MASK_MODE (0xF) +#define REG_FW_MISC_MASK_RSV0 (0xF0) +#define REG_FW_MISC_MASK_RSV1 (0xF00) +#define REG_FW_MISC_MASK_RSV2 (0xF000) +#define REG_FW_MISC_MASK_POP (0x80000000) + +#define REG_FW_MISC_MASK_MODE_OFST (0) +#define REG_FW_MISC_MASK_RSV0_OFST (4) +#define REG_FW_MISC_MASK_RSV1_OFST (8) +#define REG_FW_MISC_MASK_RSV2_OFST (12) +#define REG_FW_MISC_MASK_POP_OFST (31) + +enum sxe2_nic_mode { + SXE2_NIC_MODE_NORMAL = 0, + SXE2_NIC_MODE_NCD = 1, + + SXE2_NIC_MODE_MAX = 0xF, +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_msg.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_msg.h new file mode 100644 index 0000000000000000000000000000000000000000..6fb5055f51621a5ea2e75cb876dfd31bc5ee2462 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_msg.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2_MSG_H__ +#define __SXE2_MSG_H__ + +#if defined(SXE2_FW) || defined(SXE2_SUPPORT_UEFI) + +#include "sxe2_drv_type.h" +#endif + +#ifdef PS3_CLI_SXE2 +#include "sxe2_drv_type.h" +#endif + +#ifdef SXE2_LINUX_DRIVER +#include +#endif + +enum sfp_type_identifier { + SXE2_SFP_TYPE_UNKNOW = 0x00, + SXE2_SFP_TYPE_SFP = 0x03, + + SXE2_SFP_TYPE_QSFP_PLUS = 0x0D, + SXE2_SFP_TYPE_QSFP28 = 0x11, + + SXE2_SFP_TYPE_MAX = 0xFF, +}; + +#ifndef SFP_DEFINE +#define SFP_DEFINE + +#define SXE2_SFP_EEP_WR 0x1 +#define SXE2_SFP_EEP_QSFP 0x1 + +enum sfp_bus_addr { + SXE2_SFP_EEP_I2C_ADDR0 = 0xA0, + SXE2_SFP_EEP_I2C_ADDR1 = 0xA2, + + SXE2_SFP_EEP_I2C_ADDR_NR = 0xFFFF, +}; + +struct sxe2_sfp_req { + u8 is_wr; + u8 is_qsfp; + __le16 bus_addr; + __le16 page_cnt; + __le16 offset; + __le16 data_len; + __le16 rvd; + u8 data[]; +}; + +struct sxe2_sfp_resp { + u8 is_wr; + u8 is_qsfp; + __le16 data_len; + u8 data[]; +}; + +enum sfp_page_cnt { + SXE2_SFP_EEP_PAGE_CNT0 = 0, + SXE2_SFP_EEP_PAGE_CNT1, + SXE2_SFP_EEP_PAGE_CNT2, + SXE2_SFP_EEP_PAGE_CNT3, + + SXE2_SFP_EEP_PAGE_CNT20 = 20, + SXE2_SFP_EEP_PAGE_CNT21 = 21, + + SXE2_SFP_EEP_PAGE_CNT_NR = 0xFFFF, +}; + +#define SXE2_SFP_E2P_I2C_7BIT_ADDR0 (SXE2_SFP_EEP_I2C_ADDR0 >> 1) +#define SXE2_SFP_E2P_I2C_7BIT_ADDR1 (SXE2_SFP_EEP_I2C_ADDR1 >> 1) + +#define SXE2_QSFP_PAGE_OFST_START 128 +#define SXE2_SFP_EEP_OFST_MAX 255 +#define SXE2_SFP_EEP_LEN_MAX 256 +#endif + +#ifndef FW_STATE_DEFINE +#define FW_STATE_DEFINE + +#define SXE2_FW_STATUS_MAIN_SHIF (16) +#define SXE2_FW_STATUS_MAIN_MASK (0xFF0000) +#define SXE2_FW_STATUS_SUB_MASK (0xFFFF) +enum Sxe2FwStateMain { + SXE2_FW_STATE_MAIN_UNDEFINED = 0x00, + SXE2_FW_STATE_MAIN_INIT = 0x10000, + SXE2_FW_STATE_MAIN_RUN = 0x20000, + SXE2_FW_STATE_MAIN_ABNOMAL = 0x30000, +}; + +enum Sxe2FwState { + SXE2_FW_START_STATE_UNDEFINED = SXE2_FW_STATE_MAIN_UNDEFINED, + SXE2_FW_START_STATE_INIT_BASE = (SXE2_FW_STATE_MAIN_INIT + 0x1), + SXE2_FW_START_STATE_SCAN_DEVICE = (SXE2_FW_STATE_MAIN_INIT + 0x20), + SXE2_FW_START_STATE_FINISHED = (SXE2_FW_STATE_MAIN_RUN + 0x0), + SXE2_FW_START_STATE_UPGRADE = (SXE2_FW_STATE_MAIN_RUN + 0x1), + SXE2_FW_START_STATE_SYNC = (SXE2_FW_STATE_MAIN_RUN + 0x2), + SXE2_FW_RUNNING_STATE_ABNOMAL = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x1), + SXE2_FW_RUNNING_STATE_ABNOMAL_CORE1 = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x2), + 
SXE2_FW_RUNNING_STATE_ABNOMAL_HEART = (SXE2_FW_STATE_MAIN_ABNOMAL + 0x3), + + SXE2_FW_START_STATE_MASK = (SXE2_FW_STATUS_MAIN_MASK | SXE2_FW_STATUS_SUB_MASK), +}; +#endif + +#ifndef LED_DEFINE +#define LED_DEFINE +enum sxe2_led_mode { + SXE2_IDENTIFY_LED_BLINK_ON = 0, + SXE2_IDENTIFY_LED_BLINK_OFF, + SXE2_IDENTIFY_LED_ON, + SXE2_IDENTIFY_LED_OFF, + SXE2_IDENTIFY_LED_RESET, +}; + +struct sxe2_led_ctrl { + u32 mode; + u32 duration; + +}; + +struct sxe2_led_ctrl_resp { + u32 ack; +}; +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_spec.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..20338dcd57c87077dc415a03c92db084a10845cb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_spec.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_spec.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_SPEC__ +#define __SXE2_SPEC__ + +#define SXE2_TXSCHED_LAYER_MAX_7 7 +#define SXE2_TXSCHED_LAYER_MAX_4 4 +#define SXE2_TXSCHED_LAYER_MAX_3 3 +#define SXE2_TXSCHED_LEAF_MAX_3072 3072 +#define SXE2_TXSCHED_LEAF_MAX_512 512 +#define SXE2_TXSCHED_LEAF_MAX_256 256 +#define SXE2_TXSCHED_LEAF_MAX_128 128 +#define SXE2_TXSCHED_LEAF_MAX_64 64 + +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 + +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#define SXE2_XDP_TX_Q_NUM 8 + +#ifndef SXE2_TXSCHED_LAYER_MAX +#define SXE2_TXSCHED_LAYER_MAX SXE2_TXSCHED_LAYER_MAX_7 +#endif + +#ifndef SXE2_TXSCHED_LEAF_MAX +#define SXE2_TXSCHED_LEAF_MAX SXE2_TXSCHED_LEAF_MAX_3072 +#endif + +#ifndef SXE2_VSI_PF_ASSURED_NUM +#define SXE2_VSI_PF_ASSURED_NUM 256 +#endif + +#ifndef SXE2_PF_NUM +#define SXE2_PF_NUM 8 +#endif + +#ifndef SXE2_VSI_NUM +#define SXE2_VSI_NUM 768 +#endif + +#ifndef SXE2_QUEUE_NUM +#define SXE2_QUEUE_NUM 2048 +#endif + +#ifndef SXE2_IRQ_NUM +#define SXE2_IRQ_NUM 2048 +#endif + +#ifndef SXE2_VF_NUM +#define SXE2_VF_NUM 256 +#endif + +#ifndef SXE2_MAX_MACVLANS +#define SXE2_MAX_MACVLANS 16 +#endif + +#define SXE2_BUF_SIZE_FW_TQ (8 * 1024) +#define SXE2_BUF_SIZE_FW_RQ (8 * 1024) + +#ifndef SXE2_BUF_SIZE_MBX_TQ +#define SXE2_BUF_SIZE_MBX_TQ (4 * 1024) +#endif + +#ifndef SXE2_BUF_SIZE_MBX_RQ +#define SXE2_BUF_SIZE_MBX_RQ (4 * 1024) +#endif + +#ifndef SXE2_DFLT_IRQS_MAX_CNT +#define SXE2_DFLT_IRQS_MAX_CNT 64 +#endif + +#ifndef SXE2_DFLT_IRQS_MIN_CNT +#define SXE2_DFLT_IRQS_MIN_CNT 8 + +#endif + +#ifndef SXE2_VF_RSS_Q_NUM +#define SXE2_VF_RSS_Q_NUM 16 +#endif + +#ifndef SXE2_IPSEC_RX_SA_DEPTH +#define SXE2_IPSEC_RX_SA_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_RX_DCAM_DEPTH +#define SXE2_IPSEC_RX_DCAM_DEPTH 4096 +#endif + +#ifndef SXE2_IPSEC_TX_SA_DEPTH +#define SXE2_IPSEC_TX_SA_DEPTH 4096 +#endif + +#define SXE2_MACSEC_ENABLE + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_type.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_type.h new file mode 100644 index 0000000000000000000000000000000000000000..bb2dbd8f323a37b78424a64503505907c1906ea4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_type.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
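/*
 * Illustrative decode of the firmware state word above (not part of the
 * patch): the main state occupies the bits selected by
 * SXE2_FW_STATUS_MAIN_MASK, the sub-state the low 16 bits. SXE2_FW_STATE
 * is the register offset from sxe2_host_regs.h.
 */
static bool example_fw_is_running(u8 __iomem *hw_addr)
{
	u32 state = readl(hw_addr + SXE2_FW_STATE) & SXE2_FW_START_STATE_MASK;
	u32 main_state = state & SXE2_FW_STATUS_MAIN_MASK;

	if (main_state == SXE2_FW_STATE_MAIN_ABNOMAL)
		pr_warn("fw abnormal, sub-state 0x%x\n",
			state & SXE2_FW_STATUS_SUB_MASK);

	return main_state == SXE2_FW_STATE_MAIN_RUN;
}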
+ * + * @file: sxe2_type.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_TYPES_H__ +#define __SXE2_TYPES_H__ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#if defined __BYTE_ORDER__ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN_BITFIELD +#elif __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#endif +#elif defined __BIG_ENDIAN__ +#define __BIG_ENDIAN_BITFIELD +#elif defined __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN_BITFIELD +#elif defined RTE_TOOLCHAIN_MSVC +#define __LITTLE_ENDIAN_BITFIELD +#else +#error "Unknown endianness." +#endif +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef char s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +typedef s8 S8; +typedef s16 S16; +typedef s32 S32; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#define STATIC static + +#define ETH_ALEN 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_uefi_def.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_uefi_def.h new file mode 100644 index 0000000000000000000000000000000000000000..332d0d2e4b154aaf24f08caecf0f82562d952e3d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_uefi_def.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2_uefi_def.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_UEFI_DEF_H__ +#define __SXE2_UEFI_DEF_H__ + +#ifdef UEFI_SUPPORT_MIPS +#include +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#define __BIG_ENDIAN_BITFIELD +#endif + +#if BYTE_ORDER == LITTLE_ENDIAN +#define __LITTLE_ENDIAN_BITFIELD +#else +#define __BIG_ENDIAN_BITFIELD +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_version.h b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_version.h new file mode 100644 index 0000000000000000000000000000000000000000..61d6311f3bf3f05b1b19a3bd9ba267dc089e0b49 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/include/sxe2_version.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
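/*
 * Why the __*_ENDIAN_BITFIELD macros derived above matter: C bitfields
 * that mirror hardware descriptors must be declared in opposite member
 * order on big-endian hosts to keep the wire layout fixed. A generic,
 * hypothetical example (not a real sxe2 descriptor):
 */
struct example_desc_word {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u32 len	: 14;
	u32 rsv	: 16;
	u32 eop	: 1;
	u32 dd	: 1;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u32 dd	: 1;
	u32 eop	: 1;
	u32 rsv	: 16;
	u32 len	: 14;
#endif
};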
+ * + * @file: sxe2_version.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_VER_H__ +#define __SXE2_VER_H__ + +#define SXE2_VERSION "0.0.0.0" +#define SXE2_COMMIT_ID "75c3d1c" +#define SXE2_BRANCH "feature/develop-rc-euler6.6-test" +#define SXE2_BUILD_TIME "2026-04-30 02:40:04" + +#define SXE2_DRV_ARCH "x86_64" +#define SXE2_DRV_NAME "sxe2" +#define SXE2VF_DRV_NAME "sxe2vf" +#define SXE2_DRV_LICENSE "GPL v2" +#define SXE2_DRV_AUTHOR "SXE2" +#define SXE2_DRV_DESCRIPTION "SXE2 Linux Driver" +#define SXE2VF_DRV_DESCRIPTION "SXE2 Virtual Function Linux Driver" + +#define SXE2_FW_NAME "soc" +#define SXE2_FW_ARCH "arm32" + +#ifndef SXE2_CFG_RELEASE +#define SXE2_FW_BUILD_MODE "debug" +#else +#define SXE2_FW_BUILD_MODE "release" +#endif + +#define SXE2_FW_RUN_MODE 6 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.c new file mode 100644 index 0000000000000000000000000000000000000000..84ae9eff2ddab8a4f706d103ef283b603e7e0454 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_com_ioctl.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2vf_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2vf_queue.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_rx.h" +#include "sxe2_cmd.h" +#include "sxe2vf_vsi.h" +#include "sxe2_log.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2vf_mbx_msg.h" +#include "sxe2vf_ethtool.h" +#include "sxe2vf_com_l2_filter.h" +#include "sxe2vf.h" +#include "sxe2_mbx_public.h" +#include "sxe2vf_com_stats.h" + +static s32 sxe2vf_com_handshake_disable(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + return 0; +} + +static s32 +sxe2vf_com_user_vf_passthrough_to_kernel_pf(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_com_user_data_passthrough_req *req = NULL; + struct sxe2_com_user_data_passthrough_resp *resp = NULL; + struct sxe2vf_msg_params params = { 0 }; + s32 ret; + u32 req_len; + u32 resp_len; + + if (!cmd_buf) + return -EINVAL; + + req_len = sizeof(struct sxe2_com_user_data_passthrough_req) + + cmd_buf->req_len * sizeof(u8); + req = kzalloc(req_len, GFP_KERNEL); + if (!req) { + LOG_ERROR_BDF("kzalloc req failed.\n"); + ret = -ENOMEM; + goto l_end; + } + + if (cmd_buf->req_len > 0 && cmd_buf->req_data) { + if (copy_from_user(req->cmd_buff, cmd_buf->req_data, cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + req->req_len = cmd_buf->req_len; + } + + resp_len = sizeof(struct sxe2_com_user_data_passthrough_resp) + + cmd_buf->resp_len * sizeof(u8); + resp = kzalloc(resp_len, GFP_KERNEL); + if (!resp) { + LOG_ERROR_BDF("kzalloc resp failed.\n"); + ret = -ENOMEM; + goto l_end; + } + req->resp_len = cmd_buf->resp_len; + memcpy(&req->obj, obj, sizeof(struct sxe2_obj)); + req->opcode = cmd_buf->opcode; + req->vsi_id = cmd_buf->vsi_id; + + LOG_INFO_BDF("opcode:%d, req_len=%d, resp_len=%d\n", req->opcode, req->req_len, + req->resp_len); + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_PASSTHROUGH_USER_VF_DATA, req, req_len, + resp, resp_len); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("mbx msg send 
failed.(err:%d)\n", ret); + goto l_end; + } + + if (cmd_buf->resp_len > 0 && cmd_buf->resp_data) { + if (copy_to_user(cmd_buf->resp_data, resp->cmd_buff, cmd_buf->resp_len)) { + ret = -EFAULT; + goto l_end; + } + } + +l_end: + kfree(req); + kfree(resp); + return ret; +} + +STATIC s32 sxe2vf_user_vsi_hw_cfg(struct sxe2vf_adapter *adapter, + struct sxe2_drv_vsi_create_req_resp *vsi_info, + bool is_clear) +{ + struct sxe2_vf_vsi_cfg vsi_cfg = {}; + struct sxe2vf_msg_params params = { 0 }; + s32 ret = 0; + u16 vsi_type = 8; + + memset(&vsi_cfg, 0, sizeof(vsi_cfg)); + vsi_cfg.txq_base_idx = cpu_to_le16(vsi_info->used_queues.base_idx_in_pf); + vsi_cfg.txq_cnt = cpu_to_le16(vsi_info->used_queues.queues_cnt); + vsi_cfg.rxq_base_idx = cpu_to_le16(vsi_info->used_queues.base_idx_in_pf); + vsi_cfg.rxq_cnt = cpu_to_le16(vsi_info->used_queues.queues_cnt); + vsi_cfg.irq_base_idx = cpu_to_le16(vsi_info->used_msix.base_idx_in_func); + vsi_cfg.irq_cnt = cpu_to_le16(vsi_info->used_msix.msix_vectors_cnt); + + vsi_cfg.is_clear = is_clear; + vsi_cfg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vsi_ids[SXE2VF_VSI_TYPE_DPDK]); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_VSI_CFG, &vsi_cfg, sizeof(vsi_cfg), NULL, + 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("mbx msg send failed.(err:%d)\n", ret); + ret = -EINVAL; + goto l_end; + } + + vsi_info->vsi_id = vsi_cfg.vsi_id; + vsi_info->vsi_type = vsi_type; + +l_end: + return ret; +} + +static s32 sxe2vf_com_cap_get(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_dev_caps_resp *resp; + struct sxe2vf_res_caps caps; + s32 ret = 0; + + resp = kmalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + memset(resp, 0, sizeof(*resp)); + + ret = sxe2vf_dpdk_caps_get(adapter, &caps); + if (ret) { + LOG_ERROR_BDF("sxe2vf_dpdk_caps_get failed\n"); + ret = -EINVAL; + goto l_end; + } + + resp->dev_type = SXE2_DEV_T_VF; + + resp->queue_caps.base_idx_in_pf = caps.txq_base; + resp->queue_caps.queues_cnt = caps.txq_cnt; + + resp->msix_caps.msix_vectors_cnt = caps.irq_cnt; + resp->msix_caps.base_idx_in_func = caps.irq_base; + + resp->rss_hash_caps.hash_key_size = caps.rss_key_size; + resp->rss_hash_caps.lut_key_size = caps.rss_lut_size; + + resp->vsi_caps.dpdk_vsi_id = 0xFFFF; + resp->vsi_caps.vsi_type = 0xFF; + + if (adapter->vsi_ctxt.vf_vsi) + resp->vsi_caps.kernel_vsi_id = adapter->vsi_ctxt.vf_vsi->vsi_id; + + resp->txsch_caps.layer_cap = adapter->txsch_cap.layer_cap; + resp->txsch_caps.prio_num = adapter->txsch_cap.prio_num; + resp->txsch_caps.tm_mid_node_num = adapter->txsch_cap.tm_mid_node_num; + resp->cap_flags = SXE2_DEV_CAPS_OFFLOAD_L2 | SXE2_DEV_CAPS_OFFLOAD_VLAN | + SXE2_DEV_CAPS_OFFLOAD_RSS | SXE2_DEV_CAPS_OFFLOAD_FNAV | + SXE2_DEV_CAPS_OFFLOAD_TM; + if (adapter->ipsec_ctxt.max_tx_sa_cnt && adapter->ipsec_ctxt.max_rx_sa_cnt) + resp->cap_flags |= SXE2_DEV_CAPS_OFFLOAD_IPSEC; + + if (copy_to_user(cmd_buf->resp_data, resp, sizeof(*resp))) { + ret = -EFAULT; + goto l_end; + } + cmd_buf->resp_len = sizeof(*resp); + +l_end: + kfree(resp); + return ret; +} + +static s32 sxe2vf_com_link_info_get(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_drv_link_info_resp resp; + s32 ret = 0; + + memset(&resp, 0, sizeof(resp)); + ret = sxe2vf_com_link_info_request(adapter, &resp.status, &resp.speed); + if (ret) { + LOG_ERROR_BDF("get vf link info failed ret:%d\n", ret); + resp.status = 0; + 
resp.speed = SXE2_LINK_SPEED_VF_UNKNOW;
+	}
+
+	if (copy_to_user(cmd_buf->resp_data, &resp, sizeof(resp))) {
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = sizeof(resp);
+
+l_end:
+	return ret;
+}
+
+static s32 sxe2vf_com_main_vsi_create(struct sxe2vf_adapter *adapter,
+				      struct sxe2_obj *obj,
+				      struct sxe2_drv_cmd_params *cmd_buf)
+{
+	s32 ret = 0;
+	struct sxe2_drv_vsi_create_req_resp *req_resp;
+
+	req_resp = (struct sxe2_drv_vsi_create_req_resp *)
+		   sxe2vf_com_req_data_copy_to_kernel(cmd_buf);
+	if (!req_resp) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2vf_user_vsi_hw_cfg(adapter, req_resp, false);
+	if (ret) {
+		LOG_ERROR_BDF("user vsi create failed ret:%d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (copy_to_user(cmd_buf->resp_data, req_resp, sizeof(*req_resp))) {
+		ret = -EFAULT;
+		goto l_free_vsi;
+	}
+
+	cmd_buf->resp_len = sizeof(*req_resp);
+	goto l_end;
+
+l_free_vsi:
+	(void)sxe2vf_user_vsi_hw_cfg(adapter, req_resp, true);
+
+l_end:
+	kfree(req_resp);
+	return ret;
+}
+
+static s32 sxe2vf_com_vsi_destroy(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+				  struct sxe2_drv_cmd_params *cmd_buf)
+{
+	s32 ret = 0;
+	struct sxe2_drv_vsi_free_req *free_req;
+	struct sxe2_drv_vsi_create_req_resp req_resp = {0};
+
+	free_req = (struct sxe2_drv_vsi_free_req *)
+		   sxe2vf_com_req_data_copy_to_kernel(cmd_buf);
+	if (!free_req) {
+		ret = -EINVAL;
+		goto l_end;
+	}
+	req_resp.vsi_id = free_req->vsi_id;
+	ret = sxe2vf_user_vsi_hw_cfg(adapter, &req_resp, true);
+	if (ret) {
+		LOG_ERROR_BDF("user vsi destroy failed ret:%d\n", ret);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+l_end:
+	kfree(free_req);
+	return ret;
+}
+
+static s32 sxe2vf_com_q_map(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+			    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	LOG_ERROR_BDF("VF does not support queue mapping\n");
+	return -EINVAL;
+}
+
+static struct sxe2vf_ioctl_cmd_table driver_cmd_table[] = {
+	{SXE2_DRV_CMD_HANDSHAKE_DISABLE, sxe2vf_com_handshake_disable},
+	{SXE2_DRV_CMD_DEV_GET_CAPS, sxe2vf_com_cap_get},
+	{SXE2_DRV_CMD_DEV_GET_SWITCHDEV_INFO,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+
+	{SXE2_DRV_CMD_DEV_GET_INFO, sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_DEV_GET_FW_INFO,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_LINK_STATUS_GET, sxe2vf_com_link_info_get},
+
+	{SXE2_DRV_CMD_VSI_CREATE, sxe2vf_com_main_vsi_create},
+	{SXE2_DRV_CMD_VSI_FREE, sxe2vf_com_vsi_destroy},
+
+	{SXE2_DRV_CMD_VSI_STATS_GET, sxe2vf_com_vsi_stat_get},
+	{SXE2_DRV_CMD_VSI_STATS_CLEAR, sxe2vf_com_vsi_stat_clear},
+
+	{SXE2_DRV_CMD_MAC_ADDR_UC, sxe2vf_com_switch_filter_uc},
+	{SXE2_DRV_CMD_MAC_ADDR_MC, sxe2vf_com_switch_filter_mc},
+	{SXE2_DRV_CMD_VLAN_FILTER_SWITCH, sxe2vf_com_switch_filter_vlan_control},
+	{SXE2_DRV_CMD_VLAN_FILTER_ADD_DEL, sxe2vf_com_switch_filter_vlan_rule},
+	{SXE2_DRV_CMD_PROMISC_CFG, sxe2vf_com_switch_filter_promisc},
+	{SXE2_DRV_CMD_ALLMULTI_CFG, sxe2vf_com_switch_filter_allmulti},
+
+	{SXE2_DRV_CMD_TXQ_CFG_ENABLE,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_RXQ_CFG_ENABLE,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+
+	{SXE2_DRV_CMD_EVT_IRQ_BAND_RXQ,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+
+	{SXE2_DRV_CMD_RXQ_DISABLE, sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_TXQ_DISABLE, sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_FLOW_FILTER_ADD,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_FLOW_FILTER_DEL,
+	 sxe2vf_com_user_vf_passthrough_to_kernel_pf},
+	{SXE2_DRV_CMD_FLOW_FNAV_STAT_ALLOC,
+	 
sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_FLOW_FNAV_STAT_FREE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_FLOW_FNAV_STAT_QUERY, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_VLAN_OFFLOAD_CFG, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_IPSEC_CAP_GET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_IPSEC_TXSA_ADD, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_IPSEC_RXSA_ADD, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_IPSEC_TXSA_DEL, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_IPSEC_RXSA_DEL, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_IPSEC_RESOURCE_CLEAR, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_SCHED_ROOT_TREE_ALLOC, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_SCHED_ROOT_TREE_RELEASE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_SCHED_ROOT_CHILDREN_DELETE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_SCHED_TM_ADD_MID_NODE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_SCHED_TM_ADD_QUEUE_NODE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_RX_MAP_SET, sxe2vf_com_q_map}, + {SXE2_DRV_CMD_TX_MAP_SET, sxe2vf_com_q_map}, + {SXE2_DRV_CMD_TX_RX_MAP_GET, sxe2vf_com_q_map}, + {SXE2_DRV_CMD_TX_RX_MAP_RESET, sxe2vf_com_q_map}, + {SXE2_DRV_CMD_TX_RX_MAP_INFO_CLEAR, sxe2vf_com_q_map}, + + {SXE2_DRV_CMD_RSS_KEY_SET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_RSS_LUT_SET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_RSS_FUNC_SET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_RSS_HF_ADD, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_RSS_HF_DEL, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_RSS_HF_CLEAR, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_VLAN_CFG_QUERY, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_UDPTUNNEL_GET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + {SXE2_DRV_CMD_VSI_SRCVSI_PRUNE, + sxe2vf_com_user_vf_passthrough_to_kernel_pf}, + + {SXE2_DRV_CMD_OPT_EEP_GET, sxe2vf_com_user_vf_passthrough_to_kernel_pf}, +}; + +static s32 sxe2vf_drv_cmd_len_check(struct sxe2_drv_cmd_params *cmd_buf) +{ + return 0; +} + +static s32 sxe2_vf_status_check(struct sxe2vf_adapter *adapter, + struct sxe2_drv_cmd_params *cmd_buf) +{ + + return 0; +} + +static s32 sxe2vf_drv_msg_check(struct sxe2vf_adapter *adapter, + struct sxe2_drv_cmd_params *cmd_buf) +{ + s32 ret = 0; + + ret = sxe2vf_drv_cmd_len_check(cmd_buf); + if (ret) + return ret; + + ret = sxe2_vf_status_check(adapter, cmd_buf); + if (ret) + return ret; + + return ret; +} + +static struct sxe2vf_ioctl_cmd_table *sxe2vf_drv_cmd_handle_get(u32 opcode) +{ + u32 i; + struct sxe2vf_ioctl_cmd_table *cmd_func = NULL; + + for (i = 0; i < ARRAY_SIZE(driver_cmd_table); i++) { + if (driver_cmd_table[i].opcode == opcode) { + cmd_func = &driver_cmd_table[i]; + break; + } + } + + return cmd_func; +} + +s32 sxe2vf_com_cmd_send(void *ad, struct sxe2_obj *obj, struct sxe2_drv_cmd_params *param) +{ + s32 ret; + struct sxe2vf_ioctl_cmd_table *cmd_table = NULL; + struct sxe2vf_adapter *adapter = ad; + + if (sxe2vf_drv_msg_check(adapter, param)) { + ret = -EINVAL; + goto l_end; + } + + cmd_table = sxe2vf_drv_cmd_handle_get(param->opcode); + if (cmd_table && cmd_table->func) { + ret = cmd_table->func(adapter, obj, param); + } else { + LOG_ERROR_BDF("Can't find cmd 
opcode:0x%x vsi_id:%u trace_id:0x%llx.\n",
+			      param->opcode, param->vsi_id, param->trace_id);
+		ret = -EINVAL;
+	}
+
+l_end:
+	return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.h
new file mode 100644
index 0000000000000000000000000000000000000000..298a309b2a97fee2dde6d333fea2d4cbbbe2963c
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_ioctl.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2vf_com_ioctl.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2_COM_IOCTL_H__
+#define __SXE2_COM_IOCTL_H__
+
+#include "sxe2vf.h"
+
+struct sxe2vf_ioctl_cmd_table {
+	u32 opcode;
+	s32 (*func)(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+		    struct sxe2_drv_cmd_params *param);
+};
+
+s32 sxe2vf_com_cmd_send(void *adapter, struct sxe2_obj *obj,
+			struct sxe2_drv_cmd_params *param);
+
+static inline void *sxe2vf_com_req_data_copy_to_kernel(struct sxe2_drv_cmd_params *param)
+{
+	void *k_buffer;
+
+	k_buffer = kmalloc(param->req_len, GFP_KERNEL);
+	if (!k_buffer)
+		return NULL;
+
+	if (copy_from_user(k_buffer, param->req_data, param->req_len)) {
+		kfree(k_buffer);
+		return NULL;
+	}
+
+	return k_buffer;
+}
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.c
new file mode 100644
index 0000000000000000000000000000000000000000..56ee3ac1b2bd4a8f68a2e54c3481e699fa94097b
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2vf_com_l2_filter.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2vf_com_ioctl.h"
+#include "sxe2_ioctl_chnl.h"
+#include "sxe2vf_vsi.h"
+#include "sxe2_drv_cmd.h"
+#include "sxe2_com_cdev.h"
+#include "sxe2vf_l2_filter.h"
+#include "sxe2vf_com_l2_filter.h"
+
+s32 sxe2vf_com_switch_filter_uc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+				struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_mac_filter_cfg_req mac_filter_cfg_req = {0};
+	s32 ret = 0;
+	s32 i;
+	u16 vsi_id;
+	u8 addr[ETH_ALEN];
+	u8 is_add;
+
+	if (sizeof(struct sxe2_mac_filter_cfg_req) != cmd_buf->req_len) {
+		LOG_ERROR_BDF("cmd len err %lu != %u\n",
+			      sizeof(struct sxe2_mac_filter_cfg_req), cmd_buf->req_len);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (copy_from_user(&mac_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) {
+		ret = -EFAULT;
+		goto l_end;
+	}
+	vsi_id = le16_to_cpu(mac_filter_cfg_req.vsi_id);
+	is_add = mac_filter_cfg_req.is_add;
+	for (i = 0; i < ETH_ALEN; i++)
+		addr[i] = mac_filter_cfg_req.addr[i];
+
+	if (is_add)
+		ret = sxe2vf_ucmd_unicast_mac_add(adapter, vsi_id, addr);
+	else
+		ret = sxe2vf_ucmd_unicast_mac_del(adapter, vsi_id, addr);
+
+	if (ret) {
+		LOG_ERROR_BDF("user driver(vsi=%u) %s unicast mac addr:%pM fail, ret=%d\n",
+			      vsi_id, is_add ? "add" : "del", addr, ret);
+	} else {
+		LOG_DEBUG_BDF("user driver(vsi=%u) %s unicast mac addr:%pM\n", vsi_id,
+			      is_add ? 
"add" : "del", addr); + } +l_end: + return ret; +} + +s32 sxe2vf_com_switch_filter_mc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_mac_filter_cfg_req mac_filter_cfg_req = {0}; + s32 ret = 0; + s32 i; + u16 vsi_id; + u8 addr[ETH_ALEN]; + u8 is_add; + + if (sizeof(struct sxe2_mac_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_mac_filter_cfg_req), cmd_buf->req_len); + ret = -EINVAL; + goto l_end; + } + + if (copy_from_user(&mac_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(mac_filter_cfg_req.vsi_id); + is_add = mac_filter_cfg_req.is_add; + for (i = 0; i < ETH_ALEN; i++) + addr[i] = mac_filter_cfg_req.addr[i]; + + if (is_add) + ret = sxe2vf_ucmd_multi_broad_mac_add(adapter, vsi_id, addr); + else + ret = sxe2vf_ucmd_multi_broad_mac_del(adapter, vsi_id, addr); + + if (ret) { + LOG_ERROR_BDF("user driver(vsi=%u) %s multi mac addr:%pM fail, ret=%d\n", + vsi_id, is_add ? "add" : "del", addr, ret); + } else { + LOG_DEBUG_BDF("user driver(vsi=%u) %s multi mac addr:%pM\n", vsi_id, + is_add ? "add" : "del", addr); + } +l_end: + return ret; +} + +s32 sxe2vf_com_switch_filter_vlan_control(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vlan_filter_switch_req vlan_filter_switch_req = {0}; + s32 ret = 0; + u16 vsi_id; + bool is_oper_enable; + + if (sizeof(struct sxe2_vlan_filter_switch_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_vlan_filter_switch_req), + cmd_buf->req_len); + ret = -EINVAL; + goto l_end; + } + + if (copy_from_user(&vlan_filter_switch_req, cmd_buf->req_data, + cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(vlan_filter_switch_req.vsi_id); + is_oper_enable = (bool)vlan_filter_switch_req.is_oper_enable; + ret = sxe2vf_ucmd_vlan_filter_cfg(adapter, vsi_id, is_oper_enable); + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s vlan filter control fail, ret=%d\n", + vsi_id, is_oper_enable ? "enable" : "disable", ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s vlan filter control.\n", vsi_id, + is_oper_enable ? "enable" : "disable"); +l_end: + return ret; +} + +s32 sxe2vf_com_switch_filter_vlan_rule(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_vlan_filter_cfg_req vlan_filter_cfg_req = {0}; + struct sxe2vf_vlan vlan = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_vlan_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_vlan_filter_cfg_req), cmd_buf->req_len); + ret = -EINVAL; + goto l_end; + } + + if (copy_from_user(&vlan_filter_cfg_req, cmd_buf->req_data, cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(vlan_filter_cfg_req.vsi_id); + vlan.vid = le16_to_cpu(vlan_filter_cfg_req.vlan_id); + vlan.tpid = le16_to_cpu(vlan_filter_cfg_req.tpid_id); + vlan.prio = vlan_filter_cfg_req.prio; + is_add = vlan_filter_cfg_req.is_add; + ret = sxe2vf_ucmd_vlan_rule_process(adapter, vsi_id, &vlan, is_add); + if (ret) { + LOG_ERROR_BDF("user driver(vsi=%u) %s vlan tpid:%u vid:%u prio:%u fail, ret=%d\n", + vsi_id, is_add ? "add" : "del", vlan.tpid, vlan.vid, + vlan.prio, ret); + } else { + LOG_DEBUG_BDF("user driver(vsi=%u) %s vlan tpid:%u vid:%u prio:%u\n", + vsi_id, is_add ? 
"add" : "del", vlan.tpid, vlan.vid, vlan.prio); + } +l_end: + return ret; +} + +s32 sxe2vf_com_switch_filter_promisc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_promisc_filter_cfg_req promisc_filter_cfg_req = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_promisc_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_promisc_filter_cfg_req), + cmd_buf->req_len); + ret = -EINVAL; + goto l_end; + } + + if (copy_from_user(&promisc_filter_cfg_req, cmd_buf->req_data, + cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(promisc_filter_cfg_req.vsi_id); + is_add = promisc_filter_cfg_req.is_add; + if (is_add) + ret = sxe2vf_ucmd_promisc_rule_add(adapter, vsi_id); + else + ret = sxe2vf_ucmd_promisc_rule_del(adapter, vsi_id); + + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s promisc fail, ret=%d\n", vsi_id, + is_add ? "set" : "clear", ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s promisc.\n", vsi_id, + is_add ? "set" : "clear"); +l_end: + return ret; +} + +s32 sxe2vf_com_switch_filter_allmulti(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf) +{ + struct sxe2_promisc_filter_cfg_req promisc_filter_cfg_req = {0}; + s32 ret = 0; + u16 vsi_id; + u8 is_add; + + if (sizeof(struct sxe2_promisc_filter_cfg_req) != cmd_buf->req_len) { + LOG_ERROR_BDF("cmd len err %lu != %u\n", + sizeof(struct sxe2_promisc_filter_cfg_req), + cmd_buf->req_len); + ret = -EINVAL; + goto l_end; + } + + if (copy_from_user(&promisc_filter_cfg_req, cmd_buf->req_data, + cmd_buf->req_len)) { + ret = -EFAULT; + goto l_end; + } + + vsi_id = le16_to_cpu(promisc_filter_cfg_req.vsi_id); + is_add = promisc_filter_cfg_req.is_add; + if (is_add) + ret = sxe2vf_ucmd_allmulti_rule_add(adapter, vsi_id); + else + ret = sxe2vf_ucmd_allmulti_rule_del(adapter, vsi_id); + + if (ret) + LOG_ERROR_BDF("user driver(vsi=%u) %s promisc fail, ret=%d\n", vsi_id, + is_add ? "set" : "clear", ret); + else + LOG_DEBUG_BDF("user driver(vsi=%u) %s promisc.\n", vsi_id, + is_add ? "set" : "clear"); + +l_end: + return ret; +} + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..5f3648464894524d52bad61061aa53dbd0984e9c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_l2_filter.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_com_l2_filter.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2VF_COM_L2_FILTER_H__ +#define __SXE2VF_COM_L2_FILTER_H__ + +#include "sxe2vf.h" +s32 sxe2vf_com_switch_filter_uc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_switch_filter_mc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_switch_filter_vlan_control(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_switch_filter_vlan_rule(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_switch_filter_promisc(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_switch_filter_allmulti(struct sxe2vf_adapter *adapter, + struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.c new file mode 100644 index 0000000000000000000000000000000000000000..5f3d08390180ac3f5d03a3a572574dfc243b509c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_com_stats.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2vf_com_ioctl.h" +#include "sxe2_ioctl_chnl.h" +#include "sxe2_log.h" +#include "sxe2vf_vsi.h" +#include "sxe2_drv_cmd.h" +#include "sxe2_com_cdev.h" +#include "sxe2vf_com_stats.h" +#include "sxe2_mbx_public.h" + +STATIC s32 sxe2vf_com_get_stats_msg_send(struct sxe2vf_adapter *adapter, + struct sxe2_vf_sw_stats *req, + struct sxe2_vf_hw_stats_rsp *rsp) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret = 0; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_STATS_GET, req, sizeof(*req), rsp, + sizeof(*rsp)); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("get vf stats msg send failed.\n"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe2vf_com_clear_stats_msg_send(struct sxe2vf_adapter *adapter, + u16 vsi_id_in_dev) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_vsi_res msg = {0}; + s32 ret = 0; + + msg.vsi_id = cpu_to_le16(vsi_id_in_dev); + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_STATS_CLEAR, &msg, sizeof(msg), NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("clear vf stats msg send failed.\n"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC void sxe2vf_com_vsi_stats_calc(const struct sxe2_vf_vsi_hw_stats *new_stats, + struct sxe2_vf_vsi_hw_stats *old_stats, + struct sxe2_vf_vsi_hw_stats *stats) +{ + stats->rx_vsi_unicast_packets = new_stats->rx_vsi_unicast_packets - + old_stats->rx_vsi_unicast_packets; + stats->rx_vsi_bytes = new_stats->rx_vsi_bytes - old_stats->rx_vsi_bytes; + stats->tx_vsi_unicast_packets = new_stats->tx_vsi_unicast_packets - + old_stats->tx_vsi_unicast_packets; + stats->tx_vsi_bytes = new_stats->tx_vsi_bytes - old_stats->tx_vsi_bytes; + stats->rx_vsi_multicast_packets = new_stats->rx_vsi_multicast_packets - + old_stats->rx_vsi_multicast_packets; + stats->tx_vsi_multicast_packets = 
new_stats->tx_vsi_multicast_packets -
+					  old_stats->tx_vsi_multicast_packets;
+	stats->rx_vsi_broadcast_packets = new_stats->rx_vsi_broadcast_packets -
+					  old_stats->rx_vsi_broadcast_packets;
+	stats->tx_vsi_broadcast_packets = new_stats->tx_vsi_broadcast_packets -
+					  old_stats->tx_vsi_broadcast_packets;
+}
+
+s32 sxe2vf_com_vsi_stat_get(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+			    struct sxe2_drv_cmd_params *cmd_buf)
+{
+	struct sxe2_drv_vsi_stats_req *req = (struct sxe2_drv_vsi_stats_req *)
+		sxe2vf_com_req_data_copy_to_kernel(cmd_buf);
+	struct sxe2_drv_vsi_stats_resp resp = {0};
+	struct sxe2_vf_sw_stats msg = {0};
+	struct sxe2_vf_hw_stats_rsp rsp = {{0}, 0};
+	struct sxe2_vf_vsi_hw_stats stats = {0};
+	s32 ret = 0;
+
+	if (!req) {
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	msg.sw_stats.rx_bytes = req->sw_stats.rx_bytes;
+	msg.sw_stats.rx_packets = req->sw_stats.rx_packets;
+	msg.sw_stats.tx_bytes = req->sw_stats.tx_bytes;
+	msg.sw_stats.tx_packets = req->sw_stats.tx_packets;
+	msg.vsi_id = req->vsi_id;
+	msg.fnav_stats_idx = cpu_to_le16(SXE2_VF_FNAV_INVALID_STAT_IDX);
+
+	ret = sxe2vf_com_get_stats_msg_send(adapter, &msg, &rsp);
+	if (ret) {
+		LOG_ERROR_BDF("get vf stats msg send failed.\n");
+		goto l_end;
+	}
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) {
+		resp.rx_vsi_unicast_packets =
+			le64_to_cpu(rsp.hw_stats.rx_vsi_unicast_packets);
+		resp.rx_vsi_bytes = le64_to_cpu(rsp.hw_stats.rx_vsi_bytes);
+		resp.tx_vsi_unicast_packets =
+			le64_to_cpu(rsp.hw_stats.tx_vsi_unicast_packets);
+		resp.tx_vsi_bytes = le64_to_cpu(rsp.hw_stats.tx_vsi_bytes);
+		resp.rx_vsi_multicast_packets =
+			le64_to_cpu(rsp.hw_stats.rx_vsi_multicast_packets);
+		resp.tx_vsi_multicast_packets =
+			le64_to_cpu(rsp.hw_stats.tx_vsi_multicast_packets);
+		resp.rx_vsi_broadcast_packets =
+			le64_to_cpu(rsp.hw_stats.rx_vsi_broadcast_packets);
+		resp.tx_vsi_broadcast_packets =
+			le64_to_cpu(rsp.hw_stats.tx_vsi_broadcast_packets);
+	} else if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_MIXED) {
+		struct sxe2_vf_vsi_hw_stats *old_stats =
+			&adapter->vsi_ctxt.vf_vsi->vsi_stats.parse_vsi_hw_stats;
+
+		sxe2vf_com_vsi_stats_calc(&rsp.hw_stats, old_stats, &stats);
+		resp.rx_vsi_unicast_packets = stats.rx_vsi_unicast_packets;
+		resp.rx_vsi_bytes = stats.rx_vsi_bytes;
+		resp.tx_vsi_unicast_packets = stats.tx_vsi_unicast_packets;
+		resp.tx_vsi_bytes = stats.tx_vsi_bytes;
+		resp.rx_vsi_multicast_packets = stats.rx_vsi_multicast_packets;
+		resp.tx_vsi_multicast_packets = stats.tx_vsi_multicast_packets;
+		resp.rx_vsi_broadcast_packets = stats.rx_vsi_broadcast_packets;
+		resp.tx_vsi_broadcast_packets = stats.tx_vsi_broadcast_packets;
+	} else {
+		LOG_ERROR_BDF("invalid com mode %d\n", sxe2vf_com_mode_get(adapter));
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	if (copy_to_user(cmd_buf->resp_data, &resp, sizeof(resp))) {
+		LOG_ERROR_BDF("copy_to_user failed.\n");
+		ret = -EFAULT;
+		goto l_end;
+	}
+	cmd_buf->resp_len = sizeof(resp);
+
+	LOG_INFO_BDF("sxe2vf com vsi[%d] stats get is completed.\n", req->vsi_id);
+
+l_end:
+	kfree(req);
+	return ret;
+}
+
+s32 sxe2vf_com_vsi_stat_clear(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj,
+			      struct sxe2_drv_cmd_params *cmd_buf)
+{
+	s32 ret = 0;
+	struct sxe2_vf_sw_stats msg = {0};
+	struct sxe2_vf_hw_stats_rsp rsp = {{0}, 0};
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) {
+		ret = sxe2vf_com_clear_stats_msg_send(adapter, cmd_buf->vsi_id);
+		if (ret)
+			LOG_ERROR_BDF("sxe2vf com vsi[%d] stats clear failed.\n",
+				      cmd_buf->vsi_id);
+		goto l_end;
+	} else if (sxe2vf_com_mode_get(adapter) ==
SXE2_COM_MODULE_MIXED) { + msg.vsi_id = cmd_buf->vsi_id; + msg.fnav_stats_idx = cpu_to_le16(SXE2_VF_FNAV_INVALID_STAT_IDX); + + ret = sxe2vf_com_get_stats_msg_send(adapter, &msg, &rsp); + if (ret) { + LOG_ERROR_BDF("get vf stats msg send failed.\n"); + goto l_end; + } + + memcpy(&adapter->vsi_ctxt.vf_vsi->vsi_stats.parse_vsi_hw_stats, + &rsp.hw_stats, sizeof(rsp.hw_stats)); + } else { + LOG_ERROR_BDF("invalid com mode %d\n", sxe2vf_com_mode_get(adapter)); + ret = -EINVAL; + goto l_end; + } + + LOG_INFO_BDF("sxe2vf com vsi[%d] stats clear is completed.\n", cmd_buf->vsi_id); +l_end: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.h new file mode 100644 index 0000000000000000000000000000000000000000..ae6f0dca4a804e1f3dfb1188a96cb3107419b86b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/com_parse/sxe2vf_com_stats.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_com_stats.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2_COM_STATS_H__ +#define __SXE2_COM_STATS_H__ + +#include "sxe2vf.h" + +s32 sxe2vf_com_vsi_stat_get(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +s32 sxe2vf_com_vsi_stat_clear(struct sxe2vf_adapter *adapter, struct sxe2_obj *obj, + struct sxe2_drv_cmd_params *cmd_buf); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf.h new file mode 100644 index 0000000000000000000000000000000000000000..eddc94cf7c599b3fe4f22a9b4919dd8159a96323 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf.h @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
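+ *
+ * Central VF adapter definitions: the per-feature contexts (irq, queue,
+ * VSI, mailbox, RSS, FNAV, IPsec, auxiliary/RDMA), the device and reset
+ * state machines, and the wait/retry constants used by the work tasks.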
+ * + * @file: sxe2vf.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_H__ +#define __SXE2VF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2vf_hw.h" +#include "sxe2vf_irq.h" +#include "sxe2vf_mbx_channel.h" +#include "sxe2vf_queue.h" +#include "sxe2vf_mbx_msg.h" +#include "sxe2vf_aux_drv.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2_spec.h" +#include "sxe2vf_rxft.h" +#include "sxe2vf_ipsec.h" +#include "sxe2vf_trace.h" +#include "sxe2_com_cdev.h" + +#define SXE2_VF_ETH_VSI_CNT 1 +#define SXE2_VF_DPDK_VSI_CNT 1 + +#define SXE2VF_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define SXE2VF_DBG_USER BIT_ULL(31) + +#define DEV_NAME_LEN (16) +#define SXE2VF_WORKQUEUE_NAME_LEN (128) + +#define SXE2VF_MSG_LEVEL_DEFAULT_SHT (3) +#define SXE2VF_DMA_BIT_WIDTH_64 (64) +#define SXE2VF_NO_ACTIVE_CNT (10) + +#define SXE2VF_DEV_FUNC_MASK (0x7) +#define SXE2VF_WOKER_DELAY_5MS (5) +#define SXE2VF_WOKER_DELAY_10MS (10) +#define SXE2VF_WOKER_DELAY_20MS (20) +#define SXE2VF_WOKER_DELAY_30MS (30) + +#define SXE2VF_WOKER_DELAY_1S (1 * HZ) +#define SXE2VF_WOKER_DELAY_2S (2 * HZ) +#define SXE2VF_WOKER_DELAY_5S (5 * HZ) + +#define SXE2VF_RESET_DETEC_WAIT_COUNT (100) +#define SXE2VF_RESET_DONE_WAIT_COUNT (250) +#define SXE2VF_RESET_WAIT_MS (50) +#define SXE2VF_CORER_WAIT_DONE_COUNT (1000) +#define SXE2VF_WAIT_CONFIG_ACCESSIBLE_TIMEOUT_MS (60000) +#ifdef SXE2VF_TEST +#define SXE2VF_RESET_ROBACK_WAIT_COUNT (1) +#else +#define SXE2VF_RESET_ROBACK_WAIT_COUNT (18000) +#endif +#ifdef SXE2VF_TEST +#define SXE2VF_RESET_WAIT_COMPLETE_COUNT (1000) +#define SXE2VF_VFLR_DETEC_WAIT_COUNT (4000) +#else +#define SXE2VF_RESET_WAIT_COMPLETE_COUNT (150) +#define SXE2VF_VFLR_DETEC_WAIT_COUNT (300000) +#endif + +#ifdef SXE2VF_TEST +#define SXE2VF_RESET_ACTIVE_WAIT_COUNT (5) +#define SXE2VF_RESET_COMPLETE_WAIT_COUNT (1) +#define SXE2VF_REMOVE_RESET_DETECT_COUNT (1) +#define SXE2VF_VFLR_ACTIVE_WAIT_COUNT (2) +#define SXE2VF_DEVSTATE_PROC_FAIL_CNT (10) +#else +#define SXE2VF_RESET_ACTIVE_WAIT_COUNT (1000) +#define SXE2VF_RESET_COMPLETE_WAIT_COUNT (1000) +#define SXE2VF_REMOVE_RESET_DETECT_COUNT (15000) +#define SXE2VF_VFLR_ACTIVE_WAIT_COUNT (5000) +#define SXE2VF_DEVSTATE_PROC_FAIL_CNT (3000) +#endif + +#define SXE2VF_ACTIVE_WAIT_INTERVAL (2) +#define SXE2VF_RESET_WAIT_MIN (10) +#define SXE2VF_CORER_DONE_WAIT_INTERVAL (3) + +#define SXE2VF_REQUEST_MSG_WAIT_TIME (50) +#define SXE2VF_MSG_REPLY_WAIT_TIMEOUT msecs_to_jiffies(2500) + +#define SXE2VF_DOWN_WAIT_TIMEOUT msecs_to_jiffies(500) + +#define SXE2VF_ADAPTER_TO_DEV(adapter) (&((adapter)->pdev->dev)) + +#define SXE2VF_DEV_TO_ADAPTER(pdev) (netdev_priv(pci_get_drvdata(pdev))) + +#define sxe2vf_for_each_vsi_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->txqs.q_cnt; (i)++) + +#define sxe2vf_for_each_vsi_rxq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->rxqs.q_cnt; (i)++) + +#define sxe2vf_for_each_vsi_irq(vsi, i) for ((i) = 0; (i) < (vsi)->irqs.cnt; (i)++) + +enum sxe2vf_dev_state { + SXE2VF_DEVSTATE_INITIAL = 0, + SXE2VF_DEVSTATE_RESETTING, + SXE2VF_DEVSTATE_STOPPED, + SXE2VF_DEVSTATE_UNACTIVED, + SXE2VF_DEVSTATE_ACTIVED, + SXE2VF_DEVSTATE_RUNNING, + SXE2VF_DEVSTATE_VFR_REQUEST, + SXE2VF_DEVSTATE_VFR_NOTIFY, + SXE2VF_DEVSTATE_FAULT, +}; + +enum sxe2vf_reset_type { + SXE2VF_RESET_NONE = 0, + SXE2VF_RESET_CORER, + SXE2VF_RESET_VFR, + SXE2VF_RESET_MAX, +}; + +enum sxe2vf_probe_post_state { + SXE2VF_PROBE_POST_INIT_UNSTART = 0, + 
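+	/* the post-probe init sequence advances through the states below in
+	 * order; SXE2VF_PROBE_POST_VER_CHK_FAIL marks a failed version check
+	 */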
SXE2VF_PROBE_POST_INIT_STARTED = 1, + SXE2VF_PROBE_POST_VER_MATCH = 2, + SXE2VF_PROBE_POST_CAPS_INIT = 3, + SXE2VF_PROBE_POST_IRQ_QUEUE_CFG = 4, + SXE2VF_PROBE_POST_INIT_DONE = 5, + SXE2VF_PROBE_POST_VER_CHK_FAIL = 6, + +}; + +enum sxe2vf_wk_type { + SXE2VF_WK_MONITOR, + SXE2VF_WK_MONITOR_IM, + SXE2VF_WK_MBX, + SXE2VF_WK_NOTIFY_MSG, + SXE2VF_WK_HEALTH, +}; + +enum sxe2vf_adapter_flags { + SXE2VF_FLAG_LEGACY_RX_ENABLE = 0, + SXE2VF_FLAG_LRO_ENABLE = 1, + SXE2VF_FLAG_RXQ_DISABLED = 2, + SXE2VF_FLAG_TXQ_DISABLED = 3, + SXE2VF_FLAG_RESET_NOTIFY = 9, + SXE2VF_FLAG_DRV_PROBE_DONE = 14, + SXE2VF_FLAG_NETDEV_REGISTED = 15, + SXE2VF_FLAG_DRV_UP = 16, + SXE2VF_FLAG_DCB_ENABLE = 17, + SXE2VF_FLAG_FLTR_SYNC = 18, + SXE2VF_FLAG_EVENT_IRQ_DISABLED = 19, + SXE2VF_FLAG_SUSPEND = 20, + SXE2VF_FLAG_FNAV_ENABLE = 21, + SXE2VF_FLAG_UPDATE_NETDEV_FEATURES = 22, + SXE2VF_FLAG_FLR_RUNNING = 23, + SXE2VF_FLAG_FNAV_TUNNEL = 25, + SXE2VF_FLAG_DRV_REMOVING = 26, + SXE2VF_FLAG_MTU_CHANGED = 27, + SXE2VF_FLAG_RXFCS_ENABLE = 28, + SXE2VF_FLAGS_NBITS +}; + +enum { SXE2VF_VSI_CLOSE = 0, + SXE2VF_VSI_DISABLE, + SXE2VF_VSI_MAX, +}; + +#define SXE2VF_STATE_MASK (0xFFFF) + +#define SXE2VF_STATE_INIT_INTERNAL_SHT (16) + +struct sxe2vf_mac_filter { + struct list_head mac_addr_list; + struct list_head tmp_sync_list; + struct list_head tmp_unsync_list; + u8 def_mac_addr[ETH_ALEN]; + u8 cur_mac_addr[ETH_ALEN]; +}; + +struct sxe2vf_filter_context { + struct sxe2vf_mac_filter mac_filter; + struct sxe2vf_vlan_info vlan_info; + u32 cur_promisc_flags; +}; + +struct sxe2vf_switch_context { + struct sxe2vf_filter_context filter_ctxt; + struct sxe2vf_filter_context user_fltr_ctxt; + struct mutex mac_addr_lock; + struct mutex flag_lock; +}; + +struct sxe2vf_vsi_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_unnecessary_inner; + u64 rx_lro_packets; + u64 rx_lro_bytes; + u64 rx_vlan_strip; + u64 rx_pkts_sw_drop; + u64 rx_buff_alloc_err; + u64 rx_pg_alloc_fail; + u64 rx_csum_err; + u64 rx_lro_count; + u64 rx_page_alloc; + u64 rx_non_eop_descs; + u64 rx_pa_err; + + u64 tx_packets; + u64 tx_bytes; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_vlan_insert; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_dropped; + u64 tx_xmit_more; + u64 tx_linearize; + u64 tx_busy; + u64 tx_restart; + u64 tx_tso_linearize_chk; +}; + +struct sxe2vf_vsi_stats { + struct sxe2_vf_vsi_hw_stats vsi_hw_stats; + struct sxe2_vf_vsi_hw_stats parse_vsi_hw_stats; + struct sxe2vf_vsi_sw_stats vsi_sw_stats; +}; + +struct sxe2vf_vsi_qs_stats { + struct sxe2vf_queue_stats *txqs_stats; + struct sxe2vf_queue_stats *rxqs_stats; +}; + +struct sxe2vf_vsi { + struct sxe2vf_adapter *adapter; + struct net_device *netdev; + u16 vsi_id; + enum sxe2vf_vsi_type vsi_type; + struct sxe2vf_vsi_irqs irqs; + struct sxe2vf_vsi_queues txqs; + struct sxe2vf_vsi_queues rxqs; + u16 budget; + DECLARE_BITMAP(state, SXE2VF_VSI_MAX); + struct sxe2vf_vsi_stats vsi_stats; + struct sxe2vf_vsi_qs_stats vsi_qs_stats; +}; + +struct sxe2vf_vsi_context { + u16 vsi_cnt_max; + struct mutex lock; + struct sxe2vf_vsi *vf_vsi; + struct sxe2vf_vsi *dpdk_vf_vsi; + u16 vsi_ids[SXE2_VF_MAX_VSI_CNT]; +}; + +enum { SXE2VF_VF_DISABLE = 0, + SXE2VF_VF_MAX, +}; + +struct sxe2vf_dev_context { + struct mutex vf_lock; + bool remove; + DECLARE_BITMAP(state, SXE2VF_VF_MAX); +}; + +enum { SXE2VF_MONITOR_WORK_DISABLED = 1, + SXE2VF_MBX_WORK_DISABLED = 2, + SXE2VF_HEALTH_WORK_DISABLED = 3, +}; + +struct 
sxe2vf_work_context { + unsigned long period; + unsigned long state; + enum sxe2vf_probe_post_state post_state; + enum sxe2vf_dev_state dev_state; + enum sxe2vf_reset_type reset_type; + + spinlock_t state_lock; + struct mutex monitor_lock; + struct delayed_work monitor_wk; + struct work_struct mbx_wk; + struct work_struct msg_handle_wk; + struct workqueue_struct *health_wq; + struct delayed_work health_wk; + struct mutex reset_detect_lock; + + u64 tx_timeout_cnt; + u64 corer_cnt; + u64 vfr_cnt; + u8 failed_cnt; + bool is_send; + bool is_clear; +}; + +struct sxe2vf_log_level_context { + u32 msg_enable; + u64 debug_mask; +}; + +struct sxe2vf_msg_context { + wait_queue_head_t reply_waitqueue; +}; + +struct sxe2vf_link_context { + struct mutex link_lock; + bool link_up; + u32 speed; +}; + +struct sxe2vf_rss_cfg { + struct list_head l_node; + struct sxe2vf_rss_hash_cfg hash_cfg; +}; + +struct sxe2vf_rss_context { + u16 rss_lut_type; + u16 rss_key_size; + u16 rss_lut_size; + u8 *key; + u8 *lut; + struct list_head rss_cfgs; + struct mutex rss_cfgs_lock; + bool init; +}; + +struct sxe2vf_fnav_filter { + struct list_head l_node; + u32 filter_loc; + u32 flow_id; + enum sxe2_fnav_flow_type flow_type; + u16 q_index; + u8 act_type; + u8 has_flex_filed; + struct sxe2vf_fnav_filter_full_key full_key; + struct sxe2_fnav_comm_full_msg full_msg; +}; + +struct sxe2vf_fnav_context { + u16 space_bsize; + u16 space_gsize; + u32 filter_cnt; + struct list_head filter_list; + struct mutex filter_list_lock; + bool init; + u16 stat_idx; + u64 fnav_match; +}; + +struct sxe2vf_adapter { + char dev_name[DEV_NAME_LEN]; + struct pci_dev *pdev; + struct net_device *netdev; + struct sxe2vf_hw hw; + u8 pf_id; + u16 vf_id_in_dev; + struct sxe2vf_irq_context irq_ctxt; + struct sxe2vf_queue_context q_ctxt; + struct sxe2vf_vsi_context vsi_ctxt; + struct sxe2vf_dev_context dev_ctxt; + struct sxe2vf_channel_context channel_ctxt; + struct sxe2vf_msg_context msg_ctxt; + struct sxe2vf_switch_context switch_ctxt; + struct sxe2vf_work_context work_ctxt; + struct sxe2vf_log_level_context log_level_ctxt; + struct sxe2vf_link_context link_ctxt; + struct sxe2vf_aux_context aux_ctxt; + struct sxe2vf_rss_context rss_ctxt; + struct sxe2vf_fnav_context fnav_ctxt; + struct sxe2vf_ipsec_context ipsec_ctxt; + struct work_struct com_work; + struct sxe2_com_context com_ctxt; + struct sxe2_vf_txsch_caps txsch_cap; + + DECLARE_BITMAP(flags, SXE2VF_FLAGS_NBITS); +#ifdef SXE2VF_TEST + u32 work_count; +#endif +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + struct dentry *sxe2vf_debugfs_vf; + struct dentry *sxe2vf_debugfs_vf_drv_mode; +#endif + struct sxe2_vf_ver_msg pf_ver; + enum sxe2_com_module drv_mode; +}; + +struct sxe2vf_res_caps { + u16 vsi_id; + u16 txq_base; + u16 txq_cnt; + u16 rxq_base; + u16 rxq_cnt; + u16 irq_base; + u16 irq_cnt; + u16 rss_lut_type; + u16 rss_key_size; + u16 rss_lut_size; +}; + +static inline bool sxe2vf_post_probe_is_done(struct sxe2vf_adapter *adapter) +{ + return (adapter->work_ctxt.post_state == SXE2VF_PROBE_POST_INIT_DONE); +} + +static inline bool sxe2vf_post_probe_is_start(struct sxe2vf_adapter *adapter) +{ + return (adapter->work_ctxt.post_state > SXE2VF_PROBE_POST_INIT_UNSTART); +} + +void sxe2vf_wkq_schedule(struct sxe2vf_adapter *adapter, enum sxe2vf_wk_type type, const u32 delay); + +void sxe2vf_post_state_update(struct sxe2vf_work_context *work_ctxt, enum sxe2vf_probe_post_state post_state); + +void sxe2vf_dev_state_set(struct sxe2vf_adapter *adapter, enum sxe2vf_dev_state new_state, + enum 
sxe2vf_reset_type new_reset_type); + +void sxe2vf_dev_state_get(struct sxe2vf_adapter *adapter, enum sxe2vf_dev_state *state, + enum sxe2vf_reset_type *reset_type); + +void sxe2vf_wkq_cancel(struct sxe2vf_adapter *adapter, enum sxe2vf_wk_type type); + +s32 sxe2vf_reset_detect(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_dpdk_caps_get(struct sxe2vf_adapter *adapter, struct sxe2vf_res_caps *caps); + +int sxe2vf_com_mode_get(void *adapter); + +int sxe2vf_g_com_mode_get(void); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..21e6006ed42d5a0040b1db2530757c90ba0daad6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_aux_drv.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include "sxe2vf.h" +#include "sxe2vf_netdev.h" +#include "sxe2_log.h" +#include "sxe2vf_irq.h" +#include "sxe2vf_aux_drv.h" +#include "sxe2_version.h" +static DEFINE_IDA(sxe2vf_aux_ida); + +static int sxe2vf_aux_vc_send(struct aux_core_dev_info *cdev_info, u16 vf_id, + u8 *msg, u16 len, u64 session_id) +{ + struct sxe2vf_adapter *adapter; + + if (!cdev_info) + return -EINVAL; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + if (!adapter) + return -EINVAL; + return 0; +} + +static int sxe2vf_aux_vc_send_sync(struct aux_core_dev_info *cdev_info, u8 *msg, + u16 len, u8 *recv_msg, u16 recv_len) +{ + int ret; + struct sxe2vf_adapter *adapter; + + if (!cdev_info || !recv_len || recv_len > MAX_RDMA_MSG_SIZE) + return -EINVAL; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + if (!adapter) + return -EINVAL; + ret = sxe2vf_rdma_msg_send(adapter, msg, len, recv_msg, recv_len); + if (ret) + LOG_ERROR_BDF("failed to send rdma msg to pf,err:%d.\n", ret); + return ret; +} + +static int sxe2vf_aux_request_reset(struct aux_core_dev_info *cdev_info, + enum aux_reset_type reset_type) +{ + struct sxe2vf_adapter *adapter; + + if (!cdev_info) + return -EINVAL; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_VFR_REQUEST, + SXE2VF_RESET_NONE); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + + LOG_INFO_BDF("Trigger reset from rdma.\n"); + return 0; +} + +static int sxe2vf_aux_vc_rdma_qv_map(struct sxe2vf_adapter *adapter, + struct aux_qvlist_info *qvl_info, bool map) +{ + s32 ret; + + ret = sxe2vf_qv_map_msg_send(adapter, qvl_info, map); + if (ret) { + LOG_DEBUG_BDF("rdma qv map err:%d.\n", ret); + ret = -EINVAL; + } + return ret; +} + +static bool sxe2vf_aux_is_adapter_ready(struct sxe2vf_adapter *adapter) +{ + if (test_bit(SXE2VF_FLAG_RESET_NOTIFY, adapter->flags)) { + LOG_DEBUG_BDF("dev reset pending!\n"); + return false; + } + return true; +} + +static int sxe2vf_aux_vc_queue_map_unmap(struct aux_core_dev_info *cdev_info, + struct aux_qvlist_info *qvl_info, bool map) +{ + int err; + struct sxe2vf_adapter *adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + + if (!sxe2vf_aux_is_adapter_ready(adapter)) { + LOG_DEBUG_BDF("Adapter is not ready to map/unmap RDMA queue vector " + "over virtchnl\n"); + return -ENODEV; + } + + err = sxe2vf_aux_vc_rdma_qv_map(adapter, qvl_info, map); + if (err) + LOG_INFO_BDF("Failed to send RDMA queue vector map/unmap message, " + "err %d\n", + err); + 
+ return err; +} + +static s32 sxe2vf_rdma_mac_rule_add(struct sxe2vf_adapter *adapter, u8 *mac) +{ + s32 ret = 0; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + if (!is_multicast_ether_addr(mac)) + return -EADDRNOTAVAIL; + + mutex_lock(&switch_ctxt->mac_addr_lock); + ret = sxe2vf_mac_addr_add(adapter, mac, SXE2VF_MAC_OWNER_ROCE); + if (ret && ret != -EEXIST) { + LOG_DEV_ERR("add mac filter failed, addr %pM, ret %d\n", mac, ret); + } else if (ret == -EEXIST) { + ret = 0; + } + mutex_unlock(&switch_ctxt->mac_addr_lock); + + return ret; +} + +static s32 sxe2vf_rdma_mac_rule_del(struct sxe2vf_adapter *adapter, u8 *mac) +{ + s32 ret = 0; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + if (!is_multicast_ether_addr(mac)) + return -EADDRNOTAVAIL; + + mutex_lock(&switch_ctxt->mac_addr_lock); + ret = sxe2vf_mac_addr_del(adapter, mac, SXE2VF_MAC_OWNER_ROCE); + if (ret && ret != -ENOENT) { + LOG_DEV_ERR("del mac filter failed, addr %pM, ret %d\n", mac, ret); + } else if (ret == -ENOENT) { + ret = 0; + } + mutex_unlock(&switch_ctxt->mac_addr_lock); + + return ret; +} + +static s32 sxe2vf_rdma_send_cmd(struct aux_core_dev_info *cdev_info, + enum sxe2_drv_cmd_opcode opcode, u8 *msg, u16 len, + u8 *recv_msg, u16 recv_len) +{ + s32 ret = -EINVAL; + struct sxe2vf_adapter *adapter; + + if (!cdev_info || !msg || !len || len > MAX_RDMA_MSG_SIZE) + return -EINVAL; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + + ret = sxe2vf_aux_mgr_msg_send(adapter, opcode, msg, len, recv_msg, recv_len); + + return ret; +} + +static s32 sxe2vf_rdma_drv_config(struct aux_core_dev_info *cdev_info, u8 op, + u8 *msg) +{ + s32 ret = -EINVAL; + struct sxe2vf_adapter *adapter; + enum aux_rdma_opcode opcode = op; + + if (!cdev_info || !msg) + return -EINVAL; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + switch (opcode) { + case RDMA_MAC_RULE_ADD: + ret = sxe2vf_rdma_mac_rule_add(adapter, msg); + break; + case RDMA_MAC_RULE_DELETE: + ret = sxe2vf_rdma_mac_rule_del(adapter, msg); + break; + default: + LOG_INFO_BDF("opcode(%d) not support.\n", opcode); + break; + } + + return ret; +} + +static int sxe2vf_rdma_dump_pcap_cmd(struct aux_core_dev_info *cdev_info, u8 *mac, + bool is_add) +{ + struct sxe2vf_adapter *adapter; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + + return sxe2vf_rdma_dump_pcap_msg_send(adapter, mac, is_add); +} + +static struct sxe2_core_ops core_ops = { + .request_reset = sxe2vf_aux_request_reset, + .vc_send = sxe2vf_aux_vc_send, + .vc_send_sync = sxe2vf_aux_vc_send_sync, + .vc_queue_vec_map_unmap = sxe2vf_aux_vc_queue_map_unmap, + .rdma_send_cmd = sxe2vf_rdma_send_cmd, + .dump_pcap_cmd = sxe2vf_rdma_dump_pcap_cmd, + .rdma_drv_config = sxe2vf_rdma_drv_config, +}; + +static struct sxe2_auxiliary_drv * +sxe2vf_rdma_aux_drv_get(struct aux_core_dev_info *cdev_info) +{ + struct auxiliary_device *adev; + + adev = cdev_info->adev; + if (!adev || !adev->dev.driver) + return NULL; + return container_of(adev->dev.driver, struct sxe2_auxiliary_drv, + adrv.driver); +} + +static void sxe2vf_adev_release(struct device *dev) +{ + struct sxe2_auxiliary_device *sxe2_adev; + + sxe2_adev = container_of(dev, struct sxe2_auxiliary_device, adev.dev); + kfree(sxe2_adev); +} + +static s32 sxe2vf_aux_dev_add(struct aux_core_dev_info *cdev_info) +{ + struct sxe2_auxiliary_device *sxe2vf_adev; + struct auxiliary_device *adev; + struct sxe2vf_adapter *adapter; + s32 ret; + + adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev); + if (!adapter) + return -EINVAL; + + 
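+	/* the wrapper embeds the auxiliary_device; on success it is freed by
+	 * sxe2vf_adev_release() when the device's last reference is dropped,
+	 * so only the pre-init error path may kfree() it directly
+	 */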
sxe2vf_adev = kzalloc(sizeof(*sxe2vf_adev), GFP_KERNEL);
+	if (!sxe2vf_adev)
+		return -ENOMEM;
+
+	mutex_lock(&adapter->aux_ctxt.adev_mutex);
+	adev = &sxe2vf_adev->adev;
+	cdev_info->adev = adev;
+	mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+
+	sxe2vf_adev->cdev_info = cdev_info;
+	adev->name = SXE2VF_RDMA_NAME;
+	adev->id = (u32)adapter->aux_ctxt.aux_idx;
+	adev->dev.release = sxe2vf_adev_release;
+	adev->dev.parent = &cdev_info->pdev->dev;
+
+	ret = auxiliary_device_init(adev);
+	if (ret)
+		goto err_vfadev_alloc;
+
+	ret = auxiliary_device_add(adev);
+	if (ret) {
+		/* auxiliary_device_uninit() drops the last reference and
+		 * sxe2vf_adev_release() then frees the wrapper, so it must
+		 * not be kfree()d again here
+		 */
+		auxiliary_device_uninit(adev);
+		cdev_info->adev = NULL;
+		goto out;
+	}
+
+	goto out;
+
+err_vfadev_alloc:
+	kfree(sxe2vf_adev);
+	cdev_info->adev = NULL;
+out:
+	return ret;
+}
+
+static void sxe2vf_aux_dev_delete(struct aux_core_dev_info *cdev_info)
+{
+	struct sxe2vf_adapter *adapter;
+	struct auxiliary_device *adev;
+
+	if (cdev_info->pdev)
+		adapter = SXE2VF_DEV_TO_ADAPTER(cdev_info->pdev);
+	else
+		return;
+
+	LOG_DEBUG_BDF("release aux device!adev is :0x%pK\n", cdev_info->adev);
+
+	mutex_lock(&adapter->aux_ctxt.adev_mutex);
+	if (!cdev_info->adev) {
+		mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+		return;
+	}
+	adev = cdev_info->adev;
+	cdev_info->adev = NULL;
+	mutex_unlock(&adapter->aux_ctxt.adev_mutex);
+
+	if (adev) {
+		auxiliary_device_delete(adev);
+		auxiliary_device_uninit(adev);
+	}
+
+	LOG_DEBUG_BDF("release aux device end\n");
+}
+
+STATIC s32 sxe2vf_aux_device_init(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	u32 aux_irq_start = adapter->irq_ctxt.rdma_offset;
+	struct sxe2vf_aux_context *aux = &adapter->aux_ctxt;
+	struct aux_core_dev_info *cdev_info = &aux->cdev_info;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (adapter->aux_ctxt.init) {
+		LOG_INFO_BDF("aux device is already initialized.\n");
+		return 0;
+	}
+
+	adapter->aux_ctxt.aux_idx = ida_alloc(&sxe2vf_aux_ida, GFP_KERNEL);
+	if (adapter->aux_ctxt.aux_idx < 0) {
+		LOG_ERROR_BDF("failed to allocate device ID for aux drvs\n");
+		return -ENOMEM;
+	}
+
+	cdev_info->hw_addr = adapter->hw.reg_base_addr;
+	cdev_info->cdev_info_id = SXE2_RDMA_INDEX;
+
+	cdev_info->pdev = pdev;
+	cdev_info->netdev = adapter->netdev;
+	cdev_info->vport_id =
+		adapter->vsi_ctxt.vsi_ids[SXE2VF_VSI_TYPE_ETH];
+	cdev_info->pf_id = 0;
+	cdev_info->ops = &core_ops;
+	cdev_info->ftype = AUX_FUNCTION_TYPE_VF;
+	cdev_info->ver.major = AUX_MAJOR_VER;
+	cdev_info->ver.minor = AUX_MINOR_VER;
+	cdev_info->rdma_caps.gen = AUX_RDMA_GEN_1;
+	(void)strscpy(cdev_info->drv_ver, SXE2_VERSION, SXE2_DRV_VER_STR_LEN);
+
+	cdev_info->msix_count = adapter->aux_ctxt.num_msix;
+	if (cdev_info->msix_count) {
+		cdev_info->msix_entries =
+			&adapter->irq_ctxt.msix_entries[aux_irq_start];
+	}
+
+	adapter->aux_ctxt.init = true;
+	return ret;
+}
+
+void sxe2vf_aux_init_task(struct work_struct *work)
+{
+	struct sxe2vf_aux_context *aux = container_of(
+		work, struct sxe2vf_aux_context, init_task.work);
+	struct sxe2vf_adapter *adapter = aux->vfadapter;
+	s32 err;
+
+	err = sxe2vf_aux_device_init(adapter);
+	if (err) {
+		LOG_ERROR_BDF("failed to initialize rdma aux driver.\n");
+		return;
+	}
+	err = sxe2vf_aux_dev_add(&aux->cdev_info);
+	if (err) {
+		LOG_ERROR_BDF("failed to add rdma aux driver.\n");
+		return;
+	}
+
+	LOG_INFO_BDF("aux init task done.\n");
+}
+
+void sxe2vf_auxdrv_init(struct sxe2vf_adapter *adapter)
+{
+	(void)schedule_delayed_work(&adapter->aux_ctxt.init_task,
+				    msecs_to_jiffies(5));
+}
+
+static int sxe2vf_rdma_aux_send_event(struct sxe2vf_adapter *adapter,
+				      struct sxe2_rdma_event_info
*event) +{ + struct sxe2_auxiliary_drv *iadrv; + struct aux_core_dev_info *cdev_info; + + LOG_DEBUG_BDF("Start send event!\n"); + if (!adapter || !event) + return -EINVAL; + + cdev_info = &adapter->aux_ctxt.cdev_info; + + mutex_lock(&adapter->aux_ctxt.adev_mutex); + if (!cdev_info->adev || !event) { + mutex_unlock(&adapter->aux_ctxt.adev_mutex); + return 0; + } + + device_lock(&cdev_info->adev->dev); + iadrv = sxe2vf_rdma_aux_drv_get(cdev_info); + if (iadrv && iadrv->aux_ops.event_handler) + iadrv->aux_ops.event_handler(cdev_info, event); + + device_unlock(&cdev_info->adev->dev); + mutex_unlock(&adapter->aux_ctxt.adev_mutex); + + LOG_DEBUG_BDF("End send event!\n"); + return 0; +} + +void sxe2vf_auxdrv_send_reset_event(struct sxe2vf_adapter *adapter) +{ + struct sxe2_rdma_event_info event; + + (void)memset(&event, 0x0, sizeof(event)); + set_bit(SXE2_EVENT_NOTIFY_RESET, event.type); + (void)sxe2vf_rdma_aux_send_event(adapter, &event); +} + +int sxe2vf_rdma_aux_send_mtu_changed_event(struct sxe2vf_adapter *adapter) +{ + struct sxe2_rdma_event_info event; + + memset(&event, 0x0, sizeof(event)); + set_bit(SXE2_EVENT_MTU_CHANGED, event.type); + return sxe2vf_rdma_aux_send_event(adapter, &event); +} + +void sxe2vf_auxdrv_deinit(struct sxe2vf_adapter *adapter) +{ + struct aux_core_dev_info *cdev_info = &adapter->aux_ctxt.cdev_info; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return; + + if (adapter->aux_ctxt.init_task.work.func) + (void)cancel_delayed_work_sync(&adapter->aux_ctxt.init_task); + + if (!adapter->aux_ctxt.init) { + LOG_INFO_BDF(" auxdrv already deinit.\n"); + return; + } + + if (cdev_info->adev) { + LOG_INFO_BDF("adev is 0x%p remove:%u.\n", cdev_info->adev, + adapter->dev_ctxt.remove); + if (!adapter->dev_ctxt.remove) + sxe2vf_auxdrv_send_reset_event(adapter); + (void)sxe2vf_aux_dev_delete(cdev_info); + } + + ida_simple_remove(&sxe2vf_aux_ida, adapter->aux_ctxt.aux_idx); + + adapter->aux_ctxt.init = false; + LOG_DEBUG_BDF("End deinit auxdrv!\n"); +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..034728a3c0b0273806143653f00bdf13dabdb8f0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_aux_drv.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
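+ *
+ * Auxiliary (RDMA) device glue for the VF: the aux_core_dev_info wrapper
+ * handed to the "roce" auxiliary driver, the delayed init task, and the
+ * helpers that forward reset and MTU-change events to that driver.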
+ * + * @file: sxe2vf_aux_drv.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_AUX_DRV_H__ +#define __SXE2VF_AUX_DRV_H__ + +#include "sxe2_drv_aux.h" +#include "sxe2_mbx_public.h" + +#define MAX_RDMA_MSG_SIZE 4096 +#define SXE2VF_RDMA_NAME "roce" +struct sxe2vf_adapter; + +enum sxe2vf_rdma_op_state { + RDMA_OP_IDLE, + RDMA_OP_WORKING, + RDMA_OP_SUCCESS, + RDMA_OP_FAILED, +}; + +struct sxe2vf_rdma_msg { + struct list_head list; + u8 opcode; + u16 msglen; + u8 msg[]; +}; + +struct sxe2vf_rdma_mac_entry { + struct list_head list_entry; + u8 mac_addr[ETH_ALEN]; +}; + +struct sxe2vf_aux_context { + struct aux_core_dev_info cdev_info; + struct sxe2vf_adapter *vfadapter; + u32 num_msix; + int aux_idx; + struct delayed_work init_task; + struct mutex adev_mutex; + bool init; +}; + +void sxe2vf_aux_init_task(struct work_struct *work); +void sxe2vf_auxdrv_init(struct sxe2vf_adapter *adapter); +void sxe2vf_auxdrv_deinit(struct sxe2vf_adapter *adapter); + +void sxe2vf_auxdrv_send_reset_event(struct sxe2vf_adapter *adapter); + +int sxe2vf_rdma_aux_send_mtu_changed_event(struct sxe2vf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..96696177c6d9bf74a62d3603e6732ad3b4ed5264 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_debugfs.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include + +#include "sxe2_version.h" +#include "sxe2vf.h" +#include "sxe2_log.h" +#include "sxe2vf_debugfs.h" +#include "sxe2_com_cdev.h" + +static struct dentry *sxe2vf_debugfs_root; +#ifdef SXE2_CFG_DEBUG +extern int g_vf_switch_stats; +#endif + +static char *g_sxe2vf_com_mode_to_str[] = { + [SXE2_COM_MODULE_KERNEL] = SXE2_COM_KERNEL_MODE_NAME, + [SXE2_COM_MODULE_DPDK] = SXE2_COM_DPDK_MODE_NAME, + [SXE2_COM_MODULE_RDMA] = SXE2_COM_RDMA_MODE_NAME, + [SXE2_COM_MODULE_MIXED] = SXE2_COM_MIXED_MODE_NAME, + [SXE2_COM_MODULE_UNDEFINED] = SXE2_COM_UNDEFINED_MODE_NAME, +}; + +void sxe2vf_info_dump(struct sxe2vf_adapter *adapter) +{ + LOG_DEV_INFO("\t info dump start.\n"); + + LOG_DEV_INFO("\t adapter=%p\n", adapter); + LOG_DEV_INFO("\t adapter.dev_name=%s\n", adapter->dev_name); + LOG_DEV_INFO("\t adapter.pdev=%p\n", adapter->pdev); + LOG_DEV_INFO("\t adapter.netdev=%p\n", adapter->netdev); + LOG_DEV_INFO("\t adapter mode:%d\n", sxe2vf_com_mode_get(adapter)); + + LOG_DEV_INFO("\t adapter.irq_ctxt.max_cnt=%d\n", adapter->irq_ctxt.max_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.event_irq_cnt=%d\n", + SXE2VF_EVENT_MSIX_CNT); + LOG_DEV_INFO("\t adapter.irq_ctxt.event_offset=%d\n", 0); + LOG_DEV_INFO("\t adapter.irq_ctxt.eth_irq_cnt=%d\n", + adapter->irq_ctxt.eth_irq_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.eth_offset=%d\n", + adapter->irq_ctxt.eth_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.dpdk_irq_cnt=%d\n", + adapter->irq_ctxt.dpdk_irq_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.dpdk_offset=%d\n", + adapter->irq_ctxt.dpdk_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.rdma_irq_cnt=%d\n", + adapter->irq_ctxt.rdma_irq_cnt); + LOG_DEV_INFO("\t adapter.irq_ctxt.rdma_offset=%d\n", + adapter->irq_ctxt.rdma_offset); + LOG_DEV_INFO("\t adapter.irq_ctxt.msix_cnt=%d\n", + adapter->irq_ctxt.msix_cnt); 
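+	/* queue context: overall limits plus per-consumer (eth/dpdk) queue
+	 * counts and offsets
+	 */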
+ + LOG_DEV_INFO("\t adapter.q_ctxt.max_cnt=%d\n", adapter->q_ctxt.max_cnt); + LOG_DEV_INFO("\t adapter.q_ctxt.q_cnt_req=%d\n", adapter->q_ctxt.q_cnt_req); + LOG_DEV_INFO("\t adapter.q_ctxt.eth_q_cnt=%d\n", adapter->q_ctxt.eth_q_cnt); + LOG_DEV_INFO("\t adapter.q_ctxt.eth_offset=%d\n", + adapter->q_ctxt.eth_offset); + LOG_DEV_INFO("\t adapter.q_ctxt.dpdk_q_cnt=%d\n", + adapter->q_ctxt.dpdk_q_cnt); + LOG_DEV_INFO("\t adapter.q_ctxt.dpdk_offset=%d\n", + adapter->q_ctxt.dpdk_offset); + + LOG_DEV_INFO("\t adapter.vsi_ctxt.vsi_cnt_max=%d\n", + adapter->vsi_ctxt.vsi_cnt_max); + LOG_DEV_INFO("\t adapter.vsi_ctxt.vsi_ids[0]=%d\n", + adapter->vsi_ctxt.vsi_ids[0]); + LOG_DEV_INFO("\t adapter.vsi_ctxt.vsi_ids[1]=%d\n", + adapter->vsi_ctxt.vsi_ids[1]); + + LOG_DEV_INFO("\t adapter.work_ctxt.dev_state=%d\n", + adapter->work_ctxt.dev_state); + LOG_DEV_INFO("\t adapter.work_ctxt.failed_cnt=%d\n", + adapter->work_ctxt.failed_cnt); + LOG_DEV_INFO("\t adapter.work_ctxt.is_send=%d\n", + adapter->work_ctxt.is_send); + + LOG_DEV_INFO("\t adapter.link_ctxt.link_up=%d\n", + adapter->link_ctxt.link_up); + LOG_DEV_INFO("\t adapter.link_ctxt.speed=%d\n", adapter->link_ctxt.speed); + + LOG_DEV_INFO("\t adapter.aux_ctxt.num_msix=%d\n", + adapter->aux_ctxt.num_msix); + LOG_DEV_INFO("\t adapter.aux_ctxt.aux_idx=%d\n", adapter->aux_ctxt.aux_idx); + LOG_DEV_INFO("\t adapter.aux_ctxt.init=%d\n", adapter->aux_ctxt.init); + LOG_DEV_INFO("\t adapter.dev_ctxt.remove=%d\n", adapter->dev_ctxt.remove); + + LOG_DEV_INFO("\t info dump end.\n"); +} + +void sxe2vf_vsi_dump(struct sxe2vf_adapter *adapter) +{ + u16 i; + + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct netdev_queue *ntxq = NULL; + + if (!vsi) { + LOG_DEV_INFO("\t mode:%d, eth vsi null\n", + sxe2vf_com_mode_get(adapter)); + return; + } + LOG_DEV_INFO("\t vsi dump start.\n"); + + LOG_DEV_INFO("\t vsi=%p\n", vsi); + LOG_DEV_INFO("\t ----vsi[%d]----\n", vsi->vsi_id); + LOG_DEV_INFO("\t vsi->vsi_type=%d\n", vsi->vsi_type); + + LOG_DEV_INFO("\t vsi->irqs.cnt=%d\n", vsi->irqs.cnt); + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2vf_for_each_vsi_irq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi irq_data[%d]----\n", + vsi->irqs.irq_data[i]->irq_idx); + LOG_DEV_INFO("\t\t irq_data[%d]->name=%s\n", i, + vsi->irqs.irq_data[i]->name); + LOG_DEV_INFO("\t\t irq_data[%d]->q_cnt=%d\n", i, + vsi->irqs.irq_data[i]->q_cnt); + LOG_DEV_INFO("\t\t irq_data[%d]->rate_limit=%d\n", i, + vsi->irqs.irq_data[i]->rate_limit); + LOG_DEV_INFO("\t\t irq_data[%d]->multiple_polling=%d\n", i, + vsi->irqs.irq_data[i]->multiple_polling); + LOG_DEV_INFO("\t\t irq_data[%d]->q_bitmap=%d\n", i, + vsi->irqs.irq_data[i]->q_bitmap); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_mode=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_mode); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_idx=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_idx); + LOG_DEV_INFO("\t\t irq_data[%d]->tx.itr_setting=%d\n", i, + vsi->irqs.irq_data[i]->tx.itr_setting); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_mode=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_mode); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_idx=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_idx); + LOG_DEV_INFO("\t\t irq_data[%d]->rx.itr_setting=%d\n", i, + vsi->irqs.irq_data[i]->rx.itr_setting); + } + + LOG_DEV_INFO("\t vsi->txqs.q_cnt=%d\n", vsi->txqs.q_cnt); + LOG_DEV_INFO("\t vsi->txqs.depth=%d\n", vsi->txqs.depth); + sxe2vf_for_each_vsi_txq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi txq[%d]----\n", vsi->txqs.q[i]->idx_in_vsi); + LOG_DEV_INFO("\t\t txq[%d]->depth=%d\n", i, vsi->txqs.q[i]->depth); + 
LOG_DEV_INFO("\t\t txq[%d]->next_to_use=%d\n", i, + vsi->txqs.q[i]->next_to_use); + LOG_DEV_INFO("\t\t txq[%d]->next_to_clean=%d\n", i, + vsi->txqs.q[i]->next_to_clean); + if (vsi->txqs.q[i]->netdev) { + ntxq = netdev_get_tx_queue(vsi->txqs.q[i]->netdev, + vsi->txqs.q[i]->idx_in_vsi); + if (ntxq) + LOG_DEV_INFO("\t\t txq[%d] netdev st=%lu " + "(BIT: 0 - DRV_XOFF; 1 - STACK_XOFF; " + "2- FROZEN)\n", + i, ntxq->state); + } + } + + LOG_DEV_INFO("\t vsi->rxqs.q_cnt=%d\n", vsi->rxqs.q_cnt); + LOG_DEV_INFO("\t vsi->rxqs.depth=%d\n", vsi->rxqs.depth); + sxe2vf_for_each_vsi_rxq(vsi, i) + { + LOG_DEV_INFO("\t ----vsi rxq[%d]----\n", vsi->rxqs.q[i]->idx_in_vsi); + LOG_DEV_INFO("\t\t rxq[%d]->depth=%d\n", i, vsi->rxqs.q[i]->depth); + LOG_DEV_INFO("\t\t rxq[%d]->next_to_use=%d\n", i, + vsi->rxqs.q[i]->next_to_use); + LOG_DEV_INFO("\t\t rxq[%d]->next_to_clean=%d\n", i, + vsi->rxqs.q[i]->next_to_clean); + } + mutex_unlock(&adapter->vsi_ctxt.lock); + LOG_DEV_INFO("\t vsi dump end.\n"); +} + +STATIC void sxe2vf_com_info(struct sxe2vf_adapter *adapter) +{ + sxe2_com_info_print(&adapter->com_ctxt); +} + +#ifdef SXE2_CFG_DEBUG +static void sxe2vf_monitor_stats_open(struct sxe2vf_adapter *adapter) +{ + g_vf_switch_stats = 1; +} + +static void sxe2vf_monitor_stats_close(struct sxe2vf_adapter *adapter) +{ + g_vf_switch_stats = 0; +} +#endif + +static struct sxe2vf_debugfs_command command[] = { + {"info dump", sxe2vf_info_dump}, + {"vsi dump", sxe2vf_vsi_dump}, +#ifdef SXE2_CFG_DEBUG + {"stats open", sxe2vf_monitor_stats_open}, + {"stats close", sxe2vf_monitor_stats_close}, +#endif + {"com info", sxe2vf_com_info}, + {"", NULL}, +}; + +static s32 sxe2vf_debugfs_command_match(struct sxe2vf_adapter *adapter, s8 *cmd_buf, + size_t size) +{ + u32 i; + + for (i = 0; strlen(command[i].string) != 0; i++) { + if (!strcmp(cmd_buf, command[i].string)) { + command[i].debugfs_cb(adapter); + goto l_end; + } + } + + return -EINVAL; + +l_end: + return 0; +} + +static void sxe2vf_debugfs_command_help_info(struct sxe2vf_adapter *adapter) +{ + u32 i; + + LOG_DEV_INFO("available commands:\n"); + + for (i = 0; strlen(command[i].string) != 0; i++) + LOG_DEV_INFO("\t %s\n", command[i].string); +} + +STATIC ssize_t sxe2vf_debugfs_command_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + ssize_t ret; + s8 *cmd_buf, *cmd_buf_tmp; + struct sxe2vf_adapter *adapter = file->private_data; + + if (*ppos != 0) { + LOG_DEV_ERR(" don't allow partial writes\n, *ppos!=NULL"); + return -EINVAL; + } + + cmd_buf = memdup_user(buf, count + 1); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + cmd_buf[count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = (size_t)cmd_buf_tmp - (size_t)cmd_buf + 1; + } + ret = (ssize_t)count; + + if (sxe2vf_debugfs_command_match(adapter, cmd_buf, count)) { + LOG_DEV_INFO("unknown or invalid command '%s'\n", cmd_buf); + sxe2vf_debugfs_command_help_info(adapter); + ret = -EINVAL; + } + + kfree(cmd_buf); + return ret; +} + +static const struct file_operations sxe2vf_debugfs_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = sxe2vf_debugfs_command_write, +}; + +void sxe2vf_debugfs_create_common_file(struct sxe2vf_adapter *adapter) +{ +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + if (IS_ERR(debugfs_create_file("command", 0600, adapter->sxe2vf_debugfs_vf, + adapter, &sxe2vf_debugfs_command_fops))) { + LOG_DEV_ERR("debugfs file create failed\n"); + } + + return; +#endif +} + +static char 
*sxe2vf_com_mode_to_str(enum sxe2_com_module com_mode)
+{
+	if (com_mode >= ARRAY_SIZE(g_sxe2vf_com_mode_to_str))
+		return "unknown";
+
+	return g_sxe2vf_com_mode_to_str[com_mode];
+}
+
+STATIC bool sxe2vf_drv_mode_check(char *cmd_buf)
+{
+	if ((!strcmp(cmd_buf, SXE2_COM_KERNEL_MODE_NAME)) ||
+	    (!strcmp(cmd_buf, SXE2_COM_MIXED_MODE_NAME)) ||
+	    (!strcmp(cmd_buf, SXE2_COM_DPDK_MODE_NAME)) ||
+	    (!strcmp(cmd_buf, SXE2_COM_UNDEFINED_MODE_NAME)))
+		return true;
+
+	return false;
+}
+
+static s32 sxe2vf_com_str_to_mode(char *cmd_buf, enum sxe2_com_module *new_mode)
+{
+	s32 ret;
+	u32 i;
+
+	for (i = 0; i < SXE2_COM_MODULE_INVAL; i++) {
+		if (!strcmp(cmd_buf, g_sxe2vf_com_mode_to_str[i])) {
+			*new_mode = (enum sxe2_com_module)i;
+			ret = 0;
+			goto end;
+		}
+	}
+
+	ret = -EINVAL;
+
+end:
+	return ret;
+}
+
+STATIC s32 sxe2vf_debugfs_drv_mode_set(struct sxe2vf_adapter *adapter, char *cmd_buf)
+{
+	s32 ret = 0;
+	enum sxe2_com_module new_mode = SXE2_COM_MODULE_INVAL;
+
+	if (sxe2vf_drv_mode_check(cmd_buf)) {
+		ret = sxe2vf_com_str_to_mode(cmd_buf, &new_mode);
+		if (ret) {
+			LOG_ERROR_BDF("drv mode buf error.\n");
+			goto end;
+		}
+
+		ret = sxe2vf_drv_mode_set(adapter, new_mode);
+		if (ret) {
+			LOG_ERROR_BDF("drv mode configuration failed.\n");
+			goto end;
+		}
+
+		LOG_DEV_INFO("current mode:%s configured mode:%s\n",
+			     g_sxe2vf_com_mode_to_str[adapter->drv_mode],
+			     g_sxe2vf_com_mode_to_str[new_mode]);
+	} else {
+		LOG_DEV_INFO("unknown or invalid command '%s'\n", cmd_buf);
+		LOG_DEV_INFO("supported commands: %s, %s, %s.\n",
+			     SXE2_COM_KERNEL_MODE_NAME, SXE2_COM_MIXED_MODE_NAME,
+			     SXE2_COM_DPDK_MODE_NAME);
+		ret = -EINVAL;
+	}
+
+end:
+	return ret;
+}
+
+STATIC ssize_t sxe2vf_debugfs_drv_mode_read(struct file *file, char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	struct sxe2vf_adapter *adapter = file->private_data;
+	struct sxe2_vf_drv_mode_resp vf_resp = {0};
+	char tmp_buf[SXE2_COM_MODE_NAME_SIZE];
+	ssize_t len = 0;
+	s32 ret = 0;
+
+	len = snprintf(tmp_buf, SXE2_COM_MODE_NAME_SIZE, "current mode:%s\n",
+		       sxe2vf_com_mode_to_str(adapter->drv_mode));
+	ret = __sxe2vf_drv_mode_get(adapter, &vf_resp, sizeof(vf_resp),
+				    SXE2VF_MSG_RESP_WAIT_NOTIFY);
+	len += snprintf(tmp_buf + len, SXE2_COM_MODE_NAME_SIZE - len,
+			"configured mode:%s\n",
+			ret ?
"get failed" + : sxe2vf_com_mode_to_str(vf_resp.drv_mode)); + + return simple_read_from_buffer(buf, count, ppos, &tmp_buf, len); +} + +STATIC ssize_t sxe2vf_debugfs_drv_mode_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + ssize_t ret, tmp_ret; + s8 *cmd_buf, *cmd_buf_tmp; + struct sxe2vf_adapter *adapter = file->private_data; + + if (*ppos != 0) { + LOG_DEV_ERR("dont't allow partial writes.\n, *ppos!=NULL"); + return -EINVAL; + } + + cmd_buf = memdup_user(buf, count + 1); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + + cmd_buf[count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = (size_t)cmd_buf_tmp - (size_t)cmd_buf + 1; + } + + ret = (ssize_t)count; + + tmp_ret = sxe2vf_debugfs_drv_mode_set(adapter, cmd_buf); + if (tmp_ret) + ret = tmp_ret; + + kfree(cmd_buf); + return ret; +} + +static const struct file_operations sxe2vf_debugfs_drv_mode_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe2vf_debugfs_drv_mode_read, + .write = sxe2vf_debugfs_drv_mode_write, +}; + +void sxe2vf_debugfs_create_drv_mode_file(struct sxe2vf_adapter *adapter) +{ +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + if (IS_ERR(debugfs_create_file("drv_mode", 0600, adapter->sxe2vf_debugfs_vf, + adapter, &sxe2vf_debugfs_drv_mode_fops))) { + LOG_DEV_ERR("debugfs file create failed\n"); + } + + return; +#endif +} + +void sxe2vf_debugfs_vf_init(struct sxe2vf_adapter *adapter) +{ +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + const char *name = pci_name(adapter->pdev); + + adapter->sxe2vf_debugfs_vf = debugfs_create_dir(name, sxe2vf_debugfs_root); + if (IS_ERR(adapter->sxe2vf_debugfs_vf)) { + LOG_ERROR("init of vf debugfs failed\n"); + goto l_end; + } + + sxe2vf_debugfs_create_common_file(adapter); + sxe2vf_debugfs_create_drv_mode_file(adapter); + +l_end: + return; + +#endif +} + +void sxe2vf_debugfs_vf_exit(struct sxe2vf_adapter *adapter) +{ +#if defined(CONFIG_DEBUG_FS) || defined(PCLINT) + debugfs_remove_recursive(adapter->sxe2vf_debugfs_vf); + adapter->sxe2vf_debugfs_vf = NULL; +#endif +} + +void sxe2vf_debugfs_init(void) +{ + sxe2vf_debugfs_root = debugfs_create_dir(SXE2VF_DRV_NAME, NULL); + if (IS_ERR(sxe2vf_debugfs_root)) + LOG_ERROR_D("init of debugfs failed\n"); +} + +void sxe2vf_debugfs_exit(void) +{ + debugfs_remove_recursive(sxe2vf_debugfs_root); + sxe2vf_debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..8d22e54d2b1620b108f38e50f647fc2b66fccc0e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_debugfs.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_debugfs.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2VF_DEBUGFS_H__ +#define __SXE2VF_DEBUGFS_H__ + +struct sxe2vf_debugfs_command { + s8 string[50]; + void (*debugfs_cb)(struct sxe2vf_adapter *adapter); +}; + +void sxe2vf_debugfs_init(void); + +void sxe2vf_debugfs_exit(void); + +void sxe2vf_debugfs_create_common_file(struct sxe2vf_adapter *adapter); + +void sxe2vf_debugfs_create_drv_mode_file(struct sxe2vf_adapter *adapter); + +void sxe2vf_debugfs_vf_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_debugfs_vf_exit(struct sxe2vf_adapter *adapter); + +void sxe2vf_debugfs_destroy_files(struct sxe2vf_adapter *adapter); + +void sxe2vf_info_dump(struct sxe2vf_adapter *adapter); + +void sxe2vf_vsi_dump(struct sxe2vf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..18d19bb3a2d3ba1f4d254a31381b85dc16ec2d35 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.c @@ -0,0 +1,2788 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_ethtool.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_SCTP +#include +#endif +#include +#ifndef SXE2_KERNEL_TEST +#include +#endif + +#include "sxe2_compat.h" +#include "sxe2vf.h" +#include "sxe2vf_ethtool.h" +#include "sxe2_log.h" +#include "sxe2_version.h" +#include "sxe2vf_vsi.h" +#include "sxe2vf_rxft.h" +#include "sxe2_cmd.h" + +#define SXE2VF_COALESCE_QIDX_INVAL 0xFFFFFFFF +#define SXE2VF_VSI_TX_QC(vsi, q_idx) (&(vsi)->txqs.q[(q_idx)]->irq_data->tx) +#define SXE2VF_VSI_RX_QC(vsi, q_idx) (&(vsi)->rxqs.q[(q_idx)]->irq_data->rx) +#define SXE2VF_VSI_TX_IRQ(vsi, q_idx) (&(vsi)->txqs.q[(q_idx)]->irq_data) +#define SXE2VF_VSI_RX_IRQ(vsi, q_idx) (&(vsi)->rxqs.q[(q_idx)]->irq_data) + +#define ETHTOOL_GRXRINGS 0x0000002d +#define SXE2VF_Q_TYPE_STR_RX "rx" +#define SXE2VF_Q_TYPE_STR_TX "tx" + +static void sxe2_vf_vsi_hw_stats_update(struct sxe2vf_adapter *adapter) +{ + (void)sxe2vf_stats_get_msg_send(adapter); +} +s32 sxe2vf_stats_push_sync(struct sxe2vf_adapter *adapter) +{ + return sxe2vf_stats_push_msg_send(adapter); +} + +STATIC void sxe2vf_fetch_u64_data_per_ring(struct u64_stats_sync *syncp, + struct sxe2vf_queue_stats *stats, + u64 *pkts, u64 *bytes) +{ + u32 start; + + do { + start = u64_stats_fetch_begin(syncp); + *pkts = stats->packets; + *bytes = stats->bytes; + } while (u64_stats_fetch_retry(syncp, start)); +} + +void sxe2vf_vsi_sw_stats_update(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_vsi_sw_stats cur_stats; + struct sxe2vf_vsi_qs_stats *vsi_qs_stats = &vsi->vsi_qs_stats; + struct sxe2vf_queue_stats *txq_stats, *rxq_stats; + u64 pkts, bytes; + u8 j; + + memset(&cur_stats, 0, sizeof(cur_stats)); + + sxe2vf_for_each_vsi_txq(vsi, j) + { + txq_stats = &vsi_qs_stats->txqs_stats[j]; + sxe2vf_fetch_u64_data_per_ring(&txq_stats->syncp, txq_stats, &pkts, + &bytes); + cur_stats.tx_packets += pkts; + cur_stats.tx_bytes += bytes; + cur_stats.tx_restart += txq_stats->tx_stats.tx_restart; + cur_stats.tx_busy += txq_stats->tx_stats.tx_busy; + cur_stats.tx_linearize += txq_stats->tx_stats.tx_linearize; + cur_stats.tx_vlan_insert += txq_stats->tx_stats.tx_vlan_insert; + cur_stats.tx_tso_packets += 
txq_stats->tx_stats.tx_tso_packets; + cur_stats.tx_tso_bytes += txq_stats->tx_stats.tx_tso_bytes; + cur_stats.tx_csum_none += txq_stats->tx_stats.tx_csum_none; + cur_stats.tx_csum_partial += txq_stats->tx_stats.tx_csum_partial; + cur_stats.tx_csum_partial_inner += + txq_stats->tx_stats.tx_csum_partial_inner; + cur_stats.tx_queue_dropped += txq_stats->tx_stats.tx_queue_dropped; + cur_stats.tx_xmit_more += txq_stats->tx_stats.tx_xmit_more; + cur_stats.tx_tso_linearize_chk += + txq_stats->tx_stats.tx_tso_linearize_chk; + } + + sxe2vf_for_each_vsi_rxq(vsi, j) + { + rxq_stats = &(vsi_qs_stats->rxqs_stats[j]); + sxe2vf_fetch_u64_data_per_ring(&rxq_stats->syncp, rxq_stats, &pkts, + &bytes); + cur_stats.rx_packets += pkts; + cur_stats.rx_bytes += bytes; + cur_stats.rx_buff_alloc_err += rxq_stats->rx_stats.rx_buff_alloc_err; + cur_stats.rx_pg_alloc_fail += rxq_stats->rx_stats.rx_pg_alloc_fail; + cur_stats.rx_lro_count += rxq_stats->rx_stats.rx_lro_count; + cur_stats.rx_lro_packets += rxq_stats->rx_stats.rx_lro_packets; + cur_stats.rx_vlan_strip += rxq_stats->rx_stats.rx_vlan_strip; + cur_stats.rx_csum_err += rxq_stats->rx_stats.rx_csum_err; + cur_stats.rx_csum_unnecessary += + rxq_stats->rx_stats.rx_csum_unnecessary; + cur_stats.rx_csum_none += rxq_stats->rx_stats.rx_csum_none; + cur_stats.rx_csum_complete += rxq_stats->rx_stats.rx_csum_complete; + cur_stats.rx_csum_unnecessary_inner += + rxq_stats->rx_stats.rx_csum_unnecessary_inner; + cur_stats.rx_lro_bytes += rxq_stats->rx_stats.rx_lro_bytes; + cur_stats.rx_pkts_sw_drop += rxq_stats->rx_stats.rx_pkts_sw_drop; + cur_stats.rx_page_alloc += rxq_stats->rx_stats.rx_page_alloc; + cur_stats.rx_non_eop_descs += rxq_stats->rx_stats.rx_non_eop_descs; + cur_stats.rx_pa_err += rxq_stats->rx_stats.rx_pa_err; + } + + memcpy(&vsi->vsi_stats.vsi_sw_stats, &cur_stats, sizeof(cur_stats)); +} + +static void sxe2vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + u32 i = 0; + struct sxe2vf_queue *tx_q, *rx_q; + u64 pkts, bytes; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + u8 j; + + struct sxe2_vf_vsi_hw_stats *hw_stats = &vsi->vsi_stats.vsi_hw_stats; + struct sxe2vf_vsi_sw_stats *sw_stats = &vsi->vsi_stats.vsi_sw_stats; + + u64 rx_offload_success = 0; + u64 rx_error_decrypt_fail = 0; + u64 rx_error_invalid_state = 0; + u64 rx_error_invalid_sp = 0; + u64 tx_offload_success = 0; + u64 tx_error_invalid_state = 0; + u64 tx_error_invalid_sp = 0; + + char *p; + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) + goto l_unlock; + + sxe2vf_vsi_sw_stats_update(vsi); + sxe2_vf_vsi_hw_stats_update(adapter); + + for (j = 0; j < SXE2VF_VSI_SW_STATS_LEN; j++) { + p = (char *)sw_stats + sxe2vf_gstrings_vsi_sw_stats[j].stats_offset; + data[i++] = *(u64 *)p; + } + + for (j = 0; j < SXE2VF_VSI_HW_STATS_LEN; j++) { + p = (char *)hw_stats + sxe2vf_gstrings_vsi_hw_stats[j].stats_offset; + data[i++] = *(u64 *)p; + } + + sxe2vf_for_each_vsi_txq(vsi, j) + { + tx_q = READ_ONCE(vsi->txqs.q[j]); + sxe2vf_fetch_u64_data_per_ring(&tx_q->syncp, tx_q->stats, &pkts, + &bytes); + data[i++] = pkts; + data[i++] = bytes; + data[i++] = tx_q->stats->tx_stats.tx_tso_packets; + data[i++] = tx_q->stats->tx_stats.tx_tso_bytes; + data[i++] = tx_q->stats->tx_stats.tx_tso_linearize_chk; + data[i++] = tx_q->stats->tx_stats.tx_vlan_insert; + data[i++] = tx_q->stats->tx_stats.tx_csum_none; + data[i++] = tx_q->stats->tx_stats.tx_csum_partial; + data[i++] = 
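+		/*
+		 * ethtool pairs values with names purely by index, so the
+		 * emission order below must stay in lockstep with the
+		 * per-queue string order in sxe2vf_get_strings().
+		 */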
tx_q->stats->tx_stats.tx_csum_partial_inner; + data[i++] = tx_q->stats->tx_stats.tx_busy; + data[i++] = tx_q->stats->tx_stats.tx_queue_dropped; + data[i++] = tx_q->stats->tx_stats.tx_xmit_more; + data[i++] = tx_q->stats->tx_stats.tx_restart; + data[i++] = tx_q->stats->tx_stats.tx_linearize; + } + + sxe2vf_for_each_vsi_rxq(vsi, j) + { + rx_q = READ_ONCE(vsi->rxqs.q[j]); + sxe2vf_fetch_u64_data_per_ring(&rx_q->syncp, rx_q->stats, &pkts, + &bytes); + data[i++] = pkts; + data[i++] = bytes; + data[i++] = rx_q->stats->rx_stats.rx_csum_unnecessary; + data[i++] = rx_q->stats->rx_stats.rx_csum_none; + data[i++] = rx_q->stats->rx_stats.rx_csum_complete; + data[i++] = rx_q->stats->rx_stats.rx_csum_unnecessary_inner; + data[i++] = rx_q->stats->rx_stats.rx_csum_err; + data[i++] = rx_q->stats->rx_stats.rx_lro_packets; + data[i++] = rx_q->stats->rx_stats.rx_lro_bytes; + data[i++] = rx_q->stats->rx_stats.rx_lro_count; + data[i++] = rx_q->stats->rx_stats.rx_vlan_strip; + data[i++] = rx_q->stats->rx_stats.rx_pkts_sw_drop; + data[i++] = rx_q->stats->rx_stats.rx_buff_alloc_err; + data[i++] = rx_q->stats->rx_stats.rx_pg_alloc_fail; + data[i++] = rx_q->stats->rx_stats.rx_page_alloc; + data[i++] = rx_q->stats->rx_stats.rx_non_eop_descs; + data[i++] = rx_q->stats->rx_stats.rx_pa_err; + } + + sxe2vf_for_each_vsi_rxq(vsi, j) + { + rx_q = READ_ONCE(vsi->rxqs.q[j]); + rx_offload_success += rx_q->stats->ipsec_stats.rx_offload_success; + rx_error_decrypt_fail += + rx_q->stats->ipsec_stats.rx_error_decrypt_fail; + rx_error_invalid_state += + rx_q->stats->ipsec_stats.rx_error_invalid_state; + rx_error_invalid_sp += rx_q->stats->ipsec_stats.rx_error_invalid_sp; + } + data[i++] = rx_offload_success; + data[i++] = rx_error_decrypt_fail; + data[i++] = rx_error_invalid_state; + data[i++] = rx_error_invalid_sp; + + sxe2vf_for_each_vsi_txq(vsi, j) + { + tx_q = READ_ONCE(vsi->txqs.q[j]); + tx_offload_success += tx_q->stats->ipsec_stats.tx_offload_success; + tx_error_invalid_state += + tx_q->stats->ipsec_stats.tx_error_invalid_state; + tx_error_invalid_sp += tx_q->stats->ipsec_stats.tx_error_invalid_sp; + } + data[i++] = tx_offload_success; + data[i++] = tx_error_invalid_state; + data[i++] = tx_error_invalid_sp; + + data[i++] = adapter->fnav_ctxt.fnav_match; + +l_unlock: + (void)i; + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +STATIC int sxe2vf_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return (int)SXE2VF_ALL_STATS_LEN(netdev); + case ETH_SS_PRIV_FLAGS: + return SXE2VF_PRIV_FLAG_LEN; + default: + return -EOPNOTSUPP; + } +} + +STATIC void sxe2vf_get_ipsec_strings(struct sxe2vf_adapter *adapter, u8 **data) +{ + u8 *p; + + if (!data) + return; + + p = *data; + + ethtool_sprintf(&p, "ipsec_rx_offload_ok"); + ethtool_sprintf(&p, "ipsec_rx_decrypt_fail"); + ethtool_sprintf(&p, "ipsec_rx_invalid_state"); + ethtool_sprintf(&p, "ipsec_rx_invalid_sp"); + + ethtool_sprintf(&p, "ipsec_tx_offload_ok"); + ethtool_sprintf(&p, "ipsec_tx_invalid_state"); + ethtool_sprintf(&p, "ipsec_tx_invalid_sp"); + + *data = p; +} + +STATIC void sxe2vf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + u32 i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SXE2VF_VSI_SW_STATS_LEN; i++) + ethtool_sprintf(&p, sxe2vf_gstrings_vsi_sw_stats[i] + .stats_string); + for (i = 0; i < SXE2VF_VSI_HW_STATS_LEN; i++) + ethtool_sprintf(&p, 
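+			/*
+			 * ethtool_sprintf() advances p by ETH_GSTRING_LEN per
+			 * call, so each stats name lands in its own slot.
+			 */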
sxe2vf_gstrings_vsi_hw_stats[i] + .stats_string); + + sxe2vf_for_each_vsi_txq(vsi, i) + { + ethtool_sprintf(&p, "tx%u_packets", i); + ethtool_sprintf(&p, "tx%u_bytes", i); + ethtool_sprintf(&p, "tx%u_tso_packets", i); + ethtool_sprintf(&p, "tx%u_tso_bytes", i); + ethtool_sprintf(&p, "tx%u_tso_linearize_chk", i); + ethtool_sprintf(&p, "tx%u_added_vlan_packets", i); + ethtool_sprintf(&p, "tx%u_csum_none", i); + ethtool_sprintf(&p, "tx%u_csum_partial", i); + ethtool_sprintf(&p, "tx%u_csum_partial_inner", i); + ethtool_sprintf(&p, "tx%u_stopped", i); + ethtool_sprintf(&p, "tx%u_dropped", i); + ethtool_sprintf(&p, "tx%u_xmit_more", i); + ethtool_sprintf(&p, "tx%u_wake", i); + ethtool_sprintf(&p, "tx%u_linearize", i); + } + + sxe2vf_for_each_vsi_rxq(vsi, i) + { + ethtool_sprintf(&p, "rx%u_packets", i); + ethtool_sprintf(&p, "rx%u_bytes", i); + ethtool_sprintf(&p, "rx%u_csum_unnecessary", i); + ethtool_sprintf(&p, "rx%u_csum_none", i); + ethtool_sprintf(&p, "rx%u_csum_complete", i); + ethtool_sprintf(&p, "rx%u_csum_unnecessary_inner", i); + ethtool_sprintf(&p, "rx%u_csum_err", i); + ethtool_sprintf(&p, "rx%u_lro_packets", i); + ethtool_sprintf(&p, "rx%u_lro_bytes", i); + ethtool_sprintf(&p, "rx%u_lro_count", i); + ethtool_sprintf(&p, "rx%u_removed_vlan_packets", i); + ethtool_sprintf(&p, "rx%u_pkts_sw_drop", i); + ethtool_sprintf(&p, "rx%u_buff_alloc_err", i); + ethtool_sprintf(&p, "rx%u_pg_alloc_fail", i); + ethtool_sprintf(&p, "rx%u_page_alloc", i); + ethtool_sprintf(&p, "rx%u_non_eop_descs", i); + ethtool_sprintf(&p, "rx%u_pa_err", i); + } + sxe2vf_get_ipsec_strings(adapter, &p); + ethtool_sprintf(&p, "fnav_match"); + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < SXE2VF_PRIV_FLAG_LEN; i++) + ethtool_sprintf(&p, sxe2vf_gstrings_priv_flags[i].name); + break; + default: + break; + } +} + +static u32 sxe2vf_get_priv_flags(struct net_device *netdev) +{ + u32 i, flags = 0; + const struct sxe2vf_priv_flag *priv_flag; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + for (i = 0; i < SXE2VF_PRIV_FLAG_LEN; i++) { + priv_flag = &sxe2vf_gstrings_priv_flags[i]; + if (test_bit((int)priv_flag->adapter_flag_bitno, adapter->flags)) + flags |= (u32)BIT(i); + } + return flags; +} + +STATIC void sxe2vf_fnav_tunnel_flag_set(struct sxe2vf_adapter *adapter, u32 flags) +{ + if ((flags & BIT(SXE2VF_PRIV_FLAGS_FNAV_TUNNEL)) && + !test_bit(SXE2VF_FLAG_FNAV_TUNNEL, adapter->flags)) { + set_bit(SXE2VF_FLAG_FNAV_TUNNEL, adapter->flags); + } + + if (!(flags & BIT(SXE2VF_PRIV_FLAGS_FNAV_TUNNEL)) && + test_bit(SXE2VF_FLAG_FNAV_TUNNEL, adapter->flags)) { + clear_bit(SXE2VF_FLAG_FNAV_TUNNEL, adapter->flags); + } +} + +static s32 sxe2vf_legacy_rx_flag_set(struct sxe2vf_adapter *adapter, u32 flags) +{ + s32 ret = 0; + bool need_downup = false; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct net_device *netdev = vsi->netdev; + bool old_legacy_rx = + (bool)test_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags); + + if ((flags & BIT(SXE2VF_PRIV_FLAGS_LEGACY_RX)) && + !test_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags)) { + need_downup = true; + set_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags); + } else if (!(flags & BIT(SXE2VF_PRIV_FLAGS_LEGACY_RX)) && + test_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags)) { + need_downup = true; + clear_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags); + } + + if (need_downup) { + ret = sxe2vf_vsi_reopen_locked(adapter->vsi_ctxt.vf_vsi); + if (ret) { + if (old_legacy_rx) { + set_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, + adapter->flags); + } else { + 
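+				/*
+				 * Reopen failed: roll the legacy-rx flag back
+				 * to the value it held before this request.
+				 */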
clear_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE,
+						  adapter->flags);
+			}
+			LOG_NETDEV_ERR("set legacy rx priv flag err %d\n", ret);
+		}
+	} else {
+		LOG_INFO_BDF("legacy rx priv flag not changed.\n");
+	}
+
+	return ret;
+}
+
+static int sxe2vf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	s32 ret = 0;
+	bool part_failed = false;
+
+	ret = sxe2vf_legacy_rx_flag_set(adapter, flags);
+	if (ret)
+		part_failed = true;
+
+	sxe2vf_fnav_tunnel_flag_set(adapter, flags);
+
+	if (part_failed)
+		ret = -EINVAL;
+
+	return ret;
+}
+
+#ifdef GET_RINGPARAM_NEED_2_PARAMS
+static void sxe2vf_get_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+#else
+static void sxe2vf_get_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring,
+				 struct kernel_ethtool_ringparam *kernel_ring,
+				 struct netlink_ext_ack *extack)
+#endif
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	ring->rx_max_pending = SXE2VF_MAX_NUM_DESC;
+	ring->tx_max_pending = SXE2VF_MAX_NUM_DESC;
+	ring->rx_pending = adapter->vsi_ctxt.vf_vsi->rxqs.depth;
+	ring->tx_pending = adapter->vsi_ctxt.vf_vsi->txqs.depth;
+}
+
+static bool sxe2vf_ringparam_changed(struct sxe2vf_adapter *adapter,
+				     struct ethtool_ringparam *ring, u32 *tx_cnt,
+				     u32 *rx_cnt)
+{
+	bool changed = true;
+
+	*tx_cnt = clamp_t(u32, ring->tx_pending, SXE2VF_MIN_NUM_DESC,
+			  SXE2VF_MAX_NUM_DESC);
+	*tx_cnt = ALIGN(*tx_cnt, SXE2VF_DESC_ALIGN_32);
+
+	*rx_cnt = clamp_t(u32, ring->rx_pending, SXE2VF_MIN_NUM_DESC,
+			  SXE2VF_MAX_NUM_DESC);
+	*rx_cnt = ALIGN(*rx_cnt, SXE2VF_DESC_ALIGN_32);
+
+	if ((*tx_cnt == adapter->vsi_ctxt.vf_vsi->txqs.depth) &&
+	    (*rx_cnt == adapter->vsi_ctxt.vf_vsi->rxqs.depth)) {
+		changed = false;
+	}
+
+	return changed;
+}
+
+static void sxe2vf_ringparam_set_offline(struct sxe2vf_vsi *vsi, u32 tx_size,
+					 u32 rx_size)
+{
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+	struct net_device *netdev = vsi->netdev;
+
+	u32 i;
+
+	if (vsi->txqs.depth == tx_size) {
+		LOG_NETDEV_DEBUG("tx desc depth[%d] not changed.\n", tx_size);
+	} else {
+		LOG_NETDEV_DEBUG("link is down, tx desc depth change from [%d] to "
+				 "[%d] happens when link is brought up.\n",
+				 vsi->txqs.depth, tx_size);
+		sxe2vf_for_each_vsi_txq(vsi, i)
+		{
+			vsi->txqs.q[i]->depth = (u16)tx_size;
+		}
+
+		vsi->txqs.depth = (u16)tx_size;
+	}
+
+	if (vsi->rxqs.depth == rx_size) {
+		LOG_NETDEV_DEBUG("rx desc depth[%d] not changed.\n", rx_size);
+	} else {
+		LOG_NETDEV_DEBUG("link is down, rx desc depth change from [%d] to "
+				 "[%d] happens when link is brought up.\n",
+				 vsi->rxqs.depth, rx_size);
+		sxe2vf_for_each_vsi_rxq(vsi, i)
+		{
+			vsi->rxqs.q[i]->depth = (u16)rx_size;
+		}
+		vsi->rxqs.depth = (u16)rx_size;
+	}
+}
+
+#ifdef SET_RINGPARAM_NEED_2_PARAMS
+static int sxe2vf_set_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *user_param)
+#else
+static int
+sxe2vf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *user_param,
+		     struct kernel_ethtool_ringparam __always_unused *kernel_ring,
+		     struct netlink_ext_ack __always_unused *extack)
+#endif
+{
+	s32 ret = 0;
+	u32 new_rx_count, new_tx_count;
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+	u32 old_tx_depth;
+	u32 old_rx_depth;
+
+	if (user_param->rx_mini_pending || user_param->rx_jumbo_pending) {
+		LOG_ERROR_BDF("do not support set rx_mini_pending=%u or "
+			      "rx_jumbo_pending=%u\n",
+			      user_param->rx_mini_pending,
+			      user_param->rx_jumbo_pending);
+		return
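+		/* this VF exposes no mini/jumbo rx rings, so reject the request */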
-EINVAL;
+	}
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		LOG_INFO_BDF("vsi disabled, try later\n");
+		ret = -EBUSY;
+		goto l_end;
+	}
+	if (!sxe2vf_ringparam_changed(adapter, user_param, &new_tx_count,
+				      &new_rx_count)) {
+		LOG_DEBUG_BDF("ring depth not changed, tx_depth=%u, rx_depth=%u\n",
+			      new_tx_count, new_rx_count);
+		goto l_end;
+	}
+
+	old_tx_depth = vsi->txqs.depth;
+	old_rx_depth = vsi->rxqs.depth;
+
+	if (netif_running(netdev)) {
+		ret = sxe2vf_vsi_close(vsi);
+		if (ret) {
+			LOG_DEBUG_BDF("vsi close failed, vsi %d error %d\n",
+				      vsi->vsi_id, ret);
+			goto l_end;
+		}
+	}
+
+	sxe2vf_ringparam_set_offline(vsi, new_tx_count, new_rx_count);
+
+	if (netif_running(netdev)) {
+		ret = sxe2vf_vsi_open(vsi);
+		if (ret) {
+			LOG_ERROR_BDF("vf change tx_depth:%u rx_depth:%u "
+				      "failed.(err:%d)\n",
+				      new_tx_count, new_rx_count, ret);
+			vsi->txqs.depth = (u16)old_tx_depth;
+			vsi->rxqs.depth = (u16)old_rx_depth;
+			sxe2vf_queues_depth_update(vsi);
+		}
+	}
+
+l_end:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+static void sxe2vf_get_channels(struct net_device *netdev,
+				struct ethtool_channels *ch)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	ch->other_count = SXE2VF_EVENT_MSIX_CNT;
+	ch->max_other = SXE2VF_EVENT_MSIX_CNT;
+
+	ch->combined_count = adapter->vsi_ctxt.vf_vsi->txqs.q_cnt;
+	ch->max_combined = adapter->q_ctxt.eth_q_cnt;
+}
+
+static int sxe2vf_set_channels(struct net_device *netdev,
+			       struct ethtool_channels *ch)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	u32 num_new = ch->combined_count;
+	s32 ret = 0;
+#ifndef SXE2_CFG_RELEASE
+	u32 old_cnt = adapter->q_ctxt.q_cnt_req;
+#endif
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+
+	if (num_new == 0 || num_new > adapter->q_ctxt.eth_q_cnt)
+		return -EINVAL;
+
+	if (num_new == adapter->vsi_ctxt.vf_vsi->txqs.q_cnt)
+		return 0;
+
+	if (ch->rx_count || ch->tx_count ||
+	    (ch->other_count != SXE2VF_EVENT_MSIX_CNT && ch->other_count != 0))
+		return -EINVAL;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		LOG_INFO_BDF("vsi disabled, try later\n");
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+	ret = sxe2vf_set_channels_fnav_check(adapter, num_new);
+	if (ret)
+		goto l_unlock;
+
+	adapter->q_ctxt.q_cnt_req = (u16)num_new;
+
+	if (netif_running(netdev))
+		(void)sxe2vf_vsi_close(vsi);
+
+	ret = sxe2vf_set_channels_rss_reset(netdev, adapter, num_new);
+	if (ret) {
+		(void)sxe2vf_vsi_disable(vsi);
+		LOG_ERROR_BDF("change channel from %u to %u rss lut reset failed. %d\n",
+			      old_cnt, num_new, ret);
+		goto l_unlock;
+	}
+
+	ret = sxe2vf_vsi_rebuild(vsi);
+	if (ret) {
+		(void)sxe2vf_vsi_disable(vsi);
+		LOG_ERROR_BDF("change channel from %u to %u rebuild failed. 
%d\n", + old_cnt, num_new, ret); + goto l_unlock; + } + + if (netif_running(vsi->netdev)) { + ret = sxe2vf_vsi_open(vsi); + goto l_unlock; + } + + LOG_INFO_BDF("change channel from %u to %u.\n", old_cnt, num_new); + +l_unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + + return ret; +} + +static void sxe2_vf_get_hw_autoneg_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 advertising) +{ + ks->base.autoneg = link_cfg->current_an_en.current_an; + if (link_cfg->configed_pause_result.tx_en && + link_cfg->configed_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); + } else if (link_cfg->configed_pause_result.tx_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); + } else if (link_cfg->configed_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); + } else { + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); + } +} + +static void +sxe2_vf_get_advertise_fec_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 advertising, u32 supported, u32 linkstate) +{ + if (linkstate) { + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + if (link_cfg->advertis_fec.fec_br) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + + if (link_cfg->advertis_fec.fec_528 || + link_cfg->advertis_fec.fec_544) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + } + } +} + +static void sxe2_vf_get_an_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 advertising, u32 supported, u32 speed) +{ + if (link_cfg->local_an_en.suppert_an) + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + + if (link_cfg->local_an_en.advertis_an) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + } +} + +static void sxe2_vf_get_hw_connect_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 advertising, u32 supported) +{ + switch (link_cfg->optical_module.current_connection) { + case SXE2_FW_CONNECT_MODE_TRANSCEIVER: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case SXE2_FW_CONNECT_MODE_BACKPLANE: + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, Backplane); + ks->base.port = PORT_NONE; + break; + case SXE2_FW_CONNECT_MODE_DAC: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + ks->base.port = PORT_DA; + break; + case SXE2_FW_CONNECT_MODE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + break; + default: + ks->base.port = PORT_OTHER; + break; + } +} + +static void +sxe2_vf_get_hw_part_adver_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 lp_advertising, u32 supported) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + if (link_cfg->sxe2_ana_fsm == SXE2_AN_GOOD) { + if (link_cfg->partner_pause_result.tx_en && + link_cfg->partner_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Pause); + } else 
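+	/*
+	 * Map the link partner's tx/rx pause bits onto the ethtool
+	 * Pause/Asym_Pause link modes; both are cleared when the partner
+	 * advertises neither direction.
+	 */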
if (link_cfg->partner_pause_result.tx_en) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Pause); + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + } else if (link_cfg->partner_pause_result.rx_en) { + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, + Asym_Pause); + } else { + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Pause); + ethtool_link_ksettings_del_link_mode(ks, lp_advertising, + Asym_Pause); + } + } +} + +static void +sxe2_vf_ethtool_support_fec_get(struct support_speed_ability_mode *speed_ability, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + if (speed_ability->ability_speed_100Gcr4 || + speed_ability->ability_speed_100Gkr4) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + + if (speed_ability->ability_speed_50Gcr2 || + speed_ability->ability_speed_50Gkr2) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + + if (speed_ability->ability_speed_25Gcr || + speed_ability->ability_speed_25Gkr || + speed_ability->ability_speed_25Gkrcr || + speed_ability->ability_speed_25Gkrcr_s) { + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + } + + if (speed_ability->ability_speed_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + } +} + +static void sxe2_vf_get_speed_ability(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + struct support_speed_ability_mode *ability, + u8 usr_link_speed) +{ + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + if (ability->ability_speed_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_10G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + } + + if (ability->ability_speed_25Gcr || (ability->ability_speed_25Gkrcr_s)) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_25G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + } + + if (ability->ability_speed_25Gkr) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_25G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + } + + if (ability->ability_speed_50Gcr2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_50G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + } + + if (ability->ability_speed_50Gkr2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseKR2_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_50G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + } + + if (ability->ability_speed_100Gcr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 
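+			/*
+			 * As with the lower speeds, 100G CR4 is advertised
+			 * only while autoneg is active and the user speed
+			 * selection permits 100G (or is set to auto).
+			 */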
100000baseCR4_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_100G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + } + + if (ability->ability_speed_100Gkr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR4_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_100G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + } + } + + if (ability->ability_speed_100Gsr4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + if (link_cfg->current_an_en.current_an && + (usr_link_speed == FLM_FW_SPEED_100G || + usr_link_speed == FLM_FW_SPEED_AUTO)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseSR4_Full); + } + } + + if (link_cfg->an_publicity.an_mode.speed_ability_10Gkr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + + if (link_cfg->an_publicity.an_mode.speed_ability_25Gkrcr || + link_cfg->an_publicity.an_mode.speed_ability_25Gkrcr_s) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if (link_cfg->an_publicity.an_np_mode.speed_ability_25Gkr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if (link_cfg->an_publicity.an_np_mode.speed_ability_25Gcr) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + + if (link_cfg->an_publicity.an_np_mode.speed_ability_50Gcr2) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + + if (link_cfg->an_publicity.an_np_mode.speed_ability_50Gkr2) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + + if (link_cfg->an_publicity.an_mode.fec_rs528_25g) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if (link_cfg->an_publicity.an_mode.fec_ability_10g) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + + if (link_cfg->an_publicity.an_mode.fec_bsfec_25g) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if (link_cfg->an_publicity.an_mode.fec_en_10g) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + + if (link_cfg->an_publicity.an_mode.Consortium_25g_50g_en) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + + sxe2_vf_ethtool_support_fec_get(ability, ks); +} + +static void +sxe2_vf_get_advertise_link_mode_info(struct ethtool_link_ksettings *ks, + struct flm_ethtool_get_link_resp *link_cfg, + u32 speed) +{ + if (!link_cfg->current_an_en.current_an) { + if (speed == SXE2_LINK_SPEED_VF_10G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 10000baseKR_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_25G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 25000baseCR_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_25G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 25000baseKR_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_50G && + ethtool_link_ksettings_test_link_mode(ks, 
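+			/*
+			 * With autoneg disabled, only modes that match the
+			 * current link speed are copied into the advertised
+			 * set.
+			 */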
supported, + 50000baseCR2_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_50G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 50000baseKR2_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_100G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 100000baseCR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_100G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 100000baseKR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + } + + if (speed == SXE2_LINK_SPEED_VF_100G && + ethtool_link_ksettings_test_link_mode(ks, supported, + 100000baseSR4_Full)) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseSR4_Full); + } + } +} + +static void sxe2_vf_get_adpter_base_info(struct net_device *netdev, + struct ethtool_link_ksettings *ks, + u32 speed) +{ + u32 supported; + u32 advertising; + u32 lp_advertising; + struct sxe2_msg_ethtool_info link_cfg; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret; + + (void)ethtool_convert_link_mode_to_legacy_u32(&supported, + ks->link_modes.supported); + (void)ethtool_convert_link_mode_to_legacy_u32(&advertising, + ks->link_modes.advertising); + (void)ethtool_convert_link_mode_to_legacy_u32(&lp_advertising, + ks->link_modes.lp_advertising); + + link_cfg.usr_link_speed = FLM_FW_SPEED_AUTO; + ret = sxe2vf_ethtool_info_request(adapter, &link_cfg); + if (ret) + goto end; + + sxe2_vf_get_speed_ability(ks, &link_cfg.cfg, &link_cfg.ability, + link_cfg.usr_link_speed); + + sxe2_vf_get_hw_autoneg_info(ks, &link_cfg.cfg, advertising); + + sxe2_vf_get_advertise_link_mode_info(ks, &link_cfg.cfg, speed); + + sxe2_vf_get_advertise_fec_info(ks, &link_cfg.cfg, advertising, supported, + adapter->link_ctxt.link_up); + + sxe2_vf_get_an_info(ks, &link_cfg.cfg, advertising, supported, speed); + + sxe2_vf_get_hw_connect_info(ks, &link_cfg.cfg, advertising, supported); + + sxe2_vf_get_hw_part_adver_info(ks, &link_cfg.cfg, lp_advertising, supported); + +end: + return; +} + +STATIC int sxe2vf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_settings) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret = 0; + + ethtool_link_ksettings_zero_link_mode(link_settings, supported); + link_settings->base.autoneg = AUTONEG_DISABLE; + link_settings->base.port = PORT_NONE; + link_settings->base.duplex = DUPLEX_FULL; + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2vf_link_status_request(adapter); + mutex_unlock(&adapter->vsi_ctxt.lock); + + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2_vf_get_adpter_base_info(netdev, link_settings, + adapter->link_ctxt.speed); + mutex_unlock(&adapter->vsi_ctxt.lock); + + if ((!(adapter->link_ctxt.speed)) || (ret)) { + link_settings->base.speed = SPEED_UNKNOWN; + link_settings->base.duplex = DUPLEX_UNKNOWN; + return 0; + } + + switch (adapter->link_ctxt.speed) { + case SXE2_LINK_SPEED_VF_10G: + link_settings->base.speed = SPEED_10000; + break; + case SXE2_LINK_SPEED_VF_25G: + link_settings->base.speed = SPEED_25000; + break; + case SXE2_LINK_SPEED_VF_50G: + link_settings->base.speed = SPEED_50000; + break; + case SXE2_LINK_SPEED_VF_100G: + link_settings->base.speed = SPEED_100000; + break; + default: + break; + } + + return 0; +} + +STATIC void sxe2vf_get_drvinfo(struct net_device 
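+			       /* fw_version below is reported as main.sub.fix.build */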
*netdev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2_fw_ver_msg *fw_ver = &adapter->hw.fw_ver;
+
+	strscpy(drvinfo->driver, SXE2VF_DRV_NAME, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, SXE2_VERSION, sizeof(drvinfo->version));
+	(void)snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		       "%u.%u.%u.%u", fw_ver->main_version_id,
+		       fw_ver->sub_version_id, fw_ver->fix_version_id,
+		       fw_ver->build_id);
+	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->n_priv_flags = SXE2VF_PRIV_FLAG_LEN;
+}
+
+static u32 sxe2vf_get_msglevel(struct net_device *netdev)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->log_level_ctxt.msg_enable;
+}
+
+static void sxe2vf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->log_level_ctxt.msg_enable = data;
+}
+
+static s32 sxe2vf_get_tx_qc_coalesce(struct ethtool_coalesce *ec,
+				     struct sxe2vf_q_container *qc)
+{
+	if (!qc->list.cnt)
+		return -EINVAL;
+
+	ec->use_adaptive_tx_coalesce = SXE2VF_IS_ITR_DYNAMIC(qc);
+	ec->tx_coalesce_usecs = qc->itr_setting;
+
+	return 0;
+}
+
+static s32 sxe2vf_get_rx_qc_coalesce(struct ethtool_coalesce *ec,
+				     struct sxe2vf_q_container *qc)
+{
+	if (!qc->list.cnt)
+		return -EINVAL;
+
+	ec->use_adaptive_rx_coalesce = SXE2VF_IS_ITR_DYNAMIC(qc);
+	ec->rx_coalesce_usecs = qc->itr_setting;
+
+	return 0;
+}
+
+static s32 sxe2vf_get_queue_coalesce(struct sxe2vf_vsi *vsi,
+				     struct ethtool_coalesce *ec, u32 q_idx)
+{
+	if (q_idx < vsi->txqs.q_cnt && q_idx < vsi->rxqs.q_cnt) {
+		if (sxe2vf_get_tx_qc_coalesce(ec, SXE2VF_VSI_TX_QC(vsi, q_idx)))
+			return -EINVAL;
+		if (sxe2vf_get_rx_qc_coalesce(ec, SXE2VF_VSI_RX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else if (q_idx < vsi->txqs.q_cnt) {
+		if (sxe2vf_get_tx_qc_coalesce(ec, SXE2VF_VSI_TX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else if (q_idx < vsi->rxqs.q_cnt) {
+		if (sxe2vf_get_rx_qc_coalesce(ec, SXE2VF_VSI_RX_QC(vsi, q_idx)))
+			return -EINVAL;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+STATIC s32 sxe2vf_irq_coalesce_get(struct net_device *netdev,
+				   struct ethtool_coalesce *user, u32 q_idx)
+{
+	s32 ret = 0;
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, adapter->vsi_ctxt.vf_vsi->state)) {
+		LOG_INFO_BDF("vsi disabled, try later\n");
+		ret = -EBUSY;
+		goto end;
+	}
+
+	if (q_idx == SXE2VF_COALESCE_QIDX_INVAL)
+		q_idx = 0;
+	if (sxe2vf_get_queue_coalesce(vsi, user, q_idx))
+		ret = -EINVAL;
+
+end:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+STATIC void sxe2vf_invalid_itr_print(struct net_device *netdev,
+				     u32 use_adaptive_coalesce, u32 coalesce_usecs,
+				     const s8 *q_type_str)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	if (use_adaptive_coalesce)
+		return;
+
+	if (coalesce_usecs % adapter->irq_ctxt.itr_gran)
+		LOG_NETDEV_INFO("User set %s-usecs to invalid value %d, device only "
+				"supports values that are a multiple of %d. 
Rounding down and "
+				"attempting to set %s-usecs to %d\n",
+				q_type_str, coalesce_usecs,
+				adapter->irq_ctxt.itr_gran, q_type_str,
+				rounddown(coalesce_usecs,
+					  adapter->irq_ctxt.itr_gran));
+}
+
+STATIC void sxe2vf_invalid_coalesce_print(struct net_device *netdev,
+					  struct ethtool_coalesce *ec)
+{
+	sxe2vf_invalid_itr_print(netdev, ec->use_adaptive_tx_coalesce,
+				 ec->tx_coalesce_usecs, SXE2VF_Q_TYPE_STR_TX);
+	sxe2vf_invalid_itr_print(netdev, ec->use_adaptive_rx_coalesce,
+				 ec->rx_coalesce_usecs, SXE2VF_Q_TYPE_STR_RX);
+}
+
+static s32 sxe2vf_set_qc_itr(struct sxe2vf_q_container *qc, const s8 *q_type_str,
+			     u32 use_adaptive_coalesce, u32 coalesce_usecs)
+{
+	struct sxe2vf_irq_data *irq_data = qc->list.next->irq_data;
+	struct sxe2vf_vsi *vsi = irq_data->vsi;
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+	struct sxe2vf_hw *hw = &adapter->hw;
+	struct net_device *netdev = vsi->netdev;
+
+	if (use_adaptive_coalesce) {
+		if (coalesce_usecs != qc->itr_setting) {
+			LOG_NETDEV_INFO("%s interrupt throttling cannot be changed "
+					"if adaptive-%s is enabled\n",
+					q_type_str, q_type_str);
+			return -EINVAL;
+		}
+		qc->itr_mode = SXE2VF_ITR_DYNAMIC;
+
+	} else {
+		if (coalesce_usecs >
+		    SXE2VF_VF_INT_ITR_INTERVAL_MAX * adapter->irq_ctxt.itr_gran) {
+			LOG_NETDEV_INFO("Invalid value, %s-usecs range is 0-%d\n",
+					q_type_str,
+					SXE2VF_VF_INT_ITR_INTERVAL_MAX *
+						adapter->irq_ctxt.itr_gran);
+			return -EINVAL;
+		}
+		qc->itr_mode = SXE2VF_ITR_STATIC;
+		qc->itr_setting = rounddown(coalesce_usecs,
+					    adapter->irq_ctxt.itr_gran);
+		sxe2vf_hw_int_itr_set(hw, qc->itr_idx, irq_data->irq_idx,
+				      qc->itr_setting / adapter->irq_ctxt.itr_gran);
+	}
+
+	return 0;
+}
+
+static s32 sxe2vf_set_queue_coalesce(struct net_device *netdev,
+				     struct ethtool_coalesce *ec, u32 q_idx)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+	s32 ret;
+
+	if (q_idx < vsi->txqs.q_cnt && q_idx < vsi->rxqs.q_cnt) {
+		ret = sxe2vf_set_qc_itr(
+			SXE2VF_VSI_TX_QC(vsi, q_idx), SXE2VF_Q_TYPE_STR_TX,
+			ec->use_adaptive_tx_coalesce, ec->tx_coalesce_usecs);
+		if (ret)
+			return ret;
+
+		ret = sxe2vf_set_qc_itr(
+			SXE2VF_VSI_RX_QC(vsi, q_idx), SXE2VF_Q_TYPE_STR_RX,
+			ec->use_adaptive_rx_coalesce, ec->rx_coalesce_usecs);
+		if (ret)
+			return ret;
+	} else if (q_idx < vsi->txqs.q_cnt) {
+		ret = sxe2vf_set_qc_itr(
+			SXE2VF_VSI_TX_QC(vsi, q_idx), SXE2VF_Q_TYPE_STR_TX,
+			ec->use_adaptive_tx_coalesce, ec->tx_coalesce_usecs);
+		if (ret)
+			return ret;
+	} else if (q_idx < vsi->rxqs.q_cnt) {
+		ret = sxe2vf_set_qc_itr(
+			SXE2VF_VSI_RX_QC(vsi, q_idx), SXE2VF_Q_TYPE_STR_RX,
+			ec->use_adaptive_rx_coalesce, ec->rx_coalesce_usecs);
+		if (ret)
+			return ret;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static s32 sxe2vf_set_all_queue_coalesce(struct net_device *netdev,
+					 struct ethtool_coalesce *ec)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+	u16 irq_idx;
+	s32 ret;
+	struct sxe2vf_irq_data *irq_data;
+
+	sxe2vf_for_each_vsi_irq(vsi, irq_idx)
+	{
+		irq_data = vsi->irqs.irq_data[irq_idx];
+		if (SXE2VF_IRQ_HAS_TXQ(irq_data)) {
+			ret = sxe2vf_set_qc_itr(&irq_data->tx, SXE2VF_Q_TYPE_STR_TX,
+						ec->use_adaptive_tx_coalesce,
+						ec->tx_coalesce_usecs);
+			if (ret)
+				return ret;
+		}
+		if (SXE2VF_IRQ_HAS_RXQ(irq_data)) {
+			ret = sxe2vf_set_qc_itr(&irq_data->rx, SXE2VF_Q_TYPE_STR_RX,
+						ec->use_adaptive_rx_coalesce,
+						ec->rx_coalesce_usecs);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+STATIC s32 sxe2vf_irq_coalesce_set(struct
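+/*
+ * q_idx == SXE2VF_COALESCE_QIDX_INVAL is the "all queues" sentinel used by
+ * the plain get/set_coalesce entry points further down.
+ */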
net_device *netdev,
+				   struct ethtool_coalesce *user, u32 q_idx)
+{
+	s32 ret = 0;
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (q_idx == SXE2VF_COALESCE_QIDX_INVAL) {
+		ret = sxe2vf_set_all_queue_coalesce(netdev, user);
+
+	} else if (q_idx < vsi->txqs.q_cnt) {
+		ret = sxe2vf_set_queue_coalesce(netdev, user, q_idx);
+
+	} else {
+		LOG_NETDEV_INFO("Invalid queue idx, q_idx range is 0 - %d\n",
+				vsi->txqs.q_cnt - 1);
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		goto l_unlock;
+
+	sxe2vf_invalid_coalesce_print(netdev, user);
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+#ifdef GET_COALESCE_NEED_2_PARAMS
+static int sxe2vf_get_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec)
+#else
+static int sxe2vf_get_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+#endif
+{
+	return sxe2vf_irq_coalesce_get(netdev, ec, SXE2VF_COALESCE_QIDX_INVAL);
+}
+
+#ifdef SET_COALESCE_NEED_2_PARAMS
+static int sxe2vf_set_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec)
+#else
+static int sxe2vf_set_coalesce(struct net_device *netdev,
+			       struct ethtool_coalesce *ec,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
+#endif
+{
+	return sxe2vf_irq_coalesce_set(netdev, ec, SXE2VF_COALESCE_QIDX_INVAL);
+}
+
+static int sxe2vf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+					 struct ethtool_coalesce *ec)
+{
+	return sxe2vf_irq_coalesce_get(netdev, ec, queue);
+}
+
+static int sxe2vf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+					 struct ethtool_coalesce *ec)
+{
+	return sxe2vf_irq_coalesce_set(netdev, ec, queue);
+}
+
+static int sxe2vf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	int ret = -EOPNOTSUPP;
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = adapter->vsi_ctxt.vf_vsi->rxqs.q_cnt;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = adapter->fnav_ctxt.filter_cnt;
+		cmd->data = SXE2VF_MAX_FNAV_FILTERS;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = sxe2vf_ethtool_fnav_filter_get_by_loc(adapter, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = sxe2vf_ethtool_ntuple_filter_locs_get(adapter, cmd,
+							    (u32 *)rule_locs);
+		break;
+	case ETHTOOL_GRXFH:
+		sxe2vf_get_rss_flow(adapter, cmd);
+		ret = 0;
+		break;
+	default:
+		LOG_DEBUG_BDF("command parameters not supported, cmd=%u\n",
+			      cmd->cmd);
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+STATIC int sxe2vf_get_rxfh(struct net_device *netdev,
+			   struct ethtool_rxfh_param *rxfh)
+#else
+STATIC int sxe2vf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+#endif
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+	int ret = 0;
+	u32 i = 0;
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	u32 *indir = rxfh->indir;
+	u8 *key = rxfh->key;
+#endif
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		LOG_ERROR_BDF("sxe2 vf rss (id: %u) ethtool get rxfh state is "
+			      "disabled.\n",
+			      vsi->vsi_id);
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	rxfh->hfunc = ETH_RSS_HASH_TOP;
+#else
+
+	if (hfunc)
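+	/* only Toeplitz (ETH_RSS_HASH_TOP) is reported, on both API variants */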
*hfunc = ETH_RSS_HASH_TOP;
+#endif
+
+	if (indir) {
+		if (adapter->rss_ctxt.lut) {
+			for (i = 0; i < adapter->rss_ctxt.rss_lut_size; i++)
+				indir[i] = (u32)(adapter->rss_ctxt.lut[i]);
+		}
+	}
+
+	if (key) {
+		if (adapter->rss_ctxt.key)
+			memcpy(key, adapter->rss_ctxt.key,
+			       adapter->rss_ctxt.rss_key_size);
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+STATIC int sxe2vf_set_rxfh(struct net_device *netdev,
+			   struct ethtool_rxfh_param *rxfh,
+			   struct netlink_ext_ack *extack)
+#else
+STATIC int sxe2vf_set_rxfh(struct net_device *netdev, const u32 *indir,
+			   const u8 *key, const u8 hfunc)
+#endif
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter);
+	struct sxe2vf_msg_params params = {0};
+	int ret = 0;
+	u32 i = 0;
+	u8 *user_key = NULL;
+	u8 *user_lut = NULL;
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+#ifdef HAVE_ETHTOOL_RXFH_PARAM
+	const u32 *indir = rxfh->indir;
+	const u8 *key = rxfh->key;
+	const u8 hfunc = rxfh->hfunc;
+#endif
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		LOG_ERROR_BDF("sxe2 vf rss (id: %u) ethtool set rxfh state is "
+			      "disabled.\n",
+			      vsi->vsi_id);
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+		ret = -EOPNOTSUPP;
+		goto l_unlock;
+	}
+
+	if (key) {
+		user_key = devm_kzalloc(dev, adapter->rss_ctxt.rss_key_size,
+					GFP_KERNEL);
+		if (!user_key) {
+			LOG_ERROR_BDF("no memory for user hash key.\n");
+			ret = -ENOMEM;
+			goto l_unlock;
+		}
+		memcpy(user_key, key, adapter->rss_ctxt.rss_key_size);
+	}
+
+	if (indir) {
+		user_lut = devm_kzalloc(dev, adapter->rss_ctxt.rss_lut_size,
+					GFP_KERNEL);
+		if (!user_lut) {
+			LOG_ERROR_BDF("no memory for user lut.\n");
+			ret = -ENOMEM;
+			goto l_unlock;
+		}
+		for (i = 0; i < adapter->rss_ctxt.rss_lut_size; i++)
+			user_lut[i] = (u8)(indir[i]);
+	}
+
+	if (key) {
+		sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+						SXE2_VF_SET_RSS_KEY, user_key,
+						adapter->rss_ctxt.rss_key_size, NULL,
+						0);
+		ret = sxe2vf_mbx_msg_send(adapter, &params);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 vf rss (id: %u) mbx msg send set rss "
+				      "key fail.\n",
+				      vsi->vsi_id);
+			goto l_unlock;
+		}
+		if (adapter->rss_ctxt.key) {
+			memcpy(adapter->rss_ctxt.key, user_key,
+			       adapter->rss_ctxt.rss_key_size);
+		}
+	}
+
+	if (indir) {
+		sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+						SXE2_VF_SET_RSS_LUT, user_lut,
+						adapter->rss_ctxt.rss_lut_size, NULL,
+						0);
+		ret = sxe2vf_mbx_msg_send(adapter, &params);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 vf rss (id: %u) mbx msg send set rss "
+				      "lut fail.\n",
+				      vsi->vsi_id);
+			goto l_unlock;
+		}
+		if (adapter->rss_ctxt.lut) {
+			memcpy(adapter->rss_ctxt.lut, user_lut,
+			       adapter->rss_ctxt.rss_lut_size);
+		}
+	}
+
+l_unlock:
+	if (user_key)
+		devm_kfree(dev, user_key);
+	if (user_lut)
+		devm_kfree(dev, user_lut);
+
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+STATIC u32 sxe2vf_get_rxft_key_size(struct net_device *netdev)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	return (u32)adapter->rss_ctxt.rss_key_size;
+}
+
+STATIC u32 sxe2vf_get_rxft_indir_size(struct net_device *netdev)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	return (u32)adapter->rss_ctxt.rss_lut_size;
+}
+
+static const u8 eth_addr_full_mask[SXE2_FNAV_ETH_ADDR_LEN] = {
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static const u8
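+/*
+ * The all-ones/all-zero masks below let the fnav mask validator accept only
+ * exact-match or don't-care fields; partial masks get -EOPNOTSUPP.
+ */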
eth_addr_zero_mask[SXE2_FNAV_ETH_ADDR_LEN] = {
+	0, 0, 0, 0, 0, 0,
+};
+
+static const struct in6_addr ipv6_addr_full_mask = {
+	.in6_u = {
+		.u6_addr8 = {
+			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		},
+	},
+};
+
+static const struct in6_addr ipv6_addr_zero_mask = {
+	.in6_u = {
+		.u6_addr8 = {
+			0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0,
+		},
+	},
+};
+
+STATIC void sxe2vf_fnav_fld_convert_msg(unsigned long *flds,
+					struct sxe2_fnav_comm_proto_hdr *proto_hdr)
+{
+	u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)];
+	u32 i = 0;
+
+	bitmap_to_arr32(tmp_flds, flds, SXE2_FLOW_FLD_ID_MAX);
+
+	for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++)
+		proto_hdr->flds[i] = cpu_to_le32(tmp_flds[i]);
+}
+
+STATIC void sxe2vf_fill_fnav_ip4_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				     struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_IPV4);
+
+	if (full_key->ip_mask.tos == U8_MAX) {
+		proto_hdr->ipv4.tos = full_key->ip_data.tos;
+		set_bit(SXE2_FLOW_FLD_ID_IPV4_TOS, flds);
+	}
+
+	if (full_key->ip_mask.proto == U8_MAX) {
+		proto_hdr->ipv4.proto = full_key->ip_data.proto;
+		set_bit(SXE2_FLOW_FLD_ID_IPV4_PROT, flds);
+	}
+
+	if (full_key->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
+		proto_hdr->ipv4.saddr = full_key->ip_data.v4_addrs.src_ip;
+		set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, flds);
+	}
+
+	if (full_key->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
+		proto_hdr->ipv4.daddr = full_key->ip_data.v4_addrs.dst_ip;
+		set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC void sxe2vf_fill_fnav_ip6_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				     struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_IPV6);
+
+	if (full_key->ip_mask.tclass == U8_MAX) {
+		proto_hdr->ipv6.tc = full_key->ip_data.tclass;
+		set_bit(SXE2_FLOW_FLD_ID_IPV6_DSCP, flds);
+	}
+
+	if (full_key->ip_mask.proto == U8_MAX) {
+		proto_hdr->ipv6.proto = full_key->ip_data.proto;
+		set_bit(SXE2_FLOW_FLD_ID_IPV6_PROT, flds);
+	}
+
+	if (!memcmp(&full_key->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+		    sizeof(struct in6_addr))) {
+		memcpy(&proto_hdr->ipv6.src_ip, &full_key->ip_data.v6_addrs.src_ip,
+		       sizeof(struct in6_addr));
+		set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, flds);
+	}
+
+	if (!memcmp(&full_key->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+		    sizeof(struct in6_addr))) {
+		memcpy(&proto_hdr->ipv6.dst_ip, &full_key->ip_data.v6_addrs.dst_ip,
+		       sizeof(struct in6_addr));
+		set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC void sxe2vf_fill_fnav_tcp_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				     struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_TCP);
+
+	if (full_key->ip_mask.src_port == htons(U16_MAX)) {
+		proto_hdr->l4.src_port
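+		/* port values and masks arrive from ethtool in network byte order */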
= full_key->ip_data.src_port;
+		set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, flds);
+	}
+
+	if (full_key->ip_mask.dst_port == htons(U16_MAX)) {
+		proto_hdr->l4.dst_port = full_key->ip_data.dst_port;
+		set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC void sxe2vf_fill_fnav_udp_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				     struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_UDP);
+
+	if (full_key->ip_mask.src_port == htons(U16_MAX)) {
+		proto_hdr->l4.src_port = full_key->ip_data.src_port;
+		set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, flds);
+	}
+
+	if (full_key->ip_mask.dst_port == htons(U16_MAX)) {
+		proto_hdr->l4.dst_port = full_key->ip_data.dst_port;
+		set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC void sxe2vf_fill_fnav_sctp_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				      struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_SCTP);
+
+	if (full_key->ip_mask.src_port == htons(U16_MAX)) {
+		proto_hdr->l4.src_port = full_key->ip_data.src_port;
+		set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, flds);
+	}
+
+	if (full_key->ip_mask.dst_port == htons(U16_MAX)) {
+		proto_hdr->l4.dst_port = full_key->ip_data.dst_port;
+		set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC void sxe2vf_fill_fnav_eth_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				     struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_ETH);
+
+	if (full_key->eth_mask.etype == htons(U16_MAX)) {
+		proto_hdr->eth.etype = full_key->eth_data.etype;
+		set_bit(SXE2_FLOW_FLD_ID_ETH_TYPE, flds);
+	}
+
+	if (!memcmp(&full_key->eth_mask.src, &eth_addr_full_mask,
+		    sizeof(full_key->eth_mask.src))) {
+		memcpy(&proto_hdr->eth.src, &full_key->eth_data.src,
+		       sizeof(proto_hdr->eth.src));
+		set_bit(SXE2_FLOW_FLD_ID_ETH_SA, flds);
+	}
+
+	if (!memcmp(&full_key->eth_mask.dst, &eth_addr_full_mask,
+		    sizeof(full_key->eth_mask.dst))) {
+		memcpy(&proto_hdr->eth.dst, &full_key->eth_data.dst,
+		       sizeof(proto_hdr->eth.dst));
+		set_bit(SXE2_FLOW_FLD_ID_ETH_DA, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC bool sxe2vf_ethtool_vlan_seg_valid(struct ethtool_rx_flow_spec *fsp)
+{
+	bool ret = fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci;
+
+	if (!ret)
+		return true;
+
+	if (fsp->m_ext.vlan_etype &&
+	    !(fsp->h_ext.vlan_etype == cpu_to_be16(ETH_P_8021Q) ||
+	      fsp->h_ext.vlan_etype == cpu_to_be16(ETH_P_8021AD))) {
+		ret = false;
+		goto l_end;
+	}
+
+	if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID) {
+		ret = false;
+		goto l_end;
+	}
+
+	if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+	    !fsp->m_ext.vlan_etype) {
+		LOG_WARN("Filters matching proto and vlan-tci also require vlan-etype.\n");
+		ret =
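+		      /* proto + vlan-tci without a vlan-etype mask is ambiguous */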
false;
+		goto l_end;
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC void sxe2vf_fill_fnav_vlan_hdr(struct sxe2vf_fnav_filter_full_key *full_key,
+				      struct sxe2_fnav_comm_full_msg *full_msg)
+{
+	struct sxe2_fnav_comm_proto_hdr *proto_hdr =
+		&full_msg->proto_hdr[full_msg->proto_cnt];
+	DECLARE_BITMAP(flds, SXE2_FLOW_FLD_ID_MAX);
+
+	bitmap_zero(flds, SXE2_FLOW_FLD_ID_MAX);
+	proto_hdr->type = cpu_to_le32(SXE2_FLOW_HDR_VLAN);
+
+	if (full_key->ext_mask.vlan_type == htons(U16_MAX)) {
+		proto_hdr->vlan.vlan_type = full_key->ext_data.vlan_type;
+		set_bit(SXE2_FLOW_FLD_ID_S_TPID, flds);
+	}
+
+	if (full_key->ext_mask.s_vlan_tag == htons(U16_MAX)) {
+		proto_hdr->vlan.vlan_tci = full_key->ext_data.s_vlan_tag;
+		set_bit(SXE2_FLOW_FLD_ID_S_TCI, flds);
+	}
+
+	sxe2vf_fnav_fld_convert_msg(flds, proto_hdr);
+	full_msg->proto_cnt++;
+}
+
+STATIC s32 sxe2vf_validate_fnav_filter_masks(struct sxe2vf_adapter *adapter,
+					     struct sxe2vf_fnav_filter *filter)
+{
+	if (filter->full_key.eth_mask.etype &&
+	    filter->full_key.eth_mask.etype != htons(U16_MAX))
+		goto partial_mask;
+	if (memcmp(filter->full_key.eth_mask.src, &eth_addr_full_mask,
+		   SXE2_FNAV_ETH_ADDR_LEN) &&
+	    memcmp(filter->full_key.eth_mask.src, &eth_addr_zero_mask,
+		   SXE2_FNAV_ETH_ADDR_LEN))
+		goto partial_mask;
+	if (memcmp(filter->full_key.eth_mask.dst, &eth_addr_full_mask,
+		   SXE2_FNAV_ETH_ADDR_LEN) &&
+	    memcmp(filter->full_key.eth_mask.dst, &eth_addr_zero_mask,
+		   SXE2_FNAV_ETH_ADDR_LEN))
+		goto partial_mask;
+
+	if (filter->full_key.ext_mask.s_vlan_tag &&
+	    filter->full_key.ext_mask.s_vlan_tag != htons(U16_MAX))
+		goto partial_mask;
+	if (filter->full_key.ext_mask.vlan_type &&
+	    filter->full_key.ext_mask.vlan_type != htons(U16_MAX))
+		goto partial_mask;
+
+	if (filter->full_key.ip_ver == 4) {
+		if (filter->full_key.ip_mask.v4_addrs.src_ip &&
+		    filter->full_key.ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (filter->full_key.ip_mask.v4_addrs.dst_ip &&
+		    filter->full_key.ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (filter->full_key.ip_mask.tos &&
+		    filter->full_key.ip_mask.tos != U8_MAX)
+			goto partial_mask;
+	} else if (filter->full_key.ip_ver == 6) {
+		if (memcmp(&filter->full_key.ip_mask.v6_addrs.src_ip,
+			   &ipv6_addr_zero_mask, sizeof(struct in6_addr)) &&
+		    memcmp(&filter->full_key.ip_mask.v6_addrs.src_ip,
+			   &ipv6_addr_full_mask, sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (memcmp(&filter->full_key.ip_mask.v6_addrs.dst_ip,
+			   &ipv6_addr_zero_mask, sizeof(struct in6_addr)) &&
+		    memcmp(&filter->full_key.ip_mask.v6_addrs.dst_ip,
+			   &ipv6_addr_full_mask, sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (filter->full_key.ip_mask.tclass &&
+		    filter->full_key.ip_mask.tclass != U8_MAX)
+			goto partial_mask;
+	}
+
+	if (filter->full_key.ip_mask.proto &&
+	    filter->full_key.ip_mask.proto != U8_MAX)
+		goto partial_mask;
+
+	if (filter->full_key.ip_mask.src_port &&
+	    filter->full_key.ip_mask.src_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (filter->full_key.ip_mask.dst_port &&
+	    filter->full_key.ip_mask.dst_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (filter->full_key.ip_mask.spi &&
+	    filter->full_key.ip_mask.spi != htonl(U32_MAX))
+		goto partial_mask;
+
+	if (filter->full_key.ip_mask.l4_header &&
+	    filter->full_key.ip_mask.l4_header != htonl(U32_MAX))
+		goto partial_mask;
+
+	return 0;
+
+partial_mask:
+	return -EOPNOTSUPP;
+}
+
+STATIC s32 sxe2vf_ethtool_fnav_full_key_fill(struct ethtool_rx_flow_spec *fsp,
+					     struct sxe2vf_fnav_filter *filter)
+{
+	int ret = 0;
+	struct sxe2vf_fnav_filter_full_key
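+	/*
+	 * full_key mirrors the ethtool rx_flow_spec layout: exact-value
+	 * halves (eth_data/ip_data/ext_data) paired with mask halves.
+	 */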
*full_key = &filter->full_key; + + if (fsp->flow_type & FLOW_EXT) { + memcpy(full_key->ext_data.usr_def, fsp->h_ext.data, + sizeof(full_key->ext_data.usr_def)); + full_key->ext_data.vlan_type = fsp->h_ext.vlan_etype; + full_key->ext_data.s_vlan_tag = fsp->h_ext.vlan_tci; + memcpy(full_key->ext_mask.usr_def, fsp->m_ext.data, + sizeof(full_key->ext_mask.usr_def)); + full_key->ext_mask.vlan_type = fsp->m_ext.vlan_etype; + full_key->ext_mask.s_vlan_tag = fsp->m_ext.vlan_tci; + filter->has_flex_filed = SXE2VF_FNAV_HAS_FLEX_FIELD; + } else { + filter->has_flex_filed = SXE2VF_FNAV_NO_FLEX_FIELD; + } + + switch (filter->flow_type) { + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + full_key->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + full_key->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + full_key->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc; + full_key->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + full_key->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos; + full_key->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + full_key->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + full_key->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + full_key->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + full_key->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + full_key->ip_ver = 4; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + full_key->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + full_key->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + full_key->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes; + full_key->ip_data.tos = fsp->h_u.usr_ip4_spec.tos; + full_key->ip_data.proto = fsp->h_u.usr_ip4_spec.proto; + full_key->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + full_key->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + full_key->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + full_key->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; + full_key->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + full_key->ip_ver = 4; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + memcpy(&full_key->ip_data.v6_addrs.src_ip, + fsp->h_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); + memcpy(&full_key->ip_data.v6_addrs.dst_ip, + fsp->h_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); + full_key->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc; + full_key->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + full_key->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(&full_key->ip_mask.v6_addrs.src_ip, + fsp->m_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); + memcpy(&full_key->ip_mask.v6_addrs.dst_ip, + fsp->m_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); + full_key->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + full_key->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + full_key->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + full_key->ip_ver = 6; + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + memcpy(&full_key->ip_data.v6_addrs.src_ip, + fsp->h_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); + memcpy(&full_key->ip_data.v6_addrs.dst_ip, + fsp->h_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); + full_key->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + full_key->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass; + if (!fsp->m_u.usr_ip6_spec.l4_proto) + full_key->ip_data.proto = IPPROTO_NONE; + else + full_key->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto; + 
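+		/*
+		 * Above, IPPROTO_NONE stands in for "no L4 protocol match"
+		 * when the user left the l4_proto mask clear.
+		 */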
memcpy(&full_key->ip_mask.v6_addrs.src_ip, + fsp->m_u.usr_ip6_spec.ip6src, sizeof(struct in6_addr)); + memcpy(&full_key->ip_mask.v6_addrs.dst_ip, + fsp->m_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); + full_key->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; + full_key->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; + full_key->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + full_key->ip_ver = 6; + break; + case SXE2_FNAV_FLOW_TYPE_ETH: + memcpy(full_key->eth_data.src, fsp->h_u.ether_spec.h_source, + sizeof(full_key->eth_data.src)); + memcpy(full_key->eth_data.dst, fsp->h_u.ether_spec.h_dest, + sizeof(full_key->eth_data.src)); + full_key->eth_data.etype = fsp->h_u.ether_spec.h_proto; + memcpy(full_key->eth_mask.src, fsp->m_u.ether_spec.h_source, + sizeof(full_key->eth_mask.src)); + memcpy(full_key->eth_mask.dst, fsp->m_u.ether_spec.h_dest, + sizeof(full_key->eth_mask.src)); + full_key->eth_mask.etype = fsp->m_u.ether_spec.h_proto; + break; + default: + return -EINVAL; + } + + return ret; +} + +STATIC s32 sxe2vf_ethtool_fnav_valid_param_check(enum sxe2_fnav_flow_type flow_type, + struct ethtool_rx_flow_spec *fsp) +{ + s32 ret = 0; + struct ethtool_tcpip4_spec *l4_ip4_spec = &fsp->m_u.tcp_ip4_spec; + struct ethtool_tcpip6_spec *l4_ip6_spec = &fsp->m_u.tcp_ip6_spec; + struct ethtool_usrip4_spec *usr_ip4_spec = &fsp->m_u.usr_ip4_spec; + struct ethtool_usrip6_spec *usr_ip6_spec = &fsp->m_u.usr_ip6_spec; + struct ethhdr *eth_spec = &fsp->m_u.ether_spec; + + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + if (is_zero_ether_addr(eth_spec->h_source) && + is_zero_ether_addr(eth_spec->h_dest) && !eth_spec->h_proto && + !fsp->m_ext.vlan_etype && !fsp->m_ext.vlan_tci) { + return -EINVAL; + } + if (!sxe2vf_ethtool_vlan_seg_valid(fsp)) + return -EINVAL; + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + if (!l4_ip4_spec->psrc && !l4_ip4_spec->ip4src && + !l4_ip4_spec->pdst && !l4_ip4_spec->ip4dst && + !l4_ip4_spec->tos) { + return -EINVAL; + } + if ((!is_zero_ether_addr(fsp->m_ext.h_dest)) || + fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci) { + return -EOPNOTSUPP; + } + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + if (ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6src) && + ipv6_addr_any((struct in6_addr *)l4_ip6_spec->ip6dst) && + !l4_ip6_spec->psrc && !l4_ip6_spec->pdst && + !l4_ip6_spec->tclass) { + return -EINVAL; + } + if ((!is_zero_ether_addr(fsp->m_ext.h_dest)) || + fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci) { + return -EOPNOTSUPP; + } + break; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst && + !usr_ip4_spec->tos && !usr_ip4_spec->proto) { + return -EINVAL; + } + if (fsp->m_u.usr_ip4_spec.l4_4_bytes || + fsp->m_u.usr_ip4_spec.ip_ver || + (!is_zero_ether_addr(fsp->m_ext.h_dest)) || + fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci) { + return -EOPNOTSUPP; + } + if (usr_ip4_spec->proto == 0xFF && + (fsp->h_u.usr_ip4_spec.proto == SXE2VF_FNAV_L4_PROT_TCP || + fsp->h_u.usr_ip4_spec.proto == SXE2VF_FNAV_L4_PROT_UDP || + fsp->h_u.usr_ip4_spec.proto == SXE2VF_FNAV_L4_PROT_SCTP)) { + return -EOPNOTSUPP; + } + break; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + if (ipv6_addr_any((struct in6_addr *)usr_ip6_spec->ip6src) && + ipv6_addr_any((struct in6_addr *)usr_ip6_spec->ip6dst) && + !usr_ip6_spec->l4_proto && !usr_ip6_spec->tclass) { + return -EINVAL; + } + if (fsp->m_u.usr_ip6_spec.l4_4_bytes || + 
(!is_zero_ether_addr(fsp->m_ext.h_dest)) ||
+		    fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci) {
+			return -EOPNOTSUPP;
+		}
+		if (usr_ip6_spec->l4_proto == 0xFF &&
+		    (fsp->h_u.usr_ip6_spec.l4_proto == SXE2VF_FNAV_L4_PROT_TCP ||
+		     fsp->h_u.usr_ip6_spec.l4_proto == SXE2VF_FNAV_L4_PROT_UDP ||
+		     fsp->h_u.usr_ip6_spec.l4_proto == SXE2VF_FNAV_L4_PROT_SCTP)) {
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+STATIC s32 sxe2vf_ethtool_fnav_filter_fill(struct sxe2vf_adapter *adapter,
+					   struct ethtool_rx_flow_spec *fsp,
+					   struct sxe2vf_fnav_filter *filter)
+{
+	int ret = 0;
+	u32 ring = 0;
+	u8 vf = 0;
+	enum sxe2_fnav_flow_type flow_type;
+
+#ifdef HAVE_ETHTOOL_FLOW_RSS
+	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+#else
+	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+#endif
+	flow_type = sxe2vf_ethtool_flow_to_type(flow_type);
+	if (flow_type == SXE2_FNAV_FLOW_TYPE_NONE) {
+#ifdef HAVE_ETHTOOL_FLOW_RSS
+		LOG_DEV_ERR("unsupported flow type, fsp->flow_type:%d\n",
+			    fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS));
+#else
+		LOG_DEV_ERR("unsupported flow type, fsp->flow_type:%d\n",
+			    fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT));
+#endif
+		ret = -EINVAL;
+		goto l_end;
+	}
+	filter->flow_type = flow_type;
+
+	filter->filter_loc = fsp->location;
+
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		filter->act_type = SXE2_FNAV_ACTION_DROP;
+		filter->q_index = 0;
+	} else if ((~(ETHTOOL_RX_FLOW_SPEC_RING | ETHTOOL_RX_FLOW_SPEC_RING_VF)) &
+		   fsp->ring_cookie) {
+		LOG_DEV_ERR("failed to add filter. unsupported action %lld.\n",
+			    fsp->ring_cookie);
+		ret = -EOPNOTSUPP;
+		goto l_end;
+	} else {
+		ring = (u32)ethtool_get_flow_spec_ring(fsp->ring_cookie);
+		vf = (u8)ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+		if (vf) {
+			LOG_DEV_ERR("failed to add filter. vf fnav "
+				    "not supported on VF queues.\n");
+			ret = -EINVAL;
+			goto l_end;
+		}
+		if (ring >= adapter->vsi_ctxt.vf_vsi->rxqs.q_cnt) {
+			LOG_DEV_ERR("failed to add filter. unsupported q_index "
+				    "%u.\n", ring);
+			ret = -EINVAL;
+			goto l_end;
+		}
+		filter->act_type = SXE2_FNAV_ACTION_QUEUE;
+		filter->q_index = (u16)ring;
+	}
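+	/* Three-stage validation follows: reject ethtool fields the hardware
+	 * cannot match on, copy the spec into the driver's full key, then
+	 * require every mask to be either empty or exact (no partial masks).
+	 */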
+	ret = sxe2vf_ethtool_fnav_valid_param_check(flow_type, fsp);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool cmd has unsupported param, ret: %d\n", ret);
+		goto l_end;
+	}
+
+	ret = sxe2vf_ethtool_fnav_full_key_fill(fsp, filter);
+	if (ret) {
+		LOG_ERROR_BDF("failed to add full key, ret: %d\n", ret);
+		goto l_end;
+	}
+	ret = sxe2vf_validate_fnav_filter_masks(adapter, filter);
+	if (ret)
+		LOG_ERROR_BDF("failed to validate filter masks, ret: %d\n", ret);
+
+l_end:
+	return ret;
+}
+
+STATIC int
+sxe2vf_ethtool_parse_ntuple_userdef(struct sxe2vf_adapter *adapter,
+				    struct sxe2vf_fnav_filter *filter,
+				    struct sxe2_fnav_comm_user_data *user_data)
+{
+	u64 value, mask;
+	u16 flex_offset;
+
+	if (!filter->has_flex_filed)
+		return 0;
+
+	value = be64_to_cpu(*((__force __be64 *)filter->full_key.ext_data.usr_def));
+	mask = be64_to_cpu(*((__force __be64 *)filter->full_key.ext_mask.usr_def));
+
+	if (!mask)
+		return 0;
+
+	LOG_DEBUG_BDF("user-def param:0x%llx.\n", value);
+
+	if (!((mask & SXE2VF_USERDEF_FLEX_FLTR_M) == SXE2VF_USERDEF_FLEX_FLTR_M) ||
+	    value > SXE2VF_USERDEF_FLEX_FLTR_M) {
+		LOG_ERROR_BDF("sxe2 vf fnav flex mask=%llu value=%llu is invalid.\n",
+			      mask, value);
+		return -EINVAL;
+	}
+
+	flex_offset = (u16)FIELD_GET(SXE2VF_USERDEF_FLEX_OFFS_M, value);
+	if (flex_offset > SXE2VF_USERDEF_FLEX_MAX_OFFS_VAL) {
+		LOG_ERROR_BDF("sxe2 vf fnav flex offset = %u is invalid.\n",
+			      flex_offset);
+		return -EINVAL;
+	}
+
+	user_data->flex_word =
+		cpu_to_be16((u16)(value & SXE2VF_USERDEF_FLEX_WORD_M));
+	user_data->flex_offset = cpu_to_le16(flex_offset);
+	user_data->has_flex_filed = filter->has_flex_filed;
+
+	return 0;
+}
+
+s32 sxe2vf_fill_fnav_filter_full_msg(struct sxe2vf_adapter *adapter,
+				     struct sxe2vf_fnav_filter *filter)
+{
+	struct sxe2vf_fnav_filter_full_key *full_key = &filter->full_key;
+	struct sxe2_fnav_comm_full_msg *full_msg = &filter->full_msg;
+	int ret = 0;
+	u32 i = 0;
+	u8 act_count = 2;
+
+	full_msg->filter_loc = cpu_to_le32(filter->filter_loc);
+	full_msg->flow_type = cpu_to_le32(filter->flow_type);
+
+	full_msg->action_cnt = act_count;
+	full_msg->action[0].act_queue.q_index = cpu_to_le16(filter->q_index);
+	full_msg->action[0].type = cpu_to_le32(filter->act_type);
+	full_msg->action[1].act_count.stat_ctrl =
+		cpu_to_le32(SXE2_FNAV_STAT_ENA_PKTS);
+	full_msg->action[1].act_count.stat_index =
+		cpu_to_le32(adapter->fnav_ctxt.stat_idx);
+	full_msg->action[1].type = cpu_to_le32(SXE2_FNAV_ACTION_COUNT);
+
+	full_msg->proto_cnt = 0;
+
+	switch (full_msg->flow_type) {
+	case SXE2_FNAV_FLOW_TYPE_ETH:
+		sxe2vf_fill_fnav_eth_hdr(full_key, full_msg);
+		if (full_key->ext_mask.vlan_type || full_key->ext_mask.s_vlan_tag) {
+			sxe2vf_fill_fnav_vlan_hdr(full_key, full_msg);
+		}
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV4_TCP:
+		sxe2vf_fill_fnav_ip4_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_tcp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV4_UDP:
+		sxe2vf_fill_fnav_ip4_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_udp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP:
+		sxe2vf_fill_fnav_ip4_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_sctp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER:
+		sxe2vf_fill_fnav_ip4_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV6_TCP:
+		sxe2vf_fill_fnav_ip6_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_tcp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV6_UDP:
+		sxe2vf_fill_fnav_ip6_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_udp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP:
+		sxe2vf_fill_fnav_ip6_hdr(full_key, full_msg);
+		sxe2vf_fill_fnav_sctp_hdr(full_key, full_msg);
+		break;
+	case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER:
+		sxe2vf_fill_fnav_ip6_hdr(full_key, full_msg);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret || full_msg->proto_cnt == 0) {
+		LOG_ERROR_BDF("ethtool fnav parse proto failed, ret:%d, "
+			      "proto_cnt:%u.\n", ret, full_msg->proto_cnt);
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2vf_ethtool_parse_ntuple_userdef(adapter, filter,
+						  &full_msg->usr_data);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fnav parse user data failed, ret:%d\n", ret);
+		goto l_end;
+	}
+
+	for (i = 0; i < full_msg->proto_cnt; i++)
+		full_msg->proto_hdr[i].tunnel_level = SXE2_FNAV_TUN_FLAG_ANY;
+
+	if (test_bit(SXE2VF_FLAG_FNAV_TUNNEL, adapter->flags)) {
+		full_msg->tunn_flag = cpu_to_le32(SXE2_FNAV_TUN_FLAG_ANY);
+	} else {
+		full_msg->tunn_flag = cpu_to_le32(SXE2_FNAV_TUN_FLAG_NO_TUNNEL);
+	}
+
+l_end:
+	return ret;
+}
+
+STATIC s32 sxe2vf_fnav_check_and_remove_filter_at_loc(struct sxe2vf_adapter *adapter,
+						      u32 loc)
+{
+	struct sxe2vf_fnav_filter *filter_old = NULL;
+	s32 ret = 0;
+
+	mutex_lock(&adapter->fnav_ctxt.filter_list_lock);
+
+	filter_old = sxe2vf_fnav_find_filter_by_loc_unlock(adapter, loc);
+	if (filter_old) {
+		ret = sxe2vf_fnav_del_filter(adapter, filter_old);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 vf fnav delete filter failed, ret:%d\n",
+				      ret);
+		}
+	}
+	mutex_unlock(&adapter->fnav_ctxt.filter_list_lock);
+	return ret;
+}
+
+s32 sxe2vf_fnav_add_filter_with_packet(struct sxe2vf_adapter *adapter,
+				       struct sxe2vf_fnav_filter *filter)
+{
+	struct sxe2_fnav_comm_full_msg *filter_msg = NULL;
+	struct sxe2vf_msg_params params = {0};
+	s32 ret = 0;
+	struct sxe2_vf_fnav_add_filter_resp filter_resp;
+
+	filter_msg = kzalloc(sizeof(struct sxe2_fnav_comm_full_msg), GFP_KERNEL);
+	if (!filter_msg) {
+		ret = -ENOMEM;
+		LOG_ERROR_BDF("No memory!\n");
+		goto l_end;
+	}
+
+	memcpy(filter_msg, &filter->full_msg,
+	       sizeof(struct sxe2_fnav_comm_full_msg));
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_FNAV_FILTER_ADD, filter_msg,
+					sizeof(struct sxe2_fnav_comm_full_msg),
+					&filter_resp, sizeof(filter_resp));
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 vf fnav add filter fail, ret = %d !\n", ret);
+	} else {
+		filter->flow_id = filter_resp.flow_id;
+		LOG_INFO_BDF("sxe2 vf fnav add filter success, flow_id = %u !\n",
+			     filter_resp.flow_id);
+	}
+
+l_end:
+	kfree(filter_msg);
+
+	return ret;
+}
+
+#ifdef SXE2_SUPPORT_ACL
+STATIC bool sxevf_is_acl_filter(struct ethtool_rx_flow_spec *fsp)
+{
+	struct ethtool_tcpip4_spec *tcp_ip4_spec;
+	struct ethtool_usrip4_spec *usr_ip4_spec;
+	struct ethhdr *eth_spec;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
+
+		if (tcp_ip4_spec->ip4src &&
+		    tcp_ip4_spec->ip4src != htonl(0xFFFFFFFF))
+			return true;
+
+		if (tcp_ip4_spec->ip4dst &&
+		    tcp_ip4_spec->ip4dst != htonl(0xFFFFFFFF))
+			return true;
+
+		if (!tcp_ip4_spec->ip4src && !tcp_ip4_spec->ip4dst &&
+		    !tcp_ip4_spec->psrc && !tcp_ip4_spec->pdst && !tcp_ip4_spec->tos)
+			return true;
+
+		if (tcp_ip4_spec->psrc && tcp_ip4_spec->psrc != htons(0xFFFF))
+			return true;
+
+		if (tcp_ip4_spec->pdst && tcp_ip4_spec->pdst != htons(0xFFFF))
+			return true;
+
+		break;
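+	/* The cases above and below implement one heuristic: a rule whose
+	 * address/port masks are neither zero nor all-ones (or that matches
+	 * nothing at all) cannot be expressed as an exact-match fnav entry,
+	 * so it is steered to the ACL path instead.
+	 */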
+	case IPV4_USER_FLOW:
+		usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
+
+		if (usr_ip4_spec->ip4src &&
+		    usr_ip4_spec->ip4src != htonl(0xFFFFFFFF))
+			return true;
+
+		if (usr_ip4_spec->ip4dst &&
+		    usr_ip4_spec->ip4dst != htonl(0xFFFFFFFF))
+			return true;
+
+		if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
+			return true;
+
+		break;
+	case ETHER_FLOW:
+		eth_spec = &fsp->m_u.ether_spec;
+
+		if (fsp->m_ext.vlan_tci || fsp->m_ext.vlan_etype)
+			return false;
+
+		if (!is_broadcast_ether_addr(eth_spec->h_dest) &&
+		    !is_zero_ether_addr(eth_spec->h_dest))
+			return true;
+
+		if (!is_broadcast_ether_addr(eth_spec->h_source) &&
+		    !is_zero_ether_addr(eth_spec->h_source))
+			return true;
+
+		if (eth_spec->h_proto && eth_spec->h_proto != htons(0xFFFF))
+			return true;
+
+		if (!eth_spec->h_proto && is_zero_ether_addr(eth_spec->h_source) &&
+		    is_zero_ether_addr(eth_spec->h_dest))
+			return true;
+
+		break;
+	}
+
+	return false;
+}
+
+STATIC int sxe2vf_ethtool_acl_filter_add(struct sxe2vf_adapter *adapter,
+					 struct ethtool_rx_flow_spec *fsp)
+{
+	struct sxe2vf_msg_params params = {};
+	s32 ret = 0;
+
+	sxe2vf_mbx_msg_dflt_params_fill(
+		&params, SXE2VF_MSG_RESP_WAIT_NOTIFY, SXE2_VF_ACL_FILTER_ADD,
+		fsp, sizeof(struct ethtool_rx_flow_spec), NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("sxe2 vf acl add filter fail, ret = %d !\n", ret);
+	else
+		LOG_INFO_BDF("sxe2 vf acl add filter success\n");
+
+	return ret;
+}
+#endif
+STATIC int sxe2vf_ethtool_ntuple_filter_add(struct sxe2vf_adapter *adapter,
+					    struct ethtool_rxnfc *cmd)
+{
+	int ret = 0;
+	struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter);
+	struct ethtool_rx_flow_spec *fsp = NULL;
+	struct sxe2vf_fnav_filter *filter = NULL;
+	struct sxe2vf_fnav_filter *filter_tmp = NULL;
+	struct sxe2vf_fnav_filter *pre = NULL;
+
+	if (!test_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags)) {
+		LOG_DEV_ERR("ntuple feature is not enabled, please run "
+			    "\"ethtool -K {dev} ntuple on\" to enable ntuple "
+			    "first.\n");
+		ret = -EOPNOTSUPP;
+		goto l_end;
+	}
+
+	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+	if (fsp->flow_type & FLOW_MAC_EXT) {
+		LOG_DEV_ERR("unsupported flow type \"FLOW_MAC_EXT\".\n");
+		ret = -EOPNOTSUPP;
+		goto l_end;
+	}
+#ifdef SXE2_SUPPORT_ACL
+	if (sxevf_is_acl_filter(fsp)) {
+		ret = sxe2vf_ethtool_acl_filter_add(adapter, fsp);
+		if (ret)
+			LOG_DEV_ERR("add acl filter failed, ret:%d.\n", ret);
+
+		goto l_end;
+	}
+#endif
+	if (adapter->fnav_ctxt.filter_cnt >= SXE2VF_MAX_FNAV_FILTERS ||
+	    fsp->location >= SXE2VF_MAX_FNAV_FILTERS) {
+		LOG_ERROR_BDF("location overflow, filter_cnt:%u, max_cnt:%u, "
+			      "location:%u\n",
+			      adapter->fnav_ctxt.filter_cnt, SXE2VF_MAX_FNAV_FILTERS,
+			      fsp->location);
+		ret = -ENOSPC;
+		goto l_end;
+	}
+
+	filter = devm_kzalloc(dev, sizeof(*filter), GFP_KERNEL);
+	if (!filter) {
+		LOG_ERROR_BDF("no memory.\n");
+		ret = -ENOMEM;
+		goto l_end;
+	}
+
+	ret = sxe2vf_ethtool_fnav_filter_fill(adapter, fsp, filter);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fnav filter fill failed, ret:%d\n", ret);
+		goto l_end;
+	}
+
+	if (sxe2vf_fnav_is_dup_filter(adapter, filter)) {
+		LOG_DEV_ERR("duplicate rule is detected\n");
+		ret = -EINVAL;
+		goto l_end;
+	}
+
+	ret = sxe2vf_fnav_check_and_remove_filter_at_loc(adapter, fsp->location);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fnav filter del same loc=%d failed, ret:%d\n",
+			      fsp->location, ret);
+		goto l_end;
+	}
+
+	ret = sxe2vf_fill_fnav_filter_full_msg(adapter, filter);
+	if (ret) {
+		LOG_ERROR_BDF("ethtool fnav fill fdir flow cfg failed, ret:%d\n",
+			      ret);
+		goto l_end;
+	}
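+	/* Hardware first: the rule is programmed through the PF mailbox, and
+	 * only on success is it linked into the software filter list, which
+	 * is kept sorted by filter location.
+	 */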
+	ret = sxe2vf_fnav_add_filter_with_packet(adapter, filter);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 vf fnav add filter fail, ret = %d !\n", ret);
+	} else {
+		list_for_each_entry(filter_tmp, &adapter->fnav_ctxt.filter_list,
+				    l_node) {
+			if (filter_tmp->filter_loc >= filter->filter_loc)
+				break;
+
+			pre = filter_tmp;
+		}
+		if (pre) {
+			list_add(&filter->l_node, &pre->l_node);
+		} else {
+			list_add(&filter->l_node, &adapter->fnav_ctxt.filter_list);
+		}
+		adapter->fnav_ctxt.filter_cnt++;
+	}
+
+l_end:
+	if (filter && ret)
+		devm_kfree(dev, filter);
+
+	return ret;
+}
+#ifdef SXE2_SUPPORT_ACL
+static s32 sxe2vf_acl_del_filter(struct sxe2vf_adapter *adapter, u32 loc)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+	struct sxe2vf_acl_filter_del_req del_msg;
+
+	del_msg.filter_id = cpu_to_le32(loc);
+
+	sxe2vf_mbx_msg_dflt_params_fill(
+		&params, SXE2VF_MSG_RESP_WAIT_NOTIFY, SXE2_VF_ACL_FILTER_DEL,
+		&del_msg, sizeof(struct sxe2vf_acl_filter_del_req), NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 vf acl del filter fail!\n");
+	} else {
+		LOG_INFO_BDF("sxe2 vf acl del filter success!\n");
+	}
+
+	return ret;
+}
+#endif
+STATIC s32 sxe2vf_ethtool_ntuple_filter_del(struct sxe2vf_adapter *adapter,
+					    struct ethtool_rxnfc *cmd)
+{
+	s32 ret = -ENOENT;
+	struct sxe2vf_fnav_filter *filter;
+	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	if (!test_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags)) {
+		LOG_DEV_ERR("ntuple feature is not enabled, please run "
+			    "\"ethtool -K {dev} ntuple on\" to enable ntuple "
+			    "first.\n");
+		ret = -EOPNOTSUPP;
+		goto l_end;
+	}
+
+	mutex_lock(&adapter->fnav_ctxt.filter_list_lock);
+	filter = sxe2vf_fnav_find_filter_by_loc_unlock(adapter, fsp->location);
+	if (filter) {
+		ret = sxe2vf_fnav_del_filter(adapter, filter);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 vf fnav delete filter failed, ret:%d\n",
+				      ret);
+		}
+	}
+#ifdef SXE2_SUPPORT_ACL
+	else {
+		ret = sxe2vf_acl_del_filter(adapter, fsp->location);
+		if (ret) {
+			LOG_ERROR_BDF("sxe2 vf acl delete filter failed, ret:%d\n",
+				      ret);
+		}
+	}
+#endif
+	mutex_unlock(&adapter->fnav_ctxt.filter_list_lock);
+
+l_end:
+	return ret;
+}
+
+STATIC int sxe2vf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+	int ret = -EOPNOTSUPP;
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		LOG_ERROR_BDF("sxe2 vf rss (id: %u) ethtool set rxnfc state is "
+			      "disabled.\n", vsi->vsi_id);
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+
+	LOG_DEBUG_BDF("set rxnfc, cmd: %u\n", cmd->cmd);
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = sxe2vf_set_rss_flow(adapter, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLINS:
+		ret = sxe2vf_ethtool_ntuple_filter_add(adapter, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = sxe2vf_ethtool_ntuple_filter_del(adapter, cmd);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+l_unlock:
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+static const struct ethtool_ops sxe2vf_ethtool_ops = {
+#ifdef SUPPORTED_COALESCE_PARAMS
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE,
+#endif
+	.get_sset_count = sxe2vf_get_sset_count,
+	.get_strings = sxe2vf_get_strings,
+	.get_ethtool_stats = sxe2vf_get_ethtool_stats,
+
+	.get_priv_flags = sxe2vf_get_priv_flags,
+	.set_priv_flags = sxe2vf_set_priv_flags,
+
+	.get_ringparam =
sxe2vf_get_ringparam, + .set_ringparam = sxe2vf_set_ringparam, + + .get_channels = sxe2vf_get_channels, + .set_channels = sxe2vf_set_channels, + + .get_link = ethtool_op_get_link, + .get_link_ksettings = sxe2vf_get_link_ksettings, + .get_drvinfo = sxe2vf_get_drvinfo, + + .get_msglevel = sxe2vf_get_msglevel, + .set_msglevel = sxe2vf_set_msglevel, + + .get_per_queue_coalesce = sxe2vf_get_per_queue_coalesce, + .set_per_queue_coalesce = sxe2vf_set_per_queue_coalesce, + .set_coalesce = sxe2vf_set_coalesce, + .get_coalesce = sxe2vf_get_coalesce, + + .get_rxnfc = sxe2vf_get_rxnfc, + .set_rxnfc = sxe2vf_set_rxnfc, + .get_rxfh_key_size = sxe2vf_get_rxft_key_size, + .get_rxfh_indir_size = sxe2vf_get_rxft_indir_size, + .get_rxfh = sxe2vf_get_rxfh, + .set_rxfh = sxe2vf_set_rxfh, +}; + +void sxe2vf_ethtool_ops_init(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxe2vf_ethtool_ops; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.h new file mode 100644 index 0000000000000000000000000000000000000000..daaa43c43e204cdb7be513dfd512bc12cab50e9b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ethtool.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_ethtool.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_ETHTOOL_H__ +#define __SXE2VF_ETHTOOL_H__ +#include + +#include "sxe2vf.h" +#include "sxe2vf_netdev.h" + +#define SXE2VF_Q_STATS_LEN (2) + +#define SXE2VF_IPV6_PRIORITY_SHIFT (4) +#define SXE2VF_IPPROTO_L2TPV3 (115) +#define SXE2VF_FNAV_HAS_FLEX_FIELD (1) +#define SXE2VF_FNAV_NO_FLEX_FIELD (0) + +static inline u32 sxe2vf_q_stats_len(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + + return (vsi->txqs.q_cnt + vsi->rxqs.q_cnt) * SXE2VF_Q_STATS_LEN; +} + +static inline u32 sxe2vf_txq_stats_len(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + + return ((vsi->txqs.q_cnt) * + (sizeof(struct sxe2vf_txq_stats) / sizeof(u64))); +} + +static inline u32 sxe2vf_rxq_stats_len(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + + return ((vsi->rxqs.q_cnt) * + (sizeof(struct sxe2vf_rxq_stats) / sizeof(u64))); +} + +enum sxe2vf_priv_flag_index { + SXE2VF_PRIV_FLAGS_LEGACY_RX, + SXE2VF_PRIV_FLAGS_FNAV_TUNNEL, +}; + +struct sxe2vf_priv_flag { + char name[ETH_GSTRING_LEN]; + enum sxe2vf_adapter_flags adapter_flag_bitno; + enum sxe2vf_priv_flag_index priv_flag_bitno; +}; + +static const struct sxe2vf_priv_flag sxe2vf_gstrings_priv_flags[] = { + {"legacy-rx", SXE2VF_FLAG_LEGACY_RX_ENABLE, SXE2VF_PRIV_FLAGS_LEGACY_RX}, + {"fnav-tunnel", SXE2VF_FLAG_FNAV_TUNNEL, SXE2VF_PRIV_FLAGS_FNAV_TUNNEL}, +}; + +enum sxe2_link_vf_get_speed { + SXE2_LINK_SPEED_VF_UNKNOW = 0, + SXE2_LINK_SPEED_VF_10G = 10000, + SXE2_LINK_SPEED_VF_25G = 25000, + SXE2_LINK_SPEED_VF_50G = 50000, + SXE2_LINK_SPEED_VF_100G = 100000, + SXE2_LINK_SPEED_VF_AUTO = 200000, +}; + +enum sxe2_vf_flm_autoneg { FLM_VF_DISAN = 0, FLM_VF_ENAN = 1 }; + +struct sxe2vf_stats { + char stats_string[ETH_GSTRING_LEN]; + u32 sizeof_stats; + u32 stats_offset; +}; +#define SXE2VF_STAT(_type, _name, _stats) \ + { \ + .stats_string = _name, \ + .sizeof_stats = 
sizeof_field(_type, _stats), \ + .stats_offset = offsetof(_type, _stats) \ + } +#define SXE2VF_VSI_SW_STAT(_name, _stat) \ + SXE2VF_STAT(struct sxe2vf_vsi_sw_stats, _name, _stat) +#define SXE2VF_VSI_HW_STAT(_name, _stat) \ + SXE2VF_STAT(struct sxe2_vf_vsi_hw_stats, _name, _stat) + +static const struct sxe2vf_stats sxe2vf_gstrings_vsi_hw_stats[] = { + SXE2VF_VSI_HW_STAT("rx_vport_unicast_packets", rx_vsi_unicast_packets), + SXE2VF_VSI_HW_STAT("rx_vport_bytes", rx_vsi_bytes), + SXE2VF_VSI_HW_STAT("tx_vport_unicast_packets", tx_vsi_unicast_packets), + SXE2VF_VSI_HW_STAT("tx_vport_bytes", tx_vsi_bytes), + SXE2VF_VSI_HW_STAT("rx_vport_multicast_packets", + rx_vsi_multicast_packets), + SXE2VF_VSI_HW_STAT("tx_vport_multicast_packets", + tx_vsi_multicast_packets), + SXE2VF_VSI_HW_STAT("rx_vport_broadcast_packets", + rx_vsi_broadcast_packets), + SXE2VF_VSI_HW_STAT("tx_vport_broadcast_packets", + tx_vsi_broadcast_packets), +}; + +static const struct sxe2vf_stats sxe2vf_gstrings_vsi_sw_stats[] = { + SXE2VF_VSI_SW_STAT("rx_packets", rx_packets), + SXE2VF_VSI_SW_STAT("rx_bytes", rx_bytes), + SXE2VF_VSI_SW_STAT("tx_packets", tx_packets), + SXE2VF_VSI_SW_STAT("tx_bytes", tx_bytes), + + SXE2VF_VSI_SW_STAT("rx_csum_unnecessary", rx_csum_unnecessary), + SXE2VF_VSI_SW_STAT("rx_csum_none", rx_csum_none), + SXE2VF_VSI_SW_STAT("rx_csum_complete", rx_csum_complete), + SXE2VF_VSI_SW_STAT("rx_csum_unnecessary_inner", + rx_csum_unnecessary_inner), + SXE2VF_VSI_SW_STAT("rx_csum_err", rx_csum_err), + SXE2VF_VSI_SW_STAT("rx_lro_packets", rx_lro_packets), + SXE2VF_VSI_SW_STAT("rx_lro_bytes", rx_lro_bytes), + SXE2VF_VSI_SW_STAT("rx_lro_count", rx_lro_count), + SXE2VF_VSI_SW_STAT("rx_removed_vlan_packets", rx_vlan_strip), + SXE2VF_VSI_SW_STAT("rx_pkts_sw_drop", rx_pkts_sw_drop), + SXE2VF_VSI_SW_STAT("rx_buff_alloc_err", rx_buff_alloc_err), + SXE2VF_VSI_SW_STAT("rx_pg_alloc_fail", rx_pg_alloc_fail), + SXE2VF_VSI_SW_STAT("rx_page_alloc", rx_page_alloc), + SXE2VF_VSI_SW_STAT("rx_non_eop_descs", rx_non_eop_descs), + SXE2VF_VSI_SW_STAT("rx_pa_err", rx_pa_err), + + SXE2VF_VSI_SW_STAT("tx_tso_packets", tx_tso_packets), + SXE2VF_VSI_SW_STAT("tx_tso_bytes", tx_tso_bytes), + SXE2VF_VSI_SW_STAT("tx_tso_linearize_chk", tx_tso_linearize_chk), + SXE2VF_VSI_SW_STAT("tx_added_vlan_packets", tx_vlan_insert), + SXE2VF_VSI_SW_STAT("tx_csum_none", tx_csum_none), + SXE2VF_VSI_SW_STAT("tx_csum_partial", tx_csum_partial), + SXE2VF_VSI_SW_STAT("tx_csum_partial_inner", tx_csum_partial_inner), + SXE2VF_VSI_SW_STAT("tx_stopped", tx_busy), + SXE2VF_VSI_SW_STAT("tx_dropped", tx_queue_dropped), + SXE2VF_VSI_SW_STAT("tx_xmit_more", tx_xmit_more), + SXE2VF_VSI_SW_STAT("tx_wake", tx_restart), + SXE2VF_VSI_SW_STAT("tx_linearize", tx_linearize), +}; + +#define SXE2VF_VSI_HW_STATS_LEN ARRAY_SIZE(sxe2vf_gstrings_vsi_hw_stats) +#define SXE2VF_VSI_SW_STATS_LEN ARRAY_SIZE(sxe2vf_gstrings_vsi_sw_stats) + +#define SXE2VF_IPSEC_STATS_LEN \ + ((sizeof_field(struct sxe2vf_queue_ipsec_stats, tx_error_invalid_sp) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, \ + tx_error_invalid_state) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, tx_offload_success) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, rx_error_invalid_sp) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, \ + rx_error_invalid_state) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, \ + rx_error_decrypt_fail) + \ + sizeof_field(struct sxe2vf_queue_ipsec_stats, rx_offload_success)) / \ + sizeof(__le64)) + +#define SXE2VF_FNAV_MATCH_STATS_LEN \ + (sizeof_field(struct sxe2vf_fnav_context, 
fnav_match) / sizeof(u64))
+
+#define SXE2VF_ALL_STATS_LEN(n) \
+	((SXE2VF_VSI_HW_STATS_LEN + SXE2VF_IPSEC_STATS_LEN + \
+	  SXE2VF_VSI_SW_STATS_LEN + sxe2vf_q_stats_len(n)) + \
+	 sxe2vf_rxq_stats_len(n) + sxe2vf_txq_stats_len(n) + \
+	 SXE2VF_FNAV_MATCH_STATS_LEN)
+
+#define SXE2VF_PRIV_FLAG_LEN ARRAY_SIZE(sxe2vf_gstrings_priv_flags)
+
+void sxe2vf_ethtool_ops_init(struct net_device *netdev);
+s32 sxe2vf_stats_push_sync(struct sxe2vf_adapter *adapter);
+
+s32 sxe2vf_fill_fnav_filter_full_msg(struct sxe2vf_adapter *adapter,
+				     struct sxe2vf_fnav_filter *filter);
+
+s32 sxe2vf_fnav_add_filter_with_packet(struct sxe2vf_adapter *adapter,
+				       struct sxe2vf_fnav_filter *filter);
+
+void sxe2vf_vsi_sw_stats_update(struct sxe2vf_vsi *vsi);
+
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.c
new file mode 100644
index 0000000000000000000000000000000000000000..3590e6e498fa55108c18cb5fd96bd0d562d545cd
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2vf_ipsec.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include "sxe2_compat.h"
+#include
+#include
+#include "sxe2vf.h"
+#include "sxe2vf_netdev.h"
+#include "sxe2_log.h"
+#include "sxe2vf_ipsec.h"
+#include "sxe2vf_rx.h"
+#include "sxe2vf_tx.h"
+
+#ifndef SADB_X_EALG_SM4CBC
+#define SADB_X_EALG_SM4CBC 24
+#endif
+
+#ifndef SADB_X_AALG_SM3_256HMAC
+#define SADB_X_AALG_SM3_256HMAC 10
+#endif
+
+#define SXE2VF_IPSEC_AES_KEY_BITS (256)
+#define SXE2VF_IPSEC_SHA256_AUTH_BITS (256)
+#define SXE2VF_IPSEC_SHA256_TRUNC_BITS (128)
+
+#define SXE2VF_IPSEC_SM4_KEY_BITS (128)
+#define SXE2VF_IPSEC_SM3_AUTH_BITS (256)
+#define SXE2VF_IPSEC_SM3_TRUNC_BITS (96)
+
+#define SXE2VF_IPSEC_ALG_SM4 (1)
+#define SXE2VF_IPSEC_ALG_AES (0)
+
+enum sxe2vf_rx_ipsec_status {
+	SXE2VF_IPSEC_DECRYPT_INTIGRITY_SUCCESS,
+	SXE2VF_IPSEC_PACKET_TOOLONG_OVER_2K,
+	SXE2VF_IPSEC_SPI_IP_NOT_MATCHED,
+	SXE2VF_IPSEC_IP_KEY_INVALID,
+	SXE2VF_IPSEC_PAYLOAD_ICV_NOTALIGNED,
+	SXE2VF_IPSEC_ICV_CHECK_FAIL,
+	SXE2VF_IPSEC_DISABLED_BYPASS,
+	SXE2VF_IPSEC_MACSEC_BYPASS
+};
+
+static s32 sxe2vf_ipsec_xs_validate(struct xfrm_state *xs)
+{
+#ifdef IPSEC_HAVE_REAL_DEV
+	struct net_device *netdev = xs->xso.real_dev;
+#else
+	struct net_device *netdev = xs->xso.dev;
+#endif
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	if (xs->props.aalgo != SADB_AALG_NONE) {
+		if (!xs->aalg) {
+			LOG_ERROR_BDF("auth instance error.\n");
+			return -EINVAL;
+		}
+		if (xs->props.aalgo != SADB_X_AALG_SHA2_256HMAC &&
+		    xs->props.aalgo != SADB_X_AALG_SM3_256HMAC) {
+			LOG_ERROR_BDF("sxe2 only supports hmac(sha256) or "
+				      "sm3_256hmac.\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!xs->ealg) {
+		LOG_ERROR_BDF("sxe2 cannot offload without ealg\n");
+		return -EINVAL;
+	}
+
+	if (xs->props.ealgo != SADB_X_EALG_AESCBC &&
+	    xs->props.ealgo != SADB_X_EALG_SM4CBC) {
+		LOG_ERROR_BDF("sxe2 only supports cbc(aes) or sm4cbc.\n");
+		return -EINVAL;
+	}
+
+	if (xs->props.calgo != SADB_X_CALG_NONE) {
+		LOG_ERROR_BDF("sxe2 does not support compressed xfrm offloads\n");
+		return -EINVAL;
+	}
+
+	if (xs->props.family != AF_INET && xs->props.family != AF_INET6) {
+		LOG_ERROR_BDF("sxe2 only supports IPv4/6 xfrm offloads\n");
+		return -EINVAL;
+	}
+
+	if (xs->props.mode != XFRM_MODE_TUNNEL) {
+		LOG_ERROR_BDF("sxe2 only supports tunnel xfrm offloads\n");
+		return -EINVAL;
+	}
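+	/* The checks below narrow the offloadable set further: ESP only,
+	 * with key/auth lengths matching AES-256-CBC + HMAC-SHA256 or
+	 * SM4-CBC + HMAC-SM3; any other state is rejected for hardware
+	 * offload.
+	 */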
+	if (xs->id.proto != IPPROTO_ESP) {
+		LOG_ERROR_BDF("sxe2 only supports ESP xfrm offloads\n");
+		return -EINVAL;
+	}
+
+	if (xs->ealg->alg_key_len != SXE2VF_IPSEC_SM4_KEY_BITS &&
+	    xs->ealg->alg_key_len != SXE2VF_IPSEC_AES_KEY_BITS) {
+		LOG_ERROR_BDF("sxe2 only supports 256bit or 128bit key\n");
+		return -EINVAL;
+	}
+
+	if (xs->aalg && xs->aalg->alg_key_len != SXE2VF_IPSEC_SM3_AUTH_BITS &&
+	    xs->aalg->alg_key_len != SXE2VF_IPSEC_SHA256_AUTH_BITS) {
+		LOG_ERROR_BDF("sxe2 check aalg auth and trunc length "
+			      "failed. keylen:%d\n", xs->aalg->alg_key_len);
+		return -EINVAL;
+	}
+
+	if (xs->aalg && xs->aalg->alg_trunc_len != SXE2VF_IPSEC_SHA256_TRUNC_BITS &&
+	    xs->aalg->alg_trunc_len != SXE2VF_IPSEC_SM3_TRUNC_BITS) {
+		LOG_ERROR_BDF("sxe2 check aalg auth and trunc length "
+			      "failed. trunc_len:%d\n", xs->aalg->alg_trunc_len);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int sxe2vf_ipsec_parse_proto_keys(struct xfrm_state *xs, u8 *ealg_key,
+					 u8 *aalg_key)
+{
+#ifdef IPSEC_HAVE_REAL_DEV
+	struct net_device *netdev = xs->xso.real_dev;
+#else
+	struct net_device *netdev = xs->xso.dev;
+#endif
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+
+	if (!xs->ealg) {
+		LOG_ERROR_BDF("null ealg.\n");
+		return -EINVAL;
+	}
+
+	if (xs->props.ealgo == SADB_X_EALG_AESCBC) {
+		if (xs->ealg->alg_key_len != SXE2VF_IPSEC_AES_KEY_BITS) {
+			LOG_ERROR_BDF("failed to check ipsec keylen[%d]\n",
+				      xs->ealg->alg_key_len);
+			return -EINVAL;
+		}
+
+		memcpy(ealg_key, xs->ealg->alg_key, SXE2VF_IPSEC_AESKEY_LENTH);
+
+		if (xs->aalg) {
+			if ((xs->aalg->alg_key_len !=
+			     SXE2VF_IPSEC_SHA256_AUTH_BITS) ||
+			    (xs->aalg->alg_trunc_len !=
+			     SXE2VF_IPSEC_SHA256_TRUNC_BITS)) {
+				LOG_ERROR_BDF("failed to check auth key len [%d = %d] "
+					      "or auth trunc len[%d = %d]\n",
+					      xs->aalg->alg_key_len,
+					      SXE2VF_IPSEC_SHA256_AUTH_BITS,
+					      xs->aalg->alg_trunc_len,
+					      SXE2VF_IPSEC_SHA256_TRUNC_BITS);
+				return -EINVAL;
+			}
+
+			memcpy(aalg_key, xs->aalg->alg_key,
+			       SXE2VF_IPSEC_AESAUTH_LENTH);
+		}
+	} else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) {
+		if (xs->ealg->alg_key_len != SXE2VF_IPSEC_SM4_KEY_BITS) {
+			LOG_ERROR_BDF("failed to check ealg key len [%d = %d]\n",
+				      xs->ealg->alg_key_len,
+				      SXE2VF_IPSEC_SM4_KEY_BITS);
+			return -EINVAL;
+		}
+		memcpy(ealg_key, xs->ealg->alg_key, SXE2VF_IPSEC_SMKEY_LENGTH);
+		if (xs->aalg) {
+			if (xs->aalg->alg_key_len != SXE2VF_IPSEC_SM3_AUTH_BITS ||
+			    xs->aalg->alg_trunc_len != SXE2VF_IPSEC_SM3_TRUNC_BITS) {
+				LOG_ERROR_BDF("failed to check aalg key len [%d = %d] "
+					      "or auth trunc len[%d = %d]\n",
+					      xs->aalg->alg_key_len,
+					      SXE2VF_IPSEC_SM3_AUTH_BITS,
+					      xs->aalg->alg_trunc_len,
+					      SXE2VF_IPSEC_SM3_TRUNC_BITS);
+				return -EINVAL;
+			}
+			memcpy(aalg_key, xs->aalg->alg_key,
+			       SXE2VF_IPSEC_SMAUTH_LENTH);
+		}
+	} else {
+		LOG_ERROR_BDF("wrong ealg name:%s.\n", xs->ealg->alg_name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+bool sxe2vf_is_ipsec_offload_enable(struct net_device *netdev)
+{
+	return netdev->features & NETIF_F_HW_ESP;
+}
+
+static s32 sxe2vf_ipsec_empty_rxtable_find(struct sxe2vf_adapter *adapter)
+{
+	u16 i;
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+
+	if (ipsec->rx_sa_cnt == ipsec->max_rx_sa_cnt)
+		goto out_full;
+
+	for (i = 0; i < ipsec->max_rx_sa_cnt; i++) {
+		if (!ipsec->rx_sa_table[i].used) {
+			LOG_DEBUG_BDF("get empty rx sa cnt is %d\n", i);
+			ipsec->rx_sa_table[i].index = i;
+			return ipsec->rx_sa_table[i].index;
+		}
+	}
+out_full:
+	LOG_ERROR_BDF("failed to get empty rx sa, table is full\n");
+	return -ENOSPC;
+}
+
+static s32
sxe2vf_ipsec_rx_state_add(struct sxe2vf_adapter *adapter, + struct xfrm_state *xs) +{ + s32 ret; + u32 i; + struct sxe2vf_rx_sa rsa; + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + unsigned long flags; + + ret = sxe2vf_ipsec_empty_rxtable_find(adapter); + if (ret < 0) + return ret; + + (void)memset(&rsa, 0x0, sizeof(rsa)); + rsa.xs = xs; + if (xs->aalg) + rsa.is_auth = true; + else + rsa.is_auth = false; + + if (xs->props.ealgo == SADB_X_EALG_AESCBC) { + rsa.engine = SXE2VF_IPSEC_ALG_AES; + } else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) { + rsa.engine = SXE2VF_IPSEC_ALG_SM4; + } else { + rsa.engine = SXE2VF_IPSEC_ALG_AES; + LOG_ERROR_BDF("wrong ealg[%s]\n", xs->ealg->alg_name); + } + + if (xs->props.family == AF_INET6) { + rsa.ipv6 = true; + for (i = 0; i < SXE2VF_IPV6_ADDR_LEN; i++) + rsa.ipaddr[i] = xs->id.daddr.a6[i]; + + } else { + rsa.ipv6 = false; + rsa.ipaddr[0] = xs->id.daddr.a4; + } + + rsa.index = (u16)ret; + rsa.used = true; + rsa.spi = xs->id.spi; + LOG_DEBUG_BDF("xs spi:0x%x rsa.spi:0x%x\n", xs->id.spi, rsa.spi); + ret = sxe2vf_ipsec_parse_proto_keys(xs, rsa.enc_key, rsa.auth_key); + if (ret) { + LOG_ERROR_BDF("failed to get key data for Rx SA table\n"); + return ret; + } + + ret = sxe2vf_ipsec_add_rxsa_msg_send(adapter, &rsa, false); + if (ret) { + LOG_ERROR_BDF("failed to add rx sa,ret:%d\n", ret); + return ret; + } + xs->xso.offload_handle = rsa.index; + (void)memcpy(&ipsec->rx_sa_table[rsa.index], &rsa, sizeof(rsa)); + spin_lock_irqsave(&ipsec->rx_hlist_lock, flags); + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_sa_table[rsa.index].hlist, + rsa.xs->id.spi); + ipsec->rx_sa_cnt++; + spin_unlock_irqrestore(&ipsec->rx_hlist_lock, flags); + LOG_DEBUG_BDF("[count++]current rx sa cnt is %d\n", ipsec->rx_sa_cnt); + return 0; +} + +static s32 sxe2vf_ipsec_empty_txtable_find(struct sxe2vf_adapter *adapter) +{ + u32 i; + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + if (ipsec->tx_sa_cnt == ipsec->max_tx_sa_cnt) + goto out_full; + + for (i = 0; i < ipsec->max_tx_sa_cnt; i++) { + if (!ipsec->tx_sa_table[i].used) { + LOG_DEBUG_BDF("get empty tx sa cnt is %d\n", i); + return (s32)i; + } + } +out_full: + LOG_ERROR_BDF("failed to get empty tx sa, table is full\n"); + return -ENOSPC; +} + +static struct sxe2vf_tx_sa *sxe2vf_ipsec_tx_sa_find(struct sxe2vf_adapter *adapter, + u32 index) +{ + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct sxe2vf_tx_sa *tsa; + + if (!ipsec->tx_sa_table) { + LOG_ERROR_BDF("tx sa table is NULL\n"); + goto out; + } + + tsa = &ipsec->tx_sa_table[index]; + if (!tsa->used) { + LOG_ERROR_BDF("request sa is not used\n"); + goto out; + } + + return tsa; + +out: + return NULL; +} + +static s32 sxe2vf_ipsec_tx_state_add(struct sxe2vf_adapter *adapter, + struct xfrm_state *xs) +{ + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + struct sxe2vf_tx_sa tsa; + + ret = sxe2vf_ipsec_empty_txtable_find(adapter); + if (ret < 0) + return ret; + + (void)memset(&tsa, 0x0, sizeof(tsa)); + tsa.index = (u16)ret; + tsa.xs = xs; + tsa.used = true; + if (xs->aalg) + tsa.is_auth = true; + else + tsa.is_auth = false; + + if (xs->props.ealgo == SADB_X_EALG_AESCBC) { + tsa.engine = SXE2VF_IPSEC_ALG_AES; + } else if (xs->props.ealgo == SADB_X_EALG_SM4CBC) { + tsa.engine = SXE2VF_IPSEC_ALG_SM4; + } else { + tsa.engine = SXE2VF_IPSEC_ALG_AES; + LOG_ERROR_BDF("wrong ealg[%s]\n", xs->ealg->alg_name); + } + + ret = sxe2vf_ipsec_parse_proto_keys(xs, tsa.enc_key, tsa.auth_key); + if (ret) { + LOG_ERROR_BDF("failed to get key 
data for Tx SA table\n"); + return ret; + } + + tsa.spi = xs->id.spi; + + ret = sxe2vf_ipsec_add_txsa_msg_send(adapter, &tsa, false); + if (ret) { + LOG_ERROR_BDF("failed to add tx sa,ret:%d\n", ret); + return ret; + } + xs->xso.offload_handle = tsa.index; + (void)memcpy(&ipsec->tx_sa_table[tsa.index], &tsa, sizeof(tsa)); + ipsec->tx_sa_cnt++; + LOG_DEBUG_BDF("[count++]current tx sa cnt is %d\n", ipsec->tx_sa_cnt); + + return 0; +} + +#ifdef IPSEC_STATE_ADD_API_NEED_1_PARAMS +static int sxe2vf_ipsec_state_add(struct xfrm_state *xs) +#else +static int sxe2vf_ipsec_state_add(struct xfrm_state *xs, + struct netlink_ext_ack *extack) +#endif +{ +#ifdef IPSEC_HAVE_REAL_DEV + struct net_device *netdev = xs->xso.real_dev; +#else + struct net_device *netdev = xs->xso.dev; +#endif + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret; + + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (adapter->ipsec_ctxt.status != SXE2VF_IPSEC_READY) { + LOG_ERROR_BDF("failed to add sa, because ipsec status is wrong\n"); + ret = -EBUSY; + goto out; + } + + if (!sxe2vf_is_ipsec_offload_enable(netdev)) { + LOG_ERROR_BDF("failed to add sa, because ipsec offload is " + "disable.\n"); + ret = -EINVAL; + goto out; + } + + ret = sxe2vf_ipsec_xs_validate(xs); + if (ret) + goto out; + +#ifdef IPSEC_HAVE_XS_XSO_DIR + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { +#else + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { +#endif + ret = sxe2vf_ipsec_rx_state_add(adapter, xs); + if (ret) + LOG_ERROR_BDF("failed to add rx sa\n"); + } else { + ret = sxe2vf_ipsec_tx_state_add(adapter, xs); + if (ret) + LOG_ERROR_BDF("failed to add tx sa\n"); + } +out: + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + return ret; +} + +static struct sxe2vf_rx_sa *sxe2vf_ipsec_rx_sa_find(struct sxe2vf_adapter *adapter, + u32 index, u32 spi) +{ + struct sxe2vf_rx_sa *rsa = NULL; + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + if (!ipsec->rx_sa_table) + goto out; + + rsa = &ipsec->rx_sa_table[index]; + if (!rsa->used) + goto out; + + if (rsa->spi == spi) { + LOG_DEBUG_BDF("rsa use:%d sa:[%d %d]\n", rsa->used, rsa->hw_index, + index); + return rsa; + } + +out: + LOG_ERROR_BDF("failed to find rsa[sa:%d,spi:%d]\n", index, spi); + + return NULL; +} + +static struct sxe2vf_rx_sa * +sxe2vf_ipsec_rx_sa_find_rcu(struct sxe2vf_adapter *adapter, u32 index, u32 spi) +{ + struct sxe2vf_rx_sa *rsa = NULL; + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + bool hit = false; + + if (!ipsec->rx_sa_table) { + LOG_ERROR_BDF("rx sa table is NULL.\n"); + return NULL; + } + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + { + if (!rsa->used) + continue; + + if (index == rsa->index) { + LOG_DEBUG_BDF("rsa use:%d sa:[%d %d]\n", rsa->used, + rsa->hw_index, index); + hit = true; + break; + } + } + rcu_read_unlock(); + if (!hit) { + LOG_ERROR_BDF("failed to find rsa[sa:%d,spi:%d]\n", index, spi); + return NULL; + } + return rsa; +} + +static void sxe2vf_ipsec_rx_state_clear(struct sxe2vf_adapter *adapter, + struct sxe2vf_rx_sa *rsa) +{ + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + s32 ret; + + if (!rsa || !rsa->used) { + LOG_ERROR_BDF("invalid rx sa selected\n"); + return; + } + + ret = sxe2vf_ipsec_clear_sa_msg_send(adapter, SXE2_IPSEC_DIR_RX, + rsa->hw_index); + if (ret) + LOG_ERROR_BDF("failed to delete rxsa,ret:%d\n", ret); + + synchronize_rcu(); + (void)memset(rsa, 0, sizeof(struct sxe2vf_rx_sa)); + ipsec->rx_sa_cnt--; + LOG_DEBUG_BDF("[count--]current rx sa cnt is %d\n", 
ipsec->rx_sa_cnt);
+}
+
+static void sxe2vf_ipsec_tx_state_clear(struct sxe2vf_adapter *adapter,
+					struct sxe2vf_tx_sa *tsa)
+{
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+	s32 ret;
+
+	if (!tsa || !tsa->used) {
+		LOG_ERROR_BDF("failed to find txsa\n");
+		return;
+	}
+
+	ret = sxe2vf_ipsec_clear_sa_msg_send(adapter, SXE2_IPSEC_DIR_TX,
+					     tsa->hw_index);
+	if (ret)
+		LOG_ERROR_BDF("failed to delete tx sa, ret:%d\n", ret);
+
+	(void)memset(tsa, 0, sizeof(struct sxe2vf_tx_sa));
+	ipsec->tx_sa_cnt--;
+	LOG_DEBUG_BDF("[count--]current tx sa cnt is %d\n", ipsec->tx_sa_cnt);
+}
+
+static void sxe2vf_ipsec_state_delete(struct xfrm_state *xs)
+{
+#ifdef IPSEC_HAVE_REAL_DEV
+	struct net_device *netdev = xs->xso.real_dev;
+#else
+	struct net_device *netdev = xs->xso.dev;
+#endif
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+	struct sxe2vf_rx_sa *rx_sa = NULL;
+	unsigned long flags;
+
+#ifdef IPSEC_HAVE_XS_XSO_DIR
+	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
+#else
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+#endif
+		LOG_DEBUG_BDF("delete rx sa.\n");
+		rx_sa = sxe2vf_ipsec_rx_sa_find_rcu(
+			adapter, (u32)(xs->xso.offload_handle), xs->id.spi);
+		if (!rx_sa) {
+			LOG_ERROR_BDF("failed to find rxsa, spi = 0x%x, idx = "
+				      "0x%lx\n", xs->id.spi, xs->xso.offload_handle);
+			return;
+		}
+		spin_lock_irqsave(&ipsec->rx_hlist_lock, flags);
+		hash_del_rcu(&rx_sa->hlist);
+		spin_unlock_irqrestore(&ipsec->rx_hlist_lock, flags);
+	}
+}
+
+static void sxe2vf_ipsec_state_free(struct xfrm_state *xs)
+{
+#ifdef IPSEC_HAVE_REAL_DEV
+	struct net_device *netdev = xs->xso.real_dev;
+#else
+	struct net_device *netdev = xs->xso.dev;
+#endif
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_rx_sa *rx_sa = NULL;
+	struct sxe2vf_tx_sa *tx_sa = NULL;
+
+	if (adapter->ipsec_ctxt.status != SXE2VF_IPSEC_READY)
+		LOG_DEBUG_BDF("try to delete sa while resetting\n");
+
+#ifdef IPSEC_HAVE_XS_XSO_DIR
+	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
+#else
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+#endif
+		LOG_DEBUG_BDF("delete rx sa.\n");
+		rx_sa = sxe2vf_ipsec_rx_sa_find(
+			adapter, (u32)(xs->xso.offload_handle), xs->id.spi);
+		if (!rx_sa) {
+			LOG_ERROR_BDF("failed to find rxsa, spi = 0x%x, idx = "
+				      "0x%lx\n", xs->id.spi, xs->xso.offload_handle);
+			return;
+		}
+
+		sxe2vf_ipsec_rx_state_clear(adapter, rx_sa);
+	} else {
+		LOG_DEBUG_BDF("delete tx sa.\n");
+		tx_sa = sxe2vf_ipsec_tx_sa_find(adapter,
+						(u32)(xs->xso.offload_handle));
+		if (!tx_sa) {
+			LOG_ERROR_BDF("failed to find txsa, sa idx = 0x%lx\n",
+				      xs->xso.offload_handle);
+			return;
+		}
+
+		sxe2vf_ipsec_tx_state_clear(adapter, tx_sa);
+	}
+}
+
+static bool sxe2vf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+	if (xs->props.family == AF_INET) {
+		if (ip_hdr(skb)->ihl != 5)
+			return false;
+	} else {
+		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+			return false;
+	}
+
+	if (skb->len > SXE2VF_IPSEC_PAYLOAD_LIMIT)
+		return false;
+
+	return true;
+}
+
+const struct xfrmdev_ops sxe2vf_ipsec_ops = {
+	.xdo_dev_state_add = sxe2vf_ipsec_state_add,
+	.xdo_dev_state_delete = sxe2vf_ipsec_state_delete,
+	.xdo_dev_state_free = sxe2vf_ipsec_state_free,
+	.xdo_dev_offload_ok = sxe2vf_ipsec_offload_ok,
+};
+
+s32 sxe2vf_ipsec_init(struct sxe2vf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+	s32 ret =
0; + u32 size; + + memset(ipsec, 0, sizeof(struct sxe2vf_ipsec_context)); + + mutex_init(&ipsec->context_lock); + ipsec->status = SXE2VF_IPSEC_UNINITIALIZED; + ret = sxe2vf_ipsec_get_capa_msg_send(adapter); + if (ret) { + LOG_ERROR_BDF("failed to get ipsec capability.\n"); + goto l_out; + } + LOG_DEBUG_BDF("[init]current ipsec depth:[tx:%d rx:%d]\n", + ipsec->max_tx_sa_cnt, ipsec->max_rx_sa_cnt); + + if (ipsec->max_tx_sa_cnt == 0 && ipsec->max_rx_sa_cnt == 0) { + ret = 0; + goto l_disable; + } + + size = (u32)sizeof(struct sxe2vf_rx_sa) * ipsec->max_rx_sa_cnt; + ipsec->rx_sa_table = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_sa_table) { + LOG_ERROR_BDF("ipsec rx sa table mem:%uB alloc fail.\n", size); + ret = -ENOMEM; + goto l_out; + } + + size = (u32)sizeof(struct sxe2vf_tx_sa) * ipsec->max_tx_sa_cnt; + ipsec->tx_sa_table = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_sa_table) { + LOG_ERROR_BDF("ipsec tx sa table mem:%uB alloc fail.\n", size); + ret = -ENOMEM; + goto l_free_rx_sa; + } + + ipsec->rx_sa_cnt = 0; + ipsec->tx_sa_cnt = 0; + hash_init(ipsec->rx_sa_list); + spin_lock_init(&ipsec->rx_hlist_lock); + (void)memset(&ipsec->hw_stats, 0x0, sizeof(struct sxe2vf_ipsec_hw_stats)); + netdev->xfrmdev_ops = &sxe2vf_ipsec_ops; + ipsec->status = SXE2VF_IPSEC_READY; + +l_disable: + return ret; + +l_free_rx_sa: + kfree(ipsec->rx_sa_table); + ipsec->rx_sa_table = NULL; + +l_out: + ipsec->status = SXE2VF_IPSEC_ERROR; + LOG_ERROR_BDF("failed to init ipsec\n"); + return ret; +} + +void sxe2vf_ipsec_deinit(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + ipsec->status = SXE2VF_IPSEC_UNINITIALIZED; + kfree(ipsec->rx_sa_table); + ipsec->rx_sa_table = NULL; + + kfree(ipsec->tx_sa_table); + ipsec->tx_sa_table = NULL; + + mutex_destroy(&ipsec->context_lock); +} + +s32 sxe2vf_ipsec_tx(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first, + struct sxe2vf_tx_offload_info *offload) +{ + struct sxe2vf_adapter *adapter = netdev_priv(txq->netdev); + struct xfrm_state *xs; + struct sec_path *sp; + struct sxe2vf_tx_sa *tsa; + + u32 index; + u64 qw1 = offload->ctxt_desc_qw1; + + if (skb_is_gso(first->skb)) { + LOG_ERROR_BDF("failed to offload ipsec,because tso is on.\n"); + goto out_failed; + } + + sp = skb_sec_path(first->skb); + if (unlikely(!sp->len)) { + LOG_ERROR_BDF("failed to get sec path length = %d\n", sp->len); + txq->stats->ipsec_stats.tx_error_invalid_sp++; + goto out_failed; + } + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + LOG_ERROR_BDF("failed to get xfrm state,xs=%p\n", xs); + txq->stats->ipsec_stats.tx_error_invalid_state++; + goto out_failed; + } + + index = (u32)xs->xso.offload_handle; + tsa = sxe2vf_ipsec_tx_sa_find(adapter, index); + + if (unlikely(!tsa || !tsa->used)) { + LOG_ERROR_BDF("unavailable index=%d\n", index); + txq->stats->ipsec_stats.tx_error_invalid_state++; + goto out_failed; + } + + qw1 |= (u64)SXE2VF_TX_DESC_DTYPE_CTXT; + qw1 |= 0x1 << SXE2VF_TXCD_QW1_IPSEC_EN_S; + if (tsa->is_auth == false) + qw1 |= 0x1 << SXE2VF_TXCD_QW1_IPSEC_MODE_S; + qw1 |= tsa->engine << SXE2VF_TXCD_QW1_IPSEC_ENGINE_MODE_S; + qw1 |= (tsa->hw_index << SXE2VF_TXCD_QW1_IPSEC_SA_IDX_S) & + SXE2VF_TXCD_QW1_IPSEC_SA_IDX_M; + offload->ctxt_desc_qw1 = qw1; + offload->ctxt_desc_ipsec_offset = (u16)skb_transport_offset(first->skb); + + LOG_DEBUG_BDF("tx ipsec offload succeed!qw1:0x%llx, " + "ipsec_offset:0x%x,sa_index:%d\n", + offload->ctxt_desc_qw1, offload->ctxt_desc_ipsec_offset, + tsa->hw_index); + + 
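+	/* At this point qw1 carries the context-descriptor type, the IPsec
+	 * enable bit, what appears to be an encryption-only mode bit for
+	 * SAs without an auth key, the crypto engine select (AES vs SM4)
+	 * and the hardware SA index; the transport offset locates the ESP
+	 * header for the hardware.
+	 */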
txq->stats->ipsec_stats.tx_offload_success++; + + return 0; + +out_failed: + + return -1; +} + +static bool sxe2vf_update_ipsec_stats(struct sxe2vf_adapter *adapter, + union sxe2vf_rx_desc *desc, + struct sxe2vf_queue *rxq) +{ + bool ret = false; + u8 ipsec_status = 0; + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 status_lrocnt_fdpf_id = le32_to_cpu(desc->wb.status_lrocnt_fdpf_id); + + ipsec_status = SXE2VF_RX_DESC_IPSEC_STATUS_GET(status_lrocnt_fdpf_id); + + switch (ipsec_status) { + case SXE2VF_IPSEC_DECRYPT_INTIGRITY_SUCCESS: + ipsec->hw_stats.rx_decrypt_success++; + ret = false; + break; + case SXE2VF_IPSEC_PACKET_TOOLONG_OVER_2K: + ipsec->hw_stats.over_2k++; + ret = true; + break; + case SXE2VF_IPSEC_SPI_IP_NOT_MATCHED: + ipsec->hw_stats.spi_ip_not_matched++; + ret = true; + break; + case SXE2VF_IPSEC_IP_KEY_INVALID: + ipsec->hw_stats.ip_key_invalid++; + ret = true; + break; + case SXE2VF_IPSEC_PAYLOAD_ICV_NOTALIGNED: + ipsec->hw_stats.not_aligned++; + ret = true; + break; + case SXE2VF_IPSEC_ICV_CHECK_FAIL: + ipsec->hw_stats.icv_fail++; + ret = true; + break; + case SXE2VF_IPSEC_DISABLED_BYPASS: + ipsec->hw_stats.bypass_disable++; + ret = true; + break; + case SXE2VF_IPSEC_MACSEC_BYPASS: + ipsec->hw_stats.bypass_macsec++; + ret = true; + break; + } + + if (ret && ipsec_status != SXE2VF_IPSEC_DISABLED_BYPASS && + ipsec_status != SXE2VF_IPSEC_MACSEC_BYPASS) { + LOG_ERROR_BDF("ipsec offload failed,status is %d\n", ipsec_status); + rxq->stats->ipsec_stats.rx_error_decrypt_fail++; + } + + return ret; +} + +void sxe2vf_ipsec_rx(struct sxe2vf_queue *rxq, union sxe2vf_rx_desc *desc, + struct sk_buff *skb, u16 ptype) +{ + struct sxe2vf_adapter *adapter = netdev_priv(rxq->netdev); + struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct xfrm_state *xs = NULL; + struct xfrm_offload *xo = NULL; + struct sxe2vf_rx_sa *rsa; +#ifndef IPSEC_NEED_USE_SECPATH_DUP + struct sec_path *sp; +#endif + __be32 spi; + bool is_ipv4; + bool is_ipv6; + struct iphdr *ip4 = NULL; + struct ipv6hdr *ip6 = NULL; + u8 *c_hdr; + struct sxe2vf_rx_ptype_info ptype_info; + + if (sxe2vf_update_ipsec_stats(adapter, desc, rxq)) + return; + + ptype_info = sxe2vf_rx_ptype_parse(ptype); + is_ipv4 = (ptype_info.outer_ip == SXE2VF_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2VF_PTYPE_OUTER_IPV4); + is_ipv6 = (ptype_info.outer_ip == SXE2VF_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2VF_PTYPE_OUTER_IPV6); + + if (is_ipv4) { + ip4 = (struct iphdr *)(skb->data); + c_hdr = (u8 *)ip4 + (size_t)(ip4->ihl * 4); + } else if (is_ipv6) { + ip6 = (struct ipv6hdr *)(skb->data); + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + LOG_DEBUG_BDF("rx ipsec offload failed.ptype:%d!\n", ptype); + rxq->stats->ipsec_stats.rx_error_invalid_ptype++; + return; + } + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + { + if (spi == rsa->xs->id.spi) { + if (is_ipv4) { + if (ip4->daddr == rsa->xs->id.daddr.a4) { + xs = rsa->xs; + xfrm_state_hold(xs); + break; + } + } else if (is_ipv6) { + if (!ipv6_addr_cmp(&rsa->xs->id.daddr.in6, + &ip6->daddr)) { + xs = rsa->xs; + xfrm_state_hold(xs); + break; + } + } + } + } + rcu_read_unlock(); + + if (unlikely(!xs)) { + LOG_ERROR_BDF("rx ipsec offload failed! invalid xs\n"); + rxq->stats->ipsec_stats.rx_error_invalid_state++; + return; + } + +#ifdef IPSEC_NEED_USE_SECPATH_DUP + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) { + LOG_DEV_ERR("rx ipsec offload failed! 
invalid sp\n");
+		return;
+	}
+	skb->sp->xvec[skb->sp->len++] = xs;
+	skb->sp->olen++;
+#else
+	sp = secpath_set(skb);
+	if (unlikely(!sp)) {
+		LOG_ERROR_BDF("rx ipsec offload failed! invalid sp\n");
+		rxq->stats->ipsec_stats.rx_error_invalid_sp++;
+		return;
+	}
+	sp->xvec[sp->len++] = xs;
+	sp->olen++;
+#endif
+
+	xo = xfrm_offload(skb);
+	xo->flags = CRYPTO_DONE;
+	xo->status = CRYPTO_SUCCESS;
+
+	rxq->stats->ipsec_stats.rx_offload_success++;
+}
+
+s32 sxe2vf_ipsec_stop(struct sxe2vf_adapter *adapter)
+{
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+	u32 i;
+	s32 ret;
+	struct sxe2vf_rx_sa *rsa;
+
+	mutex_lock(&adapter->ipsec_ctxt.context_lock);
+
+	if (ipsec->status != SXE2VF_IPSEC_READY)
+		goto out_unlock;
+
+	ipsec->status = SXE2VF_IPSEC_RESETTING;
+
+	for (i = 0; i < ipsec->max_tx_sa_cnt; i++) {
+		if (!ipsec->tx_sa_table[i].used)
+			continue;
+
+		ret = sxe2vf_ipsec_clear_sa_msg_send(adapter, SXE2_IPSEC_DIR_TX,
+						     ipsec->tx_sa_table[i].hw_index);
+		if (ret) {
+			LOG_ERROR_BDF("failed to delete tx "
+				      "sa[index:%d,sa_index:%d],ret=%d\n",
+				      i, ipsec->tx_sa_table[i].hw_index, ret);
+		} else {
+			LOG_DEBUG_BDF("delete fw tx_sa[index:%d,sa_index:%d] ok.\n",
+				      i, ipsec->tx_sa_table[i].hw_index);
+		}
+	}
+
+	for (i = 0; i < ipsec->max_rx_sa_cnt; i++) {
+		rsa = &ipsec->rx_sa_table[i];
+		if (!rsa->used)
+			continue;
+
+		ret = sxe2vf_ipsec_clear_sa_msg_send(adapter, SXE2_IPSEC_DIR_RX,
+						     ipsec->rx_sa_table[i].hw_index);
+		if (ret)
+			LOG_ERROR_BDF("failed to delete rx sa[index:%d]\n", i);
+		else
+			LOG_DEBUG_BDF("delete fw rx_sa[index:%d] ok.\n", i);
+	}
+
+out_unlock:
+	mutex_unlock(&adapter->ipsec_ctxt.context_lock);
+	return 0;
+}
+
+s32 sxe2vf_ipsec_rebuild(struct sxe2vf_adapter *adapter)
+{
+	u32 i;
+	s32 ret;
+	struct sxe2vf_tx_sa *tsa;
+	struct sxe2vf_rx_sa *rsa;
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+
+	mutex_lock(&adapter->ipsec_ctxt.context_lock);
+
+	if (ipsec->status != SXE2VF_IPSEC_RESETTING) {
+		LOG_DEBUG_BDF("ipsec rebuild skipped after failed initialization\n");
+		goto out_unlock;
+	}
+
+	for (i = 0; i < ipsec->max_tx_sa_cnt; i++) {
+		tsa = &ipsec->tx_sa_table[i];
+		if (!tsa->used)
+			continue;
+
+		ret = sxe2vf_ipsec_add_txsa_msg_send(adapter, tsa, true);
+		if (ret) {
+			sxe2vf_ipsec_state_delete(tsa->xs);
+			tsa->used = false;
+			ipsec->tx_sa_cnt--;
+			LOG_DEV_ERR("failed to request Tx SA[spi:0x%x] in reset, "
+				    "please del this sa, ret %d\n",
+				    be32_to_cpu(tsa->spi), ret);
+			ret = -EIO;
+		} else {
+			LOG_DEBUG_BDF("add fw tx_sa[index:%d] ok.\n", i);
+		}
+	}
+
+	for (i = 0; i < ipsec->max_rx_sa_cnt; i++) {
+		rsa = &ipsec->rx_sa_table[i];
+		if (!rsa->used)
+			continue;
+
+		ret = sxe2vf_ipsec_add_rxsa_msg_send(adapter, rsa, true);
+		if (ret) {
+			sxe2vf_ipsec_state_delete(rsa->xs);
+			rsa->used = false;
+			ipsec->rx_sa_cnt--;
+			LOG_DEV_ERR("failed to request Rx SA[spi:0x%x] in reset, "
+				    "please del this sa, ret %d\n",
+				    be32_to_cpu(rsa->spi), ret);
+			ret = -EIO;
+		} else {
+			LOG_DEBUG_BDF("add fw rx_sa[index:%d] ok.\n", i);
+		}
+	}
+
+	ipsec->status = SXE2VF_IPSEC_READY;
+
+out_unlock:
+	mutex_unlock(&adapter->ipsec_ctxt.context_lock);
+	return 0;
+}
+
+bool sxe2vf_is_ipsec_can_not_disable(struct sxe2vf_adapter *adapter)
+{
+	struct sxe2vf_ipsec_context *ipsec = &adapter->ipsec_ctxt;
+
+	return (!!ipsec->rx_sa_cnt) || (!!ipsec->tx_sa_cnt);
+}
+
+bool sxe2vf_ipsec_conflict_features_check(struct net_device *netdev)
+{
+	netdev_features_t tso_features =
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+		NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | +#ifdef NETIF_F_GSO_UDP_L4 + NETIF_F_GSO_UDP_L4 | +#endif + NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6; + + netdev_features_t features = netdev->features; + + if (features & NETIF_F_LRO) + return true; + + if (features & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)) + return true; + + if (features & (NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM)) { + return true; + } + + if (features & (tso_features)) + return true; + + return false; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.h new file mode 100644 index 0000000000000000000000000000000000000000..666e48c625c939b97a50f95c04e523c8f89ab88c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_ipsec.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_ipsec.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2VF_IPSEC_H__ +#define __SXE2VF_IPSEC_H__ + +#include +#ifdef NEED_COMPAT_DIM +#include "sxe2_compat_dim.h" +#else +#include +#endif +#include +#include +#include + +#define SXE2VF_IPSEC_KEY_LENGTH (32) + +#define SXE2VF_IPSEC_AESKEY_LENTH (32) +#define SXE2VF_IPSEC_AESAUTH_LENTH (32) +#define SXE2VF_IPSEC_SMKEY_LENGTH (16) +#define SXE2VF_IPSEC_SMAUTH_LENTH (32) + +#define SXE2VF_IPV6_ADDR_LEN (4) + +#define SXE2VF_IPSEC_PAYLOAD_LIMIT (2048) + +struct sxe2vf_adapter; +union sxe2vf_rx_desc; +struct sxe2vf_tx_offload_info; + +enum sxe2vf_ipsec_status { + SXE2VF_IPSEC_UNINITIALIZED, + SXE2VF_IPSEC_READY, + SXE2VF_IPSEC_RESETTING, + SXE2VF_IPSEC_ERROR, +}; + +struct sxe2vf_rx_sa { + struct hlist_node hlist; + u16 index; + u16 hw_index; + bool used; + struct xfrm_state *xs; + __be32 spi; + u32 ipaddr[SXE2VF_IPV6_ADDR_LEN]; + u8 auth_key[SXE2VF_IPSEC_KEY_LENGTH]; + u8 enc_key[SXE2VF_IPSEC_KEY_LENGTH]; + bool ipv6; + bool is_auth; + bool engine; +}; + +struct sxe2vf_tx_sa { + u16 index; + u16 hw_index; + bool used; + struct xfrm_state *xs; + u8 auth_key[SXE2VF_IPSEC_KEY_LENGTH]; + u8 enc_key[SXE2VF_IPSEC_KEY_LENGTH]; + bool is_auth; + bool engine; + __be32 spi; +}; + +struct sxe2vf_ipsec_hw_stats { + u64 rx_decrypt_success; + u64 over_2k; + u64 spi_ip_not_matched; + u64 ip_key_invalid; + u64 not_aligned; + u64 icv_fail; + u64 bypass_disable; + u64 bypass_macsec; +}; + +struct sxe2vf_ipsec_sw_stats { + u64 tx_error_invalid_sp; + u64 tx_error_invalid_state; + u64 tx_offload_success; + + u64 rx_error_invalid_sp; + u64 rx_error_invalid_state; + u64 rx_error_invalid_ptype; + u64 rx_error_decrypt_fail; + u64 rx_offload_success; +}; + +struct sxe2vf_ipsec_context { + u32 max_tx_sa_cnt; + u32 max_rx_sa_cnt; + struct sxe2vf_tx_sa *tx_sa_table; + struct sxe2vf_rx_sa *rx_sa_table; + u32 rx_sa_cnt; + u32 tx_sa_cnt; + enum sxe2vf_ipsec_status status; + struct sxe2vf_ipsec_hw_stats hw_stats; + DECLARE_HASHTABLE(rx_sa_list, 12); + struct mutex context_lock; + spinlock_t rx_hlist_lock; +}; + +s32 sxe2vf_ipsec_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_ipsec_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_ipsec_tx(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first, + struct sxe2vf_tx_offload_info *offload); +void sxe2vf_ipsec_rx(struct sxe2vf_queue *rxq, union sxe2vf_rx_desc *desc, + struct sk_buff *skb, u16 ptype); + +s32 sxe2vf_ipsec_stop(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_ipsec_rebuild(struct sxe2vf_adapter 
*adapter); + +bool sxe2vf_is_ipsec_can_not_disable(struct sxe2vf_adapter *adapter); + +bool sxe2vf_ipsec_conflict_features_check(struct net_device *netdev); + +bool sxe2vf_is_ipsec_offload_enable(struct net_device *netdev); + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..4603f2d8dd047564fbd6e4d48070eb9016b97397 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.c @@ -0,0 +1,1387 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_irq.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include "sxe2_compat.h" +#include "sxe2vf.h" +#include "sxe2vf_irq.h" +#include "sxe2_log.h" +#include "sxe2vf_rx.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_vsi.h" +#include "sxe2vf_mbx_channel.h" + +int dpdk_irq_cnt = SXE2VF_DPDK_MSIX_MIN_CNT; +module_param(dpdk_irq_cnt, int, 0644); +MODULE_PARM_DESC(dpdk_irq_cnt, "dpdk vf irq cnt"); + +#define sxe2vf_for_each_queue(post, head) \ + for (post = (head).next; post; post = post->next) + +#define SXE2VF_DIM_DFLT_PROFILE_IDX 1 + +static const u16 tx_itr_profile[] = { + 2, + 8, + 40, + 128, + 256 +}; + +static const u16 rx_itr_profile[] = { + 2, + 8, + 16, + 32, + 64 +}; + +irqreturn_t sxe2vf_event_irq_handler(int irq, void *data) +{ + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)data; + + LOG_INFO_BDF("dev_name:%s event irq:%d triggered.\n", adapter->dev_name, + irq); + + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MBX, 0); + + return IRQ_HANDLED; +} + +STATIC void sxe2vf_vsi_queues_free(struct sxe2vf_vsi *vsi) +{ + u16 i; + + if (vsi->rxqs.q) { + sxe2vf_for_each_vsi_rxq(vsi, i) + { + if (vsi->rxqs.q[i]) { + kfree_rcu(vsi->rxqs.q[i], rcu); + WRITE_ONCE(vsi->rxqs.q[i], NULL); + } + } + kfree(vsi->rxqs.q); + vsi->rxqs.q = NULL; + } + + if (vsi->txqs.q) { + sxe2vf_for_each_vsi_txq(vsi, i) + { + if (vsi->txqs.q[i]) { + kfree_rcu(vsi->txqs.q[i], rcu); + WRITE_ONCE(vsi->txqs.q[i], NULL); + } + } + kfree(vsi->txqs.q); + vsi->txqs.q = NULL; + } + + LOG_DEBUG("vsi:%pK tx/rx queues free.\n", vsi); +} + +void sxe2vf_vsi_queues_deinit(struct sxe2vf_vsi *vsi) +{ + sxe2vf_vsi_queues_free(vsi); +} + +STATIC s32 sxe2vf_vsi_queues_alloc(struct sxe2vf_vsi *vf_vsi) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = vf_vsi->adapter; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct sxe2vf_queue *q; + u16 i; + + vf_vsi->txqs.q = kcalloc(vf_vsi->txqs.q_cnt, sizeof(*vf_vsi->txqs.q), + GFP_KERNEL); + if (!vf_vsi->txqs.q) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc txqs failed, count: %d, size: %zu.\n", + vf_vsi->txqs.q_cnt, sizeof(*vf_vsi->txqs.q)); + goto l_failed; + } + + vf_vsi->rxqs.q = kcalloc(vf_vsi->rxqs.q_cnt, sizeof(*vf_vsi->rxqs.q), + GFP_KERNEL); + if (!vf_vsi->rxqs.q) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc rxqs failed, count: %d, size: %zu.\n", + vf_vsi->rxqs.q_cnt, sizeof(*vf_vsi->rxqs.q)); + goto l_failed; + } + + sxe2vf_for_each_vsi_txq(vf_vsi, i) + { + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) { + ret = -ENOMEM; + LOG_DEV_ERR("txq size: %zu alloc failed.\n", sizeof(*q)); + goto l_failed; + } + q->vsi = vf_vsi; + q->idx_in_vsi = i; + q->dev = dev; + q->netdev = vf_vsi->netdev; + q->depth = vf_vsi->txqs.depth; + u64_stats_init(&q->syncp); + WRITE_ONCE(vf_vsi->txqs.q[i], q); + } + + sxe2vf_for_each_vsi_rxq(vf_vsi, i) + { + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) 
{ + ret = -ENOMEM; + LOG_DEV_ERR("rxq size: %zu alloc failed.\n", sizeof(*q)); + goto l_failed; + } + q->vsi = vf_vsi; + q->idx_in_vsi = i; + q->dev = dev; + q->depth = vf_vsi->rxqs.depth; + q->netdev = vf_vsi->netdev; + u64_stats_init(&q->syncp); + WRITE_ONCE(vf_vsi->rxqs.q[i], q); + } + return ret; + +l_failed: + sxe2vf_vsi_queues_free(vf_vsi); + return ret; +} + +STATIC void sxe2vf_vsi_queues_num_set(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + u16 num_queues; + + if (adapter->q_ctxt.q_cnt_req) + num_queues = adapter->q_ctxt.q_cnt_req; + else + num_queues = adapter->q_ctxt.eth_q_cnt; + + vsi->txqs.q_cnt = num_queues; + vsi->rxqs.q_cnt = num_queues; +} + +STATIC void sxe2vf_vsi_queues_cfg(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + + vsi->txqs.depth = (u16)(vsi->txqs.depth ?: SXE2VF_DFLT_NUM_TX_DESC); + vsi->rxqs.depth = (u16)(vsi->rxqs.depth ?: SXE2VF_DFLT_NUM_RX_DESC); + + LOG_INFO_BDF("vsi:%u queue_cnt:%u txq_depth:%u rxq_depth:%u.\n", vsi->vsi_id, + vsi->txqs.q_cnt, vsi->txqs.depth, vsi->rxqs.depth); +} + +s32 sxe2vf_vsi_queues_init(struct sxe2vf_vsi *vf_vsi) +{ + sxe2vf_vsi_queues_num_set(vf_vsi); + + sxe2vf_vsi_queues_cfg(vf_vsi); + + return sxe2vf_vsi_queues_alloc(vf_vsi); +} + +STATIC s32 sxe2vf_msix_entries_alloc(struct sxe2vf_adapter *adapter, u16 msix_cnt) +{ + s32 ret = 0; + u16 i; + + adapter->irq_ctxt.msix_entries = + kcalloc(msix_cnt, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->irq_ctxt.msix_entries) { + ret = -ENOMEM; + LOG_DEV_ERR("msi-x irq entry num:%u per size:%lu kcalloc failed, " + "ret=%d\n", + msix_cnt, sizeof(struct msix_entry), ret); + goto l_end; + } + + for (i = 0; i < msix_cnt; i++) + adapter->irq_ctxt.msix_entries[i].entry = i; + +l_end: + return ret; +} + +static void sxe2vf_msix_entries_free(struct sxe2vf_adapter *adapter) +{ + kfree(adapter->irq_ctxt.msix_entries); + adapter->irq_ctxt.msix_entries = NULL; +} + +STATIC s32 sxe2vf_msix_enable(struct sxe2vf_adapter *adapter, s32 min_msix, + s32 msix_cnt) +{ + s32 ret; + + ret = sxe2vf_msix_entries_alloc(adapter, (u16)msix_cnt); + if (ret) + return ret; + + ret = pci_enable_msix_range(adapter->pdev, adapter->irq_ctxt.msix_entries, + min_msix, msix_cnt); + if (ret < 0) { + LOG_ERROR_BDF("enable msix range[%d-%d] failed, ret=%d\n", min_msix, + msix_cnt, ret); + goto l_ena_failed; + } + LOG_INFO_BDF("enable msix range[%d-%d] suc, ret=%d\n", min_msix, msix_cnt, + ret); + return ret; + +l_ena_failed: + sxe2vf_msix_entries_free(adapter); + return ret; +} + +#define SXE2VF_RDMA_OTHER_IRQCNT 1 + +s32 sxe2vf_msix_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + s32 fact_cnt; + u16 eth_fact = 0; + u16 dpdk_fact = 0; + u16 rdma_fact = 0; + u16 left_cnt = (u16)(adapter->irq_ctxt.max_cnt - (u16)SXE2VF_EVENT_MSIX_CNT); + u16 eth_expect = 0; + u16 dpdk_expect = 0; + u16 rdma_expect = 0; + int mode = sxe2vf_com_mode_get(adapter); + u16 msix_min = sxe2vf_irq_cnt_min_get(adapter); + + if (mode == SXE2_COM_MODULE_KERNEL) { + left_cnt -= SXE2VF_RDMA_MSIX_MIN_CNT; + eth_expect = (u16)(min3((u16)SXE2_VF_ETH_Q_NUM, + (u16)(num_online_cpus()), left_cnt)); + rdma_expect = (u16)(adapter->irq_ctxt.max_cnt - + (u16)SXE2VF_EVENT_MSIX_CNT - eth_expect); + } else if (mode == SXE2_COM_MODULE_DPDK) { + dpdk_expect = (u16)(min((u16)(SXE2VF_DPDK_MSIX_MAX_CNT), left_cnt)); + } else { + left_cnt -= (SXE2VF_DPDK_MSIX_MIN_CNT + SXE2VF_RDMA_MSIX_MIN_CNT); + eth_expect = (u16)(min3((u16)SXE2_VF_ETH_Q_NUM, + (u16)(num_online_cpus()), left_cnt)); + left_cnt = 
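+			/* vectors left for rdma: total minus event vector, dpdk minimum and eth share */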
(u16)(adapter->irq_ctxt.max_cnt - + (u16)SXE2VF_EVENT_MSIX_CNT - + (u16)SXE2VF_DPDK_MSIX_MIN_CNT - eth_expect); + rdma_expect = (u16)(min(((u16)num_online_cpus()), (left_cnt))); + dpdk_expect = (u16)(adapter->irq_ctxt.max_cnt - + (u16)SXE2VF_EVENT_MSIX_CNT - eth_expect - + rdma_expect); + } + + fact_cnt = sxe2vf_msix_enable(adapter, msix_min, adapter->irq_ctxt.max_cnt); + if (fact_cnt < 0) { + ret = -ENOSPC; + LOG_INFO_BDF("cpu:%u q_max_cnt:%u irq_caps:%d eth_expect:%u " + "dpdk_expect:%u\t" + "rdma_expect:%u fact_cnt:%u msix enable failed " + "ret:%d\n", + num_online_cpus(), adapter->q_ctxt.max_cnt, + adapter->irq_ctxt.max_cnt, eth_expect, dpdk_expect, + rdma_expect, fact_cnt, ret); + goto l_end; + } + + adapter->irq_ctxt.msix_cnt = (u16)fact_cnt; + + if (fact_cnt < adapter->irq_ctxt.max_cnt) { + fact_cnt -= SXE2VF_EVENT_MSIX_CNT; + if (mode == SXE2_COM_MODULE_KERNEL) { + left_cnt = (u16)(fact_cnt - (u16)SXE2VF_RDMA_MSIX_MIN_CNT); + eth_fact = (u16)min_t(int, left_cnt, eth_expect); + left_cnt = (u16)((u16)fact_cnt - eth_fact); + rdma_fact = (u16)(min(left_cnt, rdma_expect)); + } else if (mode == SXE2_COM_MODULE_DPDK) { + dpdk_fact = (u16)min_t(int, fact_cnt, dpdk_expect); + } else { + left_cnt = (u16)((u16)fact_cnt - + (u16)SXE2VF_RDMA_MSIX_MIN_CNT - + (u16)SXE2VF_DPDK_MSIX_MIN_CNT); + eth_fact = (u16)min_t(int, left_cnt, eth_expect); + left_cnt = (u16)(fact_cnt - (u16)SXE2VF_DPDK_MSIX_MIN_CNT - + eth_fact); + rdma_fact = (u16)(min(left_cnt, rdma_expect)); + left_cnt = (u16)((u16)fact_cnt - (u16)eth_fact - + (u16)rdma_fact); + dpdk_fact = (u16)(min(left_cnt, dpdk_expect)); + } + } else { + eth_fact = eth_expect; + rdma_fact = rdma_expect; + dpdk_fact = dpdk_expect; + } + adapter->aux_ctxt.num_msix = rdma_fact; + adapter->irq_ctxt.rdma_irq_cnt = (u16)adapter->aux_ctxt.num_msix; + adapter->irq_ctxt.dpdk_irq_cnt = dpdk_fact; + adapter->irq_ctxt.eth_irq_cnt = eth_fact; + + adapter->irq_ctxt.eth_offset = SXE2VF_EVENT_MSIX_CNT; + adapter->irq_ctxt.rdma_offset = + adapter->irq_ctxt.eth_irq_cnt + adapter->irq_ctxt.eth_offset; + adapter->irq_ctxt.dpdk_offset = adapter->irq_ctxt.rdma_irq_cnt + + adapter->irq_ctxt.rdma_offset; + + LOG_INFO_BDF("cpu:%u q_max_cnt:%u irq_caps:%d eth_expect:%u dpdk_expect:%u " + "rdma_expect:%u\t" + "msix_cnt:%u eth_fact:%u dpdk_fact:%u rdma_fact:%u\t" + "eth offset:%d dpdk offset:%d rdma offset:%d mode:%d ret:%d\n", + num_online_cpus(), adapter->q_ctxt.max_cnt, + adapter->irq_ctxt.max_cnt, eth_expect, dpdk_expect, rdma_expect, + fact_cnt, eth_fact, dpdk_fact, rdma_fact, + adapter->irq_ctxt.eth_offset, adapter->irq_ctxt.dpdk_offset, + adapter->irq_ctxt.rdma_offset, mode, ret); + +l_end: + return ret; +} + +s32 sxe2vf_queue_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + u16 eth_queue_cnt = 0; + u16 dpdk_q_cnt = 0; + int mode = sxe2vf_com_mode_get(adapter); + + if (mode == SXE2_COM_MODULE_KERNEL) { + eth_queue_cnt = (u16)min3(adapter->q_ctxt.max_cnt, + (u16)SXE2_VF_ETH_Q_NUM, + adapter->irq_ctxt.eth_irq_cnt); + } else if (mode == SXE2_COM_MODULE_DPDK) { + dpdk_q_cnt = (u16)min_t(u16, adapter->q_ctxt.max_cnt, + (u16)SXE2_VF_DPDK_Q_NUM); + } else { + eth_queue_cnt = (u16)min3((u16)(adapter->q_ctxt.max_cnt - + SXE2VF_DPDK_QUEUE_CNT_MIN), + (u16)SXE2_VF_ETH_Q_NUM, + adapter->irq_ctxt.eth_irq_cnt); + dpdk_q_cnt = (u16)min((u16)(adapter->q_ctxt.max_cnt - eth_queue_cnt), + (u16)SXE2_VF_DPDK_Q_NUM); + } + + adapter->q_ctxt.eth_q_cnt = eth_queue_cnt; + adapter->q_ctxt.eth_offset = 0; + adapter->q_ctxt.dpdk_q_cnt = dpdk_q_cnt; + adapter->q_ctxt.dpdk_offset = + 
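+		/* dpdk queues are laid out immediately behind the kernel-eth queue range */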
adapter->q_ctxt.eth_q_cnt + adapter->q_ctxt.eth_offset; + + LOG_INFO_BDF("eth irq cnt:%u eth_q_cnt:%u offset:%d dpdk_q_cnt:%u offset:%d " + "mode:%d\n", + adapter->irq_ctxt.eth_irq_cnt, adapter->q_ctxt.eth_q_cnt, + adapter->q_ctxt.eth_offset, adapter->q_ctxt.dpdk_q_cnt, + adapter->q_ctxt.dpdk_offset, mode); + + return ret; +} + +void sxe2vf_queue_deinit(struct sxe2vf_adapter *adapter) +{ + adapter->q_ctxt.eth_q_cnt = 0; + adapter->q_ctxt.dpdk_q_cnt = 0; +} + +STATIC s32 sxe2vf_vsi_irq_data_alloc(struct sxe2vf_vsi *vsi, u16 idx) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_irq_data *irq_data; + + irq_data = kzalloc(sizeof(*irq_data), GFP_KERNEL); + if (!irq_data) { + LOG_DEV_ERR("irq_data alloc failed.\n"); + ret = -ENOMEM; + goto l_end; + } + + irq_data->vsi = vsi; + irq_data->irq_idx = idx; + + vsi->irqs.irq_data[idx] = irq_data; + +l_end: + return ret; +} + +STATIC void sxe2vf_vsi_irqs_data_free(struct sxe2vf_vsi *vsi) +{ + u16 i; + + if (vsi->irqs.irq_data) { + sxe2vf_for_each_vsi_irq(vsi, i) + { + kfree(vsi->irqs.irq_data[i]); + vsi->irqs.irq_data[i] = NULL; + } + kfree(vsi->irqs.irq_data); + vsi->irqs.irq_data = NULL; + } +} + +STATIC s32 sxe2vf_vsi_irqs_data_alloc(struct sxe2vf_vsi *vsi) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = vsi->adapter; + u16 i; + + vsi->irqs.irq_data = kcalloc(vsi->irqs.cnt, sizeof(*vsi->irqs.irq_data), + GFP_KERNEL); + if (!vsi->irqs.irq_data) { + ret = -ENOMEM; + LOG_DEV_ERR("alloc irq_data failed, count: %d, size: %zu.\n", + vsi->irqs.cnt, sizeof(*vsi->irqs.irq_data)); + goto l_end; + } + + sxe2vf_for_each_vsi_irq(vsi, i) + { + ret = sxe2vf_vsi_irq_data_alloc(vsi, i); + if (ret) + goto l_failed; + } + + return ret; + +l_failed: + sxe2vf_vsi_irqs_data_free(vsi); +l_end: + return ret; +} + +STATIC void sxe2vf_queue_add(struct sxe2vf_queue *queue, struct sxe2vf_list *head) +{ + queue->next = head->next; + head->next = queue; + head->cnt++; +} + +STATIC void sxe2vf_map_txq_to_irq(struct sxe2vf_vsi *vsi, u16 cnt, u16 q_idx, + u16 irq_idx) +{ + struct sxe2vf_queue *queue; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[irq_idx]; + + while (cnt) { + queue = vsi->txqs.q[q_idx]; + queue->irq_data = irq_data; + sxe2vf_queue_add(queue, &irq_data->tx.list); + irq_data->q_bitmap |= (u32)BIT(q_idx); + cnt--; + q_idx++; + } +} + +STATIC void sxe2vf_map_rxq_to_irq(struct sxe2vf_vsi *vsi, u16 cnt, u16 q_idx, + u16 irq_idx) +{ + struct sxe2vf_queue *queue; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[irq_idx]; + + while (cnt) { + queue = vsi->rxqs.q[q_idx]; + queue->irq_data = irq_data; + sxe2vf_queue_add(queue, &irq_data->rx.list); + cnt--; + q_idx++; + } +} + +STATIC void sxe2vf_vsi_queues_irqs_map(struct sxe2vf_vsi *vsi) +{ + u16 irq_cnt = vsi->irqs.cnt; + u16 txq_remain = vsi->txqs.q_cnt; + u16 rxq_remain = vsi->rxqs.q_cnt; + u16 i; + u16 txq_cnt, rxq_cnt, txq_idx = 0, rxq_idx = 0; + + sxe2vf_for_each_vsi_irq(vsi, i) + { + txq_cnt = (u16)DIV_ROUND_UP(txq_remain, irq_cnt - i); + rxq_cnt = (u16)DIV_ROUND_UP(rxq_remain, irq_cnt - i); + + sxe2vf_map_txq_to_irq(vsi, txq_cnt, txq_idx, i); + sxe2vf_map_rxq_to_irq(vsi, rxq_cnt, rxq_idx, i); + + txq_idx += txq_cnt; + rxq_idx += rxq_cnt; + txq_remain -= txq_cnt; + rxq_remain -= rxq_cnt; + } +} + +STATIC void sxe2vf_vsi_queues_irqs_unmap(struct sxe2vf_vsi *vsi) +{ + u16 i; + struct sxe2vf_queue *queue; + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_adapter *adapter = vsi->adapter; + + sxe2vf_for_each_vsi_irq(vsi, i) + { + if (!vsi->irqs.irq_data) + return; + + 
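+		/* detach each tx/rx queue from this vector before its napi context is torn down */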
irq_data = vsi->irqs.irq_data[i]; + + sxe2vf_for_each_queue(queue, irq_data->tx.list) + { + queue->irq_data = NULL; + queue->next = NULL; + irq_data->tx.list.cnt--; + } + + sxe2vf_for_each_queue(queue, irq_data->rx.list) + { + queue->irq_data = NULL; + queue->next = NULL; + irq_data->rx.list.cnt--; + } + + LOG_WARN_BDF("irq_cnt:%u i:%u state:0x%lx napi del.\n", + vsi->irqs.cnt, i, irq_data->napi.state); + } +} + +static void sxe2vf_vsi_irq_itr_cfg(struct sxe2vf_vsi *vsi, u16 idx) +{ + u16 itr_gran; + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[idx]; + + irq_data->tx.itr_idx = SXE2VF_TX_ITR_IDX; + irq_data->tx.itr_setting = SXE2VF_TX_DFLT_ITR; + + irq_data->rx.itr_idx = SXE2VF_RX_ITR_IDX; + irq_data->rx.itr_setting = SXE2VF_RX_DFLT_ITR; + + irq_data->tx.itr_mode = SXE2VF_ITR_DYNAMIC; + irq_data->rx.itr_mode = SXE2VF_ITR_DYNAMIC; + + itr_gran = adapter->irq_ctxt.itr_gran; + + sxe2vf_hw_int_itr_set(hw, irq_data->tx.itr_idx, idx, + SXE2VF_VF_INT_ITR_INTERVAL_MAX & + (irq_data->tx.itr_setting / itr_gran)); + sxe2vf_hw_int_itr_set(hw, irq_data->rx.itr_idx, idx, + SXE2VF_VF_INT_ITR_INTERVAL_MAX & + (irq_data->rx.itr_setting / itr_gran)); +} + +static void sxe2vf_vsi_irq_itr_clear(struct sxe2vf_vsi *vsi, u16 idx) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[idx]; + + sxe2vf_hw_int_itr_set(hw, irq_data->tx.itr_idx, idx, 0); + sxe2vf_hw_int_itr_set(hw, irq_data->rx.itr_idx, idx, 0); +} + +s32 sxe2vf_vsi_irqs_decfg(struct sxe2vf_vsi *vsi) +{ + u16 i; + struct sxe2vf_irq_data *irq_data; + s32 ret; + struct sxe2vf_adapter *adapter = vsi->adapter; + + if (vsi->irqs.cnt == 0) { + LOG_ERROR_BDF("vsi:%u irq has been deinit.\n", vsi->vsi_id); + return 0; + } + + ret = sxe2vf_irq_map_clear(vsi); + if (ret) + LOG_ERROR_BDF("vsi:%u irq map clear failed.\n", vsi->vsi_id); + + sxe2vf_for_each_vsi_irq(vsi, i) + { + irq_data = vsi->irqs.irq_data[i]; + + sxe2vf_vsi_irq_itr_clear(vsi, i); + + netif_napi_del(&irq_data->napi); + } + + sxe2vf_vsi_queues_irqs_unmap(vsi); + + return ret; +} + +s32 sxe2vf_vsi_irqs_cfg(struct sxe2vf_vsi *vsi) +{ + u16 i; + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_adapter *adapter = vsi->adapter; + s32 ret = 0; + + sxe2vf_vsi_queues_irqs_map(vsi); + + sxe2vf_for_each_vsi_irq(vsi, i) + { + irq_data = vsi->irqs.irq_data[i]; + + sxe2vf_vsi_irq_itr_cfg(vsi, i); + + if (cpu_online(i)) + cpumask_set_cpu(i, &irq_data->affinity_mask); + + netif_napi_add(vsi->netdev, &irq_data->napi, sxe2vf_napi_poll, + NAPI_POLL_WEIGHT); + } + + ret = sxe2vf_irq_map_setup(vsi); + if (ret) { + (void)sxe2vf_vsi_irqs_decfg(vsi); + LOG_ERROR_BDF("vsi:%d irq map failed.\n", vsi->vsi_id); + } + + return ret; +} + +void sxe2vf_msix_deinit(struct sxe2vf_adapter *adapter) +{ + if (!adapter->irq_ctxt.msix_entries) + return; + + pci_disable_msix(adapter->pdev); + sxe2vf_msix_entries_free(adapter); + + adapter->irq_ctxt.eth_irq_cnt = 0; + adapter->aux_ctxt.num_msix = 0; + LOG_INFO_BDF("pci msix disabled msi_enable:%u.\n", + adapter->pdev->msix_enabled); +} + +void sxe2vf_event_irq_enable(struct sxe2vf_adapter *adapter) +{ + clear_bit(SXE2VF_FLAG_EVENT_IRQ_DISABLED, adapter->flags); + + sxe2vf_hw_event_irq_enable(&adapter->hw); + + LOG_INFO_BDF("mbx event irq enabled.\n"); +} + +s32 sxe2vf_event_irq_request(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct 
msix_entry *msix_entries = adapter->irq_ctxt.msix_entries; + + (void)snprintf(adapter->irq_ctxt.event_int_name, + sizeof(adapter->irq_ctxt.event_int_name) - 1, "%s-%s:event", + dev_driver_string(dev), dev_name(dev)); + + ret = request_irq(msix_entries[SXE2VF_EVENT_IRQ_IDX].vector, + sxe2vf_event_irq_handler, 0, + adapter->irq_ctxt.event_int_name, adapter); + if (ret) { + LOG_DEV_ERR("request_irq for %s failed, ret=%d\n", + adapter->irq_ctxt.event_int_name, ret); + memset(adapter->irq_ctxt.event_int_name, 0, + sizeof(adapter->irq_ctxt.event_int_name)); + goto l_end; + } + + LOG_INFO_BDF("mbx event irq:%s request irq.\n", + adapter->irq_ctxt.event_int_name); + +l_end: + return ret; +} + +void sxe2vf_event_irq_disable(struct sxe2vf_adapter *adapter) +{ + struct msix_entry *msix_entries = adapter->irq_ctxt.msix_entries; + + set_bit(SXE2VF_FLAG_EVENT_IRQ_DISABLED, adapter->flags); + + if (msix_entries) + synchronize_irq(msix_entries[SXE2VF_EVENT_IRQ_IDX].vector); + + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MBX); + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_NOTIFY_MSG); + + sxe2vf_notify_msg_list_clear(adapter); + + sxe2vf_hw_event_irq_disable(&adapter->hw); + + LOG_INFO_BDF("mbx event irq disabled.\n"); +} + +STATIC void sxe2vf_event_irq_free(struct sxe2vf_adapter *adapter) +{ + struct msix_entry *msix_entries = adapter->irq_ctxt.msix_entries; + + sxe2vf_event_irq_disable(adapter); + + if (strlen(adapter->irq_ctxt.event_int_name)) + free_irq(msix_entries[SXE2VF_EVENT_IRQ_IDX].vector, adapter); + + memset(adapter->irq_ctxt.event_int_name, 0, + sizeof(adapter->irq_ctxt.event_int_name)); + + LOG_INFO_BDF("event irq freed.\n"); +} + +s32 sxe2vf_irq_init(struct sxe2vf_adapter *adapter) +{ + s32 ret; + + ret = sxe2vf_msix_init(adapter); + if (ret) + goto l_end; + + ret = sxe2vf_event_irq_request(adapter); + if (ret) + goto l_event_irq_failed; + + sxe2vf_event_irq_enable(adapter); + + return 0; + +l_event_irq_failed: + sxe2vf_msix_deinit(adapter); +l_end: + return ret; +} + +s32 sxe2vf_main_vsi_create(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + mutex_lock(&adapter->vsi_ctxt.lock); + adapter->vsi_ctxt.vf_vsi = sxe2vf_vsi_create(adapter); + mutex_unlock(&adapter->vsi_ctxt.lock); + + if (!adapter->vsi_ctxt.vf_vsi) + ret = -ENOMEM; + + return ret; +} + +STATIC void sxe2vf_vsi_irqs_num_set(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + u16 q_cnt = (u16)max(vsi->txqs.q_cnt, vsi->rxqs.q_cnt); + u16 q_irq_cnt = adapter->irq_ctxt.eth_irq_cnt; + + vsi->irqs.cnt = (u16)min(q_irq_cnt, q_cnt); + + LOG_INFO_BDF("vsi:%u txqs_cnt:%u rxqs_cnt:%u queue irq cnt:%u q_cnt:%u " + "q_irq_cnt:%u.\n", + vsi->vsi_id, vsi->txqs.q_cnt, vsi->rxqs.q_cnt, vsi->irqs.cnt, + q_cnt, q_irq_cnt); +} + +STATIC void sxe2vf_vsi_irqs_num_clear(struct sxe2vf_vsi *vsi) +{ + vsi->irqs.cnt = 0; +} + +s32 sxe2vf_vsi_irqs_init(struct sxe2vf_vsi *vsi) +{ + sxe2vf_vsi_irqs_num_set(vsi); + + return sxe2vf_vsi_irqs_data_alloc(vsi); +} + +void sxe2vf_vsi_irqs_deinit(struct sxe2vf_vsi *vsi) +{ + sxe2vf_vsi_irqs_data_free(vsi); + + sxe2vf_vsi_irqs_num_clear(vsi); +} + +void sxe2vf_irq_deinit(struct sxe2vf_adapter *adapter) +{ + sxe2vf_event_irq_free(adapter); + + sxe2vf_msix_deinit(adapter); +} + +STATIC void sxe2vf_vsi_irqs_coalesce_deinit(struct sxe2vf_vsi *vsi) +{ + kfree(vsi->irqs.coalesce); + vsi->irqs.coalesce = NULL; +} + +STATIC void sxe2vf_napi_irq_enable(struct sxe2vf_hw *hw, + struct sxe2vf_irq_data *irq_data) +{ + u32 value = 0; + + if (irq_data->multiple_polling) { + irq_data->multiple_polling = false; + value 
= SXE2VF_DYN_CTL_INTENABLE | + SXE2VF_DYN_CTL_CLEARPBA | + SXE2VF_DYN_CTL_SWINT_TRIG | + (SXE2VF_ITR_IDX_NONE + << SXE2VF_DYN_CTL_ITR_IDX_SHIFT); + + sxe2vf_hw_irq_dyn_ctl(hw, irq_data->irq_idx, value); + } else { + sxe2vf_hw_irq_enable(hw, irq_data->irq_idx); + } +} + +static void sxe2vf_net_dim(u16 event_ctr, u64 packets, u64 bytes, struct dim *dim) +{ + struct dim_sample dim_sample = {}; + + dim_update_sample(event_ctr, packets, bytes, &dim_sample); + dim_sample.comp_ctr = 0; + + if (ktime_ms_delta(dim_sample.time, dim->start_sample.time) >= 1000) + dim->state = DIM_START_MEASURE; + + net_dim(dim, dim_sample); +} + +STATIC void sxe2vf_dynamic_itr(struct sxe2vf_irq_data *irq_data) +{ + struct sxe2vf_q_container *tqc = &irq_data->tx; + struct sxe2vf_q_container *rqc = &irq_data->rx; + struct sxe2vf_queue *queue; + + if (SXE2VF_IS_ITR_DYNAMIC(tqc)) { + u64 packets = 0, bytes = 0; + + sxe2vf_for_each_queue(queue, irq_data->tx.list) + { + packets += queue->stats->packets; + bytes += queue->stats->bytes; + } + sxe2vf_net_dim(irq_data->event_ctr, packets, bytes, + &irq_data->tx.dim); + } + + if (SXE2VF_IS_ITR_DYNAMIC(rqc)) { + u64 packets = 0, bytes = 0; + + sxe2vf_for_each_queue(queue, irq_data->rx.list) + { + packets += queue->stats->packets; + bytes += queue->stats->bytes; + } + sxe2vf_net_dim(irq_data->event_ctr, packets, bytes, + &irq_data->rx.dim); + } +} + +int sxe2vf_napi_poll(struct napi_struct *napi, int weight) +{ + struct sxe2vf_irq_data *irq_data = + container_of(napi, struct sxe2vf_irq_data, napi); + struct sxe2vf_queue *txq; + struct sxe2vf_queue *rxq; + int total_cleaned = 0; + int budget_per_ring; + bool complete = true; + struct sxe2vf_adapter *adapter = irq_data->vsi->adapter; + s32 clean; + + sxe2vf_for_each_queue(txq, irq_data->tx.list) + { + bool wd; + + wd = sxe2vf_txq_irq_clean(txq, weight); + + if (!wd) + complete = false; + } + + if (unlikely(weight <= 0)) + return weight; + + if (unlikely(irq_data->rx.list.cnt > 1)) + budget_per_ring = max_t(int, ((u32)weight / irq_data->rx.list.cnt), + 1); + else + budget_per_ring = weight; + + sxe2vf_trace(irq_rxclean_begin, irq_data, total_cleaned); + sxe2vf_for_each_queue(rxq, irq_data->rx.list) + { + sxe2vf_trace(rxq_clean_begin, rxq); + clean = sxe2vf_rxq_irq_clean(rxq, budget_per_ring); + sxe2vf_trace(rxq_clean_end, rxq, clean); + total_cleaned += clean; + if (clean >= budget_per_ring) + complete = false; + } + sxe2vf_trace(irq_rxclean_end, irq_data, total_cleaned); + + if (!complete) { + irq_data->multiple_polling = true; + return weight; + } + + if (napi_complete_done(napi, total_cleaned)) { + sxe2vf_dynamic_itr(irq_data); + sxe2vf_napi_irq_enable(&adapter->hw, irq_data); + } + + return min_t(int, total_cleaned, (weight - 1)); +} + +STATIC void sxe2vf_vsi_irq_disable(struct sxe2vf_vsi *vsi, u16 idx) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[idx]; + + synchronize_irq(adapter->irq_ctxt.msix_entries[irq_data->irq_idx].vector); + + if (irq_data->rx.list.next || irq_data->tx.list.next) + napi_disable(&irq_data->napi); + + cancel_work_sync(&irq_data->tx.dim.work); + cancel_work_sync(&irq_data->rx.dim.work); + + sxe2vf_hw_irq_disable(hw, irq_data->irq_idx); +} + +STATIC void sxe2vf_vsi_irq_enable(struct sxe2vf_vsi *vsi, u16 idx) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[idx]; + + if (irq_data->rx.list.next || 
irq_data->tx.list.next) + napi_enable(&irq_data->napi); + + sxe2vf_irq_itr_init(irq_data); + + sxe2vf_hw_irq_enable(hw, irq_data->irq_idx); + + sxe2vf_hw_irq_trigger(hw, irq_data->irq_idx); +} + +STATIC inline void sxe2vf_itr_set(struct sxe2vf_irq_data *irq_data, + struct sxe2vf_q_container *qc, u16 itr) +{ + struct sxe2vf_hw *hw = &irq_data->vsi->adapter->hw; + struct sxe2vf_adapter *adapter = irq_data->vsi->adapter; + + sxe2vf_hw_int_itr_set(hw, qc->itr_idx, irq_data->irq_idx, + (itr / adapter->irq_ctxt.itr_gran) & + SXE2VF_VF_INT_ITR_INTERVAL_MAX); +} + +STATIC void sxe2vf_dim_work_tx(struct work_struct *work) +{ + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_q_container *qc; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + irq_data = (struct sxe2vf_irq_data *)dim->priv; + qc = &irq_data->tx; + + WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_itr_profile)); + + itr = tx_itr_profile[dim->profile_ix]; + + sxe2vf_itr_set(irq_data, qc, itr); + + dim->state = DIM_START_MEASURE; +} + +STATIC void sxe2vf_dim_work_rx(struct work_struct *work) +{ + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_q_container *qc; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + irq_data = (struct sxe2vf_irq_data *)dim->priv; + qc = &irq_data->rx; + + WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_itr_profile)); + + itr = rx_itr_profile[dim->profile_ix]; + + sxe2vf_itr_set(irq_data, qc, itr); + + dim->state = DIM_START_MEASURE; +} + +STATIC void sxe2vf_dim_init(struct sxe2vf_irq_data *irq_data) +{ + struct sxe2vf_q_container *qc; + u16 itr; + + qc = &irq_data->tx; + INIT_WORK(&qc->dim.work, sxe2vf_dim_work_tx); + qc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qc->dim.profile_ix = SXE2VF_DIM_DFLT_PROFILE_IDX; + qc->dim.priv = irq_data; + + itr = (u16)(SXE2VF_IS_ITR_DYNAMIC(qc) ? tx_itr_profile[qc->dim.profile_ix] + : qc->itr_setting); + + sxe2vf_itr_set(irq_data, qc, itr); + + qc = &irq_data->rx; + INIT_WORK(&qc->dim.work, sxe2vf_dim_work_rx); + qc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qc->dim.profile_ix = SXE2VF_DIM_DFLT_PROFILE_IDX; + qc->dim.priv = irq_data; + + itr = (u16)(SXE2VF_IS_ITR_DYNAMIC(qc) ? rx_itr_profile[qc->dim.profile_ix] + : qc->itr_setting); + sxe2vf_itr_set(irq_data, qc, itr); +} + +void sxe2vf_irq_itr_init(struct sxe2vf_irq_data *irq_data) +{ + sxe2vf_dim_init(irq_data); +} + +void sxe2vf_queue_irq_disable(struct sxe2vf_adapter *adapter) +{ + u16 i; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + + sxe2vf_for_each_vsi_irq(vsi, i) + { + sxe2vf_vsi_irq_disable(vsi, i); + } + + LOG_DEBUG_BDF("queue irq disabled.\n"); +} + +void sxe2vf_queue_irq_enable(struct sxe2vf_vsi *vsi) +{ + u16 i; + + sxe2vf_for_each_vsi_irq(vsi, i) + { + sxe2vf_vsi_irq_enable(vsi, i); + } +} + +STATIC void sxe2vf_vsi_get_q_idx(struct sxe2vf_vsi *vsi, u16 irq_idx, u16 *txq, + u16 *rxq) +{ + u16 txq_per_irq, txq_remainder, rxq_per_irq, rxq_remainder; + + txq_per_irq = vsi->txqs.q_cnt / vsi->irqs.cnt; + rxq_per_irq = vsi->rxqs.q_cnt / vsi->irqs.cnt; + txq_remainder = vsi->txqs.q_cnt % vsi->irqs.cnt; + rxq_remainder = vsi->rxqs.q_cnt % vsi->irqs.cnt; + + *txq = (u16)((txq_per_irq * irq_idx) + + (irq_idx < txq_remainder ? irq_idx : txq_remainder)); + *rxq = (u16)((rxq_per_irq * irq_idx) + + (irq_idx < rxq_remainder ? 
irq_idx : rxq_remainder)); +} + +STATIC void sxe2vf_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct sxe2vf_irq_data *irq_data = container_of( + notify, struct sxe2vf_irq_data, affinity_notify); + + cpumask_copy(&irq_data->affinity_mask, mask); +} + +STATIC void sxe2vf_irq_affinity_release(struct kref __always_unused *ref) +{ +} + +STATIC irqreturn_t sxe2vf_msix_ring_irq_handler(int __always_unused irq, void *data) +{ + struct sxe2vf_irq_data *irq_data = (struct sxe2vf_irq_data *)data; + + if (!SXE2VF_IRQ_HAS_TXQ(irq_data) && !SXE2VF_IRQ_HAS_RXQ(irq_data)) + goto l_end; + + irq_data->event_ctr++; + napi_schedule_irqoff(&irq_data->napi); +l_end: + return IRQ_HANDLED; +} + +STATIC s32 sxe2vf_vsi_irq_request(struct sxe2vf_vsi *vsi, s8 *base_name, u16 idx) +{ + s32 ret = 0; + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_adapter *adapter = vsi->adapter; + u16 rx_idx, tx_idx; + u32 irq_num; + + irq_data = vsi->irqs.irq_data[idx]; + irq_num = adapter->irq_ctxt.msix_entries[idx + SXE2VF_EVENT_MSIX_CNT].vector; + + sxe2vf_vsi_get_q_idx(vsi, idx, &tx_idx, &rx_idx); + + if (SXE2VF_IRQ_HAS_TXQ(irq_data) && SXE2VF_IRQ_HAS_RXQ(irq_data)) { + if (irq_data->rx.list.cnt == 1) + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "TxRx", rx_idx); + else + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d-%d", base_name, "TxRx", rx_idx, + rx_idx + irq_data->rx.list.cnt - 1); + } else if (SXE2VF_IRQ_HAS_TXQ(irq_data)) { + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "Tx", tx_idx); + } else if (SXE2VF_IRQ_HAS_RXQ(irq_data)) { + (void)snprintf(irq_data->name, sizeof(irq_data->name) - 1, + "%s-%s-%d", base_name, "Rx", rx_idx); + } else { + LOG_WARN_BDF("irq[%u] bind no queues.\n", idx); + goto l_end; + } + + ret = request_irq(irq_num, sxe2vf_msix_ring_irq_handler, 0, irq_data->name, + irq_data); + if (ret) { + memset(irq_data->name, 0, sizeof(irq_data->name)); + LOG_DEV_ERR("irq_idx:%u vector:%u MSI-X request_irq failed err:%d\n", + idx, irq_num, ret); + goto l_end; + } + + irq_data->affinity_notify.notify = sxe2vf_irq_affinity_notify; + irq_data->affinity_notify.release = sxe2vf_irq_affinity_release; + ret = irq_set_affinity_notifier(irq_num, &irq_data->affinity_notify); + if (ret) { + LOG_DEV_ERR("irq_idx:%u vector:%u MSI-X set affinity notifier NOK, " + "ret:%d\n", + idx, irq_num, ret); + } + + ret = irq_set_affinity_hint(irq_num, &irq_data->affinity_mask); + if (ret) { + LOG_DEV_ERR("irq_idx:%u vector:%u MSI-X set affinity hint NOK, " + "ret:%d\n", + idx, irq_num, ret); + } + + LOG_INFO_BDF("irq_cnt:%u idx:%u vector:%u request irq len:%lu.\n", + vsi->irqs.cnt, idx, irq_num, strlen(irq_data->name)); + +l_end: + return ret; +} + +s32 sxe2vf_vsi_irqs_request(struct sxe2vf_vsi *vsi) +{ + s32 ret; + s8 base_name[SXE2VF_IRQ_NAME_MAX_LEN]; + struct sxe2vf_adapter *adapter = vsi->adapter; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + u16 i; + unsigned int irq_num; + + (void)snprintf(base_name, sizeof(base_name) - 1, "%s-%s", + dev_driver_string(dev), vsi->netdev->name); + + sxe2vf_for_each_vsi_irq(vsi, i) + { + ret = sxe2vf_vsi_irq_request(vsi, base_name, i); + if (ret) + goto l_end; + } + + return 0; + +l_end: + while (i) { + i--; + irq_num = adapter->irq_ctxt.msix_entries[i + SXE2VF_EVENT_MSIX_CNT] + .vector; + (void)irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, vsi->irqs.irq_data[i]); + memset(vsi->irqs.irq_data[i]->name, 0, + 
sizeof(vsi->irqs.irq_data[i]->name)); + } + return ret; +} + +STATIC void sxe2vf_vsi_irq_free(struct sxe2vf_vsi *vsi, u16 idx) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_irq_data *irq_data = vsi->irqs.irq_data[idx]; + u32 irq_num = adapter->irq_ctxt.msix_entries[idx + SXE2VF_EVENT_MSIX_CNT] + .vector; + + if (!strlen(irq_data->name)) { + LOG_WARN_BDF("irq:%u not request.\n", irq_data->irq_idx); + return; + } + + (void)irq_set_affinity_notifier(irq_num, NULL); + + (void)irq_set_affinity_hint(irq_num, NULL); + synchronize_irq(irq_num); + free_irq(irq_num, irq_data); + + memset(irq_data->name, 0, sizeof(irq_data->name)); + + LOG_INFO_BDF("irq_cnt:%u irq_idx:%u vector:%u freed.\n", vsi->irqs.cnt, + idx + SXE2VF_EVENT_MSIX_CNT, irq_num); +} + +void sxe2vf_vsi_irqs_free(struct sxe2vf_vsi *vsi) +{ + u16 i; + + sxe2vf_for_each_vsi_irq(vsi, i) + { + sxe2vf_vsi_irq_free(vsi, i); + } +} + +s32 sxe2vf_irq_cfg(struct sxe2vf_vsi *vsi) +{ + s32 ret; + + ret = sxe2vf_vsi_irqs_request(vsi); + if (ret) + return ret; + + sxe2vf_queue_irq_enable(vsi); + + return ret; +} + +STATIC void sxe2vf_vsi_deinit(struct sxe2vf_adapter *adapter) +{ + kfree(adapter->vsi_ctxt.vf_vsi); + adapter->vsi_ctxt.vf_vsi = NULL; +} + +static s32 __sxe2vf_vsi_hw_cfg(struct sxe2vf_adapter *adapter, bool is_clear) +{ + struct sxe2_vf_vsi_cfg vsi_cfg = {}; + s32 ret; + struct sxe2vf_msg_params params = {0}; + + vsi_cfg.txq_base_idx = cpu_to_le16(adapter->q_ctxt.eth_offset); + vsi_cfg.txq_cnt = cpu_to_le16(adapter->q_ctxt.eth_q_cnt); + vsi_cfg.rxq_base_idx = cpu_to_le16(adapter->q_ctxt.eth_offset); + vsi_cfg.rxq_cnt = cpu_to_le16(adapter->q_ctxt.eth_q_cnt); + vsi_cfg.irq_base_idx = cpu_to_le16(adapter->irq_ctxt.eth_offset); + vsi_cfg.irq_cnt = cpu_to_le16(adapter->irq_ctxt.eth_irq_cnt); + vsi_cfg.is_clear = is_clear; + vsi_cfg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vsi_ids[SXE2VF_VSI_TYPE_ETH]); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_VSI_CFG, &vsi_cfg, sizeof(vsi_cfg), + NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + + LOG_INFO_BDF("vsi:%d cfg msg:0x%x ret:%d.\n", vsi_cfg.vsi_id, + SXE2_VF_VSI_CFG, ret); + + return ret; +} + +s32 sxe2vf_vsi_hw_cfg(struct sxe2vf_adapter *adapter) +{ + return __sxe2vf_vsi_hw_cfg(adapter, false); +} + +s32 sxe2vf_vsi_hw_decfg(struct sxe2vf_adapter *adapter) +{ + return __sxe2vf_vsi_hw_cfg(adapter, true); +} + +void sxe2vf_vsi_destroy(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + vsi = adapter->vsi_ctxt.vf_vsi; + if (vsi) { + (void)sxe2vf_vsi_close(vsi); + (void)sxe2vf_vsi_irqs_decfg(vsi); + (void)sxe2vf_vsi_hw_decfg(adapter); + sxe2vf_vsi_irqs_deinit(vsi); + sxe2vf_vsi_qs_stats_deinit(vsi); + sxe2vf_vsi_queues_deinit(vsi); + sxe2vf_vsi_irqs_coalesce_deinit(vsi); + } + sxe2vf_vsi_deinit(adapter); + mutex_unlock(&adapter->vsi_ctxt.lock); + + LOG_DEBUG_BDF("vsi destroyed.\n"); +} + +STATIC struct sxe2vf_vsi *sxe2vf_vsi_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi = NULL; + + vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); + if (!vsi) { + LOG_DEV_ERR("alloc vsi struct failed.\n"); + goto l_end; + } + + vsi->adapter = adapter; + vsi->netdev = adapter->netdev; + vsi->vsi_id = adapter->vsi_ctxt.vsi_ids[SXE2VF_VSI_TYPE_ETH]; + + set_bit(SXE2VF_VSI_CLOSE, vsi->state); + adapter->vsi_ctxt.vf_vsi = vsi; + +l_end: + return vsi; +} + +struct sxe2vf_vsi *sxe2vf_vsi_create(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi; + + vsi = sxe2vf_vsi_init(adapter); + if (!vsi) 
+ goto l_end; + + if (sxe2vf_vsi_queues_init(vsi) != 0) + goto l_queue_init_failed; + + if (sxe2vf_vsi_qs_stats_init(vsi)) + goto l_qs_stats_init_failed; + + if (sxe2vf_vsi_irqs_init(vsi) != 0) + goto l_irq_init_failed; + + if (sxe2vf_vsi_hw_cfg(adapter) != 0) + goto l_vsi_cfg_failed; + + if (sxe2vf_vsi_irqs_cfg(vsi)) { + LOG_ERROR_BDF("vsi:%u irq cfg failed.\n", vsi->vsi_id); + goto l_irqs_cfg_failed; + } + + return vsi; + +l_irqs_cfg_failed: + (void)sxe2vf_vsi_hw_decfg(adapter); +l_vsi_cfg_failed: + sxe2vf_vsi_irqs_deinit(vsi); +l_irq_init_failed: + sxe2vf_vsi_qs_stats_deinit(vsi); +l_qs_stats_init_failed: + sxe2vf_vsi_queues_deinit(vsi); +l_queue_init_failed: + sxe2vf_vsi_deinit(adapter); +l_end: + return NULL; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..82c53d17ee31e7b79856bf1a94ea710e7690ca23 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_irq.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_irq.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_IRQ_H__ +#define __SXE2VF_IRQ_H__ + +#include +#ifdef NEED_COMPAT_DIM +#include "sxe2_compat_dim.h" +#else +#include +#endif +#include +#include +#include + +#include "sxe2vf_regs.h" + +#ifdef SXE2_TEST +#define STATIC +#else +#define STATIC static +#endif + +struct sxe2vf_queue; +struct sxe2vf_adapter; + +#define SXE2VF_EVENT_IRQ_IDX 0 +#define SXE2VF_IRQ_NAME_MAX_LEN (IFNAMSIZ + 16) +#define SXE2VF_LAN_MSIX_MIN_CNT 1 +#define SXE2VF_EVENT_MSIX_CNT 1 +#define SXE2VF_DPDK_MSIX_MIN_CNT 1 +#define SXE2VF_RDMA_MSIX_MIN_CNT 1 +#define SXE2VF_DPDK_MSIX_MAX_CNT 16 + +#define SXE2VF_IRQ_MAX_CNT (64 + SXE2VF_EVENT_MSIX_CNT) +#define SXE2VF_DFLT_NUM_RX_DESC 512 +#define SXE2VF_DFLT_NUM_TX_DESC 512 +#define SXE2VF_MAX_NUM_DESC 8160 +#define SXE2VF_MIN_NUM_DESC \ + 64 +#define SXE2VF_DESC_ALIGN_32 32 +#define SXE2VF_MIN_LRO_ITR 2 +#define SXE2VF_VF_INT_ITR_INTERVAL_MAX 0xFFF + +enum sxe2vf_itr_mode { + SXE2VF_ITR_STATIC = 0, + SXE2VF_ITR_DYNAMIC = 1, +}; + +#define SXE2VF_ITR_20K 50 +#define SXE2VF_TX_ITR_IDX SXE2VF_ITR_IDX_1 +#define SXE2VF_RX_ITR_IDX SXE2VF_ITR_IDX_0 +#define SXE2VF_TX_DFLT_ITR SXE2VF_ITR_20K +#define SXE2VF_RX_DFLT_ITR SXE2VF_ITR_20K + +#define sxe2vf_for_txq_range(i, start, end) \ + for ((i) = start; (i) < (end); \ + (i)++) + +#define sxe2vf_for_rxq_range(i, start, end) \ + for ((i) = start; (i) < (end); \ + (i)++) + +#define SXE2VF_IRQ_HAS_TXQ(irq_data) \ + ((irq_data)->tx.list.next) +#define SXE2VF_IRQ_HAS_RXQ(irq_data) \ + ((irq_data)->rx.list.next) +#define SXE2VF_IS_ITR_DYNAMIC(qc) \ + ((qc)->itr_mode == SXE2VF_ITR_DYNAMIC) + +struct sxe2vf_list { + struct sxe2vf_queue *next; + u16 cnt; +}; + +struct sxe2vf_q_container { + struct sxe2vf_list list; + struct dim dim; + u16 itr_idx; + u16 itr_setting; + u16 itr_mode; +}; + +struct sxe2vf_irq_data { + u16 irq_idx; + u8 rate_limit; + u8 q_cnt; + u32 q_bitmap; + u8 multiple_polling : 1; + u8 pad : 7; + u8 reserve[3]; + u16 event_ctr; + struct sxe2vf_vsi *vsi; + struct napi_struct napi; + struct sxe2vf_q_container tx; + struct sxe2vf_q_container rx; + s8 name[SXE2VF_IRQ_NAME_MAX_LEN]; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; +}; + +struct sxe2vf_vsi_coalesce { + u8 tx_valid; + u8 rx_valid; + u8 rate_limit; + u16 tx_itr; + u16 rx_itr; + u16 
tx_itr_mode; + u16 rx_itr_mode; +}; + +struct sxe2vf_vsi_irqs { + u16 cnt; + struct sxe2vf_irq_data **irq_data; + struct sxe2vf_vsi_coalesce + *coalesce; +}; + +struct sxe2vf_irq_context { + u16 max_cnt; + struct msix_entry *msix_entries; + s8 event_int_name[SXE2VF_IRQ_NAME_MAX_LEN]; + u16 eth_irq_cnt; + u16 eth_offset; + u16 dpdk_irq_cnt; + u16 dpdk_offset; + u16 rdma_irq_cnt; + u16 rdma_offset; + u16 msix_cnt; + u16 itr_gran; +}; + +irqreturn_t sxe2vf_event_irq_handler(int irq, void *data); + +void sxe2vf_queue_irq_deinit(struct sxe2vf_adapter *adapter); + +int sxe2vf_napi_poll(struct napi_struct *napi, int weight); + +s32 sxe2vf_irq_cfg(struct sxe2vf_vsi *vsi); + +void sxe2vf_queue_irq_disable(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vsi_irqs_configure(struct sxe2vf_vsi *vsi); + +void sxe2vf_vsi_destroy(struct sxe2vf_adapter *adapter); + +struct sxe2vf_vsi *sxe2vf_vsi_create(struct sxe2vf_adapter *adapter); + +void sxe2vf_vsi_irqs_free(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_main_vsi_create(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_irq_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_irq_deinit(struct sxe2vf_adapter *adapter); + +void sxe2vf_queue_irq_enable(struct sxe2vf_vsi *vsi); + +void sxe2vf_event_irq_disable(struct sxe2vf_adapter *adapter); + +void sxe2vf_event_irq_enable(struct sxe2vf_adapter *adapter); + +void sxe2vf_vsi_queues_deinit(struct sxe2vf_vsi *vsi); + +void sxe2vf_vsi_irqs_deinit(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_vsi_queues_init(struct sxe2vf_vsi *vf_vsi); + +s32 sxe2vf_vsi_irqs_init(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_msix_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_msix_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_event_irq_request(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vsi_irqs_request(struct sxe2vf_vsi *vsi); + +void sxe2vf_irq_itr_init(struct sxe2vf_irq_data *irq_data); + +s32 sxe2vf_queue_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_queue_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vsi_irqs_decfg(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_vsi_irqs_cfg(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_vsi_hw_cfg(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vsi_hw_decfg(struct sxe2vf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.c new file mode 100644 index 0000000000000000000000000000000000000000..0b5242f52d9afde804e784b7853ed2e04a61ce55 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.c @@ -0,0 +1,1682 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_l2_filter.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2vf.h" +#include "sxe2vf_rx.h" +#include "sxe2_log.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_netdev.h" + +static inline void +sxe2vf_switch_mac_node_del_and_free(struct sxe2vf_addr_node *mac_node) +{ + if (mac_node) { + list_del(&mac_node->list); + kfree(mac_node); + } +} + +struct sxe2vf_addr_node *sxe2vf_addr_find(struct sxe2vf_adapter *adapter, + const u8 *macaddr) +{ + struct sxe2vf_addr_node *f; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + + list_for_each_entry(f, &filter->mac_addr_list, list) + { + if (ether_addr_equal(macaddr, f->mac.macaddr)) + return f; + } + return NULL; +} + +static struct sxe2vf_addr_node *sxe2vf_addr_node_add(struct sxe2vf_adapter *adapter, + const u8 *macaddr) +{ + struct sxe2vf_addr_node *f = NULL; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + + f = sxe2vf_addr_find(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) { + LOG_ERROR_BDF("create list node for macaddr:%pM failed.\n", + macaddr); + return f; + } + + list_add_tail(&f->list, &filter->mac_addr_list); + + ether_addr_copy(f->mac.macaddr, macaddr); + } + + f->mac.attr.is_vf_mac = (u8)ether_addr_equal(macaddr, filter->cur_mac_addr); + + LOG_INFO_BDF("mac list node addr:%pM attr:0x%x.\n", f->mac.macaddr, + *(u8 *)&f->mac.attr); + + return f; +} + +s32 sxe2vf_addr_node_del(struct sxe2vf_adapter *adapter, const u8 *macaddr) +{ + s32 ret = 0; + struct sxe2vf_addr_node *f; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + + f = sxe2vf_addr_find(adapter, macaddr); + if (!f) { + LOG_ERROR_BDF("mac addr:%pM not exist\n", macaddr); + ret = -EINVAL; + goto l_out; + } + + f->mac.attr.is_vf_mac = (u8)ether_addr_equal(macaddr, filter->cur_mac_addr); + sxe2vf_switch_mac_node_del_and_free(f); + +l_out: + return ret; +} + +static void sxe2vf_addr_list_clear(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + struct sxe2vf_addr_node *f; + struct sxe2vf_addr_node *ftmp; + + list_for_each_entry_safe(f, ftmp, &filter->mac_addr_list, list) + { + sxe2vf_switch_mac_node_del_and_free(f); + f = NULL; + } +} + +static void sxe2vf_vlan_list_clear(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2vf_vlan_node *vlan_node; + struct sxe2vf_vlan_node *vlan_node_tmp; + + list_for_each_entry_safe(vlan_node, vlan_node_tmp, &vlan_info->vlan_list, + list) + { + list_del(&vlan_node->list); + kfree(vlan_node); + vlan_node = NULL; + } +} + +STATIC struct sxe2vf_addr_node * +sxe2vf_user_addr_find_unlock(struct sxe2vf_adapter *adapter, const u8 *macaddr) +{ + struct sxe2vf_addr_node *f; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + + list_for_each_entry(f, &filter->mac_addr_list, list) + { + if (ether_addr_equal(macaddr, f->mac.macaddr)) + return f; + } + return NULL; +} + +int sxe2vf_mac_addr_add(struct sxe2vf_adapter *adapter, const u8 *addr, + enum sxe2vf_mac_owner owner) +{ + s32 ret = 0; + struct sxe2vf_addr_node *f; + u16 vsi_id; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + lockdep_assert_held(&switch_ctxt->mac_addr_lock); + + f = sxe2vf_addr_node_add(adapter, addr); + if (!f) { + LOG_ERROR_BDF("add user mac addr:%pM 
node failed.\n", addr); + return -ENOMEM; + } + + if (is_multicast_ether_addr(addr) || + !sxe2vf_user_addr_find_unlock(adapter, addr)) { + vsi_id = adapter->vsi_ctxt.vf_vsi->vsi_id; + if (f->usage == 0) { + ret = sxe2vf_mac_msg_send(adapter, &f->mac, true, false, + vsi_id); + if (ret) { + LOG_ERROR_BDF("add mac %pM failed %d\n", addr, ret); + (void)sxe2vf_addr_node_del(adapter, addr); + return ret; + } + } else if (owner == SXE2VF_MAC_OWNER_NETDEV) { + (void)sxe2vf_mac_msg_send(adapter, &f->mac, true, false, + vsi_id); + } + } + + set_bit((int)owner, &f->usage); + + LOG_INFO_BDF("add mac %pM done\n", addr); + + return ret; +} + +int sxe2vf_mac_addr_del(struct sxe2vf_adapter *adapter, const u8 *addr, + enum sxe2vf_mac_owner owner) +{ + s32 ret = 0; + struct sxe2vf_addr_node *f; + u16 vsi_id; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + lockdep_assert_held(&switch_ctxt->mac_addr_lock); + + f = sxe2vf_addr_find(adapter, addr); + if (!f) { + LOG_ERROR_BDF("mac addr:%pM not exist\n", addr); + return -ENOENT; + } + + clear_bit((int)owner, &f->usage); + + if (is_multicast_ether_addr(addr) || + !sxe2vf_user_addr_find_unlock(adapter, addr)) { + if (f->usage == 0) { + vsi_id = adapter->vsi_ctxt.vf_vsi->vsi_id; + ret = sxe2vf_mac_msg_send(adapter, &f->mac, false, false, + vsi_id); + if (ret) { + LOG_ERROR_BDF("del mac %pM failed err:%d\n", addr, + ret); + set_bit((int)owner, &f->usage); + goto l_out; + } else { + (void)sxe2vf_addr_node_del(adapter, addr); + } + LOG_INFO_BDF("del mac %pM done\n", addr); + } else { + LOG_INFO_BDF("Do not need to del mac %pM, because it is in " + "using\n", + addr); + } + } else { + (void)sxe2vf_addr_node_del(adapter, addr); + } + +l_out: + return ret; +} + +#ifdef SXE2VF_MAC_VLAN_CLEAR + +static int sxe2vf_mac_addr_clear(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + mutex_lock(&switch_ctxt->mac_addr_lock); + ret = sxe2vf_mac_clear_msg_send(adapter); + if (ret) + LOG_INFO_BDF("clear mac list failed\n"); + + sxe2vf_addr_list_clear(adapter); + mutex_unlock(&switch_ctxt->mac_addr_lock); + + LOG_INFO_BDF("mac list clear.\n"); + + return ret; +} + +static int sxe2vf_vlan_clear(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + s32 ret = 0; + + mutex_lock(&vlan_info->vlan_lock); + ret = sxe2vf_vlan_clear_msg_send(adapter); + if (ret) + LOG_INFO_BDF("clear vlan list failed\n"); + + sxe2vf_vlan_list_clear(adapter); + mutex_unlock(&vlan_info->vlan_lock); + + LOG_INFO_BDF("vlan list clear.\n"); + + return ret; +} +#endif +s32 sxe2vf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_sync_addr_node *f; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + s32 ret = 0; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) { + LOG_ERROR_BDF("create sync list node for macaddr:%pM failed.\n", + addr); + ret = -ENOMEM; + goto l_out; + } + + list_add_tail(&f->list, &filter->tmp_sync_list); + + ether_addr_copy(f->macaddr, addr); + + LOG_INFO_BDF("mac list node sync addr:%pM.\n", f->macaddr); + +l_out: + return ret; +} + +s32 sxe2vf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_sync_addr_node *f; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + s32 ret = 0; + + f = kzalloc(sizeof(*f), 
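+		    /* GFP_ATOMIC: invoked via __dev_uc_sync/__dev_mc_sync under netif_addr_lock_bh */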
GFP_ATOMIC); + if (!f) { + LOG_ERROR_BDF("create unsync list node for macaddr:%pM failed.\n", + addr); + ret = -ENOMEM; + goto l_out; + } + + list_add_tail(&f->list, &filter->tmp_unsync_list); + + ether_addr_copy(f->macaddr, addr); + + LOG_INFO_BDF("mac list node unsync addr:%pM.\n", f->macaddr); + +l_out: + return ret; +} + +int sxe2vf_set_mac_address(struct net_device *netdev, void *p) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + struct sxe2vf_addr_node *f; + struct sockaddr *addr = p; + u8 old[ETH_ALEN] = {0}; + s32 ret = 0; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + if (!is_valid_ether_addr(addr->sa_data)) { + LOG_ERROR_BDF("invalid user mac addr:%pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + LOG_ERROR_BDF("user mac addr:%pM equal cur mac addr, skip set\n", + addr->sa_data); + return ret; + } + + (void)mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2VF_VSI_DISABLE, adapter->vsi_ctxt.vf_vsi->state)) { + LOG_INFO_BDF("vsi disabled, try later\n"); + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + return -EBUSY; + } + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + f = sxe2vf_addr_find(adapter, filter->cur_mac_addr); + if (f) { + ether_addr_copy(old, f->mac.macaddr); + + ret = sxe2vf_mac_addr_del(adapter, old, SXE2VF_MAC_OWNER_NETDEV); + if (ret) + LOG_ERROR_BDF("del mac addr:%pM failed.\n", old); + } + + ether_addr_copy(filter->cur_mac_addr, addr->sa_data); + + ret = sxe2vf_mac_addr_add(adapter, addr->sa_data, SXE2VF_MAC_OWNER_NETDEV); + if (ret) { + LOG_ERROR_BDF("add mac addr:%pM failed.\n", addr->sa_data); + goto l_rollback; + } else { + if (!ether_addr_equal(netdev->dev_addr, filter->cur_mac_addr)) { + eth_hw_addr_set(netdev, filter->cur_mac_addr); + LOG_INFO_BDF("vf mac change to %pM.\n", + filter->cur_mac_addr); + } + } + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + + LOG_INFO_BDF("set mac %pM ret:%d.\n", addr->sa_data, ret); + + return ret; + +l_rollback: + + if (sxe2vf_mac_addr_add(adapter, old, SXE2VF_MAC_OWNER_NETDEV)) + LOG_ERROR_BDF("rollback add old mac addr:%pM failed.\n", old); + + f = sxe2vf_addr_find(adapter, old); + if (f) + f->mac.attr.is_vf_mac = 1; + + f = sxe2vf_addr_find(adapter, addr->sa_data); + if (f) + f->mac.attr.is_vf_mac = 0; + + ether_addr_copy(filter->cur_mac_addr, old); + + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + (void)mutex_unlock(&adapter->vsi_ctxt.lock); + + return ret; +} + +bool sxe2vf_promisc_mode_changed(struct sxe2vf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + (void)netdev; + return (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags ^ netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + +void sxe2vf_set_rx_mode(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + set_bit(SXE2VF_FLAG_FLTR_SYNC, adapter->flags); + + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); +} + +#define SET_ENABLE_FLAG_BY_VLAN_TAG(member, NET_FLAG) \ + do { \ + LOG_INFO_BDF("feature request %llx dev %llx " #NET_FLAG "\n", \ + request &NET_FLAG, *dev_features &NET_FLAG); \ + if ((request & NET_FLAG) ^ (*dev_features & NET_FLAG)) { \ + member = false; \ + if (request & NET_FLAG) \ + member = true; \ + LOG_INFO_BDF("dev_request %llx\n", *dev_features); \ + } \ + } while (0) +s32 sxe2vf_vlan_filter_cfg(struct net_device *netdev, 
netdev_features_t request) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vlan_filter *filter_offload = + &adapter->switch_ctxt.filter_ctxt.vlan_info.filter_offload; + netdev_features_t *dev_features = + &adapter->switch_ctxt.filter_ctxt.vlan_info.dev_features; + if (adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist) + request &= ~SXE2VF_VLAN_FILTER_FEATURES; + + (void)memset(filter_offload, SXE2_VF_VLAN_STATUS_INVALID, + sizeof(*filter_offload)); + + SET_ENABLE_FLAG_BY_VLAN_TAG(filter_offload->ctag_filter_enable, + NETIF_F_HW_VLAN_CTAG_FILTER); + SET_ENABLE_FLAG_BY_VLAN_TAG(filter_offload->stag_filter_enable, + NETIF_F_HW_VLAN_STAG_FILTER); + + if (filter_offload->ctag_filter_enable != SXE2_VF_VLAN_STATUS_INVALID) { + ret = sxe2vf_vlan_filter_msg_send(adapter, false); + if (!ret) { + *dev_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER); + *dev_features |= (request & (NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER)); + } + } + LOG_INFO_BDF("ctag_filter_enable:%u stag_filter_enable:%u.\n", + filter_offload->ctag_filter_enable, + filter_offload->stag_filter_enable); + return ret; +} + +#ifdef SXE2VF_MAC_VLAN_CLEAR +void sxe2vf_l2_filter_clear(struct sxe2vf_adapter *adapter) +{ + sxe2vf_mac_addr_clear(adapter); + + sxe2vf_vlan_clear(adapter); +} +#endif + +void sxe2vf_filter_list_destroy(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + sxe2vf_addr_list_clear(adapter); + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + (void)mutex_lock(&vlan_info->vlan_lock); + sxe2vf_vlan_list_clear(adapter); + (void)mutex_unlock(&vlan_info->vlan_lock); +} + +void sxe2vf_vlan_feature_update(struct sxe2vf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + u8 port_vlan_exist = vlan_info->port_vlan_exist; + u8 is_switchdev = vlan_info->is_switchdev; + + if (port_vlan_exist) { + netdev->features &= + ~(SXE2VF_VLAN_FILTER_FEATURES | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); + netdev->hw_features &= + ~(SXE2VF_VLAN_FILTER_FEATURES | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); + } + + if (is_switchdev) { + netdev->features &= + ~(SXE2VF_VLAN_FILTER_FEATURES | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX); + netdev->hw_features &= + ~(SXE2VF_VLAN_FILTER_FEATURES | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX); + } + + LOG_INFO_BDF("feature:0x%llx hw_feature:0x%llx.\n", netdev->features, + netdev->hw_features); +} + +s32 sxe2vf_vlan_offload_cfg(struct net_device *netdev, netdev_features_t request) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2vf_vlan_offload *vlan_offload = &vlan_info->vlan_offload; + netdev_features_t *dev_features = &vlan_info->dev_features; + if (vlan_info->port_vlan_exist) { + LOG_INFO_BDF("port vlan exist, disable stag offload.\n"); + request &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); + } + + (void)memset(vlan_offload, SXE2_VF_VLAN_STATUS_INVALID, + sizeof(*vlan_offload)); + + 
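+	/* diff each requested flag against the cached dev_features; only changed bits are marked for hw update */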
SET_ENABLE_FLAG_BY_VLAN_TAG(vlan_offload->stag_strip_enable, + NETIF_F_HW_VLAN_STAG_RX); + SET_ENABLE_FLAG_BY_VLAN_TAG(vlan_offload->stag_insert_enable, + NETIF_F_HW_VLAN_STAG_TX); + SET_ENABLE_FLAG_BY_VLAN_TAG(vlan_offload->ctag_strip_enable, + NETIF_F_HW_VLAN_CTAG_RX); + SET_ENABLE_FLAG_BY_VLAN_TAG(vlan_offload->ctag_insert_enable, + NETIF_F_HW_VLAN_CTAG_TX); + + ret = sxe2vf_vlan_offload_msg_send(adapter); + if (!ret) { + *dev_features &= + ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX); + *dev_features |= (request & (NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX)); + } + + LOG_INFO_BDF("ctag_strip:%u stag_strip:%u\t" + " ctag_insert:%u stag_insert:%u.\n", + vlan_offload->ctag_strip_enable, + vlan_offload->stag_strip_enable, + vlan_offload->ctag_insert_enable, + vlan_offload->stag_insert_enable); + return ret; +} + +s32 sxe2vf_vlan_cfg(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + netdev_features_t req; + + sxe2vf_vlan_feature_update(adapter); + req = adapter->netdev->features; + (void)req; + ret = sxe2vf_vlan_filter_cfg(adapter->netdev, req); + if (ret) + return ret; + + ret = sxe2vf_vlan_offload_cfg(adapter->netdev, req); + return ret; +} + +s32 sxe2vf_vlan_cfg_rebuild(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + if (!adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist && + !adapter->switch_ctxt.filter_ctxt.vlan_info.is_switchdev) { + adapter->netdev->hw_features |= + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + adapter->netdev->features |= SXE2VF_VLAN_FILTER_FEATURES; + } else { + adapter->netdev->features &= ~SXE2VF_VLAN_FILTER_FEATURES; + } + + ret = sxe2vf_vlan_filter_cfg(adapter->netdev, adapter->netdev->features); + if (ret) + return ret; + + ret = sxe2vf_vlan_offload_cfg(adapter->netdev, adapter->netdev->features); + return ret; +} + +static struct sxe2vf_vlan_node *sxe2vf_vlan_find(struct sxe2vf_adapter *adapter, + struct sxe2vf_vlan vlan) +{ + struct sxe2vf_vlan_node *f; + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + + list_for_each_entry(f, &vlan_info->vlan_list, list) + { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) + return f; + } + + return NULL; +} + +bool sxe2vf_vlan_cnt_is_valid(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + + return vlan_info->cnt < vlan_info->max_cnt; +} + +STATIC int sxe2vf_vlan_process(struct sxe2vf_adapter *adapter, + struct sxe2vf_vlan vlan, bool add) +{ + int ret = 0; + struct sxe2vf_vlan_node *f = NULL; + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + + (void)mutex_lock(&vlan_info->vlan_lock); + + if (add) { + f = sxe2vf_vlan_find(adapter, vlan); + if (f) + goto l_unlock; + + if (!sxe2vf_vlan_cnt_is_valid(adapter)) { + LOG_DEV_ERR("vlan cnt:%u exceed max support cnt:%u, try to " + "delete\t" + "or disable exists vlans.\n", + vlan_info->cnt, vlan_info->max_cnt); + ret = -EPERM; + goto l_unlock; + } + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) { + LOG_ERROR_BDF("vlan tpid:%u vid:%u prio:%u alloc failed.\n", + vlan.tpid, vlan.vid, vlan.prio); + ret = -ENOMEM; + goto l_unlock; + } + + ret = sxe2vf_vlan_msg_send(adapter, &vlan, true); + if (ret == -EEXIST) { + ret = 0; + } else if (ret) { + LOG_ERROR_BDF("add vlan mbx msg send failed ret %d.\n", ret); + kfree(f); + goto l_unlock; + } + + f->vlan = vlan; + 
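+		/* firmware accepted the vlan filter; track it locally so it can be replayed after reset */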
+		list_add_tail(&f->list, &vlan_info->vlan_list);
+		vlan_info->cnt++;
+	} else {
+		f = sxe2vf_vlan_find(adapter, vlan);
+		if (!f)
+			goto l_unlock;
+
+		ret = sxe2vf_vlan_msg_send(adapter, &vlan, false);
+		if (ret == -ENOENT) {
+			ret = 0;
+		} else if (ret) {
+			LOG_ERROR_BDF("del vlan mbx msg send failed ret %d.\n", ret);
+			goto l_unlock;
+		}
+
+		list_del(&f->list);
+		kfree(f);
+		vlan_info->cnt--;
+	}
+
+l_unlock:
+	(void)mutex_unlock(&vlan_info->vlan_lock);
+	return ret;
+}
+
+int sxe2vf_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vlan vlan;
+	u16 proto_u16;
+	int ret = 0;
+
+	if (!vid && be16_to_cpu(proto) == ETH_P_8021Q)
+		return 0;
+
+	(void)mutex_lock(&adapter->vsi_ctxt.lock);
+
+	if (test_bit(SXE2VF_VSI_DISABLE, adapter->vsi_ctxt.vf_vsi->state)) {
+		LOG_INFO_BDF("vsi disabled, try later\n");
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+	proto_u16 = be16_to_cpu(proto);
+	vlan = SXE2VF_VLAN(proto_u16, vid, 0);
+
+	ret = sxe2vf_vlan_process(adapter, vlan, true);
+	if (ret) {
+		LOG_ERROR_BDF("add vlan failed ret:%d.\n", ret);
+		goto l_unlock;
+	}
+
+	LOG_INFO_BDF("vlan tpid:0x%x vid:%u prio:%u add request.\n", vlan.tpid,
+		     vlan.vid, vlan.prio);
+
+l_unlock:
+	(void)mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+int sxe2vf_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+	struct sxe2vf_adapter *adapter = netdev_priv(netdev);
+	struct sxe2vf_vlan vlan;
+	u16 proto_u16;
+	int ret = 0;
+
+	if (!vid && be16_to_cpu(proto) == ETH_P_8021Q)
+		return 0;
+
+	(void)mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_VSI_DISABLE, adapter->vsi_ctxt.vf_vsi->state)) {
+		LOG_INFO_BDF("vsi disabled, try later\n");
+		ret = -EBUSY;
+		goto l_unlock;
+	}
+	proto_u16 = be16_to_cpu(proto);
+	vlan = SXE2VF_VLAN(proto_u16, vid, 0);
+
+	ret = sxe2vf_vlan_process(adapter, vlan, false);
+	if (ret) {
+		LOG_ERROR_BDF("del vlan failed ret:%d.\n", ret);
+		goto l_unlock;
+	}
+
+	LOG_INFO_BDF("vlan tpid:0x%x vid:%u delete request.\n", vlan.tpid,
+		     vlan.vid);
+
+l_unlock:
+	(void)mutex_unlock(&adapter->vsi_ctxt.lock);
+	return ret;
+}
+
+s32 sxe2vf_l2_filter_cfg_sync(struct sxe2vf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct sxe2vf_mac_filter *filter =
+		&adapter->switch_ctxt.filter_ctxt.mac_filter;
+	struct sxe2vf_sync_addr_node *list_itr = NULL;
+	struct sxe2vf_sync_addr_node *tmp = NULL;
+	s32 ret = 0;
+	struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt;
+
+	if (!test_bit(SXE2VF_FLAG_FLTR_SYNC, adapter->flags))
+		return 0;
+
+	clear_bit(SXE2VF_FLAG_FLTR_SYNC, adapter->flags);
+
+	(void)mutex_lock(&switch_ctxt->mac_addr_lock);
+
+	INIT_LIST_HEAD(&filter->tmp_sync_list);
+	INIT_LIST_HEAD(&filter->tmp_unsync_list);
+
+	netif_addr_lock_bh(netdev);
+	(void)__dev_uc_sync(netdev, sxe2vf_addr_sync, sxe2vf_addr_unsync);
+	(void)__dev_mc_sync(netdev, sxe2vf_addr_sync, sxe2vf_addr_unsync);
+	netif_addr_unlock_bh(netdev);
+
+	list_for_each_entry_safe(list_itr, tmp, &filter->tmp_sync_list, list)
+	{
+		ret = sxe2vf_mac_addr_add(adapter, list_itr->macaddr,
+					  SXE2VF_MAC_OWNER_UC_MC);
+		if (ret == -EEXIST) {
+			LOG_WARN_BDF("mac filter exist, addr %pM\n",
+				     list_itr->macaddr);
+		} else if (ret) {
+			LOG_DEV_ERR("add mac filter failed, addr %pM, ret %d\n",
+				    list_itr->macaddr, ret);
+		}
+		list_del(&list_itr->list);
+		kfree(list_itr);
+		list_itr = NULL;
+	}
+
+	list_for_each_entry_safe(list_itr, tmp, &filter->tmp_unsync_list, list)
+	{
+		ret = sxe2vf_mac_addr_del(adapter,
list_itr->macaddr, + SXE2VF_MAC_OWNER_UC_MC); + if (ret == -ENOENT) { + LOG_WARN_BDF("mac filter not exist, addr %pM\n", + list_itr->macaddr); + } else if (ret) { + LOG_DEV_ERR("delete mac filter failed, addr %pM, ret %d\n", + list_itr->macaddr, ret); + } + list_del(&list_itr->list); + kfree(list_itr); + list_itr = NULL; + } + + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + rtnl_lock(); + ret = sxe2vf_promisc_set_msg_send(adapter); + rtnl_unlock(); + if (ret) + LOG_ERROR_BDF("promisc set mbx msg send failed ret %d.\n", ret); + + return ret; +} + +s32 sxe2vf_dev_mac_add(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + s32 ret; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + ret = sxe2vf_mac_addr_add(adapter, filter->cur_mac_addr, + SXE2VF_MAC_OWNER_NETDEV); + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + if (ret) { + LOG_ERROR_BDF("vf dev mac:%pM add failed.\n", filter->cur_mac_addr); + return ret; + } + + if (is_valid_ether_addr(filter->cur_mac_addr) && + !ether_addr_equal(adapter->netdev->dev_addr, filter->cur_mac_addr)) { + LOG_WARN_BDF("change vf dev mac from %pM to %pM.\n", + adapter->netdev->dev_addr, filter->cur_mac_addr); + eth_hw_addr_set(adapter->netdev, filter->cur_mac_addr); + } + + return ret; +} + +static void sxe2vf_vlan_rules_restore(struct sxe2vf_adapter *adapter) +{ + s32 ret; + struct sxe2vf_vlan_node *f; + struct sxe2vf_vlan_node *tmp = NULL; + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2vf_vlan vlan; + + (void)mutex_lock(&vlan_info->vlan_lock); + + list_for_each_entry_safe(f, tmp, &vlan_info->vlan_list, list) + { + vlan = SXE2VF_VLAN(f->vlan.tpid, f->vlan.vid, f->vlan.prio); + ret = sxe2vf_vlan_msg_send(adapter, &vlan, true); + if (ret && (ret != -EEXIST)) { + LOG_DEV_ERR("add vlan filter tpid:0x%x vid:%u prio:%u " + "failed ret %d\n", + vlan.tpid, vlan.vid, vlan.prio, ret); + list_del(&f->list); + kfree(f); + vlan_info->cnt--; + } + } + + (void)mutex_unlock(&vlan_info->vlan_lock); +} + +static void sxe2vf_mac_rules_restore(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + struct sxe2vf_addr_node *f; + struct sxe2vf_addr_node *tmp = NULL; + u16 vsi_id = adapter->vsi_ctxt.vf_vsi->vsi_id; + s32 ret; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + list_for_each_entry_safe(f, tmp, &filter->mac_addr_list, list) + { + if (ether_addr_equal(filter->cur_mac_addr, f->mac.macaddr)) + continue; + ret = sxe2vf_mac_msg_send(adapter, &f->mac, true, false, vsi_id); + if (ret && (ret != -EEXIST)) { + LOG_ERROR_BDF("restore mac %pM, usage %lx failed %d\n", + f->mac.macaddr, f->usage, ret); + sxe2vf_switch_mac_node_del_and_free(f); + } + } + + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); +} + +static void sxe2vf_promisc_rules_restore(struct sxe2vf_adapter *adapter) +{ + struct sxe2_vf_promisc_msg msg = {0}; + u32 promisc_flags = 0; + s32 ret = 0; + + if (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_ALLMULTI) + promisc_flags |= SXE2_VF_PROMISC_MULTICAST; + if (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC) + promisc_flags |= SXE2_VF_PROMISC | SXE2_VF_PROMISC_MULTICAST; + + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + promisc_flags |= SXE2_VF_VLAN_FILTER; + + msg.vsi_id = 
cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + msg.flags = cpu_to_le32(promisc_flags); + + ret = sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_PROMISC_CFG, (u8 *)&msg, + sizeof(msg)); + if (ret) + LOG_ERROR_BDF("set promisc msg handle result:%d.\n", ret); +} + +void sxe2vf_l2_filter_rules_restore(struct sxe2vf_adapter *adapter) +{ + if (sxe2vf_dev_mac_add(adapter)) + return; + + sxe2vf_mac_rules_restore(adapter); + + if (!adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist && + !adapter->switch_ctxt.filter_ctxt.vlan_info.is_switchdev) { + sxe2vf_vlan_rules_restore(adapter); + } + + sxe2vf_promisc_rules_restore(adapter); +} + +STATIC s32 sxe2vf_ucmd_com_mode_unicast_mac_add(struct sxe2vf_adapter *adapter, + u16 vsi_id, const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_mac_filter *user_mac_fltr; + struct sxe2vf_addr_node *eth_mac_node; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u add unicast mac %pM\n", vsi_id, addr); + + user_mac_fltr = &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in user mac list.\n", addr); + ret = -EEXIST; + goto l_end; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for macaddr:%pM failed.\n", addr); + ret = -ENOMEM; + goto l_end; + } + + ether_addr_copy(user_mac_node->mac.macaddr, addr); + + eth_mac_node = sxe2vf_addr_find(adapter, addr); + if (eth_mac_node) { + ret = sxe2vf_mac_update_msg_send(adapter, addr, true); + if (ret) { + LOG_ERROR_BDF("mac %pM rule update to user failed %d\n", + addr, ret); + kfree(user_mac_node); + goto l_end; + } + } else { + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, true, true, + vsi_id); + if (ret) { + LOG_ERROR_BDF("user add mac %pM failed %d\n", addr, ret); + kfree(user_mac_node); + goto l_end; + } + } + + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +STATIC s32 sxe2vf_ucmd_user_mode_unicast_mac_add(struct sxe2vf_adapter *adapter, + u16 vsi_id, const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_mac_filter *user_mac_fltr; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u add unicast mac %pM\n", vsi_id, addr); + + user_mac_fltr = &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in user mac list.\n", addr); + ret = -EEXIST; + goto l_end; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for macaddr:%pM failed.\n", addr); + ret = -ENOMEM; + goto l_end; + } + + ether_addr_copy(user_mac_node->mac.macaddr, addr); + + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, true, true, vsi_id); + if (ret) { + LOG_ERROR_BDF("user add mac %pM failed %d\n", addr, ret); + kfree(user_mac_node); + goto l_end; + } + + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +s32 sxe2vf_ucmd_unicast_mac_add(struct sxe2vf_adapter *adapter, u16 vsi_id, + const u8 *addr) 
+{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) { + ret = sxe2vf_ucmd_user_mode_unicast_mac_add(adapter, vsi_id, addr); + } else { + ret = sxe2vf_ucmd_com_mode_unicast_mac_add(adapter, vsi_id, addr); + } + + return ret; +} + +STATIC s32 sxe2vf_ucmd_com_mode_unicast_mac_del(struct sxe2vf_adapter *adapter, + u16 vsi_id, const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_addr_node *eth_mac_node; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u del unicast mac %pM\n", vsi_id, addr); + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (!user_mac_node) { + LOG_WARN_BDF("mac:%pM is not in user mac list.\n", addr); + goto l_end; + } + + eth_mac_node = sxe2vf_addr_find(adapter, addr); + if (eth_mac_node) { + ret = sxe2vf_mac_update_msg_send(adapter, addr, false); + if (ret) { + LOG_ERROR_BDF("mac %pM rule update to kernel failed %d\n", + addr, ret); + goto l_end; + } + } else { + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, false, true, + vsi_id); + if (ret) { + LOG_ERROR_BDF("user del mac %pM failed %d\n", addr, ret); + goto l_end; + } + } + + sxe2vf_switch_mac_node_del_and_free(user_mac_node); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +STATIC s32 sxe2vf_ucmd_user_mode_unicast_mac_del(struct sxe2vf_adapter *adapter, + u16 vsi_id, const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u del unicast mac %pM\n", vsi_id, addr); + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (!user_mac_node) { + LOG_WARN_BDF("mac:%pM is not in user mac list.\n", addr); + goto l_end; + } + + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, false, true, vsi_id); + if (ret) { + LOG_ERROR_BDF("user del mac %pM failed %d\n", addr, ret); + goto l_end; + } + + sxe2vf_switch_mac_node_del_and_free(user_mac_node); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +s32 sxe2vf_ucmd_unicast_mac_del(struct sxe2vf_adapter *adapter, u16 vsi_id, + const u8 *addr) +{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) { + ret = sxe2vf_ucmd_user_mode_unicast_mac_del(adapter, vsi_id, addr); + } else { + ret = sxe2vf_ucmd_com_mode_unicast_mac_del(adapter, vsi_id, addr); + } + + return ret; +} + +int sxe2vf_ucmd_multi_broad_mac_add(struct sxe2vf_adapter *adapter, u16 vsi_id, + const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_mac_filter *user_mac_fltr; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u add multi broad mac %pM\n", vsi_id, addr); + + user_mac_fltr = &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (user_mac_node) { + LOG_ERROR_BDF("mac:%pM has been in user mac list.\n", addr); + ret = -EEXIST; + goto l_end; + } + + user_mac_node = kzalloc(sizeof(*user_mac_node), GFP_KERNEL); + if (!user_mac_node) { + LOG_ERROR_BDF("create list node for macaddr:%pM failed.\n", addr); + ret = -ENOMEM; + goto l_end; + } + + ether_addr_copy(user_mac_node->mac.macaddr, addr); + + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, true, true, vsi_id); + 
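+	/* Roll back the local node if the PF rejected the filter. */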
if (ret) { + LOG_ERROR_BDF("user add mac %pM failed %d\n", addr, ret); + kfree(user_mac_node); + goto l_end; + } + + list_add_tail(&user_mac_node->list, &user_mac_fltr->mac_addr_list); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +int sxe2vf_ucmd_multi_broad_mac_del(struct sxe2vf_adapter *adapter, u16 vsi_id, + const u8 *addr) +{ + s32 ret = 0; + struct sxe2vf_addr_node *user_mac_node; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u del multi broad mac %pM\n", vsi_id, addr); + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + user_mac_node = sxe2vf_user_addr_find_unlock(adapter, addr); + if (!user_mac_node) { + LOG_WARN_BDF("mac:%pM is not in user mac list.\n", addr); + goto l_end; + } + + ret = sxe2vf_mac_msg_send(adapter, &user_mac_node->mac, false, true, vsi_id); + if (ret) { + LOG_ERROR_BDF("user del mac %pM failed %d\n", addr, ret); + goto l_end; + } + + sxe2vf_switch_mac_node_del_and_free(user_mac_node); + +l_end: + mutex_unlock(&switch_ctxt->mac_addr_lock); + return ret; +} + +STATIC int sxe2vf_ucmd_com_mode_promisc_rule_add(struct sxe2vf_adapter *adapter, + u16 vsi_id) +{ + s32 ret = 0; + + LOG_INFO_BDF("User vf vsi:%u add promisc rule\n", vsi_id); + + mutex_lock(&adapter->switch_ctxt.flag_lock); + if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC) { + LOG_ERROR_BDF("user vf has been set promisc\n"); + ret = -EEXIST; + goto l_end; + } + + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |= IFF_PROMISC; + if (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC) { + ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, true, + true); + if (ret) { + LOG_ERROR_BDF("promisc rule update to user failed %d\n", + ret); + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= + (~IFF_PROMISC); + } + } else { + ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id); + if (ret) { + LOG_ERROR_BDF("user set promisc failed %d\n", ret); + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= + (~IFF_PROMISC); + } + } + +l_end: + mutex_unlock(&adapter->switch_ctxt.flag_lock); + return ret; +} + +STATIC int sxe2vf_ucmd_user_mode_promisc_rule_add(struct sxe2vf_adapter *adapter, + u16 vsi_id) +{ + s32 ret = 0; + + LOG_INFO_BDF("User vf vsi:%u add promisc rule\n", vsi_id); + + if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC) { + LOG_ERROR_BDF("user vf has been set promisc\n"); + ret = -EEXIST; + goto l_end; + } + + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |= IFF_PROMISC; + ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id); + if (ret) { + LOG_ERROR_BDF("user set promisc failed %d\n", ret); + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= + (~IFF_PROMISC); + } + +l_end: + return ret; +} + +int sxe2vf_ucmd_promisc_rule_add(struct sxe2vf_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + ret = sxe2vf_ucmd_user_mode_promisc_rule_add(adapter, vsi_id); + else + ret = sxe2vf_ucmd_com_mode_promisc_rule_add(adapter, vsi_id); + + return ret; +} + +STATIC int sxe2vf_ucmd_com_mode_promisc_rule_del(struct sxe2vf_adapter *adapter, + u16 vsi_id) +{ + s32 ret = 0; + + LOG_INFO_BDF("User vf vsi:%u del promisc rule\n", vsi_id); + + mutex_lock(&adapter->switch_ctxt.flag_lock); + if (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC)) { + LOG_WARN_BDF("user vf has not been set promisc\n"); + goto l_end; + } + + 
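+	/* Clear the user promisc flag optimistically; the error paths below
+	 * restore it if the mailbox message fails.
+	 */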
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_PROMISC);
+	if (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC) {
+		ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, false,
+							  true);
+		if (ret) {
+			LOG_ERROR_BDF("promisc rule update to kernel failed %d\n",
+				      ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |=
+				IFF_PROMISC;
+		}
+	} else {
+		ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+		if (ret) {
+			LOG_ERROR_BDF("user set promisc failed %d\n", ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |=
+				IFF_PROMISC;
+		}
+	}
+
+l_end:
+	mutex_unlock(&adapter->switch_ctxt.flag_lock);
+	return ret;
+}
+
+STATIC int sxe2vf_ucmd_user_mode_promisc_rule_del(struct sxe2vf_adapter *adapter,
+						  u16 vsi_id)
+{
+	s32 ret = 0;
+
+	LOG_INFO_BDF("User vf vsi:%u del promisc rule\n", vsi_id);
+
+	if (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC)) {
+		LOG_WARN_BDF("user vf has not been set promisc\n");
+		goto l_end;
+	}
+
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_PROMISC);
+	ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+	if (ret) {
+		LOG_ERROR_BDF("user set promisc failed %d\n", ret);
+		adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |= IFF_PROMISC;
+	}
+
+l_end:
+	return ret;
+}
+
+int sxe2vf_ucmd_promisc_rule_del(struct sxe2vf_adapter *adapter, u16 vsi_id)
+{
+	s32 ret = 0;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		ret = sxe2vf_ucmd_user_mode_promisc_rule_del(adapter, vsi_id);
+	else
+		ret = sxe2vf_ucmd_com_mode_promisc_rule_del(adapter, vsi_id);
+
+	return ret;
+}
+
+STATIC int sxe2vf_ucmd_com_mode_allmulti_rule_add(struct sxe2vf_adapter *adapter,
+						  u16 vsi_id)
+{
+	s32 ret = 0;
+
+	LOG_INFO_BDF("User vf vsi:%u add allmulti rule\n", vsi_id);
+
+	mutex_lock(&adapter->switch_ctxt.flag_lock);
+	if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_ALLMULTI) {
+		LOG_ERROR_BDF("user vf has been set allmulti\n");
+		ret = -EEXIST;
+		goto l_end;
+	}
+
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |= IFF_ALLMULTI;
+	if ((adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_ALLMULTI) ||
+	    (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC)) {
+		ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, true,
+							  false);
+		if (ret) {
+			LOG_ERROR_BDF("allmulti rule update to user failed %d\n",
+				      ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &=
+				(~IFF_ALLMULTI);
+		}
+	} else {
+		ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+		if (ret) {
+			LOG_ERROR_BDF("user set allmulti failed %d\n", ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &=
+				(~IFF_ALLMULTI);
+		}
+	}
+
+l_end:
+	mutex_unlock(&adapter->switch_ctxt.flag_lock);
+	return ret;
+}
+
+STATIC int sxe2vf_ucmd_user_mode_allmulti_rule_add(struct sxe2vf_adapter *adapter,
+						   u16 vsi_id)
+{
+	s32 ret = 0;
+
+	LOG_INFO_BDF("User vf vsi:%u add allmulti rule\n", vsi_id);
+
+	if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_ALLMULTI) {
+		LOG_ERROR_BDF("user vf has been set allmulti\n");
+		ret = -EEXIST;
+		goto l_end;
+	}
+
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |= IFF_ALLMULTI;
+	ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+	if (ret) {
+		LOG_ERROR_BDF("user set allmulti failed %d\n", ret);
+		adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &=
+			(~IFF_ALLMULTI);
+	}
+
+l_end:
+	return ret;
+}
+
+int sxe2vf_ucmd_allmulti_rule_add(struct sxe2vf_adapter *adapter, u16 vsi_id)
+{
+	s32 ret = 0;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		ret = sxe2vf_ucmd_user_mode_allmulti_rule_add(adapter, vsi_id);
+	else
+		ret = sxe2vf_ucmd_com_mode_allmulti_rule_add(adapter, vsi_id);
+
+	return ret;
+}
+
+STATIC int sxe2vf_ucmd_com_mode_allmulti_rule_del(struct sxe2vf_adapter *adapter,
+						  u16 vsi_id)
+{
+	s32 ret = 0;
+
+	LOG_INFO_BDF("User vf vsi:%u del allmulti rule\n", vsi_id);
+
+	mutex_lock(&adapter->switch_ctxt.flag_lock);
+	if (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &
+	      IFF_ALLMULTI)) {
+		LOG_WARN_BDF("user vf has not been set allmulti\n");
+		goto l_end;
+	}
+
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_ALLMULTI);
+	if ((adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_ALLMULTI) ||
+	    (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC)) {
+		ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, false,
+							  false);
+		if (ret) {
+			LOG_ERROR_BDF("allmulti rule update to kernel failed %d\n",
+				      ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |=
+				IFF_ALLMULTI;
+		}
+	} else {
+		ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+		if (ret) {
+			LOG_ERROR_BDF("user set allmulti failed %d\n", ret);
+			adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |=
+				IFF_ALLMULTI;
+		}
+	}
+
+l_end:
+	mutex_unlock(&adapter->switch_ctxt.flag_lock);
+	return ret;
+}
+
+STATIC int sxe2vf_ucmd_user_mode_allmulti_rule_del(struct sxe2vf_adapter *adapter,
+						   u16 vsi_id)
+{
+	s32 ret = 0;
+
+	LOG_INFO_BDF("User vf vsi:%u del allmulti rule\n", vsi_id);
+
+	if (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &
+	      IFF_ALLMULTI)) {
+		LOG_WARN_BDF("user vf has not been set allmulti\n");
+		goto l_end;
+	}
+
+	adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_ALLMULTI);
+	ret = sxe2vf_user_promisc_set_msg_send(adapter, vsi_id);
+	if (ret) {
+		LOG_ERROR_BDF("user set allmulti failed %d\n", ret);
+		adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags |=
+			IFF_ALLMULTI;
+	}
+
+l_end:
+	return ret;
+}
+
+int sxe2vf_ucmd_allmulti_rule_del(struct sxe2vf_adapter *adapter, u16 vsi_id)
+{
+	s32 ret = 0;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		ret = sxe2vf_ucmd_user_mode_allmulti_rule_del(adapter, vsi_id);
+	else
+		ret = sxe2vf_ucmd_com_mode_allmulti_rule_del(adapter, vsi_id);
+
+	return ret;
+}
+
+s32 sxe2vf_ucmd_vlan_filter_cfg(struct sxe2vf_adapter *adapter, u16 vsi_id,
+				bool is_open)
+{
+	s32 ret = 0;
+	struct sxe2vf_vlan_filter *filter_offload =
+		&adapter->switch_ctxt.user_fltr_ctxt.vlan_info
+			 .filter_offload;
+
+	LOG_INFO_BDF("User vf vsi:%u %s vlan filter\n", vsi_id,
+		     is_open ?
"open" : "close"); + + if (is_open && adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist) { + LOG_WARN_BDF("can not open vlan filter, when port vlan exist.\n"); + goto l_end; + } + + if (is_open) { + filter_offload->ctag_filter_enable = 1; + filter_offload->stag_filter_enable = 1; + } else { + filter_offload->ctag_filter_enable = 0; + filter_offload->stag_filter_enable = 0; + } + + ret = sxe2vf_vlan_filter_msg_send(adapter, true); + if (ret) { + if (is_open) { + filter_offload->ctag_filter_enable = 0; + filter_offload->stag_filter_enable = 0; + } else { + filter_offload->ctag_filter_enable = 1; + filter_offload->stag_filter_enable = 1; + } + } + + LOG_INFO_BDF("ctag_filter_enable:%u stag_filter_enable:%u.\n", + filter_offload->ctag_filter_enable, + filter_offload->stag_filter_enable); + +l_end: + return ret; +} + +int sxe2vf_ucmd_vlan_rule_process(struct sxe2vf_adapter *adapter, u16 vsi_id, + struct sxe2vf_vlan *vlan, bool is_add) +{ + LOG_INFO_BDF("User vf vsi:%u %s vlan rule, vid:%u, tpid:%u\n", vsi_id, + is_add ? "add" : "del", vlan->vid, vlan->tpid); + + return sxe2vf_user_vlan_msg_send(adapter, vsi_id, vlan, is_add); +} + +static void sxe2vf_user_unicast_mac_rest(struct sxe2vf_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2vf_mac_filter *user_mac_fltr; + struct sxe2vf_mac_filter *eth_mac_fltr; + struct sxe2vf_addr_node *eth_node; + struct sxe2vf_addr_node *user_node; + struct sxe2vf_addr_node *tmp_1; + struct sxe2vf_addr_node *tmp_2; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + eth_mac_fltr = &adapter->switch_ctxt.filter_ctxt.mac_filter; + user_mac_fltr = &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + + list_for_each_entry_safe(user_node, tmp_1, &user_mac_fltr->mac_addr_list, + list) + { + if (is_unicast_ether_addr(user_node->mac.macaddr)) { + list_for_each_entry_safe(eth_node, tmp_2, + ð_mac_fltr->mac_addr_list, list) + { + if (!memcmp(user_node->mac.macaddr, + eth_node->mac.macaddr, + sizeof(user_node->mac.macaddr))) { + ret = sxe2vf_mac_update_msg_send( + adapter, + user_node->mac.macaddr, + false); + if (ret) { + LOG_ERROR_BDF("user vsi %u mac %pM, " + "update to\t" + "eth vf failed.\n", + vsi_id, + user_node->mac.macaddr); + } + break; + } + } + } + sxe2vf_switch_mac_node_del_and_free(user_node); + } + + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); +} + +static void sxe2vf_user_promisc_allmulti_rest(struct sxe2vf_adapter *adapter, + u16 vsi_id) +{ + s32 ret = 0; + + mutex_lock(&adapter->switch_ctxt.flag_lock); + + if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_ALLMULTI && + (adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_ALLMULTI || + adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC)) { + ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, false, + false); + if (ret) + LOG_ERROR_BDF("user vf vsi_id :%u allmulti rule update\t" + "to eth failed %d\n", + vsi_id, ret); + } + + if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC && + adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC) { + ret = sxe2vf_user_promisc_update_msg_send(adapter, vsi_id, false, + true); + if (ret) + LOG_ERROR_BDF("user vf vsi_id :%u promisc rule update\t" + " to eth failed %d\n", + vsi_id, ret); + } + + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_ALLMULTI); + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= (~IFF_PROMISC); + + mutex_unlock(&adapter->switch_ctxt.flag_lock); +} + +s32 
sxe2vf_user_l2_feature_clean(struct sxe2vf_adapter *adapter, u16 vsi_id) +{ + s32 ret = 0; + struct sxe2vf_mac_filter *user_mac_fltr; + struct sxe2vf_addr_node *user_node; + struct sxe2vf_addr_node *tmp; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + LOG_INFO_BDF("User vf vsi:%u clean l2 feature.\n", vsi_id); + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) { + user_mac_fltr = &adapter->switch_ctxt.user_fltr_ctxt.mac_filter; + (void)mutex_lock(&switch_ctxt->mac_addr_lock); + list_for_each_entry_safe(user_node, tmp, + &user_mac_fltr->mac_addr_list, list) + { + sxe2vf_switch_mac_node_del_and_free(user_node); + } + (void)mutex_unlock(&switch_ctxt->mac_addr_lock); + + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= + (~IFF_ALLMULTI); + adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags &= + (~IFF_PROMISC); + } else { + sxe2vf_user_unicast_mac_rest(adapter, vsi_id); + sxe2vf_user_promisc_allmulti_rest(adapter, vsi_id); + } + + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.h new file mode 100644 index 0000000000000000000000000000000000000000..65a52952c28c050e79bcb77b3820922247b4dda9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_l2_filter.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_l2_filter.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_L2_FILTER_H__ +#define __SXE2VF_L2_FILTER_H__ +#include +#include + +#define SXE2VF_VLAN_OFFLOAD_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX) + +#define SXE2VF_VLAN_FILTER_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define SXE2VF_VLAN(tpid, vid, prio) ((struct sxe2vf_vlan){ tpid, vid, prio }) + +struct sxe2vf_mac_attr { + u8 is_vf_mac : 1; + u8 reserve : 7; +}; + +struct sxe2vf_mac { + u8 macaddr[ETH_ALEN]; + struct sxe2vf_mac_attr attr; +}; + +enum sxe2vf_mac_owner { + SXE2VF_MAC_OWNER_NETDEV = 0, + SXE2VF_MAC_OWNER_UC_MC, + SXE2VF_MAC_OWNER_ROCE, +}; + +struct sxe2vf_addr_node { + struct list_head list; + struct sxe2vf_mac mac; + unsigned long usage; +}; + +struct sxe2vf_sync_addr_node { + struct list_head list; + u8 macaddr[ETH_ALEN]; +}; + +struct sxe2vf_vlan { + u16 tpid; + u16 vid; + u8 prio; +}; + +struct sxe2vf_vlan_node { + struct list_head list; + struct sxe2vf_vlan vlan; +}; + +struct sxe2vf_vlan_offload { + u8 stag_strip_enable; + u8 ctag_strip_enable; + u8 stag_insert_enable; + u8 ctag_insert_enable; +}; + +struct sxe2vf_vlan_filter { + u8 ctag_filter_enable; + u8 stag_filter_enable; +}; + +struct sxe2vf_vlan_info { + struct list_head vlan_list; + struct mutex vlan_lock; + u8 port_vlan_exist; + u8 is_switchdev; + u16 max_cnt; + u16 cnt; + struct sxe2vf_vlan_offload vlan_offload; + struct sxe2vf_vlan_filter filter_offload; + netdev_features_t dev_features; +}; + +s32 sxe2vf_addr_sync(struct net_device *netdev, const u8 *addr); + +s32 sxe2vf_addr_unsync(struct net_device *netdev, const u8 *addr); + +int sxe2vf_set_mac_address(struct net_device *netdev, void *p); + +void sxe2vf_set_rx_mode(struct net_device *netdev); + +s32 sxe2vf_l2_filter_cfg_sync(struct sxe2vf_adapter *adapter); + +void sxe2vf_l2_filter_clear(struct sxe2vf_adapter *adapter); + +bool sxe2vf_promisc_mode_changed(struct sxe2vf_adapter *adapter); + +void 
sxe2vf_filter_list_destroy(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vlan_cfg(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vlan_cfg_rebuild(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vlan_offload_cfg(struct net_device *netdev, + netdev_features_t request); + +s32 sxe2vf_vlan_filter_cfg(struct net_device *netdev, + netdev_features_t request); + +int sxe2vf_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); + +int sxe2vf_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); + +bool sxe2vf_vlan_cnt_is_valid(struct sxe2vf_adapter *adapter); + +void sxe2vf_vlan_feature_update(struct sxe2vf_adapter *adapter); + +int sxe2vf_mac_addr_add(struct sxe2vf_adapter *adapter, const u8 *addr, + enum sxe2vf_mac_owner owner); + +int sxe2vf_mac_addr_del(struct sxe2vf_adapter *adapter, const u8 *addr, + enum sxe2vf_mac_owner owner); + +s32 sxe2vf_dev_mac_add(struct sxe2vf_adapter *adapter); + +void sxe2vf_l2_filter_rules_restore(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_addr_node_del(struct sxe2vf_adapter *adapter, const u8 *macaddr); +struct sxe2vf_addr_node *sxe2vf_addr_find(struct sxe2vf_adapter *adapter, + const u8 *macaddr); + +s32 sxe2vf_ucmd_unicast_mac_add(struct sxe2vf_adapter *adapter, u16 vsi_id, const u8 *addr); + +s32 sxe2vf_ucmd_multi_broad_mac_add(struct sxe2vf_adapter *adapter, u16 vsi_id, const u8 *addr); + +s32 sxe2vf_ucmd_unicast_mac_del(struct sxe2vf_adapter *adapter, u16 vsi_id, const u8 *addr); + +s32 sxe2vf_ucmd_multi_broad_mac_del(struct sxe2vf_adapter *adapter, u16 vsi_id, const u8 *addr); + +s32 sxe2vf_ucmd_promisc_rule_add(struct sxe2vf_adapter *adapter, u16 vsi_id); + +s32 sxe2vf_ucmd_promisc_rule_del(struct sxe2vf_adapter *adapter, u16 vsi_id); + +s32 sxe2vf_ucmd_allmulti_rule_add(struct sxe2vf_adapter *adapter, u16 vsi_id); + +s32 sxe2vf_ucmd_allmulti_rule_del(struct sxe2vf_adapter *adapter, u16 vsi_id); +s32 sxe2vf_ucmd_vlan_filter_cfg(struct sxe2vf_adapter *adapter, + u16 vsi_id, bool is_open); +s32 sxe2vf_ucmd_vlan_rule_process(struct sxe2vf_adapter *adapter, u16 vsi_id, struct sxe2vf_vlan *vlan, + bool add); + +s32 sxe2vf_user_l2_feature_clean(struct sxe2vf_adapter *adapter, u16 vsi_id); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_main.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_main.c new file mode 100644 index 0000000000000000000000000000000000000000..05ba5801c3fab9a932166a66248c93de91a2ed69 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_main.c @@ -0,0 +1,2006 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_main.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxe2_compat.h" +#include "sxe2_version.h" +#include "sxe2_log.h" +#include "sxe2vf_pci.h" +#include "sxe2vf.h" +#include "sxe2vf_mbx_msg.h" +#include "sxe2_mbx_public.h" +#include "sxe2vf_netdev.h" +#include "sxe2vf_rx.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_vsi.h" +#include "sxe2vf_aux_drv.h" +#include "sxe2vf_ethtool.h" +#include "sxe2vf_rxft.h" +#include "sxe2vf_debugfs.h" +#include "sxe2vf_com_ioctl.h" + +#define CREATE_TRACE_POINTS +#include "sxe2vf_trace.h" +#undef CREATE_TRACE_POINTS + +#ifdef SXE2_CFG_DEBUG +int vf_reg_log; +module_param(vf_reg_log, int, 0644); +MODULE_PARM_DESC(vf_reg_log, "reg read/write log, 0-off 1-on."); + +int g_vf_switch_stats = 1; +module_param(g_vf_switch_stats, int, 0644); +MODULE_PARM_DESC(g_vf_switch_stats, "stats open/close, 0-off 1-on."); + +#endif + +static int com_mode = SXE2_COM_MODULE_UNDEFINED; +module_param(com_mode, uint, 0644); +MODULE_PARM_DESC(com_mode, "driver mode. kernel:0, dpdk:1, mixed:2(default)"); + +STATIC int msg_debug = -1; +module_param(msg_debug, int, 0644); +#ifndef CONFIG_DYNAMIC_DEBUG +MODULE_PARM_DESC(msg_debug, + "netif level (0=none,...,16=all), debug_mask (0x8XXXXXXXX)"); +#else +MODULE_PARM_DESC(msg_debug, "netif level (0=none,...,16=all)"); +#endif + +#ifndef SXE2VF_DRV_NAME +#define SXE2VF_DRV_NAME "SXE2VF" +#endif + +#ifndef SXE2VF_DRV_DESCRIPTION +#define SXE2VF_DRV_DESCRIPTION "LD 1160-2X Virtual Function" +#endif + +#define SXE2VF_STOP_DROP_TIMEOUT 1000 +#define SXE2VF_STOP_DROP_DONE_INTERVAL 1 + +int sxe2vf_com_mode_get(void *adapter) +{ + return ((struct sxe2vf_adapter *)adapter)->drv_mode; +} + +int sxe2vf_g_com_mode_get(void) +{ + return com_mode; +} + +static inline u32 sxe2vf_readl(void __iomem *reg) +{ + return readl(reg); +} + +static inline void sxe2vf_writel(u32 value, void __iomem *reg) +{ + writel(value, reg); +} + +static void sxe2vf_com_ctxt_fill(void *adapter) +{ + struct sxe2vf_adapter *vf_adapter = adapter; + + vf_adapter->com_ctxt.pdev = vf_adapter->pdev; + vf_adapter->com_ctxt.func_type = SXE2_VF; + vf_adapter->com_ctxt.pf_id = vf_adapter->pf_id; + vf_adapter->com_ctxt.vf_id = vf_adapter->vf_id_in_dev; +} + +struct sxe2_com_ops g_com_ops = { + .cmd_exec = sxe2vf_com_cmd_send, + .get_irq_num = sxe2vf_dpdk_irq_cnt_get, + .get_vector = sxe2vf_dpdk_irq_vector_idx_get, + .release = sxe2vf_dpdk_resource_release, + .com_ctxt_fill = sxe2vf_com_ctxt_fill, + .com_mode_get = sxe2vf_com_mode_get, +}; + +struct workqueue_struct *sxe2vf_wq; +struct workqueue_struct *sxe2vf_mbx_wq; +struct workqueue_struct *sxe2vf_msg_handle_wq; +struct workqueue_struct *sxe2vf_health_wq; + +STATIC const struct pci_device_id sxe2vf_pci_tbl[] = { + {SXE2VF_PCI_VENDOR_ID_1, SXE2VF_PCI_DEVICE_ID_1, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0}, + {SXE2VF_PCI_VENDOR_ID_2, SXE2VF_PCI_DEVICE_ID_2, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0}, + {SXE2VF_PCI_VENDOR_ID_1, SXE2VF_PCI_DEVICE_ID_10B4, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0}, + {SXE2VF_PCI_VENDOR_ID_206F, SXE2VF_PCI_DEVICE_ID_1, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0}, + { + 0, + }}; + +s32 sxe2vf_dpdk_caps_get(struct sxe2vf_adapter *adapter, + struct sxe2vf_res_caps *caps) +{ + s32 ret = 0; + + if (!adapter || !caps) { + ret = -EINVAL; + LOG_ERROR_BDF("param invalid.\n"); + goto l_end; + } + + caps->txq_base = adapter->q_ctxt.dpdk_offset; + 
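+	/* Tx/Rx queue and irq windows for DPDK all start at the dpdk offsets
+	 * kept in the adapter context.
+	 */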
caps->txq_cnt = adapter->q_ctxt.dpdk_q_cnt; + + caps->rxq_base = adapter->q_ctxt.dpdk_offset; + caps->rxq_cnt = adapter->q_ctxt.dpdk_q_cnt; + + caps->irq_base = adapter->irq_ctxt.dpdk_offset; + caps->irq_cnt = adapter->irq_ctxt.dpdk_irq_cnt; + + caps->rss_key_size = adapter->rss_ctxt.rss_key_size; + caps->rss_lut_size = adapter->rss_ctxt.rss_lut_size; + caps->rss_lut_type = adapter->rss_ctxt.rss_lut_type; + + LOG_INFO_BDF("dpdk vf txq base:%d cnt:%d rxq base:%d cnt:%d irq base:%d\t" + "cnt:%d\n" + "\t rss_key_size:%u lut_size:%u lut_type:%u.\n", + caps->txq_base, caps->txq_cnt, caps->rxq_base, caps->rxq_cnt, + caps->irq_base, caps->irq_cnt, caps->rss_key_size, + caps->rss_lut_size, caps->rss_lut_type); + +l_end: + return ret; +} + +STATIC struct sxe2vf_adapter *sxe2vf_adapter_create(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct sxe2vf_adapter *adapter = NULL; + const char *device_name = dev_name(&pdev->dev); + + netdev = alloc_etherdev_mq(sizeof(struct sxe2vf_adapter), + SXE2VF_QUEUES_CNT_MAX); + if (!netdev) { + LOG_ERROR("queue max:%d device[pci_id %u] net device alloc failed\n", + SXE2VF_QUEUES_CNT_MAX, pdev->dev.id); + return adapter; + } + + adapter = netdev_priv(netdev); + + adapter->pdev = pdev; + adapter->netdev = netdev; + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + (void)SXE2_STRCPY(adapter->dev_name, device_name, + min_t(u32, (strlen(device_name) + 1), DEV_NAME_LEN)); + + LOG_INFO_BDF("adapter:0x%pK netdev:0x%pK pdev:0x%pK\n", adapter, netdev, + pdev); + + return adapter; +} + +STATIC int sxe2vf_pci_init(struct sxe2vf_adapter *adapter) +{ + int ret; + struct pci_dev *pdev = adapter->pdev; + resource_size_t bar; + unsigned long len; + + ret = pci_enable_device(pdev); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] enable device failed\n", + adapter->pdev->dev.id); + return ret; + } + + ret = dma_set_mask_and_coherent(&adapter->pdev->dev, + DMA_BIT_MASK(SXE2VF_DMA_BIT_WIDTH_64)); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] set dma bit mask failed\n", + adapter->pdev->dev.id); + goto l_pci_disable; + } + + ret = pci_request_regions(pdev, SXE2VF_DRV_NAME); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] request IO memory failed\n", + pdev->dev.id); + goto l_pci_disable; + } + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif + + pci_set_master(pdev); + + bar = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + + adapter->hw.reg_base_addr = ioremap(bar, len); + if (!adapter->hw.reg_base_addr) { + ret = -EIO; + LOG_DEV_ERR("device[pci_id %u] \t" + "ioremap[bar:0x%llx, len:%zu] failed\n", + pdev->dev.id, (u64)bar, len); + goto l_pci_release_regions; + } + + pci_set_drvdata(pdev, adapter->netdev); + + (void)pci_save_state(pdev); + + LOG_INFO_BDF("bar_base_paddr = 0x%llx, \t" + "bar len:%zu, reg_base_addr:%pK\n", + (u64)bar, len, adapter->hw.reg_base_addr); + return 0; + +l_pci_release_regions: + pci_release_regions(pdev); +l_pci_disable: + pci_disable_device(pdev); + return ret; +} + +STATIC void sxe2vf_msg_level_init(struct sxe2vf_adapter *adapter) +{ + adapter->log_level_ctxt.msg_enable = + netif_msg_init(msg_debug, SXE2VF_DFLT_NETIF_M); +} + +STATIC void sxe2vf_hw_base_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_hw *hw; + + hw = &adapter->hw; + hw->adapter = adapter; + + hw->reg_read = sxe2vf_readl; + hw->reg_write = sxe2vf_writel; +} + +void sxe2vf_post_state_update(struct sxe2vf_work_context *work_ctxt, + enum sxe2vf_probe_post_state post_state) +{ + work_ctxt->post_state = post_state; + 
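+	/* Track probe progress so sxe2vf_probe_post_deinit() can unwind from
+	 * exactly this stage.
+	 */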
LOG_INFO("post state update to %u.\n", post_state); +} + +STATIC void sxe2vf_queue_work(struct sxe2vf_adapter *adapter, + struct workqueue_struct *wq, struct work_struct *dwork) +{ + if (!queue_work(wq, dwork)) + LOG_WARN_BDF("work was already on a queue.\n"); +} + +STATIC void sxe2vf_queue_delayed_work(struct sxe2vf_adapter *adapter, + struct workqueue_struct *wq, + struct delayed_work *dwork, + unsigned long delay) +{ + if (!queue_delayed_work(wq, dwork, delay)) + LOG_WARN_BDF("work was already on a queue.\n"); +} + +void sxe2vf_wkq_schedule(struct sxe2vf_adapter *adapter, enum sxe2vf_wk_type type, + const u32 delay) +{ + switch (type) { + case SXE2VF_WK_MONITOR: + if (!test_bit(SXE2VF_MONITOR_WORK_DISABLED, + &adapter->work_ctxt.state)) + sxe2vf_queue_delayed_work(adapter, sxe2vf_wq, + &adapter->work_ctxt.monitor_wk, + msecs_to_jiffies(delay)); + break; + + case SXE2VF_WK_MONITOR_IM: + if (!test_bit(SXE2VF_MONITOR_WORK_DISABLED, + &adapter->work_ctxt.state)) + sxe2vf_queue_work(adapter, sxe2vf_wq, + &adapter->work_ctxt.monitor_wk.work); + break; + + case SXE2VF_WK_MBX: + if (!test_bit(SXE2VF_FLAG_EVENT_IRQ_DISABLED, adapter->flags)) + sxe2vf_queue_work(adapter, sxe2vf_mbx_wq, + &adapter->work_ctxt.mbx_wk); + break; + + case SXE2VF_WK_NOTIFY_MSG: + if (!test_bit(SXE2VF_FLAG_EVENT_IRQ_DISABLED, adapter->flags)) + sxe2vf_queue_work(adapter, sxe2vf_msg_handle_wq, + &adapter->work_ctxt.msg_handle_wk); + break; + + case SXE2VF_WK_HEALTH: + if (!test_bit(SXE2VF_HEALTH_WORK_DISABLED, + &adapter->work_ctxt.state)) + sxe2vf_queue_delayed_work(adapter, + adapter->work_ctxt.health_wq, + &adapter->work_ctxt.health_wk, + msecs_to_jiffies(delay)); + break; + } +} + +void sxe2vf_wkq_cancel(struct sxe2vf_adapter *adapter, enum sxe2vf_wk_type type) +{ + struct sxe2vf_work_context *work_ctxt = &adapter->work_ctxt; + + switch (type) { + case SXE2VF_WK_MONITOR: + case SXE2VF_WK_MONITOR_IM: + set_bit(SXE2VF_MONITOR_WORK_DISABLED, &work_ctxt->state); + if (work_ctxt->monitor_wk.work.func) + cancel_delayed_work_sync(&work_ctxt->monitor_wk); + break; + + case SXE2VF_WK_MBX: + if (work_ctxt->mbx_wk.func) + cancel_work_sync(&work_ctxt->mbx_wk); + break; + + case SXE2VF_WK_NOTIFY_MSG: + if (work_ctxt->msg_handle_wk.func) + cancel_work_sync(&work_ctxt->msg_handle_wk); + break; + + case SXE2VF_WK_HEALTH: + set_bit(SXE2VF_HEALTH_WORK_DISABLED, &work_ctxt->state); + if (work_ctxt->health_wk.work.func) + cancel_delayed_work_sync(&work_ctxt->health_wk); + break; + } + + LOG_INFO_BDF("work state:0x%lx type:%u %s.\n", work_ctxt->state, type, + current->comm); +} + +STATIC void sxe2vf_wkq_cancel_all(struct sxe2vf_adapter *adapter) +{ + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_HEALTH); + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MONITOR); + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MBX); + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_NOTIFY_MSG); +} + +static void sxe2vf_mac_addr_init(struct sxe2vf_adapter *adapter) +{ + u8 *cur_mac = (u8 *)&adapter->switch_ctxt.filter_ctxt.mac_filter + .cur_mac_addr; + struct net_device *netdev = adapter->netdev; + + if (!is_valid_ether_addr(cur_mac)) { + eth_hw_addr_random(netdev); + LOG_DEV_INFO("current mac addr:%pM invalid, using random mac:%pM\n", + cur_mac, netdev->dev_addr); + ether_addr_copy(cur_mac, netdev->dev_addr); + } else { + LOG_INFO_BDF(" current mac addr:%pM.\n", cur_mac); + eth_hw_addr_set(netdev, cur_mac); + ether_addr_copy(netdev->perm_addr, cur_mac); + } +} + +static void sxe2vf_eth_deinit(struct sxe2vf_adapter *adapter) +{ + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + 
return; + + sxe2vf_auxdrv_deinit(adapter); + + sxe2vf_netdev_unregister(adapter); + + sxe2vf_ipsec_deinit(adapter); + + sxe2vf_rss_deinit(adapter); + + sxe2vf_fnav_deinit(adapter); + + sxe2vf_vsi_destroy(adapter); +} + +static void sxe2vf_irq_queue_decfg_pre(struct sxe2vf_adapter *adapter) +{ + if (adapter->com_work.func) + (void)cancel_work_sync(&adapter->com_work); + + sxe2_com_deinit(&adapter->com_ctxt); + + sxe2vf_eth_deinit(adapter); +} + +static void sxe2vf_irq_queue_decfg_post(struct sxe2vf_adapter *adapter) +{ + sxe2vf_irq_deinit(adapter); +} + +STATIC void sxe2vf_probe_post_deinit(struct sxe2vf_adapter *adapter, + enum sxe2vf_probe_post_state state) +{ + switch (state) { + case SXE2VF_PROBE_POST_INIT_DONE: + case SXE2VF_PROBE_POST_IRQ_QUEUE_CFG: + sxe2vf_debugfs_vf_exit(adapter); + sxe2vf_irq_queue_decfg_pre(adapter); + fallthrough; + + case SXE2VF_PROBE_POST_CAPS_INIT: + sxe2vf_func_caps_deinit(adapter); + fallthrough; + + case SXE2VF_PROBE_POST_VER_MATCH: + case SXE2VF_PROBE_POST_INIT_STARTED: + case SXE2VF_PROBE_POST_VER_CHK_FAIL: + sxe2vf_mbx_channel_deinit(adapter); + sxe2vf_irq_queue_decfg_post(adapter); + break; + + default: + LOG_ERROR_BDF("invalid post state:0x%x.\n", state); + break; + } + + LOG_INFO_BDF("probe post deinit from state %d.\n", state); +} + +static s32 sxe2vf_eth_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return ret; + + sxe2vf_netdev_init(adapter); + + sxe2vf_mac_addr_init(adapter); + + ret = sxe2vf_main_vsi_create(adapter); + if (ret) { + LOG_ERROR_BDF("create main vsi failed, ret=%d\n", ret); + return ret; + } + + ret = sxe2vf_rss_init(adapter); + if (ret) + goto l_vsi_destroy; + + ret = sxe2vf_fnav_init(adapter); + if (ret) + goto l_rss_deinit; + + ret = sxe2vf_ipsec_init(adapter); + if (ret) { + LOG_DEV_ERR("ipsec initial failed.\n"); + goto l_fnav_deinit; + } + + ret = sxe2vf_vlan_cfg(adapter); + if (ret) + goto l_ipsec_deinit; + + ret = sxe2vf_netdev_register(adapter); + if (ret) + goto l_ipsec_deinit; + + sxe2vf_auxdrv_init(adapter); + return ret; + +l_ipsec_deinit: + sxe2vf_ipsec_deinit(adapter); + +l_fnav_deinit: + sxe2vf_fnav_deinit(adapter); + +l_rss_deinit: + sxe2vf_rss_deinit(adapter); + +l_vsi_destroy: + sxe2vf_vsi_destroy(adapter); + + return ret; +} + +STATIC s32 sxe2vf_irq_queue_cfg(struct sxe2vf_adapter *adapter) +{ + s32 ret; + + ret = sxe2vf_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("init irq failed, ret=%d\n", ret); + return ret; + } + + ret = sxe2vf_queue_init(adapter); + if (ret) { + LOG_ERROR_BDF("queue init failed, ret=%d\n", ret); + goto l_irq_deinit; + } + + (void)sxe2vf_vsi_hw_decfg(adapter); + + ret = sxe2vf_eth_init(adapter); + if (ret) { + LOG_ERROR_BDF("eth init failed, ret=%d\n", ret); + goto l_queue_deinit; + } + + (void)schedule_work(&adapter->com_work); + + return ret; + +l_queue_deinit: + sxe2vf_queue_deinit(adapter); + +l_irq_deinit: + sxe2vf_irq_deinit(adapter); + + return ret; +} + +STATIC bool sxe2vf_reset_is_detected(struct sxe2vf_adapter *adapter) +{ + u32 i; + enum sxe2vf_dev_state state; + enum sxe2vf_reset_type reset_type; + + for (i = 0; i < SXE2VF_REMOVE_RESET_DETECT_COUNT; i++) { + sxe2vf_dev_state_get(adapter, &state, &reset_type); + + if ((state != SXE2VF_DEVSTATE_RUNNING && + state != SXE2VF_DEVSTATE_VFR_REQUEST && + state != SXE2VF_DEVSTATE_VFR_NOTIFY) || + sxe2vf_reset_detect(adapter) || + (sxe2vf_reg_read(&adapter->hw, SXE2VF_MBX_RQ_LEN) == + SXE2VF_REG_INVAL_VALUE && + pci_wait_for_pending_transaction(adapter->pdev))) { 
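+			/* A reset counts as detected when the state machine
+			 * has left the running/VFR states, the reset bit is
+			 * latched, or the mailbox register reads back the
+			 * invalid pattern while transactions are pending.
+			 */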
+ LOG_INFO_BDF("detected reset_type:0x%x state:0x%x reg:0x%x\t" + "value:0x%x.\n", + reset_type, state, SXE2VF_MBX_RQ_LEN, + sxe2vf_reg_read(&adapter->hw, + SXE2VF_MBX_RQ_LEN)); + return true; + } + msleep(SXE2VF_ACTIVE_WAIT_INTERVAL); + } + + LOG_DEV_ERR("reset detect fail.reset_type:0x%x state:0x%x.\n", reset_type, + state); + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + + return false; +} + +static s32 sxe2vf_dev_cfg_clear(struct sxe2vf_adapter *adapter) +{ + if (sxe2vf_mbx_channel_init(adapter)) { + LOG_ERROR_BDF("mbx channel init failed.\n"); + goto l_error; + } + + (void)sxe2vf_reset_msg_send(adapter); + + sxe2vf_mbx_channel_deinit(adapter); + + adapter->work_ctxt.is_send = true; + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_VFR_REQUEST, + SXE2VF_RESET_NONE); + + return 0; + +l_error: + sxe2vf_mbx_channel_deinit(adapter); + return -EIO; +} + +static s32 sxe2vf_probe_post_prepare(struct sxe2vf_adapter *adapter) +{ + sxe2vf_event_irq_disable(adapter); + + if (sxe2vf_mbx_channel_init(adapter)) { + LOG_DEV_ERR("mbx channel init failed.\n"); + goto l_error; + } + + return 0; + +l_error: + sxe2vf_mbx_channel_deinit(adapter); + return -EIO; +} + +STATIC void sxe2vf_probe_post_build(struct sxe2vf_adapter *adapter) +{ + u32 new; + s32 ret = 0; + struct sxe2vf_work_context *work_ctxt = &adapter->work_ctxt; + + switch (work_ctxt->post_state) { + case SXE2VF_PROBE_POST_INIT_STARTED: + if (sxe2vf_probe_post_prepare(adapter)) + goto l_retry; + + new = SXE2VF_PROBE_POST_VER_MATCH; + break; + + case SXE2VF_PROBE_POST_VER_MATCH: + ret = sxe2vf_drv_ver_match(adapter); + if (ret) { + if (ret == -ETIMEDOUT) + goto l_retry; + new = SXE2VF_PROBE_POST_VER_CHK_FAIL; + } else { + new = SXE2VF_PROBE_POST_CAPS_INIT; + } + break; + + case SXE2VF_PROBE_POST_CAPS_INIT: + + ret = sxe2vf_drv_mode_get(adapter, SXE2VF_MSG_RESP_WAIT_POLLING); + if (ret) { + adapter->drv_mode = SXE2_COM_MODULE_MIXED; + LOG_ERROR_BDF("get drv mode failed, ret=%d\n", ret); + } + + if (sxe2vf_func_caps_init(adapter)) + goto l_retry; + + new = SXE2VF_PROBE_POST_IRQ_QUEUE_CFG; + break; + + case SXE2VF_PROBE_POST_IRQ_QUEUE_CFG: + if (sxe2vf_irq_queue_cfg(adapter)) + goto l_retry; + + sxe2vf_post_state_update(work_ctxt, SXE2VF_PROBE_POST_INIT_DONE); + + set_bit(SXE2VF_FLAG_DRV_PROBE_DONE, adapter->flags); + + LOG_INFO_BDF("probe post done.\n"); + + sxe2vf_debugfs_vf_init(adapter); + + return; + + case SXE2VF_PROBE_POST_VER_CHK_FAIL: + return; + default: + LOG_ERROR_BDF("invalid probe post state:0x%x.\n", + work_ctxt->post_state); + SXE2_BUG(); + goto l_retry; + } + + sxe2vf_post_state_update(work_ctxt, new); + return; + +l_retry: + return; +} + +static s32 sxe2vf_hw_caps_recfg(struct sxe2vf_adapter *adapter) +{ + s32 ret; + + ret = sxe2vf_func_caps_init(adapter); + if (ret) { + LOG_ERROR_BDF("hw caps get fail during reset.\n"); + return ret; + } + + sxe2vf_event_irq_enable(adapter); + + return ret; +} + +static void sxe2vf_eth_stop(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + s32 ret; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return; + + sxe2vf_auxdrv_send_reset_event(adapter); + + (void)sxe2vf_ipsec_stop(adapter); + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2vf_vsi_disable(vsi); + if (ret) + LOG_INFO_BDF("vsi:%d disable failed.(err:%d)\n", vsi->vsi_id, ret); + mutex_unlock(&adapter->vsi_ctxt.lock); +} + +static s32 sxe2vf_rebuild_prepare(struct sxe2vf_adapter *adapter) +{ + s32 ret; + + sxe2vf_event_irq_disable(adapter); + + ret = sxe2vf_mbx_channel_init(adapter); + if 
(ret) {
+		LOG_ERROR_BDF("mbx channel init failed after reset.(err:%d)\n", ret);
+		goto l_end;
+	}
+
+	ret = sxe2vf_hw_caps_recfg(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("irq queue recfg failed.(err:%d)\n", ret);
+		goto l_caps_get_fail;
+	}
+
+	return ret;
+
+l_caps_get_fail:
+	sxe2vf_mbx_channel_deinit(adapter);
+
+l_end:
+	return ret;
+}
+
+STATIC void __sxe2vf_vf_stop(struct sxe2vf_adapter *adapter)
+{
+	mutex_lock(&adapter->dev_ctxt.vf_lock);
+
+	if (!test_and_set_bit(SXE2VF_VF_DISABLE, adapter->dev_ctxt.state)) {
+		sxe2_com_disable(&adapter->com_ctxt);
+
+		sxe2vf_eth_stop(adapter);
+
+		sxe2vf_mbx_channel_deinit(adapter);
+
+		sxe2vf_event_irq_disable(adapter);
+	}
+	mutex_unlock(&adapter->dev_ctxt.vf_lock);
+}
+
+STATIC void sxe2vf_vf_stop(struct sxe2vf_adapter *adapter)
+{
+	if (!sxe2vf_post_probe_is_start(adapter))
+		return;
+
+	if (sxe2vf_post_probe_is_done(adapter)) {
+		__sxe2vf_vf_stop(adapter);
+		adapter->work_ctxt.failed_cnt = 0;
+	} else {
+		sxe2vf_probe_post_deinit(adapter, adapter->work_ctxt.post_state);
+		sxe2vf_post_state_update(&adapter->work_ctxt,
+					 SXE2VF_PROBE_POST_INIT_STARTED);
+	}
+}
+
+static s32 sxe2vf_eth_rebuild(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		return ret;
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	ret = sxe2vf_vsi_rebuild(vsi);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u rebuild failed.(err:%d)\n", vsi->vsi_id, ret);
+		goto l_out;
+	}
+
+	(void)sxe2vf_ipsec_rebuild(adapter);
+
+	sxe2vf_auxdrv_init(adapter);
+
+	rtnl_lock();
+	ret = sxe2vf_vlan_cfg_rebuild(adapter);
+	if (ret) {
+		rtnl_unlock();
+		goto l_out;
+	}
+
+	set_bit(SXE2VF_FLAG_UPDATE_NETDEV_FEATURES, adapter->flags);
+
+	sxe2vf_adv_cfg_restore(adapter);
+
+	(void)netif_set_real_num_rx_queues(adapter->netdev, vsi->rxqs.q_cnt);
+	(void)netif_set_real_num_tx_queues(adapter->netdev, vsi->txqs.q_cnt);
+	rtnl_unlock();
+
+	mutex_lock(&adapter->vsi_ctxt.lock);
+	if (test_bit(SXE2VF_FLAG_DRV_UP, adapter->flags)) {
+		ret = __sxe2vf_vsi_open(vsi, false, true);
+		if (ret) {
+			mutex_unlock(&adapter->vsi_ctxt.lock);
+			LOG_ERROR_BDF("vsi:%d open failed.(err:%d)\n", vsi->vsi_id,
+				      ret);
+			goto l_out;
+		}
+	}
+	clear_bit(SXE2VF_VSI_DISABLE, vsi->state);
+	mutex_unlock(&adapter->vsi_ctxt.lock);
+
+l_out:
+	return ret;
+}
+
+STATIC s32 __sxe2vf_vf_rebuild(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+
+	mutex_lock(&adapter->dev_ctxt.vf_lock);
+
+	sxe2vf_auxdrv_deinit(adapter);
+
+	ret = sxe2vf_rebuild_prepare(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("vf rebuild prepare failed during vfr.ret:%d\n", ret);
+		goto l_err;
+	}
+
+	ret = sxe2vf_eth_rebuild(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("vf eth rebuild failed during vfr.ret:%d\n", ret);
+		goto l_eth_rebuild_failed;
+	}
+
+	sxe2_com_enable(&adapter->com_ctxt);
+
+	clear_bit(SXE2VF_VF_DISABLE, adapter->dev_ctxt.state);
+	mutex_unlock(&adapter->dev_ctxt.vf_lock);
+
+	LOG_INFO_BDF("vf rebuild done.\n");
+
+	return ret;
+
+l_eth_rebuild_failed:
+	sxe2vf_mbx_channel_deinit(adapter);
+	sxe2vf_event_irq_disable(adapter);
+
+l_err:
+	mutex_unlock(&adapter->dev_ctxt.vf_lock);
+	return ret;
+}
+
+STATIC s32 sxe2vf_vf_rebuild(struct sxe2vf_adapter *adapter)
+{
+	if (sxe2vf_post_probe_is_done(adapter)) {
+		if (__sxe2vf_vf_rebuild(adapter))
+			return -EIO;
+	} else {
+		adapter->work_ctxt.post_state =
+			adapter->work_ctxt.post_state
+				?: SXE2VF_PROBE_POST_INIT_STARTED;
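+		/* Probe never finished; resume the staged post-probe state
+		 * machine instead of doing a full rebuild.
+		 */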
sxe2vf_probe_post_build(adapter); + } + + return 0; +} + +STATIC s32 sxe2vf_update_features(struct sxe2vf_adapter *adapter) +{ + if (test_bit(SXE2VF_FLAG_UPDATE_NETDEV_FEATURES, adapter->flags)) { + if (!rtnl_trylock()) + return -EBUSY; + + netdev_update_features(adapter->netdev); + rtnl_unlock(); + clear_bit(SXE2VF_FLAG_UPDATE_NETDEV_FEATURES, adapter->flags); + } + return 0; +} + +void sxe2vf_dev_state_get(struct sxe2vf_adapter *adapter, + enum sxe2vf_dev_state *state, + enum sxe2vf_reset_type *reset_type) +{ + struct sxe2vf_work_context *work_ctxt = &adapter->work_ctxt; + unsigned long flags; + + spin_lock_irqsave(&work_ctxt->state_lock, flags); + + if (state) + *state = work_ctxt->dev_state; + if (reset_type) + *reset_type = work_ctxt->reset_type; + + spin_unlock_irqrestore(&work_ctxt->state_lock, flags); +} + +void sxe2vf_dev_state_set(struct sxe2vf_adapter *adapter, + enum sxe2vf_dev_state new_state, + enum sxe2vf_reset_type new_reset_type) +{ + enum sxe2vf_dev_state cur_state; + enum sxe2vf_reset_type cur_reset_type; + unsigned long flags; + struct sxe2vf_work_context *work_ctxt = &adapter->work_ctxt; + + spin_lock_irqsave(&work_ctxt->state_lock, flags); + + cur_state = work_ctxt->dev_state; + cur_reset_type = work_ctxt->reset_type; + + if (cur_state == SXE2VF_DEVSTATE_FAULT) + goto l_unlock; + + switch (new_state) { + case SXE2VF_DEVSTATE_STOPPED: + if (cur_state == SXE2VF_DEVSTATE_RESETTING && + cur_reset_type == SXE2VF_RESET_CORER) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + + case SXE2VF_DEVSTATE_UNACTIVED: + if ((cur_state == SXE2VF_DEVSTATE_RESETTING && + new_reset_type == SXE2VF_RESET_VFR) || + cur_state == SXE2VF_DEVSTATE_INITIAL || + cur_state == SXE2VF_DEVSTATE_STOPPED) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + + case SXE2VF_DEVSTATE_ACTIVED: + if (cur_state == SXE2VF_DEVSTATE_UNACTIVED) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + + case SXE2VF_DEVSTATE_RUNNING: + if (cur_state == SXE2VF_DEVSTATE_ACTIVED) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + + case SXE2VF_DEVSTATE_VFR_REQUEST: + if (cur_state == SXE2VF_DEVSTATE_RUNNING || + cur_state == SXE2VF_DEVSTATE_ACTIVED) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + case SXE2VF_DEVSTATE_VFR_NOTIFY: + if (cur_state == SXE2VF_DEVSTATE_RUNNING) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = SXE2VF_RESET_NONE; + } + break; + + case SXE2VF_DEVSTATE_RESETTING: + if ((cur_state != SXE2VF_DEVSTATE_STOPPED && + new_reset_type == SXE2VF_RESET_VFR) || + new_reset_type == SXE2VF_RESET_CORER) { + work_ctxt->dev_state = new_state; + work_ctxt->reset_type = new_reset_type; + } + break; + + case SXE2VF_DEVSTATE_INITIAL: + case SXE2VF_DEVSTATE_FAULT: + work_ctxt->dev_state = new_state; + LOG_DEBUG_BDF("new state:0x%x\n", new_state); + break; + + default: + LOG_ERROR_BDF("Invalid device state %d\n", new_state); + break; + } + +l_unlock: + spin_unlock_irqrestore(&work_ctxt->state_lock, flags); + LOG_DEBUG_BDF("cur_state:%u cur_reset_type:%u,\t" + "new_state:%u new_reset_type:%u, final state:%u\t" + "reset_type:%u\n", + cur_state, cur_reset_type, new_state, new_reset_type, + work_ctxt->dev_state, work_ctxt->reset_type); +} + +static bool sxe2vf_hw_corer_check_lock(struct sxe2vf_hw *hw) +{ + struct sxe2vf_adapter *adapter = (struct sxe2vf_adapter *)hw->adapter; + 
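+	/* reset_detect_lock serializes this check with the other reset
+	 * detection paths on the same adapter.
+	 */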
bool ret; + + mutex_lock(&adapter->work_ctxt.reset_detect_lock); + ret = sxe2vf_hw_corer_check(hw); + mutex_unlock(&adapter->work_ctxt.reset_detect_lock); + + return ret; +} + +static bool sxe2vf_hw_vfr_is_checked_lock(struct sxe2vf_hw *hw) +{ + struct sxe2vf_adapter *adapter = hw->adapter; + bool ret = false; + + mutex_lock(&adapter->work_ctxt.reset_detect_lock); + ret = sxe2vf_hw_vfr_is_checked(hw); + mutex_unlock(&adapter->work_ctxt.reset_detect_lock); + + return ret; +} + +s32 sxe2vf_reset_detect(struct sxe2vf_adapter *adapter) +{ + s32 ret = false; + + if (sxe2vf_hw_corer_check_lock(&adapter->hw)) { + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_RESETTING, + SXE2VF_RESET_CORER); + ret = true; + } else if (sxe2vf_hw_vfr_is_checked_lock(&adapter->hw)) { + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_RESETTING, + SXE2VF_RESET_VFR); + ret = true; + } + return ret; +} + +STATIC s32 sxe2vf_pci_drop_stop(struct sxe2vf_adapter *adapter) +{ + u32 cnt; + struct sxe2vf_hw *hw = &adapter->hw; + + sxe2vf_hw_corer_stop_drop(hw); + + for (cnt = 0; cnt < SXE2VF_STOP_DROP_TIMEOUT; cnt++) { + msleep(SXE2VF_STOP_DROP_DONE_INTERVAL); + if (sxe2vf_hw_corer_stop_drop_done(hw)) + break; + } + + if (cnt == SXE2VF_STOP_DROP_TIMEOUT) { + LOG_ERROR_BDF("stop PCIe drop timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static s32 sxe2vf_stopped_state_proc(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + sxe2vf_vf_stop(adapter); + + ret = sxe2vf_pci_drop_stop(adapter); + if (!ret) + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_UNACTIVED, + SXE2VF_RESET_NONE); + + return ret; +} + +static void sxe2vf_common_err_handle(struct sxe2vf_adapter *adapter, + u32 fail_max_cnt, enum sxe2vf_dev_state state) +{ + struct sxe2vf_work_context *work_ctxt = &adapter->work_ctxt; + + work_ctxt->failed_cnt++; + + LOG_WARN_BDF("post state:%u dev_state:%u reset_type:%u \t" + "fail cnt:%u max:%u\n", + work_ctxt->post_state, work_ctxt->dev_state, + work_ctxt->reset_type, work_ctxt->failed_cnt, fail_max_cnt); + + if (work_ctxt->failed_cnt > fail_max_cnt || + work_ctxt->post_state == SXE2VF_PROBE_POST_VER_CHK_FAIL) + sxe2vf_dev_state_set(adapter, state, SXE2VF_RESET_NONE); +} + +static s32 sxe2vf_unactive_state_proc(struct sxe2vf_adapter *adapter) +{ + u32 i; + + for (i = 0; i < SXE2VF_RESET_ACTIVE_WAIT_COUNT; i++) { + if (sxe2vf_hw_vf_is_active(&adapter->hw)) { + adapter->work_ctxt.failed_cnt = 0; + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_ACTIVED, + SXE2VF_RESET_NONE); + return 0; + } + msleep(SXE2VF_ACTIVE_WAIT_INTERVAL); + } + + LOG_ERROR_BDF("wait vf dev active timeout failed_cnt:%d.\n", + adapter->work_ctxt.failed_cnt); + return -ETIMEDOUT; +} + +static s32 sxe2vf_active_state_proc(struct sxe2vf_adapter *adapter) +{ + pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); + + if (!adapter->work_ctxt.is_clear) { + if (!sxe2vf_dev_cfg_clear(adapter)) { + adapter->work_ctxt.is_clear = true; + LOG_INFO_BDF("vf hw cfg clear msg send done\n"); + } + return 0; + } + + if (sxe2vf_vf_rebuild(adapter)) + return -EIO; + + adapter->work_ctxt.failed_cnt = 0; + if (sxe2vf_post_probe_is_done(adapter)) + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_RUNNING, + SXE2VF_RESET_NONE); + + return 0; +} + +STATIC void sxe2vf_mtu_changed_handler(struct sxe2vf_adapter *adapter) +{ + if (test_and_clear_bit(SXE2VF_FLAG_MTU_CHANGED, adapter->flags)) + (void)sxe2vf_rdma_aux_send_mtu_changed_event(adapter); +} + +STATIC s32 sxe2vf_running_state_proc(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + if 
(sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return ret; + + ret = sxe2vf_l2_filter_cfg_sync(adapter); + if (ret) + goto l_out; + + ret = sxe2vf_update_features(adapter); + if (ret) + goto l_out; + +#ifdef SXE2_CFG_DEBUG + if (g_vf_switch_stats) + ret = sxe2vf_stats_push_sync(adapter); +#else + ret = sxe2vf_stats_push_sync(adapter); +#endif + + sxe2vf_mtu_changed_handler(adapter); + +l_out: + return ret; +} + +static s32 sxe2vf_vfr_req_state_proc(struct sxe2vf_adapter *adapter) +{ + if (!adapter->work_ctxt.is_send && sxe2vf_reset_msg_send(adapter)) + return -EIO; + + adapter->work_ctxt.is_send = true; + + return 0; +} + +static s32 sxe2vf_corer_done(struct sxe2vf_adapter *adapter) +{ + u32 i; + + for (i = 0; i < SXE2VF_CORER_WAIT_DONE_COUNT; i++) { + if (sxe2vf_hw_corer_done(&adapter->hw)) + break; + msleep(SXE2VF_CORER_DONE_WAIT_INTERVAL); + } + + if (i == SXE2VF_CORER_WAIT_DONE_COUNT) { + LOG_ERROR_BDF("wait core reset done timeout.\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static s32 sxe2vf_resetting_state_proc(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + adapter->work_ctxt.is_send = false; + adapter->work_ctxt.failed_cnt = 0; + adapter->work_ctxt.is_clear = true; + + if (adapter->work_ctxt.reset_type == SXE2VF_RESET_CORER) { + sxe2vf_waitq_entry_cancel(adapter); + + ret = sxe2vf_corer_done(adapter); + if (!ret) { + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_STOPPED, + SXE2VF_RESET_CORER); + sxe2vf_auxdrv_send_reset_event(adapter); + } + } else if (adapter->work_ctxt.reset_type == SXE2VF_RESET_VFR) { + sxe2vf_waitq_entry_cancel(adapter); + + sxe2vf_vf_stop(adapter); + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_UNACTIVED, + SXE2VF_RESET_VFR); + } else { + ret = -EINVAL; + LOG_ERROR_BDF("dev state:%u invalid reset type:%u.\n", + adapter->work_ctxt.dev_state, + adapter->work_ctxt.reset_type); + } + + return ret; +} + +static s32 sxe2vf_fault_state_proc(struct sxe2vf_adapter *adapter) +{ + sxe2vf_vf_stop(adapter); + set_bit(SXE2VF_MONITOR_WORK_DISABLED, &adapter->work_ctxt.state); + + return 0; +} + +static s32 sxe2vf_reset_notify_state_proc(struct sxe2vf_adapter *adapter) +{ + sxe2vf_auxdrv_send_reset_event(adapter); + + return 0; +} + +STATIC s32 sxe2vf_dev_state_proc(struct sxe2vf_adapter *adapter, u32 *delay) +{ + s32 ret = 0; + enum sxe2vf_dev_state state; + enum sxe2vf_reset_type reset_type; + + *delay = SXE2VF_WOKER_DELAY_10MS; + + (void)sxe2vf_reset_detect(adapter); + + sxe2vf_dev_state_get(adapter, &state, &reset_type); + + switch (state) { + case SXE2VF_DEVSTATE_INITIAL: + case SXE2VF_DEVSTATE_STOPPED: + ret = sxe2vf_stopped_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_UNACTIVED: + ret = sxe2vf_unactive_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_ACTIVED: + ret = sxe2vf_active_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_RUNNING: + if (!sxe2vf_running_state_proc(adapter)) + *delay = SXE2VF_WOKER_DELAY_2S; + break; + + case SXE2VF_DEVSTATE_VFR_REQUEST: + ret = sxe2vf_vfr_req_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_RESETTING: + ret = sxe2vf_resetting_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_FAULT: + ret = sxe2vf_fault_state_proc(adapter); + break; + + case SXE2VF_DEVSTATE_VFR_NOTIFY: + (void)sxe2vf_reset_notify_state_proc(adapter); + *delay = SXE2VF_WOKER_DELAY_5MS; + LOG_WARN_BDF("rcv vf reset notify.\n"); + break; + + default: + LOG_ERROR_BDF("Invalid device state %d\n", state); + break; + } + + if (ret) + sxe2vf_common_err_handle(adapter, SXE2VF_DEVSTATE_PROC_FAIL_CNT, + 
SXE2VF_DEVSTATE_FAULT); + + return ret; +} + +STATIC void sxe2vf_monitor_wk_cb(struct work_struct *work) +{ + struct sxe2vf_work_context *work_ctxt = + container_of(to_delayed_work(work), + struct sxe2vf_work_context, monitor_wk); + struct sxe2vf_adapter *adapter = + container_of(work_ctxt, struct sxe2vf_adapter, work_ctxt); + u32 delay = SXE2VF_WOKER_DELAY_2S; + + LOG_DEBUG_BDF("monitor work scheduled by %s.\n", current->comm); + + if (test_bit(SXE2VF_MONITOR_WORK_DISABLED, &work_ctxt->state)) { + LOG_INFO_BDF("monitor work disabled, no need to reschedule.\n"); + return; + } + + if (!mutex_trylock(&work_ctxt->monitor_lock)) { + LOG_INFO_BDF("monitor lock occupied, monitor work delay\t" + "scheduled.\n"); + goto l_reschedule; + } + + (void)sxe2vf_dev_state_proc(adapter, &delay); + + mutex_unlock(&work_ctxt->monitor_lock); + +l_reschedule: + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR, delay); +} + +STATIC void sxe2vf_mbx_wk_cb(struct work_struct *work) +{ + struct sxe2vf_work_context *wk = + container_of(work, struct sxe2vf_work_context, mbx_wk); + struct sxe2vf_adapter *adapter = + container_of(wk, struct sxe2vf_adapter, work_ctxt); + + (void)wk; + + (void)sxe2vf_mbx_msg_rcv(adapter); + + sxe2vf_hw_event_irq_enable(&adapter->hw); +} + +STATIC int sxe2vf_wait_config_space_accessible(struct sxe2vf_adapter *adapter) +{ + u32 delay = 1; + u32 val; + u32 timeout = SXE2VF_WAIT_CONFIG_ACCESSIBLE_TIMEOUT_MS; + + (void)pci_read_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, &val); + while (val == SXE2VF_REG_INVAL_VALUE) { + if (delay > timeout) { + LOG_DEV_ERR("configuration space inaccessible. please check\t" + "the device.\n"); + return -ENOTTY; + } + + msleep(delay); + delay *= 2; + (void)pci_read_config_dword(adapter->pdev, SXE2VF_PCIE_SYS_READY, &val); + } + + return 0; +} + +STATIC void sxe2vf_health_wk_cb(struct work_struct *health_wk) +{ + struct sxe2vf_work_context *work_ctxt = + container_of(to_delayed_work(health_wk), + struct sxe2vf_work_context, health_wk); + struct sxe2vf_adapter *adapter = + container_of(work_ctxt, struct sxe2vf_adapter, work_ctxt); + enum sxe2vf_dev_state state; + + if (test_bit(SXE2VF_HEALTH_WORK_DISABLED, &work_ctxt->state)) { + LOG_INFO_BDF("health work disabled, no need to reschedule.\n"); + return; + } + + if (sxe2vf_wait_config_space_accessible(adapter)) { + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_FAULT, + SXE2VF_RESET_NONE); + sxe2vf_waitq_entry_cancel(adapter); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + return; + } + + if (sxe2vf_reset_detect(adapter)) { + sxe2vf_waitq_entry_cancel(adapter); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + } + + sxe2vf_dev_state_get(adapter, &state, NULL); + if (state != SXE2VF_DEVSTATE_FAULT) + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_HEALTH, + SXE2VF_WOKER_DELAY_2S); +} + +STATIC void sxe2vf_mbx_waitq_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mbx_waitq *waitq = &adapter->channel_ctxt.waitq; + + spin_lock_init(&waitq->lock); + init_waitqueue_head(&waitq->wq); + hash_init(waitq->table); +} + +static s32 sxe2vf_health_init(struct sxe2vf_adapter *adapter) +{ + char *name; + s32 ret = 0; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + name = kmalloc(SXE2VF_WORKQUEUE_NAME_LEN, GFP_KERNEL); + if (!name) { + LOG_DEV_ERR("sxe2vf health workqueue name alloc failed.\n"); + ret = -ENOMEM; + goto l_out; + } + + snprintf(name, SXE2VF_WORKQUEUE_NAME_LEN, "sxe2vf_health%s", dev_name(dev)); + + adapter->work_ctxt.health_wq = create_singlethread_workqueue(name); + kfree(name); + if 
(!adapter->work_ctxt.health_wq) { + LOG_PR_ERR("failed to create health workqueue\n"); + ret = -EIO; + goto l_out; + } + + INIT_DELAYED_WORK(&adapter->work_ctxt.health_wk, sxe2vf_health_wk_cb); + +l_out: + return ret; +} + +static void sxe2vf_health_deinit(struct sxe2vf_adapter *adapter) +{ + destroy_workqueue(adapter->work_ctxt.health_wq); +} + +STATIC void sxe2vf_com_wk_cb(struct work_struct *work) +{ + s32 ret; + struct sxe2vf_adapter *adapter = + container_of(work, struct sxe2vf_adapter, com_work); + + ret = sxe2_com_init(&adapter->com_ctxt, adapter, &g_com_ops); + if (ret) + LOG_DEV_ERR("sxe2_com_init failed: %d.\n", ret); +} + +STATIC s32 sxe2vf_sw_base_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + mutex_init(&adapter->link_ctxt.link_lock); + + mutex_init(&adapter->vsi_ctxt.lock); + mutex_init(&adapter->dev_ctxt.vf_lock); + + mutex_init(&adapter->channel_ctxt.rxq.lock); + mutex_init(&adapter->channel_ctxt.txq.lock); + mutex_init(&adapter->channel_ctxt.list.lock); + + mutex_init(&adapter->switch_ctxt.mac_addr_lock); + mutex_init(&adapter->switch_ctxt.flag_lock); + mutex_init(&adapter->switch_ctxt.filter_ctxt.vlan_info.vlan_lock); + mutex_init(&adapter->switch_ctxt.user_fltr_ctxt.vlan_info.vlan_lock); + + mutex_init(&adapter->work_ctxt.reset_detect_lock); + + INIT_LIST_HEAD(&adapter->switch_ctxt.filter_ctxt.mac_filter.mac_addr_list); + INIT_LIST_HEAD(&adapter->switch_ctxt.filter_ctxt.vlan_info.vlan_list); + INIT_LIST_HEAD(&adapter->switch_ctxt.user_fltr_ctxt.mac_filter + .mac_addr_list); + INIT_LIST_HEAD(&adapter->switch_ctxt.user_fltr_ctxt.vlan_info.vlan_list); + INIT_LIST_HEAD(&adapter->channel_ctxt.list.head); + + init_waitqueue_head(&adapter->msg_ctxt.reply_waitqueue); + + mutex_init(&adapter->rss_ctxt.rss_cfgs_lock); + INIT_LIST_HEAD(&adapter->rss_ctxt.rss_cfgs); + + INIT_DELAYED_WORK(&adapter->aux_ctxt.init_task, sxe2vf_aux_init_task); + adapter->aux_ctxt.vfadapter = adapter; + mutex_init(&adapter->aux_ctxt.adev_mutex); + + mutex_init(&adapter->work_ctxt.monitor_lock); + + INIT_DELAYED_WORK(&adapter->work_ctxt.monitor_wk, sxe2vf_monitor_wk_cb); + INIT_WORK(&adapter->work_ctxt.mbx_wk, sxe2vf_mbx_wk_cb); + INIT_WORK(&adapter->work_ctxt.msg_handle_wk, sxe2vf_notify_msg_wk_cb); + + INIT_WORK(&adapter->com_work, sxe2vf_com_wk_cb); + ATOMIC_INIT_NOTIFIER_HEAD(&adapter->com_ctxt.irqs.irq_nh); + + sxe2vf_mbx_waitq_init(adapter); + + spin_lock_init(&adapter->work_ctxt.state_lock); + + sxe2vf_trace_id_init(); + + sxe2vf_cmd_session_id_init(); + + ret = sxe2vf_health_init(adapter); + + return ret; +} + +STATIC void sxe2vf_sw_base_deinit(struct sxe2vf_adapter *adapter) +{ + sxe2vf_health_deinit(adapter); + + mutex_destroy(&adapter->aux_ctxt.adev_mutex); + + sxe2vf_filter_list_destroy(adapter); + + mutex_destroy(&adapter->vsi_ctxt.lock); + + mutex_destroy(&adapter->work_ctxt.monitor_lock); + mutex_destroy(&adapter->dev_ctxt.vf_lock); + + mutex_destroy(&adapter->channel_ctxt.rxq.lock); + mutex_destroy(&adapter->channel_ctxt.txq.lock); + mutex_destroy(&adapter->channel_ctxt.list.lock); + + mutex_destroy(&adapter->link_ctxt.link_lock); + mutex_destroy(&adapter->switch_ctxt.filter_ctxt.vlan_info.vlan_lock); + mutex_destroy(&adapter->switch_ctxt.user_fltr_ctxt.vlan_info.vlan_lock); + mutex_destroy(&adapter->switch_ctxt.flag_lock); + mutex_destroy(&adapter->switch_ctxt.mac_addr_lock); + + mutex_destroy(&adapter->rss_ctxt.rss_cfgs_lock); + + mutex_destroy(&adapter->work_ctxt.reset_detect_lock); +} + +STATIC int sxe2vf_probe(struct pci_dev *pdev, + const struct pci_device_id 
__always_unused *ent) +{ + int ret; + struct sxe2vf_adapter *adapter; + + adapter = sxe2vf_adapter_create(pdev); + if (!adapter) { + LOG_ERROR("can't probe virtual\n"); + return -ENOMEM; + } + + sxe2vf_msg_level_init(adapter); + + ret = sxe2vf_pci_init(adapter); + if (ret) { + LOG_DEV_ERR("pci init failed, ret=%d\n", ret); + goto l_pci_init_fail; + } + + sxe2vf_hw_base_init(adapter); + + ret = sxe2vf_sw_base_init(adapter); + if (ret) { + LOG_DEV_ERR("sw base init failed. ret:%d.\n", ret); + goto l_pci_init_fail; + } + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_INITIAL, SXE2VF_RESET_NONE); + + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR, + SXE2VF_WOKER_DELAY_5MS * (adapter->pdev->devfn & + SXE2VF_DEV_FUNC_MASK)); + + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_HEALTH, SXE2VF_WOKER_DELAY_5MS); + + return ret; + +l_pci_init_fail: + free_netdev(adapter->netdev); + + return ret; +} + +STATIC void sxe2vf_pci_deinit(struct sxe2vf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + + iounmap(adapter->hw.reg_base_addr); + +#ifdef HAVE_PCI_ENABLE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(adapter->pdev); +#endif + + free_netdev(netdev); + + pci_set_drvdata(pdev, NULL); + + pci_release_regions(pdev); + + pci_disable_device(pdev); +} + +STATIC void sxe2vf_remove(struct pci_dev *pdev) +{ + struct sxe2vf_adapter *adapter = SXE2VF_DEV_TO_ADAPTER(pdev); + + if (!adapter) { + LOG_WARN("adapter NULL, skip vf remove oper.\n"); + return; + } + + LOG_INFO_BDF("vf driver remove start.\n"); + + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MONITOR); + + adapter->dev_ctxt.remove = true; + sxe2vf_auxdrv_deinit(adapter); + + (void)sxe2vf_reset_msg_send(adapter); + + if (sxe2vf_reset_is_detected(adapter)) { + set_bit(SXE2VF_FLAG_DRV_REMOVING, adapter->flags); + sxe2vf_waitq_entry_cancel(adapter); + } + + sxe2vf_probe_post_deinit(adapter, adapter->work_ctxt.post_state); + + sxe2vf_wkq_cancel_all(adapter); + + sxe2vf_sw_base_deinit(adapter); + + LOG_INFO_BDF("vf remove done.\n"); + + sxe2vf_pci_deinit(adapter); +} + +STATIC bool sxe2vf_wait_reset_done(struct sxe2vf_adapter *adapter) +{ + u32 i; + + for (i = 0; i < SXE2VF_RESET_COMPLETE_WAIT_COUNT; i++) { + if (sxe2vf_hw_vfr_is_complete(&adapter->hw)) { + sxe2vf_hw_vfr_clear(&adapter->hw); + LOG_INFO_BDF("wait vf reset done success\n"); + return true; + } + msleep(SXE2VF_ACTIVE_WAIT_INTERVAL); + } + LOG_WARN_BDF("wait vf reset done timeout\n"); + return false; +} + +STATIC pci_ers_result_t sxe2vf_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = (struct net_device *)pdev->dev.driver_data; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; + + LOG_DEV_WARN("pci err:%u detected begin.\n", state); + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable pci err:0x%x\n", + __func__, state); + goto l_out; + } + + if (!test_bit(SXE2VF_FLAG_SUSPEND, adapter->flags)) { + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MONITOR); + sxe2vf_vf_stop(adapter); + } + + pci_disable_device(pdev); + + ret = state == pci_channel_io_perm_failure ? 
PCI_ERS_RESULT_DISCONNECT + : PCI_ERS_RESULT_NEED_RESET; + +l_out: + LOG_DEV_WARN("pci err:%u detected end ret:%d.\n", state, ret); + + return ret; +} + +STATIC pci_ers_result_t sxe2vf_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = (struct net_device *)pdev->dev.driver_data; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + pci_ers_result_t pci_ret = PCI_ERS_RESULT_DISCONNECT; + s32 ret; + bool reset_done = false; + + LOG_DEV_WARN("pci err slot reset begin.\n"); + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable\n", __func__); + goto l_out; + } + + ret = pci_enable_device_mem(pdev); + if (ret) { + LOG_DEV_ERR("Cannot re-enable PCI device after reset, error %d\n", + ret); + goto l_out; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + (void)pci_save_state(pdev); + (void)pci_wake_from_d3(pdev, false); + + reset_done = sxe2vf_wait_reset_done(adapter); + if (reset_done) + pci_ret = PCI_ERS_RESULT_RECOVERED; + +l_out: + LOG_DEV_WARN("pci err slot reset end(%d) ret:%d.\n", reset_done, pci_ret); + return pci_ret; +} + +STATIC void sxe2vf_pci_err_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = (struct net_device *)pdev->dev.driver_data; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + LOG_DEV_WARN("pci err resume begin.\n"); + + if (!adapter) { + LOG_DEV_ERR("%s failed, device is unrecoverable\n", __func__); + goto l_out; + } + + if (test_bit(SXE2VF_FLAG_SUSPEND, adapter->flags)) { + LOG_DEV_ERR("%s failed to resume normal operations!\n", __func__); + goto l_out; + } + + pci_restore_msi_state(pdev); + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_INITIAL, SXE2VF_RESET_NONE); + clear_bit(SXE2VF_MONITOR_WORK_DISABLED, &adapter->work_ctxt.state); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); +l_out: + LOG_DEV_WARN("pci err resume end.\n"); +} + +#ifdef CONFIG_PM +static int __maybe_unused sxe2vf_pm_suspend(struct device *dev) +{ + s32 ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = (struct net_device *)pdev->dev.driver_data; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("vf pm suspend was called\n"); + + if (test_and_set_bit(SXE2VF_FLAG_SUSPEND, adapter->flags)) + goto l_end; + + sxe2vf_wkq_cancel(adapter, SXE2VF_WK_MONITOR); + + sxe2vf_vf_stop(adapter); + + sxe2vf_irq_deinit(adapter); + +l_end: + LOG_DEBUG_BDF("vf pm suspend end msix_enabled:%u current_state:%d ret=%d\n", + adapter->pdev->msix_enabled, adapter->pdev->current_state, + ret); + return ret; +} + +static int __maybe_unused sxe2vf_pm_resume(struct device *dev) +{ + s32 ret = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = (struct net_device *)pdev->dev.driver_data; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("vf pm resume was called\n"); + + if (!test_bit(SXE2VF_FLAG_SUSPEND, adapter->flags)) + goto l_end; + + pci_set_master(pdev); + + ret = pci_set_power_state(pdev, PCI_D0); + if (ret) { + LOG_ERROR_BDF("pci_set_power_state with error code:%d\n", ret); + goto l_end; + } + + pci_restore_state(pdev); + ret = pci_save_state(pdev); + if (ret) { + LOG_ERROR_BDF("pci_save_state with error code:%d.\n", ret); + goto l_end; + } + + LOG_DEBUG_BDF("pm resume msix_enabled:%u current_state:%d.\n", + adapter->pdev->msix_enabled, adapter->pdev->current_state); + + if (test_bit(SXE2VF_FLAG_DRV_PROBE_DONE, adapter->flags)) { + ret = sxe2vf_msix_init(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2vf_msix_init resume failed %d.\n", ret); + 
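/* msix init failed before any vector was requested, so there is nothing to unwind before bailing out */ +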
goto l_end; + } + ret = sxe2vf_event_irq_request(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2vf_event_irq_request resume failed %d.\n", ret); + sxe2vf_msix_deinit(adapter); + goto l_end; + } + } + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_INITIAL, SXE2VF_RESET_NONE); + clear_bit(SXE2VF_MONITOR_WORK_DISABLED, &adapter->work_ctxt.state); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + + clear_bit(SXE2VF_FLAG_SUSPEND, adapter->flags); + +l_end: + LOG_DEBUG_BDF("vf pm resume end, ret=%d\n", ret); + return ret; +} +#endif + +static int __maybe_unused sxe2vf_pm_resume(struct device *dev); +static int __maybe_unused sxe2vf_pm_suspend(struct device *dev); + +static __maybe_unused SIMPLE_DEV_PM_OPS(sxe2vf_pm_ops, sxe2vf_pm_suspend, + sxe2vf_pm_resume); + +STATIC const struct pci_error_handlers sxe2vf_pci_err_handler = { + .error_detected = sxe2vf_pci_err_detected, + .slot_reset = sxe2vf_pci_err_slot_reset, + .resume = sxe2vf_pci_err_resume, +}; + +static struct pci_driver sxe2vf_pci_driver = { + .name = SXE2VF_DRV_NAME, + .id_table = sxe2vf_pci_tbl, + .probe = sxe2vf_probe, + .remove = sxe2vf_remove, +#ifdef CONFIG_PM + .driver.pm = &sxe2vf_pm_ops, +#endif + .err_handler = &sxe2vf_pci_err_handler, +}; + +STATIC s32 sxe2vf_wq_create(void) +{ + s32 ret = 0; + + sxe2vf_wq = alloc_workqueue("%s-MONITOR", WQ_UNBOUND, 0, SXE2VF_DRV_NAME); + if (!sxe2vf_wq) { + LOG_PR_ERR("failed to create %s monitor workqueue\n", + SXE2VF_DRV_NAME); + return -ENOMEM; + } + + sxe2vf_mbx_wq = alloc_workqueue("%s-MBX", WQ_UNBOUND, 0, SXE2VF_DRV_NAME); + if (!sxe2vf_mbx_wq) { + LOG_PR_ERR("failed to create %s mbx workqueue\n", + SXE2VF_DRV_NAME); + ret = -ENOMEM; + goto l_monitor_wq_destroy; + } + + sxe2vf_msg_handle_wq = + alloc_workqueue("%s-MSG-HANDLE", 0, 0, SXE2VF_DRV_NAME); + if (!sxe2vf_msg_handle_wq) { + LOG_PR_ERR("failed to create msg handle workqueue\n"); + ret = -ENOMEM; + goto l_mbx_wq_destroy; + } + + return ret; + +l_mbx_wq_destroy: + destroy_workqueue(sxe2vf_mbx_wq); + sxe2vf_mbx_wq = NULL; + +l_monitor_wq_destroy: + destroy_workqueue(sxe2vf_wq); + sxe2vf_wq = NULL; + + return ret; +} + +STATIC void sxe2vf_wq_destroy(void) +{ + destroy_workqueue(sxe2vf_wq); + sxe2vf_wq = NULL; + + destroy_workqueue(sxe2vf_mbx_wq); + sxe2vf_mbx_wq = NULL; + + destroy_workqueue(sxe2vf_msg_handle_wq); + sxe2vf_msg_handle_wq = NULL; +} + +STATIC int __init sxe2vf_init(void) +{ + int ret; + + LOG_PR_INFO("%s init start, version[%s], commit_id[%s], branch[%s],\t" + "build_time[%s]\n", + SXE2VF_DRV_DESCRIPTION, SXE2_VERSION, SXE2_COMMIT_ID, + SXE2_BRANCH, SXE2_BUILD_TIME); + +#ifndef SXE2_CFG_RELEASE + ret = sxe2_log_init(true); + if (ret < 0) { + LOG_PR_ERR("sxe2 log init fail.(err:%d)\n", ret); + return ret; + } +#endif + + ret = sxe2vf_wq_create(); + if (ret) + goto l_log_exit; + + ret = sxe2_com_adapter_register(SXE2_VF); + if (ret) { + LOG_ERROR("register dpdk char dev failed\n"); + goto l_wq_destroy; + } + + sxe2vf_debugfs_init(); + + ret = pci_register_driver(&sxe2vf_pci_driver); + if (ret) { + LOG_PR_ERR("register pci driver:%s failed\n", SXE2VF_DRV_NAME); + goto l_com_unregister; + } + + return 0; + +l_com_unregister: + sxe2vf_debugfs_exit(); + sxe2_com_adapter_unregister(); +l_wq_destroy: + sxe2vf_wq_destroy(); +l_log_exit: +#ifndef SXE2_CFG_RELEASE + sxe2_log_exit(); +#endif + return ret; +} + +STATIC void __exit sxe2vf_exit(void) +{ + pci_unregister_driver(&sxe2vf_pci_driver); + + sxe2vf_debugfs_exit(); + + sxe2_com_adapter_unregister(); + + sxe2vf_wq_destroy(); + +#ifndef SXE2_CFG_RELEASE + sxe2_log_exit(); +#endif +} + 
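The init/exit pair above follows the kernel's usual unwind-ladder convention: every resource sxe2vf_init acquires has a goto label that releases it, a failure jumps to the label of the last successfully acquired resource, and sxe2vf_exit repeats the same releases in reverse acquisition order. A minimal sketch of that pattern, using hypothetical demo_* names rather than this driver's real objects:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

/* hypothetical driver object; assumed to be filled in elsewhere */
static struct pci_driver demo_driver = {
	.name = "demo",
};

static int __init demo_init(void)
{
	int ret;

	/* acquire resources in a fixed order */
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
	if (!demo_wq)
		return -ENOMEM;

	ret = pci_register_driver(&demo_driver);
	if (ret)
		goto l_wq_destroy; /* unwind only what already succeeded */

	return 0;

l_wq_destroy:
	destroy_workqueue(demo_wq);
	return ret;
}

static void __exit demo_exit(void)
{
	/* release in reverse acquisition order, mirroring the error ladder */
	pci_unregister_driver(&demo_driver);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because each label releases exactly one resource and the labels stack in reverse order, a single ladder covers every partial-failure state in init, and the exit path stays a mirror image of the success path.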
+MODULE_DEVICE_TABLE(pci, sxe2vf_pci_tbl); +MODULE_INFO(build_time, SXE2_BUILD_TIME); +MODULE_INFO(branch, SXE2_BRANCH); +MODULE_INFO(arch, SXE2_DRV_ARCH); +MODULE_INFO(commit_id, SXE2_COMMIT_ID); +MODULE_DESCRIPTION(SXE2VF_DRV_DESCRIPTION); +MODULE_AUTHOR(SXE2_DRV_AUTHOR); +MODULE_VERSION(SXE2_VERSION); +MODULE_LICENSE(SXE2_DRV_LICENSE); + +module_init(sxe2vf_init); +module_exit(sxe2vf_exit); diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.c new file mode 100644 index 0000000000000000000000000000000000000000..af269b6a61fba021b17ec810d217e8d24f96b5e4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.c @@ -0,0 +1,1287 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_mbx_channel.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "linux/random.h" +#include "sxe2vf.h" +#include "sxe2_cmd.h" +#include "sxe2vf_mbx_channel.h" +#include "sxe2_log.h" +#include "sxe2_mbx_public.h" +#include "sxe2vf_mbx_msg.h" + +#define SXE2VF_MSG_HANDLING_MAX_CNT (1024) + +STATIC atomic64_t g_msg_session_id; + +void sxe2vf_cmd_session_id_init(void) +{ + u64 session_id; + + get_random_bytes(&session_id, sizeof(session_id)); + + atomic64_set(&g_msg_session_id, session_id); +} + +STATIC inline u64 sxe2vf_msg_session_id_alloc(void) +{ + return (u64)atomic64_add_return(SXE2_MSG_SESSION_ADD_ONE, &g_msg_session_id); +} + +STATIC s32 sxe2vf_mbx_desc_alloc(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_queue *queue) +{ + struct sxe2vf_mbx_ring *desc = &queue->desc; + size_t size = queue->depth * sizeof(struct sxe2vf_mbx_desc); + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + desc->va = dma_alloc_coherent(dev, size, &desc->pa, GFP_KERNEL); + if (!desc->va) { + LOG_DEV_ERR("alloc mbx desc dma mem failed, size %zu.\n", size); + return -ENOMEM; + } + + desc->size = size; + + DATA_DUMP(desc->va, (u32)size, "desc alloc"); + + return 0; +} + +STATIC void sxe2vf_mbx_desc_free(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_queue *queue) +{ + struct sxe2vf_mbx_ring *desc = &queue->desc; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + if (!desc->va) + return; + dma_free_coherent(dev, desc->size, desc->va, desc->pa); + desc->va = NULL; +} + +STATIC s32 sxe2vf_mbx_bufs_alloc(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_queue *queue) +{ + struct sxe2vf_mbx_ring *buf; + size_t size = queue->depth * sizeof(struct sxe2vf_mbx_ring); + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + u16 i; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) { + LOG_DEV_ERR("alloc mbx buf va memory failed, cnt %d, size %zu.\n", + queue->depth, sizeof(*buf)); + return -ENOMEM; + } + + buf->size = size; + for (i = 0; i < queue->depth; i++) { + buf[i].size = queue->buf_size; + buf[i].va = dma_alloc_coherent(dev, buf[i].size, &buf[i].pa, + GFP_KERNEL); + if (!buf[i].va) { + LOG_DEV_ERR("buf[%d] alloc dma mem failed, size %zu.\n", i, + buf[i].size); + ret = -ENOMEM; + goto l_alloc_failed; + } + } + + queue->buf = buf; + return 0; + +l_alloc_failed: + while (i) { + i--; + dma_free_coherent(dev, buf[i].size, buf[i].va, buf[i].pa); + buf[i].va = NULL; + } + kfree(buf); + return ret; +} + +STATIC void sxe2vf_mbx_bufs_free(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_queue *queue) +{ + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct sxe2vf_mbx_ring 
*buf; + u16 i; + + if (!queue->buf) + return; + + for (i = 0; i < queue->depth; i++) { + buf = &queue->buf[i]; + dma_free_coherent(dev, buf->size, buf->va, buf->pa); + buf->va = NULL; + } + kfree(queue->buf); + queue->buf = NULL; +} + +STATIC s32 sxe2vf_mbx_txq_init(struct sxe2vf_adapter *adapter) +{ + s32 ret; + struct sxe2vf_mbx_queue *q = &adapter->channel_ctxt.txq; + + mutex_lock(&q->lock); + + q->depth = SXE2VF_MBX_Q_DESC_CNT; + q->buf_size = SXE2VF_MBX_BUF_SIZE; + + q->ntc = 0; + q->ntu = 0; + + ret = sxe2vf_mbx_desc_alloc(adapter, q); + if (ret) { + LOG_ERROR_BDF("mbx txq desc alloc fail.\n"); + goto l_desc_free; + } + + ret = sxe2vf_mbx_bufs_alloc(adapter, q); + if (ret) { + LOG_ERROR_BDF("mbx txq buf alloc fail.\n"); + goto l_desc_free; + } + + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + ret = sxe2vf_hw_mbx_txq_enable(&adapter->hw, q->depth, q->desc.pa); + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + if (ret) { + LOG_ERROR_BDF("mbx txq enable failed.\n"); + goto l_bufs_free; + } + mutex_unlock(&q->lock); + + LOG_INFO_BDF("mbx txq depth:%u buf size:%u raw data size:%lu\t" + "out_hdr_len:%lu in_hdr_len:%lu \t" + "desc va:%pK pa:0x%llx.\n", + q->depth, q->buf_size, SXE2VF_MBX_RAW_MSG_MAX_SIZE, + SXE2VF_CMD_HDR_SIZE, SXE2VF_MBX_MSG_HDR_SIZE, q->desc.va, + q->desc.pa); + + return ret; + +l_bufs_free: + sxe2vf_mbx_bufs_free(adapter, q); + +l_desc_free: + sxe2vf_mbx_desc_free(adapter, q); + q->depth = 0; + q->buf_size = 0; + mutex_unlock(&q->lock); + return ret; +} + +static void sxe2vf_mbx_txq_deinit(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mbx_queue *q = &adapter->channel_ctxt.txq; + + mutex_lock(&q->lock); + sxe2vf_hw_mbx_txq_disable(&adapter->hw); + + sxe2vf_mbx_desc_free(adapter, q); + sxe2vf_mbx_bufs_free(adapter, q); + q->depth = 0; + + mutex_unlock(&q->lock); +} + +STATIC void sxe2vf_mbx_rxq_desc_fill(struct sxe2vf_mbx_queue *queue, u16 idx) +{ + struct sxe2vf_mbx_desc *desc; + struct sxe2vf_mbx_ring *buf; + + desc = SXE2VF_MBX_Q_DESC(queue, idx); + buf = &queue->buf[idx]; + + (void)memset(desc, 0, sizeof(*desc)); + (void)memset(buf->va, 0, buf->size); + + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_BUF); + if (buf->size > SXE2VF_MBX_DESC_LB_SIZE) + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_LB); + + desc->opcode = cpu_to_le16(SXE2_VF_PF_TO_VF); + + desc->data_len = cpu_to_le16((u16)buf->size); + desc->buf_addr_h = cpu_to_le32(upper_32_bits(buf->pa)); + desc->buf_addr_l = cpu_to_le32(lower_32_bits(buf->pa)); +} + +static s32 sxe2vf_mbx_rxq_init(struct sxe2vf_adapter *adapter) +{ + s32 ret; + u16 i; + struct sxe2vf_mbx_queue *q = &adapter->channel_ctxt.rxq; + + mutex_lock(&q->lock); + + q->depth = SXE2VF_MBX_Q_DESC_CNT; + q->buf_size = SXE2VF_MBX_BUF_SIZE; + + q->ntc = 0; + q->ntu = 0; + + ret = sxe2vf_mbx_desc_alloc(adapter, q); + if (ret) { + LOG_ERROR_BDF("mbx rxq desc alloc fail.\n"); + goto l_desc_free; + } + + ret = sxe2vf_mbx_bufs_alloc(adapter, q); + if (ret) { + LOG_ERROR_BDF("mbx rxq buf alloc fail.\n"); + goto l_desc_free; + } + + for (i = 0; i < q->depth; i++) + sxe2vf_mbx_rxq_desc_fill(q, i); + + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + ret = sxe2vf_hw_mbx_rxq_enable(&adapter->hw, q->depth, q->desc.pa); + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + if (ret) { + LOG_ERROR_BDF("mbx rxq enable failed.\n"); + goto l_bufs_free; + } + mutex_unlock(&q->lock); + + LOG_INFO_BDF("mbx rxq depth:%u buf size:%u raw data size:%lu\t" + "out_hdr_len:%lu\t" + "in_hdr_len:%lu desc va:%pK pa:0x%llx.\n", + q->depth, q->buf_size, SXE2VF_MBX_RAW_MSG_MAX_SIZE, + SXE2VF_CMD_HDR_SIZE, 
SXE2VF_MBX_MSG_HDR_SIZE, q->desc.va, + q->desc.pa); + return ret; + +l_bufs_free: + sxe2vf_mbx_bufs_free(adapter, q); + +l_desc_free: + sxe2vf_mbx_desc_free(adapter, q); + q->depth = 0; + q->buf_size = 0; + mutex_unlock(&q->lock); + return ret; +} + +static void sxe2vf_mbx_rxq_deinit(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mbx_queue *q = &adapter->channel_ctxt.rxq; + + mutex_lock(&q->lock); + sxe2vf_hw_mbx_rxq_disable(&adapter->hw); + + sxe2vf_mbx_desc_free(adapter, q); + sxe2vf_mbx_bufs_free(adapter, q); + q->depth = 0; + + mutex_unlock(&q->lock); +} + +s32 sxe2vf_mbx_channel_init(struct sxe2vf_adapter *adapter) +{ + s32 ret; + + ret = sxe2vf_mbx_rxq_init(adapter); + if (ret) { + LOG_ERROR_BDF("mbx rxq init failed.\n"); + return ret; + } + + ret = sxe2vf_mbx_txq_init(adapter); + if (ret) { + LOG_ERROR_BDF("mbx txq init failed.\n"); + goto l_rxq_deinit; + } + + return ret; + +l_rxq_deinit: + sxe2vf_mbx_rxq_deinit(adapter); + + return ret; +} + +static void sxe2vf_mbx_channel_disable(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_msg_params params = {0}; + s32 ret; + + if (!sxe2vf_hw_mbx_txq_is_enable(hw) && !sxe2vf_hw_mbx_rxq_is_enable(hw)) { + LOG_INFO_BDF("mbx channel is not enable.\n"); + return; + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NO_RESP, + SXE2_VF_MBX_DISABLE, NULL, 0, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("mbx channel disable msg send failed.(err:%d)\n", ret); +} + +void sxe2vf_mbx_channel_deinit(struct sxe2vf_adapter *adapter) +{ + sxe2vf_mbx_channel_disable(adapter); + + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + + sxe2vf_mbx_rxq_deinit(adapter); + sxe2vf_mbx_txq_deinit(adapter); + + sxe2vf_hw_mbx_regs_dump(&adapter->hw); +} + +void sxe2vf_mbx_resource_free(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mbx_queue *txq = &adapter->channel_ctxt.txq; + struct sxe2vf_mbx_queue *rxq = &adapter->channel_ctxt.rxq; + + mutex_lock(&rxq->lock); + sxe2vf_mbx_desc_free(adapter, rxq); + sxe2vf_mbx_bufs_free(adapter, rxq); + rxq->depth = 0; + mutex_unlock(&rxq->lock); + + mutex_lock(&txq->lock); + sxe2vf_mbx_desc_free(adapter, txq); + sxe2vf_mbx_bufs_free(adapter, txq); + txq->depth = 0; + mutex_unlock(&txq->lock); +} + +STATIC void sxe2vf_mbx_tx_unprepare(struct sxe2vf_msg_ctxt *msg_ctxt) +{ + struct sxe2vf_cmd_hdr *out_hdr = NULL; + struct sxe2vf_mbx_msg_hdr *inner_hdr = NULL; + + kfree(msg_ctxt->full_msg); + + out_hdr = msg_ctxt->rcv_buf; + inner_hdr = (struct sxe2vf_mbx_msg_hdr *)((u8 *)msg_ctxt->rcv_buf + + out_hdr->hdr_len); + + if (msg_ctxt->rcv_buf) { + if (msg_ctxt->msg_raw->out_len) + (void)memcpy(msg_ctxt->msg_raw->out_data, + msg_ctxt->rcv_buf + out_hdr->hdr_len + + inner_hdr->data_offset, + msg_ctxt->msg_raw->out_len); + kfree(msg_ctxt->rcv_buf); + } +} + +STATIC s32 sxe2vf_mbx_tx_prepare(struct sxe2vf_adapter *adapter, + struct sxe2vf_msg_params *msg_raw, + struct sxe2vf_msg_ctxt *msg_ctxt) +{ + struct sxe2vf_cmd_hdr *out_hdr; + struct sxe2vf_mbx_msg_hdr *inner_hdr; + u32 len; + u32 timeout = msg_raw->timeout ? 
msg_raw->timeout : SXE2VF_MSG_DFLT_TIMEOUT; + s32 ret = 0; + + if (msg_raw->in_len > SXE2VF_MBX_RAW_MSG_MAX_SIZE || + msg_raw->out_len > SXE2VF_MBX_RAW_MSG_MAX_SIZE) { + LOG_ERROR_BDF("opcode:0x%x in_len:%u out_len:%u exceed max:%lu.\n", + msg_raw->opcode, msg_raw->in_len, msg_raw->out_len, + SXE2VF_MBX_RAW_MSG_MAX_SIZE); + return -EINVAL; + } + + msg_ctxt->adapter = adapter; + msg_ctxt->expired_time = jiffies + secs_to_jiffies(timeout); + msg_ctxt->opcode = msg_raw->opcode; + msg_ctxt->session_id = sxe2vf_msg_session_id_alloc(); + msg_ctxt->msg_raw = msg_raw; + + len = msg_raw->in_len + SXE2VF_MBX_FULL_HDR_SIZE; + + msg_ctxt->full_msg = kzalloc(len, GFP_KERNEL); + if (!msg_ctxt->full_msg) { + LOG_ERROR_BDF("opcode:0x%x mbx msg buffer mem:%uB malloc failed.\n", + msg_raw->opcode, len); + return -ENOMEM; + } + + out_hdr = (struct sxe2vf_cmd_hdr *)(msg_ctxt->full_msg); + out_hdr->magic_code = cpu_to_le32(SXE2_VF_MBX_MAGIC); + out_hdr->trace_id = cpu_to_le64(msg_raw->trace_id); + out_hdr->session_id = cpu_to_le64(msg_ctxt->session_id); + out_hdr->hdr_len = cpu_to_le16((u16)sizeof(*out_hdr)); + out_hdr->cmd_type = (u8)((msg_raw->opcode == SXE2_VF_MBX_DISABLE) + ? cpu_to_le16(SXE2VF_MSG_TYPE_DRV_TO_HW) + : cpu_to_le16(SXE2VF_MSG_TYPE_VF_TO_PF)); + out_hdr->in_len = cpu_to_le16((u16)len); + out_hdr->out_len = cpu_to_le16(msg_raw->out_len); + out_hdr->multi_packet = SXE2_CMD_HDR_MULTI_START | SXE2_CMD_HDR_MULTI_END; + + inner_hdr = (struct sxe2vf_mbx_msg_hdr *)((u8 *)out_hdr + + SXE2VF_CMD_HDR_SIZE); + inner_hdr->op_code = cpu_to_le32(msg_raw->opcode); + inner_hdr->data_offset = cpu_to_le32(SXE2VF_MBX_MSG_HDR_SIZE); + inner_hdr->data_len = cpu_to_le32((u32)msg_raw->in_len); + + if (msg_raw->in_len) + (void)memcpy(out_hdr->body + SXE2VF_MBX_MSG_HDR_SIZE, + msg_raw->in_data, msg_raw->in_len); + + msg_ctxt->rcv_len = msg_raw->out_len + SXE2VF_MBX_FULL_HDR_SIZE; + msg_ctxt->rcv_buf = kzalloc(msg_ctxt->rcv_len, GFP_KERNEL); + if (!msg_ctxt->rcv_buf) { + LOG_ERROR_BDF("opcode:0x%x mbx msg rcv buf:%uB malloc failed.\n", + msg_raw->opcode, msg_ctxt->rcv_len); + ret = -ENOMEM; + goto l_free; + } + + LOG_INFO_BDF("trace_id:0x%llx opcode:0x%x in_len:%u out_len:%u sid:0x%llx\t" + "prepare to send.\n", + msg_raw->trace_id, msg_raw->opcode, msg_raw->in_len, + msg_raw->out_len, msg_ctxt->session_id); + + goto l_out; + +l_free: + kfree(msg_ctxt->full_msg); + +l_out: + return ret; +} + +STATIC u16 sxe2vf_mbx_txq_desc_clean(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_queue *tq) +{ + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_mbx_desc *desc; + struct sxe2vf_mbx_ring *buf; + + while (sxe2vf_hw_mbx_txq_h_read(hw) != tq->ntc) { + desc = SXE2VF_MBX_Q_DESC(tq, tq->ntc); + buf = &tq->buf[tq->ntc]; + + (void)memset(desc, 0, sizeof(*desc)); + (void)memset(buf->va, 0, buf->size); + SXE2VF_RING_IDX_INC(tq->ntc, tq->depth); + } + + return SXE2VF_MBX_Q_DESC_UNUSED(tq); +} + +STATIC struct sxe2vf_mbx_desc * +sxe2vf_mbx_txq_desc_fill(struct sxe2vf_adapter *adapter, + struct sxe2vf_cmd_hdr *full_msg) +{ + struct sxe2vf_mbx_queue *txq = &adapter->channel_ctxt.txq; + struct sxe2vf_mbx_desc *desc; + struct sxe2vf_mbx_ring *buf; + + LOG_DEBUG_BDF("tq get the #%dth desc\n", txq->ntu); + + desc = SXE2VF_MBX_Q_DESC(txq, txq->ntu); + buf = &txq->buf[txq->ntu]; + SXE2VF_RING_IDX_INC(txq->ntu, txq->depth); + + desc->opcode = (full_msg->cmd_type == SXE2VF_MSG_TYPE_DRV_TO_HW) + ? 
cpu_to_le16(SXE2_VF_DRV_TO_HW) + : cpu_to_le16(SXE2_VF_VF_TO_PF); + + desc->data_len = cpu_to_le16(full_msg->in_len); + + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_NO_INTR); + if (full_msg->in_len) { + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_BUF); + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_READ); + if (full_msg->in_len > SXE2VF_MBX_DESC_LB_SIZE) + desc->flags |= cpu_to_le16(SXE2VF_MBX_DESC_LB); + + (void)memcpy(buf->va, full_msg, full_msg->in_len); + desc->buf_addr_h = cpu_to_le32(upper_32_bits(buf->pa)); + desc->buf_addr_l = cpu_to_le32(lower_32_bits(buf->pa)); + } + + return desc; +} + +bool sxe2vf_mbx_tx_done(struct sxe2vf_mbx_desc *desc) +{ + unsigned long expired_time = (jiffies + secs_to_jiffies(SXE2VF_MBX_TIMEOUT)); + + udelay(5); + + do { + if (SXE2VF_HW_DONE(desc)) + return true; + + (void)msleep(SXE2VF_MBX_CHECK_INT); + } while (time_before(jiffies, expired_time)); + + return false; +} + +STATIC s32 sxe2vf_mbx_desc_err_trans(u16 desc_ret) +{ + s32 ret; + + switch (desc_ret) { + case 0: + ret = 0; + break; + case SXE2VF_MBX_DESC_ERR_DES_ERR: + case SXE2VF_MBX_DESC_ERR_BUF_ERR: + case SXE2VF_MBX_DESC_ERR_BUF_NUM_ERR: + case SXE2VF_MBX_DESC_ERR_SRC_BUSY: + ret = -EAGAIN; + break; + default: + ret = -EIO; + break; + } + + return ret; +} + +STATIC s32 __sxe2vf_mbx_msg_send(struct sxe2vf_adapter *adapter, + struct sxe2vf_cmd_hdr *full_msg) +{ + s32 ret = 0; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_mbx_queue *txq = &adapter->channel_ctxt.txq; + struct sxe2vf_mbx_desc *desc = NULL; + struct sxe2vf_mbx_msg_hdr *inner_hdr = + (struct sxe2vf_mbx_msg_hdr *)(full_msg->body); + u32 head; + +#ifdef SXE2_CFG_RELEASE + UNUSED(inner_hdr); +#endif + + mutex_lock(&txq->lock); + + if (!txq->depth) { + LOG_ERROR_BDF("opcode:0x%x in_len:%u mbx txq disabled,\n" + "\t no permit send msg.\n", + inner_hdr->op_code, full_msg->in_len); + ret = -EIO; + goto l_unlock; + } + + head = sxe2vf_hw_mbx_txq_h_read(hw); + if (head >= txq->depth) { + LOG_ERROR_BDF("opcode:0x%x in_len:%u head:0x%x depth:0x%x mbx txq\t" + "overflow\n", + inner_hdr->op_code, full_msg->in_len, head, + txq->depth); + ret = -EIO; + goto l_unlock; + } + + if (sxe2vf_mbx_txq_desc_clean(adapter, txq) == 0) { + LOG_ERROR_BDF("opcode:0x%x in_len:%u head:0x%x mbx txq desc use\t" + "up\n", + inner_hdr->op_code, full_msg->in_len, head); + ret = -EAGAIN; + goto l_unlock; + } + + desc = sxe2vf_mbx_txq_desc_fill(adapter, full_msg); + + DATA_DUMP(desc, sizeof(*desc), "mbx tq desc before"); + DATA_DUMP(full_msg, full_msg->in_len, "mbx tq buf"); + + sxe2vf_hw_mbx_txq_t_write(hw, txq->ntu); + + if (!sxe2vf_mbx_tx_done(desc)) { + sxe2vf_hw_mbx_txq_fault_clear(hw, (u32 *)&ret); + LOG_DEBUG_BDF("desc[%u] opcode:0x%x in_len:%u mbx txq hw\t" + "fault:0x%x\n", + txq->ntu, inner_hdr->op_code, full_msg->in_len, ret); + if (!ret) + ret = -ETIMEDOUT; + else + ret = -EOVERFLOW; + goto l_unlock; + } + + ret = sxe2vf_mbx_desc_err_trans(le16_to_cpu(desc->ret)); + + LOG_DEBUG_BDF("opcode:0x%x in_len:%u head:0x%x mbx txq send done.\n" + "\t(desc return:0x%x)\n", + inner_hdr->op_code, full_msg->in_len, head, + le16_to_cpu(desc->ret)); + +l_unlock: + if (desc) + DATA_DUMP(desc, sizeof(*desc), "mbx tq desc after"); + if (ret) + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + mutex_unlock(&txq->lock); + return ret; +} + +STATIC s32 sxe2vf_msg_wait_entry_add(struct sxe2vf_msg_params *msg_raw, + struct sxe2vf_msg_ctxt *msg_ctxt) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = msg_ctxt->adapter; + struct sxe2vf_wait_entry *wait_entry; + struct sxe2vf_mbx_waitq 
*waitq = &adapter->channel_ctxt.waitq; + + wait_entry = kzalloc(sizeof(*wait_entry), GFP_KERNEL); + if (!wait_entry) { + ret = -ENOMEM; + LOG_ERROR_BDF("opcode:0x%x traceid:0x%llx section_id:0x%llx \t" + "alloc wait_entry failed.\n", + msg_raw->opcode, msg_raw->trace_id, + msg_ctxt->session_id); + goto l_end; + } + wait_entry->session_id = msg_ctxt->session_id; + wait_entry->state = SXE2VF_MSG_STATE_WAITING; + wait_entry->rcv_len = msg_ctxt->rcv_len; + wait_entry->rcv_buf = msg_ctxt->rcv_buf; + + spin_lock(&waitq->lock); + hash_add(waitq->table, &wait_entry->entry, wait_entry->session_id); + spin_unlock(&waitq->lock); + + msg_ctxt->wait_entry = wait_entry; + +l_end: + return ret; +} + +STATIC void sxe2vf_cmd_wait_list_del(struct sxe2vf_msg_ctxt *msg_ctxt) +{ + struct sxe2vf_mbx_waitq *waitq = &msg_ctxt->adapter->channel_ctxt.waitq; + + if (!msg_ctxt->wait_entry) + return; + + spin_lock(&waitq->lock); + hash_del(&msg_ctxt->wait_entry->entry); + spin_unlock(&waitq->lock); + + kfree(msg_ctxt->wait_entry); + msg_ctxt->wait_entry = NULL; +} + +STATIC s32 sxe2vf_msg_rsp_waitq(struct sxe2vf_msg_ctxt *msg_ctxt) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = msg_ctxt->adapter; + unsigned long timeout; + + while (1) { + timeout = msg_ctxt->expired_time - jiffies; + + if (time_after(jiffies, msg_ctxt->expired_time)) + break; + + ret = wait_event_timeout(adapter->channel_ctxt.waitq.wq, + (msg_ctxt->wait_entry->state != + SXE2VF_MSG_STATE_WAITING), + timeout); + if (!ret || msg_ctxt->wait_entry->state != SXE2VF_MSG_STATE_WAITING) + break; + } + + switch (msg_ctxt->wait_entry->state) { + case SXE2VF_MSG_STATE_WAITING: + ret = -ETIMEDOUT; + msg_ctxt->wait_entry->state = SXE2VF_MSG_STATE_CANCELED; + LOG_WARN_BDF("traceid:0x%llx opcode:0x%x timeout exit wait.\n", + msg_ctxt->msg_raw->trace_id, msg_ctxt->opcode); + goto l_end; + case SXE2VF_MSG_STATE_CANCELED: + ret = -ECANCELED; + LOG_WARN_BDF("traceid:0x%llx opcode:0x%x canceled exit wait.\n", + msg_ctxt->msg_raw->trace_id, msg_ctxt->opcode); + goto l_end; + case SXE2VF_MSG_STATE_FAULT: + ret = -EFAULT; + LOG_WARN_BDF("traceid:0x%llx opcode:0x%x fault exit wait.\n", + msg_ctxt->msg_raw->trace_id, msg_ctxt->opcode); + goto l_end; + case SXE2VF_MSG_STATE_DONE: + ret = 0; + LOG_INFO_BDF("traceid:0x%llx opcode:0x%x done.\n", + msg_ctxt->msg_raw->trace_id, msg_ctxt->opcode); + break; + default: + LOG_DEV_WARN("Unexpected wait queue state: %d.\n", + msg_ctxt->wait_entry->state); + SXE2_BUG(); + break; + } + +l_end: + if (ret) + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + + return ret; +} + +STATIC s32 sxe2vf_mbx_rx_prepare(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv **mbx_rcv) +{ + u16 len = sizeof(struct sxe2vf_mbx_rcv) + adapter->channel_ctxt.rxq.buf_size; + + *mbx_rcv = kzalloc(len, GFP_KERNEL); + if (!*mbx_rcv) { + LOG_ERROR_BDF("mbx rx prepare malloc failed: size %u.\n", len); + return -ENOMEM; + } + + (*mbx_rcv)->buf_len = adapter->channel_ctxt.rxq.buf_size; + + return 0; +} + +STATIC void sxe2vf_mbx_rx_unprepare(struct sxe2vf_mbx_rcv *mbx_rcv) +{ + kfree(mbx_rcv); +} + +STATIC s32 __sxe2vf_mbx_msg_rcv(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *msg) +{ + s32 ret = 0; + struct sxe2vf_hw *hw = &adapter->hw; + struct sxe2vf_mbx_queue *rxq = &adapter->channel_ctxt.rxq; + struct sxe2vf_mbx_desc *desc; + u16 ntc; + struct sxe2vf_mbx_ring *buf; + u16 data_len; + + mutex_lock(&rxq->lock); + ntc = rxq->ntc; + + if (!rxq->depth) { + LOG_ERROR_BDF("rxq disabled cannot rcv msg.\n"); + ret = -ENODATA; + goto l_unlock; + } + + desc = 
SXE2VF_MBX_Q_DESC(rxq, ntc); + buf = &rxq->buf[ntc]; + + if (!(desc->flags & SXE2VF_MBX_DONE)) { + ret = -ENODATA; + goto l_unlock; + } + + LOG_DEBUG_BDF("rxq get the #%dth desc\n", ntc); + DATA_DUMP(desc, sizeof(*desc), "mbx rq desc"); + + if (le16_to_cpu(desc->ret)) { + LOG_ERROR_BDF("rxq recv event failed, ret: %d.\n", + le16_to_cpu(desc->ret)); + ret = -EIO; + goto l_ntc_inc; + } + + data_len = le16_to_cpu(desc->data_len); + if (data_len > adapter->channel_ctxt.rxq.buf_size) { + ret = -EINVAL; + LOG_ERROR_BDF("rxq recv event failed, data_len: %d invalid\t" + "buf_len:%u.\n", + data_len, adapter->channel_ctxt.rxq.buf_size); + goto l_ntc_inc; + } + DATA_DUMP(buf->va, data_len, "mbx rq buf"); + + (void)memcpy(&msg->desc, desc, sizeof(msg->desc)); + if (data_len) { + (void)memcpy(msg->buf, buf->va, data_len); + msg->buf_len = data_len; + } + + sxe2vf_mbx_rxq_desc_fill(rxq, ntc); + + sxe2vf_hw_mbx_rxq_t_write(hw, ntc); + +l_ntc_inc: + SXE2VF_RING_IDX_INC(rxq->ntc, rxq->depth); + +l_unlock: + mutex_unlock(&rxq->lock); + return ret; +} + +static void sxe2vf_mbx_rcv_clear(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *mbx_rcv) +{ + mbx_rcv->buf_len = adapter->channel_ctxt.rxq.buf_size; + (void)memset(mbx_rcv->buf, 0, mbx_rcv->buf_len); + (void)memset(&mbx_rcv->desc, 0, sizeof(mbx_rcv->desc)); +} + +STATIC s32 sxe2vf_rcv_msg_valid(struct sxe2vf_cmd_hdr *out_hdr, + struct sxe2vf_mbx_msg_hdr *inner_hdr) +{ + s32 ret = 0; + u32 opcode = le32_to_cpu(inner_hdr->op_code); + + if (le32_to_cpu(out_hdr->magic_code) != SXE2_VF_MBX_MAGIC) { + LOG_ERROR("traceid:0x%llx recv cmd magic:0x%x check failed.\n", + out_hdr->trace_id, le32_to_cpu(out_hdr->magic_code)); + ret = -EIO; + return ret; + } + ret = (s32)le32_to_cpu(out_hdr->ret); + if (ret < 0) { + LOG_ERROR("traceid:0x%llx recv cmd failed, ret: %d.\n", + out_hdr->trace_id, ret); + ret = -EIO; + return ret; + } + + if (opcode >= SXE2_VF_OPCODE_NR) { + LOG_ERROR("traceid:0x%llx recv cmd opcode(%d) invalid, ret: %d.\n", + out_hdr->trace_id, opcode, ret); + ret = -EIO; + return ret; + } + + return ret; +} + +STATIC void sxe2vf_msg_waitq_wakeup(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *mbx_rcv) +{ + struct sxe2vf_wait_entry *wait_entry; + bool found = false; + struct sxe2vf_cmd_hdr *out_hdr = (struct sxe2vf_cmd_hdr *)mbx_rcv->buf; + struct sxe2vf_mbx_waitq *waitq = &adapter->channel_ctxt.waitq; + + spin_lock(&waitq->lock); + hash_for_each_possible(waitq->table, wait_entry, entry, + le64_to_cpu(out_hdr->session_id)) + { + if (wait_entry->state != SXE2VF_MSG_STATE_WAITING || + le64_to_cpu(out_hdr->session_id) != wait_entry->session_id) { + continue; + } + + found = true; + + SXE2_BUG_ON(mbx_rcv->buf_len > wait_entry->rcv_len); + if (mbx_rcv->buf_len > wait_entry->rcv_len) { + LOG_ERROR_BDF("rcv msg buf_len %d exceed caller\t" + "out_len:%d.\n", + mbx_rcv->buf_len, wait_entry->rcv_len); + wait_entry->state = SXE2VF_MSG_STATE_FAULT; + break; + } + (void)memcpy(wait_entry->rcv_buf, mbx_rcv->buf, mbx_rcv->buf_len); + + wait_entry->state = SXE2VF_MSG_STATE_DONE; + } + spin_unlock(&waitq->lock); + + if (found) + wake_up(&adapter->channel_ctxt.waitq.wq); +} + +void sxe2vf_waitq_entry_cancel(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_wait_entry *wait_entry; + struct sxe2vf_mbx_waitq *waitq = &adapter->channel_ctxt.waitq; + u32 bkt; + + spin_lock(&waitq->lock); + hash_for_each(waitq->table, bkt, wait_entry, entry) wait_entry->state = + SXE2VF_MSG_STATE_CANCELED; + spin_unlock(&waitq->lock); + + wake_up(&waitq->wq); +} + +static void 
sxe2vf_notify_msg_list_add(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *mbx_rcv) +{ + struct sxe2vf_notify_msg_list *list = &adapter->channel_ctxt.list; + + mutex_lock(&list->lock); + list_add_tail(&mbx_rcv->node, &list->head); + mutex_unlock(&list->lock); +} + +static void sxe2vf_notify_msg_list_del(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv **mbx_rcv) +{ + struct sxe2vf_notify_msg_list *list = &adapter->channel_ctxt.list; + + mutex_lock(&list->lock); + + if (list_empty(&list->head)) { + *mbx_rcv = NULL; + mutex_unlock(&list->lock); + return; + } + + *mbx_rcv = list_first_entry(&list->head, struct sxe2vf_mbx_rcv, node); + list_del(&(*mbx_rcv)->node); + mutex_unlock(&list->lock); +} + +void sxe2vf_notify_msg_list_clear(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_notify_msg_list *list = &adapter->channel_ctxt.list; + struct sxe2vf_mbx_rcv *mbx_rcv; + + mutex_lock(&list->lock); + while (!list_empty(&list->head)) { + mbx_rcv = list_first_entry(&list->head, struct sxe2vf_mbx_rcv, node); + list_del(&mbx_rcv->node); + kfree(mbx_rcv); + } + mutex_unlock(&list->lock); +} + +STATIC void __sxe2vf_notify_msg_handle(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *msg) +{ + struct sxe2vf_mbx_msg_table *msg_table = sxe2vf_mbx_msg_table_get(); + struct sxe2vf_mbx_msg_hdr *inner_hdr; + struct sxe2vf_cmd_hdr *out_hdr; + u32 opcode; + s32 ret; + + out_hdr = (struct sxe2vf_cmd_hdr *)msg->buf; + inner_hdr = (struct sxe2vf_mbx_msg_hdr *)(msg->buf + out_hdr->hdr_len); + opcode = le32_to_cpu(inner_hdr->op_code); + + if (opcode >= SXE2_VF_OPCODE_NR) { + LOG_ERROR("opcode(%d) invalid exceed max value:0x%x.\n", opcode, + SXE2_VF_OPCODE_NR); + return; + } + + if (msg_table[opcode].func) { + ret = msg_table[opcode].func(adapter, ((u8 *)inner_hdr + inner_hdr->data_offset)); + if (ret) + LOG_ERROR_BDF("opcode:0x%x vf handle result:%d.\n", opcode, + ret); + } else { + LOG_ERROR_BDF("opcode:0x%x invalid\n", opcode); + } +} + +void sxe2vf_notify_msg_wk_cb(struct work_struct *work) +{ + struct sxe2vf_work_context *wk = container_of(work, + struct sxe2vf_work_context, + msg_handle_wk); + struct sxe2vf_adapter *adapter = + container_of(wk, struct sxe2vf_adapter, work_ctxt); + struct sxe2vf_mbx_rcv *msg; + int schedule_count_th = 0; + + while (1) { + sxe2vf_notify_msg_list_del(adapter, &msg); + if (!msg) + break; + + __sxe2vf_notify_msg_handle(adapter, msg); + + kfree(msg); + + schedule_count_th++; + if (schedule_count_th == SXE2VF_MSG_HANDLING_MAX_CNT) { + schedule_count_th = 0; + cond_resched(); + } + } +} + +STATIC void sxe2vf_notify_msg_handle(struct sxe2vf_adapter *adapter, + struct sxe2vf_mbx_rcv *mbx_rcv) +{ + struct sxe2vf_mbx_rcv *tmp_msg; + u32 len = sizeof(*tmp_msg) + mbx_rcv->buf_len; + + tmp_msg = kzalloc(len, GFP_KERNEL); + if (!tmp_msg) { + LOG_DEV_ERR("malloc failed, size: %u.\n", len); + return; + } + memcpy(tmp_msg, mbx_rcv, len); + INIT_LIST_HEAD(&tmp_msg->node); + + sxe2vf_notify_msg_list_add(adapter, tmp_msg); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_NOTIFY_MSG, 0); +} + +s32 sxe2vf_mbx_msg_rcv(struct sxe2vf_adapter *adapter) +{ + s32 ret; + struct sxe2vf_mbx_rcv *mbx_rcv; + struct sxe2vf_cmd_hdr *out_hdr; + struct sxe2vf_mbx_msg_hdr *inner_hdr; + u16 idx = 0; + u32 opcode; + + ret = sxe2vf_mbx_rx_prepare(adapter, &mbx_rcv); + if (ret) + return ret; + + do { + ret = __sxe2vf_mbx_msg_rcv(adapter, mbx_rcv); + if (ret == -ENODATA) + goto l_free; + + out_hdr = (struct sxe2vf_cmd_hdr *)mbx_rcv->buf; + inner_hdr = SXE2_MBX_MSG_HDR_PTR(out_hdr); + opcode = 
le32_to_cpu(inner_hdr->op_code); + + ret = sxe2vf_rcv_msg_valid(out_hdr, inner_hdr); + if (ret) + goto l_free; + + switch (out_hdr->cmd_type) { + case SXE2VF_MSG_TYPE_PF_TO_VF: + sxe2vf_notify_msg_handle(adapter, mbx_rcv); + break; + case SXE2VF_MSG_TYPE_PF_REPLY_VF: + sxe2vf_msg_waitq_wakeup(adapter, mbx_rcv); + break; + default: + LOG_ERROR_BDF("unknown cmd type:%d opcode:0x%x.\n", + out_hdr->cmd_type, opcode); + break; + } + + idx++; + + sxe2vf_mbx_rcv_clear(adapter, mbx_rcv); + } while (idx < SXE2VF_MBX_RQ_WEIGHT); + +l_free: + sxe2vf_mbx_rx_unprepare(mbx_rcv); + if (ret == -ENODATA) + ret = -ETIMEDOUT; + + return ret; +} + +STATIC s32 sxe2vf_msg_rsp_polling(struct sxe2vf_msg_ctxt *msg_ctxt) +{ + s32 ret; + struct sxe2vf_adapter *adapter = msg_ctxt->adapter; + struct sxe2vf_mbx_rcv *mbx_rcv; + struct sxe2vf_cmd_hdr *out_hdr; + struct sxe2vf_mbx_msg_hdr *inner_hdr; + + ret = sxe2vf_mbx_rx_prepare(adapter, &mbx_rcv); + if (ret) + return ret; + + do { + ret = __sxe2vf_mbx_msg_rcv(adapter, mbx_rcv); + if (ret == 0) { + out_hdr = (struct sxe2vf_cmd_hdr *)mbx_rcv->buf; + inner_hdr = (struct sxe2vf_mbx_msg_hdr *)(mbx_rcv->buf + + out_hdr->hdr_len); + + ret = sxe2vf_rcv_msg_valid(out_hdr, inner_hdr); + if (ret) + goto l_end; + + if (le32_to_cpu(inner_hdr->op_code) != msg_ctxt->opcode || + le64_to_cpu(out_hdr->session_id) != + msg_ctxt->session_id) { + LOG_ERROR_BDF("recv invalid cmd traceid:0x%llx\t" + "opcode:0x%x\t" + "session id: 0x%llx.\n", + le64_to_cpu(out_hdr->trace_id), + le32_to_cpu(inner_hdr->op_code), + le64_to_cpu(out_hdr->session_id)); + sxe2vf_mbx_rcv_clear(adapter, mbx_rcv); + ret = -ETIMEDOUT; + continue; + } + + SXE2_BUG_ON(mbx_rcv->buf_len > msg_ctxt->rcv_len); + (void)memcpy(msg_ctxt->rcv_buf, mbx_rcv->buf, + mbx_rcv->buf_len); + goto l_end; + } else if (ret != -ENODATA) { + LOG_ERROR_BDF("[trace id 0x%llx] opcode:0x%x rq receive\t" + "error ret %d.\n", + msg_ctxt->msg_raw->trace_id, + msg_ctxt->msg_raw->opcode, ret); + break; + } + (void)msleep(SXE2VF_MSG_WB_WAIT_INTERVAL); + } while (!sxe2vf_dev_state_check(adapter) && + time_before(jiffies, (unsigned long)msg_ctxt->expired_time)); + +l_end: + sxe2vf_mbx_rx_unprepare(mbx_rcv); + if (ret == -ENODATA) { + ret = -ETIMEDOUT; + sxe2vf_hw_mbx_regs_dump(&adapter->hw); + LOG_ERROR_BDF("[trace id 0x%llx] opcode:0x%x polling timeout ret\t" + "%d.\n", + msg_ctxt->msg_raw->trace_id, msg_ctxt->msg_raw->opcode, + ret); + } + return ret; +} + +s32 sxe2vf_err_code_trans_mbx(s32 err) +{ + s32 ret; + + if (err > -SXE2_VF_ERR_PARAM) + return err; + + switch (err) { + case SXE2_VF_ERR_SUCCESS: + ret = 0; + break; + case -SXE2_VF_ERR_NO_MEMORY: + ret = -ENOMEM; + break; + case -SXE2_VF_ERR_NOT_SUPPORTED: + ret = -EOPNOTSUPP; + break; + case -SXE2_VF_ERR_PARAM: + case -SXE2_VF_ERR_INVALID_VF_ID: + ret = -EINVAL; + break; + case -SXE2_VF_ERR_HANDLE_ERROR: + case -SXE2_VF_ERR_CQP_COMPL_ERROR: + case -SXE2_VF_ERR_ADMIN_QUEUE_ERROR: + case -SXE2_VF_ERR_PF_STATUS_ABNORMAL: + default: + ret = -EIO; + break; + } + + return ret; +} + +s32 sxe2vf_dev_state_check(struct sxe2vf_adapter *adapter) +{ + enum sxe2vf_dev_state state; + enum sxe2vf_reset_type reset_type; + s32 ret = SXE2_VF_ERR_SUCCESS; + + sxe2vf_dev_state_get(adapter, &state, &reset_type); + if (test_bit(SXE2VF_FLAG_DRV_REMOVING, adapter->flags) || + state == SXE2VF_DEVSTATE_STOPPED || state == SXE2VF_DEVSTATE_RESETTING || + state == SXE2VF_DEVSTATE_FAULT) + ret = -SXE2_VF_ERR_VF_STATUS_ABNORMAL; + return ret; +} + +s32 sxe2vf_mbx_msg_send(struct sxe2vf_adapter *adapter, + struct 
sxe2vf_msg_params *msg_raw) +{ + s32 ret; + struct sxe2vf_msg_ctxt msg_ctxt; + struct sxe2vf_cmd_hdr *out_hdr; + struct sxe2vf_mbx_msg_hdr *inner_hdr; + u16 retry_cnt = 0; + + ret = sxe2vf_dev_state_check(adapter); + if (ret != SXE2_VF_ERR_SUCCESS) { + LOG_WARN_BDF("opcode:0x%x no need send during pre check fail,\t" + "ret:%d.\n", + msg_raw->opcode, ret); + return -EIO; + } + + ret = sxe2vf_mbx_tx_prepare(adapter, msg_raw, &msg_ctxt); + if (ret) + return ret; + + if (msg_raw->mode == SXE2VF_MSG_RESP_WAIT_NOTIFY) { + ret = sxe2vf_msg_wait_entry_add(msg_raw, &msg_ctxt); + if (ret) + goto l_free; + } + + do { + ret = __sxe2vf_mbx_msg_send(adapter, msg_ctxt.full_msg); + if (ret != -EAGAIN) + break; + + mdelay(SXE2VF_MSG_RETRY_INTERVAL); + + } while (++retry_cnt < SXE2VF_MSG_RETRY_COUNT); + + if (ret == -EAGAIN) { + ret = -EBUSY; + goto l_list_del; + } else if (ret == -ECANCELED) { + goto l_cancel; + } else if (ret) { + goto l_list_del; + } + + if (msg_raw->mode == SXE2VF_MSG_RESP_WAIT_NOTIFY) { + ret = sxe2vf_msg_rsp_waitq(&msg_ctxt); + } else if (msg_raw->mode == SXE2VF_MSG_RESP_WAIT_POLLING) { + ret = sxe2vf_msg_rsp_polling(&msg_ctxt); + } else { + LOG_INFO_BDF("vf msg opcode:0x%x trace_id:0x%llx in_len:%u no need\t" + "resp.\n", + msg_raw->opcode, msg_raw->trace_id, msg_raw->in_len); + goto l_free; + } + + if (ret == -ECANCELED || ret == -ETIMEDOUT) + goto l_cancel; + else if (ret) + goto l_list_del; + + out_hdr = msg_ctxt.rcv_buf; + ret = (s32)le32_to_cpu(out_hdr->ret); + if (unlikely(ret < 0)) { + LOG_ERROR_BDF("vf msg opcode:0x%x trace_id:0x%llx in_len:%u pf\t" + "handled fail.(err:%d)\n", + msg_raw->opcode, msg_raw->trace_id, msg_raw->in_len, + ret); + ret = -EIO; + } else { + inner_hdr = (struct sxe2vf_mbx_msg_hdr *)(msg_ctxt.rcv_buf + + out_hdr->hdr_len); + ret = (s32)le32_to_cpu(inner_hdr->err_code); + if (ret) { + LOG_ERROR_BDF("vf msg opcode:0x%x trace_id:0x%llx\t" + "rcv_len:%u\t" + "out_hdr ret:%d inner_hdr ret:%d.\n", + msg_raw->opcode, msg_raw->trace_id, + msg_ctxt.rcv_len, le32_to_cpu(out_hdr->ret), + le32_to_cpu(inner_hdr->err_code)); + } + ret = sxe2vf_err_code_trans_mbx((s32)le32_to_cpu(inner_hdr->err_code)); + } + + goto l_list_del; + +l_cancel: +l_list_del: + if (msg_raw->mode == SXE2VF_MSG_RESP_WAIT_NOTIFY) + sxe2vf_cmd_wait_list_del(&msg_ctxt); +l_free: + sxe2vf_mbx_tx_unprepare(&msg_ctxt); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..0c48481278b5e1336bc427d665236b65a737bdc4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_channel.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2vf_mbx_channel.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+#ifndef __SXE2VF_MBX_CHANNEL_H__
+#define __SXE2VF_MBX_CHANNEL_H__
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+
+#include "sxe2_mbx_public.h"
+
+struct sxe2vf_adapter;
+
+#define SXE2VF_MSG_RETRY_INTERVAL 10
+#define SXE2VF_MSG_RETRY_COUNT 3
+#define SXE2VF_MSG_WB_WAIT_INTERVAL 10
+#define SXE2VF_MSG_RQ_WEIGHT 256
+
+#define SXE2VF_MBX_Q_DESC_CNT 32
+
+#define SXE2VF_MBX_SEND_RETRY_CNT 5
+
+#define SXE2VF_MBX_DESC_LB_SIZE 512
+
+#define SXE2VF_MBX_DESC_DD BIT(0)
+#define SXE2VF_MBX_DESC_COMPLETE BIT(1)
+#define SXE2VF_MBX_DESC_ERROR BIT(2)
+#define SXE2VF_MBX_DESC_LB BIT(9)
+#define SXE2VF_MBX_DESC_READ BIT(10)
+#define SXE2VF_MBX_DESC_BUF BIT(12)
+#define SXE2VF_MBX_DESC_NO_INTR BIT(13)
+
+#define SXE2VF_MBX_DONE (SXE2VF_MBX_DESC_DD | SXE2VF_MBX_DESC_COMPLETE | SXE2VF_MBX_DESC_ERROR)
+
+#define SXE2VF_MBX_BUF_SIZE 4096
+
+#define SXE2VF_MBX_RQ_WEIGHT 256
+
+#define SXE2VF_MBX_TIMEOUT (1)
+#define SXE2VF_MBX_CHECK_INT (1)
+
+#define SXE2VF_MBX_MSG_HTABLE_ORDER 8
+
+#ifndef secs_to_jiffies
+#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
+#endif
+
+#define SXE2VF_MSG_DFLT_TIMEOUT (60)
+
+#define SXE2_MSG_SESSION_ADD_ONE (1ULL)
+
+enum sxe2vf_desc_err_code {
+	SXE2VF_MBX_DESC_ERR_NONE = 0,
+	SXE2VF_MBX_DESC_ERR_DES_ERR,
+	SXE2VF_MBX_DESC_ERR_BUF_ERR,
+	SXE2VF_MBX_DESC_ERR_BUF_NUM_ERR,
+	SXE2VF_MBX_DESC_ERR_SRC_BUSY,
+	SXE2VF_MBX_DESC_ERR_DATA_LEN_LACK,
+	SXE2VF_MBX_DESC_ERR_DATA_LEN_LACK2,
+	SXE2VF_MBX_DESC_ERR_SESSION_BUFFER_OV,
+	SXE2VF_MBX_DESC_ERR_CMD_BUFFER_OV,
+	SXE2VF_MBX_DESC_ERR_IN_OUT_LEN_LACK,
+	SXE2VF_MBX_DESC_ERR_UNKNOW_OPCODE,
+	SXE2VF_MBX_DESC_ERR_UNKNOW_CMD_TYPE,
+	SXE2VF_MBX_DESC_ERR_ADMINQ_STATE,
+	SXE2VF_MBX_DESC_ERR_FIND_JOB,
+	SXE2VF_MBX_DESC_ERR_NONE_START,
+	SXE2VF_MBX_DESC_ERR_NR,
+};
+
+struct sxe2vf_mbx_ring {
+	void *va;
+	dma_addr_t pa;
+	size_t size;
+};
+
+struct sxe2vf_mbx_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 data_len;
+	__le16 ret;
+	__le32 custom0;
+	__le32 custom1;
+	__le32 custom2;
+	__le32 custom3;
+	__le32 buf_addr_h;
+	__le32 buf_addr_l;
+};
+
+#define SXE2VF_MBX_Q_DESC(queue, i) (&(((struct sxe2vf_mbx_desc *)((queue)->desc.va))[i]))
+#define SXE2VF_MBX_Q_BUF(queue, i) ((queue)->buf[i].va)
+
+#define SXE2VF_MBX_Q_DESC_UNUSED(queue) \
+	((u16)((((queue)->ntc > (queue)->ntu) ?
0 : \ + (queue)->depth) + (queue)->ntc - (queue)->ntu - 1)) + +#define SXE2VF_RING_IDX_INC(i, depth) \ + do { \ + (i)++; \ + if ((i) == (depth)) { \ + (i) = 0; \ + } \ + } while (0) + +#define SXE2VF_HW_DONE(desc) (le16_to_cpu((desc)->flags) & SXE2VF_MBX_DONE) + +struct sxe2vf_mbx_queue { + u16 depth; + u16 buf_size; + u16 ntu; + u16 ntc; + struct sxe2vf_mbx_ring desc; + struct sxe2vf_mbx_ring *buf; + struct mutex lock; +}; + +struct sxe2vf_mbx_waitq { + spinlock_t lock; + wait_queue_head_t wq; + DECLARE_HASHTABLE(table, SXE2VF_MBX_MSG_HTABLE_ORDER); +}; + +enum sxe2vf_msg_state { + SXE2VF_MSG_STATE_WAITING = 0, + SXE2VF_MSG_STATE_DONE, + SXE2VF_MSG_STATE_CANCELED, + SXE2VF_MSG_STATE_FAULT, +}; + +struct sxe2vf_wait_entry { + struct hlist_node entry; + u64 session_id; + enum sxe2vf_msg_state state; + u16 rcv_len; + void *rcv_buf; +}; + +struct sxe2vf_msg_ctxt { + struct sxe2vf_adapter *adapter; + enum sxe2_vf_opcode opcode; + u64 session_id; + unsigned long expired_time; + struct sxe2vf_cmd_hdr *full_msg; + struct sxe2vf_wait_entry *wait_entry; + struct sxe2vf_msg_params *msg_raw; + u16 rcv_len; + void *rcv_buf; +}; + +struct sxe2vf_notify_msg_list { + struct list_head head; + struct mutex lock; +}; + +struct sxe2vf_channel_context { + struct sxe2vf_mbx_queue rxq; + struct sxe2vf_mbx_queue txq; + struct sxe2vf_mbx_waitq waitq; + struct sxe2vf_notify_msg_list list; +}; + +enum sxe2vf_resp_wait_mode { + SXE2VF_MSG_RESP_WAIT_NO_RESP, + SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2VF_MSG_RESP_WAIT_POLLING, +}; + +struct sxe2vf_msg_params { + u64 trace_id; + u32 err_code; + u32 opcode; + u16 in_len; + void *in_data; + u16 out_len; + void *out_data; + enum sxe2vf_resp_wait_mode mode; + u32 timeout; +}; + +struct sxe2vf_mbx_rcv { + struct list_head node; + u16 buf_len; + struct sxe2vf_mbx_desc desc; + u8 buf[]; +}; + +void sxe2vf_mbx_channel_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mbx_channel_init(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mbx_msg_rcv(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mbx_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_msg_params *params_s); + +bool sxe2vf_mbx_tx_done(struct sxe2vf_mbx_desc *desc); + +void sxe2vf_mbx_resource_free(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_err_code_trans_mbx(s32 err); + +void sxe2vf_cmd_session_id_init(void); + +void sxe2vf_waitq_entry_cancel(struct sxe2vf_adapter *adapter); + +void sxe2vf_notify_msg_list_clear(struct sxe2vf_adapter *adapter); + +void sxe2vf_notify_msg_wk_cb(struct work_struct *work); + +s32 sxe2vf_dev_state_check(struct sxe2vf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.c new file mode 100644 index 0000000000000000000000000000000000000000..d23bce7864898e5d2220621226654245469491f9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.c @@ -0,0 +1,1632 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_mbx_msg.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include "sxe2vf.h" +#include "sxe2vf_mbx_msg.h" +#include "sxe2_mbx_public.h" +#include "sxe2_log.h" +#include "sxe2vf_rx.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_regs.h" +#include "sxe2vf_netdev.h" +#include "sxe2vf_ethtool.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_irq.h" + +STATIC DEFINE_PER_CPU(union sxe2vf_trace_info, sxe2vf_trace_id); + +#define SXE2VF_TRACE_ID_CHIP_OUT_COUNT_MASK \ + 0x0003FFFFFFFFFFFFLLU +#define SXE2VF_TRACE_ID_CHIP_OUT_CPUID_MASK 0x3FFLLU +#define SXE2VF_TRACE_ID_CHIP_OUT_TYPE_MASK 0xFLLU +#define SXE2VF_VT_RESOURCE_SIZE \ + (sizeof(struct sxe2_vf_vfres_msg) + \ + (SXE2_VF_MAX_VSI_CNT * sizeof(struct sxe2_vf_vsi_res))) + +STATIC void sxe2vf_trace_id_alloc(u64 *trace_id) +{ + union sxe2vf_trace_info *trace; + u64 trace_id_count; + + preempt_disable(); + trace = this_cpu_ptr(&sxe2vf_trace_id); + + trace_id_count = trace->sxe2vf_trace_id_param.count; + ++trace_id_count; + trace->sxe2vf_trace_id_param.count = + (trace_id_count & SXE2VF_TRACE_ID_CHIP_OUT_COUNT_MASK); + + *trace_id = trace->id; + preempt_enable(); +} + +void sxe2vf_mbx_msg_dflt_params_fill(struct sxe2vf_msg_params *params, + enum sxe2vf_resp_wait_mode mode, + enum sxe2_vf_opcode opc, void *in_data, + u32 in_len, void *out_data, u32 out_len) +{ + params->opcode = opc; + params->mode = mode; + params->in_data = in_data; + params->in_len = (u16)in_len; + params->out_data = out_data; + params->out_len = (u16)out_len; + + sxe2vf_trace_id_alloc(¶ms->trace_id); +} + +s32 sxe2vf_mbx_common_msg_send(struct sxe2vf_adapter *adapter, + enum sxe2_vf_opcode opcode, u8 *msg, u16 len) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret = 0; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, opcode, + msg, len, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("opcode:0x%x send failed ret:%d.\n", opcode, ret); + + return ret; +} + +#define SXE2VF_UCMD_TXQ_MODE_DEFAULT 0 + +s32 sxe2vf_txq_cfg_request(struct sxe2vf_adapter *adapter) +{ + u32 i; + s32 ret; + struct sxe2vf_queue *txq; + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_txq_ctxt_msg *msg; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2_vf_txq_ctxt *ctxt; + u32 len; + u16 limit; + u16 start; + u16 send; + s32 left; + + limit = (SXE2VF_MBX_RAW_MSG_MAX_SIZE - sizeof(struct sxe2_vf_txq_ctxt_msg)) / + sizeof(struct sxe2_vf_txq_ctxt); + left = vsi->txqs.q_cnt; + start = 0; + while (left > 0) { + send = (u16)min_t(u16, limit, (u16)left); + len = sizeof(*msg) + sizeof(struct sxe2_vf_txq_ctxt) * send; + + msg = kzalloc(len, GFP_KERNEL); + if (!msg) { + LOG_ERROR_BDF("txq msg mem %uB alloc failed.\n", len); + return -ENOMEM; + } + + msg->vsi_id = cpu_to_le16(vsi->vsi_id); + msg->q_cnt = cpu_to_le16(send); + ctxt = msg->ctxs; + + sxe2vf_for_txq_range(i, start, start + send) + { + txq = vsi->txqs.q[i]; + ctxt = &msg->ctxs[i - start]; + ctxt->vsi_id = cpu_to_le16(vsi->vsi_id); + ctxt->depth = cpu_to_le16(txq->depth); + ctxt->dma_addr = cpu_to_le64(txq->desc.dma); + ctxt->queue_id = cpu_to_le16((u16)i); + ctxt->sched_mode = cpu_to_le32(SXE2VF_UCMD_TXQ_MODE_DEFAULT); + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_TXQ_CFG_AND_ENABLE, msg, len, + NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("txq cfg and enable start:%u send:%u\t" + "failed.\n", + start, send); + kfree(msg); + goto l_err; + } + + 
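+		/*
+		 * One SXE2_VF_TXQ_CFG_AND_ENABLE message carries at most
+		 * 'limit' queue contexts (bounded by
+		 * SXE2VF_MBX_RAW_MSG_MAX_SIZE), so the VSI's tx queues are
+		 * configured in batches: advance the window by 'send'
+		 * queues per pass until 'left' reaches zero.
+		 */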
left -= send;
+		start += send;
+
+		kfree(msg);
+		LOG_INFO_BDF("txq cnt:%u limit:%u send:%u next start:%u.\n",
+			     vsi->txqs.q_cnt, limit, send, start);
+	}
+
+l_err:
+	return ret;
+}
+
+s32 sxe2vf_reset_msg_send(struct sxe2vf_adapter *adapter)
+{
+	struct sxe2vf_msg_params params = {0};
+	s32 ret;
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NO_RESP,
+					SXE2_VF_RESET_REQUEST, NULL, 0, NULL, 0);
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("vf reset msg send failed.(err:%d)\n", ret);
+
+	return ret;
+}
+
+u16 sxe2vf_irq_cnt_min_get(struct sxe2vf_adapter *adapter)
+{
+	u16 irq_cnt = SXE2VF_EVENT_MSIX_CNT;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_KERNEL)
+		irq_cnt += SXE2VF_LAN_MSIX_MIN_CNT + SXE2VF_RDMA_MSIX_MIN_CNT;
+	else if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		irq_cnt += SXE2VF_DPDK_MSIX_MIN_CNT;
+	else
+		irq_cnt += SXE2VF_LAN_MSIX_MIN_CNT + SXE2VF_DPDK_MSIX_MIN_CNT +
+			   SXE2VF_RDMA_MSIX_MIN_CNT;
+
+	LOG_INFO_BDF("mode:%d min irq cnt:%u\n", sxe2vf_com_mode_get(adapter),
+		     irq_cnt);
+
+	return irq_cnt;
+}
+
+STATIC s32 sxe2vf_irqs_num_validate(struct sxe2vf_adapter *adapter,
+				    struct sxe2_vf_vfres_msg *vf_res)
+{
+	u16 irq_cnt = le16_to_cpu(vf_res->max_vectors);
+	u16 irq_min = sxe2vf_irq_cnt_min_get(adapter);
+
+	if (irq_cnt < irq_min || irq_cnt > SXE2VF_IRQ_MAX_CNT) {
+		LOG_ERROR_BDF("irq cnt invalid:%d min irq cnt:%u max irq cnt:%u\n",
+			      irq_cnt, irq_min, SXE2VF_IRQ_MAX_CNT);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+STATIC u16 sxe2vf_queue_cnt_min_get(struct sxe2vf_adapter *adapter)
+{
+	u16 queue_cnt;
+
+	if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_KERNEL)
+		queue_cnt = SXE2VF_ETH_QUEUE_CNT_MIN;
+	else if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK)
+		queue_cnt = SXE2VF_DPDK_QUEUE_CNT_MIN;
+	else
+		queue_cnt = SXE2VF_ETH_QUEUE_CNT_MIN + SXE2VF_DPDK_QUEUE_CNT_MIN;
+
+	LOG_INFO_BDF("mode:%d min queue cnt:%u\n", sxe2vf_com_mode_get(adapter),
+		     queue_cnt);
+
+	return queue_cnt;
+}
+
+STATIC s32 sxe2vf_queues_num_validate(struct sxe2vf_adapter *adapter,
+				      struct sxe2_vf_vfres_msg *vf_res)
+{
+	u16 q_cnt = le16_to_cpu(vf_res->q_cnt);
+	u16 q_min = sxe2vf_queue_cnt_min_get(adapter);
+	u16 q_max = SXE2_VF_ETH_Q_NUM + SXE2_VF_DPDK_Q_NUM;
+
+	if (q_cnt < q_min || q_cnt > q_max) {
+		LOG_ERROR_BDF("queue cnt invalid:%d min:%d max:%u\n", q_cnt, q_min,
+			      q_max);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+s32 sxe2vf_rxq_cfg_request(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	u16 i;
+	struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi;
+	struct sxe2_vf_rxq_msg *rxq_msg;
+	struct sxe2_vf_rxq_ctxt *ctxt;
+	struct sxe2vf_queue *rxq;
+	u16 frame_size = (u16)(adapter->netdev->mtu + SXE2VF_PACKET_HDR_PAD);
+	struct sxe2vf_msg_params params = {0};
+	u32 len;
+	u16 limit;
+	u16 start;
+	u16 send;
+	s32 left;
+
+	limit = (SXE2VF_MBX_RAW_MSG_MAX_SIZE - sizeof(struct sxe2_vf_rxq_msg)) /
+		sizeof(struct sxe2_vf_rxq_ctxt);
+	left = vsi->rxqs.q_cnt;
+	start = 0;
+
+	while (left > 0) {
+		send = (u16)min_t(u16, limit, (u16)left);
+		len = sizeof(*rxq_msg) + sizeof(struct sxe2_vf_rxq_ctxt) * send;
+
+		rxq_msg = kzalloc(len, GFP_KERNEL);
+		if (!rxq_msg) {
+			LOG_ERROR_BDF("rxq msg mem %uB alloc failed.\n", len);
+			return -ENOMEM;
+		}
+
+		rxq_msg->vsi_id = cpu_to_le16(vsi->vsi_id);
+		rxq_msg->q_cnt = cpu_to_le16(send);
+		rxq_msg->max_frame_size = cpu_to_le16(frame_size);
+		ctxt = rxq_msg->ctxt;
+
+		sxe2vf_for_rxq_range(i, start, start + send)
+		{
+			rxq = vsi->rxqs.q[i];
+			ctxt->buf_len = cpu_to_le16(ALIGN(rxq->rx_buf_len,
BIT_ULL(SXE2VF_RXQ_CTX_DBUFF_SHIFT))); + ctxt->depth = cpu_to_le16(rxq->depth); + ctxt->dma_addr = cpu_to_le64(rxq->desc.dma); + ctxt->queue_id = cpu_to_le16(i); + + if (test_bit(SXE2VF_RXQ_RXFCS_ENABLED, &rxq->flags)) + ctxt->keep_crc_en = true; + else + ctxt->keep_crc_en = false; + + if (test_bit(SXE2VF_RXQ_LRO_ENABLED, &rxq->flags)) + ctxt->lro_status = true; + else + ctxt->lro_status = false; + ctxt++; + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_RXQ_CFG_AND_ENABLE, rxq_msg, + len, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("rxq cfg and enable start:%u send:%u\t" + "failed.\n", + start, send); + kfree(rxq_msg); + goto l_err; + } + + left -= send; + start += send; + + kfree(rxq_msg); + + LOG_INFO_BDF("rxq cnt:%u limit:%u send:%u next start:%u.\n", + vsi->rxqs.q_cnt, limit, send, start); + } + +l_err: + return ret; +} + +STATIC void sxe2vf_sw_caps_set(struct sxe2vf_adapter *adapter, + struct sxe2_vf_vfres_msg *vf_res) +{ + struct sxe2vf_addr_node *f; + u8 *cur_mac_addr = adapter->switch_ctxt.filter_ctxt.mac_filter.cur_mac_addr; + s32 ret; + u8 i; + + adapter->pf_id = (u8)(le16_to_cpu(vf_res->parent_pfid)); + adapter->vf_id_in_dev = le16_to_cpu(vf_res->vf_id_in_dev); + + adapter->irq_ctxt.max_cnt = le16_to_cpu(vf_res->max_vectors); + adapter->q_ctxt.max_cnt = le16_to_cpu(vf_res->q_cnt); + adapter->vsi_ctxt.vsi_cnt_max = le16_to_cpu(vf_res->num_vsis); + + for (i = 0; i < adapter->vsi_ctxt.vsi_cnt_max; i++) + adapter->vsi_ctxt.vsi_ids[i] = + le16_to_cpu(vf_res->vsi_res[i].vsi_id); + + adapter->rss_ctxt.rss_lut_type = le16_to_cpu(vf_res->rxft_cap.rss_lut_type); + adapter->rss_ctxt.rss_key_size = le16_to_cpu(vf_res->rxft_cap.rss_key_size); + adapter->rss_ctxt.rss_lut_size = le16_to_cpu(vf_res->rxft_cap.rss_lut_size); + adapter->fnav_ctxt.space_bsize = + le16_to_cpu(vf_res->rxft_cap.fnav_space_bsize); + adapter->fnav_ctxt.space_gsize = + le16_to_cpu(vf_res->rxft_cap.fnav_space_gsize); + + adapter->irq_ctxt.itr_gran = le16_to_cpu(vf_res->itr_gran); + if (!adapter->irq_ctxt.itr_gran) + adapter->irq_ctxt.itr_gran = SXE2VF_PFG_INT_CTL_ITR_GRAN_0; + + if (is_valid_ether_addr(vf_res->addr)) { + f = sxe2vf_addr_find(adapter, cur_mac_addr); + if (f) { + clear_bit(SXE2VF_MAC_OWNER_NETDEV, &f->usage); + if (f->usage == 0) { + ret = sxe2vf_addr_node_del(adapter, cur_mac_addr); + if (ret) + LOG_ERROR_BDF("del mac addr node:%pM\t" + "failed.\n", + cur_mac_addr); + } + } else { + LOG_ERROR_BDF("del mac addr:%pM failed.\n", cur_mac_addr); + } + ether_addr_copy(cur_mac_addr, vf_res->addr); + } + + adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist = + vf_res->port_vlan_exsit; + adapter->switch_ctxt.filter_ctxt.vlan_info.is_switchdev = + vf_res->is_switchdev; + adapter->switch_ctxt.filter_ctxt.vlan_info.max_cnt = + le16_to_cpu(vf_res->max_vlan_cnt); + adapter->switch_ctxt.filter_ctxt.vlan_info.dev_features = 0; + adapter->aux_ctxt.cdev_info.pf_cnt = vf_res->pf_cnt; + + adapter->hw.fw_ver.main_version_id = vf_res->fw_ver.main_version_id; + adapter->hw.fw_ver.sub_version_id = vf_res->fw_ver.sub_version_id; + adapter->hw.fw_ver.fix_version_id = vf_res->fw_ver.fix_version_id; + adapter->hw.fw_ver.build_id = vf_res->fw_ver.build_id; + adapter->txsch_cap.layer_cap = vf_res->vf_txsch_cap.layer_cap; + adapter->txsch_cap.tm_mid_node_num = vf_res->vf_txsch_cap.tm_mid_node_num; + adapter->txsch_cap.prio_num = vf_res->vf_txsch_cap.prio_num; + + LOG_INFO_BDF("vsi cnt:%u hw_vsi_id[0]:%u hw_vsi_id[1]:%u queue cnt:%u\t" + "irq cnt:%u 
itr_gran:%u def_mac:%pM port_vlan_exist:%u\t" + "is_switchdev:%u\n" + "vlan_max_cnt:%u pf_cnt:%u main_ver:%u sub_ver:%u fix_ver:%u\t" + "build_id:%u mode:%d.\n", + adapter->vsi_ctxt.vsi_cnt_max, adapter->vsi_ctxt.vsi_ids[0], + adapter->vsi_ctxt.vsi_ids[1], adapter->q_ctxt.max_cnt, + adapter->irq_ctxt.max_cnt, adapter->irq_ctxt.itr_gran, + adapter->switch_ctxt.filter_ctxt.mac_filter.cur_mac_addr, + adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist, + adapter->switch_ctxt.filter_ctxt.vlan_info.is_switchdev, + adapter->switch_ctxt.filter_ctxt.vlan_info.max_cnt, + adapter->aux_ctxt.cdev_info.pf_cnt, + adapter->hw.fw_ver.main_version_id, + adapter->hw.fw_ver.sub_version_id, + adapter->hw.fw_ver.fix_version_id, adapter->hw.fw_ver.build_id, + adapter->drv_mode); +} + +static u16 sxe2vf_vsi_cnt_min_get(struct sxe2vf_adapter *adapter) +{ + u16 vsi_cnt; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_KERNEL) + vsi_cnt = SXE2_VF_ETH_VSI_CNT; + else if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + vsi_cnt = SXE2_VF_DPDK_VSI_CNT; + else + vsi_cnt = SXE2_VF_ETH_VSI_CNT + SXE2_VF_DPDK_VSI_CNT; + + LOG_INFO_BDF("mode:%d min vsi cnt:%u\n", sxe2vf_com_mode_get(adapter), + vsi_cnt); + + return vsi_cnt; +} + +STATIC s32 sxe2vf_vsi_num_validate(struct sxe2vf_adapter *adapter, + struct sxe2_vf_vfres_msg *vf_res) +{ + if (vf_res->num_vsis < sxe2vf_vsi_cnt_min_get(adapter)) { + LOG_ERROR_BDF("vsi cnt invalid:%u\n", vf_res->num_vsis); + return -EINVAL; + } else if (le16_to_cpu(vf_res->num_vsis) > SXE2_VF_MAX_VSI_CNT) { + LOG_INFO_BDF("vsi num received:%d exceeds max num supported:%d\n", + le16_to_cpu(vf_res->num_vsis), SXE2_VF_MAX_VSI_CNT); + + vf_res->num_vsis = cpu_to_le16(SXE2_VF_MAX_VSI_CNT); + } + + return 0; +} +STATIC void sxe2vf_hw_stats_to_cpu(struct sxe2_vf_vsi_hw_stats *stats, + struct sxe2_vf_vsi_hw_stats *new_stats) +{ + new_stats->rx_vsi_unicast_packets = + le64_to_cpu(stats->rx_vsi_unicast_packets); + new_stats->rx_vsi_bytes = le64_to_cpu(stats->rx_vsi_bytes); + new_stats->tx_vsi_unicast_packets = + le64_to_cpu(stats->tx_vsi_unicast_packets); + new_stats->tx_vsi_bytes = le64_to_cpu(stats->tx_vsi_bytes); + new_stats->rx_vsi_multicast_packets = + le64_to_cpu(stats->rx_vsi_multicast_packets); + new_stats->tx_vsi_multicast_packets = + le64_to_cpu(stats->tx_vsi_multicast_packets); + new_stats->rx_vsi_broadcast_packets = + le64_to_cpu(stats->rx_vsi_broadcast_packets); + new_stats->tx_vsi_broadcast_packets = + le64_to_cpu(stats->tx_vsi_broadcast_packets); +} + +STATIC s32 sxe2vf_stats_get_reply_process(struct sxe2vf_adapter *adapter, + struct sxe2_vf_hw_stats_rsp *rsp_stats) +{ + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2_vf_vsi_hw_stats *new_stats = &vsi->vsi_stats.vsi_hw_stats; + struct sxe2_vf_vsi_hw_stats *stats = &rsp_stats->hw_stats; + + sxe2vf_hw_stats_to_cpu(stats, new_stats); + + return 0; +} + +s32 sxe2vf_stats_get_msg_send(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2vf_vsi_sw_stats *cur_stats = &vsi->vsi_stats.vsi_sw_stats; + struct sxe2vf_msg_params params = {0}; + s32 ret; + struct sxe2_vf_sw_stats msg = {0}; + struct sxe2_vf_hw_stats_rsp rsp = {{0}, 0}; + + sxe2vf_vsi_sw_stats_update(vsi); + msg.sw_stats.rx_bytes = cpu_to_le64(cur_stats->rx_bytes); + msg.sw_stats.rx_packets = cpu_to_le64(cur_stats->rx_packets); + msg.sw_stats.tx_bytes = cpu_to_le64(cur_stats->tx_bytes); + msg.sw_stats.tx_packets = cpu_to_le64(cur_stats->tx_packets); + + msg.vsi_id = 
cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + + msg.fnav_stats_idx = cpu_to_le16(adapter->fnav_ctxt.stat_idx); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_STATS_GET, &msg, sizeof(msg), &rsp, + sizeof(rsp)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("get vf stats msg send failed.\n"); + goto l_out; + } + + adapter->fnav_ctxt.fnav_match = le64_to_cpu(rsp.fnav_match); + ret = sxe2vf_stats_get_reply_process(adapter, &rsp); +l_out: + if (ret) + LOG_ERROR_BDF("vf stats get failed.\n"); + + return ret; +} + +s32 sxe2vf_stats_push_msg_send(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2vf_vsi_sw_stats *cur_stats = &vsi->vsi_stats.vsi_sw_stats; + struct sxe2vf_msg_params params = {0}; + s32 ret; + struct sxe2_vf_sw_stats msg = {0}; + struct sxe2_vf_hw_stats_rsp rsp = {{0}, 0}; + + sxe2vf_vsi_sw_stats_update(vsi); + msg.sw_stats.rx_bytes = cpu_to_le64(cur_stats->rx_bytes); + msg.sw_stats.rx_packets = cpu_to_le64(cur_stats->rx_packets); + msg.sw_stats.tx_bytes = cpu_to_le64(cur_stats->tx_bytes); + msg.sw_stats.tx_packets = cpu_to_le64(cur_stats->tx_packets); + + msg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + + msg.fnav_stats_idx = cpu_to_le16(adapter->fnav_ctxt.stat_idx); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_STATS_PUSH, &msg, sizeof(msg), &rsp, + sizeof(rsp)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("push vf stats msg send failed.\n"); + goto l_out; + } + + ret = sxe2vf_stats_get_reply_process(adapter, &rsp); +l_out: + if (ret) + LOG_ERROR_BDF("vf stats push failed.\n"); + + return ret; +} + +s32 sxe2vf_irq_map_setup(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2_vf_irq_map_msg *vsi_irqs_map; + struct sxe2_vf_irq_map *irq_map; + struct sxe2vf_irq_data *irq_data; + u32 len; + s32 ret; + u16 i, q_irq_cnt = vsi->irqs.cnt; + struct sxe2vf_msg_params params = {0}; + + len = (sizeof(struct sxe2_vf_irq_map_msg) + + (q_irq_cnt * sizeof(struct sxe2_vf_irq_map))); + vsi_irqs_map = kzalloc(len, GFP_KERNEL); + if (!vsi_irqs_map) { + LOG_ERROR_BDF("vsi irq map alloc failed.\n"); + return -ENOMEM; + } + + vsi_irqs_map->num_irqs = cpu_to_le16(q_irq_cnt); + vsi_irqs_map->vsi_id = cpu_to_le16(vsi->vsi_id); + + sxe2vf_for_each_vsi_irq(vsi, i) + { + irq_data = vsi->irqs.irq_data[i]; + irq_map = &vsi_irqs_map->irq_maps[i]; + + irq_map->irq_id = cpu_to_le16(i); + irq_map->rxq_map = cpu_to_le16((u16)irq_data->q_bitmap); + irq_map->txq_map = cpu_to_le16((u16)irq_data->q_bitmap); + irq_map->txitr_idx = cpu_to_le16(irq_data->tx.itr_idx); + irq_map->rxitr_idx = cpu_to_le16(irq_data->rx.itr_idx); + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_IRQ_MAP, vsi_irqs_map, len, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + + LOG_INFO_BDF("itr qs map msg:0x%x ret:%d.\n", SXE2_VF_IRQ_MAP, ret); + + kfree(vsi_irqs_map); + return ret; +} + +s32 sxe2vf_irq_map_clear(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2_vf_irq_unmap_msg vsi_irqs_unmap = {}; + + struct sxe2vf_msg_params params = {0}; + s32 ret; + + vsi_irqs_unmap.vsi_id = cpu_to_le16(vsi->vsi_id); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_IRQ_UNMAP, &vsi_irqs_unmap, + sizeof(struct sxe2_vf_irq_unmap_msg), NULL, + 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + + LOG_INFO_BDF("itr unmap msg:0x%x 
ret:%d.\n", SXE2_VF_IRQ_UNMAP, ret); + + return ret; +} + +STATIC void sxe2vf_link_update(struct sxe2vf_adapter *adapter, + struct sxe2_vf_link_msg *link_msg) +{ + struct net_device *netdev = adapter->netdev; + + lockdep_assert_held(&adapter->vsi_ctxt.lock); + + if (!adapter->vsi_ctxt.vf_vsi) { + LOG_INFO_BDF("vf vsi not create yet.\n"); + return; + } + + adapter->link_ctxt.speed = le32_to_cpu(link_msg->speed); + adapter->link_ctxt.link_up = link_msg->status; + if (adapter->vsi_ctxt.vf_vsi) { + if (adapter->link_ctxt.link_up && + !test_bit(SXE2VF_VSI_CLOSE, adapter->vsi_ctxt.vf_vsi->state)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + LOG_INFO_BDF("link update speed:%u link_up:%u vsi state:0x%lx\t" + "carrier:%d.\n", + adapter->link_ctxt.speed, adapter->link_ctxt.link_up, + *adapter->vsi_ctxt.vf_vsi->state, + netif_carrier_ok(netdev)); + } +} + +STATIC s32 sxe2vf_links_msg_func(struct sxe2vf_adapter *adapter, void *body) +{ + struct sxe2_vf_link_msg *link_msg = (struct sxe2_vf_link_msg *)body; + + mutex_lock(&adapter->vsi_ctxt.lock); + sxe2vf_link_update(adapter, link_msg); + (void)sxe2_com_irq_notifier_call_chain(&adapter->com_ctxt, + SXE2_COM_EC_LINK_CHG); + mutex_unlock(&adapter->vsi_ctxt.lock); + + return 0; +} + +s32 sxe2vf_vlan_offload_msg_send(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2vf_vlan_offload *vlan_offload = &vlan_info->vlan_offload; + struct sxe2_vf_vlan_offload_cfg vlan_cfg = {0}; + enum sxe2_vf_opcode opcode = SXE2_VF_VLAN_OFFLOAD_CFG; + s32 ret; + + vlan_cfg.ctag_insert_enable = vlan_offload->ctag_insert_enable; + vlan_cfg.stag_insert_enable = vlan_offload->stag_insert_enable; + vlan_cfg.ctag_strip_enable = vlan_offload->ctag_strip_enable; + vlan_cfg.stag_strip_enable = vlan_offload->stag_strip_enable; + + ret = sxe2vf_mbx_common_msg_send(adapter, opcode, (u8 *)&vlan_cfg, + sizeof(vlan_cfg)); + if (ret) + LOG_WARN_BDF("vlan offload msg send result:%d.\n", ret); + + LOG_INFO_BDF("vlan offload msg handle.\n"); + return ret; +} + +s32 sxe2vf_vlan_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_vlan *vlan, + bool add) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_vlan_filter_msg *msg; + u16 len; + s32 ret; + enum sxe2_vf_opcode opcode; + + len = (u16)struct_size(msg, elem, 1); + msg = kzalloc(len, GFP_KERNEL); + if (!msg) { + LOG_ERROR_BDF("vlan tpid:0x%x vid:%u prio:%u alloc failed.\n", + vlan->tpid, vlan->vid, vlan->prio); + return -ENOMEM; + } + + msg->vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + msg->vlan_cnt = cpu_to_le16(1); + msg->elem[0].tpid = cpu_to_le16(vlan->tpid); + msg->elem[0].vid = cpu_to_le16(vlan->vid); + + if (add) + opcode = SXE2_VF_VLAN_ADD; + else + opcode = SXE2_VF_VLAN_DEL; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, opcode, + msg, len, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("vlan tpid:0x%x vid:%u prio:%u send failed %u.\n", + vlan->tpid, vlan->vid, vlan->prio, ret); + kfree(msg); + + return ret; +} + +s32 sxe2vf_user_vlan_msg_send(struct sxe2vf_adapter *adapter, u16 vsi_id, + struct sxe2vf_vlan *vlan, bool is_add) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_user_vlan_msg msg = {0}; + s32 ret; + + msg.is_add = is_add; + msg.vsi_id = cpu_to_le16(vsi_id); + msg.vlan.tpid = cpu_to_le16(vlan->tpid); + msg.vlan.vid = cpu_to_le16(vlan->vid); + + 
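+	/*
+	 * Every request in this file follows the same two-step pattern:
+	 * fill the default message params (opcode, wait mode, buffers and
+	 * a fresh trace id), then hand the message to sxe2vf_mbx_msg_send(),
+	 * which blocks according to the chosen sxe2vf_resp_wait_mode.
+	 */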
sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_USER_VLAN_PROCESS, &msg, sizeof(msg), + NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("vlan tpid:0x%x vid:%u prio:%u send failed %u.\n", + vlan->tpid, vlan->vid, vlan->prio, ret); + + return ret; +} + +STATIC bool sxe2vf_is_in_reset(struct sxe2vf_adapter *adapter) +{ + enum sxe2vf_dev_state state; + enum sxe2vf_reset_type reset_type; + + sxe2vf_dev_state_get(adapter, &state, &reset_type); + + if (state == SXE2VF_DEVSTATE_STOPPED || state == SXE2VF_DEVSTATE_RESETTING) + return true; + else + return false; +} + +STATIC bool sxe2vf_hw_vfr_is_occur(struct sxe2vf_hw *hw) +{ + u32 val; + bool ret = false; + struct sxe2vf_adapter *adapter = hw->adapter; + + val = sxe2vf_reg_read(hw, SXE2VF_MBX_RQ_LEN); + ret = val & SXE2VF_MBX_Q_LEN_VFE_M; + if (!!ret) + LOG_INFO_BDF("vf reset occur\n"); + + return ret; +} + +void sxe2vf_wait_in_resetting(struct sxe2vf_adapter *adapter, bool is_close) +{ + s32 ret = 0; + u16 detect_times; + u16 over_times = SXE2VF_RESET_DETEC_WAIT_COUNT; + + if (sxe2vf_is_in_reset(adapter)) { + LOG_INFO_BDF("vf is in resetting or removing.(err:%d)\n", ret); + return; + } + + ret = sxe2vf_reset_msg_send(adapter); + if (ret) { + LOG_DEV_INFO("reset request failed.(err:%d)\n", ret); + return; + } + + if (!is_close) + over_times = SXE2VF_RESET_ROBACK_WAIT_COUNT; + + for (detect_times = 0; detect_times < over_times; detect_times++) { + if (sxe2vf_hw_vfr_is_occur(&adapter->hw)) + break; + msleep(SXE2VF_RESET_WAIT_MIN); + } + + if (detect_times >= over_times) + LOG_DEV_INFO("vf wait resetting time out(%d), rc:%d.\n", over_times, + ret); +} + +s32 sxe2vf_txrxq_dis_request(struct sxe2vf_adapter *adapter, bool is_close) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_qps_dis_msg msg = {0}; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + s32 ret = 0; + + msg.qps_cnt = cpu_to_le16(vsi->txqs.q_cnt); + msg.vsi_id = cpu_to_le16(vsi->vsi_id); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_QUEUES_DISABLE, &msg, sizeof(msg), + NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("txrx queues disable failed.(err:%d)\n", ret); + sxe2vf_wait_in_resetting(adapter, is_close); + } + return ret; +} + +s32 sxe2vf_promisc_set_msg_send(struct sxe2vf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct sxe2_vf_promisc_msg msg = {0}; + u32 promisc_flags = 0; + s32 ret = 0; + + mutex_lock(&adapter->switch_ctxt.flag_lock); + + if (!sxe2vf_promisc_mode_changed(adapter)) { + LOG_INFO_BDF("promisc mode is not change.\n"); + goto l_out; + } + + adapter->switch_ctxt.filter_ctxt.cur_promisc_flags = netdev->flags; + + if ((adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_ALLMULTI) && + (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & + SXE2_VF_PROMISC_MULTICAST))) + promisc_flags |= SXE2_VF_PROMISC_MULTICAST; + + if ((adapter->switch_ctxt.filter_ctxt.cur_promisc_flags & IFF_PROMISC) && + (!(adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & + SXE2_VF_PROMISC))) + promisc_flags |= SXE2_VF_PROMISC | SXE2_VF_PROMISC_MULTICAST; + + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + promisc_flags |= SXE2_VF_VLAN_FILTER; + + msg.is_user = false; + msg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + msg.flags = cpu_to_le32(promisc_flags); + + ret = sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_PROMISC_CFG, (u8 *)&msg, + sizeof(msg)); + if (ret) + LOG_ERROR_BDF("set 
promisc msg handle result:%d.\n", ret);
+
+l_out:
+	mutex_unlock(&adapter->switch_ctxt.flag_lock);
+	return ret;
+}
+
+s32 sxe2vf_user_promisc_set_msg_send(struct sxe2vf_adapter *adapter, u16 vsi_id)
+{
+	struct sxe2_vf_promisc_msg msg = {0};
+	u32 promisc_flags = 0;
+	s32 ret = 0;
+
+	if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_ALLMULTI)
+		promisc_flags |= SXE2_VF_PROMISC_MULTICAST;
+	if (adapter->switch_ctxt.user_fltr_ctxt.cur_promisc_flags & IFF_PROMISC)
+		promisc_flags |= SXE2_VF_PROMISC;
+
+	msg.is_user = true;
+	msg.vsi_id = cpu_to_le16(vsi_id);
+	msg.flags = cpu_to_le32(promisc_flags);
+
+	ret = sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_PROMISC_CFG, (u8 *)&msg,
+					 sizeof(msg));
+	if (ret)
+		LOG_ERROR_BDF("set promisc msg handle result:%d.\n", ret);
+
+	return ret;
+}
+
+s32 sxe2vf_user_promisc_update_msg_send(struct sxe2vf_adapter *adapter, u16 vsi_id,
+					bool to_user, bool is_promisc)
+{
+	struct sxe2_vf_promisc_update_msg msg = {0};
+	s32 ret = 0;
+
+	msg.to_user = to_user;
+	msg.is_promisc = is_promisc;
+	if (to_user)
+		msg.vsi_id = cpu_to_le16(vsi_id);
+	else
+		msg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id);
+
+	ret = sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_PROMISC_UPDATE, (u8 *)&msg,
+					 sizeof(msg));
+	if (ret)
+		LOG_ERROR_BDF("update promisc msg handle result:%d.\n", ret);
+
+	return ret;
+}
+
+static void sxe2vf_addr_type_set(struct sxe2_vf_addr *msg,
+				 const struct sxe2vf_mac *mac_info)
+{
+	msg->type = mac_info->attr.is_vf_mac ? SXE2_VF_MAC_TYPE_P
+					     : SXE2_VF_MAC_TYPE_C;
+}
+
+s32 sxe2vf_mac_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_mac *mac_info,
+			bool add, bool is_user, u16 vsi_id)
+{
+	struct sxe2_vf_addr_msg *msg;
+	u16 len;
+	s32 ret = 0;
+	enum sxe2_vf_opcode opcode;
+
+	len = (u16)struct_size(msg, elem, 1);
+	msg = kzalloc(len, GFP_KERNEL);
+	if (!msg) {
+		ret = -ENOMEM;
+		goto l_out;
+	}
+
+	msg->is_user = is_user;
+	msg->vsi_id = cpu_to_le16(vsi_id);
+
+	msg->addr_cnt = cpu_to_le16(1);
+	ether_addr_copy(msg->elem[0].addr, mac_info->macaddr);
+	sxe2vf_addr_type_set(&msg->elem[0], mac_info);
+
+	if (add)
+		opcode = SXE2_VF_MAC_ADDR_ADD;
+	else
+		opcode = SXE2_VF_MAC_ADDR_DEL;
+
+	ret = sxe2vf_mbx_common_msg_send(adapter, opcode, (u8 *)msg, len);
+	if (ret)
+		LOG_ERROR_BDF("mac opcode:0x%x send failed ret %d.\n", opcode, ret);
+
+	kfree(msg);
+
+	LOG_INFO_BDF("mac msg handle.\n");
+
+l_out:
+	return ret;
+}
+
+s32 sxe2vf_mac_update_msg_send(struct sxe2vf_adapter *adapter, const u8 *macaddr,
+			       bool to_user)
+{
+	struct sxe2_vf_addr_update_msg msg = {0};
+	s32 ret = 0;
+
+	msg.to_user = to_user;
+	ether_addr_copy(msg.addr, macaddr);
+	msg.vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id);
+
+	ret = sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_MAC_ADDR_UPDATE,
+					 (u8 *)(&msg), sizeof(msg));
+	if (ret)
+		LOG_ERROR_BDF("mac opcode:0x%x send failed ret %d.\n",
+			      SXE2_VF_MAC_ADDR_UPDATE, ret);
+
+	LOG_INFO_BDF("mac msg handle.\n");
+	return ret;
+}
+
+s32 sxe2vf_vlan_filter_msg_send(struct sxe2vf_adapter *adapter, bool is_user)
+{
+	struct sxe2vf_vlan_filter *filter_offload;
+	struct sxe2_vf_vlan_filter_cfg filter_cfg = {0};
+	enum sxe2_vf_opcode opcode = SXE2_VF_VLAN_FILTER_CFG;
+	s32 ret;
+
+	if (is_user)
+		filter_offload = &adapter->switch_ctxt.user_fltr_ctxt.vlan_info
+					  .filter_offload;
+	else
+		filter_offload = &adapter->switch_ctxt.filter_ctxt.vlan_info
+					  .filter_offload;
+
+	filter_cfg.is_user = is_user;
+	filter_cfg.ctag_filter_enable = filter_offload->ctag_filter_enable;
+	filter_cfg.stag_filter_enable =
filter_offload->stag_filter_enable; + + ret = sxe2vf_mbx_common_msg_send(adapter, opcode, (u8 *)&filter_cfg, + sizeof(filter_cfg)); + if (ret) + LOG_WARN_BDF("vlan filter msg send failed %d.\n", ret); + + LOG_INFO_BDF("vlan filter msg handle.\n"); + return ret; +} + +#ifdef SXE2VF_MAC_VLAN_CLEAR +static void sxe2vf_vlan_msg_size_modify(u16 *count, u32 *len) +{ + struct sxe2_vf_vlan_filter_msg *msg; + + *len = struct_size(msg, elem, *count); + + if (*len > + SXE2VF_MBX_RAW_MSG_MAX_SIZE) { + LOG_WARN("Too many vlan changes in one request\n"); + *count = (SXE2VF_MBX_RAW_MSG_MAX_SIZE - + sizeof(struct sxe2_vf_vlan_filter_msg)) / + sizeof(struct sxe2_vf_vlan); + *len = struct_size(msg, elem, *count); + } +} + +static void sxe2vf_addr_msg_size_modify(u16 *count, u32 *len) +{ + struct sxe2_vf_addr_msg *msg; + + *len = struct_size(msg, elem, *count); + + if (*len > + SXE2VF_MBX_RAW_MSG_MAX_SIZE) { + LOG_WARN("Too many MAC changes in one request\n"); + *count = (SXE2VF_MBX_RAW_MSG_MAX_SIZE - + sizeof(struct sxe2_vf_addr_msg)) / + sizeof(struct sxe2_vf_addr); + *len = struct_size(msg, elem, *count); + } +} + +s32 sxe2vf_vlan_clear_msg_send(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + struct sxe2_vf_vlan_filter_msg *msg; + struct sxe2vf_vlan_node *f; + u32 len; + u32 buf_len; + s32 ret = 0; + u16 i = 0; + u16 count = 0; + u16 left = 0; + + list_for_each_entry(f, &vlan_info->vlan_list, list) left++; + + if (!left) + goto l_out; + + count = left; + sxe2vf_vlan_msg_size_modify(&count, &len); + buf_len = len; + msg = kzalloc(len, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto l_out; + } + + list_for_each_entry(f, &vlan_info->vlan_list, list) + { + msg->elem->tpid = cpu_to_le16(f->vlan.tpid); + msg->elem->vid = cpu_to_le16(f->vlan.vid); + i++; + left--; + if (i == count || left == 0) { + msg->vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + msg->vlan_cnt = cpu_to_le16(count); + + sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_VLAN_DEL, + (u8 *)msg, len); + + count = left; + sxe2vf_vlan_msg_size_modify(&count, &len); + + (void)memset(msg, 0, buf_len); + } + } + + kfree(msg); + + LOG_INFO_BDF("mac clear msg handle.\n"); + +l_out: + return ret; +} + +s32 sxe2vf_mac_clear_msg_send(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + struct sxe2_vf_addr_msg *msg; + struct sxe2vf_addr_node *f; + struct sxe2vf_addr_node *ftmp; + u32 len; + u32 buf_len; + s32 ret = 0; + u16 i = 0; + u16 count = 0; + u16 left = 0; + + list_for_each_entry(f, &filter->mac_addr_list, list) left++; + + if (!left) + goto l_out; + + count = left; + sxe2vf_addr_msg_size_modify(&count, &len); + buf_len = len; + msg = kzalloc(len, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto l_out; + } + + list_for_each_entry_safe(f, ftmp, &filter->mac_addr_list, list) + { + ether_addr_copy(msg->elem[i].addr, f->mac.macaddr); + sxe2vf_addr_type_set(&msg->elem[i], &f->mac); + i++; + left--; + if (i == count || left == 0) { + msg->vsi_id = cpu_to_le16(adapter->vsi_ctxt.vf_vsi->vsi_id); + msg->addr_cnt = cpu_to_le16(count); + + sxe2vf_mbx_common_msg_send(adapter, SXE2_VF_MAC_ADDR_DEL, + (u8 *)msg, len); + + count = left; + sxe2vf_addr_msg_size_modify(&count, &len); + + (void)memset(msg, 0, buf_len); + } + } + + kfree(msg); + + LOG_INFO_BDF("mac clear msg handle.\n"); + +l_out: + return ret; +} +#endif +s32 sxe2vf_qv_map_msg_send(struct sxe2vf_adapter *adapter, + struct aux_qvlist_info *qvl_info, 
bool map) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_qv_map_msg *qvmap_msg; + s32 ret; + u32 i; + u32 size; + enum sxe2_vf_opcode opcode; + + if (!qvl_info || !qvl_info->num_vectors) { + LOG_INFO_BDF("Invalid MSIX vector information from IDC driver\n"); + return -EINVAL; + } + + size = (u32)(sizeof(struct sxe2_vf_qv_map_msg) + + (sizeof(struct sxe2_vf_qv_info) * qvl_info->num_vectors)); + + qvmap_msg = kzalloc(size, GFP_KERNEL); + if (!qvmap_msg) { + LOG_INFO_BDF("memory not enough! buffer is nullptr.\n"); + return -ENOMEM; + } + + qvmap_msg->num_vectors = qvl_info->num_vectors; + + for (i = 0; i < qvmap_msg->num_vectors; i++) { + struct aux_qv_info *aux_qv_info = &qvl_info->qv_info[i]; + struct sxe2_vf_qv_info *vc_qv_info = &qvmap_msg->qv_info[i]; + + vc_qv_info->v_idx = aux_qv_info->v_idx; + vc_qv_info->ceq_idx = aux_qv_info->ceq_idx; + vc_qv_info->aeq_idx = aux_qv_info->aeq_idx; + vc_qv_info->itr_idx = aux_qv_info->itr_idx; + } + + opcode = map ? SXE2_VF_QV_MAP : SXE2_VF_QV_UNMAP; + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, opcode, + qvmap_msg, size, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + kfree(qvmap_msg); + return ret; +} + +s32 sxe2vf_aux_mgr_msg_send(struct sxe2vf_adapter *adapter, u32 opcode, u8 *req_msg, + u16 req_len, u8 *recv_msg, u16 recv_len) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_rdma_mgr_cmd_msg *auxmgr_msg; + u32 size; + s32 ret; + + size = req_len + sizeof(*auxmgr_msg); + auxmgr_msg = kzalloc(size, GFP_KERNEL); + if (!auxmgr_msg) { + LOG_INFO_BDF("invalid params! buffer is nullptr.\n"); + return -ENOMEM; + } + + auxmgr_msg->opcode = opcode; + auxmgr_msg->msg_len = req_len; + auxmgr_msg->resv_len = recv_len; + (void)memcpy(auxmgr_msg->msg, req_msg, req_len); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_RDMA_MGR_CMD, auxmgr_msg, size, + recv_msg, recv_len); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + kfree(auxmgr_msg); + return ret; +} + +s32 sxe2vf_link_status_request(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret; + struct sxe2_vf_link_msg link_msg = {0}; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_LINK_STATUS_GET, NULL, 0, &link_msg, + sizeof(struct sxe2_vf_link_msg)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (!ret) + sxe2vf_link_update(adapter, &link_msg); + + return ret; +} + +s32 sxe2vf_com_link_info_request(struct sxe2vf_adapter *adapter, u8 *link_state, + u32 *link_speed) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret; + struct sxe2_vf_link_msg link_msg = {0}; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_LINK_STATUS_GET, NULL, 0, &link_msg, + sizeof(struct sxe2_vf_link_msg)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (!ret) { + *link_speed = le32_to_cpu(link_msg.speed); + *link_state = link_msg.status; + } + + if (!link_msg.status) + *link_state = SXE2_LINK_SPEED_VF_UNKNOW; + + return ret; +} + +s32 sxe2vf_ethtool_info_request(struct sxe2vf_adapter *adapter, + struct sxe2_msg_ethtool_info *link_cfg) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_GET_ETHTOOL_INFO, NULL, 0, link_cfg, + sizeof(struct sxe2_msg_ethtool_info)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_INFO_BDF("vf get ethtool info failed (err:%d).\n", ret); + + return ret; +} + +s32 sxe2vf_rdma_msg_send(struct sxe2vf_adapter *adapter, u8 *msg, u16 len, 
+ u8 *recv_msg, u16 recv_len) +{ + struct sxe2vf_msg_params params = {0}; + s32 ret; + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_RDMA, msg, len, recv_msg, recv_len); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("rdma msg send failed.(err:%d)\n", ret); + + return ret; +} + +STATIC s32 sxe2vf_reset_msg_func(struct sxe2vf_adapter *adapter, void *body) +{ + LOG_INFO_BDF("rcv reset notify flag:0x%lx.\n", *adapter->flags); + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_VFR_NOTIFY, SXE2VF_RESET_NONE); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + + return 0; +} + +struct sxe2vf_mbx_msg_table pf_msg_table[SXE2_VF_OPCODE_NR] = { + [SXE2_VF_LINK_UPDATE_NOTIFY] = {SXE2_VF_LINK_UPDATE_NOTIFY, + sxe2vf_links_msg_func}, + [SXE2_VF_RESET_NOTIFY] = {SXE2_VF_RESET_NOTIFY, + sxe2vf_reset_msg_func}, +}; + +struct sxe2vf_mbx_msg_table *sxe2vf_mbx_msg_table_get(void) +{ + return &pf_msg_table[0]; +} + +void sxe2vf_trace_id_init(void) +{ + u32 cpu; + union sxe2vf_trace_info *id; + + for_each_possible_cpu(cpu) + { + id = (union sxe2vf_trace_info *)&per_cpu(sxe2vf_trace_id, cpu); + + id->sxe2vf_trace_id_param.cpu_id = + (cpu & SXE2VF_TRACE_ID_CHIP_OUT_CPUID_MASK); + id->sxe2vf_trace_id_param.count = 0; + id->sxe2vf_trace_id_param.type = + (SXE2VF_MSG_TYPE_VF_TO_PF & + SXE2VF_TRACE_ID_CHIP_OUT_TYPE_MASK); + } +} + +s32 sxe2vf_drv_ver_match(struct sxe2vf_adapter *adapter) +{ + struct sxe2_vf_ver_msg vf_ver; + struct sxe2_vf_ver_msg pf_ver; + struct sxe2vf_msg_params params = {0}; + s32 ret; + u16 major = 0; + u16 minor = 0; + + vf_ver.major = cpu_to_le16(SXE2_VF_VERSION_MAJOR); + vf_ver.minor = cpu_to_le16(SXE2_VF_VERSION_MINOR); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_POLLING, + SXE2_VF_VERSION_MATCH, &vf_ver, + sizeof(struct sxe2_vf_ver_msg), &pf_ver, + sizeof(struct sxe2_vf_ver_msg)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (!ret) { + major = le16_to_cpu(pf_ver.major); + minor = le16_to_cpu(pf_ver.minor); + + if (major != SXE2_VF_VERSION_MAJOR) { + ret = -EIO; + LOG_DEV_ERR("unsupport pf version expected %d.%d\t" + "received %d.%d\n", + SXE2_VF_VERSION_MAJOR, SXE2_VF_VERSION_MINOR, + major, minor); + goto l_out; + } + adapter->pf_ver.major = major; + adapter->pf_ver.minor = minor; + } else { + ret = -ETIMEDOUT; + LOG_ERROR_BDF("get pf version fail!\n"); + } + +l_out: + LOG_INFO_BDF("opcode:0x%x vf version:%d.%d pf version:%d.%d\t" + "ret:%d.\n", + SXE2_VF_VERSION_MATCH, SXE2_VF_VERSION_MAJOR, + SXE2_VF_VERSION_MINOR, major, minor, ret); + + return ret; +} + +s32 sxe2vf_func_caps_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_vfres_msg_req vf_req; + struct sxe2_vf_vfres_msg vf_res; + s32 ret; + + vf_req.driver_type = SXE2_DRIVER_TYPE_VF; + vf_req.support_sw_stats = 1; + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_POLLING, + SXE2_VF_HW_RES_GET, &vf_req, sizeof(vf_req), + &vf_res, sizeof(struct sxe2_vf_vfres_msg)); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("hw caps init failed.%d\n", ret); + goto l_out; + } + + ret = sxe2vf_irqs_num_validate(adapter, &vf_res); + if (ret) { + LOG_ERROR_BDF("irq cnt: invalid.%d\n", ret); + goto l_out; + } + + ret = sxe2vf_queues_num_validate(adapter, &vf_res); + if (ret) { + LOG_ERROR_BDF("queue cnt: invalid.%d\n", ret); + goto l_out; + } + + ret = sxe2vf_vsi_num_validate(adapter, &vf_res); + if (ret) { + LOG_ERROR_BDF("vsi cnt invalid.%d\n", ret); + goto l_out; + } + + 
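+	/*
+	 * The irq, queue and vsi counts advertised by the PF have all been
+	 * validated above; commit the negotiated resources to the adapter's
+	 * software capability context.
+	 */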
sxe2vf_sw_caps_set(adapter, &vf_res);
+
+l_out:
+	return ret;
+}
+
+void sxe2vf_func_caps_deinit(struct sxe2vf_adapter *adapter)
+{
+	struct sxe2_vf_vfres_msg vf_res;
+
+	(void)memset(&vf_res, 0, sizeof(vf_res));
+
+	sxe2vf_sw_caps_set(adapter, &vf_res);
+}
+
+s32 __sxe2vf_drv_mode_get(struct sxe2vf_adapter *adapter,
+			  struct sxe2_vf_drv_mode_resp *vf_resp, u32 resp_len,
+			  enum sxe2vf_resp_wait_mode mode)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, mode, SXE2_VF_DRV_MODE_GET, NULL, 0,
+					vf_resp, resp_len);
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("drv mode get failed.%d\n", ret);
+
+	return ret;
+}
+
+s32 sxe2vf_drv_mode_get(struct sxe2vf_adapter *adapter,
+			enum sxe2vf_resp_wait_mode mode)
+{
+	s32 ret = 0;
+	struct sxe2_vf_drv_mode_resp vf_resp = {0};
+
+	ret = __sxe2vf_drv_mode_get(adapter, &vf_resp, sizeof(vf_resp), mode);
+	if (!ret) {
+		if (vf_resp.drv_mode != SXE2_COM_MODULE_UNDEFINED) {
+			adapter->drv_mode = vf_resp.drv_mode;
+			goto end;
+		} else {
+			ret = -EINVAL;
+		}
+	}
+
+	if (sxe2vf_g_com_mode_get() != SXE2_COM_MODULE_UNDEFINED) {
+		adapter->drv_mode = sxe2vf_g_com_mode_get();
+		ret = 0;
+	}
+
+end:
+	return ret;
+}
+
+s32 sxe2vf_drv_mode_set(struct sxe2vf_adapter *adapter, enum sxe2_com_module type)
+{
+	struct sxe2vf_msg_params params = {0};
+	struct sxe2_vf_drv_mode_req vf_req = {0};
+	s32 ret;
+
+	vf_req.drv_mode = type;
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NO_RESP,
+					SXE2_VF_DRV_MODE_SET, &vf_req,
+					sizeof(vf_req), NULL, 0);
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("drv mode set failed.%d\n", ret);
+
+	return ret;
+}
+
+s32 sxe2vf_ipsec_get_capa_msg_send(struct sxe2vf_adapter *adapter)
+{
+	struct sxe2vf_get_capa_response msg = {0};
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_IPSEC_GET_CAPA, NULL, 0, &msg,
+					sizeof(struct sxe2vf_get_capa_response));
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (!ret) {
+		adapter->ipsec_ctxt.max_rx_sa_cnt = msg.rx_sa_cnt;
+		adapter->ipsec_ctxt.max_tx_sa_cnt = msg.tx_sa_cnt;
+	} else {
+		adapter->ipsec_ctxt.max_rx_sa_cnt = 0;
+		adapter->ipsec_ctxt.max_tx_sa_cnt = 0;
+	}
+
+	return ret;
+}
+
+s32 sxe2vf_ipsec_add_txsa_msg_send(struct sxe2vf_adapter *adapter,
+				   struct sxe2vf_tx_sa *sa_info, bool is_restore)
+{
+	struct sxe2_vf_ipsec_sa_add_msg req = {0};
+	struct sxe2_vf_ipsec_sa_add_resp resp = {0};
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	req.dir = SXE2_IPSEC_DIR_TX;
+	req.sa_idx = SXE2_IPSEC_INVAILID_SA_IDX;
+
+	req.mode = 0;
+	if (sa_info->is_auth)
+		req.mode |= SXE2_MBX_IPSEC_AUTH;
+
+	if (sa_info->engine)
+		req.mode |= SXE2_MBX_IPSEC_SM4;
+
+	(void)memcpy(req.enc_key, sa_info->enc_key, SXE2_MBX_IPSEC_KEY_LEN);
+	(void)memcpy(req.auth_key, sa_info->auth_key, SXE2_MBX_IPSEC_KEY_LEN);
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_IPSEC_SA_ADD,
+					&req, sizeof(struct sxe2_vf_ipsec_sa_add_msg), &resp,
+					sizeof(struct sxe2_vf_ipsec_sa_add_resp));
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_DEV_ERR("Failed to call mbx add sa, error code: %d\n", ret);
+	else
+		sa_info->hw_index = (u16)resp.sa_idx;
+
+	return ret;
+}
+
+s32 sxe2vf_ipsec_add_rxsa_msg_send(struct sxe2vf_adapter *adapter,
+				   struct sxe2vf_rx_sa *sa_info, bool is_restore)
+{
+	struct sxe2_vf_ipsec_sa_add_msg req = {0};
+	struct sxe2_vf_ipsec_sa_add_resp resp = {0};
+	struct sxe2vf_msg_params params = {0};
+	s32 ret = 0;
+
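+	/*
+	 * Build the RX SA add request: direction, auth/SM4 mode bits, IPv4
+	 * or IPv6 address, SPI and keys. On success the PF returns the
+	 * allocated SA index in resp.sa_idx.
+	 */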
+	req.dir = SXE2_IPSEC_DIR_RX;
+	req.sa_idx = SXE2_IPSEC_INVAILID_SA_IDX;
+
+	req.mode = 0;
+	if (sa_info->is_auth)
+		req.mode |= SXE2_MBX_IPSEC_AUTH;
+
+	if (sa_info->engine)
+		req.mode |= SXE2_MBX_IPSEC_SM4;
+
+	if (sa_info->ipv6) {
+		req.mode |= SXE2_MBX_IPSEC_IPV6;
+
+		(void)memcpy(req.addr, sa_info->ipaddr, SCBGE_MBX_IPSEC_IPV6_LEN);
+	} else {
+		(void)memcpy(req.addr, sa_info->ipaddr, SCBGE_MBX_IPSEC_IPV4_LEN);
+	}
+	req.spi = sa_info->spi;
+	(void)memcpy(req.enc_key, sa_info->enc_key, SXE2_MBX_IPSEC_KEY_LEN);
+	(void)memcpy(req.auth_key, sa_info->auth_key, SXE2_MBX_IPSEC_KEY_LEN);
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_IPSEC_SA_ADD,
+					&req, sizeof(struct sxe2_vf_ipsec_sa_add_msg), &resp,
+					sizeof(struct sxe2_vf_ipsec_sa_add_resp));
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_DEV_ERR("Failed to call mbx add sa, error code: %d\n", ret);
+	else
+		sa_info->hw_index = (u16)resp.sa_idx;
+
+	return ret;
+}
+
+s32 sxe2vf_ipsec_clear_sa_msg_send(struct sxe2vf_adapter *adapter, u8 direction,
+				   u32 sa_index)
+{
+	struct sxe2_vf_ipsec_sa_del_msg req = {0};
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	req.dir = direction;
+	req.sa_idx = cpu_to_le16((u16)sa_index);
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_IPSEC_SA_CLEAR,
+					&req, sizeof(struct sxe2_vf_ipsec_sa_del_msg), NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("Failed to call mbx delete sa, error code: %d\n", ret);
+
+	return ret;
+}
+
+s32 sxe2vf_rdma_dump_pcap_msg_send(struct sxe2vf_adapter *adapter, u8 *mac,
+				   bool is_add)
+{
+	struct sxe2vf_rdma_dump_pcap_msg req = {0};
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	(void)memcpy(req.mac, mac, ETH_ALEN);
+	req.is_add = is_add;
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_RDMA_DUMP_PCAP, &req, sizeof(req),
+					NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("Failed to call mbx rdma dump pcap, error code: %d\n",
+			      ret);
+
+	return ret;
+}
+
+#ifdef SXE2_SUPPORT_ACL
+s32 sxe2vf_acl_filter_clear_msg_send(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_ACL_FILTER_CLEAR, NULL, 0, NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("Failed to call mbx clear acl filter, error code: %d\n",
+			      ret);
+
+	return ret;
+}
+#endif
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.h
new file mode 100644
index 0000000000000000000000000000000000000000..e223fa486d0538bb0b9d37adef62d1c6eac59719
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_mbx_msg.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ * + * @file: sxe2vf_mbx_msg.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_MBX_MSG_H__ +#define __SXE2VF_MBX_MSG_H__ + +#include "sxe2vf_mbx_channel.h" +#include "sxe2_mbx_public.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_aux_drv.h" +#include "sxe2vf_ipsec.h" +#include "sxe2_cmd.h" +#include "sxe2_com_cdev.h" + +struct sxe2vf_adapter; + +struct sxe2vf_mbx_msg_table { + u32 opcode; + s32 (*func)(struct sxe2vf_adapter *adapter, void *body); +}; + +struct sxe2vf_msg_req_table { + s32 (*func)(struct sxe2vf_adapter *adapter); +}; + +union sxe2vf_trace_info { + u64 id; + struct { + u64 count : 50; + u64 cpu_id : 10; + u64 type : 4; + } sxe2vf_trace_id_param; +}; + +struct sxe2vf_mbx_msg_table *sxe2vf_mbx_msg_table_get(void); + +s32 sxe2vf_drv_ver_match(struct sxe2vf_adapter *adapter); + +void sxe2vf_trace_id_init(void); + +s32 sxe2vf_promisc_set_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mac_msg_send(struct sxe2vf_adapter *adapter, + struct sxe2vf_mac *mac_info, bool add, bool is_user, + u16 vsi_id); + +s32 sxe2vf_mac_clear_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vlan_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_vlan *vlan, bool add); + +s32 sxe2vf_vlan_clear_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_vlan_filter_msg_send(struct sxe2vf_adapter *adapter, bool is_user); + +s32 sxe2vf_vlan_offload_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_res_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_rdma_msg_send(struct sxe2vf_adapter *adapter, u8 *msg, + u16 len, u8 *recv_msg, u16 recv_len); + +s32 sxe2vf_txq_cfg_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_vsi *vsi); + +s32 sxe2vf_txq_stop_msg_send(struct sxe2vf_adapter *adapter, struct sxe2vf_queue *txq); + +s32 sxe2vf_reset_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_stats_get_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_stats_push_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_qv_map_msg_send(struct sxe2vf_adapter *adapter, + struct aux_qvlist_info *qv_info, bool map); + +s32 sxe2vf_aux_mgr_msg_send(struct sxe2vf_adapter *adapter, u32 opcode, + u8 *req_msg, u16 req_len, u8 *recv_msg, + u16 recv_len); + +s32 sxe2vf_ipsec_get_capa_msg_send(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_ipsec_add_txsa_msg_send(struct sxe2vf_adapter *adapter, + struct sxe2vf_tx_sa *sa_info, bool is_restore); + +s32 sxe2vf_ipsec_add_rxsa_msg_send(struct sxe2vf_adapter *adapter, + struct sxe2vf_rx_sa *sa_info, bool is_restore); + +s32 sxe2vf_ipsec_clear_sa_msg_send(struct sxe2vf_adapter *adapter, u8 direction, u32 sa_index); + +void sxe2vf_mbx_msg_dflt_params_fill(struct sxe2vf_msg_params *params, + enum sxe2vf_resp_wait_mode mode, + enum sxe2_vf_opcode opc, void *in_data, + u32 in_len, void *out_data, u32 out_len); + +s32 sxe2vf_irq_map_setup(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_irq_map_clear(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_func_caps_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_func_caps_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_txq_cfg_request(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_rxq_cfg_request(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_txrxq_dis_request(struct sxe2vf_adapter *adapter, bool is_close); + +s32 sxe2vf_ethtool_info_request(struct sxe2vf_adapter *adapter, + struct sxe2_msg_ethtool_info *link_cfg); + +s32 sxe2vf_link_status_request(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mbx_common_msg_send(struct sxe2vf_adapter *adapter, + enum 
sxe2_vf_opcode opcode, u8 *msg, u16 len); + +s32 sxe2vf_rdma_dump_pcap_msg_send(struct sxe2vf_adapter *adapter, u8 *mac, bool is_add); + +u16 sxe2vf_irq_cnt_min_get(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_mac_update_msg_send(struct sxe2vf_adapter *adapter, const u8 *macaddr, bool to_user); + +s32 sxe2vf_user_promisc_update_msg_send(struct sxe2vf_adapter *adapter, + u16 vsi_id, bool to_user, bool is_promisc); + +s32 sxe2vf_user_promisc_set_msg_send(struct sxe2vf_adapter *adapter, u16 vsi_id); + +s32 sxe2vf_user_vlan_msg_send(struct sxe2vf_adapter *adapter, + u16 vsi_id, struct sxe2vf_vlan *vlan, bool is_add); + +s32 sxe2vf_com_link_info_request(struct sxe2vf_adapter *adapter, u8 *link_state, u32 *link_speed); +#ifdef SXE2_SUPPORT_ACL +s32 sxe2vf_acl_filter_clear_msg_send(struct sxe2vf_adapter *adapter); +#endif + +s32 sxe2vf_drv_mode_set(struct sxe2vf_adapter *adapter, enum sxe2_com_module type); + +s32 __sxe2vf_drv_mode_get(struct sxe2vf_adapter *adapter, + struct sxe2_vf_drv_mode_resp *vf_resp, u32 resp_len, + enum sxe2vf_resp_wait_mode mode); + +s32 sxe2vf_drv_mode_get(struct sxe2vf_adapter *adapter, enum sxe2vf_resp_wait_mode mode); + +void sxe2vf_wait_in_resetting(struct sxe2vf_adapter *adapter, bool is_close); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.c new file mode 100644 index 0000000000000000000000000000000000000000..116c57de90053a8b70c99292bfd8f33035f0cdb2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.c @@ -0,0 +1,833 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_netdev.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2_log.h" +#include "sxe2vf_ethtool.h" +#include "sxe2vf_netdev.h" +#include "sxe2vf.h" +#include "sxe2vf_rx.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_vsi.h" +#include "sxe2vf_rxft.h" +#include "sxe2vf_ipsec.h" + +#define SXE2VF_TSO_MIN_MTU 576 +#define SXE2VF_WAIT_POST_COMPLETE_COUNT 2000 +#define SXE2VF_WAIT_POST_10MS 10 + +#define SXE2VF_SET_FEATURE(features, feature, enable) \ + do { \ + typeof(feature) __feature = (feature); \ + if (enable) \ + *features |= __feature; \ + else \ + *features &= ~__feature; \ + } while (0) + +static inline int sxe2vf_conflict_features_chk(u64 changed_features, u64 features, + u64 con1, u64 con2) +{ + if ((changed_features & con1 && features & con1) && + (changed_features & con2 && features & con2)) + return -EINVAL; + + return 0; +} + +STATIC s32 sxe2vf_open(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi; + s32 i; + s32 ret; + + for (i = 0; i < SXE2VF_WAIT_POST_COMPLETE_COUNT; i++) { + msleep(SXE2VF_WAIT_POST_10MS); + if (test_bit(SXE2VF_FLAG_DRV_PROBE_DONE, adapter->flags)) + break; + } + + if (i == SXE2VF_WAIT_POST_COMPLETE_COUNT) { + LOG_DEV_ERR("probe post not complete, try later.\n"); + return -EIO; + } + + vsi = adapter->vsi_ctxt.vf_vsi; + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) { + LOG_INFO_BDF("vsi disabled, try open later flag:0x%lx.\n", + *adapter->flags); + ret = -EBUSY; + goto unlock; + } + + if (!test_bit(SXE2VF_VSI_CLOSE, vsi->state)) { + LOG_DEV_INFO("vf already open flag:0x%lx.\n", *adapter->flags); + ret = 0; + goto l_set_bit; + } + + ret = sxe2vf_vsi_open(vsi); + if (ret) { + LOG_ERROR_BDF("vsi open 
failed %d.\n", ret); + goto unlock; + } + + LOG_INFO_BDF("netdev opened.\n"); + +l_set_bit: + set_bit(SXE2VF_FLAG_DRV_UP, adapter->flags); + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +STATIC s32 sxe2vf_stop(struct net_device *netdev) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi; + struct sxe2vf_mac_filter *filter = + &adapter->switch_ctxt.filter_ctxt.mac_filter; + struct sxe2vf_sync_addr_node *list_itr = NULL; + struct sxe2vf_sync_addr_node *tmp = NULL; + s32 ret = 0; + struct sxe2vf_switch_context *switch_ctxt = &adapter->switch_ctxt; + + vsi = adapter->vsi_ctxt.vf_vsi; + + mutex_lock(&adapter->vsi_ctxt.lock); + + clear_bit(SXE2VF_FLAG_DRV_UP, adapter->flags); + + if (test_bit(SXE2VF_VSI_DISABLE, vsi->state)) + goto unlock; + + ret = sxe2vf_vsi_close(vsi); + if (!ret) + LOG_INFO_BDF("netdev stopped.\n"); + + mutex_lock(&switch_ctxt->mac_addr_lock); + INIT_LIST_HEAD(&filter->tmp_unsync_list); + + netif_addr_lock_bh(netdev); + __dev_uc_unsync(netdev, sxe2vf_addr_unsync); + __dev_mc_unsync(netdev, sxe2vf_addr_unsync); + netif_addr_unlock_bh(netdev); + + list_for_each_entry_safe(list_itr, tmp, &filter->tmp_unsync_list, list) { + (void)sxe2vf_mac_addr_del(adapter, list_itr->macaddr, + SXE2VF_MAC_OWNER_UC_MC); + list_del(&list_itr->list); + kfree(list_itr); + } + + mutex_unlock(&switch_ctxt->mac_addr_lock); + +unlock: + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static int sxe2vf_set_vlan_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 status = 0; + s32 ret = 0; + + if (adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist && + (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + LOG_DEV_ERR("port vlan exist, stag offload not support.\n"); + ret = -EOPNOTSUPP; + return ret; + } + + if (adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist && + (features & SXE2VF_VLAN_FILTER_FEATURES)) { + LOG_DEV_ERR("port vlan exist, vlan filter not support.\n"); + ret = -EOPNOTSUPP; + return ret; + } + + status = sxe2vf_vlan_offload_cfg(netdev, + features & SXE2VF_VLAN_OFFLOAD_FEATURES); + if (!status) { + SXE2VF_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_CTAG_RX, + (features & NETIF_F_HW_VLAN_CTAG_RX)); + SXE2VF_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_CTAG_TX, + (features & NETIF_F_HW_VLAN_CTAG_TX)); + SXE2VF_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_STAG_RX, + (features & NETIF_F_HW_VLAN_STAG_RX)); + SXE2VF_SET_FEATURE(oper_features, NETIF_F_HW_VLAN_STAG_TX, + (features & NETIF_F_HW_VLAN_STAG_TX)); + } else { + ret = status; + } + + status = sxe2vf_vlan_filter_cfg(netdev, + features & SXE2VF_VLAN_FILTER_FEATURES); + if (!status) { + SXE2VF_SET_FEATURE(oper_features, SXE2VF_VLAN_FILTER_FEATURES, + (features & SXE2VF_VLAN_FILTER_FEATURES)); + } else { + ret = status; + } + + LOG_INFO_BDF("current features 0x%llx, request features 0x%llx\n", + netdev->features, features); + return ret; +} + +static s32 sxe2vf_set_lro_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + bool need_reset = false; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret = 0; + bool lro_ena = !!(features & NETIF_F_LRO); + bool old_lro_feature = + (bool)test_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags); + + if (!(features & NETIF_F_LRO)) { + if (test_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags)) { + clear_bit(SXE2VF_FLAG_LRO_ENABLE, 
adapter->flags); + need_reset = true; + LOG_DEBUG_BDF("lro disabled and need reset\n"); + } + } else { + if (!(features & NETIF_F_RXCSUM)) { + LOG_NETDEV_ERR("Cannot simultaneously enable lro and\t" + "disable rx csum.\n"); + return -EOPNOTSUPP; + } + + if (!(test_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags))) { + set_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags); + need_reset = true; + LOG_DEBUG_BDF("lro enabled and need reset\n"); + } + } + + if (need_reset) { + ret = sxe2vf_vsi_reopen(adapter->vsi_ctxt.vf_vsi); + if (ret) { + LOG_NETDEV_ERR("set_features down_up err %d\n", ret); + if (old_lro_feature) + set_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags); + else + clear_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags); + } else { + SXE2VF_SET_FEATURE(oper_features, NETIF_F_LRO, lro_ena); + } + } + + return ret; +} + +static s32 sxe2vf_set_rxfcs_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + bool need_reset = false; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret = 0; + bool rxfcs_ena = !!(features & NETIF_F_RXFCS); + bool old_rxfcs_feature = + (bool)test_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags); + + if (!(features & NETIF_F_RXFCS)) { + if (test_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags)) { + clear_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags); + need_reset = true; + LOG_DEBUG_BDF("rxfcs disabled and need reset\n"); + } + } else { + if (!(test_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags))) { + set_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags); + need_reset = true; + LOG_DEBUG_BDF("rxfcs enabled and need reset\n"); + } + } + + if (need_reset) { + ret = sxe2vf_vsi_reopen(adapter->vsi_ctxt.vf_vsi); + if (ret) { + LOG_NETDEV_ERR("set_features down_up err %d\n", ret); + if (old_rxfcs_feature) + set_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags); + else + clear_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags); + } else { + SXE2VF_SET_FEATURE(oper_features, NETIF_F_RXFCS, rxfcs_ena); + } + } + + return ret; +} + +STATIC int sxe2vf_set_fnav_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + netdev_features_t changed_features = netdev->features ^ features; + bool fnav_ena = !!(features & NETIF_F_NTUPLE); + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + if (!(changed_features & NETIF_F_NTUPLE)) + goto l_end; + + if (fnav_ena) { + if (!test_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags)) + set_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags); + + SXE2VF_SET_FEATURE(oper_features, NETIF_F_NTUPLE, fnav_ena); + goto l_end; + } + + if (!test_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags)) { + SXE2VF_SET_FEATURE(oper_features, NETIF_F_NTUPLE, fnav_ena); + goto l_end; + } + + ret = sxe2vf_fnav_all_filter_del(adapter); + if (!ret) { + SXE2VF_SET_FEATURE(oper_features, NETIF_F_NTUPLE, fnav_ena); + } else { + LOG_ERROR_BDF("delete all filter failed, ret:%d", ret); + goto l_end; + } +#ifdef SXE2_SUPPORT_ACL + ret = sxe2vf_acl_filter_clear_msg_send(adapter); + if (ret) { + LOG_ERROR_BDF("send acl filter clear msg failed, ret:%d", ret); + goto l_end; + } +#endif + clear_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags); +l_end: + LOG_INFO_BDF("sxe2 vf fnav set feature done, fnav_ena:%d ret:%d\n", fnav_ena, + ret); + return ret; +} + +static s32 sxe2vf_set_rxcsum_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + bool rxcsum_ena = !!(features & NETIF_F_RXCSUM); + + SXE2VF_SET_FEATURE(oper_features, 
NETIF_F_RXCSUM, rxcsum_ena); + + return ret; +} + +STATIC s32 sxe2vf_set_ipsec_features(struct net_device *netdev, + netdev_features_t features, + netdev_features_t *oper_features) +{ + s32 ret = 0; + netdev_features_t changed_features = netdev->features ^ features; + bool ipsec_ena = !!(features & NETIF_F_HW_ESP); + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + if (changed_features & NETIF_F_HW_ESP) { + mutex_lock(&adapter->ipsec_ctxt.context_lock); + if (ipsec_ena) { + LOG_DEBUG_BDF("Enable ipsec offload(off to on).\n"); + if (sxe2vf_ipsec_conflict_features_check(netdev)) { + LOG_DEV_ERR("failed to enable ipsec offload,\t" + "please disable tx segmentation offload\t" + "features,\t" + "tx vlan offload feature and LRO\t" + "offload feature.\n"); + ret = -EINVAL; + } else { + if (netdev->mtu >= SXE2VF_IPSEC_PAYLOAD_LIMIT) { + LOG_NETDEV_WARN("SXE2:Current mtu is %d.\t" + "The maximum encryption\t" + "length of IPsec is 2k.\t" + "If the packet length is\t" + "greater than 2k,\t" + "the hardware ipsec\t" + "offloading may fail.\n", + netdev->mtu); + } + } + } else { + LOG_DEBUG_BDF("Disable ipsec offload switch(on to off).\n"); + if (sxe2vf_is_ipsec_can_not_disable(adapter)) { + LOG_DEV_ERR("Can not disable ipsec offload,\t" + "please delete all xfrm state before\t" + "disable ipsec offload\n"); + ret = -EINVAL; + } + } + if (!ret) + SXE2VF_SET_FEATURE(oper_features, NETIF_F_HW_ESP, ipsec_ena); + + mutex_unlock(&adapter->ipsec_ctxt.context_lock); + } + + return ret; +} + +static s32 sxe2vf_conflict_features_check(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed_features = netdev->features ^ features; + netdev_features_t conflict1, conflict2; + + (void)changed_features; + (void)conflict1; + (void)conflict2; + + conflict1 = NETIF_F_HW_ESP; + conflict2 = 0 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL +#ifdef NETIF_F_GSO_UDP_L4 + | NETIF_F_GSO_UDP_L4 +#endif + | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6 | NETIF_F_LRO | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM; + + (void)sxe2vf_conflict_features_chk(changed_features, features, conflict1, + conflict2); + + return 0; +} + +static s32 sxe2vf_set_features(struct net_device *netdev, netdev_features_t features) +{ + s32 ret = 0; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + netdev_features_t oper_features; + bool part_failed = false; + + ret = sxe2vf_conflict_features_check(netdev, features); + if (ret) { + LOG_DEV_ERR("some features are conflict\n"); + return ret; + } + + mutex_lock(&adapter->vsi_ctxt.lock); + if (test_bit(SXE2VF_VSI_DISABLE, adapter->vsi_ctxt.vf_vsi->state)) { + LOG_INFO_BDF("vsi disabled, try later\n"); + mutex_unlock(&adapter->vsi_ctxt.lock); + return -EBUSY; + } + + oper_features = netdev->features; + ret = sxe2vf_set_rxfcs_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = sxe2vf_set_lro_features(netdev, features, &oper_features); + if (ret) { + part_failed = true; + goto skip_rxcsum; + } + + ret = sxe2vf_set_rxcsum_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + +skip_rxcsum: + ret = sxe2vf_set_ipsec_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = sxe2vf_set_vlan_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + ret = 
sxe2vf_set_fnav_features(netdev, features, &oper_features); + if (ret) + part_failed = true; + + if (part_failed) { + netdev->features = oper_features; + ret = -EINVAL; + } + + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +static netdev_features_t sxe2vf_fix_ipsec_features(struct sxe2vf_adapter *adapter, + struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t tso_features; + + if (netdev->features & NETIF_F_HW_ESP) { + tso_features = NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_UDP_L4 + NETIF_F_GSO_UDP_L4 | +#endif + NETIF_F_GSO_IPXIP6; + if (features & (tso_features)) { + LOG_DEV_ERR("ipsec is conflicted with tx segmentation\t" + "offload.\n"); + features &= ~(tso_features); + } + if (features & + (NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM)) { + LOG_DEV_ERR("ipsec is conflicted with tx Checksum\t" + "offload.\n"); + features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM); + } + if (features & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)) { + LOG_DEV_ERR("ipsec is conflicted with tx VLAN offload.\n"); + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + } + if (features & NETIF_F_LRO) { + LOG_DEV_ERR("ipsec is conflicted with LRO.\n"); + features &= ~(NETIF_F_LRO); + } + } + return features; +} + +static netdev_features_t sxe2vf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t req_vlan_fltr, cur_vlan_fltr; + bool cur_ctag, cur_stag, req_ctag, req_stag; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vlan_info *vlan_info = + &adapter->switch_ctxt.filter_ctxt.vlan_info; + u8 port_vlan_exist = vlan_info->port_vlan_exist; + u8 is_switchdev = vlan_info->is_switchdev; + netdev_features_t request_features; + + request_features = features; + + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + if (req_vlan_fltr != cur_vlan_fltr) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else { + LOG_DEV_WARN("802.1Q and 802.1ad VLAN filtering must be\t" + "either both on or both off.\n" + "VLAN filtering has been enabled for both\t" + "types.\n"); + if (!cur_ctag && !cur_stag) + features |= NETIF_VLAN_FILTERING_FEATURES; + else + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } + } + + if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && + (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + LOG_DEV_WARN("cannot support CTAG and STAG VLAN stripping and/or\t" + "insertion simultaneously.\n" + "since CTAG and STAG offloads are mutually exclusive,\t" + "clearing STAG offload settings\n"); + features &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX); + } + + if (port_vlan_exist) { + features &= ~(SXE2VF_VLAN_FILTER_FEATURES | NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX); + } + + if (is_switchdev) { + features &= ~(SXE2VF_VLAN_FILTER_FEATURES | NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | 
NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX); + } + + features = sxe2vf_fix_ipsec_features(adapter, netdev, features); + + LOG_DEBUG_BDF("request features %llx, fix features %llx\n", request_features, + features); + return features; +} + +static netdev_features_t +sxe2vf_features_check(struct sk_buff *skb, struct net_device __always_unused *netdev, + netdev_features_t features) +{ + size_t len; + bool gso = skb_is_gso(skb); + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + if (gso && (skb_shinfo(skb)->gso_size < SXE2VF_TXCD_QW1_MSS_MIN)) { + LOG_WARN_BDF("gso size < 88, not support\n"); + features &= ~NETIF_F_GSO_MASK; + } + + len = (size_t)skb_network_offset(skb); + if (len > SXE2VF_TXDD_MACLEN_MAX || len & 0x1) { + LOG_WARN_BDF("The mac header exceeds the max length,\t" + "not support tso/csum, maclen = %zu\n", + len); + goto out_rm_features; + } + + len = skb_network_header_len(skb); + if (len > SXE2VF_TXDD_IPLEN_MAX || len & 0x1) { + LOG_WARN_BDF("The ip header exceeds the max length,\t" + "not support tso/csum, iplen = %zu\n", + len); + goto out_rm_features; + } + + if (skb->encapsulation) { + if (gso && (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { + len = (size_t)(skb_inner_network_header(skb) - + skb_transport_header(skb)); + if (len > SXE2VF_TXDD_L4LEN_MAX || len & 0x1) { + LOG_WARN_BDF("tunnel:The inner L4 header exceeds\t" + "the max length,\t" + "not support tso/csum, l4 len = %zu\n", + len); + goto out_rm_features; + } + } + + len = skb_inner_network_header_len(skb); + if (len > SXE2VF_TXDD_IPLEN_MAX || len & 0x1) { + LOG_WARN_BDF("tunnel:The inner ip header exceeds the max\t" + "length,\t" + "not support tso/csum, ip len = %zu\n", + len); + goto out_rm_features; + } + } + + return features; + +out_rm_features: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static int sxe2vf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + s32 ret; + u32 old_mtu = netdev->mtu; + + if (new_mtu == (int)netdev->mtu) { + LOG_NETDEV_WARN("MTU is already %u\n", netdev->mtu); + return 0; + } + + netdev->mtu = (unsigned int)new_mtu; + + ret = sxe2vf_vsi_reopen_locked(adapter->vsi_ctxt.vf_vsi); + if (ret) { + netdev->mtu = old_mtu; + LOG_NETDEV_ERR("changing MTU from %u to %d failed.\n", old_mtu, + new_mtu); + return ret; + } + + if (new_mtu > SXE2VF_IPSEC_PAYLOAD_LIMIT && + sxe2vf_is_ipsec_offload_enable(netdev)) { + LOG_NETDEV_WARN("SXE2:the maximum encryption length of IPsec is\t" + "2k.\n" + "If the packet length is greater than 2k, the\t" + "hardware ipsec offloading may fail.\n"); + } + + LOG_NETDEV_INFO("changing MTU from %u to %d\n", old_mtu, new_mtu); + + set_bit(SXE2VF_FLAG_MTU_CHANGED, adapter->flags); + + return ret; +} + +#ifdef HAVE_RTNL_LINK_NDO_GET_STATS64 +STATIC struct rtnl_link_stats64 *sxe2vf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +STATIC void sxe2vf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2vf_vsi_sw_stats *cur_stats = &vsi->vsi_stats.vsi_sw_stats; + + (void)sxe2vf_vsi_sw_stats_update(vsi); + + stats->tx_packets = cur_stats->tx_packets; + stats->rx_packets = cur_stats->rx_packets; + stats->tx_bytes = cur_stats->tx_bytes; + stats->rx_bytes = cur_stats->rx_bytes; +#ifdef 
HAVE_RTNL_LINK_NDO_GET_STATS64 + return stats; +#endif +} + +STATIC const struct net_device_ops sxe2vf_netdev_ops = { + .ndo_open = sxe2vf_open, + .ndo_stop = sxe2vf_stop, + .ndo_start_xmit = sxe2vf_xmit, + .ndo_set_mac_address = sxe2vf_set_mac_address, + .ndo_set_rx_mode = sxe2vf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = sxe2vf_change_mtu, + .ndo_vlan_rx_add_vid = sxe2vf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sxe2vf_vlan_rx_kill_vid, + .ndo_features_check = sxe2vf_features_check, + .ndo_fix_features = sxe2vf_fix_features, + .ndo_set_features = sxe2vf_set_features, + .ndo_setup_tc = NULL, + .ndo_get_stats64 = sxe2vf_get_stats64, +}; + +STATIC void sxe2vf_netdev_feature_init(struct net_device *netdev) +{ + netdev_features_t defaults; + netdev_features_t lro_features = 0; + netdev_features_t csum_features; + netdev_features_t tso_features; + netdev_features_t vlan_features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + defaults = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_NTUPLE | NETIF_F_RXHASH; + + csum_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + tso_features = NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_GSO_IPXIP4 | +#ifdef NETIF_F_GSO_UDP_L4 + NETIF_F_GSO_UDP_L4 | +#endif + NETIF_F_GSO_IPXIP6; + + vlan_features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + netdev->gso_partial_features |= + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + netdev->hw_features |= defaults | csum_features | tso_features | + NETIF_F_HW_TC | NETIF_F_LRO | vlan_features | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_RXFCS; + + netdev->features |= defaults | csum_features | tso_features | lro_features | + vlan_features | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_FILTER; + + netdev->hw_enc_features |= defaults | csum_features | tso_features; + + netdev->vlan_features |= defaults | csum_features | tso_features; + + if (netdev->wanted_features) { + if (!(netdev->wanted_features & NETIF_F_TSO) || + netdev->mtu < SXE2VF_TSO_MIN_MTU) + netdev->features &= ~NETIF_F_TSO; + if (!(netdev->wanted_features & NETIF_F_TSO6) || + netdev->mtu < SXE2VF_TSO_MIN_MTU) + netdev->features &= ~NETIF_F_TSO6; + if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) + netdev->features &= ~NETIF_F_TSO_ECN; + if (!(netdev->wanted_features & NETIF_F_GRO)) + netdev->features &= ~NETIF_F_GRO; + if (!(netdev->wanted_features & NETIF_F_GSO)) + netdev->features &= ~NETIF_F_GSO; + } + + netdev->hw_features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; +} + +#ifdef HAVE_NETDEV_MIN_MAX_MTU +STATIC void sxe2vf_mtu_range_init(struct net_device *netdev) +{ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = SXE2VF_FRAME_SIZE_MAX - SXE2VF_PACKET_HDR_PAD; +} +#endif + +STATIC void sxe2vf_netdev_ops_init(struct net_device *netdev) +{ + netdev->netdev_ops = &sxe2vf_netdev_ops; + netdev->watchdog_timeo = SXE2VF_NETDEV_WATCHDOG_TIMEOUT; +} + +void sxe2vf_netdev_init(struct sxe2vf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + sxe2vf_netdev_feature_init(netdev); + + sxe2vf_netdev_ops_init(netdev); + + sxe2vf_ethtool_ops_init(netdev); + +#ifdef HAVE_NETDEV_MIN_MAX_MTU + sxe2vf_mtu_range_init(netdev); +#endif +} + +s32 sxe2vf_netdev_register(struct sxe2vf_adapter *adapter) +{ + s32 ret; + struct net_device *netdev = adapter->netdev; + + 
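/* Start quiesced: stop all TX queues and turn the carrier off so the stack sees link down until link state is reported. */ +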
netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + LOG_INFO_BDF("net dev carrier off link down.\n"); + + ret = register_netdev(netdev); + if (ret) { + LOG_ERROR_BDF("netdev register failed.(err:%d).\n", ret); + return ret; + } + + set_bit(SXE2VF_FLAG_NETDEV_REGISTED, adapter->flags); + + return ret; +} + +void sxe2vf_netdev_unregister(struct sxe2vf_adapter *adapter) +{ + + if (test_and_clear_bit(SXE2VF_FLAG_NETDEV_REGISTED, adapter->flags)) + unregister_netdev(adapter->netdev); +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.h new file mode 100644 index 0000000000000000000000000000000000000000..c1cf3286d9d768006c9a46faa28d13ba45cbdc79 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_netdev.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_netdev.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_NETDEV_H__ +#define __SXE2VF_NETDEV_H__ + +#include +#include "sxe2vf.h" + +struct sxe2vf_adapter; + +#define SXE2VF_FRAME_SIZE_MAX 9728 +#define SXE2VF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +#define SXE2VF_NETDEV_WATCHDOG_TIMEOUT (5 * HZ) +#define NETIF_VLAN_FILTERING_FEATURES \ + (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +void sxe2vf_netdev_init(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_netdev_register(struct sxe2vf_adapter *adapter); + +void sxe2vf_netdev_unregister(struct sxe2vf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_pci.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..9689f496c050df78a4e24d87c71fa703d109176a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_pci.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_pci.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef _SXE2VF_PCI_H_ +#define _SXE2VF_PCI_H_ + +#define SXE2VF_PCI_VENDOR_ID_1 0x1ff2 +#define SXE2VF_PCI_DEVICE_ID_1 0x10b2 + +#define SXE2VF_PCI_VENDOR_ID_2 0x1d94 +#define SXE2VF_PCI_DEVICE_ID_2 0x126f + +#define SXE2VF_PCI_DEVICE_ID_10B4 0x10b4 + +#define SXE2VF_PCI_VENDOR_ID_206F 0x206f +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_queue.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..78bd2d4285d09edf13e062e6b50871767cffbc56 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_queue.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_queue.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_QUEUE_H__ +#define __SXE2VF_QUEUE_H__ + +struct sxe2vf_irq_data; +struct sxe2vf_vsi; + +#define SXE2VF_QUEUES_CNT_MAX 16 + +#define SXE2VF_ETH_QUEUE_CNT_MIN 1 +#define SXE2VF_DPDK_QUEUE_CNT_MIN 1 + +#define sxe2vf_for_each_queue(post, head) \ + for (post = (head).next; post; \ + post = post->next) + +#define SXE2VF_DESC_IDLE(queue) \ +({ \ + typeof(queue) __q = (queue); \ + ((u16)((((__q)->next_to_clean > (__q)->next_to_use) ? 
\ + 0 : \ + (__q)->depth) + \ + ((__q)->next_to_clean - (__q)->next_to_use - 1))); \ +}) + +struct sxe2vf_desc_ring { + void *base_addr; + u8 __iomem *tail; + dma_addr_t dma; +}; + +struct sxe2vf_rx_buf { + dma_addr_t dma; + struct page *page; + u32 pg_offset; + u32 drv_refcnt; + struct sk_buff *skb; +}; + +struct sxe2vf_tx_buf { + void *next_to_watch; + union { + struct sk_buff *skb; + }; + u32 bytecount; + u16 gso_segs; + u32 tx_features; + DEFINE_DMA_UNMAP_LEN(len); + DEFINE_DMA_UNMAP_ADDR(dma); +}; + +struct sxe2vf_txq_stats { + u64 tx_restart; + u64 tx_busy; + u64 tx_linearize; + u64 tx_tso_linearize_chk; + u64 tx_vlan_insert; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_dropped; + u64 tx_xmit_more; +}; + +struct sxe2vf_rxq_stats { + u64 rx_lro_count; + u64 rx_lro_packets; + u64 rx_non_eop_descs; + u64 rx_page_alloc; + u64 rx_pg_alloc_fail; + u64 rx_buff_alloc_err; + u64 rx_csum_err; + u64 rx_vlan_strip; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_unnecessary_inner; + u64 rx_lro_bytes; + u64 rx_pkts_sw_drop; + u64 rx_pa_err; +}; + +struct sxe2vf_queue_ipsec_stats { + u64 tx_error_invalid_sp; + u64 tx_error_invalid_state; + u64 tx_offload_success; + u64 rx_error_invalid_sp; + u64 rx_error_invalid_state; + u64 rx_error_invalid_ptype; + u64 rx_error_decrypt_fail; + u64 rx_offload_success; +}; + +struct sxe2vf_queue_stats { + u64 packets; + u64 bytes; + union { + struct sxe2vf_rxq_stats rx_stats; + struct sxe2vf_txq_stats tx_stats; + }; + struct sxe2vf_queue_ipsec_stats ipsec_stats; + struct rcu_head rcu; + struct u64_stats_sync syncp; +}; + +struct sxe2vf_queue { + struct sxe2vf_queue *next; + struct device *dev; + struct net_device *netdev; + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_desc_ring desc; + union { + struct sxe2vf_rx_buf *rx_buf; + struct sxe2vf_tx_buf *tx_buf; + }; + + struct sxe2vf_vsi *vsi; + + u16 depth; + u16 idx_in_vsi; + + u16 next_to_use; + u16 next_to_clean; + union { + struct { + u16 rx_offset; + u16 rx_buf_len; + u16 next_to_alloc; + }; + + struct { + u16 next_rs; + u16 next_dd; + u16 q_handle; + }; + }; + + struct sxe2vf_queue_stats *stats; + struct u64_stats_sync syncp; + + struct rcu_head rcu; + + struct sk_buff *skb; + unsigned long flags; +} ____cacheline_internodealigned_in_smp; + +struct sxe2vf_vsi_queues { + u16 q_cnt; + u16 depth; + struct sxe2vf_queue **q; +}; + +struct sxe2vf_queue_context { + u16 max_cnt; + u16 q_cnt_req; + u16 eth_q_cnt; + u16 eth_offset; + u16 dpdk_q_cnt; + u16 dpdk_offset; +}; + +static inline bool sxe2vf_desc_status0_err_test(__le16 desc_status, + const u16 bit) +{ + return !!(desc_status & cpu_to_le16(bit)); +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.c new file mode 100644 index 0000000000000000000000000000000000000000..2b59c0d99061586f7066c3243710de58e5436d17 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.c @@ -0,0 +1,1033 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_rx.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include + +#include "sxe2_compat.h" +#include "sxe2vf.h" +#include "sxe2vf_rx.h" +#include "sxe2_log.h" +#include "sxe2vf_netdev.h" +#include "sxe2vf_l2_filter.h" +#include "sxe2vf_regs.h" + +STATIC s32 sxe2vf_rx_ring_alloc(struct sxe2vf_queue *rxq) +{ + s32 ret; + u32 size; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + rxq->rx_buf = kcalloc(rxq->depth, sizeof(*rxq->rx_buf), + GFP_KERNEL | __GFP_ZERO); + if (!rxq->rx_buf) { + ret = -ENOMEM; + LOG_ERROR_BDF("rxq:%d alloc %luB buffer info failed.(err:%d)\n", + rxq->idx_in_vsi, rxq->depth * sizeof(*rxq->rx_buf), + ret); + return ret; + } + + size = SXE2VF_RX_DESC_SIZE(rxq); + rxq->desc.base_addr = dma_alloc_coherent(rxq->dev, size, &rxq->desc.dma, + GFP_KERNEL | __GFP_ZERO); + if (!rxq->desc.base_addr) { + ret = -ENOMEM; + LOG_DEV_ERR("rxq:%d alloc %uB descriptor failed.(err:%d)\n", + rxq->idx_in_vsi, size, ret); + goto l_desc_alloc_failed; + } + + rxq->next_to_use = 0; + rxq->next_to_clean = 0; + + LOG_DEBUG_BDF("rxq[%u] depth:%u rx_buf:%p size:%lu\t" + "descriptor base va:%p dma:0x%llx size:%u.\n", + rxq->idx_in_vsi, rxq->depth, rxq->rx_buf, + rxq->depth * sizeof(*rxq->rx_buf), rxq->desc.base_addr, + rxq->desc.dma, size); + + return 0; + +l_desc_alloc_failed: + kfree(rxq->rx_buf); + rxq->rx_buf = NULL; + + return ret; +} + +void sxe2vf_rx_ring_free(struct sxe2vf_queue *rxq) +{ + kfree(rxq->rx_buf); + rxq->rx_buf = NULL; + + if (rxq->desc.base_addr) { + dma_free_coherent(rxq->dev, SXE2VF_RX_DESC_SIZE(rxq), + rxq->desc.base_addr, rxq->desc.dma); + rxq->desc.base_addr = NULL; + } + + LOG_DEBUG("rxq[%u] ring memory free.\n", rxq->idx_in_vsi); +} + +static void sxe2vf_rx_page_free(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *buf_info) +{ + dma_unmap_page_attrs(rxq->dev, buf_info->dma, SXE2VF_RX_PAGE_SIZE(rxq), + DMA_FROM_DEVICE, SXE2VF_RX_DMA_ATTR); + + __page_frag_cache_drain(buf_info->page, buf_info->drv_refcnt); + + buf_info->page = NULL; + buf_info->pg_offset = 0; +} + +static void sxe2vf_rx_ring_clean(struct sxe2vf_queue *rxq) +{ + struct device *dev = rxq->dev; + struct sxe2vf_rx_buf *rx_buf_info; + u16 i; + + if (!rxq->rx_buf) + return; + + for (i = 0; i < rxq->depth; i++) { + rx_buf_info = &rxq->rx_buf[i]; + if (!rx_buf_info->page) + continue; + + dma_sync_single_range_for_cpu(dev, rx_buf_info->dma, + rx_buf_info->pg_offset, + rxq->rx_buf_len, DMA_FROM_DEVICE); + sxe2vf_rx_page_free(rxq, rx_buf_info); + } + + memset(rxq->rx_buf, 0, SXE2VF_RX_BUF_LEN(rxq)); + memset(rxq->desc.base_addr, 0, SXE2VF_RX_DESC_SIZE(rxq)); + + rxq->next_to_alloc = 0; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; +} + +void sxe2vf_rx_rings_clean(struct sxe2vf_vsi *vsi) +{ + u16 i; + + sxe2vf_for_each_vsi_rxq(vsi, i) + { + sxe2vf_rx_ring_clean(vsi->rxqs.q[i]); + } +} + +STATIC void sxe2vf_rx_rings_free(struct sxe2vf_vsi *vsi) +{ + u16 i; + struct sxe2vf_queue *rxq; + + sxe2vf_for_each_vsi_rxq(vsi, i) + { + rxq = vsi->rxqs.q[i]; + sxe2vf_rx_ring_free(rxq); + } +} + +static void sxe2vf_rx_ring_res_free(struct sxe2vf_queue *rxq) +{ + sxe2vf_rx_ring_clean(rxq); + + sxe2vf_rx_ring_free(rxq); +} + +void sxe2vf_rx_rings_res_free(struct sxe2vf_vsi *vsi) +{ + u16 i; + + if (!vsi) + return; + + sxe2vf_for_each_vsi_rxq(vsi, i) sxe2vf_rx_ring_res_free(vsi->rxqs.q[i]); +} + +static s32 sxe2vf_rx_rings_alloc(struct sxe2vf_vsi *vsi) +{ + s32 ret; + u16 i; + struct sxe2vf_queue *rxq; + struct sxe2vf_adapter *adapter = vsi->adapter; + + 
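/* Allocate the buffer-info array and descriptor DMA memory for each RX queue in the VSI, unwinding any rings already allocated on failure. */ +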
sxe2vf_for_each_vsi_rxq(vsi, i) + { + rxq = vsi->rxqs.q[i]; + + ret = sxe2vf_rx_ring_alloc(rxq); + if (ret) { + LOG_ERROR_BDF("vsi:%d rxq:%d ring alloc failed.(%d)\n", + vsi->vsi_id, i, ret); + goto l_failed; + } + } + + return ret; + +l_failed: + while (i--) + sxe2vf_rx_ring_free(vsi->rxqs.q[i]); + + return ret; +} + +static u32 sxe2vf_rx_buf_len_set(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + u32 rx_buf_len = SXE2VF_RXBUF_2048; + +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + if (!test_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags)) { + struct net_device *netdev = adapter->netdev; + + rx_buf_len = SXE2VF_RXBUF_3072; + + if (!SXE2VF_PAD_RX_LEN_EXCEED_2K && netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = SXE2VF_RXBUF_1536 - NET_IP_ALIGN; + } +#endif + + LOG_DEBUG_BDF("vsi:%u type:%u rx_buf_len:%u.\n", vsi->vsi_id, vsi->vsi_type, + rx_buf_len); + + return rx_buf_len; +} + +static void sxe2vf_rxq_lro_switch(struct sxe2vf_queue *rxq) +{ + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (test_bit(SXE2VF_FLAG_LRO_ENABLE, adapter->flags)) { + set_bit(SXE2VF_RXQ_LRO_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u lro enabled.\n", + rxq->vsi->vsi_id, rxq->vsi->vsi_type, rxq->idx_in_vsi); + } else { + clear_bit(SXE2VF_RXQ_LRO_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u lro disabled.\n", + rxq->vsi->vsi_id, rxq->vsi->vsi_type, rxq->idx_in_vsi); + } +} + +static void sxe2vf_rxq_rxfcs_switch(struct sxe2vf_queue *rxq) +{ + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (test_bit(SXE2VF_FLAG_RXFCS_ENABLE, adapter->flags)) { + set_bit(SXE2VF_RXQ_RXFCS_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u rxfcs enabled.\n", + rxq->vsi->vsi_id, rxq->vsi->vsi_type, rxq->idx_in_vsi); + } else { + clear_bit(SXE2VF_RXQ_RXFCS_ENABLED, &rxq->flags); + LOG_INFO_BDF("vsi:%u type:%u rxq idx_in_vsi:%u rxfcs disabled.\n", + rxq->vsi->vsi_id, rxq->vsi->vsi_type, rxq->idx_in_vsi); + } +} + +static void sxe2vf_rxq_skb_build_switch(struct sxe2vf_queue *rxq) +{ + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (test_bit(SXE2VF_FLAG_LEGACY_RX_ENABLE, adapter->flags)) + clear_bit(SXE2VF_RXQ_BUILD_SKB_ENABLED, &rxq->flags); + else + set_bit(SXE2VF_RXQ_BUILD_SKB_ENABLED, &rxq->flags); +} + +static void sxe2vf_rxq_switch_cfg(struct sxe2vf_queue *rxq, u32 rx_buf_len) +{ + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + rxq->desc.tail = (u8 __iomem *)(adapter->hw.reg_base_addr + + (u64)SXE2VF_RXQ_TAIL((u64)rxq->idx_in_vsi)); + rxq->rx_buf_len = (u16)rx_buf_len; + + sxe2vf_rxq_lro_switch(rxq); + sxe2vf_rxq_rxfcs_switch(rxq); + sxe2vf_rxq_skb_build_switch(rxq); + rxq->rx_offset = sxe2vf_rx_offset_get(rxq); +} + +static bool sxe2vf_rx_page_alloc(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *buf_info) +{ + struct page *page; + dma_addr_t dma; + + if (likely(buf_info->page)) { + LOG_DEBUG("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p order:%u\t" + "dma:%llu pg_offset:%u drv_refcnt:0x%x reused.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_clean, + rxq->next_to_alloc, buf_info->page, + SXE2VF_RX_PAGE_ORDER(rxq), buf_info->dma, + buf_info->pg_offset, buf_info->drv_refcnt); + return true; + } + + page = dev_alloc_pages(SXE2VF_RX_PAGE_ORDER(rxq)); + if (unlikely(!page)) { + rxq->stats->rx_stats.rx_pg_alloc_fail++; + LOG_ERROR("rxq[%u] ntu:%u ntc:%u nta:%u rx pg_offset:%u\t" + "drv_refcnt:0x%x page alloc fail.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_clean, + rxq->next_to_alloc, 
buf_info->pg_offset, + buf_info->drv_refcnt); + return false; + } + + dma = dma_map_page_attrs(rxq->dev, page, 0, SXE2VF_RX_PAGE_SIZE(rxq), + DMA_FROM_DEVICE, SXE2VF_RX_DMA_ATTR); + + if (dma_mapping_error(rxq->dev, dma)) { + __free_pages(page, SXE2VF_RX_PAGE_ORDER(rxq)); + rxq->stats->rx_stats.rx_pg_alloc_fail++; + LOG_ERROR("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p dma:%llu\t" + "pg_offset:%u drv_refcnt:0x%x dma map fail.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_clean, + rxq->next_to_alloc, page, dma, buf_info->pg_offset, + buf_info->drv_refcnt); + return false; + } + rxq->stats->rx_stats.rx_page_alloc++; + buf_info->dma = dma; + buf_info->page = page; + buf_info->pg_offset = rxq->rx_offset; + + page_ref_add(page, USHRT_MAX - 1); + buf_info->drv_refcnt = USHRT_MAX; + + LOG_DEBUG("rxq[%u] ntu:%u ntc:%u nta:%u rx page:%p order:%u\t" + "dma:%llu pg_offset:%u drv_refcnt:0x%x.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_clean, + rxq->next_to_alloc, page, SXE2VF_RX_PAGE_ORDER(rxq), dma, + buf_info->pg_offset, buf_info->drv_refcnt); + + return true; +} + +static void sxe2vf_rxq_tail_update(struct sxe2vf_queue *rxq, u16 new) +{ +#ifndef SXE2_CFG_RELEASE + u16 prev_ntu = rxq->next_to_use; +#endif + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + rxq->next_to_use = new; + rxq->next_to_alloc = new; + + wmb(); + writel(new, rxq->desc.tail); + + LOG_DEBUG_BDF("rxq[%u] ntu:%u nta:%u prev:%u tail:%u update read:0x%x.\n", + rxq->idx_in_vsi, rxq->next_to_use, rxq->next_to_alloc, + prev_ntu, new, readl(rxq->desc.tail)); +} + +bool sxe2vf_rx_buffers_alloc(struct sxe2vf_queue *rxq, u16 cnt) +{ + union sxe2vf_rx_desc *desc; + struct sxe2vf_rx_buf *buf_info; + u16 ntu = rxq->next_to_use; + + if (!cnt) + return false; + + desc = SXE2VF_RX_DESC(rxq, ntu); + buf_info = &rxq->rx_buf[ntu]; + + do { + if (!sxe2vf_rx_page_alloc(rxq, buf_info)) + break; + + dma_sync_single_range_for_device(rxq->dev, buf_info->dma, + buf_info->pg_offset, + rxq->rx_buf_len, DMA_FROM_DEVICE); + desc->read.pkt_addr = + cpu_to_le64(buf_info->dma + buf_info->pg_offset); + + desc++; + buf_info++; + ntu++; + + if (unlikely(ntu == rxq->depth)) { + desc = SXE2VF_RX_DESC(rxq, 0); + buf_info = rxq->rx_buf; + ntu = 0; + } + + desc->wb.status0_err = 0; + + cnt--; + } while (cnt); + + if (rxq->next_to_use != ntu) + sxe2vf_rxq_tail_update(rxq, ntu); + + return !!cnt; +} + +s32 sxe2vf_rx_hw_cfg(struct sxe2vf_vsi *vsi) +{ + u16 i; + u32 rx_buf_len; + struct sxe2vf_adapter *adapter = vsi->adapter; + s32 ret; + + rx_buf_len = sxe2vf_rx_buf_len_set(vsi); + + sxe2vf_for_each_vsi_rxq(vsi, i) + { + sxe2vf_rxq_switch_cfg(vsi->rxqs.q[i], rx_buf_len); + } + + ret = sxe2vf_rxq_cfg_request(adapter); + if (!ret) { + sxe2vf_for_each_vsi_rxq(vsi, i) + { + (void)sxe2vf_rx_buffers_alloc(vsi->rxqs.q[i], + SXE2VF_DESC_IDLE(vsi->rxqs.q[i])); + } + } + + return ret; +} + +s32 sxe2vf_rx_cfg(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + s32 ret; + + ret = sxe2vf_rx_rings_alloc(vsi); + if (ret) { + LOG_ERROR_BDF("vsi:%u type:%u rx ring resource alloc\t" + "failed.(err:%d)\n", + vsi->vsi_id, vsi->vsi_type, ret); + return ret; + } + + ret = sxe2vf_rx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("rx hw configure failed, ret=%d\n", ret); + sxe2vf_wait_in_resetting(adapter, false); + goto l_rxq_cfg_failed; + } + + return 0; + +l_rxq_cfg_failed: + sxe2vf_rx_rings_free(vsi); + return ret; +} + +static void sxe2vf_rx_first_pkt_sync(struct sxe2vf_queue *rxq, struct sk_buff *skb) +{ + struct sxe2vf_adapter *adapter = 
rxq->vsi->adapter; + + if (test_bit(SXE2VF_RXQ_BUILD_SKB_ENABLED, &rxq->flags)) { + unsigned long mask = (unsigned long)SXE2VF_RX_PAGE_SIZE(rxq) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask; + + dma_sync_single_range_for_cpu(rxq->dev, SXE2VF_SKB_PRIV(skb)->dma, + offset, skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rxq->dev, SXE2VF_SKB_PRIV(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), DMA_FROM_DEVICE); + } + + if (unlikely(SXE2VF_SKB_PRIV(skb)->page_released)) { + dma_unmap_page_attrs(rxq->dev, SXE2VF_SKB_PRIV(skb)->dma, + SXE2VF_RX_PAGE_SIZE(rxq), DMA_FROM_DEVICE, + SXE2VF_RX_DMA_ATTR); + } + + LOG_DEBUG_BDF("rxq[%u] skb:%p nr_frags:%u dma:%llu sync first packet\n", + rxq->idx_in_vsi, skb, skb_shinfo(skb)->nr_frags, + SXE2VF_SKB_PRIV(skb)->dma); +} + +static struct sxe2vf_rx_buf *sxe2vf_rx_buffer_get(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, + const u32 size, s32 *pg_cnt) +{ + struct sxe2vf_rx_buf *rx_buffer = &rxq->rx_buf[rxq->next_to_clean]; + struct sk_buff *skb = rx_buffer->skb; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + *pg_cnt = +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + page_count(rx_buffer->page); +#else + 0; +#endif + + prefetchw(rx_buffer->page); + + if (!size) + return rx_buffer; + + if (sxe2vf_is_non_eop(rxq, rx_desc)) { + if (!skb) + goto l_skip_sync; + } else { + if (skb) + sxe2vf_rx_first_pkt_sync(rxq, skb); + } + + dma_sync_single_range_for_cpu(rxq->dev, rx_buffer->dma, rx_buffer->pg_offset, + size, DMA_FROM_DEVICE); + +l_skip_sync: + rx_buffer->drv_refcnt--; + + LOG_DEBUG_BDF("rxq[%u] ntc:%u pkt len:%u skb:%p drv_refcnt:0x%x\t" + "pg_offset:%u rx_buffer[%u]:%p get\n", + rxq->idx_in_vsi, rxq->next_to_clean, size, skb, + rx_buffer->drv_refcnt, rx_buffer->pg_offset, + rxq->next_to_clean, rx_buffer); + + return rx_buffer; +} + +static void sxe2vf_rxq_stats_update(struct sxe2vf_queue *rxq, u64 pkts, u64 bytes) +{ + u64_stats_update_begin(&rxq->syncp); + rxq->stats->bytes += bytes; + rxq->stats->packets += pkts; + u64_stats_update_end(&rxq->syncp); +} + +static void sxe2vf_rx_page_reuse(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *old_buff) +{ + struct sxe2vf_rx_buf *new_buff; + u16 nta = rxq->next_to_alloc; + + new_buff = &rxq->rx_buf[nta]; + + nta++; + rxq->next_to_alloc = (nta < rxq->depth) ? 
nta : 0; + + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->pg_offset = old_buff->pg_offset; + new_buff->drv_refcnt = old_buff->drv_refcnt; +} + +static bool sxe2vf_page_is_reusable(struct sxe2vf_rx_buf *rx_buf, int pg_refcnt) +{ + u32 drv_refcnt = rx_buf->drv_refcnt; + struct page *page = rx_buf->page; + + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + if (unlikely((pg_refcnt - (int)drv_refcnt) > 1)) + return false; +#else + if (rx_buf->pg_offset > SXE2VF_LAST_OFFSET) + return false; +#endif + + if (unlikely(drv_refcnt == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buf->drv_refcnt = USHRT_MAX; + } + + return true; +} + +static void sxe2vf_rx_pg_offset_update(struct sxe2vf_rx_buf *rx_buf, u32 size) +{ +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + rx_buf->pg_offset ^= size; +#else + rx_buf->pg_offset += size; +#endif +} + +static void sxe2vf_skb_frag_add(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *rx_buf, struct sk_buff *skb, + u32 size) +{ +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + u32 truesize = SXE2VF_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(size + rxq->rx_offset); +#endif + + if (!size) + return; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + (int)rx_buf->pg_offset, (int)size, truesize); + + sxe2vf_rx_pg_offset_update(rx_buf, truesize); +} + +static void sxe2vf_rx_buffer_put(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, + struct sxe2vf_rx_buf *rx_buf, struct sk_buff *skb, + int pg_refcnt) +{ + u16 ntc = rxq->next_to_clean + 1; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + u8 lro_cnt = sxe2vf_rx_desc_lro_cnt(rxq, rx_desc); + + ntc = (ntc < rxq->depth) ? ntc : 0; + rxq->next_to_clean = ntc; + + if (!rx_buf) + return; + + if (lro_cnt) + SXE2VF_SKB_PRIV(skb)->lro_cnt += lro_cnt; + + if (sxe2vf_page_is_reusable(rx_buf, pg_refcnt)) { + sxe2vf_rx_page_reuse(rxq, rx_buf); + } else { + if (SXE2VF_SKB_PRIV(skb)->dma == rx_buf->dma) + SXE2VF_SKB_PRIV(skb)->page_released = true; + else + dma_unmap_page_attrs(rxq->dev, rx_buf->dma, + SXE2VF_RX_PAGE_SIZE(rxq), + DMA_FROM_DEVICE, SXE2VF_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buf->page, rx_buf->drv_refcnt); + } + + rx_buf->page = NULL; + rx_buf->skb = NULL; + + LOG_DEBUG_BDF("rxq[%u] ntc:%u nta:%u skb:%p nr_frags:%u lro_cnt:%u\t" + "rx_buffer[%u]:%p put\n", + rxq->idx_in_vsi, rxq->next_to_clean, rxq->next_to_alloc, skb, + skb_shinfo(skb)->nr_frags, SXE2VF_SKB_PRIV(skb)->lro_cnt, + ntc - 1, rx_buf); +} + +static struct sk_buff *sxe2vf_skb_build(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *rx_buf, u32 size, + union sxe2vf_rx_desc *rx_desc) +{ + void *va; +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + u32 truesize = SXE2VF_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(SXE2VF_SKB_PAD_VALUE + size); +#endif + struct sk_buff *skb; + + if (!rx_buf) + return NULL; + + va = page_address(rx_buf->page) + rx_buf->pg_offset; + net_prefetch(va); + + skb = napi_build_skb(va - SXE2VF_SKB_PAD_VALUE, truesize); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, SXE2VF_SKB_PAD_VALUE); + (void)__skb_put(skb, size); + + if (sxe2vf_is_non_eop(rxq, rx_desc)) + SXE2VF_SKB_PRIV(skb)->dma = rx_buf->dma; + + sxe2vf_rx_pg_offset_update(rx_buf, truesize); + + return skb; +} + +static struct sk_buff *sxe2vf_skb_construct(struct sxe2vf_queue *rxq, + struct sxe2vf_rx_buf *rx_buf, u32 size, + union sxe2vf_rx_desc *rx_desc) +{ + void *va; + u32 headlen; + struct 
sk_buff *skb; +#if (PAGE_SIZE < SXE2VF_PAGE_SIZE_8KB) + u32 truesize = SXE2VF_RX_PAGE_SIZE(rxq) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(size); +#endif + + if (!rx_buf) + return NULL; + + va = page_address(rx_buf->page) + rx_buf->pg_offset; + net_prefetch(va); + + skb = __napi_alloc_skb(&rxq->irq_data->napi, SXE2VF_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + headlen = size; + if (headlen > SXE2VF_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, va, SXE2VF_RX_HDR_SIZE); + + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + size -= headlen; + if (size) { + if (sxe2vf_is_non_eop(rxq, rx_desc)) + SXE2VF_SKB_PRIV(skb)->dma = rx_buf->dma; + + skb_add_rx_frag(skb, 0, rx_buf->page, + (int)(rx_buf->pg_offset + headlen), (int)size, + truesize); + + sxe2vf_rx_pg_offset_update(rx_buf, truesize); + } else { + rx_buf->drv_refcnt++; + } + + return skb; +} + +STATIC enum pkt_hash_types sxe2vf_hash_type_get(u16 ptype) +{ + struct sxe2vf_rx_ptype_info ptype_info = sxe2vf_rx_ptype_parse(ptype); + + if (!ptype_info.known) + return PKT_HASH_TYPE_NONE; + if (ptype_info.hash_layer == SXE2VF_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + if (ptype_info.hash_layer == SXE2VF_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + if (ptype_info.outer_ip == SXE2VF_PTYPE_OUTER_L2) + return PKT_HASH_TYPE_L2; + + return PKT_HASH_TYPE_NONE; +} + +static void sxe2vf_lro_stats_update(struct sxe2vf_queue *rxq, struct sk_buff *skb) +{ + u32 hdr_len; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (!SXE2VF_SKB_PRIV(skb)->lro_cnt) { + LOG_DEBUG_BDF("rxq[%u] not lro packet\n", rxq->idx_in_vsi); + return; + } + + rxq->stats->rx_stats.rx_lro_count += SXE2VF_SKB_PRIV(skb)->lro_cnt - 1; + rxq->stats->rx_stats.rx_lro_packets++; + + hdr_len = skb_headlen(skb); + + skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - hdr_len), + SXE2VF_SKB_PRIV(skb)->lro_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + rxq->stats->rx_stats.rx_lro_bytes += skb->len; + + SXE2VF_SKB_PRIV(skb)->lro_cnt = 0; +} + +STATIC void sxe2vf_skb_hash_set(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, struct sk_buff *skb, + u16 ptype) +{ + u32 rss_hash_vld; + u32 hash; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (!(rxq->netdev->features & NETIF_F_RXHASH)) { + LOG_DEBUG_BDF("rxq[%u] rss hash offload off netdev\t" + "feature:0x%llx.\n", + rxq->idx_in_vsi, rxq->netdev->features); + return; + } + + rss_hash_vld = (le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) >> + SXE2VF_RX_DESC_STATUS2_RSS_VLD) & + SXE2VF_RX_DESC_RSS_VLD_MASK; + if (!rss_hash_vld) { + LOG_DEBUG_BDF("rxq[%u] rss_vld:0x%x\n", rxq->idx_in_vsi, + rss_hash_vld); + return; + } + + hash = le32_to_cpu(rx_desc->wb.filter_status); + + skb_set_hash(skb, hash, sxe2vf_hash_type_get(ptype)); + LOG_DEBUG_BDF("rxq[%u] hash:0x%x ptype:%u\n", rxq->idx_in_vsi, hash, ptype); +} + +static void sxe2vf_rx_csum_check(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, struct sk_buff *skb, + u16 ptype) +{ + struct sxe2vf_rx_ptype_info ptype_info; + u16 status0_err = 0; + bool ipv4 = 0; + bool ipv6 = 0; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; + + if (!(rxq->netdev->features & NETIF_F_RXCSUM)) { + rxq->stats->rx_stats.rx_csum_none++; + LOG_DEBUG_BDF("rxq[%u] rx csum offload off netdev feature:0x%llx.\n", + rxq->idx_in_vsi, rxq->netdev->features); + return; + } + + status0_err = le16_to_cpu(rx_desc->wb.status0_err); + ptype_info = sxe2vf_rx_ptype_parse(ptype); + + skb->ip_summed = 
CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + if (!(status0_err & BIT(SXE2VF_RX_DESC_STATUS0_L3L4_P)) || + !(ptype_info.known && ptype_info.outer_ip)) + goto l_out; + + ipv4 = (ptype_info.outer_ip == SXE2VF_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2VF_PTYPE_OUTER_IPV4); + + ipv6 = (ptype_info.outer_ip == SXE2VF_PTYPE_OUTER_IP) && + (ptype_info.outer_ip_ver == SXE2VF_PTYPE_OUTER_IPV6); + + if (ipv4 && (status0_err & (BIT(SXE2VF_RX_DESC_ERROR_IPE) | + BIT(SXE2VF_RX_DESC_ERROR_EIPE)))) + goto checksum_fail; + + if (ipv6 && (le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) & + (BIT(SXE2VF_RX_DESC_STATUS2_IPV6EXADD)))) + goto l_out; + + if (status0_err & BIT(SXE2VF_RX_DESC_ERROR_L4E)) + goto checksum_fail; + + if (rx_desc->wb.rxdid_src & BIT(SXE2VF_RX_DESC_EUDPE)) + goto checksum_fail; + + if (ptype_info.tunnel_type >= SXE2VF_PTYPE_TUNNEL_IP_GRENAT) { + skb->csum_level = 1; + rxq->stats->rx_stats.rx_csum_unnecessary_inner++; + } + + switch (ptype_info.inner_prot) { + case SXE2VF_PTYPE_INNER_PROT_UDP: + case SXE2VF_PTYPE_INNER_PROT_TCP: + case SXE2VF_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + rxq->stats->rx_stats.rx_csum_unnecessary++; + break; + default: + break; + } + rxq->stats->rx_stats.rx_csum_complete++; + + return; + +checksum_fail: + rxq->stats->rx_stats.rx_csum_err++; + +l_out: + LOG_DEBUG_BDF("rxq[%u] rxcsum offload:0x%llx status0_err:0x%x\t" + "ptype:0x%x ptype_info:0x%x pktl_hdrl_status2:0x%x\n" + "rxdid_src:%u ipv4:%u ipv6:%u ip_summed:%d csum_level:%d\t" + "csum_err:0x%llx\n", + rxq->idx_in_vsi, rxq->netdev->features & NETIF_F_RXCSUM, + status0_err, ptype, *(u32 *)&ptype_info, + le32_to_cpu(rx_desc->wb.pktl_hdrl_status2), + rx_desc->wb.rxdid_src, ipv4, ipv6, skb->ip_summed, + skb->csum_level, rxq->stats->rx_stats.rx_csum_err); +} + +static inline u16 sxe2vf_vlan_tag_get(union sxe2vf_rx_desc *rx_desc) +{ + if (le16_to_cpu(rx_desc->wb.status0_err) & + BIT(SXE2VF_RX_DESC_STATUS0_L2TAG1_P)) + return le16_to_cpu(rx_desc->wb.l2tag1); + + return 0; +} + +static void sxe2vf_skb_vlan_tag_put(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, + struct sk_buff *skb) +{ + netdev_features_t features = rxq->netdev->features; + u16 vlan_tag = sxe2vf_vlan_tag_get(rx_desc); + bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + rxq->stats->rx_stats.rx_vlan_strip++; + } else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); + } + + LOG_INFO("rxq:%u vlan tag strip ctag:%s stag:%s vlan tag:0x%x.\n", + rxq->idx_in_vsi, + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "enabled" : "disabled", + (features & NETIF_F_HW_VLAN_STAG_RX) ? 
"enabled" : "disabled", + vlan_tag); +} + +static bool sxe2vf_rx_desc_ipsec_check(union sxe2vf_rx_desc *rx_desc) +{ + return (le32_to_cpu(rx_desc->wb.status_lrocnt_fdpf_id) >> + SXE2VF_RX_DESC_IPSEC_PKT) & + 0x1; +} + +static void sxe2vf_skb_field_fill(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc, struct sk_buff *skb, + u16 ptype) +{ + sxe2vf_lro_stats_update(rxq, skb); + + sxe2vf_skb_hash_set(rxq, rx_desc, skb, ptype); + + skb->protocol = eth_type_trans(skb, rxq->netdev); + + sxe2vf_rx_csum_check(rxq, rx_desc, skb, ptype); + + skb_record_rx_queue(skb, rxq->idx_in_vsi); + + if (sxe2vf_rx_desc_ipsec_check(rx_desc)) + sxe2vf_ipsec_rx(rxq, rx_desc, skb, ptype); + + sxe2vf_skb_vlan_tag_put(rxq, rx_desc, skb); +} + +int sxe2vf_rxq_irq_clean(struct sxe2vf_queue *rxq, int budget) +{ + u32 total_rx_bytes = 0; + u32 total_rx_pkts = 0; + u16 idle = SXE2VF_DESC_IDLE(rxq); + u32 offset = rxq->rx_offset; + struct sk_buff *skb = rxq->skb; + bool failure; + struct sxe2vf_adapter *adapter = rxq->vsi->adapter; +#ifdef SXE2_CFG_DEBUG +#define SXE2VF_LOG_STR_LEN 32 + char str[SXE2VF_LOG_STR_LEN] = {0}; +#endif + +#ifdef SXE2_CFG_RELEASE + UNUSED(offset); +#endif + + LOG_DEBUG_BDF("rxq[%u] ntc:%u ntu:%u nta:%u budget:%u,\t" + "rxq_offset:%u descriptor idle:%u skb:%p vsi:%p\n", + rxq->idx_in_vsi, rxq->next_to_clean, rxq->next_to_use, + rxq->next_to_alloc, budget, offset, idle, skb, rxq->vsi); + + while (likely(total_rx_pkts < (u32)budget)) { + union sxe2vf_rx_desc *rx_desc; + struct sxe2vf_rx_buf *rx_buf; + u32 size; + s32 pg_refcnt; + u16 rx_ptype; + + rx_desc = SXE2VF_RX_DESC(rxq, rxq->next_to_clean); +#ifdef SXE2_CFG_DEBUG + + (void)snprintf(str, SXE2VF_LOG_STR_LEN - 1, "rxq[%d] desc[%d]", + rxq->idx_in_vsi, rxq->next_to_clean); + DATA_DUMP(rx_desc, sizeof(*rx_desc), str); +#endif + + if (!sxe2vf_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2VF_RX_DESC_STATUS0_DD))) + break; + + dma_rmb(); + + sxe2vf_trace(rx_pkt_clean_begin, rxq); + + size = le32_to_cpu(rx_desc->wb.pktl_hdrl_status2) & + SXE2VF_RX_DESC_PKT_LEN_MASK; + + rx_buf = sxe2vf_rx_buffer_get(rxq, rx_desc, size, &pg_refcnt); + + if (skb) { + sxe2vf_skb_frag_add(rxq, rx_buf, skb, size); + } else if (test_bit(SXE2VF_RXQ_BUILD_SKB_ENABLED, + &rxq->flags)) { + skb = sxe2vf_skb_build(rxq, rx_buf, size, rx_desc); + } else { + skb = sxe2vf_skb_construct(rxq, rx_buf, size, rx_desc); + } + + if (!skb) { + rxq->stats->rx_stats.rx_buff_alloc_err++; + if (rx_buf) + rx_buf->drv_refcnt++; + break; + } + + sxe2vf_rx_buffer_put(rxq, rx_desc, rx_buf, skb, pg_refcnt); + idle++; + + if (sxe2vf_is_non_eop(rxq, rx_desc)) { + rxq->rx_buf[rxq->next_to_clean].skb = skb; + rxq->stats->rx_stats.rx_non_eop_descs++; + continue; + } + + if (unlikely(sxe2vf_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2VF_RX_DESC_ERROR_PA_ERR)))) { + rxq->stats->rx_stats.rx_pa_err++; + } + + if (unlikely(sxe2vf_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2VF_RX_DESC_ERROR_RXE)) || + sxe2vf_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2VF_RX_DESC_ERROR_OVERSIZE)))) { + rxq->stats->rx_stats.rx_pkts_sw_drop++; + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + total_rx_bytes += skb->len; + + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_status1) & + SXE2VF_RX_DESC_PTYPE_MASK; + + sxe2vf_skb_field_fill(rxq, rx_desc, skb, rx_ptype); + + sxe2vf_trace(rx_pkt_clean_end, rxq); + + (void)napi_gro_receive(&rxq->irq_data->napi, skb); + skb = NULL; + + total_rx_pkts++; + } + + failure = 
sxe2vf_rx_buffers_alloc(rxq, idle); + + rxq->skb = skb; + + sxe2vf_rxq_stats_update(rxq, total_rx_pkts, total_rx_bytes); + + return failure ? budget : (int)total_rx_pkts; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.h new file mode 100644 index 0000000000000000000000000000000000000000..19b3358ced474042f25083cd7f9f3307210f8a3e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rx.h @@ -0,0 +1,972 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_rx.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#ifndef __SXE2VF_RX_H__ +#define __SXE2VF_RX_H__ + +#include +#include + +struct sxe2vf_vsi; + +#define SXE2VF_RXQ_CTX_DBUFF_SHIFT 7 + +#define SXE2VF_PAGE_SIZE_8KB 8192 + +#define SXE2VF_RXBUF_3072 3072 +#define SXE2VF_RXBUF_2048 2048 +#define SXE2VF_RXBUF_1536 1536 +#define SXE2VF_RX_HDR_SIZE 256 + +#define SXE2VF_RX_HWTAIL_VALUE_MASK (~0x7) + +enum { + SXE2VF_RXQ_BUILD_SKB_ENABLED = 0, + SXE2VF_RXQ_LRO_ENABLED, + SXE2VF_RTXQ_VLAN_TAG_LOC_L2TAG1, + SXE2VF_RXQ_VLAN_TAG_LOC_L2TAG2_2, + SXE2VF_TXQ_VLAN_TAG_LOC_L2TAG2, + SXE2VF_RXQ_RXFCS_ENABLED, +}; + +#if (PAGE_SIZE < 8192) + +#define SXE2VF_PAD_RX_LEN_EXCEED_2K \ + ((unsigned int)(NET_SKB_PAD + SXE2VF_RXBUF_1536) > \ + SKB_WITH_OVERHEAD(SXE2VF_RXBUF_2048)) + +static u16 sxe2vf_skb_pad_cal(void) +{ + u16 rx_buf_len; + u16 value; + + if (SXE2VF_PAD_RX_LEN_EXCEED_2K) + rx_buf_len = SXE2VF_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = SXE2VF_RXBUF_1536; + + rx_buf_len -= NET_IP_ALIGN; + + value = ALIGN(rx_buf_len, PAGE_SIZE / 2); + + value = SKB_WITH_OVERHEAD(value) - rx_buf_len; + + return value; +} + +#define SXE2VF_SKB_PAD_VALUE sxe2vf_skb_pad_cal() + +#else +#define SXE2VF_PAD_RX_LEN_EXCEED_2K false +#define SXE2VF_SKB_PAD_VALUE (NET_SKB_PAD + NET_IP_ALIGN) +#define SXE2VF_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - SXE2VF_RXBUF_2048) +#endif + +#define SXE2VF_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define SXE2VF_RX_DESC_EUDPE (6) + +#define SXE2VF_HBUFF_LEN_MIN (2) + +#define SXE2VF_RX_DESC_PKT_LEN_SHIFT (0) + +#define SXE2VF_RX_DESC_PKT_LEN_MASK (0x3FFF) + +#define SXE2VF_RX_DESC_LROCNT_MASK (0xF) +#define SXE2VF_RX_DESC_LROCNT_SHIFT (0) + +#define SXE2VF_RX_DESC_RXDID_MASK (0x7) + +#define SXE2VF_RX_DESC_PTYPE_MASK (0x3FF) + +#define SXE2VF_RX_DESC_RSS_VLD_MASK (0x1) + +#define SXE2VF_RX_DESC_PHY_PORT_MASK (0x3) + +#define SXE2VF_RX_DESC_IPSEC_PKT (21) +#define SXE2VF_RX_DESC_IPSEC_ENGINE (22) +#define SXE2VF_RX_DESC_IPSEC_MODE (23) +#define SXE2VF_RX_DESC_IPSEC_STATUS (24) +#define SXE2VF_RX_DESC_IPSEC_STATUS_MASK (0x7) +#define SXE2VF_RX_DESC_IPSEC_STATUS_GET(qw) \ + (((qw) >> SXE2VF_RX_DESC_IPSEC_STATUS) & \ + SXE2VF_RX_DESC_IPSEC_STATUS_MASK) + +enum sxe2_rx_desc_status0_err { + SXE2VF_RX_DESC_STATUS0_DD = 0, + SXE2VF_RX_DESC_STATUS0_EOP = 1, + SXE2VF_RX_DESC_STATUS0_L2TAG1_P = 2, + SXE2VF_RX_DESC_STATUS0_L3L4_P = 3, + SXE2VF_RX_DESC_STATUS0_CRCP = 4, + SXE2VF_RX_DESC_STATUS0_SECP = 5, + SXE2VF_RX_DESC_STATUS0_SECTAG = 6, + + SXE2VF_RX_DESC_ERROR_RXE = 7, + SXE2VF_RX_DESC_ERROR_PA_ERR = 8, + SXE2VF_RX_DESC_ERROR_PKT_HBO = 9, + SXE2VF_RX_DESC_ERROR_IPE = 10, + SXE2VF_RX_DESC_ERROR_L4E = 11, + SXE2VF_RX_DESC_ERROR_EIPE = 12, + SXE2VF_RX_DESC_ERROR_OVERSIZE = 13, + SXE2VF_RX_DESC_ERROR_SEC_ERR = 14, + +}; + +enum sxe2_rx_desc_status1 { + SXE2VF_RX_DESC_STATUS1_START = 10, + 
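/* The packet type occupies the low 10 bits of ptype_status1 (SXE2VF_RX_DESC_PTYPE_MASK is 0x3FF), so the status1 flags begin at bit 10. */ +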
SXE2VF_RX_DESC_STATUS1_SECE = 0 + SXE2VF_RX_DESC_STATUS1_START, + SXE2VF_RX_DESC_STATUS1_EXT_UDP_0 = 1 + SXE2VF_RX_DESC_STATUS1_START, + SXE2VF_RX_DESC_STATUS1_UMBCAST = 2 + SXE2VF_RX_DESC_STATUS1_START, + SXE2VF_RX_DESC_STATUS1_FLTSTAT = 4 + SXE2VF_RX_DESC_STATUS1_START, +}; + +enum sxe2_rx_desc_status2 { + SXE2VF_RX_DESC_STATUS2_START = 26, + SXE2VF_RX_DESC_STATUS2_FIM = 0 + SXE2VF_RX_DESC_STATUS2_START, + SXE2VF_RX_DESC_STATUS2_LPBK = 1 + SXE2VF_RX_DESC_STATUS2_START, + SXE2VF_RX_DESC_STATUS2_IPV6EXADD = 2 + SXE2VF_RX_DESC_STATUS2_START, + SXE2VF_RX_DESC_STATUS2_RSS_VLD = 3 + SXE2VF_RX_DESC_STATUS2_START, + SXE2VF_RX_DESC_STATUS2_ACL_HIT = 4 + SXE2VF_RX_DESC_STATUS2_START, + SXE2VF_RX_DESC_STATUS2_INT_UDP_0 = 5 + SXE2VF_RX_DESC_STATUS2_START, +}; + +enum { + SXE2VF_PTYPE_OUTER_L2 = 0, + SXE2VF_PTYPE_OUTER_IP = 1, +}; + +enum { + SXE2VF_PTYPE_OUTER_NONE = 0, + SXE2VF_PTYPE_OUTER_IPV4 = 1, + SXE2VF_PTYPE_OUTER_IPV6 = 2, +}; + +enum { + SXE2VF_PTYPE_NOT_FRAG = 0, + SXE2VF_PTYPE_FRAG = 1, +}; + +enum { + SXE2VF_PTYPE_TUNNEL_NONE = 0, + SXE2VF_PTYPE_TUNNEL_IP_IP = 1, + SXE2VF_PTYPE_TUNNEL_IP_MAC = 2, + SXE2VF_PTYPE_TUNNEL_IP_MAC_VLAN = 3, + SXE2VF_PTYPE_TUNNEL_IP_GTPU = 4, + SXE2VF_PTYPE_TUNNEL_IP_GRENAT = 5, + SXE2VF_PTYPE_TUNNEL_IP_GRENAT_MAC = 6, + SXE2VF_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 7, +}; + +enum { + SXE2VF_PTYPE_TUNNEL_END_NONE = 0, + SXE2VF_PTYPE_TUNNEL_END_IPV4 = 1, + SXE2VF_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum { + SXE2VF_PTYPE_INNER_PROT_NONE = 0, + SXE2VF_PTYPE_INNER_PROT_UDP = 1, + SXE2VF_PTYPE_INNER_PROT_TCP = 2, + SXE2VF_PTYPE_INNER_PROT_SCTP = 3, + SXE2VF_PTYPE_INNER_PROT_ICMP = 4, + SXE2VF_PTYPE_INNER_PROT_TIMESYNC = 5, +}; + +enum { + SXE2VF_PTYPE_PAYLOAD_LAYER_NONE = 0, + SXE2VF_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + SXE2VF_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + SXE2VF_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +struct sxe2vf_rx_ptype_info { + u32 known : 1; + u32 outer_ip : 1; + u32 outer_ip_ver : 2; + u32 outer_frag : 1; + u32 tunnel_type : 3; + u32 tunnel_end_prot : 2; + u32 tunnel_end_frag : 1; + u32 inner_prot : 4; + u32 payload_layer : 3; + u32 hash_layer : 3; +}; + +#define SXE2VF_PTYPE_UNUSED_ENTRY(PTYPE) \ + [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +#define SXE2VF_PTYPE(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, \ + PL, HL) \ + [PTYPE] = { \ + 1, \ + SXE2VF_PTYPE_OUTER_##OUTER_IP, \ + SXE2VF_PTYPE_OUTER_##OUTER_IP_VER, \ + SXE2VF_PTYPE_##OUTER_FRAG, \ + SXE2VF_PTYPE_TUNNEL_##T, \ + SXE2VF_PTYPE_TUNNEL_END_##TE, \ + SXE2VF_PTYPE_##TEF, \ + SXE2VF_PTYPE_INNER_PROT_##I, \ + SXE2VF_PTYPE_PAYLOAD_LAYER_##PL, \ + SXE2VF_PTYPE_PAYLOAD_LAYER_##HL, \ + } + +#define SXE2VF_PTYPE_NOF SXE2VF_PTYPE_NOT_FRAG +#define SXE2VF_PTYPE_FRG SXE2VF_PTYPE_FRAG + +static const struct sxe2vf_rx_ptype_info sxe2vf_ptype_table[BIT(10)] = { + SXE2VF_PTYPE_UNUSED_ENTRY(0), + SXE2VF_PTYPE(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(3), SXE2VF_PTYPE_UNUSED_ENTRY(4), + SXE2VF_PTYPE_UNUSED_ENTRY(5), + SXE2VF_PTYPE(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(8), SXE2VF_PTYPE_UNUSED_ENTRY(9), + SXE2VF_PTYPE(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(12), SXE2VF_PTYPE_UNUSED_ENTRY(13), + SXE2VF_PTYPE_UNUSED_ENTRY(14), SXE2VF_PTYPE_UNUSED_ENTRY(15), + 
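+	/*
+	 * For orientation, a minimal sketch of what a single SXE2VF_PTYPE()
+	 * entry above expands to (shown in designated-initializer form for
+	 * readability; the macro itself emits positional initializers).
+	 * Entry 1, SXE2VF_PTYPE(1, L2, NONE, NOF, NONE, NONE, NOF, NONE,
+	 * PAY2, PAY2), becomes roughly:
+	 *
+	 *	[1] = {
+	 *		.known		 = 1,
+	 *		.outer_ip	 = SXE2VF_PTYPE_OUTER_L2,
+	 *		.outer_ip_ver	 = SXE2VF_PTYPE_OUTER_NONE,
+	 *		.outer_frag	 = SXE2VF_PTYPE_NOT_FRAG,
+	 *		.tunnel_type	 = SXE2VF_PTYPE_TUNNEL_NONE,
+	 *		.tunnel_end_prot = SXE2VF_PTYPE_TUNNEL_END_NONE,
+	 *		.tunnel_end_frag = SXE2VF_PTYPE_NOT_FRAG,
+	 *		.inner_prot	 = SXE2VF_PTYPE_INNER_PROT_NONE,
+	 *		.payload_layer	 = SXE2VF_PTYPE_PAYLOAD_LAYER_PAY2,
+	 *		.hash_layer	 = SXE2VF_PTYPE_PAYLOAD_LAYER_PAY2,
+	 *	},
+	 */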
SXE2VF_PTYPE_UNUSED_ENTRY(16), SXE2VF_PTYPE_UNUSED_ENTRY(17), + SXE2VF_PTYPE_UNUSED_ENTRY(18), SXE2VF_PTYPE_UNUSED_ENTRY(19), + SXE2VF_PTYPE_UNUSED_ENTRY(20), SXE2VF_PTYPE_UNUSED_ENTRY(21), + + SXE2VF_PTYPE(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(25), + SXE2VF_PTYPE(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(32), + SXE2VF_PTYPE(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(39), + SXE2VF_PTYPE(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3, PAY3), + + SXE2VF_PTYPE(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(47), + SXE2VF_PTYPE(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(54), + SXE2VF_PTYPE(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2VF_PTYPE(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(62), + SXE2VF_PTYPE(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2VF_PTYPE(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(69), + SXE2VF_PTYPE(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, 
+ PAY4), + SXE2VF_PTYPE(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, + PAY3, PAY3), + + SXE2VF_PTYPE(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, + PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(77), + SXE2VF_PTYPE(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, + PAY4, PAY4), + SXE2VF_PTYPE(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, + PAY4, PAY4), + SXE2VF_PTYPE(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, + PAY4, PAY3), + + SXE2VF_PTYPE(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, + PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(84), + SXE2VF_PTYPE(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, + PAY4, PAY4), + SXE2VF_PTYPE(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, + PAY4, PAY4), + SXE2VF_PTYPE(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, + PAY4, PAY3), + + SXE2VF_PTYPE(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(91), + SXE2VF_PTYPE(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(98), + SXE2VF_PTYPE(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(105), + SXE2VF_PTYPE(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2VF_PTYPE(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(113), + SXE2VF_PTYPE(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + 
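+	/*
+	 * The hash_layer field parsed from this table is presumably what
+	 * sxe2vf_skb_hash_set() consults when reporting the RSS hash to the
+	 * stack.  A minimal sketch of the plausible mapping onto the
+	 * kernel's enum pkt_hash_types from <linux/skbuff.h> (the helper
+	 * name below is illustrative, not part of this driver):
+	 *
+	 *	static inline enum pkt_hash_types
+	 *	sxe2vf_ptype_to_hash_type(const struct sxe2vf_rx_ptype_info *info)
+	 *	{
+	 *		if (!info->known)
+	 *			return PKT_HASH_TYPE_NONE;
+	 *		if (info->hash_layer == SXE2VF_PTYPE_PAYLOAD_LAYER_PAY4)
+	 *			return PKT_HASH_TYPE_L4;  (ports were hashed)
+	 *		if (info->hash_layer == SXE2VF_PTYPE_PAYLOAD_LAYER_PAY3)
+	 *			return PKT_HASH_TYPE_L3;  (addresses were hashed)
+	 *		return PKT_HASH_TYPE_L2;
+	 *	}
+	 */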
SXE2VF_PTYPE_UNUSED_ENTRY(120), + SXE2VF_PTYPE(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3, + PAY3), + + SXE2VF_PTYPE(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, + PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(128), + SXE2VF_PTYPE(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, + PAY4), + SXE2VF_PTYPE(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, + PAY3), + SXE2VF_PTYPE(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, + PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(135), + SXE2VF_PTYPE(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, + PAY4), + SXE2VF_PTYPE(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, + PAY4), + SXE2VF_PTYPE(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, + PAY3), + + SXE2VF_PTYPE(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, + PAY3, PAY3), + + SXE2VF_PTYPE(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, + PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(143), + SXE2VF_PTYPE(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, + PAY4, PAY4), + SXE2VF_PTYPE(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, + PAY4, PAY4), + SXE2VF_PTYPE(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, + PAY4, PAY3), + + SXE2VF_PTYPE(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, + PAY3, PAY3), + SXE2VF_PTYPE(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, + PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(150), + SXE2VF_PTYPE(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, + PAY4, PAY4), + SXE2VF_PTYPE(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, + PAY4, PAY4), + SXE2VF_PTYPE(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, + PAY4, PAY3), + SXE2VF_PTYPE_UNUSED_ENTRY(154), SXE2VF_PTYPE_UNUSED_ENTRY(155), + SXE2VF_PTYPE_UNUSED_ENTRY(156), SXE2VF_PTYPE_UNUSED_ENTRY(157), + SXE2VF_PTYPE_UNUSED_ENTRY(158), SXE2VF_PTYPE_UNUSED_ENTRY(159), + + SXE2VF_PTYPE(160, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(161, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(162, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(163, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(164, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(165, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(166, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(167, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + + [168 ... 254] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + [255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + [256 ... 
271] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(272, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(273, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(274, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(275, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(276, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(277), + SXE2VF_PTYPE(278, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + + [279 ... 326] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(327, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(328, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(329, IP, IPV4, NOF, IP_GTPU, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(330, IP, IPV6, NOF, IP_GTPU, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(331, IP, IPV4, NOF, IP_GTPU, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(332, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(333, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(334, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(335, IP, IPV4, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(336, IP, IPV6, NOF, IP_GTPU, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(337, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(338, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(339, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(340, IP, IPV6, NOF, IP_GTPU, IPV4, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(341, IP, IPV4, NOF, IP_GTPU, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(342, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(343, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(344, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(345, IP, IPV4, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(346, IP, IPV6, NOF, IP_GTPU, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(347, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(348, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(349, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(350, IP, IPV6, NOF, IP_GTPU, IPV6, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(351, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(352, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(353, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(354, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + + [355 ... 359] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(360, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(361, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + + [362 ... 370] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(371, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + [372 ... 380] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + SXE2VF_PTYPE(381, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + [382 ... 390] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + SXE2VF_PTYPE(391, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + + [392 ... 
395] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(396, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(397, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(398, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(399, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(400, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(401, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(402, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(403, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(404, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(405, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(406, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(407, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + + SXE2VF_PTYPE(408, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(409, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(410, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(411, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(412, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(413, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(414, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(415, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(416, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(417, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(418, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(419, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(420, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(421, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(422, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(423, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(424, IP, IPV4, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(425, IP, IPV6, NOF, NONE, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(426, IP, IPV4, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(427, IP, IPV6, NOF, NONE, IPV6, NOF, NONE, PAY3, PAY3), + + [428 ... 
768] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + + SXE2VF_PTYPE(769, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE(770, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE(771, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(772), + SXE2VF_PTYPE(773, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2, PAY2), + SXE2VF_PTYPE_UNUSED_ENTRY(774), SXE2VF_PTYPE_UNUSED_ENTRY(775), + + SXE2VF_PTYPE(776, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(777, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(778, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(779, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(780, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(781, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(782), SXE2VF_PTYPE_UNUSED_ENTRY(783), + + SXE2VF_PTYPE(784, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(785, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(786, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(787, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(788, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE(789, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(790), SXE2VF_PTYPE_UNUSED_ENTRY(791), + + SXE2VF_PTYPE(792, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(793, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(794, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(795, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(796, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(797, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(798, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(799, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(800, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(801, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(802), SXE2VF_PTYPE_UNUSED_ENTRY(803), + SXE2VF_PTYPE_UNUSED_ENTRY(804), SXE2VF_PTYPE_UNUSED_ENTRY(805), + SXE2VF_PTYPE_UNUSED_ENTRY(806), SXE2VF_PTYPE_UNUSED_ENTRY(807), + + SXE2VF_PTYPE(808, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(809, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(810, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(811, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(812, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(813, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(814, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(815, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(816, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(817, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE_UNUSED_ENTRY(818), SXE2VF_PTYPE_UNUSED_ENTRY(819), + + SXE2VF_PTYPE(820, IP, IPV6, NOF, IP_MAC, NONE, NOF, NONE, PAY3, PAY3), + + SXE2VF_PTYPE(821, IP, IPV6, NOF, IP_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(822, IP, IPV6, NOF, IP_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(823, IP, IPV6, NOF, IP_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(824, IP, IPV6, NOF, IP_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(825, IP, IPV6, NOF, IP_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(826, IP, IPV6, 
NOF, IP_MAC, IPV4, NOF, ICMP, PAY4, PAY3),
+
+	SXE2VF_PTYPE(827, IP, IPV6, NOF, IP_MAC, IPV6, FRG, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(828, IP, IPV6, NOF, IP_MAC, IPV6, NOF, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(829, IP, IPV6, NOF, IP_MAC, IPV6, NOF, UDP, PAY4, PAY4),
+	SXE2VF_PTYPE(830, IP, IPV6, NOF, IP_MAC, IPV6, NOF, TCP, PAY4, PAY4),
+	SXE2VF_PTYPE(831, IP, IPV6, NOF, IP_MAC, IPV6, NOF, SCTP, PAY4, PAY4),
+	SXE2VF_PTYPE(832, IP, IPV6, NOF, IP_MAC, IPV6, NOF, ICMP, PAY4, PAY3),
+
+	SXE2VF_PTYPE(835, IP, IPV6, NOF, IP_MAC_VLAN, NONE, NOF, NONE, PAY3,
+		     PAY3),
+
+	SXE2VF_PTYPE(836, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, FRG, NONE, PAY3,
+		     PAY3),
+	SXE2VF_PTYPE(837, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, NONE, PAY3,
+		     PAY3),
+	SXE2VF_PTYPE(838, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, UDP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(839, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, TCP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(840, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, SCTP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(841, IP, IPV6, NOF, IP_MAC_VLAN, IPV4, NOF, ICMP, PAY4,
+		     PAY3),
+
+	SXE2VF_PTYPE(842, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, FRG, NONE, PAY3,
+		     PAY3),
+	SXE2VF_PTYPE(843, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, NONE, PAY3,
+		     PAY3),
+	SXE2VF_PTYPE(844, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, UDP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(845, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, TCP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(846, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, SCTP, PAY4,
+		     PAY4),
+	SXE2VF_PTYPE(847, IP, IPV6, NOF, IP_MAC_VLAN, IPV6, NOF, ICMP, PAY4,
+		     PAY3),
+
+	[848 ... 867] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+
+	SXE2VF_PTYPE(878, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4),
+
+	SXE2VF_PTYPE(877, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(876, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(879, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4),
+	SXE2VF_PTYPE(880, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4),
+	SXE2VF_PTYPE(875, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4),
+	SXE2VF_PTYPE(874, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3),
+
+	SXE2VF_PTYPE(871, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(870, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(872, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4),
+	SXE2VF_PTYPE(873, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4),
+	SXE2VF_PTYPE(869, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4),
+	SXE2VF_PTYPE(868, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3),
+
+	SXE2VF_PTYPE(891, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4),
+
+	SXE2VF_PTYPE(890, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(889, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(892, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4),
+	SXE2VF_PTYPE(893, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4),
+	SXE2VF_PTYPE(888, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4),
+	SXE2VF_PTYPE(887, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3),
+
+	SXE2VF_PTYPE(884, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(883, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3),
+	SXE2VF_PTYPE(885, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4),
+	SXE2VF_PTYPE(886, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4),
+	SXE2VF_PTYPE(882, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4),
+	SXE2VF_PTYPE(881, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3),
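+
+	/*
+	 * The tunnel blocks here and below list entries in protocol order
+	 * (head, FRG, NONE, UDP, TCP, SCTP, ICMP) rather than in ascending
+	 * ptype order; the designated [index] initializers generated by
+	 * SXE2VF_PTYPE() make that reordering safe.  A minimal illustration
+	 * of consuming the finished table via sxe2vf_rx_ptype_parse()
+	 * (defined later in this header); the checksum decision shown is a
+	 * sketch, not this driver's actual policy:
+	 *
+	 *	struct sxe2vf_rx_ptype_info info = sxe2vf_rx_ptype_parse(ptype);
+	 *
+	 *	if (info.known &&
+	 *	    info.inner_prot == SXE2VF_PTYPE_INNER_PROT_TCP)
+	 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	 */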
+ + SXE2VF_PTYPE(904, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(903, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(902, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(905, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(906, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(901, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(900, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(897, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(896, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(898, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(899, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(895, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(894, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(917, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(916, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(915, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(918, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(919, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(914, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(913, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(910, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(909, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(911, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(912, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(908, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(907, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(930, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(929, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(928, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(931, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(932, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(927, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(926, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(923, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(922, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(924, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(925, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(921, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(920, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(943, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(942, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(941, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(944, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(945, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(940, IP, IPV4, NOF, 
IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(939, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(936, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(935, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(937, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(938, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(934, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(933, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(956, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(955, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(954, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(957, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(958, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(953, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(952, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(949, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(948, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(950, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(951, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(947, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(946, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(969, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(968, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(967, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(970, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(971, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(966, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(965, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(962, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(961, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(963, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(964, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(960, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(959, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(982, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(981, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(980, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(983, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(984, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(979, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(978, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(975, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(974, IP, IPV6, NOF, IP_GRENAT_MAC, 
IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(976, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(977, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(973, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(972, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(995, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(994, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(993, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(996, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(997, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(992, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(991, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(988, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(987, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(989, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(990, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(986, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(985, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(1008, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(1007, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1006, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1009, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(1010, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(1005, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(1004, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(1001, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1000, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1002, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(1003, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(999, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(998, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(1021, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY4, PAY4), + + SXE2VF_PTYPE(1020, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1019, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1022, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(1023, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(1018, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(1017, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4, PAY3), + + SXE2VF_PTYPE(1014, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1013, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3, PAY3), + SXE2VF_PTYPE(1015, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4, PAY4), + SXE2VF_PTYPE(1016, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4, PAY4), + SXE2VF_PTYPE(1012, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4, PAY4), + SXE2VF_PTYPE(1011, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4, PAY3), +}; + +static inline struct sxe2vf_rx_ptype_info 
sxe2vf_rx_ptype_parse(u16 ptype) +{ + return sxe2vf_ptype_table[ptype]; +} + +struct sxe2vf_skb_private_data { + dma_addr_t dma; + u16 lro_cnt; + bool page_released; +}; + +static inline u32 sxe2vf_rx_page_order(struct sxe2vf_queue *rxq) +{ + u32 order = 0; + +#if (PAGE_SIZE < 8192) + if (rxq->rx_buf_len > (PAGE_SIZE / 2)) + order = 1; +#endif + return order; +} + +#define SXE2VF_RX_PAGE_ORDER(rxq) (sxe2vf_rx_page_order(rxq)) +#define SXE2VF_RX_PAGE_SIZE(rxq) (PAGE_SIZE << SXE2VF_RX_PAGE_ORDER(rxq)) + +#define SXE2VF_SKB_PRIV(skb) ((struct sxe2vf_skb_private_data *)(skb)->cb) + +union sxe2vf_rx_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + u8 rxdid_src; + u8 mirror; + __le16 l2tag1; + __le32 filter_status; + + __le16 status0_err; + __le16 ptype_status1; + __le32 pktl_hdrl_status2; + + __le32 status_lrocnt_fdpf_id; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + u8 acl_pf_id; + u8 sw_pf_id; + __le16 flow_id; + __le32 fd_filter_id; + + } wb; +}; + +#define SXE2VF_RX_DESC(rxq, ridx) \ + (&(((union sxe2vf_rx_desc *)((rxq)->desc.base_addr))[ridx])) + +#define SXE2VF_RX_DESC_SIZE(rxq) \ + (ALIGN((rxq)->depth * sizeof(union sxe2vf_rx_desc), PAGE_SIZE)) + +#define SXE2VF_RX_BUF_LEN(rxq) ((rxq)->depth * sizeof(struct sxe2vf_rx_buf)) + +static inline bool sxe2vf_is_non_eop(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc) +{ + if (likely(sxe2vf_desc_status0_err_test(rx_desc->wb.status0_err, + BIT(SXE2VF_RX_DESC_STATUS0_EOP)))) + return false; + + return true; +} + +static inline u8 sxe2vf_rx_desc_lro_cnt(struct sxe2vf_queue *rxq, + union sxe2vf_rx_desc *rx_desc) +{ + if (test_bit(SXE2VF_RXQ_LRO_ENABLED, &rxq->flags)) { + return (le32_to_cpu(rx_desc->wb.status_lrocnt_fdpf_id) >> + SXE2VF_RX_DESC_LROCNT_SHIFT) & + SXE2VF_RX_DESC_LROCNT_MASK; + } + + return 0; +} + +static inline u16 sxe2vf_rx_offset_get(struct sxe2vf_queue *rxq) +{ + return test_bit(SXE2VF_RXQ_BUILD_SKB_ENABLED, &rxq->flags) ? + SXE2VF_SKB_PAD_VALUE : + 0; +} + +s32 sxe2vf_rx_cfg(struct sxe2vf_vsi *vsi); + +u16 sxe2vf_frame_size_set(struct sxe2vf_adapter *adapter); + +void sxe2vf_rx_ring_free(struct sxe2vf_queue *rxq); + +bool sxe2vf_rx_buffers_alloc(struct sxe2vf_queue *rxq, u16 cnt); + +void sxe2vf_rx_rings_res_free(struct sxe2vf_vsi *vsi); + +s32 sxe2vf_rxqs_stop(struct sxe2vf_vsi *vsi); + +void sxe2vf_rx_rings_clean(struct sxe2vf_vsi *vsi); + +int sxe2vf_rxq_irq_clean(struct sxe2vf_queue *rxq, int budget); + +s32 sxe2vf_rx_hw_cfg(struct sxe2vf_vsi *vsi); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.c new file mode 100644 index 0000000000000000000000000000000000000000..4ab0b7442242c5fa76d7909083892b1f7e640aa7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.c @@ -0,0 +1,1342 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_rxft.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include +#include +#include "sxe2vf.h" +#include "sxe2vf_ethtool.h" +#include "sxe2_log.h" +#include "sxe2_version.h" +#include "sxe2_mbx_public.h" + +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_ip4; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_tcp4; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_udp4; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_sctp4; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_ip6; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_tcp6; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_udp6; +static struct sxe2vf_rss_hash_cfg default_vf_rss_cfg_sctp6; + +static struct sxe2vf_rss_hash_cfg *default_vf_rss_cfgs[] = { + &default_vf_rss_cfg_ip4, &default_vf_rss_cfg_tcp4, + &default_vf_rss_cfg_udp4, &default_vf_rss_cfg_sctp4, + + &default_vf_rss_cfg_ip6, &default_vf_rss_cfg_tcp6, + &default_vf_rss_cfg_udp6, &default_vf_rss_cfg_sctp6, +}; + +static void sxe2vf_default_rss_cfg_create(void) +{ + default_vf_rss_cfg_ip4.symm = false; + set_bit(SXE2_FLOW_HDR_IPV4, default_vf_rss_cfg_ip4.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_vf_rss_cfg_ip4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_vf_rss_cfg_ip4.hash_flds); + + default_vf_rss_cfg_tcp4.symm = false; + set_bit(SXE2_FLOW_HDR_IPV4, default_vf_rss_cfg_tcp4.headers); + set_bit(SXE2_FLOW_HDR_TCP, default_vf_rss_cfg_tcp4.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_vf_rss_cfg_tcp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_vf_rss_cfg_tcp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, default_vf_rss_cfg_tcp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, default_vf_rss_cfg_tcp4.hash_flds); + + default_vf_rss_cfg_udp4.symm = false; + set_bit(SXE2_FLOW_HDR_IPV4, default_vf_rss_cfg_udp4.headers); + set_bit(SXE2_FLOW_HDR_UDP, default_vf_rss_cfg_udp4.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_vf_rss_cfg_udp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_vf_rss_cfg_udp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, default_vf_rss_cfg_udp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, default_vf_rss_cfg_udp4.hash_flds); + + default_vf_rss_cfg_sctp4.symm = false; + set_bit(SXE2_FLOW_HDR_IPV4, default_vf_rss_cfg_sctp4.headers); + set_bit(SXE2_FLOW_HDR_SCTP, default_vf_rss_cfg_sctp4.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, default_vf_rss_cfg_sctp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, default_vf_rss_cfg_sctp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, default_vf_rss_cfg_sctp4.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, default_vf_rss_cfg_sctp4.hash_flds); + + default_vf_rss_cfg_ip6.symm = false; + set_bit(SXE2_FLOW_HDR_IPV6, default_vf_rss_cfg_ip6.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_vf_rss_cfg_ip6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_vf_rss_cfg_ip6.hash_flds); + + default_vf_rss_cfg_tcp6.symm = false; + set_bit(SXE2_FLOW_HDR_IPV6, default_vf_rss_cfg_tcp6.headers); + set_bit(SXE2_FLOW_HDR_TCP, default_vf_rss_cfg_tcp6.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_vf_rss_cfg_tcp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_vf_rss_cfg_tcp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, default_vf_rss_cfg_tcp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, default_vf_rss_cfg_tcp6.hash_flds); + + default_vf_rss_cfg_udp6.symm = false; + set_bit(SXE2_FLOW_HDR_IPV6, 
default_vf_rss_cfg_udp6.headers); + set_bit(SXE2_FLOW_HDR_UDP, default_vf_rss_cfg_udp6.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_vf_rss_cfg_udp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_vf_rss_cfg_udp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, default_vf_rss_cfg_udp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, default_vf_rss_cfg_udp6.hash_flds); + + default_vf_rss_cfg_sctp6.symm = false; + set_bit(SXE2_FLOW_HDR_IPV6, default_vf_rss_cfg_sctp6.headers); + set_bit(SXE2_FLOW_HDR_SCTP, default_vf_rss_cfg_sctp6.headers); + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, default_vf_rss_cfg_sctp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, default_vf_rss_cfg_sctp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, default_vf_rss_cfg_sctp6.hash_flds); + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, default_vf_rss_cfg_sctp6.hash_flds); +} + +struct sxe2vf_rss_cfg *sxe2vf_find_rss_cfg_by_hdrs(struct sxe2vf_adapter *adapter, + unsigned long *hdrs) +{ + struct sxe2vf_rss_cfg *rss_cfg = NULL; + + if (list_empty(&adapter->rss_ctxt.rss_cfgs)) + goto l_out; + + list_for_each_entry(rss_cfg, &adapter->rss_ctxt.rss_cfgs, l_node) { + if (bitmap_equal(rss_cfg->hash_cfg.headers, hdrs, + SXE2_FLOW_HDR_MAX)) { + goto l_out; + } + } + rss_cfg = NULL; +l_out: + return rss_cfg; +} + +STATIC void +sxe2vf_rss_hash_cfg_convert_hash_msg(struct sxe2vf_rss_hash_cfg *hash_cfg, + struct sxe2_vf_rss_hash_msg *rss_hash_msg) +{ + u32 tmp_headers[BITS_TO_U32(SXE2_FLOW_HDR_MAX)]; + u32 tmp_flds[BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX)]; + u32 i = 0; + + bitmap_to_arr32(tmp_headers, hash_cfg->headers, SXE2_FLOW_HDR_MAX); + bitmap_to_arr32(tmp_flds, hash_cfg->hash_flds, SXE2_FLOW_FLD_ID_MAX); + + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_HDR_MAX); i++) + rss_hash_msg->headers[i] = cpu_to_le32(tmp_headers[i]); + + for (i = 0; i < BITS_TO_U32(SXE2_FLOW_FLD_ID_MAX); i++) + rss_hash_msg->hash_flds[i] = cpu_to_le32(tmp_flds[i]); + + rss_hash_msg->symm = (hash_cfg->symm ? 
1 : 0);
+	rss_hash_msg->hdr_type = cpu_to_le32(SXE2_RSS_ANY_HEADERS);
+}
+
+STATIC s32 sxe2vf_add_rss_cfg_func(struct sxe2vf_adapter *adapter,
+				   struct sxe2vf_rss_hash_cfg *hash_cfg)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+	struct sxe2_vf_rss_hash_msg rss_hash_msg = {0};
+
+	sxe2vf_rss_hash_cfg_convert_hash_msg(hash_cfg, &rss_hash_msg);
+
+	sxe2vf_mbx_msg_dflt_params_fill(
+		&params, SXE2VF_MSG_RESP_WAIT_NOTIFY, SXE2_VF_ADD_RSS_CFG,
+		&rss_hash_msg, sizeof(struct sxe2_vf_rss_hash_msg), NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("sxe2 vf rss send mbx add rss cfg fail!\n");
+
+	return ret;
+}
+
+STATIC s32 sxe2vf_clear_rss_cfg_func(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_CLEAR_RSS_CFG, NULL, 0, NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("sxe2 vf rss send mbx clear rss cfg fail!\n");
+
+	return ret;
+}
+
+STATIC s32 sxe2vf_set_rss_hash_ctrl_func(struct sxe2vf_adapter *adapter)
+{
+	s32 ret = 0;
+	struct sxe2vf_msg_params params = {0};
+	struct sxe2_vf_rss_hash_ctrl hash_ctrl = {0};
+
+	hash_ctrl.hash_func = SXE2_RSS_HASH_FUNC_TOEPLITZ;
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_SET_RSS_HASH_CTRL, &hash_ctrl,
+					sizeof(struct sxe2_vf_rss_hash_ctrl),
+					NULL, 0);
+
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("sxe2 vf rss send mbx set rss hash ctrl fail!\n");
+
+	return ret;
+}
+
+STATIC void sxe2vf_rss_clear_cfg(struct sxe2vf_adapter *adapter)
+{
+	struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter);
+	struct sxe2vf_rss_cfg *rss_cfg;
+	struct sxe2vf_rss_cfg *tmp;
+	s32 ret = 0;
+
+	if (list_empty(&adapter->rss_ctxt.rss_cfgs))
+		goto l_end;
+
+	mutex_lock(&adapter->rss_ctxt.rss_cfgs_lock);
+	ret = sxe2vf_clear_rss_cfg_func(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("sxe2 vf rss send mbx clear rss cfg fail, ret = %d!\n",
+			      ret);
+	}
+
+	list_for_each_entry_safe(rss_cfg, tmp, &adapter->rss_ctxt.rss_cfgs, l_node) {
+		list_del(&rss_cfg->l_node);
+		devm_kfree(dev, rss_cfg);
+	}
+	mutex_unlock(&adapter->rss_ctxt.rss_cfgs_lock);
+
+l_end:
+	return;
+}
+
+s32 sxe2vf_rss_add_cfg(struct sxe2vf_adapter *adapter,
+		       struct sxe2vf_rss_hash_cfg *hash_cfg, bool is_default)
+{
+	struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter);
+	struct sxe2vf_rss_cfg *old_cfg;
+	struct sxe2vf_rss_cfg *new_cfg;
+	s32 ret = 0;
+
+	mutex_lock(&adapter->rss_ctxt.rss_cfgs_lock);
+
+	old_cfg = sxe2vf_find_rss_cfg_by_hdrs(adapter, hash_cfg->headers);
+	if (old_cfg) {
+		if (bitmap_equal(old_cfg->hash_cfg.hash_flds, hash_cfg->hash_flds,
+				 SXE2_FLOW_FLD_ID_MAX)) {
+			goto l_end;
+		} else {
+			if (is_default)
+				ret = 0;
+			else
+				ret = sxe2vf_add_rss_cfg_func(adapter, hash_cfg);
+
+			if (!ret)
+				bitmap_copy(old_cfg->hash_cfg.hash_flds,
+					    hash_cfg->hash_flds,
+					    SXE2_FLOW_FLD_ID_MAX);
+			else
+				LOG_ERROR_BDF("sxe2 vf rss failed to add rss cfg.\n");
+		}
+	} else {
+		new_cfg = devm_kzalloc(dev, sizeof(*new_cfg), GFP_KERNEL);
+		if (!new_cfg) {
+			LOG_ERROR_BDF("sxe2 vf rss failed to alloc rss_cfg memory.\n");
+			ret = -ENOMEM;
+			goto l_end;
+		}
+		if (is_default)
+			ret = 0;
+		else
+			ret = sxe2vf_add_rss_cfg_func(adapter, hash_cfg);
+
+		if (!ret) {
+			bitmap_copy(new_cfg->hash_cfg.hash_flds, hash_cfg->hash_flds,
+				    SXE2_FLOW_FLD_ID_MAX);
+			bitmap_copy(new_cfg->hash_cfg.headers, hash_cfg->headers,
+				    SXE2_FLOW_HDR_MAX);
+			list_add_tail(&new_cfg->l_node, &adapter->rss_ctxt.rss_cfgs);
+		} else {
+			LOG_ERROR_BDF("sxe2 vf rss failed
to add rss cfg.\n"); + devm_kfree(dev, new_cfg); + } + } + +l_end: + mutex_unlock(&adapter->rss_ctxt.rss_cfgs_lock); + return ret; +} + +void sxe2vf_analysis_hdrs(struct ethtool_rxnfc *nfc, unsigned long *hdrs) +{ + bitmap_zero(hdrs, SXE2_FLOW_HDR_MAX); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + set_bit(SXE2_FLOW_HDR_IPV4, hdrs); + set_bit(SXE2_FLOW_HDR_TCP, hdrs); + break; + case UDP_V4_FLOW: + set_bit(SXE2_FLOW_HDR_IPV4, hdrs); + set_bit(SXE2_FLOW_HDR_UDP, hdrs); + break; + case SCTP_V4_FLOW: + set_bit(SXE2_FLOW_HDR_IPV4, hdrs); + set_bit(SXE2_FLOW_HDR_SCTP, hdrs); + break; + case TCP_V6_FLOW: + set_bit(SXE2_FLOW_HDR_IPV6, hdrs); + set_bit(SXE2_FLOW_HDR_TCP, hdrs); + break; + case UDP_V6_FLOW: + set_bit(SXE2_FLOW_HDR_IPV6, hdrs); + set_bit(SXE2_FLOW_HDR_UDP, hdrs); + break; + case SCTP_V6_FLOW: + set_bit(SXE2_FLOW_HDR_IPV6, hdrs); + set_bit(SXE2_FLOW_HDR_SCTP, hdrs); + break; + default: + break; + } +} + +void sxe2vf_analysis_hash_flds(struct ethtool_rxnfc *nfc, unsigned long *hash_flds) +{ + bitmap_zero(hash_flds, SXE2_FLOW_FLD_ID_MAX); + if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) { + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + if (nfc->data & RXH_IP_SRC) + set_bit(SXE2_FLOW_FLD_ID_IPV4_SA, hash_flds); + + if (nfc->data & RXH_IP_DST) + set_bit(SXE2_FLOW_FLD_ID_IPV4_DA, hash_flds); + + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + if (nfc->data & RXH_IP_SRC) + set_bit(SXE2_FLOW_FLD_ID_IPV6_SA, hash_flds); + + if (nfc->data & RXH_IP_DST) + set_bit(SXE2_FLOW_FLD_ID_IPV6_DA, hash_flds); + + break; + default: + break; + } + } + + if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) { + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (nfc->data & RXH_L4_B_0_1) + set_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, hash_flds); + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, hash_flds); + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (nfc->data & RXH_L4_B_0_1) { + set_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, hash_flds); + } + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, hash_flds); + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + if (nfc->data & RXH_L4_B_0_1) + set_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, hash_flds); + if (nfc->data & RXH_L4_B_2_3) + set_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, hash_flds); + break; + default: + break; + } + } +} + +void sxe2vf_get_rss_flow(struct sxe2vf_adapter *adapter, struct ethtool_rxnfc *nfc) +{ + DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hash_flds, SXE2_FLOW_FLD_ID_MAX); + struct sxe2vf_rss_cfg *hash_cfg; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + + nfc->data = 0; + +#ifdef SXE2_CFG_RELEASE + UNUSED(vsi); +#endif + + sxe2vf_analysis_hdrs(nfc, headers); + if (bitmap_empty(headers, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("sxe2 vf rss (id: %u) nfc input hdrs is empty.\n", + vsi->vsi_id); + return; + } + + mutex_lock(&adapter->rss_ctxt.rss_cfgs_lock); + hash_cfg = sxe2vf_find_rss_cfg_by_hdrs(adapter, headers); + if (!hash_cfg) { + LOG_ERROR_BDF("sxe2 vf rss (id: %u) can not find same hdrs in hash cfg.\n", + vsi->vsi_id); + mutex_unlock(&adapter->rss_ctxt.rss_cfgs_lock); + return; + } + bitmap_copy(hash_flds, hash_cfg->hash_cfg.hash_flds, SXE2_FLOW_FLD_ID_MAX); + mutex_unlock(&adapter->rss_ctxt.rss_cfgs_lock); + + if (test_bit(SXE2_FLOW_FLD_ID_IPV4_SA, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_IPV6_SA, hash_flds)) { + nfc->data |= (u64)RXH_IP_SRC; + } + + if 
(test_bit(SXE2_FLOW_FLD_ID_IPV4_DA, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_IPV6_DA, hash_flds)) { + nfc->data |= (u64)RXH_IP_DST; + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_SRC_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_UDP_SRC_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_SCTP_SRC_PORT, hash_flds)) { + nfc->data |= (u64)RXH_L4_B_0_1; + } + + if (test_bit(SXE2_FLOW_FLD_ID_TCP_DST_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_UDP_DST_PORT, hash_flds) || + test_bit(SXE2_FLOW_FLD_ID_SCTP_DST_PORT, hash_flds)) { + nfc->data |= (u64)RXH_L4_B_2_3; + } +} + +int sxe2vf_set_rss_flow(struct sxe2vf_adapter *adapter, struct ethtool_rxnfc *nfc) +{ + int ret = 0; + struct sxe2vf_rss_hash_cfg hash_cfg; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + +#ifdef SXE2_CFG_RELEASE + UNUSED(vsi); +#endif + + sxe2vf_analysis_hdrs(nfc, hash_cfg.headers); + if (bitmap_empty(hash_cfg.headers, SXE2_FLOW_HDR_MAX)) { + LOG_ERROR_BDF("sxe2 vf rss (id: %u) invalid field type!\n", + vsi->vsi_id); + return -EINVAL; + } + + sxe2vf_analysis_hash_flds(nfc, hash_cfg.hash_flds); + if (bitmap_empty(hash_cfg.hash_flds, SXE2_FLOW_FLD_ID_MAX)) { + LOG_ERROR_BDF("sxe2 vf rss (id: %u) invalid field type!\n", + vsi->vsi_id); + return -EINVAL; + } + + ret = sxe2vf_rss_add_cfg(adapter, &hash_cfg, false); + return ret; +} + +s32 sxe2vf_rss_default_flow_set(struct sxe2vf_adapter *adapter) +{ + u32 i; + s32 ret = 0; + struct sxe2vf_rss_hash_cfg *cfg; + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2vf_msg_params params = {0}; + +#ifdef SXE2_CFG_RELEASE + UNUSED(vsi); +#endif + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_ADD_DEFAULT_RSS_CFG, NULL, 0, NULL, + 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss send mbx add default rss cfg fail!\n"); + goto l_end; + } + sxe2vf_default_rss_cfg_create(); + + for (i = 0; i < ARRAY_SIZE(default_vf_rss_cfgs); i++) { + cfg = default_vf_rss_cfgs[i]; + ret = sxe2vf_rss_add_cfg(adapter, cfg, true); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss (id: %u) add default cfg failed.\n", + vsi->vsi_id); + sxe2vf_rss_clear_cfg(adapter); + break; + } + } + +l_end: + return ret; +} + +STATIC s32 sxe2vf_rss_replay_hash_cfg(struct sxe2vf_adapter *adapter) +{ + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + s32 ret = 0; + struct sxe2vf_msg_params params = {0}; + struct sxe2vf_rss_cfg *rss_cfg; + struct sxe2vf_rss_cfg *tmp; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_REPLAY_RSS_CFG, NULL, 0, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + mutex_lock(&adapter->rss_ctxt.rss_cfgs_lock); + list_for_each_entry_safe(rss_cfg, tmp, &adapter->rss_ctxt.rss_cfgs, + l_node) + { + list_del(&rss_cfg->l_node); + devm_kfree(dev, rss_cfg); + } + mutex_unlock(&adapter->rss_ctxt.rss_cfgs_lock); + LOG_DEV_WARN("sxe2 vf replay rss cfg fail in rebuild, ret=%d!\t" + "please type \"ethtool -N [devname] rx-flow-hash\"recfg rss\n", + ret); + } + + return ret; +} + +STATIC s32 sxe2vf_rss_hash_key_deinit(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_msg_params params = {0}; + u8 *key_msg = NULL; + s32 ret = 0; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + key_msg = kzalloc(adapter->rss_ctxt.rss_key_size, GFP_KERNEL); + if (!key_msg) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_out; + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_SET_RSS_KEY, key_msg, + adapter->rss_ctxt.rss_key_size, NULL, 0); + ret = 
sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set hash key fail!\n"); + + kfree(key_msg); + +l_out: + + if (adapter->rss_ctxt.key) { + devm_kfree(dev, adapter->rss_ctxt.key); + adapter->rss_ctxt.key = NULL; + } + + return ret; +} + +STATIC s32 sxe2vf_rss_hash_lut_deinit(struct sxe2vf_adapter *adapter) +{ + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct sxe2vf_msg_params params = {0}; + u8 *lut_msg = NULL; + s32 ret = 0; + + lut_msg = kzalloc(adapter->rss_ctxt.rss_lut_size, GFP_KERNEL); + if (!lut_msg) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_out; + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_SET_RSS_LUT, lut_msg, + adapter->rss_ctxt.rss_lut_size, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set lut fail!\n"); + + kfree(lut_msg); + +l_out: + if (adapter->rss_ctxt.lut) { + devm_kfree(dev, adapter->rss_ctxt.lut); + adapter->rss_ctxt.lut = NULL; + } + + return ret; +} + +void sxe2vf_rss_deinit(struct sxe2vf_adapter *adapter) +{ + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return; + + if (!adapter->rss_ctxt.init) { + LOG_INFO_BDF("sxe2 vf rss not init or already deinit!\n"); + return; + } + + (void)sxe2vf_rss_hash_key_deinit(adapter); + + (void)sxe2vf_rss_hash_lut_deinit(adapter); + + sxe2vf_rss_clear_cfg(adapter); + + adapter->rss_ctxt.init = false; + LOG_INFO_BDF("sxe2 vf rss deinit success!\n"); +} + +STATIC s32 sxe2vf_rss_hash_key_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_msg_params params = {0}; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + u8 *key_msg = NULL; + s32 ret = 0; + + if (adapter->rss_ctxt.rss_key_size == 0) { + LOG_ERROR_BDF("sxe2 vf rss hash key init failed. key size is 0!\n"); + ret = -EINVAL; + goto l_out; + } + + key_msg = devm_kzalloc(dev, adapter->rss_ctxt.rss_key_size, GFP_KERNEL); + if (!key_msg) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_out; + } + if (adapter->rss_ctxt.key) { + memcpy(key_msg, adapter->rss_ctxt.key, + adapter->rss_ctxt.rss_key_size); + } else { + netdev_rss_key_fill((void *)key_msg, adapter->rss_ctxt.rss_key_size); + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_SET_RSS_KEY, key_msg, + adapter->rss_ctxt.rss_key_size, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss set hash key fail!\n"); + if (adapter->rss_ctxt.key) + memset(adapter->rss_ctxt.key, 0, + adapter->rss_ctxt.rss_key_size); + + devm_kfree(dev, key_msg); + goto l_out; + } + + if (!adapter->rss_ctxt.key) + adapter->rss_ctxt.key = key_msg; + else + devm_kfree(dev, key_msg); +l_out: + return ret; +} + +STATIC s32 sxe2vf_rss_hash_lut_init(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_msg_params params = {0}; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + u8 *lut_msg = NULL; + s32 ret = 0; + u16 i = 0; + + if (adapter->rss_ctxt.rss_lut_size == 0) { + LOG_ERROR_BDF("sxe2 vf rss hash lut init failed. 
lut size is 0!\n"); + ret = -EINVAL; + goto l_out; + } + + lut_msg = devm_kzalloc(dev, adapter->rss_ctxt.rss_lut_size, GFP_KERNEL); + if (!lut_msg) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_out; + } + if (adapter->rss_ctxt.lut) { + memcpy(lut_msg, adapter->rss_ctxt.lut, + adapter->rss_ctxt.rss_lut_size); + } else { + for (i = 0; i < adapter->rss_ctxt.rss_lut_size; i++) + lut_msg[i] = (u8)(i % adapter->vsi_ctxt.vf_vsi->rxqs.q_cnt); + } + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_SET_RSS_LUT, lut_msg, + adapter->rss_ctxt.rss_lut_size, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss set lut fail!\n"); + if (adapter->rss_ctxt.lut) + memset(adapter->rss_ctxt.lut, 0, + adapter->rss_ctxt.rss_lut_size); + + devm_kfree(dev, lut_msg); + goto l_out; + } + + if (!adapter->rss_ctxt.lut) + adapter->rss_ctxt.lut = lut_msg; + else + devm_kfree(dev, lut_msg); + +l_out: + return ret; +} + +s32 sxe2vf_rss_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + ret = sxe2vf_set_rss_hash_ctrl_func(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss init hash ctrl fail, ret=%u !\n", ret); + goto l_end; + } + + ret = sxe2vf_rss_hash_key_init(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss init hash key fail, ret=%u !\n", ret); + goto l_end; + } + + ret = sxe2vf_rss_hash_lut_init(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss init hash lut fail, ret=%u !\n", ret); + (void)sxe2vf_rss_hash_key_deinit(adapter); + goto l_end; + } + + ret = sxe2vf_rss_default_flow_set(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss add default flow fail, ret=%u !\n", ret); + ret = 0; + } + + adapter->rss_ctxt.init = true; + + LOG_INFO_BDF("sxe2 vf rss init success!\n"); + +l_end: + return ret; +} + +s32 sxe2vf_rss_rebuild(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + ret = sxe2vf_set_rss_hash_ctrl_func(adapter); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss init hash ctrl fail, ret=%u !\n", ret); + + ret = sxe2vf_rss_hash_key_init(adapter); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reinit hash key fail, ret=%u !\n", ret); + + ret = sxe2vf_rss_hash_lut_init(adapter); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss reinit hash lut fail, ret=%u !\n", ret); + + ret = sxe2vf_rss_replay_hash_cfg(adapter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf rss reinit hash cdf fail, ret=%u !\n", ret); + ret = 0; + } + + LOG_INFO_BDF("sxe2 vf rss rebuild done!\n"); + + return ret; +} + +s32 sxe2vf_set_channels_rss_reset(struct net_device *netdev, + struct sxe2vf_adapter *adapter, u32 new_queue) +{ + struct sxe2vf_msg_params params = {0}; + u8 *lut_msg = NULL; + s32 ret = 0; + u16 i = 0; + + if (!netif_is_rxfh_configured(netdev)) { + if (adapter->rss_ctxt.lut) { + for (i = 0; i < adapter->rss_ctxt.rss_lut_size; i++) + adapter->rss_ctxt.lut[i] = (u8)(i % new_queue); + } + LOG_DEV_INFO("rx queue size change, clearing user lut,\t" + "re-run ethtool [-x|-X] to [check|set] settings if needed.\n"); + + lut_msg = kzalloc(adapter->rss_ctxt.rss_lut_size, GFP_KERNEL); + if (!lut_msg) { + ret = -ENOMEM; + LOG_ERROR_BDF("No memory!\n"); + goto l_out; + } + for (i = 0; i < adapter->rss_ctxt.rss_lut_size; i++) + lut_msg[i] = (u8)(i % new_queue); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_SET_RSS_LUT, lut_msg, + adapter->rss_ctxt.rss_lut_size, NULL, + 0); + ret = 
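The modulo fill above is the same round-robin spread used for the default indirection table: entries cycle through the active RX queues. A standalone sketch of the pattern (function name hypothetical):

```c
#include <linux/types.h>

/* Spread RSS indirection-table entries evenly across q_cnt RX queues.
 * With lut_size = 64 and q_cnt = 4 the table reads 0,1,2,3,0,1,2,3,...
 */
static void example_rss_lut_fill(u8 *lut, u16 lut_size, u16 q_cnt)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = (u8)(i % q_cnt);
}
```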
sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf rss set lut fail!\n"); + } + kfree(lut_msg); +l_out: + return ret; +} + +int sxe2vf_set_channels_fnav_check(struct sxe2vf_adapter *adapter, u32 new_cnt) +{ + int ret = 0; + struct sxe2vf_fnav_filter *filter = NULL; + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + list_for_each_entry(filter, &adapter->fnav_ctxt.filter_list, l_node) { + if (filter->act_type == SXE2_FNAV_ACTION_QUEUE && + filter->q_index >= new_cnt) { + ret = -EINVAL; + LOG_ERROR_BDF("change channel fnav check failed, loc=%u,\t" + "q_id=%u.\n", + filter->filter_loc, filter->q_index); + break; + } + } + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + return ret; +} + +struct sxe2vf_fnav_filter * +sxe2vf_fnav_find_filter_by_loc_unlock(struct sxe2vf_adapter *adapter, u32 loc) +{ + struct sxe2vf_fnav_filter *filter_tmp = NULL; + struct sxe2vf_fnav_filter *filter_find = NULL; + + list_for_each_entry(filter_tmp, &adapter->fnav_ctxt.filter_list, l_node) + { + if (loc == filter_tmp->filter_loc) { + filter_find = filter_tmp; + break; + } + if (loc < filter_tmp->filter_loc) + break; + } + + return filter_find; +} + +u32 sxe2vf_flow_type_to_ethtool_flow(enum sxe2_fnav_flow_type flow_type) +{ + switch (flow_type) { + case SXE2_FNAV_FLOW_TYPE_ETH: + return ETHER_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_TCP: + return TCP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_UDP: + return UDP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_SCTP: + return SCTP_V4_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV4_OTHER: + return IPV4_USER_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_TCP: + return TCP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_UDP: + return UDP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_SCTP: + return SCTP_V6_FLOW; + case SXE2_FNAV_FLOW_TYPE_IPV6_OTHER: + return IPV6_USER_FLOW; + default: + return 0; + } +} + +enum sxe2_fnav_flow_type sxe2vf_ethtool_flow_to_type(u32 flow) +{ + enum sxe2_fnav_flow_type flow_type; + + switch (flow) { + case ETHER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_ETH; + break; + case TCP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_TCP; + break; + case UDP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_UDP; + break; + case SCTP_V4_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_SCTP; + break; + case IPV4_USER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV4_OTHER; + break; + case TCP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_TCP; + break; + case UDP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_UDP; + break; + case SCTP_V6_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_SCTP; + break; + case IPV6_USER_FLOW: + flow_type = SXE2_FNAV_FLOW_TYPE_IPV6_OTHER; + break; + default: + flow_type = SXE2_FNAV_FLOW_TYPE_NONE; + break; + } + + return flow_type; +} + +int sxe2vf_ethtool_fnav_filter_get_by_loc(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + int ret = 0; + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct sxe2vf_fnav_filter *filter = NULL; + struct sxe2vf_fnav_filter_full_key *full_key = NULL; + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + + filter = sxe2vf_fnav_find_filter_by_loc_unlock(adapter, fsp->location); + if (!filter) { + LOG_ERROR_BDF("filter in loc[%u] is not found.\n", fsp->location); + ret = -EINVAL; + goto l_unlock; + } + full_key = &filter->full_key; + + fsp->flow_type = sxe2vf_flow_type_to_ethtool_flow(filter->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + if (filter->act_type == SXE2_FNAV_ACTION_DROP) + fsp->ring_cookie = 
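The two flow-type converters above are intended to be exact inverses over the supported set, which is worth asserting whenever a new flow type is added. A quick self-check sketch built on the functions defined in this file (the checker itself is hypothetical):

```c
#include <linux/ethtool.h>

/* Round-trip invariant: every supported ethtool flow value maps to a
 * non-NONE internal type and back to itself.
 */
static bool example_flow_mapping_roundtrip(u32 flow)
{
	enum sxe2_fnav_flow_type type = sxe2vf_ethtool_flow_to_type(flow);

	return type != SXE2_FNAV_FLOW_TYPE_NONE &&
	       sxe2vf_flow_type_to_ethtool_flow(type) == flow;
}
```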
RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = filter->q_index; + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = full_key->ip_data.v4_addrs.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = full_key->ip_data.v4_addrs.dst_ip; + fsp->h_u.tcp_ip4_spec.psrc = full_key->ip_data.src_port; + fsp->h_u.tcp_ip4_spec.pdst = full_key->ip_data.dst_port; + fsp->h_u.tcp_ip4_spec.tos = full_key->ip_data.tos; + fsp->m_u.tcp_ip4_spec.ip4src = full_key->ip_mask.v4_addrs.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = full_key->ip_mask.v4_addrs.dst_ip; + fsp->m_u.tcp_ip4_spec.psrc = full_key->ip_mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = full_key->ip_mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = full_key->ip_mask.tos; + break; + case IPV4_USER_FLOW: + fsp->h_u.usr_ip4_spec.ip4src = full_key->ip_data.v4_addrs.src_ip; + fsp->h_u.usr_ip4_spec.ip4dst = full_key->ip_data.v4_addrs.dst_ip; + fsp->h_u.usr_ip4_spec.l4_4_bytes = full_key->ip_data.l4_header; + fsp->h_u.usr_ip4_spec.tos = full_key->ip_data.tos; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = full_key->ip_data.proto; + fsp->m_u.usr_ip4_spec.ip4src = full_key->ip_mask.v4_addrs.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = full_key->ip_mask.v4_addrs.dst_ip; + fsp->m_u.usr_ip4_spec.l4_4_bytes = full_key->ip_mask.l4_header; + fsp->m_u.usr_ip4_spec.tos = full_key->ip_mask.tos; + fsp->m_u.usr_ip4_spec.ip_ver = 0xFF; + fsp->m_u.usr_ip4_spec.proto = full_key->ip_mask.proto; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, + &full_key->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, + &full_key->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = full_key->ip_data.src_port; + fsp->h_u.tcp_ip6_spec.pdst = full_key->ip_data.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = full_key->ip_data.tclass; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, + &full_key->ip_mask.v6_addrs.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, + &full_key->ip_mask.v6_addrs.dst_ip, sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = full_key->ip_mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = full_key->ip_mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = full_key->ip_mask.tclass; + break; + case IPV6_USER_FLOW: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, + &full_key->ip_data.v6_addrs.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, + &full_key->ip_data.v6_addrs.dst_ip, sizeof(struct in6_addr)); + fsp->h_u.usr_ip6_spec.l4_4_bytes = full_key->ip_data.l4_header; + fsp->h_u.usr_ip6_spec.tclass = full_key->ip_data.tclass; + fsp->h_u.usr_ip6_spec.l4_proto = full_key->ip_data.proto; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, + &full_key->ip_mask.v6_addrs.src_ip, sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, + &full_key->ip_mask.v6_addrs.dst_ip, sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = full_key->ip_mask.l4_header; + fsp->m_u.usr_ip6_spec.tclass = full_key->ip_mask.tclass; + fsp->m_u.usr_ip6_spec.l4_proto = full_key->ip_mask.proto; + break; + case ETHER_FLOW: + fsp->h_u.ether_spec.h_proto = full_key->eth_data.etype; + fsp->m_u.ether_spec.h_proto = full_key->eth_mask.etype; + memcpy(fsp->h_u.ether_spec.h_source, full_key->eth_data.src, + sizeof(fsp->h_u.ether_spec.h_source)); + memcpy(fsp->m_u.ether_spec.h_source, full_key->eth_mask.src, + sizeof(fsp->m_u.ether_spec.h_source)); + memcpy(fsp->h_u.ether_spec.h_dest, 
full_key->eth_data.dst, + sizeof(fsp->h_u.ether_spec.h_dest)); + memcpy(fsp->m_u.ether_spec.h_dest, full_key->eth_mask.dst, + sizeof(fsp->m_u.ether_spec.h_dest)); + break; + default: + ret = -EINVAL; + break; + } + + if (filter->has_flex_filed) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, full_key->ext_data.usr_def, + sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, full_key->ext_mask.usr_def, + sizeof(fsp->m_ext.data)); + fsp->h_ext.vlan_etype = full_key->ext_data.vlan_type; + fsp->m_ext.vlan_etype = full_key->ext_mask.vlan_type; + fsp->h_ext.vlan_tci = full_key->ext_data.s_vlan_tag; + fsp->m_ext.vlan_tci = full_key->ext_mask.s_vlan_tag; + } + +l_unlock: + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + return ret; +} + +int sxe2vf_ethtool_ntuple_filter_locs_get(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *filter_locs) +{ + int ret = 0; + unsigned int cnt = 0; + struct sxe2vf_fnav_filter *filter = NULL; + + cmd->data = SXE2VF_MAX_FNAV_FILTERS; + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + list_for_each_entry(filter, &adapter->fnav_ctxt.filter_list, l_node) { + if (cnt == cmd->rule_cnt) { + ret = -EMSGSIZE; + LOG_ERROR_BDF("sxe2 vf fnav filter cnt is over cmdCnt=%d,\t" + "vsi id=%d.\n", + cmd->rule_cnt, + adapter->vsi_ctxt.vf_vsi->vsi_id); + break; + } + filter_locs[cnt] = filter->filter_loc; + cnt++; + } + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + + if (!ret) + cmd->rule_cnt = cnt; + + return ret; +} + +STATIC s32 sxe2vf_fnav_alloc_stat_idx(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_fnav_context *fnav_ctxt = &adapter->fnav_ctxt; + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_fnav_stat_msg stat_msg; + struct sxe2_vf_fnav_stat_alloc_req_msg stat_req; + + fnav_ctxt->stat_idx = SXE2_VF_FNAV_INVALID_STAT_IDX; + stat_req.need_update = true; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_FNAV_ALLOC_STAT, &stat_req, + sizeof(stat_req), &stat_msg, + sizeof(struct sxe2_vf_fnav_stat_msg)); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav alloc stat idx fail!\n"); + goto l_end; + } + fnav_ctxt->stat_idx = le16_to_cpu(stat_msg.stat_index); + +l_end: + return ret; +} + +STATIC s32 sxe2vf_fnav_free_stat_idx(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_fnav_context *fnav_ctxt = &adapter->fnav_ctxt; + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_fnav_stat_msg stat_msg; + + stat_msg.stat_index = cpu_to_le16(fnav_ctxt->stat_idx); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, + SXE2VF_MSG_RESP_WAIT_NOTIFY, SXE2_VF_FNAV_FREE_STAT, + &stat_msg, sizeof(struct sxe2_vf_fnav_stat_msg), NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav free stat idx fail!\n"); + goto l_end; + } + fnav_ctxt->stat_idx = SXE2_VF_FNAV_INVALID_STAT_IDX; + +l_end: + return ret; +} + +STATIC s32 sxe2vf_fnav_match_clear(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_msg_params params = {0}; + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_FNAV_MATCH_CLEAR, NULL, 0, NULL, 0); + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) + LOG_ERROR_BDF("sxe2 vf fnav match clear fail!\n"); + + return ret; +} + +s32 sxe2vf_fnav_init(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_fnav_context *filter_ctxt = &adapter->fnav_ctxt; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + ret = sxe2vf_fnav_match_clear(adapter); 
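The stat-index alloc/free pair above shows the mailbox idiom this whole file relies on: fill default params, attach optional request and response buffers, send, then convert any little-endian response fields at the caller. A condensed sketch of that shape, reusing the signatures seen in this file (the wrapper and the `u16` command-code type are assumptions):

```c
/* One PF round-trip over the VF mailbox. msg_type is one of the
 * SXE2_VF_* command codes; response integers arrive little-endian and
 * still need le16_to_cpu()/le32_to_cpu() at the caller.
 */
static s32 example_mbx_roundtrip(struct sxe2vf_adapter *adapter, u16 msg_type,
				 void *req, u16 req_len,
				 void *resp, u16 resp_len)
{
	struct sxe2vf_msg_params params = {0};

	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
					msg_type, req, req_len,
					resp, resp_len);
	return sxe2vf_mbx_msg_send(adapter, &params);
}
```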
+ if (ret) + goto l_end; + + ret = sxe2vf_fnav_alloc_stat_idx(adapter); + if (ret) + goto l_end; + + adapter->fnav_ctxt.fnav_match = 0; + + clear_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags); + if (adapter->fnav_ctxt.space_bsize > 0 || + adapter->fnav_ctxt.space_gsize > 0) { + set_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags); + } + + filter_ctxt->filter_cnt = 0; + mutex_init(&filter_ctxt->filter_list_lock); + INIT_LIST_HEAD(&filter_ctxt->filter_list); + + filter_ctxt->init = true; + LOG_INFO_BDF("sxe2 vf fnav init success.\n"); + +l_end: + return ret; +} + +void sxe2vf_fnav_deinit(struct sxe2vf_adapter *adapter) +{ + struct sxe2vf_fnav_context *filter_ctxt = &adapter->fnav_ctxt; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct sxe2vf_fnav_filter *filter, *tmp; + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return; + + if (!filter_ctxt->init) { + LOG_INFO_BDF("sxe2 vf fnav already deinit!\n"); + return; + } + + clear_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags); + + mutex_lock(&filter_ctxt->filter_list_lock); + list_for_each_entry_safe(filter, tmp, &filter_ctxt->filter_list, l_node) + { + list_del(&filter->l_node); + devm_kfree(dev, filter); + } + filter_ctxt->filter_cnt = 0; + mutex_unlock(&filter_ctxt->filter_list_lock); + + mutex_destroy(&filter_ctxt->filter_list_lock); + + (void)sxe2vf_fnav_free_stat_idx(adapter); + + filter_ctxt->init = false; + + LOG_INFO_BDF("sxe2 vf fnav deinit success.\n"); +} + +bool sxe2vf_fnav_is_dup_filter(struct sxe2vf_adapter *adapter, + struct sxe2vf_fnav_filter *filter) +{ + struct sxe2vf_fnav_filter *tmp; + bool ret = false; + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + list_for_each_entry(tmp, &adapter->fnav_ctxt.filter_list, l_node) { + if (tmp->flow_type != filter->flow_type) + continue; + + if (!memcmp(&tmp->full_key, &filter->full_key, + sizeof(filter->full_key))) { + ret = true; + break; + } + } + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + + if (ret) { + if (filter->filter_loc == tmp->filter_loc && + filter->q_index != tmp->q_index) { + ret = false; + } + } + + return ret; +} + +s32 sxe2vf_fnav_del_filter(struct sxe2vf_adapter *adapter, + struct sxe2vf_fnav_filter *filter) +{ + s32 ret = 0; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + struct sxe2vf_fnav_context *fnav_ctxt = &adapter->fnav_ctxt; + struct sxe2vf_msg_params params = {0}; + struct sxe2_vf_fnav_filter_del_msg del_msg; + + del_msg.flow_id = cpu_to_le32(filter->flow_id); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_FNAV_FILTER_DEL, &del_msg, + sizeof(struct sxe2_vf_fnav_filter_del_msg), + NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav del filter fail!\n"); + } else { + LOG_INFO_BDF("sxe2 vf fnav del filter success, flow_id = %u !\n", + filter->flow_id); + list_del(&filter->l_node); + devm_kfree(dev, filter); + fnav_ctxt->filter_cnt--; + } + + return ret; +} + +s32 sxe2vf_fnav_all_filter_del(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_fnav_filter *filter, *tmp; + struct sxe2vf_msg_params params = {0}; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + sxe2vf_mbx_msg_dflt_params_fill(¶ms, SXE2VF_MSG_RESP_WAIT_NOTIFY, + SXE2_VF_FNAV_FILTER_CLEAR, NULL, 0, NULL, 0); + + ret = sxe2vf_mbx_msg_send(adapter, ¶ms); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav del filter fail!\n"); + goto l_end; + } + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + list_for_each_entry_safe(filter, tmp, &adapter->fnav_ctxt.filter_list, 
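One subtlety in `sxe2vf_fnav_is_dup_filter()` above: a full-key match at the same `filter_loc` but with a different target queue is deliberately not reported as a duplicate, so re-adding a rule at its own location can retarget it. The rule condensed into a hypothetical predicate:

```c
#include <linux/string.h>

/* True duplicate: identical match key, and not merely the same slot
 * being retargeted to a different queue.
 */
static bool example_is_true_duplicate(const struct sxe2vf_fnav_filter *old_f,
				      const struct sxe2vf_fnav_filter *new_f)
{
	bool same_key = !memcmp(&old_f->full_key, &new_f->full_key,
				sizeof(new_f->full_key));
	bool retarget = old_f->filter_loc == new_f->filter_loc &&
			old_f->q_index != new_f->q_index;

	return same_key && !retarget;
}
```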
+ l_node) { + list_del(&filter->l_node); + devm_kfree(dev, filter); + adapter->fnav_ctxt.filter_cnt--; + } + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + + LOG_DEBUG_BDF("sxe2 vf fnav del all filter done ret:%d.\n", ret); +l_end: + return ret; +} + +s32 sxe2vf_fnav_rebuild(struct sxe2vf_adapter *adapter) +{ + s32 ret = 0; + struct sxe2vf_fnav_filter *filter, *tmp; + struct device *dev = SXE2VF_ADAPTER_TO_DEV(adapter); + + if (sxe2vf_com_mode_get(adapter) == SXE2_COM_MODULE_DPDK) + return 0; + + if (!test_bit(SXE2VF_FLAG_FNAV_ENABLE, adapter->flags)) { + LOG_INFO_BDF("sxe2 vf fnav switch is disable!\n"); + goto l_end; + } + + ret = sxe2vf_fnav_alloc_stat_idx(adapter); + + mutex_lock(&adapter->fnav_ctxt.filter_list_lock); + list_for_each_entry_safe(filter, tmp, &adapter->fnav_ctxt.filter_list, + l_node) { + if (!ret) { + filter->full_msg.action[1].act_count.stat_ctrl = + cpu_to_le32(SXE2_FNAV_STAT_ENA_PKTS); + filter->full_msg.action[1].act_count.stat_index = + cpu_to_le32(adapter->fnav_ctxt.stat_idx); + filter->full_msg.action[1].type = + cpu_to_le32(SXE2_FNAV_ACTION_COUNT); + filter->full_msg.action_cnt = 2; + } else { + filter->full_msg.action_cnt = 1; + } + ret = sxe2vf_fnav_add_filter_with_packet(adapter, filter); + if (ret) { + LOG_ERROR_BDF("sxe2 vf fnav set filter failed,\t" + "filter_loc=%d ret=%d !\n", + filter->filter_loc, ret); + list_del(&filter->l_node); + devm_kfree(dev, filter); + adapter->fnav_ctxt.filter_cnt--; + } else { + LOG_INFO_BDF("sxe2 vf fnav set filter success,\t" + "filter_loc=%d ret=%d !\n", + filter->filter_loc, ret); + } + } + mutex_unlock(&adapter->fnav_ctxt.filter_list_lock); + +l_end: + LOG_INFO_BDF("sxe2 vf fnav rebuild filter done, vsi id=%d. ret:%d\n", + adapter->vsi_ctxt.vf_vsi->vsi_id, ret); + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.h new file mode 100644 index 0000000000000000000000000000000000000000..abe5099fac85c0ac3c1927dc903b4f65acbc3339 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_rxft.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ * + * @file: sxe2vf_rxft.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2VF_RXFT_H__ +#define __SXE2VF_RXFT_H__ + +#include +#include +#include +#include +#include "sxe2vf.h" + +struct sxe2vf_rss_hash_cfg { + DECLARE_BITMAP(headers, SXE2_FLOW_HDR_MAX); + DECLARE_BITMAP(hash_flds, SXE2_FLOW_FLD_ID_MAX); + bool symm; +}; + +struct sxe2vf_rss_cfg * +sxe2vf_find_rss_cfg_by_hdrs(struct sxe2vf_adapter *adapter, + unsigned long *hdrs); + +void sxe2vf_analysis_hdrs(struct ethtool_rxnfc *nfc, unsigned long *hdrs); + +void sxe2vf_analysis_hash_flds(struct ethtool_rxnfc *nfc, + unsigned long *hash_flds); + +void sxe2vf_get_rss_flow(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *nfc); + +int sxe2vf_set_rss_flow(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *nfc); + +void sxe2vf_rss_delete_cfg(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_rss_add_cfg(struct sxe2vf_adapter *adapter, + struct sxe2vf_rss_hash_cfg *hash_cfg, bool is_default); + +s32 sxe2vf_rss_default_flow_set(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_rss_init(struct sxe2vf_adapter *adapter); + +void sxe2vf_rss_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_rss_rebuild(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_set_channels_rss_reset(struct net_device *netdev, + struct sxe2vf_adapter *adapter, u32 new_queue); + +#define SXE2VF_MAX_FNAV_FILTERS (128) +#define SXE2VF_FLEX_WORD_NUM (2) +#define SXE2VF_USERDEF_FLEX_WORD_M GENMASK(15, 0) +#define SXE2VF_USERDEF_FLEX_OFFS_S 16 +#define SXE2VF_USERDEF_FLEX_OFFS_M GENMASK(31, SXE2VF_USERDEF_FLEX_OFFS_S) +#define SXE2VF_USERDEF_FLEX_FLTR_M GENMASK(31, 0) +#define SXE2VF_USERDEF_FLEX_MAX_OFFS_VAL 0x1FE + +#define SXE2VF_FNAV_L4_PROT_TCP 6 +#define SXE2VF_FNAV_L4_PROT_UDP 17 +#define SXE2VF_FNAV_L4_PROT_SCTP 132 + +struct sxe2vf_ipv4_addrs { + __be32 src_ip; + __be32 dst_ip; +}; + +struct sxe2vf_ipv6_addrs { + struct in6_addr src_ip; + struct in6_addr dst_ip; +}; + +struct sxe2vf_fnav_eth { + u8 src[SXE2_FNAV_ETH_ADDR_LEN]; + u8 dst[SXE2_FNAV_ETH_ADDR_LEN]; + __be16 etype; +}; + +struct sxe2vf_fnav_ip { + union { + struct sxe2vf_ipv4_addrs v4_addrs; + struct sxe2vf_ipv6_addrs v6_addrs; + }; + __be16 src_port; + __be16 dst_port; + __be32 l4_header; + __be32 spi; + union { + u8 tos; + u8 tclass; + }; + u8 proto; +}; + +struct sxe2vf_fnav_extra { + __be32 usr_def[SXE2VF_FLEX_WORD_NUM]; + __be16 vlan_type; + __be16 s_vlan_tag; + __be16 c_vlan_tag; +}; + +struct sxe2vf_fnav_filter_full_key { + struct sxe2vf_fnav_eth eth_data; + struct sxe2vf_fnav_eth eth_mask; + + struct sxe2vf_fnav_ip ip_data; + struct sxe2vf_fnav_ip ip_mask; + + struct sxe2vf_fnav_extra ext_data; + struct sxe2vf_fnav_extra ext_mask; + + u8 ip_ver; +}; + +void sxe2vf_fnav_deinit(struct sxe2vf_adapter *adapter); + +s32 sxe2vf_fnav_init(struct sxe2vf_adapter *adapter); + +u32 sxe2vf_fnav_max_filter_cnt_get(struct sxe2vf_adapter *adapter); + +struct sxe2vf_fnav_filter * +sxe2vf_fnav_find_filter_by_loc_unlock(struct sxe2vf_adapter *adapter, u32 loc); + +u32 sxe2vf_flow_type_to_ethtool_flow(enum sxe2_fnav_flow_type flow_type); + +enum sxe2_fnav_flow_type sxe2vf_ethtool_flow_to_type(u32 flow); + +int sxe2vf_ethtool_fnav_filter_get_by_loc(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *cmd); + +int sxe2vf_ethtool_ntuple_filter_locs_get(struct sxe2vf_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *filter_locs); + +bool sxe2vf_fnav_filter_cmp(struct sxe2vf_fnav_filter *fltrA, + struct sxe2vf_fnav_filter *fltrB); + +struct sxe2vf_fnav_filter * 
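The `SXE2VF_USERDEF_FLEX_*` masks defined above pack ethtool's 32-bit `user-def` value as a 16-bit flex match word in bits 15:0 and a byte offset in bits 31:16. A decode sketch under that layout (helper name hypothetical):

```c
#include <linux/bitfield.h>
#include <linux/errno.h>

/* Split an ethtool user-def word into (offset, flex word) using the
 * SXE2VF_USERDEF_FLEX_* layout and bounds-check the offset.
 */
static int example_userdef_decode(u32 usr_def, u16 *offs, u16 *word)
{
	*word = FIELD_GET(SXE2VF_USERDEF_FLEX_WORD_M, usr_def);
	*offs = FIELD_GET(SXE2VF_USERDEF_FLEX_OFFS_M, usr_def);

	return *offs > SXE2VF_USERDEF_FLEX_MAX_OFFS_VAL ? -EINVAL : 0;
}
```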
+sxe2vf_fnav_filter_search_for_dup(struct sxe2vf_adapter *adapter, + struct sxe2vf_fnav_filter *filter); + +s32 sxe2vf_fnav_del_filter(struct sxe2vf_adapter *adapter, + struct sxe2vf_fnav_filter *filter); + +s32 sxe2vf_fnav_all_filter_del(struct sxe2vf_adapter *adapter); + +bool sxe2vf_fnav_is_dup_filter(struct sxe2vf_adapter *adapter, + struct sxe2vf_fnav_filter *filter); + +s32 sxe2vf_fnav_rebuild(struct sxe2vf_adapter *adapter); + +int sxe2vf_set_channels_fnav_check(struct sxe2vf_adapter *adapter, u32 new_cnt); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.c new file mode 100644 index 0000000000000000000000000000000000000000..c1ac7f660c784727eca7f4cd9f05170db0a0da91 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.c @@ -0,0 +1,1163 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_tx.c + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ + +#include +#include + +#include "sxe2vf.h" +#include "sxe2vf_tx.h" +#include "sxe2vf_queue.h" +#include "sxe2vf_netdev.h" +#include "sxe2_log.h" +#include "sxe2vf_mbx_msg.h" +#include "sxe2_mbx_public.h" +#include "sxe2vf_regs.h" +#include "sxe2vf_vsi.h" +#include "sxe2vf_ipsec.h" +#ifdef HAVE_SCTP +#include +#endif + +#define SXE2VF_MIN_TX_LEN 17 +#define SXE2VF_DFLT_IRQ_WORK 256 +#define SXE2VF_CACHE_LINE_BYTES 64 +#define SXE2VF_DESCS_PER_CACHE_LINE \ + (SXE2VF_CACHE_LINE_BYTES / sizeof(union sxe2vf_tx_data_desc)) +#define SXE2VF_DESCS_FOR_CTXT_DESC 1 +#define SXE2VF_DESCS_FOR_SKB_DATA_PTR 1 +#define SXE2VF_MAX_DATA_DESC_PER_SKB 15 +#define SXE2VF_DATA_DESC_USED_FOR_TSO 6 + +#define SXE2VF_TSO_SEG_DESC_USE_FOR_FRAGMENT 13 + +#define SXE2VF_TX_DESC_NEEDED \ + (MAX_SKB_FRAGS + SXE2VF_DESCS_FOR_CTXT_DESC + SXE2VF_DESCS_PER_CACHE_LINE + \ + SXE2VF_DESCS_FOR_SKB_DATA_PTR) +#define SXE2VF_TX_WAKE_THRESHOLD (SXE2VF_TX_DESC_NEEDED * 2) +#define SXE2VF_MAX_READ_REQ_SIZE 4096 +#define SXE2VF_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define SXE2VF_MAX_DATA_PER_TXD_ALIGNED \ + (~(SXE2VF_MAX_READ_REQ_SIZE - 1) & SXE2VF_MAX_DATA_PER_TXD) + +#define SXE2VF_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(frag_size) \ + ((((frag_size)*85) >> 20) + 1) + +static inline u16 sxe2vf_tx_desc_unused_count(struct sxe2vf_queue *txq) +{ + u16 ntc = txq->next_to_clean; + u16 ntu = txq->next_to_use; + u16 unused_tx_desc_cnt; + + unused_tx_desc_cnt = (u16)(((ntc > ntu) ? 
0 : txq->depth) + ntc - ntu - 1); + return unused_tx_desc_cnt; +} + +static s32 sxe2vf_maybe_stop_tx(struct sxe2vf_queue *txq, u16 desc_cnt) +{ + s32 ret = 0; + + netif_stop_subqueue(txq->netdev, txq->idx_in_vsi); + + smp_mb(); + + if (likely(sxe2vf_tx_desc_unused_count(txq) < desc_cnt)) { + ret = -EBUSY; + goto l_end; + } + + netif_start_subqueue(txq->netdev, txq->idx_in_vsi); + + ++txq->stats->tx_stats.tx_restart; + +l_end: + return ret; +} + +static u32 sxe2vf_tx_desc_count(struct sk_buff *skb) +{ + u32 count = 0, size = skb_headlen(skb); + u32 nr_frags = skb_shinfo(skb)->nr_frags; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + for (;;) { + count += SXE2VF_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +static inline struct sxe2vf_tx_buf * +sxe2vf_tx_first_buffer_get(struct sk_buff *skb, struct sxe2vf_queue *txq) +{ + struct sxe2vf_tx_buf *first_buf; + + first_buf = &txq->tx_buf[txq->next_to_use]; + first_buf->skb = skb; + first_buf->bytecount = max_t(u32, skb->len, ETH_ZLEN); + first_buf->gso_segs = 1; + first_buf->tx_features = 0; + LOG_DEBUG(">>> next_to_use:%u buf:%pK skb:%pK.\n", txq->next_to_use, + &txq->tx_buf[txq->next_to_use], skb); + return first_buf; +} + +static void sxe2vf_tx_buffer_unmap(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *tx_buf) +{ + if (tx_buf->skb) { + dev_kfree_skb_any(tx_buf->skb); + + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_single(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + } + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); +} + +static bool sxe2vf_chk_linearize_for_tso(struct sk_buff *skb) +{ + u32 i; + bool ret; + s32 nr_frags, sum; + const skb_frag_t *frag, *stale; + + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < (SXE2VF_MAX_DATA_DESC_PER_SKB - 1)) { + ret = false; + goto l_end; + } + + sum = 1 - skb_shinfo(skb)->gso_size; + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < SXE2VF_TSO_SEG_DESC_USE_FOR_FRAGMENT - 1; i++) + sum += (s32)skb_frag_size(frag++); + nr_frags -= SXE2VF_TSO_SEG_DESC_USE_FOR_FRAGMENT; + + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = (s32)skb_frag_size(stale); + + sum += (s32)skb_frag_size(frag++); + + if (stale_size > SXE2VF_MAX_DATA_PER_TXD) { + int align_pad = -((int)skb_frag_off(stale)) & + (SXE2VF_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= SXE2VF_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= SXE2VF_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > SXE2VF_MAX_DATA_PER_TXD); + } + + if (sum < 0) { + ret = true; + goto l_end; + } + + if (!nr_frags--) + break; + + sum -= stale_size; + } + + ret = false; +l_end: + return ret; +} + +static void sxe2vf_tx_dma_err(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *first_buf, u16 ntu) +{ + struct sxe2vf_tx_buf *tx_buf; + + for (;;) { + tx_buf = &txq->tx_buf[ntu]; + + sxe2vf_tx_buffer_unmap(txq, tx_buf); + if (tx_buf == first_buf) + break; + + if (ntu == 0) + ntu += txq->depth; + + --ntu; + } + + txq->next_to_use = ntu; +} + +static inline void sxe2vf_tx_desc_update(struct sxe2vf_queue *txq, + union sxe2vf_tx_data_desc **desc, u16 *ntu) +{ + ++(*ntu); + ++(*desc); + if (txq->depth == *ntu) { + *desc = SXE2VF_TX_DESC(txq, 0); + *ntu = 0; + } +} + +static inline __le64 
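`SXE2VF_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG`, used by `sxe2vf_tx_desc_count()` above, is a division-free descriptor estimate: 85/2^20 is just under 1/12288 (`SXE2VF_MAX_DATA_PER_TXD_ALIGNED`), so the multiply-and-shift approximates the number of full 12 KB chunks and the `+ 1` covers the remainder (a final chunk may carry up to 16K-1 bytes). Worked values as a sketch:

```c
#include <linux/types.h>

/* example_desc_estimate(n) == ((n * 85) >> 20) + 1:
 *   n = 1500  -> (127500  >> 20) + 1 = 1 descriptor
 *   n = 12288 -> (1044480 >> 20) + 1 = 1 descriptor (one full chunk)
 *   n = 32768 -> (2785280 >> 20) + 1 = 3 descriptors (2 full + tail)
 */
static u32 example_desc_estimate(u32 frag_size)
{
	return ((frag_size * 85) >> 20) + 1;
}
```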
+sxe2vf_tx_data_desc_qword1_setup(struct sxe2vf_tx_offload_info *offload, u32 size) +{ + return cpu_to_le64(SXE2VF_TX_DESC_DTYPE_DATA | + ((u64)offload->data_desc_cmd + << SXE2VF_TX_DATA_DESC_CMD_SHIFT) | + ((u64)offload->data_desc_offset + << SXE2VF_TX_DATA_DESC_OFFSET_SHIFT) | + ((u64)size << SXE2VF_TX_DATA_DESC_BUF_SZ_SHIFT) | + ((u64)offload->data_desc_l2tag1 + << SXE2VF_TX_DATA_DESC_L2TAG1_SHIFT)); +} + +static s32 sxe2vf_tx_desc_ring_map(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload, + union sxe2vf_tx_data_desc **desc, u16 *ntu) +{ + u32 max_data; + dma_addr_t dma; + skb_frag_t *frag; + struct sk_buff *skb = first_buf->skb; + u32 map_size = skb_headlen(skb); + u32 remaining_size = skb->data_len; + struct sxe2vf_tx_buf *tx_buf = first_buf; + struct sxe2vf_adapter *adapter = netdev_priv(txq->netdev); + + LOG_DEBUG_BDF("skb dma map start, line_size=%u, \t" + "total_frag_len=%u, skb_len=%u\n", + skb_headlen(skb), skb->data_len, skb->len); + + dma = dma_map_single(txq->dev, skb->data, map_size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(txq->dev, dma)) { + LOG_ERROR_BDF("tx dma map failed\n"); + goto l_dma_err; + } + + dma_unmap_len_set(tx_buf, len, map_size); + dma_unmap_addr_set(tx_buf, dma, dma); + + (*desc)->read.buf_addr = cpu_to_le64(dma); + + max_data = SXE2VF_MAX_DATA_PER_TXD_ALIGNED; + + while (unlikely(map_size > SXE2VF_MAX_DATA_PER_TXD)) { + (*desc)->read.cmd_type_offset_bsz = + sxe2vf_tx_data_desc_qword1_setup(offload, + max_data); + + sxe2vf_tx_desc_update(txq, desc, ntu); + dma += max_data; + map_size -= max_data; + max_data = SXE2VF_MAX_DATA_PER_TXD_ALIGNED; + (*desc)->read.buf_addr = cpu_to_le64(dma); + } + + if (likely(!remaining_size)) { + offload->data_desc_cmd |= SXE2VF_TX_DATA_DESC_CMD_EOP | + SXE2VF_TX_DATA_DESC_CMD_RS; + (*desc)->read.cmd_type_offset_bsz = + sxe2vf_tx_data_desc_qword1_setup(offload, + map_size); + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, " + "remaining_size=%u, \t" + "desc_ptr=%p, dma_addr=%#llx, " + "desc.buffer_addr = %#llx, \t" + "cmd_type=0x%llx\n", + map_size, remaining_size, *desc, (u64)dma, + (*desc)->read.buf_addr, + (*desc)->read.cmd_type_offset_bsz); + break; + } + + (*desc)->read.cmd_type_offset_bsz = + sxe2vf_tx_data_desc_qword1_setup(offload, map_size); + + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, remaining_size=%u, " + "\t" + "desc_ptr=%p, dma_addr=%#llx, desc.buffer_addr = " + "%#llx, \t" + "cmd_type=0x%llx\n", + map_size, remaining_size, *desc, (u64)dma, + (*desc)->read.buf_addr, + (*desc)->read.cmd_type_offset_bsz); + sxe2vf_tx_desc_update(txq, desc, ntu); + + map_size = skb_frag_size(frag); + remaining_size -= map_size; + + dma = skb_frag_dma_map(txq->dev, frag, 0, map_size, DMA_TO_DEVICE); + + tx_buf = &txq->tx_buf[*ntu]; + } + + LOG_DEBUG_BDF("skb dma map end\n"); + return 0; + +l_dma_err: + sxe2vf_tx_dma_err(txq, first_buf, *ntu); + return -ENOMEM; +} + +static inline struct netdev_queue * +sxe2vf_queue_to_netdevq(const struct sxe2vf_queue *queue) +{ + return netdev_get_tx_queue(queue->netdev, queue->idx_in_vsi); +} + +static inline void +sxe2vf_tx_desc_setup_for_csum(struct sxe2vf_tx_offload_info *offload, u32 l2_len, + u32 l3_len, u32 l4_len, u32 cmd) +{ + offload->data_desc_offset |= ((l2_len / SXE2VF_BYTES_PER_WORD) + << SXE2VF_TX_DATA_DESC_MACLEN_SHIFT) | + ((l3_len / SXE2VF_BYTES_PER_DWORD) + << SXE2VF_TX_DATA_DESC_IPLEN_SHIFT) | + (l4_len << SXE2VF_TX_DATA_DESC_L4_LEN_SHIFT); + + 
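The qword built by `sxe2vf_tx_data_desc_qword1_setup()` above packs DTYPE, CMD, OFFSET, BUF_SZ, and L2TAG1 into one little-endian 64-bit word using the shifts from `sxe2vf_tx.h`. The 14-bit BUF_SZ field (bits 47:34) is also why a single descriptor is capped at 16K-1 bytes; a compile-time statement of that link (checker name hypothetical):

```c
#include <linux/build_bug.h>

/* The per-descriptor data cap equals the width of the BUF_SZ field in
 * qword1, i.e. 0x3FFF == 16K - 1.
 */
static inline void example_qword1_layout_check(void)
{
	BUILD_BUG_ON(SXE2VF_MAX_DATA_PER_TXD !=
		     (SXE2VF_TX_DATA_DESC_BUF_SZ_MASK >>
		      SXE2VF_TX_DATA_DESC_BUF_SZ_SHIFT));
}
```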
offload->data_desc_cmd |= cmd; +} + +static s32 sxe2vf_xmit_pkt(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + s32 ret; + u16 ntu = txq->next_to_use; + union sxe2vf_tx_data_desc *desc = SXE2VF_TX_DESC(txq, ntu); + struct sxe2vf_adapter *adapter = netdev_priv(txq->netdev); + bool xmit_more; + + ret = sxe2vf_tx_desc_ring_map(txq, first_buf, offload, &desc, &ntu); + if (ret) + goto l_end; + + ntu++; + if (ntu == txq->depth) + ntu = 0; + + wmb(); + + first_buf->next_to_watch = desc; + + txq->next_to_use = ntu; + + if (unlikely(sxe2vf_tx_desc_unused_count(txq) < SXE2VF_TX_DESC_NEEDED)) { + ret = sxe2vf_maybe_stop_tx(txq, SXE2VF_TX_DESC_NEEDED); + if (ret < 0) { + LOG_WARN_BDF("the desc is not enough in the queue[%u],\t" + "to stop the queue, \t" + "desc_cnt < SXE2VF_TX_DESC_NEEDED[%u]\n", + txq->idx_in_vsi, (u32)SXE2VF_TX_DESC_NEEDED); + } + } + + xmit_more = netdev_xmit_more(); + txq->stats->tx_stats.tx_xmit_more += xmit_more; + if (__netdev_tx_sent_queue(sxe2vf_queue_to_netdevq(txq), + first_buf->bytecount, xmit_more)) { + writel(ntu, txq->desc.tail); + } + + return 0; + +l_end: + return ret; +} + +static bool sxe2vf_chk_linearize(struct sk_buff *skb, u32 desc_needed) +{ + if (likely(desc_needed < SXE2VF_MAX_DATA_DESC_PER_SKB)) + return false; + + if (skb_is_gso(skb)) + return sxe2vf_chk_linearize_for_tso(skb); + + return desc_needed != SXE2VF_MAX_DATA_DESC_PER_SKB; +} + +static inline void +sxe2vf_tx_desc_setup_for_tso(struct sxe2vf_tx_offload_info *offload, u64 tso_len, + u64 mss) +{ + offload->ctxt_desc_qw1 = + (u64)(SXE2VF_TX_DESC_DTYPE_CTXT | + (SXE2VF_TX_CTXT_DESC_CMD_TSO + << SXE2VF_TX_CTXT_DESC_CMD_SHIFT) | + (tso_len << SXE2VF_TX_CTXT_DESC_TSO_LEN_SHIFT) | + (mss << SXE2VF_TX_CTXT_DESC_MSS_SHIFT)); +} + +static s32 sxe2vf_tso(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + s32 ret; + u32 paylen; + union sxe2vf_ip_hdr ip; + union sxe2vf_l4_hdr l4; + u8 l4_start, header_len; + u64 cd_mss, cd_tso_len; + struct sk_buff *skb = first_buf->skb; + struct sxe2vf_adapter *adapter = offload->adapter; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) { + ret = 0; + goto l_end; + } + + ret = skb_cow_head(skb, 0); + if (ret < 0) { + LOG_ERROR_BDF("skb cow head failed, ret=%d\n", ret); + goto l_end; + } + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6 | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + l4_start = (u8)(l4.hdr - skb->data); + + paylen = skb->len - l4_start; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + if (ip.v4->version == 4) + ip.v4->frag_off |= htons(IP_DF); + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + l4_start = (u8)(l4.hdr - skb->data); + + paylen = skb->len - l4_start; +#ifdef NETIF_F_GSO_UDP_L4 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)sizeof(l4.udp) + l4_start; + } else { + 
csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)((l4.tcp->doff * 4) + l4_start); + } +#else + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + header_len = (u8)((l4.tcp->doff * 4) + l4_start); +#endif + first_buf->gso_segs = skb_shinfo(skb)->gso_segs; + first_buf->bytecount += (first_buf->gso_segs - 1) * header_len; + + txq->stats->tx_stats.tx_tso_packets += first_buf->gso_segs; + txq->stats->tx_stats.tx_tso_bytes += first_buf->bytecount; + + cd_tso_len = skb->len - header_len; + cd_mss = skb_shinfo(skb)->gso_size; + + first_buf->tx_features |= SXE2VF_TX_FEATURE_TSO; + + sxe2vf_tx_desc_setup_for_tso(offload, cd_tso_len, cd_mss); + + return 0; + +l_end: + return ret; +} + +static s32 sxe2vf_tx_csum(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + s32 ret; + bool gso_ena; + u8 l4_proto = 0; + unsigned char *exthdr; + union sxe2vf_ip_hdr ip; + union sxe2vf_l4_hdr l4; + __be16 frag_off, protocol; + u32 l4_len = 0, l3_len, l2_len; + u32 cmd = 0, tunnel = 0; + struct sk_buff *skb = first_buf->skb; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + txq->stats->tx_stats.tx_csum_none++; + return 0; + } + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + l2_len = (u32)(ip.hdr - skb->data); + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IP)) + first_buf->tx_features |= SXE2VF_TX_FEATURE_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + first_buf->tx_features |= SXE2VF_TX_FEATURE_IPV6; + + if (skb->encapsulation) { + if (first_buf->tx_features & SXE2VF_TX_FEATURE_IPV4) { + tunnel |= (first_buf->tx_features & SXE2VF_TX_FEATURE_TSO) + ? SXE2VF_TX_CTXT_DESC_IPV4 + : SXE2VF_TX_CTXT_DESC_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; + + } else if (first_buf->tx_features & SXE2VF_TX_FEATURE_IPV6) { + tunnel |= SXE2VF_TX_CTXT_DESC_EIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + if (ret < 0) { + ret = -1; + goto l_end; + } + } + + switch (l4_proto) { + case IPPROTO_UDP: + tunnel |= SXE2VF_TX_CTXT_DESC_UDP_TUNNE; + first_buf->tx_features |= SXE2VF_TX_FEATURE_TUNNEL; + break; + case IPPROTO_GRE: + tunnel |= SXE2VF_TX_CTXT_DESC_GRE_TUNNE; + first_buf->tx_features |= SXE2VF_TX_FEATURE_TUNNEL; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + first_buf->tx_features |= SXE2VF_TX_FEATURE_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (first_buf->tx_features & SXE2VF_TX_FEATURE_TSO) { + ret = -1; + goto l_end; + } + + (void)skb_checksum_help(skb); + ret = 0; + goto l_end; + } + + tunnel |= ((l4.hdr - ip.hdr) / 4) + << SXE2VF_TX_CTXT_DESC_EIPLEN_SHIFT; + + ip.hdr = skb_inner_network_header(skb); + + tunnel |= ((ip.hdr - l4.hdr) / 2) + << SXE2VF_TX_CTXT_DESC_NATLEN_SHIFT; + + gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; + if ((first_buf->tx_features & SXE2VF_TX_FEATURE_TSO) && !gso_ena && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + tunnel |= SXE2VF_TX_CTXT_DESC_L4T_CS_MASK; + } + + offload->ctxt_desc_tunnel |= tunnel; + offload->ctxt_desc_qw1 |= (u64)SXE2VF_TX_DESC_DTYPE_CTXT; + + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + first_buf->tx_features &= + ~(SXE2VF_TX_FEATURE_IPV4 | SXE2VF_TX_FEATURE_IPV6); + if (ip.v4->version == 4) + first_buf->tx_features |= SXE2VF_TX_FEATURE_IPV4; + if (ip.v6->version == 6) + first_buf->tx_features |= SXE2VF_TX_FEATURE_IPV6; + 
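The `bytecount` update in `sxe2vf_tso()` above pre-charges the headers the hardware will replicate for every extra TSO segment, so byte statistics and BQL accounting see wire bytes rather than skb bytes. A worked sketch (numbers illustrative, helper hypothetical):

```c
#include <linux/types.h>

/* Wire bytes for a TSO skb: skb length plus one replicated header per
 * additional segment. E.g. a 64054-byte skb with 54-byte headers and
 * mss 1460 gives 44 segments: 64054 + 43 * 54 = 66376 wire bytes.
 */
static u32 example_tso_wire_bytes(u32 skb_len, u32 hdr_len, u16 gso_segs)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}
```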
txq->stats->tx_stats.tx_csum_partial_inner++; + } else { + txq->stats->tx_stats.tx_csum_partial++; + } + + if (first_buf->tx_features & SXE2VF_TX_FEATURE_IPV4) { + l4_proto = ip.v4->protocol; + if (first_buf->tx_features & SXE2VF_TX_FEATURE_TSO) + cmd |= SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV4_CSUM; + else + cmd |= SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV4; + } else if (first_buf->tx_features & SXE2VF_TX_FEATURE_IPV6) { + cmd |= SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV6; + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + (void)ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + } else { + ret = -1; + goto l_end; + } + + l3_len = (u32)(l4.hdr - ip.hdr); + + switch (l4_proto) { + case IPPROTO_TCP: + cmd |= SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_TCP; + l4_len = l4.tcp->doff; + break; + case IPPROTO_UDP: + cmd |= SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_UDP; + l4_len = (sizeof(struct udphdr) >> 2); + break; +#ifdef HAVE_SCTP + case IPPROTO_SCTP: + cmd |= SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_SCTP; + l4_len = sizeof(struct sctphdr) >> 2; + break; +#endif + + default: + if (first_buf->tx_features & SXE2VF_TX_FEATURE_TSO) + return -1; + (void)skb_checksum_help(skb); + ret = 0; + goto l_end; + } + + sxe2vf_tx_desc_setup_for_csum(offload, l2_len, l3_len, l4_len, cmd); + first_buf->tx_features |= SXE2VF_TX_FEATURE_MACLEN; + + return 0; +l_end: + return ret; +} + +static inline void +sxe2vf_tx_desc_setup_for_vlan(struct sxe2vf_tx_offload_info *offload, u16 vlan_tci) +{ + offload->data_desc_l2tag1 = vlan_tci; + offload->data_desc_cmd |= SXE2VF_TX_DATA_DESC_CMD_IL2TAG1; +} + +static void sxe2vf_tx_vlan(struct sxe2vf_queue *txq, struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + struct sk_buff *skb = first_buf->skb; + union sxe2vf_ip_hdr ip; + u32 l2_len; + + if (!skb_vlan_tag_present(skb) && eth_type_vlan((u16)skb->protocol)) + return; + + if (skb_vlan_tag_present(skb)) { + txq->stats->tx_stats.tx_vlan_insert++; + sxe2vf_tx_desc_setup_for_vlan(offload, skb_vlan_tag_get(skb)); + LOG_INFO("txq:%u vlan tag:0x%x insert offload.\n", txq->idx_in_vsi, + skb_vlan_tag_get(skb)); + } + + if ((offload->data_desc_cmd & SXE2VF_TX_DATA_DESC_CMD_IL2TAG1) && + !(first_buf->tx_features & SXE2VF_TX_FEATURE_MACLEN)) { + ip.hdr = skb_network_header(skb); + l2_len = (u32)(ip.hdr - skb->data); + offload->data_desc_offset |= ((l2_len / SXE2VF_BYTES_PER_WORD) + << SXE2VF_TX_DATA_DESC_MACLEN_SHIFT); + } +} + +static inline void sxe2vf_tx_port_vlan(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + struct sk_buff *skb = first_buf->skb; + union sxe2vf_ip_hdr ip; + u32 l2_len; + + if (txq->vsi->adapter->switch_ctxt.filter_ctxt.vlan_info.port_vlan_exist && + !(first_buf->tx_features & SXE2VF_TX_FEATURE_MACLEN)) { + ip.hdr = skb_network_header(skb); + l2_len = (u32)(ip.hdr - skb->data); + offload->data_desc_offset |= ((l2_len / SXE2VF_BYTES_PER_WORD) + << SXE2VF_TX_DATA_DESC_MACLEN_SHIFT); + } +} + +static s32 sxe2vf_tx_feature_offload(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *first_buf, + struct sxe2vf_tx_offload_info *offload) +{ + s32 ret; + u16 ntu = txq->next_to_use; + struct sxe2vf_tx_context_desc *ctxt_desc; + + offload->adapter = txq->vsi->adapter; + + ret = sxe2vf_tso(txq, first_buf, offload); + if (ret < 0) + goto l_end; + + if (xfrm_offload(first_buf->skb)) { + ret = sxe2vf_ipsec_tx(txq, first_buf, offload); + if (ret) + goto l_end; + } + + ret = sxe2vf_tx_csum(txq, first_buf, offload); + if (ret < 0) + goto 
l_end; + sxe2vf_tx_vlan(txq, first_buf, offload); + sxe2vf_tx_port_vlan(txq, first_buf, offload); + if (offload->ctxt_desc_qw1 & SXE2VF_TX_DESC_DTYPE_CTXT) { + ctxt_desc = SXE2VF_TX_CTXT_DESC(txq, ntu); + ntu++; + txq->next_to_use = (ntu < txq->depth) ? ntu : 0; + + ctxt_desc->tunneling_params = cpu_to_le32(offload->ctxt_desc_tunnel); + ctxt_desc->l2tag2 = cpu_to_le16(offload->ctxt_desc_l2tag2); + ctxt_desc->qw1 = cpu_to_le64(offload->ctxt_desc_qw1); + ctxt_desc->ipset_offset = + cpu_to_le16(offload->ctxt_desc_ipsec_offset); + } + +l_end: + return ret; +} + +static netdev_tx_t sxe2vf_queue_xmit(struct sk_buff *skb, struct sxe2vf_queue *txq) +{ + netdev_tx_t ret = NETDEV_TX_OK; + struct sxe2vf_tx_buf *first_buf = NULL; + struct sxe2vf_tx_offload_info offload = {}; + s32 res; + u32 need_desc_count; + struct sxe2vf_adapter *adapter = netdev_priv(txq->netdev); + + sxe2vf_trace(queue_xmit, txq, skb); + need_desc_count = sxe2vf_tx_desc_count(skb); + if (sxe2vf_chk_linearize(skb, need_desc_count)) { + if (__skb_linearize(skb)) + goto l_free; + + need_desc_count = SXE2VF_ESTIMATE_DESC_COUNT_FOR_SKB_FRAG(skb->len); + txq->stats->tx_stats.tx_linearize++; + } + if (unlikely(sxe2vf_tx_desc_unused_count(txq) < need_desc_count)) { + if (sxe2vf_maybe_stop_tx(txq, (u16)need_desc_count)) { + txq->stats->tx_stats.tx_busy++; + ret = NETDEV_TX_BUSY; + LOG_WARN_BDF("txq desc is not enough, txq_idx=%d need desc " + "%u max %lu", + txq->idx_in_vsi, need_desc_count, + SXE2VF_TX_DESC_NEEDED); + goto l_end; + } + } + + first_buf = sxe2vf_tx_first_buffer_get(skb, txq); + + res = sxe2vf_tx_feature_offload(txq, first_buf, &offload); + if (res < 0) { + LOG_ERROR_BDF("tx offload failed, tx queue->idx=%u\n", + txq->idx_in_vsi); + goto l_free; + } + res = sxe2vf_xmit_pkt(txq, first_buf, &offload); + if (res) + LOG_ERROR_BDF("tx dma mapping err, queue idx=%u\n", txq->idx_in_vsi); + + return NETDEV_TX_OK; +l_free: + dev_kfree_skb_any(skb); + if (first_buf) + first_buf->skb = NULL; + +l_end: + sxe2vf_trace(queue_xmit_drop, txq, skb); + return ret; +} + +netdev_tx_t sxe2vf_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + netdev_tx_t ret = NETDEV_TX_OK; + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + struct sxe2vf_vsi *vsi = adapter->vsi_ctxt.vf_vsi; + struct sxe2vf_queue *txq; + + txq = vsi->txqs.q[skb->queue_mapping]; + LOG_DEBUG_BDF("skb:%pK netdev:%pK netdev_priv:%pK \t" + "vsi:%pK txq:%pK idx:%u.\n", + skb, netdev, adapter, vsi, txq, skb->queue_mapping); + if (!txq) { + ret = NETDEV_TX_BUSY; + goto l_end; + } + + if (skb_put_padto(skb, SXE2VF_MIN_TX_LEN)) { + ret = NETDEV_TX_OK; + goto l_end; + } + + return sxe2vf_queue_xmit(skb, txq); +l_end: + return ret; +} + +static s32 sxe2vf_tx_ring_alloc(struct sxe2vf_queue *txq, struct sxe2vf_vsi *vsi) +{ + u32 size; + struct device *dev = txq->dev; + struct sxe2vf_adapter *adapter = txq->vsi->adapter; + struct sxe2vf_desc_ring *desc_ring = &txq->desc; + + txq->tx_buf = kcalloc(txq->depth, sizeof(struct sxe2vf_tx_buf), + GFP_KERNEL | __GFP_ZERO); + if (!txq->tx_buf) { + LOG_ERROR_BDF("unable to allocate memory for tx buf ring\n"); + goto l_end; + } + + size = ALIGN(txq->depth * sizeof(union sxe2vf_tx_data_desc), PAGE_SIZE); + desc_ring->base_addr = dma_alloc_coherent(dev, size, &desc_ring->dma, + GFP_KERNEL | __GFP_ZERO); + if (!desc_ring->base_addr) { + LOG_DEV_ERR("unable to allocate memory \t" + "for the Tx descriptor ring, size=%u\n", + size); + goto l_alloc_failed; + } + + txq->netdev = vsi->netdev; + txq->next_to_use = 0; + txq->next_to_clean = 0; + + 
LOG_DEBUG_BDF(">>>> txq[%u]:%pK tx_buf:%pK.\n", txq->idx_in_vsi, txq, + txq->tx_buf); + return 0; + +l_alloc_failed: + kfree(txq->tx_buf); + txq->tx_buf = NULL; +l_end: + return -ENOMEM; +} + +static void sxe2vf_tx_ring_free(struct sxe2vf_queue *txq) +{ + u32 size; + + kfree(txq->tx_buf); + txq->tx_buf = NULL; + + if (txq->desc.base_addr) { + size = ALIGN(txq->depth * sizeof(union sxe2vf_tx_data_desc), + PAGE_SIZE); + dma_free_coherent(txq->dev, size, txq->desc.base_addr, + txq->desc.dma); + txq->desc.base_addr = NULL; + } +} + +static s32 sxe2vf_tx_rings_alloc(struct sxe2vf_vsi *vsi) +{ + s32 ret; + u16 i; + struct sxe2vf_adapter *adapter = vsi->adapter; + + sxe2vf_for_each_vsi_txq(vsi, i) + { + ret = sxe2vf_tx_ring_alloc(vsi->txqs.q[i], vsi); + if (ret) { + LOG_ERROR_BDF("txq[%d] ring alloc failed, ret=%d\n", i, ret); + goto l_end; + } + } + return 0; +l_end: + while (i--) + sxe2vf_tx_ring_free(vsi->txqs.q[i]); + + return ret; +} + +s32 sxe2vf_tx_hw_cfg(struct sxe2vf_vsi *vsi) +{ + return sxe2vf_txq_cfg_request(vsi->adapter); +} + +static void sxe2vf_tx_ring_clean(struct sxe2vf_queue *txq) +{ + u16 i; + u32 size; + + if (!txq->tx_buf) + return; + + for (i = 0; i < txq->depth; i++) + sxe2vf_tx_buffer_unmap(txq, &txq->tx_buf[i]); + + (void)memset(txq->tx_buf, 0, sizeof(*txq->tx_buf) * txq->depth); + + size = ALIGN(txq->depth * sizeof(union sxe2vf_tx_data_desc), PAGE_SIZE); + (void)memset(txq->desc.base_addr, 0, size); + + txq->next_to_use = 0; + txq->next_to_clean = 0; + + netdev_tx_reset_queue(sxe2vf_queue_to_netdevq(txq)); +} + +void sxe2vf_tx_rings_clean(struct sxe2vf_vsi *vsi) +{ + u16 i; + + sxe2vf_for_each_vsi_txq(vsi, i) + { + sxe2vf_tx_ring_clean(vsi->txqs.q[i]); + } +} + +static void sxe2vf_tx_ring_res_free(struct sxe2vf_queue *txq) +{ + sxe2vf_tx_ring_clean(txq); + sxe2vf_tx_ring_free(txq); +} + +void sxe2vf_tx_rings_res_free(struct sxe2vf_vsi *vsi) +{ + u32 i; + + if (!vsi) + return; + + sxe2vf_for_each_vsi_txq(vsi, i) + sxe2vf_tx_ring_res_free(vsi->txqs.q[i]); +} + +static void sxe2vf_tx_rings_free(struct sxe2vf_vsi *vsi) +{ + u32 i; + + sxe2vf_for_each_vsi_txq(vsi, i) + sxe2vf_tx_ring_free(vsi->txqs.q[i]); +} + +STATIC void sxe2vf_tx_configure_tail(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + u32 i; + struct sxe2vf_queue *txq; + + for (i = 0; i < vsi->txqs.q_cnt; i++) { + txq = vsi->txqs.q[i]; + txq->desc.tail = adapter->hw.reg_base_addr + (u32)SXE2VF_TXQ_TAIL(i); + } +} + +s32 sxe2vf_tx_cfg(struct sxe2vf_vsi *vsi) +{ + s32 ret; + struct sxe2vf_adapter *adapter = vsi->adapter; + + ret = sxe2vf_tx_rings_alloc(vsi); + if (ret) { + LOG_ERROR_BDF("tx resource alloc failed, ret=%d\n", ret); + goto l_end; + } + sxe2vf_tx_configure_tail(vsi); + + ret = sxe2vf_tx_hw_cfg(vsi); + if (ret) { + LOG_ERROR_BDF("tx hw configure failed, ret=%d\n", ret); + sxe2vf_wait_in_resetting(adapter, false); + goto l_free; + } + + return 0; + +l_free: + sxe2vf_tx_rings_free(vsi); +l_end: + return ret; +} + +static inline void sxe2vf_tx_desc_buf_update(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf **tx_buf, + union sxe2vf_tx_data_desc **tx_desc, + u32 *ntc) +{ + (*tx_buf)++; + (*tx_desc)++; + ++(*ntc); + if (unlikely(!(*ntc))) { + *ntc -= txq->depth; + *tx_buf = txq->tx_buf; + *tx_desc = SXE2VF_TX_DESC(txq, 0); + } +} + +static inline void sxe2vf_tx_skb_unmap(struct sxe2vf_queue *txq, + struct sxe2vf_tx_buf *tx_buf) +{ + dma_unmap_single(txq->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(tx_buf, len, 0); +} + 
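Note how the transmit path defers the tail doorbell: `sxe2vf_xmit_pkt()` only writes `txq->desc.tail` (mapped above to `SXE2VF_TXQ_TAIL(i)`) when `__netdev_tx_sent_queue()` reports that batching has ended, which coalesces MMIO writes under `xmit_more`. The idiom in isolation (helper name hypothetical, standard netdev API otherwise):

```c
#include <linux/netdevice.h>
#include <linux/io.h>

/* BQL-aware doorbell: account the queued bytes, and only touch the MMIO
 * tail register when the stack stops batching or BQL wants a flush.
 */
static void example_tx_doorbell(struct netdev_queue *nq, void __iomem *tail,
				u16 ntu, u32 bytes, bool xmit_more)
{
	if (__netdev_tx_sent_queue(nq, bytes, xmit_more))
		writel(ntu, tail);
}
```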
+static void sxe2vf_tx_desc_ring_unmap(struct sxe2vf_queue *txq, s32 napi_budget, + u16 *budget, + struct sxe2vf_queue_stats *queue_stats) +{ + struct sxe2vf_tx_buf *tx_buf; + u32 ntc = txq->next_to_clean; + union sxe2vf_tx_data_desc *tx_desc; + union sxe2vf_tx_data_desc *eop_desc; + struct sxe2vf_adapter *adapter = netdev_priv(txq->netdev); + + tx_buf = &txq->tx_buf[ntc]; + tx_desc = SXE2VF_TX_DESC(txq, ntc); + ntc -= txq->depth; + + do { + eop_desc = tx_buf->next_to_watch; + + if (!eop_desc) + break; + + LOG_DEBUG_BDF("tx queue clean start: queue idx=%u, \t" + "next_to_use=%d, next_to_clean=%d, budget=%d, \t" + "next_to_watch=%pK, eop_desc.wb.dd=%#08llx\n", + txq->idx_in_vsi, txq->next_to_use, txq->next_to_clean, + *budget, tx_buf->next_to_watch, + ((union sxe2vf_tx_data_desc *)tx_buf->next_to_watch) + ->wb.dd); + + smp_rmb(); + + sxe2vf_trace(txq_irq_clean, txq, tx_desc, tx_buf); + + if (!(eop_desc->wb.dd & + cpu_to_le64(SXE2VF_TX_DESC_DTYPE_DESC_DONE))) { + break; + } + + tx_buf->next_to_watch = NULL; + + queue_stats->bytes += tx_buf->bytecount; + queue_stats->packets += tx_buf->gso_segs; + + napi_consume_skb(tx_buf->skb, napi_budget); + + LOG_DEBUG_BDF("tx queue clean: budget=%d, bytes=%llu, packet=%llu\n", + *budget, queue_stats->bytes, queue_stats->packets); + + sxe2vf_tx_skb_unmap(txq, tx_buf); + tx_buf->skb = NULL; + + while (tx_desc != eop_desc) { + sxe2vf_trace(txq_irq_clean_unmap, txq, tx_desc, tx_buf); + sxe2vf_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + + if (dma_unmap_len(tx_buf, len)) + sxe2vf_tx_skb_unmap(txq, tx_buf); + } + sxe2vf_trace(txq_irq_clean_unmap_eop, txq, tx_desc, tx_buf); + sxe2vf_tx_desc_buf_update(txq, &tx_buf, &tx_desc, &ntc); + + prefetch(tx_desc); + + --*budget; + } while (likely(*budget)); + + ntc += txq->depth; + txq->next_to_clean = (u16)ntc; + + LOG_DEBUG_BDF("tx queue clean end: queue idx=%u, next_to_use=%d, \t" + "next_to_clean=%d, budget=%d\n", + txq->idx_in_vsi, txq->next_to_use, txq->next_to_clean, + *budget); +} + +static inline void sxe2vf_tx_pkt_stats_update(struct sxe2vf_queue *txq, + struct sxe2vf_queue_stats *stats) +{ + u64_stats_update_begin(&txq->syncp); + txq->stats->bytes += stats->bytes; + txq->stats->packets += stats->packets; + u64_stats_update_end(&txq->syncp); +} + +bool sxe2vf_txq_irq_clean(struct sxe2vf_queue *txq, s32 napi_budget) +{ + u16 budget = SXE2VF_DFLT_IRQ_WORK; + struct sxe2vf_queue_stats queue_stats = {}; + struct sxe2vf_adapter *adapter = txq->vsi->adapter; + + sxe2vf_tx_desc_ring_unmap(txq, napi_budget, &budget, &queue_stats); + + sxe2vf_tx_pkt_stats_update(txq, &queue_stats); + + netdev_tx_completed_queue(sxe2vf_queue_to_netdevq(txq), + (u32)queue_stats.packets, (u32)queue_stats.bytes); + + if (unlikely(queue_stats.packets && netif_carrier_ok(txq->netdev) && + (sxe2vf_tx_desc_unused_count(txq) >= + SXE2VF_TX_WAKE_THRESHOLD))) { + + smp_mb(); + + if (netif_tx_queue_stopped(sxe2vf_queue_to_netdevq(txq))) { + netif_tx_wake_queue(sxe2vf_queue_to_netdevq(txq)); + ++txq->stats->tx_stats.tx_restart; + LOG_WARN_BDF("\n\n txq idx=%u, wake_up\n\n", + txq->idx_in_vsi); + } + } + + return !!budget; +} + +void sxe2vf_tx_timeout(struct net_device *netdev, + __always_unused unsigned int txqueue) +{ + struct sxe2vf_adapter *adapter = netdev_priv(netdev); + + sxe2vf_dev_state_set(adapter, SXE2VF_DEVSTATE_VFR_REQUEST, + SXE2VF_RESET_NONE); + sxe2vf_wkq_schedule(adapter, SXE2VF_WK_MONITOR_IM, 0); + + LOG_WARN_BDF("vf tx timeout detected queue:%u cnt:%llu.\n", txqueue, + adapter->work_ctxt.tx_timeout_cnt++); +} diff --git 
a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.h new file mode 100644 index 0000000000000000000000000000000000000000..b780a67d25fa0b17accb83e9c8cf5bd0c382d54b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_tx.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. + * + * @file: sxe2vf_tx.h + * @author: Linkdata + * @date: 2025.02.16 + * @brief: + * @note: + */ +#ifndef __SXE2VF_TX_H__ +#define __SXE2VF_TX_H__ +#include +#include +#include +#include +#include +#include +struct sxe2vf_adapter; + +#define SXE2VF_TX_DESC(q, i) \ + (&(((union sxe2vf_tx_data_desc *)((q)->desc.base_addr))[i])) + +#define SXE2VF_TX_CTXT_DESC(q, i) \ + (&(((struct sxe2vf_tx_context_desc *)((q)->desc.base_addr))[i])) + +#define SXE2VF_TX_DATA_DESC_CMD_SHIFT 4 +#define SXE2VF_TX_DATA_DESC_OFFSET_SHIFT 16 +#define SXE2VF_TX_DATA_DESC_BUF_SZ_SHIFT 34 +#define SXE2VF_TX_DATA_DESC_L2TAG1_SHIFT 48 +#define SXE2VF_TX_DATA_DESC_CMD_MASK (0xFFFUL << SXE2VF_TX_DATA_DESC_CMD_SHIFT) +#define SXE2VF_TX_DATA_DESC_OFFSET_MASK \ + (0x3FFFFULL << SXE2VF_TX_DATA_DESC_OFFSET_SHIFT) +#define SXE2VF_TX_DATA_DESC_BUF_SZ_MASK \ + (0x3FFFULL << SXE2VF_TX_DATA_DESC_BUF_SZ_SHIFT) +#define SXE2VF_TX_DATA_DESC_L2TAG1_MASK \ + (0xFFFFULL << SXE2VF_TX_DATA_DESC_L2TAG1_SHIFT) + +#define SXE2VF_TX_CTXT_DESC_EIPLEN_SHIFT 2 +#define SXE2VF_TX_CTXT_DESC_NATLEN_SHIFT 12 +#define SXE2VF_TX_CTXT_DESC_L4T_CS_SHIFT 23 +#define SXE2VF_TX_CTXT_DESC_L4TUNT_SHIFT 9 +#define SXE2VF_TX_CTXT_DESC_UDP_TUNNE BIT_ULL(SXE2VF_TX_CTXT_DESC_L4TUNT_SHIFT) +#define SXE2VF_TX_CTXT_DESC_GRE_TUNNE \ + (0x2ULL << SXE2VF_TX_CTXT_DESC_L4TUNT_SHIFT) +#define SXE2VF_TX_CTXT_DESC_L4T_CS_MASK \ + BIT_ULL(SXE2VF_TX_CTXT_DESC_L4T_CS_SHIFT) + +#define SXE2VF_TX_CTXT_DESC_CMD_SHIFT 4 +#define SXE2VF_TX_CTXT_DESC_TSO_LEN_SHIFT 30 +#define SXE2VF_TX_CTXT_DESC_MSS_SHIFT 50 +#define SXE2VF_TX_CTXT_DESC_VSI_SHIFT 50 +#define SXE2VF_TX_CTXT_DESC_CMD_MASK (0x7FUL << SXE2VF_TX_CTXT_DESC_CMD_SHIFT) +#define SXE2VF_TX_CTXT_DESC_TSO_LEN_MASK \ + (0x3FFFFULL << SXE2VF_TX_CTXT_DESC_TSO_LEN_SHIFT) +#define SXE2VF_TX_CTXT_DESC_VSI_MASK (0x3FFULL << SXE2VF_TX_CTXT_DESC_VSI_SHIFT) +#define SXE2VF_BYTES_PER_WORD 2 +#define SXE2VF_BYTES_PER_DWORD 4 + +enum sxe2vf_txdd_offset_fields_relative_shift { + SXE2VF_TXDD_MACLEN_S = 0, + SXE2VF_TXDD_IPLEN_S = 7, + SXE2VF_TXDD_L4LEN_S = 14 +}; + +#define SXE2VF_TXDD_MACLEN_M (0x7FUL << SXE2VF_TXDD_MACLEN_S) +#define SXE2VF_TXDD_IPLEN_M (0x7FUL << SXE2VF_TXDD_IPLEN_S) +#define SXE2VF_TXDD_L4LEN_M (0xFUL << SXE2VF_TXDD_L4LEN_S) + +#define SXE2VF_TXCD_QW1_IPSEC_MODE_S 11 +#define SXE2VF_TXCD_QW1_IPSEC_MODE_M BIT_ULL(SXE2VF_TXCD_QW1_IPSEC_MODE_S) +#define SXE2VF_TXCD_QW1_IPSEC_EN_S 12 +#define SXE2VF_TXCD_QW1_IPSEC_EN_M BIT_ULL(SXE2VF_TXCD_QW1_IPSEC_EN_S) +#define SXE2VF_TXCD_QW1_IPSEC_ENGINE_MODE_S 13 +#define SXE2VF_TXCD_QW1_IPSEC_ENGINE_MODE_M \ + BIT_ULL(SXE2VF_TXCD_QW1_IPSEC_ENGINE_MODE_S) +#define SXE2VF_TXCD_QW1_IPSEC_SA_IDX_S 16 +#define SXE2VF_TXCD_QW1_IPSEC_SA_IDX_M \ + (0x1FFF << SXE2VF_TXCD_QW1_IPSEC_SA_IDX_S) + +#define SXE2VF_TXDD_MACLEN_MAX \ + ((SXE2VF_TXDD_MACLEN_M >> SXE2VF_TXDD_MACLEN_S) * SXE2VF_BYTES_PER_WORD) +#define SXE2VF_TXDD_IPLEN_MAX \ + ((SXE2VF_TXDD_IPLEN_M >> SXE2VF_TXDD_IPLEN_S) * SXE2VF_BYTES_PER_DWORD) +#define SXE2VF_TXDD_L4LEN_MAX \ + ((SXE2VF_TXDD_L4LEN_M >> SXE2VF_TXDD_L4LEN_S) * SXE2VF_BYTES_PER_DWORD) +#define SXE2VF_TXCD_QW1_MSS_MIN 88 + +enum sxe2vf_tx_features { + 
SXE2VF_TX_FEATURE_TSO = BIT(0), + SXE2VF_TX_FEATURE_HW_VLAN = BIT(1), + SXE2VF_TX_FEATURE_MACLEN = BIT(2), + SXE2VF_TX_FEATURE_DUMMY_PKT = BIT(3), + SXE2VF_TX_FEATURE_TSYN = BIT(4), + SXE2VF_TX_FEATURE_IPV4 = BIT(5), + SXE2VF_TX_FEATURE_IPV6 = BIT(6), + SXE2VF_TX_FEATURE_TUNNEL = BIT(7), + SXE2VF_TX_FEATURE_HW_OUTER_SINGLE_VLAN = BIT(8), +}; + +struct sxe2vf_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 ipset_offset; + __le64 qw1; +}; + +union sxe2vf_tx_data_desc { + struct { + __le64 buf_addr; + __le64 cmd_type_offset_bsz; + } read; + struct { + __le64 rsvd; + __le64 dd; + } wb; +}; + +struct sxe2vf_tx_offload_info { + struct sxe2vf_adapter *adapter; + u32 data_desc_cmd; + u32 data_desc_offset; + u32 data_desc_l2tag1; + u32 ctxt_desc_tunnel; + u64 ctxt_desc_qw1; + u16 ctxt_desc_ipsec_offset; + u16 ctxt_desc_l2tag2; +}; + +enum sxe2vf_tx_desc_type { + SXE2VF_TX_DESC_DTYPE_DATA = 0x0, + SXE2VF_TX_DESC_DTYPE_CTXT = 0x1, + SXE2VF_TX_DESC_DTYPE_FLTR_PROG = 0x8, + SXE2VF_TX_DESC_DTYPE_DESC_DONE = 0xF, +}; + +enum sxe2vf_tx_data_desc_cmd_bits { + SXE2VF_TX_DATA_DESC_CMD_EOP = 0x0001, + SXE2VF_TX_DATA_DESC_CMD_RS = 0x0002, + SXE2VF_TX_DATA_DESC_CMD_MACSEC = 0x0004, + SXE2VF_TX_DATA_DESC_CMD_IL2TAG1 = 0x0008, + SXE2VF_TX_DATA_DESC_CMD_DUMMY = 0x0010, + SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV6 = 0x0020, + SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV4 = 0x0040, + SXE2VF_TX_DATA_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, + SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_TCP = 0x0100, + SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_SCTP = 0x0200, + SXE2VF_TX_DATA_DESC_CMD_L4T_EOFT_UDP = 0x0300, + SXE2VF_TX_DATA_DESC_CMD_RE = 0x0400, +}; + +enum sxe2vf_tx_ctxt_desc_cmd_bits { + SXE2VF_TX_CTXT_DESC_CMD_TSO = 0x01, + SXE2VF_TX_CTXT_DESC_CMD_TSYN = 0x02, + SXE2VF_TX_CTXT_DESC_CMD_IL2TAG2 = 0x04, + SXE2VF_TX_CTXT_DESC_CMD_IL2TAG2_IL2H = 0x08, + SXE2VF_TX_CTXT_DESC_CMD_SWTCH_NOTAG = 0x00, + SXE2VF_TX_CTXT_DESC_CMD_SWTCH_UPLINK = 0x10, + SXE2VF_TX_CTXT_DESC_CMD_SWTCH_LOCAL = 0x20, + SXE2VF_TX_CTXT_DESC_CMD_SWTCH_VSI = 0x30, + SXE2VF_TX_CTXT_DESC_CMD_RESERVED = 0x40 +}; + +enum sxe2vf_tx_ctxt_desc_eipt_bits { + SXE2VF_TX_CTXT_DESC_EIPT_NONE = 0x0, + SXE2VF_TX_CTXT_DESC_EIPT_IPV6 = 0x1, + SXE2VF_TX_CTXT_DESC_IPV4_NO_CSUM = 0x2, + SXE2VF_TX_CTXT_DESC_IPV4 = 0x3, +}; + +enum sxe2vf_tx_data_desc_len_fields_shift { + SXE2VF_TX_DATA_DESC_MACLEN_SHIFT = 0, + SXE2VF_TX_DATA_DESC_IPLEN_SHIFT = 7, + SXE2VF_TX_DATA_DESC_L4_LEN_SHIFT = 14 +}; + +union sxe2vf_ip_hdr { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union sxe2vf_l4_hdr { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +s32 sxe2vf_tx_cfg(struct sxe2vf_vsi *vsi); +void sxe2vf_tx_rings_res_free(struct sxe2vf_vsi *vsi); + +void sxe2vf_tx_rings_clean(struct sxe2vf_vsi *vsi); + +bool sxe2vf_txq_irq_clean(struct sxe2vf_queue *txq, s32 napi_budget); +s32 sxe2vf_txqs_stop(struct sxe2vf_vsi *vsi); +netdev_tx_t sxe2vf_xmit(struct sk_buff *skb, struct net_device *netdev); +void sxe2vf_tx_timeout(struct net_device *netdev, + __always_unused unsigned int txqueue); + +s32 sxe2vf_tx_hw_cfg(struct sxe2vf_vsi *vsi); + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.c b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.c new file mode 100644 index 0000000000000000000000000000000000000000..bb78efeb0de0387987e699253876344db7cc76d0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Copyright (C), 2020, Linkdata Technologies Co., Ltd. 
+ *
+ * @file: sxe2vf_vsi.c
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+
+#include "sxe2_log.h"
+#include "sxe2vf_netdev.h"
+#include "sxe2vf.h"
+#include "sxe2vf_tx.h"
+#include "sxe2vf_rx.h"
+#include "sxe2vf_vsi.h"
+#include "sxe2vf_rxft.h"
+
+#ifdef SXE2VF_MAC_VLAN_CLEAR
+void sxe2vf_adv_cfg_clear(struct sxe2vf_adapter *adapter)
+{
+	sxe2vf_l2_filter_clear(adapter);
+}
+#endif
+
+void sxe2vf_adv_cfg_restore(struct sxe2vf_adapter *adapter)
+{
+	sxe2vf_l2_filter_rules_restore(adapter);
+
+	(void)sxe2vf_fnav_rebuild(adapter);
+
+	(void)sxe2vf_rss_rebuild(adapter);
+}
+
+static void sxe2vf_link_down(struct sxe2vf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	adapter->link_ctxt.link_up = false;
+	LOG_INFO_BDF("tx carrier off link down.\n");
+}
+
+static s32 sxe2vf_txrx_queues_disable(struct sxe2vf_adapter *adapter)
+{
+	return sxe2vf_txrxq_dis_request(adapter, true);
+}
+
+STATIC s32 __sxe2vf_vsi_close(struct sxe2vf_vsi *vsi)
+{
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+	s32 ret = 0;
+
+	sxe2vf_link_down(adapter);
+
+	ret = sxe2vf_txrx_queues_disable(adapter);
+
+	sxe2vf_queue_irq_disable(adapter);
+
+	sxe2vf_tx_rings_clean(vsi);
+	sxe2vf_rx_rings_clean(vsi);
+
+	return ret;
+}
+
+s32 __sxe2vf_vsi_open(struct sxe2vf_vsi *vsi, bool is_change, bool need_up)
+{
+	s32 ret;
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+
+	ret = sxe2vf_tx_cfg(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("open: tx config err, ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = sxe2vf_rx_cfg(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("open: rx config err, ret=%d\n", ret);
+		(void)sxe2vf_txrxq_dis_request(adapter, false);
+		goto l_tx_fail;
+	}
+
+	ret = sxe2vf_dev_mac_add(adapter);
+	if (ret) {
+		LOG_ERROR_BDF("vf dev mac add failed.(err:%d)\n", ret);
+		goto l_rx_fail;
+	}
+
+	ret = sxe2vf_irq_cfg(vsi);
+	if (ret)
+		goto l_rx_fail;
+
+	if (is_change) {
+		(void)netif_set_real_num_rx_queues(adapter->netdev, vsi->rxqs.q_cnt);
+		(void)netif_set_real_num_tx_queues(adapter->netdev, vsi->txqs.q_cnt);
+	}
+
+	clear_bit(SXE2VF_VSI_CLOSE, vsi->state);
+
+	if (need_up)
+		(void)sxe2vf_link_status_request(adapter);
+
+	return 0;
+
+l_rx_fail:
+	(void)sxe2vf_txrxq_dis_request(adapter, false);
+	sxe2vf_rx_rings_res_free(vsi);
+
+l_tx_fail:
+	sxe2vf_tx_rings_res_free(vsi);
+
+	return ret;
+}
+
+s32 sxe2vf_vsi_open(struct sxe2vf_vsi *vsi)
+{
+	return __sxe2vf_vsi_open(vsi, true, true);
+}
+
+s32 sxe2vf_vsi_close(struct sxe2vf_vsi *vsi)
+{
+	s32 ret = 0;
+
+	LOG_INFO("vsi:%u state:0x%lx.\n", vsi->vsi_id, *vsi->state);
+	if (!test_and_set_bit(SXE2VF_VSI_CLOSE, vsi->state)) {
+		ret = __sxe2vf_vsi_close(vsi);
+
+		sxe2vf_vsi_irqs_free(vsi);
+
+		sxe2vf_tx_rings_res_free(vsi);
+
+		sxe2vf_rx_rings_res_free(vsi);
+	}
+
+	return ret;
+}
+
+s32 sxe2vf_vsi_disable(struct sxe2vf_vsi *vsi)
+{
+	s32 ret = 0;
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+
+	if (!test_and_set_bit(SXE2VF_VSI_DISABLE, vsi->state)) {
+		ret = sxe2vf_vsi_close(vsi);
+		if (ret)
+			LOG_ERROR_BDF("vsi:%d close failed.(err:%d)\n", vsi->vsi_id,
+				      ret);
+	}
+
+	return ret;
+}
+
+void sxe2vf_queues_depth_update(struct sxe2vf_vsi *vf_vsi)
+{
+	u16 i;
+
+	sxe2vf_for_each_vsi_txq(vf_vsi, i)
+	{
+		vf_vsi->txqs.q[i]->depth = vf_vsi->txqs.depth;
+	}
+
+	sxe2vf_for_each_vsi_rxq(vf_vsi, i)
+	{
+		vf_vsi->rxqs.q[i]->depth = vf_vsi->rxqs.depth;
+	}
+}
+
+s32 sxe2vf_vsi_reopen(struct sxe2vf_vsi *vsi)
+{
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+	s32 ret = 0;
+ + if (!test_bit(SXE2VF_VSI_CLOSE, vsi->state)) { + ret = sxe2vf_vsi_close(vsi); + if (ret) { + LOG_ERROR_BDF("vsi:%u down fail.(err:%d).\n", vsi->vsi_id, + ret); + goto l_out; + } + + sxe2vf_queues_depth_update(vsi); + + ret = __sxe2vf_vsi_open(vsi, false, true); + if (ret) { + LOG_ERROR_BDF("vsi:%u up fail.(err:%d).\n", vsi->vsi_id, + ret); + goto l_out; + } + + LOG_INFO_BDF("vsi:%d down-up done.\n", vsi->vsi_id); + } + +l_out: + return ret; +} + +s32 sxe2vf_vsi_reopen_locked(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_adapter *adapter = vsi->adapter; + s32 ret; + + mutex_lock(&adapter->vsi_ctxt.lock); + ret = sxe2vf_vsi_reopen(vsi); + mutex_unlock(&adapter->vsi_ctxt.lock); + return ret; +} + +void sxe2vf_vsi_qs_stats_deinit(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_vsi_qs_stats *vsi_qs_stat = &vsi->vsi_qs_stats; + + kfree(vsi_qs_stat->txqs_stats); + vsi_qs_stat->txqs_stats = NULL; + + kfree(vsi_qs_stat->rxqs_stats); + vsi_qs_stat->rxqs_stats = NULL; +} + +s32 sxe2vf_vsi_qs_stats_init(struct sxe2vf_vsi *vsi) +{ + struct sxe2vf_vsi_qs_stats *vsi_qs_stats; + struct sxe2vf_adapter *adapter = vsi->adapter; + u16 i; + + vsi_qs_stats = &vsi->vsi_qs_stats; + + if (!vsi_qs_stats->txqs_stats) { + vsi_qs_stats->txqs_stats = kcalloc(SXE2_VF_ETH_Q_NUM, + sizeof(*vsi_qs_stats->txqs_stats), + GFP_KERNEL); + if (!vsi_qs_stats->txqs_stats) { + LOG_ERROR_BDF("alloc txqs stats failed, count: %d, size: " + "%zu.\n", + SXE2_VF_ETH_Q_NUM, + sizeof(*vsi_qs_stats->txqs_stats)); + goto err_out; + } + + for (i = 0; i < SXE2_VF_ETH_Q_NUM; i++) + u64_stats_init(&vsi_qs_stats->txqs_stats[i].syncp); + } + + if (!vsi_qs_stats->rxqs_stats) { + vsi_qs_stats->rxqs_stats = kcalloc(SXE2_VF_ETH_Q_NUM, + sizeof(*vsi_qs_stats->rxqs_stats), + GFP_KERNEL); + if (!vsi_qs_stats->rxqs_stats) { + LOG_ERROR_BDF("alloc rxqs stats failed, count: %d, size: " + "%zu.\n", + SXE2_VF_ETH_Q_NUM, + sizeof(*vsi_qs_stats->rxqs_stats)); + goto err_out; + } + + for (i = 0; i < SXE2_VF_ETH_Q_NUM; i++) + u64_stats_init(&vsi_qs_stats->rxqs_stats[i].syncp); + } + + sxe2vf_for_each_vsi_txq(vsi, i) + { + struct sxe2vf_queue *txq = vsi->txqs.q[i]; + + txq->stats = &vsi_qs_stats->txqs_stats[i]; + } + + sxe2vf_for_each_vsi_rxq(vsi, i) + { + struct sxe2vf_queue *rxq = vsi->rxqs.q[i]; + + rxq->stats = &vsi_qs_stats->rxqs_stats[i]; + } + + return 0; + +err_out: + sxe2vf_vsi_qs_stats_deinit(vsi); + return -ENOMEM; +} + +STATIC void sxe2vf_vsi_coalesce_store(struct sxe2vf_vsi *vsi, + struct sxe2vf_vsi_coalesce *coalesce) +{ + s32 idx; + struct sxe2vf_irq_data *irq_data; + + if (!vsi->irqs.irq_data) + return; + + sxe2vf_for_each_vsi_irq(vsi, idx) + { + irq_data = vsi->irqs.irq_data[idx]; + coalesce[idx].tx_itr = (u16)irq_data->tx.itr_setting; + coalesce[idx].rx_itr = (u16)irq_data->rx.itr_setting; + coalesce[idx].rate_limit = irq_data->rate_limit; + coalesce[idx].tx_itr_mode = irq_data->tx.itr_mode; + coalesce[idx].rx_itr_mode = irq_data->rx.itr_mode; + + if (SXE2VF_IRQ_HAS_TXQ(irq_data)) + coalesce[idx].tx_valid = true; + if (SXE2VF_IRQ_HAS_RXQ(irq_data)) + coalesce[idx].rx_valid = true; + } +} + +STATIC void sxe2vf_vsi_coalesce_set(struct sxe2vf_vsi *vsi, + struct sxe2vf_vsi_coalesce *coalesce, + u16 old_irq_cnt) +{ + s32 i; + u16 default_coalesce_tx = coalesce[0].tx_itr; + u16 default_coalesce_rx = coalesce[0].rx_itr; + u16 default_tx_itr_mode = coalesce[0].tx_itr_mode; + u16 default_rx_itr_mode = coalesce[0].rx_itr_mode; + struct sxe2vf_irq_data *irq_data; + struct sxe2vf_adapter *adapter = vsi->adapter; + struct sxe2vf_hw *hw = &adapter->hw; + + for (i 
= 0; i < old_irq_cnt && i < vsi->irqs.cnt; i++) {
+		irq_data = vsi->irqs.irq_data[i];
+		if (SXE2VF_IRQ_HAS_TXQ(irq_data) && coalesce[i].tx_valid) {
+			irq_data->tx.itr_mode = coalesce[i].tx_itr_mode;
+			irq_data->tx.itr_setting = coalesce[i].tx_itr;
+			sxe2vf_hw_int_itr_set(
+				hw, irq_data->tx.itr_idx, irq_data->irq_idx,
+				(irq_data->tx.itr_setting /
+				 adapter->irq_ctxt.itr_gran) &
+					SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+		} else if (SXE2VF_IRQ_HAS_TXQ(irq_data)) {
+			irq_data->tx.itr_mode = default_tx_itr_mode;
+			irq_data->tx.itr_setting = default_coalesce_tx;
+			sxe2vf_hw_int_itr_set(
+				hw, irq_data->tx.itr_idx, irq_data->irq_idx,
+				(irq_data->tx.itr_setting /
+				 adapter->irq_ctxt.itr_gran) &
+					SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+		}
+		if (SXE2VF_IRQ_HAS_RXQ(irq_data) && coalesce[i].rx_valid) {
+			irq_data->rx.itr_mode = coalesce[i].rx_itr_mode;
+			irq_data->rx.itr_setting = coalesce[i].rx_itr;
+			sxe2vf_hw_int_itr_set(
+				hw, irq_data->rx.itr_idx, irq_data->irq_idx,
+				(irq_data->rx.itr_setting /
+				 adapter->irq_ctxt.itr_gran) &
+					SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+		} else if (SXE2VF_IRQ_HAS_RXQ(irq_data)) {
+			irq_data->rx.itr_mode = default_rx_itr_mode;
+			irq_data->rx.itr_setting = default_coalesce_rx;
+			sxe2vf_hw_int_itr_set(
+				hw, irq_data->rx.itr_idx, irq_data->irq_idx,
+				(irq_data->rx.itr_setting /
+				 adapter->irq_ctxt.itr_gran) &
+					SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+		}
+	}
+	for (; i < vsi->irqs.cnt; i++) {
+		irq_data = vsi->irqs.irq_data[i];
+
+		irq_data->tx.itr_setting = default_coalesce_tx;
+		sxe2vf_hw_int_itr_set(
+			hw, irq_data->tx.itr_idx, irq_data->irq_idx,
+			(irq_data->tx.itr_setting /
+			 adapter->irq_ctxt.itr_gran) &
+				SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+
+		irq_data->rx.itr_setting = default_coalesce_rx;
+		sxe2vf_hw_int_itr_set(
+			hw, irq_data->rx.itr_idx, irq_data->irq_idx,
+			(irq_data->rx.itr_setting /
+			 adapter->irq_ctxt.itr_gran) &
+				SXE2VF_VF_INT_ITR_INTERVAL_MAX);
+	}
+}
+
+s32 sxe2vf_vsi_irq_cfg_record(struct sxe2vf_vsi *vsi)
+{
+	s32 ret = 0;
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+
+	if (!vsi->irqs.coalesce) {
+		if (vsi->irqs.cnt == 0) {
+			LOG_ERROR_BDF("vsi:%d irqs cnt invalid\n", vsi->vsi_id);
+			ret = -EINVAL;
+			goto l_out;
+		}
+
+		vsi->irqs.coalesce =
+			kcalloc(vsi->irqs.cnt, sizeof(*vsi->irqs.coalesce),
+				GFP_KERNEL);
+		if (!vsi->irqs.coalesce) {
+			ret = -ENOMEM;
+			LOG_ERROR_BDF("vsi:%d irqs coalesce alloc failed\n",
+				      vsi->vsi_id);
+			goto l_out;
+		}
+		sxe2vf_vsi_coalesce_store(vsi, vsi->irqs.coalesce);
+	}
+
+l_out:
+	return ret;
+}
+
+s32 sxe2vf_vsi_rebuild(struct sxe2vf_vsi *vsi)
+{
+	s32 ret;
+	struct sxe2vf_adapter *adapter = vsi->adapter;
+	u16 old_irq_cnt = vsi->irqs.cnt;
+
+	ret = sxe2vf_vsi_irq_cfg_record(vsi);
+	if (ret)
+		return ret;
+
+	(void)sxe2vf_vsi_irqs_decfg(vsi);
+	sxe2vf_vsi_irqs_deinit(vsi);
+
+	sxe2vf_vsi_queues_deinit(vsi);
+
+	ret = sxe2vf_vsi_queues_init(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u queues init failed during vsi "
+			      "rebuild.(err:%d)\n",
+			      vsi->vsi_id, ret);
+		return ret;
+	}
+
+	if (sxe2vf_vsi_qs_stats_init(vsi)) {
+		ret = -ENOMEM;
+		LOG_ERROR_BDF("vsi:%u qs stats init failed.(err:%d)\n", vsi->vsi_id, ret);
+		goto l_queues_deinit;
+	}
+
+	ret = sxe2vf_vsi_irqs_init(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u irqs init failed during vsi "
+			      "rebuild.(err:%d)\n",
+			      vsi->vsi_id, ret);
+		goto l_queues_deinit;
+	}
+
+	ret = sxe2vf_vsi_irqs_cfg(vsi);
+	if (ret) {
+		LOG_ERROR_BDF("vsi:%u irq cfg failed %d.\n", vsi->vsi_id, ret);
+		goto l_irq_deinit;
+	}
+
+	sxe2vf_vsi_coalesce_set(vsi, vsi->irqs.coalesce, old_irq_cnt);
+	kfree(vsi->irqs.coalesce);
+	
vsi->irqs.coalesce = NULL;
+
+	return ret;
+
+l_irq_deinit:
+	sxe2vf_vsi_irqs_deinit(vsi);
+l_queues_deinit:
+	sxe2vf_vsi_queues_deinit(vsi);
+
+	return ret;
+}
+
+s32 sxe2vf_dpdk_irq_cnt_get(void *pf_adapter)
+{
+	struct sxe2vf_adapter *adapter = pf_adapter;
+
+	return adapter->irq_ctxt.dpdk_irq_cnt;
+}
+
+s32 sxe2vf_dpdk_irq_vector_idx_get(void *adapter, u16 irq_idx)
+{
+	struct sxe2vf_adapter *vf_adapter = adapter;
+	u16 offset = vf_adapter->irq_ctxt.dpdk_offset + irq_idx;
+
+	if (!vf_adapter->irq_ctxt.msix_entries)
+		return -EINVAL;
+
+	return (s32)vf_adapter->irq_ctxt.msix_entries[offset].vector;
+}
+
+s32 sxe2vf_dpdk_resource_release(void *pf_adapter, struct sxe2_obj *obj)
+{
+	s32 ret;
+	struct sxe2vf_adapter *adapter = pf_adapter;
+	struct sxe2vf_msg_params params = {0};
+	struct sxe2_vf_user_driver_release msg = {0};
+	u16 vsi_id = adapter->vsi_ctxt.vsi_ids[SXE2VF_VSI_TYPE_DPDK];
+
+	msg.func_id = obj->vf_id;
+	msg.drv_id = obj->drv_id;
+
+	(void)sxe2vf_user_l2_feature_clean(adapter, vsi_id);
+
+	sxe2vf_mbx_msg_dflt_params_fill(&params, SXE2VF_MSG_RESP_WAIT_NOTIFY,
+					SXE2_VF_USER_DRIVER_RELEASE, &msg,
+					sizeof(msg), NULL, 0);
+	ret = sxe2vf_mbx_msg_send(adapter, &params);
+	if (ret)
+		LOG_ERROR_BDF("dpdk resource release failed.(err:%d)\n", ret);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.h b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed1ca51e4923a82f87cfd9cabf883dec4565037f
--- /dev/null
+++ b/drivers/net/ethernet/linkdata/sxe2vf/sxe2vf/sxe2vf_vsi.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * Copyright (C), 2020, Linkdata Technologies Co., Ltd.
+ *
+ * @file: sxe2vf_vsi.h
+ * @author: Linkdata
+ * @date: 2025.02.16
+ * @brief:
+ * @note:
+ */
+
+#ifndef __SXE2VF_VSI_H__
+#define __SXE2VF_VSI_H__
+
+#ifdef SXE2_TEST
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#ifdef SXE2_TEST
+#define SXE2VF_REALLOC(p, new_n, new_size, gfp, old_n) \
+	sxe2_krealloc_array(p, new_n, new_size, gfp, old_n)
+#else
+#define SXE2VF_REALLOC(p, new_n, new_size, gfp) \
+	krealloc_array(p, new_n, new_size, gfp)
+#endif
+
+s32 sxe2vf_vsi_reopen_locked(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_close(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_disable(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_open(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_rebuild(struct sxe2vf_vsi *vsi);
+
+void sxe2vf_adv_cfg_restore(struct sxe2vf_adapter *adapter);
+
+s32 sxe2vf_vsi_reopen(struct sxe2vf_vsi *vsi);
+
+s32 __sxe2vf_vsi_open(struct sxe2vf_vsi *vsi, bool is_change, bool need_up);
+
+void sxe2vf_queues_depth_update(struct sxe2vf_vsi *vf_vsi);
+
+void sxe2vf_vsi_qs_stats_deinit(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_qs_stats_init(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_vsi_irq_cfg_record(struct sxe2vf_vsi *vsi);
+
+s32 sxe2vf_dpdk_irq_cnt_get(void *pf_adapter);
+s32 sxe2vf_dpdk_irq_vector_idx_get(void *adapter, u16 irq_idx);
+
+s32 sxe2vf_dpdk_resource_release(void *pf_adapter, struct sxe2_obj *obj);
+
+#endif