From 044b03dad6d51e17f0e89bb8dd23652b8898b512 Mon Sep 17 00:00:00 2001 From: Lizhi He Date: Tue, 2 Sep 2025 14:14:15 +0800 Subject: [PATCH 001/126] dma-mapping: benchmark: add support for UB devices commit 72204766a0326d01d1c357357038bd74c1e73825 openEuler The current dma_map benchmark only supports platform devices and PCI devices. This patch adds support for UB devices. Signed-off-by: Xiaofeng Liu Signed-off-by: Li Wentao Signed-off-by: Lizhi He Signed-off-by: zhaolichang <943677312@qq.com> --- kernel/dma/map_benchmark.c | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index cc19a3efea89..23aeba267cfa 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -18,6 +18,9 @@ #include #include #include +#ifdef CONFIG_UB_UBUS +#include +#endif struct map_benchmark_data { struct map_benchmark bparam; @@ -351,6 +354,19 @@ static struct pci_driver map_benchmark_pci_driver = { .probe = map_benchmark_pci_probe, }; +#ifdef CONFIG_UB_UBUS +static int map_benchmark_ub_probe(struct ub_entity *uent, + const struct ub_device_id *id) +{ + return __map_benchmark_probe(&uent->dev); +} + +static struct ub_driver map_benchmark_ub_driver = { + .name = "dma_map_benchmark", + .probe = map_benchmark_ub_probe, +}; +#endif + static int __init map_benchmark_init(void) { int ret; @@ -360,16 +376,30 @@ static int __init map_benchmark_init(void) return ret; ret = platform_driver_register(&map_benchmark_platform_driver); - if (ret) { - pci_unregister_driver(&map_benchmark_pci_driver); - return ret; - } + if (ret) + goto err_reg_platform; + +#ifdef CONFIG_UB_UBUS + ret = ub_register_driver(&map_benchmark_ub_driver); + if (ret) + goto err_reg_ub; +#endif return 0; +#ifdef CONFIG_UB_UBUS +err_reg_ub: + platform_driver_unregister(&map_benchmark_platform_driver); +#endif +err_reg_platform: + pci_unregister_driver(&map_benchmark_pci_driver); + return ret; } static void 
__exit map_benchmark_cleanup(void) { +#ifdef CONFIG_UB_UBUS + ub_unregister_driver(&map_benchmark_ub_driver); +#endif platform_driver_unregister(&map_benchmark_platform_driver); pci_unregister_driver(&map_benchmark_pci_driver); } -- Gitee From bc6c8ddc2ce05a8093d7de08adee76885da246cb Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Fri, 14 Nov 2025 14:18:53 +0800 Subject: [PATCH 002/126] ub:ubus: call ub_host_probe inside register_ub_manage_subsystem_ops commit 59de5029039dc2a046518ce1cdf5e3bf7bc3ac02 openEuler Call ub_host_probe inside register_ub_manage_subsystem_ops so that ub_host_probe can become static function, same to ub_host_remove. Query entity and port na first, then do the config read in ub_fm_flush_ubc_info because config read will check na. Fix some comment description issues. Signed-off-by: Yahui Liu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/pool.c | 20 ++-- drivers/ub/ubus/ubus.h | 2 - drivers/ub/ubus/ubus_driver.c | 100 +++++++++++-------- drivers/ub/ubus/ubus_driver.h | 2 - drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c | 7 -- include/ub/ubus/ubus.h | 98 +++++++++--------- 6 files changed, 121 insertions(+), 108 deletions(-) diff --git a/drivers/ub/ubus/pool.c b/drivers/ub/ubus/pool.c index 2aeb8d57ee9a..e86b19b58f63 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -456,6 +456,18 @@ int ub_fm_flush_ubc_info(struct ub_bus_controller *ubc) if (!buf) goto out; + ret = ub_query_ent_na(ubc->uent, buf); + if (ret) { + dev_err(dev, "update cluster ubc cna failed, ret=%d\n", ret); + goto free_buf; + } + + ret = ub_query_port_na(ubc->uent, buf); + if (ret) { + dev_err(dev, "update cluster ubc port cna failed, ret=%d\n", ret); + goto free_buf; + } + ret = ub_cfg_read_word(ubc->uent, UB_UPI, &upi); if (ret) { dev_err(dev, "update cluster upi failed, ret=%d\n", ret); @@ -489,14 +501,6 @@ int ub_fm_flush_ubc_info(struct ub_bus_controller *ubc) ubc->uent->fm_cna = fm_cna & UB_FM_CNA_MASK; dev_info(dev, "update cluster ubc 
fm cna to %#x\n", ubc->uent->fm_cna); - ret = ub_query_ent_na(ubc->uent, buf); - if (ret) { - dev_err(dev, "update cluster ubc cna failed, ret=%d\n", ret); - goto free_buf; - } - - ret = ub_query_port_na(ubc->uent, buf); - free_buf: kfree(buf); out: diff --git a/drivers/ub/ubus/ubus.h b/drivers/ub/ubus/ubus.h index a4b46402e32d..d26e0816b89b 100644 --- a/drivers/ub/ubus/ubus.h +++ b/drivers/ub/ubus/ubus.h @@ -56,8 +56,6 @@ static inline bool ub_entity_test_priv_flag(struct ub_entity *uent, int bit) return test_bit(bit, &uent->priv_flags); } -int ub_host_probe(void); -void ub_host_remove(void); struct ub_bus_controller *ub_find_bus_controller(u32 ctl_no); struct ub_manage_subsystem_ops { diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index b4c6f0d3b760..9431bbccd3b0 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -33,45 +33,12 @@ MODULE_PARM_DESC(entity_flex_en, "Entity Flexible enable: default: 0"); DECLARE_RWSEM(ub_bus_sem); +#define UBC_GUID_VENDOR_SHIFT 48 +#define UBC_GUID_VENDOR_MASK GENMASK(15, 0) + static DEFINE_MUTEX(manage_subsystem_ops_mutex); static const struct ub_manage_subsystem_ops *manage_subsystem_ops; -int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) -{ - if (!ops) - return -EINVAL; - - mutex_lock(&manage_subsystem_ops_mutex); - if (!manage_subsystem_ops) { - manage_subsystem_ops = ops; - mutex_unlock(&manage_subsystem_ops_mutex); - pr_info("ub manage subsystem ops register successfully\n"); - return 0; - } - - pr_warn("ub manage subsystem ops has been registered\n"); - mutex_unlock(&manage_subsystem_ops_mutex); - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(register_ub_manage_subsystem_ops); - -void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) -{ - if (!ops) - return; - - mutex_lock(&manage_subsystem_ops_mutex); - if (manage_subsystem_ops == ops) { - manage_subsystem_ops = NULL; - pr_info("ub manage subsystem ops unregister 
successfully\n"); - } else { - pr_warn("ub manage subsystem ops is not registered by this vendor\n"); - } - mutex_unlock(&manage_subsystem_ops_mutex); -} -EXPORT_SYMBOL_GPL(unregister_ub_manage_subsystem_ops); - const struct ub_manage_subsystem_ops *get_ub_manage_subsystem_ops(void) { return manage_subsystem_ops; @@ -653,7 +620,7 @@ static void ubus_driver_resource_drain(void) ub_static_cluster_instance_drain(); } -int ub_host_probe(void) +static int ub_host_probe(void) { int ret; @@ -724,9 +691,8 @@ int ub_host_probe(void) ub_bus_type_uninit(); return ret; } -EXPORT_SYMBOL_GPL(ub_host_probe); -void ub_host_remove(void) +static void ub_host_remove(void) { message_rx_uninit(); if (manage_subsystem_ops && manage_subsystem_ops->ras_handler_remove) @@ -741,7 +707,61 @@ void ub_host_remove(void) unregister_ub_cfg_ops(); ub_bus_type_uninit(); } -EXPORT_SYMBOL_GPL(ub_host_remove); + +int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) +{ + struct ub_bus_controller *ubc; + int ret; + + if (!ops) { + pr_err("ub manage subsystem ops is NULL\n"); + return -EINVAL; + } + + mutex_lock(&manage_subsystem_ops_mutex); + if (!manage_subsystem_ops) { + list_for_each_entry(ubc, &ubc_list, node) { + if (((ubc->attr.ubc_guid_high >> UBC_GUID_VENDOR_SHIFT) & + UBC_GUID_VENDOR_MASK) == ops->vendor) { + manage_subsystem_ops = ops; + ret = ub_host_probe(); + if (ret) + manage_subsystem_ops = NULL; + else + pr_info("ub manage subsystem ops register successfully\n"); + + mutex_unlock(&manage_subsystem_ops_mutex); + return ret; + } + } + pr_warn("ub manage subsystem ops is not match with any of ub controller\n"); + } else { + pr_warn("ub manage subsystem ops has been registered\n"); + } + mutex_unlock(&manage_subsystem_ops_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(register_ub_manage_subsystem_ops); + +void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) +{ + if (!ops) { + pr_err("ub manage subsystem ops is NULL\n"); + return; + } + + 
mutex_lock(&manage_subsystem_ops_mutex); + if (manage_subsystem_ops == ops) { + ub_host_remove(); + manage_subsystem_ops = NULL; + pr_info("ub manage subsystem ops unregister successfully\n"); + } else { + pr_warn("ub manage subsystem ops is not registered by this vendor\n"); + } + mutex_unlock(&manage_subsystem_ops_mutex); +} +EXPORT_SYMBOL_GPL(unregister_ub_manage_subsystem_ops); static int __init ubus_driver_init(void) { diff --git a/drivers/ub/ubus/ubus_driver.h b/drivers/ub/ubus/ubus_driver.h index f2bff32bbee9..b2eab906fa31 100644 --- a/drivers/ub/ubus/ubus_driver.h +++ b/drivers/ub/ubus/ubus_driver.h @@ -8,7 +8,5 @@ extern struct rw_semaphore ub_bus_sem; extern struct bus_type ub_service_bus_type; -int ub_host_probe(void); -void ub_host_remove(void); #endif /* __UBUS_DRIVER_H__ */ diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c index be86f055cb34..3627b0e8f018 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.c @@ -53,10 +53,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) if (ret) return ret; - ret = ub_host_probe(); - if (ret) - goto host_probe_fail; - ret = platform_driver_register(drv); if (ret) goto platform_driver_register_fail; @@ -64,8 +60,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) return 0; platform_driver_register_fail: - ub_host_remove(); -host_probe_fail: unregister_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops); return ret; } @@ -73,7 +67,6 @@ static int __init hisi_ubus_driver_register(struct platform_driver *drv) static void __exit hisi_ubus_driver_unregister(struct platform_driver *drv) { platform_driver_unregister(drv); - ub_host_remove(); unregister_ub_manage_subsystem_ops(&hisi_ub_manage_subsystem_ops); } diff --git a/include/ub/ubus/ubus.h b/include/ub/ubus/ubus.h index a81d652a18ff..ca3ba63c226a 100644 --- a/include/ub/ubus/ubus.h +++ 
b/include/ub/ubus/ubus.h @@ -193,7 +193,7 @@ struct ub_entity { u32 token_id; u32 token_value; - /* mue & ue info */ + /* MUE & UE info */ u8 is_mue; u16 total_ues; u16 num_ues; @@ -205,7 +205,7 @@ struct ub_entity { /* entity topology info */ struct list_head node; struct ub_bus_controller *ubc; - struct ub_entity *pue; /* ue/mue connected to their mue */ + struct ub_entity *pue; /* UE/MUE connected to their MUE */ int topo_rank; /* The levels of Breadth-First Search */ /* entity port info */ @@ -334,10 +334,10 @@ struct ub_dynids { * @shutdown: Hook into reboot_notifier_list (kernel/sys.c). * Intended to stop any idling operations. * @virt_configure: Optional driver callback to allow configuration of - * ues. This function is called to enable or disable ues. + * UEs. This function is called to enable or disable UEs. * @virt_notify: Optional driver callback to notify the driver about - * changes in ue status. This function is called - * when the status of a ue changes. + * changes in UE status. This function is called + * when the status of a UE changes. * @activate: Activate a specific entity. This function is called to * activate an entity by its index. * @deactivate: Deactivate a specific entity. This function is called to @@ -510,14 +510,14 @@ void ub_bus_type_iommu_ops_set(const struct iommu_ops *ops); const struct iommu_ops *ub_bus_type_iommu_ops_get(void); /** - * ub_get_ent_by_eid() - Searching for UB Devices by EID. + * ub_get_ent_by_eid() - Searching for UB entity by EID. * @eid: entity EID. * * Traverse the UB bus device linked list and search for the device with * the target EID. You need to call ub_entity_put() after using it. * * Context: Any context. - * Return: The device found, or NULL if not found. + * Return: The entity found, or NULL if not found. */ struct ub_entity *ub_get_ent_by_eid(unsigned int eid); @@ -566,8 +566,7 @@ struct ub_entity *ub_get_entity(unsigned int vendor, unsigned int entity, * @uent: UB entity. 
* @enable: Enable or disable. * - * Enables or disables the entity access bus and the path through which - * the bus accesses the entity. + * Enable or disable the communication channel between entity and user host. * * Context: Any context. */ @@ -588,31 +587,31 @@ int ub_set_user_info(struct ub_entity *uent); * ub_unset_user_info() - Deinitialize host information for the entity. * @uent: UB entity. * - * Clearing the Host Information of a entity. + * Clearing the host information of an entity. * * Context: Any context. */ void ub_unset_user_info(struct ub_entity *uent); /** - * ub_enable_entities() - Enable ues of mue in batches. - * @pue: UB mue. + * ub_enable_entities() - Enable UEs of MUE in batches. + * @pue: UB MUE. * @nums: Number of enabled entities. * - * Create ues in batches, initialize them, and add them to the system. + * Create and initialize UEs in batches and add to the system. * * Context: Any context. - * Return: 0 if success, or %-EINVAL if @pue type is not mue or nums over - * mue's total ue nums, or %-ENOMEM if the system is out of memory, + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or nums over + * MUE's total UE nums, or %-ENOMEM if the system is out of memory, * or other failed negative values. */ int ub_enable_entities(struct ub_entity *pue, int nums); /** - * ub_disable_entities() - Disable ues of mue in batches. - * @pue: UB mue. + * ub_disable_entities() - Disable UEs of MUE in batches. + * @pue: UB MUE. * - * Remove all enabled ues under the mue from the system. + * Remove all enabled UEs under the MUE from the system. * * Context: Any context. */ @@ -620,29 +619,29 @@ void ub_disable_entities(struct ub_entity *pue); /** * ub_enable_ue() - Enable a single ue. - * @pue: UB mue. + * @pue: UB MUE. * @entity_idx: Number of the entity to be enabled. * - * Create a specified ue under mue, initialize the ue, + * Create a specified UE under MUE, initialize the ue, * and add it to the system. * * Context: Any context. 
- * Return: 0 if success, or %-EINVAL if @pue type is not mue or @entity_idx - * is no longer in the ue range of mue, or %-EEXIST if entity has been + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or @entity_idx + * is no longer in the UE range of MUE, or %-EEXIST if entity has been * enabled, or other failed negative values. */ int ub_enable_ue(struct ub_entity *pue, int entity_idx); /** * ub_disable_ue() - Disable a single ue. - * @pue: UB mue. + * @pue: UB MUE. * @entity_idx: Number of the entity to be disabled. * - * Remove a specified ue. + * Remove a specified UE. * * Context: Any context. - * Return: 0 if success, or %-EINVAL if @pue type is not mue or @entity_idx - * is no longer in the ue range of mue, or %-ENODEV if entity hasn't + * Return: 0 if success, or %-EINVAL if @pue type is not MUE or @entity_idx + * is no longer in the UE range of MUE, or %-ENODEV if entity hasn't * been enabled. */ int ub_disable_ue(struct ub_entity *pue, int entity_idx); @@ -662,7 +661,7 @@ bool ub_get_entity_flex_en(void); * @uent: UB entity. * * Return the EID of bus instance if the entity has already been bound, - * or controller's EID. + * otherwise return controller's EID. * * Context: Any context. * Return: positive number if success, or %-EINVAL if @dev is %NULL, @@ -743,7 +742,7 @@ void ub_unregister_share_port(struct ub_entity *uent, u16 port_id, * ub_reset_entity() - Function entity level reset. * @ent: UB entity. * - * Reset a single entity without affecting other entities, If you want to reuse + * Reset a single entity without affecting other entities. If you want to reuse * the entity after reset, you need to re-initialize it. * * Context: Any context @@ -757,7 +756,7 @@ int ub_reset_entity(struct ub_entity *ent); * ub_device_reset() - Device level reset. * @ent: UB entity. * - * Reset Device, include all entities under the device, If you want to reuse + * Reset device, including all entities under the device. 
If you want to reuse * the device after reset, you need to re-initialize it. * * Context: Any context @@ -771,10 +770,10 @@ int ub_device_reset(struct ub_entity *ent); * @uent: UB entity. * @vdm_pld: Vendor private message payload context. * - * Send a vendor private message to the entity. Response will put in - * vdm_pld->rsp_pld, and will fill in vdm->rsp_pld_len. + * Send a vendor private message to the entity. Response will be put in + * vdm_pld->rsp_pld, and response length is stored in vdm->rsp_pld_len. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or %-EINVAL if parameters invalid, * or %-ENOMEM if system out of memory, or other failed negative values. */ @@ -796,10 +795,10 @@ unsigned int ub_irq_calc_affinity_vectors(unsigned int minvec, void ub_disable_intr(struct ub_entity *uent); /** - * ub_intr_vec_count() - Interrupt Vectors Supported by a entity. + * ub_intr_vec_count() - Interrupt Vectors Supported by an entity. * @uent: UB entity. * - * Querying the Number of Interrupt Vectors Supported by a entity. + * Querying the Number of Interrupt Vectors Supported by an entity. * For interrupt type 2. * * Context: Any context. @@ -808,10 +807,10 @@ void ub_disable_intr(struct ub_entity *uent); u32 ub_intr_vec_count(struct ub_entity *uent); /** - * ub_int_type1_vec_count() - Interrupt Vectors Supported by a entity. + * ub_int_type1_vec_count() - Interrupt Vectors Supported by an entity. * @uent: UB entity. * - * Querying the Number of Interrupt Vectors Supported by a entity. + * Querying the Number of Interrupt Vectors Supported by an entity. * For interrupt type 1. * * Context: Any context. 
@@ -864,7 +863,7 @@ static inline int ub_alloc_irq_vectors(struct ub_entity *uent, int ub_irq_vector(struct ub_entity *uent, unsigned int nr); /** - * ub_irq_get_affinity() - Get a entity interrupt vector affinity + * ub_irq_get_affinity() - Get an entity interrupt vector affinity * @uent: the UB entity to operate on * @nr: entity-relative interrupt vector index (0-based); has different * meanings, depending on interrupt mode: @@ -884,7 +883,7 @@ const struct cpumask *ub_irq_get_affinity(struct ub_entity *uent, int nr); * @uent: UB entity. * @entity_idx: Number of the entity to be activated. * - * Context: Any context, It will take device_trylock()/device_unlock() + * Context: Any context, it will take device_trylock()/device_unlock() * Return: 0 if success, or %-EINVAL if the device doesn't match the driver, * or %-EBUSY if can't get device_trylock(), or other failed negative values. */ @@ -895,7 +894,7 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); * @uent: UB entity. * @entity_idx: Number of the entity to be deactivated. * - * Context: Any context, It will take device_trylock()/device_unlock() + * Context: Any context, it will take device_trylock()/device_unlock() * Return: 0 if success, or %-EINVAL if the entity doesn't match the driver, * or %-EBUSY if can't get device_trylock(), or other failed negative values. */ @@ -910,7 +909,7 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx); * Initiate configuration access to the specified address of the entity * configuration space and read 1 byte. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or negative value if failed. 
*/ int ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val); @@ -925,7 +924,7 @@ int ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val); * Initiate configuration access to the specified address of the entity * configuration space and write 1 byte. * - * Context: Any context, It will take spin_lock_irqsave()/spin_unlock_restore() + * Context: Any context, it will take spin_lock_irqsave()/spin_unlock_restore() * Return: 0 if success, or negative value if failed. */ int ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val); @@ -937,7 +936,7 @@ int ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val); * @uent: UB entity pointer. * * Context: Any context. - * Return: uent, or NULL if @uent is NULL. + * Return: @uent itself, or NULL if @uent is NULL. */ struct ub_entity *ub_entity_get(struct ub_entity *uent); @@ -955,9 +954,9 @@ void ub_entity_put(struct ub_entity *uent); * @max_num: Buffer size. * @real_num: Real entities num. * - * All ub bus controllers in the system are returned. Increase the reference - * counting of all entities by 1. Remember to call ub_put_bus_controller() after - * using it. + * All ub bus controllers in the system are collected in @uents. Increase the + * reference counting of all entities by 1. Remember to call + * ub_put_bus_controller() after using it. * * Context: Any context. * Return: 0 if success, or %-EINVAL if input parameter is NULL, @@ -1020,8 +1019,8 @@ void ub_unregister_driver(struct ub_driver *drv); * ub_stop_ent() - Stop the entity. * @uent: UB entity. * - * Call device_release_driver(), user can't use it again, if it's a mue, - * will stop all ues under it, if it's entity0, will stop all entity under it. + * Call device_release_driver(), user can't use it again. If it's a MUE, + * will stop all UEs under it. If it's entity0, will stop all entities under it. * * Context: Any context. 
*/ @@ -1031,8 +1030,9 @@ void ub_stop_ent(struct ub_entity *uent); * ub_stop_and_remove_ent() - Stop and remove the entity from system. * @uent: UB entity. * - * Call device_release_driver() and device_unregister(), if it's a mue, - * will remove all ues under it, if it's entity0, will remove all entity under it. + * Call device_release_driver() and device_unregister(). If it's a MUE, + * will remove all UEs under it. If it's entity0, will remove all entities + * under it. * * Context: Any context. */ -- Gitee From 9852f4d9eea03209c14a018f376e7c25eb5caad7 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Thu, 20 Nov 2025 11:25:40 +0800 Subject: [PATCH 003/126] ub:hisi-ubus: Adding compatibility Interfaces for ub memory commit 56bce93e4090d11862d320e1025418bad27d1f45 openEuler Adding southbound and northbound compatibility Interfaces for ub memory Signed-off-by: Jianquan Lin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/memory.c | 4 +- drivers/ub/ubus/memory.h | 4 +- drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 4 + drivers/ub/ubus/vendor/hisilicon/memory.c | 90 +++++++++++++++----- 4 files changed, 79 insertions(+), 23 deletions(-) diff --git a/drivers/ub/ubus/memory.c b/drivers/ub/ubus/memory.c index e7b3144db4bd..e7fcdf76f28e 100644 --- a/drivers/ub/ubus/memory.c +++ b/drivers/ub/ubus/memory.c @@ -114,7 +114,7 @@ void ub_mem_drain_start(u32 scna) } if (mem_device->ops && mem_device->ops->mem_drain_start) - mem_device->ops->mem_drain_start(mem_device); + mem_device->ops->mem_drain_start(ubc); else dev_warn(mem_device->dev, "ub mem_device ops mem_drain_start is null.\n"); } @@ -138,7 +138,7 @@ int ub_mem_drain_state(u32 scna) } if (mem_device->ops && mem_device->ops->mem_drain_state) - return mem_device->ops->mem_drain_state(mem_device); + return mem_device->ops->mem_drain_state(ubc); dev_warn(mem_device->dev, "ub memory decoder ops mem_drain_state is null.\n"); return 0; diff --git a/drivers/ub/ubus/memory.h b/drivers/ub/ubus/memory.h index 
7c841b466f3e..f96f7a290616 100644 --- a/drivers/ub/ubus/memory.h +++ b/drivers/ub/ubus/memory.h @@ -29,8 +29,8 @@ struct ub_mem_ras_ctx { }; struct ub_mem_device_ops { - void (*mem_drain_start)(struct ub_mem_device *mem_device); - int (*mem_drain_state)(struct ub_mem_device *mem_device); + void (*mem_drain_start)(struct ub_bus_controller *ubc); + int (*mem_drain_state)(struct ub_bus_controller *ubc); bool (*mem_validate_pa)(struct ub_bus_controller *ubc, u64 pa_start, u64 pa_end, bool cacheable); diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 9aa3ba5521c1..092695e9d43c 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -12,6 +12,10 @@ #define MB_SIZE_OFFSET 20 #define HI_UBC_PRIVATE_DATA_RESERVED 3 #define HI_UBC_PRIVATE_DATA_RESERVED2 111 +#define UB_MEM_VERSION_INVALID 0xffffffff +#define UB_MEM_VERSION_0 0 +#define UB_MEM_VERSION_1 1 +#define UB_MEM_VERSION_2 2 struct hi_mem_pa_info { u64 decode_addr; diff --git a/drivers/ub/ubus/vendor/hisilicon/memory.c b/drivers/ub/ubus/vendor/hisilicon/memory.c index 4d4f80f847fc..fa9747171eea 100644 --- a/drivers/ub/ubus/vendor/hisilicon/memory.c +++ b/drivers/ub/ubus/vendor/hisilicon/memory.c @@ -24,9 +24,13 @@ #define MEM_EVENT_MAX_NUM 16 #define MAR_ERR_ADDR_COUNT 10 #define MAR_ERR_ADDR_SIZE 2 +#define MEM_DECODER_NUMBER_V1 5 +#define MEM_DECODER_NUMBER_V2 2 #define hpa_gen(addr_h, addr_l) (((u64)(addr_h) << 32) | (addr_l)) +static u8 ub_mem_num; + struct ub_mem_decoder { struct device *dev; struct ub_entity *uent; @@ -58,25 +62,26 @@ struct hi_get_ubmem_event_pld { static bool hi_mem_validate_pa(struct ub_bus_controller *ubc, u64 pa_start, u64 pa_end, bool cacheable); -static void hi_mem_drain_start(struct ub_mem_device *mem_device) +static void hi_mem_drain_start(struct ub_bus_controller *ubc) { - struct ub_mem_decoder *decoder, *data = mem_device->priv_data; + struct ub_mem_decoder *decoder, 
*data = ubc->mem_device->priv_data; if (!data) { - dev_err(mem_device->dev, "ubc mem_decoder is null.\n"); + dev_err(&ubc->dev, "ubc mem_decoder is null.\n"); return; } - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { decoder = &data[i]; writel(0, decoder->base_reg + DRAIN_ENABLE_REG_OFFSET); writel(1, decoder->base_reg + DRAIN_ENABLE_REG_OFFSET); } } -static int hi_mem_drain_state(struct ub_mem_device *mem_device) +static int hi_mem_drain_state(struct ub_bus_controller *ubc) { - struct ub_mem_decoder *decoder, *data = mem_device->priv_data; + struct ub_mem_decoder *decoder, *data = ubc->mem_device->priv_data; + struct ub_mem_device *mem_device = ubc->mem_device; int val = 0; if (!data) { @@ -84,7 +89,7 @@ static int hi_mem_drain_state(struct ub_mem_device *mem_device) return 0; } - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { decoder = &data[i]; val = readb(decoder->base_reg + DRAIN_STATE_REG_OFFSET) & 0x1; dev_info_ratelimited(decoder->dev, "ub memory decoder[%d] drain state, val=%d\n", @@ -246,16 +251,25 @@ static irqreturn_t hi_mem_ras_irq(int irq, void *context) return IRQ_WAKE_THREAD; } -static int hi_mem_decoder_create_one(struct ub_bus_controller *ubc, int mar_id) +static bool is_ub_mem_version_valid(struct ub_bus_controller *ubc) +{ + struct hi_ubc_private_data *data = ubc->data; + + if (!data || data->ub_mem_version == UB_MEM_VERSION_INVALID) + return false; + return true; +} + +static int hi_mem_decoder_create_one(struct ub_bus_controller *ubc, int index) { - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; struct ub_mem_decoder *decoder, *priv_data = ubc->mem_device->priv_data; + struct hi_ubc_private_data *data = ubc->data; - decoder = &priv_data[mar_id]; + decoder = &priv_data[index]; decoder->dev = &ubc->dev; decoder->uent = ubc->uent; - decoder->base_reg = ioremap(data->mem_pa_info[mar_id].decode_addr, + decoder->base_reg = 
ioremap(data->mem_pa_info[index].decode_addr, SZ_64); if (!decoder->base_reg) { dev_err(decoder->dev, "ub mem decoder base reg ioremap failed.\n"); @@ -265,24 +279,47 @@ static int hi_mem_decoder_create_one(struct ub_bus_controller *ubc, int mar_id) return 0; } -static void hi_mem_decoder_remove_one(struct ub_bus_controller *ubc, int mar_id) +static void hi_mem_decoder_remove_one(struct ub_bus_controller *ubc, int index) { struct ub_mem_decoder *priv_data = ubc->mem_device->priv_data; - iounmap(priv_data[mar_id].base_reg); + iounmap(priv_data[index].base_reg); +} + +static u8 get_mem_decoder_number(struct hi_ubc_private_data *data) +{ + switch (data->ub_mem_version) { + case UB_MEM_VERSION_0: + case UB_MEM_VERSION_1: + return MEM_DECODER_NUMBER_V1; + case UB_MEM_VERSION_2: + return MEM_DECODER_NUMBER_V2; + default: + return 0; + } } int hi_mem_decoder_create(struct ub_bus_controller *ubc) { struct ub_mem_device *mem_device; + struct hi_ubc_private_data *data = ubc->data; void *priv_data; int ret; + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to create mem decoder\n"); + return 0; + } + + ub_mem_num = get_mem_decoder_number(data); + if (!ub_mem_num) + return -EINVAL; + mem_device = kzalloc(sizeof(*mem_device), GFP_KERNEL); if (!mem_device) return -ENOMEM; - priv_data = kcalloc(MEM_INFO_NUM, sizeof(struct ub_mem_decoder), + priv_data = kcalloc(ub_mem_num, sizeof(struct ub_mem_decoder), GFP_KERNEL); if (!priv_data) { kfree(mem_device); @@ -296,7 +333,7 @@ int hi_mem_decoder_create(struct ub_bus_controller *ubc) mem_device->priv_data = priv_data; ubc->mem_device = mem_device; - for (int i = 0; i < MEM_INFO_NUM; i++) { + for (int i = 0; i < ub_mem_num; i++) { ret = hi_mem_decoder_create_one(ubc, i); if (ret) { dev_err(&ubc->dev, "hi mem create decoder %d failed\n", i); @@ -318,7 +355,12 @@ void hi_mem_decoder_remove(struct ub_bus_controller *ubc) if (!ubc->mem_device) return; - for (int i = 0; i < MEM_INFO_NUM; i++) + if (!is_ub_mem_version_valid(ubc)) { 
+ dev_info(&ubc->dev, "Don't need to remove mem decoder\n"); + return; + } + + for (int i = 0; i < ub_mem_num; i++) hi_mem_decoder_remove_one(ubc, i); kfree(ubc->mem_device->priv_data); @@ -333,7 +375,12 @@ void hi_register_ubmem_irq(struct ub_bus_controller *ubc) u32 usi_idx; if (!ubc->mem_device) { - pr_err("mem device is NULL!\n"); + pr_err("register ubmem irq failed, mem device is NULL!\n"); + return; + } + + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to register_ubmem_irq\n"); return; } @@ -371,6 +418,11 @@ void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc) return; } + if (!is_ub_mem_version_valid(ubc)) { + dev_info(&ubc->dev, "Don't need to unregister_ubmem_irq\n"); + return; + } + irq_num = ubc->mem_device->ubmem_irq_num; if (irq_num < 0) return; @@ -404,8 +456,8 @@ static bool hi_mem_validate_pa(struct ub_bus_controller *ubc, return false; } - data = (struct hi_ubc_private_data *)ubc->data; - for (u16 i = 0; i < MEM_INFO_NUM; i++) { + data = ubc->data; + for (u16 i = 0; i < ub_mem_num; i++) { if (ub_hpa_valid(pa_start, pa_end, data->mem_pa_info[i].cc_base_addr, data->mem_pa_info[i].cc_base_size) && -- Gitee From a4c4c88ede1c99eda4cc5d280fddd571be1f72d7 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:11:22 +0800 Subject: [PATCH 004/126] ub: udma: Support query jfs and jetty context from hw. commit ba0d9456a61f3f559ea978da1639302deb0182c7 openEuler This patch adds the ability to query jfs and jetty context from hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 281 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_dfx.h | 4 + drivers/ub/urma/hw/udma/udma_jetty.h | 143 ++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 50 +++++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 480 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index f6920f879eea..5580fc474b9d 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -13,6 +13,172 @@ bool dfx_switch; +static int to_udma_trans_mode(uint32_t type, struct udma_dev *dev, + enum ubcore_transport_mode *trans_mode) +{ + switch (type) { + case JETTY_UM: + *trans_mode = UBCORE_TP_UM; + break; + case JETTY_RC: + *trans_mode = UBCORE_TP_RC; + break; + case JETTY_RM: + *trans_mode = UBCORE_TP_RM; + break; + default: + dev_err(dev->dev, "transport mode error, type = %u.\n", type); + return -EINVAL; + } + + return 0; +} + +static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, + enum ubcore_jetty_state *jetty_state) +{ + switch (state) { + case JETTY_RESET: + *jetty_state = UBCORE_JETTY_STATE_RESET; + break; + case JETTY_READY: + *jetty_state = UBCORE_JETTY_STATE_READY; + break; + case JETTY_ERROR: + *jetty_state = UBCORE_JETTY_STATE_ERROR; + break; + case JETTY_SUSPEND: + *jetty_state = UBCORE_JETTY_STATE_SUSPENDED; + break; + default: + dev_err(dev->dev, "JFS context state error, state = %u.\n", state); + return -EINVAL; + } + + return 0; +} + +int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jfs_ctx; + uint32_t wqe_bb_depth; + int ret; + + mbox_attr.tag = jfs->jfs_id.id; + mbox_attr.op = 
UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfs_ctx = (struct udma_jetty_ctx *)mailbox->buf; + + ret = to_udma_jetty_ctx_state(jfs_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jfs_ctx; + + cfg->priority = jfs_ctx->sl; + cfg->flag = jfs->jfs_cfg.flag; + cfg->max_sge = jfs->jfs_cfg.max_sge; + cfg->max_rsge = jfs->jfs_cfg.max_rsge; + cfg->err_timeout = jfs_ctx->ta_timeout; + wqe_bb_depth = 1 << jfs_ctx->sqe_bb_shift; + cfg->depth = wqe_bb_depth / udma_jfs->sq.sqe_bb_cnt; + cfg->rnr_retry = jfs_ctx->rnr_retry_num; + cfg->max_inline_data = jfs->jfs_cfg.max_inline_data; + + ret = to_udma_trans_mode(jfs_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jfs_ctx; + + if (udma_jfs->sq.buf.kva) { + cfg->jfc = jfs->jfs_cfg.jfc; + cfg->eid_index = jfs_ctx->seid_idx; + } + +err_jfs_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +int udma_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct ubase_mbx_attr jfr_mbox_attr = {}; + struct ubase_cmd_mailbox *jetty_mailbox; + struct ubase_cmd_mailbox *jfr_mailbox; + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jetty_ctx *jetty_ctx; + struct udma_jfr_ctx *jfr_ctx; + uint32_t wqe_bb_depth; + int ret; + + mbox_attr.tag = jetty->jetty_id.id; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + jetty_mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!jetty_mailbox) + return -ENOMEM; + + jfr_mbox_attr.tag = udma_jetty->jfr->ubcore_jfr.jfr_id.id; + jfr_mbox_attr.op = UDMA_CMD_QUERY_JFR_CONTEXT; + jfr_mailbox = udma_mailbox_query_ctx(udma_dev, &jfr_mbox_attr); + if (!jfr_mailbox) { + udma_free_cmd_mailbox(udma_dev, jetty_mailbox); + return -ENOMEM; + } + + jetty_ctx = (struct udma_jetty_ctx *)jetty_mailbox->buf; + jfr_ctx = (struct 
udma_jfr_ctx *)jfr_mailbox->buf; + + wqe_bb_depth = 1 << jetty_ctx->sqe_bb_shift; + cfg->id = jetty->jetty_id.id; + cfg->jfs_depth = wqe_bb_depth / udma_jetty->sq.sqe_bb_cnt; + cfg->jfr_depth = 1 << jfr_ctx->rqe_shift; + cfg->flag = jetty->jetty_cfg.flag; + cfg->max_send_sge = jetty->jetty_cfg.max_send_sge; + cfg->max_send_rsge = jetty->jetty_cfg.max_send_rsge; + cfg->max_recv_sge = jetty->jetty_cfg.max_recv_sge; + cfg->max_inline_data = jetty->jetty_cfg.max_inline_data; + cfg->priority = jetty_ctx->sl; + cfg->rnr_retry = jetty_ctx->rnr_retry_num; + cfg->err_timeout = jetty_ctx->ta_timeout; + cfg->min_rnr_timer = jetty->jetty_cfg.min_rnr_timer; + + ret = to_udma_trans_mode(jetty_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jetty_ctx; + + cfg->token_value.token = 0; + + ret = to_udma_jetty_ctx_state(jetty_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jetty_ctx; + + attr->rx_threshold = to_udma_rx_threshold(jfr_ctx->limit_wl); + + if (udma_jetty->sq.buf.kva) { + cfg->eid_index = jetty_ctx->seid_idx; + cfg->send_jfc = jetty->jetty_cfg.send_jfc; + cfg->recv_jfc = jetty->jetty_cfg.recv_jfc; + cfg->jfr = jetty->jetty_cfg.jfr; + cfg->jetty_grp = jetty->jetty_cfg.jetty_grp; + } + +err_jetty_ctx: + jfr_ctx->token_value = 0; + udma_free_cmd_mailbox(udma_dev, jfr_mailbox); + udma_free_cmd_mailbox(udma_dev, jetty_mailbox); + + return ret; +} + static int udma_query_res_list(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, struct ubcore_res_val *val, @@ -139,6 +305,119 @@ static int udma_query_res_rc(struct udma_dev *udma_dev, return 0; } +static int udma_query_res_jetty(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_val *res_jetty = (struct ubcore_res_jetty_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + enum ubcore_jetty_state jetty_state; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jettyc; + struct udma_dfx_jetty *jetty; + int ret; + + if 
(key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jetty, val, "jetty"); + + read_lock(&udma_dev->dfx_info->jetty.rwlock); + jetty = (struct udma_dfx_jetty *)xa_load(&udma_dev->dfx_info->jetty.table, key->key); + if (!jetty) { + read_unlock(&udma_dev->dfx_info->jetty.rwlock); + dev_err(udma_dev->dev, "failed to query jetty, jetty_id = %u.\n", + key->key); + return -EINVAL; + } + res_jetty->jfs_depth = jetty->jfs_depth; + read_unlock(&udma_dev->dfx_info->jetty.rwlock); + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jettyc = (struct udma_jetty_ctx *)mailbox->buf; + res_jetty->jetty_id = key->key; + + ret = to_udma_jetty_ctx_state(jettyc->state, udma_dev, &jetty_state); + if (ret) + goto err_res_jetty_ctx; + + res_jetty->state = jetty_state; + res_jetty->recv_jfc_id = jettyc->rx_jfcn; + res_jetty->send_jfc_id = jettyc->tx_jfcn; + res_jetty->priority = jettyc->sl; + res_jetty->jfr_id = jettyc->jfrn_l | + jettyc->jfrn_h << JETTY_CTX_JFRN_H_OFFSET; + jettyc->sqe_base_addr_l = 0; + jettyc->sqe_base_addr_h = 0; + jettyc->user_data_l = 0; + jettyc->user_data_h = 0; + + udma_dfx_ctx_print(udma_dev, "Jetty", key->key, sizeof(*jettyc) / sizeof(uint32_t), + (uint32_t *)jettyc); +err_res_jetty_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + +static int udma_query_res_jfs(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfs_val *res_jfs = (struct ubcore_res_jfs_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + enum ubcore_jetty_state jfs_state; + struct ubase_cmd_mailbox *mailbox; + struct udma_jetty_ctx *jfsc; + struct udma_dfx_jfs *jfs; + int ret; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfs, val, "jfs"); + + read_lock(&udma_dev->dfx_info->jfs.rwlock); + jfs = (struct udma_dfx_jfs 
*)xa_load(&udma_dev->dfx_info->jfs.table, key->key); + if (!jfs) { + read_unlock(&udma_dev->dfx_info->jfs.rwlock); + dev_err(udma_dev->dev, "failed to query jfs, jfs_id = %u.\n", + key->key); + return -EINVAL; + } + res_jfs->depth = jfs->depth; + read_unlock(&udma_dev->dfx_info->jfs.rwlock); + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfsc = (struct udma_jetty_ctx *)mailbox->buf; + res_jfs->jfs_id = key->key; + + ret = to_udma_jetty_ctx_state(jfsc->state, udma_dev, &jfs_state); + if (ret) + goto err_res_jetty_ctx; + + res_jfs->state = jfs_state; + res_jfs->priority = jfsc->sl; + res_jfs->jfc_id = jfsc->tx_jfcn; + jfsc->sqe_base_addr_l = 0; + jfsc->sqe_base_addr_h = 0; + jfsc->user_data_l = 0; + jfsc->user_data_h = 0; + + udma_dfx_ctx_print(udma_dev, "JFS", key->key, sizeof(*jfsc) / sizeof(uint32_t), + (uint32_t *)jfsc); +err_res_jetty_ctx: + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + static int udma_query_res_seg(struct udma_dev *udma_dev, struct ubcore_res_key *key, struct ubcore_res_val *val) { @@ -208,6 +487,8 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, static udma_query_res_handler g_udma_query_res_handlers[] = { [0] = NULL, + [UBCORE_RES_KEY_JFS] = udma_query_res_jfs, + [UBCORE_RES_KEY_JETTY] = udma_query_res_jetty, [UBCORE_RES_KEY_RC] = udma_query_res_rc, [UBCORE_RES_KEY_SEG] = udma_query_res_seg, [UBCORE_RES_KEY_DEV_TA] = udma_query_res_dev_ta, diff --git a/drivers/ub/urma/hw/udma/udma_dfx.h b/drivers/ub/urma/hw/udma/udma_dfx.h index 92c0db1aa744..febfde3b84ec 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.h +++ b/drivers/ub/urma/hw/udma/udma_dfx.h @@ -47,6 +47,10 @@ static inline uint32_t to_udma_rx_threshold(uint32_t limit_wl) } } +int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); +int udma_query_jetty(struct ubcore_jetty *jetty, struct 
ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, struct ubcore_res_val *val); int udma_dfx_init(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 00a3c41b39b6..5ee8f8f4403b 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -6,6 +6,16 @@ #include "udma_common.h" +#define JETTY_CTX_JFRN_H_OFFSET 12 + +enum jetty_state { + JETTY_RESET, + JETTY_READY, + JETTY_ERROR, + JETTY_SUSPEND, + STATE_NUM, +}; + struct udma_jetty { struct ubcore_jetty ubcore_jetty; struct udma_jfr *jfr; @@ -17,6 +27,139 @@ struct udma_jetty { bool ue_rx_closed; }; +enum jetty_type { + JETTY_RAW_OR_NIC, + JETTY_UM, + JETTY_RC, + JETTY_RM, + JETTY_TYPE_RESERVED, +}; + +struct udma_jetty_ctx { + /* DW0 */ + uint32_t ta_timeout : 2; + uint32_t rnr_retry_num : 3; + uint32_t type : 3; + uint32_t sqe_bb_shift : 4; + uint32_t sl : 4; + uint32_t state : 3; + uint32_t jfs_mode : 1; + uint32_t sqe_token_id_l : 12; + /* DW1 */ + uint32_t sqe_token_id_h : 8; + uint32_t err_mode : 1; + uint32_t rsv : 1; + uint32_t cmp_odr : 1; + uint32_t rsv1 : 1; + uint32_t sqe_base_addr_l : 20; + /* DW2 */ + uint32_t sqe_base_addr_h; + /* DW3 */ + uint32_t rsv2; + /* DW4 */ + uint32_t tx_jfcn : 20; + uint32_t jfrn_l : 12; + /* DW5 */ + uint32_t jfrn_h : 8; + uint32_t rsv3 : 4; + uint32_t rx_jfcn : 20; + /* DW6 */ + uint32_t seid_idx : 10; + uint32_t pi_type : 1; + uint32_t rsv4 : 21; + /* DW7 */ + uint32_t user_data_l; + /* DW8 */ + uint32_t user_data_h; + /* DW9 */ + uint32_t sqe_position : 1; + uint32_t sqe_pld_position : 1; + uint32_t sqe_pld_tokenid : 20; + uint32_t rsv5 : 10; + /* DW10 */ + uint32_t tpn : 24; + uint32_t rsv6 : 8; + /* DW11 */ + uint32_t rmt_eid : 20; + uint32_t rsv7 : 12; + /* DW12 */ + uint32_t rmt_tokenid : 20; + uint32_t rsv8 : 12; + /* DW13 - DW15 */ + uint32_t rsv8_1[3]; + /* DW16 */ + uint32_t 
next_send_ssn : 16; + uint32_t src_order_wqe : 16; + /* DW17 */ + uint32_t src_order_ssn : 16; + uint32_t src_order_sgme_cnt : 16; + /* DW18 */ + uint32_t src_order_sgme_send_cnt : 16; + uint32_t CI : 16; + /* DW19 */ + uint32_t wqe_sgmt_send_cnt : 20; + uint32_t src_order_wqebb_num : 4; + uint32_t src_order_wqe_vld : 1; + uint32_t no_wqe_send_cnt : 4; + uint32_t so_lp_vld : 1; + uint32_t fence_lp_vld : 1; + uint32_t strong_fence_lp_vld : 1; + /* DW20 */ + uint32_t PI : 16; + uint32_t sq_db_doing : 1; + uint32_t ost_rce_credit : 15; + /* DW21 */ + uint32_t sq_db_retrying : 1; + uint32_t wmtp_rsv0 : 31; + /* DW22 */ + uint32_t wait_ack_timeout : 1; + uint32_t wait_rnr_timeout : 1; + uint32_t cqe_ie : 1; + uint32_t cqe_sz : 1; + uint32_t wml_rsv0 : 28; + /* DW23 */ + uint32_t wml_rsv1 : 32; + /* DW24 */ + uint32_t next_rcv_ssn : 16; + uint32_t next_cpl_bb_idx : 16; + /* DW25 */ + uint32_t next_cpl_sgmt_num : 20; + uint32_t we_rsv0 : 12; + /* DW26 */ + uint32_t next_cpl_bb_num : 4; + uint32_t next_cpl_cqe_en : 1; + uint32_t next_cpl_info_vld : 1; + uint32_t rpting_cqe : 1; + uint32_t not_rpt_cqe : 1; + uint32_t flush_ssn : 16; + uint32_t flush_ssn_vld : 1; + uint32_t flush_vld : 1; + uint32_t flush_cqe_done : 1; + uint32_t we_rsv1 : 5; + /* DW27 */ + uint32_t rcved_cont_ssn_num : 20; + uint32_t we_rsv2 : 12; + /* DW28 */ + uint32_t sq_timer; + /* DW29 */ + uint32_t rnr_cnt : 3; + uint32_t abt_ssn : 16; + uint32_t abt_ssn_vld : 1; + uint32_t taack_timeout_flag : 1; + uint32_t we_rsv3 : 9; + uint32_t err_type_l : 2; + /* DW30 */ + uint32_t err_type_h : 7; + uint32_t sq_flush_ssn : 16; + uint32_t we_rsv4 : 9; + /* DW31 */ + uint32_t avail_sgmt_ost : 10; + uint32_t read_op_cnt : 10; + uint32_t we_rsv5 : 12; + /* DW32 - DW63 */ + uint32_t taack_nack_bm[32]; +}; + static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h 
b/drivers/ub/urma/hw/udma/udma_jfr.h index cb1ecbaf3572..858e36d3a27a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -30,6 +30,56 @@ struct udma_jfr { struct completion ae_comp; }; +struct udma_jfr_ctx { + /* DW0 */ + uint32_t state : 2; + uint32_t limit_wl : 2; + uint32_t rqe_size_shift : 3; + uint32_t token_en : 1; + uint32_t rqe_shift : 4; + uint32_t rnr_timer : 5; + uint32_t record_db_en : 1; + uint32_t rqe_token_id_l : 14; + /* DW1 */ + uint32_t rqe_token_id_h : 6; + uint32_t type : 3; + uint32_t rsv : 3; + uint32_t rqe_base_addr_l : 20; + /* DW2 */ + uint32_t rqe_base_addr_h; + /* DW3 */ + uint32_t rqe_position : 1; + uint32_t pld_position : 1; + uint32_t pld_token_id : 20; + uint32_t rsv1 : 10; + /* DW4 */ + uint32_t token_value; + /* DW5 */ + uint32_t user_data_l; + /* DW6 */ + uint32_t user_data_h; + /* DW7 */ + uint32_t pi : 16; + uint32_t ci : 16; + /* DW8 */ + uint32_t idx_que_addr_l; + /* DW9 */ + uint32_t idx_que_addr_h : 20; + uint32_t jfcn_l : 12; + /* DW10 */ + uint32_t jfcn_h : 8; + uint32_t record_db_addr_l : 24; + /* DW11 */ + uint32_t record_db_addr_m; + /* DW12 */ + uint32_t record_db_addr_h : 2; + uint32_t cqeie : 1; + uint32_t cqesz : 1; + uint32_t rsv2 : 28; + /* padding */ + uint32_t reserved[3]; +}; + static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *jfr) { return container_of(jfr, struct udma_jfr, ubcore_jfr); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5bf8631260a5..ce6b9db0ea06 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -173,6 +173,8 @@ static struct ubcore_ops g_dev_ops = { .unregister_seg = udma_unregister_seg, .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, + .query_jfs = udma_query_jfs, + .query_jetty = udma_query_jetty, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 
b4ac9cc201fcc60c3c8e52d85b01fa078f68e697 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:13:06 +0800 Subject: [PATCH 005/126] ub: udma: Support query jfr context from hw. commit b63f4fc5c01bd57625360a6580b1cfdcc0cf833c openEuler This patch adds the ability to query jfr context from hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 130 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_dfx.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 135 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index 5580fc474b9d..96e6185c437f 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -34,6 +34,27 @@ static int to_udma_trans_mode(uint32_t type, struct udma_dev *dev, return 0; } +static int to_udma_jfr_ctx_state(uint32_t state, struct udma_dev *dev, + enum ubcore_jfr_state *jfr_state) +{ + switch (state) { + case JETTY_RESET: + *jfr_state = UBCORE_JFR_STATE_RESET; + break; + case JETTY_READY: + *jfr_state = UBCORE_JFR_STATE_READY; + break; + case JETTY_ERROR: + *jfr_state = UBCORE_JFR_STATE_ERROR; + break; + default: + dev_err(dev->dev, "JFR context state error, state = %u.\n", state); + return -EINVAL; + } + + return 0; +} + static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, enum ubcore_jetty_state *jetty_state) { @@ -58,6 +79,54 @@ static int to_udma_jetty_ctx_state(uint32_t state, struct udma_dev *dev, return 0; } +int udma_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfr_ctx *jfr_ctx; + int ret; + + mbox_attr.tag = jfr->jfr_id.id; + mbox_attr.op = 
UDMA_CMD_QUERY_JFR_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfr_ctx = (struct udma_jfr_ctx *)mailbox->buf; + + attr->rx_threshold = to_udma_rx_threshold(jfr_ctx->limit_wl); + + ret = to_udma_jfr_ctx_state(jfr_ctx->state, udma_dev, &attr->state); + if (ret) + goto err_jfr_ctx; + + cfg->id = jfr->jfr_id.id; + cfg->flag = jfr->jfr_cfg.flag; + cfg->max_sge = 1 << jfr_ctx->rqe_size_shift; + cfg->depth = 1 << jfr_ctx->rqe_shift; + cfg->token_value.token = 0; + cfg->flag.bs.token_policy = UBCORE_TOKEN_NONE; + cfg->min_rnr_timer = jfr_ctx->rnr_timer; + + ret = to_udma_trans_mode(jfr_ctx->type, udma_dev, &cfg->trans_mode); + if (ret) + goto err_jfr_ctx; + + if (udma_jfr->rq.buf.kva) { + cfg->eid_index = jfr->jfr_cfg.eid_index; + cfg->jfc = jfr->jfr_cfg.jfc; + } + +err_jfr_ctx: + jfr_ctx->token_value = 0; + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, struct ubcore_jfs_attr *attr) { @@ -418,6 +487,66 @@ static int udma_query_res_jfs(struct udma_dev *udma_dev, return ret; } +static int udma_query_res_jfr(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfr_val *res_jfr = (struct ubcore_res_jfr_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + enum ubcore_jfr_state jfr_state; + struct udma_jfr_ctx *jfrc; + uint32_t *jfr_id; + int ret; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfr, val, "jfr"); + + jfr_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jfr.table, key->key); + if (!jfr_id) { + dev_err(udma_dev->dev, "failed to query jfr, jfr_id = %u.\n", + key->key); + return -EINVAL; + } + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFR_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfrc = (struct udma_jfr_ctx 
*)mailbox->buf; + res_jfr->jfr_id = key->key; + + ret = to_udma_jfr_ctx_state(jfrc->state, udma_dev, &jfr_state); + if (ret) + goto err_res_jfr_ctx; + + res_jfr->state = jfr_state; + res_jfr->depth = 1 << jfrc->rqe_shift; + res_jfr->jfc_id = jfrc->jfcn_l | + jfrc->jfcn_h << JFR_JFCN_H_OFFSET; + jfrc->rqe_base_addr_l = 0; + jfrc->rqe_base_addr_h = 0; + jfrc->token_en = 0; + jfrc->token_value = 0; + jfrc->user_data_l = 0; + jfrc->user_data_h = 0; + jfrc->idx_que_addr_l = 0; + jfrc->idx_que_addr_h = 0; + jfrc->record_db_addr_l = 0; + jfrc->record_db_addr_m = 0; + jfrc->record_db_addr_h = 0; + + udma_dfx_ctx_print(udma_dev, "JFR", key->key, sizeof(*jfrc) / sizeof(uint32_t), + (uint32_t *)jfrc); +err_res_jfr_ctx: + jfrc->token_value = 0; + udma_free_cmd_mailbox(udma_dev, mailbox); + + return ret; +} + static int udma_query_res_seg(struct udma_dev *udma_dev, struct ubcore_res_key *key, struct ubcore_res_val *val) { @@ -488,6 +617,7 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, static udma_query_res_handler g_udma_query_res_handlers[] = { [0] = NULL, [UBCORE_RES_KEY_JFS] = udma_query_res_jfs, + [UBCORE_RES_KEY_JFR] = udma_query_res_jfr, [UBCORE_RES_KEY_JETTY] = udma_query_res_jetty, [UBCORE_RES_KEY_RC] = udma_query_res_rc, [UBCORE_RES_KEY_SEG] = udma_query_res_seg, diff --git a/drivers/ub/urma/hw/udma/udma_dfx.h b/drivers/ub/urma/hw/udma/udma_dfx.h index febfde3b84ec..dcdf23646c1c 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.h +++ b/drivers/ub/urma/hw/udma/udma_dfx.h @@ -47,6 +47,8 @@ static inline uint32_t to_udma_rx_threshold(uint32_t limit_wl) } } +int udma_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr); int udma_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, struct ubcore_jfs_attr *attr); int udma_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index 858e36d3a27a..bffb68b3cdbd 
100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -8,6 +8,8 @@ #include "udma_ctx.h" #include "udma_common.h" +#define JFR_JFCN_H_OFFSET 12U + struct udma_jfr_idx_que { struct udma_buf buf; struct udma_table jfr_idx_table; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index ce6b9db0ea06..dcf0ae79d583 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -174,6 +174,7 @@ static struct ubcore_ops g_dev_ops = { .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, .query_jfs = udma_query_jfs, + .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; -- Gitee From d66418d853e2ac53263dca31bab25c52a1eff5d3 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:15:06 +0800 Subject: [PATCH 006/126] ub: udma: Support query table item from hw. commit a49e6f591920eda270ee67aa7c7a313131903714 openEuler This patch adds the ability to query table item from hardware, Such as jfc context and jetty group context. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dfx.c | 111 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 7 ++ drivers/ub/urma/hw/udma/udma_jfc.h | 71 +++++++++++++++++ 3 files changed, 189 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dfx.c b/drivers/ub/urma/hw/udma/udma_dfx.c index 96e6185c437f..7d186d85531d 100644 --- a/drivers/ub/urma/hw/udma/udma_dfx.c +++ b/drivers/ub/urma/hw/udma/udma_dfx.c @@ -432,6 +432,110 @@ static int udma_query_res_jetty(struct udma_dev *udma_dev, return ret; } +static int udma_query_res_jetty_grp(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jetty_group_val *res_jetty_grp = + (struct ubcore_res_jetty_group_val *)val->addr; + struct udma_jetty_grp_ctx *jetty_grpc; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + uint32_t *jetty_grp_id; + int i; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jetty_grp, + val, "jetty_grp"); + + jetty_grp_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jetty_grp.table, key->key); + if (!jetty_grp_id) { + dev_err(udma_dev->dev, "failed to query jetty grp, jetty_grp_id = %u.\n", + key->key); + return -EINVAL; + } + + res_jetty_grp->jetty_cnt = 0; + res_jetty_grp->jetty_list = vmalloc(sizeof(*res_jetty_grp->jetty_list) * + NUM_JETTY_PER_GROUP); + if (!res_jetty_grp->jetty_list) + return -ENOMEM; + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JETTY_GROUP_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) { + vfree(res_jetty_grp->jetty_list); + res_jetty_grp->jetty_list = NULL; + return -ENOMEM; + } + + jetty_grpc = (struct udma_jetty_grp_ctx *)mailbox->buf; + for (i = 0; i < NUM_JETTY_PER_GROUP; ++i) { + if (jetty_grpc->valid & BIT(i)) { + res_jetty_grp->jetty_list[res_jetty_grp->jetty_cnt] = + jetty_grpc->start_jetty_id + i; + 
++res_jetty_grp->jetty_cnt; + } + } + + if (res_jetty_grp->jetty_cnt == 0) { + vfree(res_jetty_grp->jetty_list); + res_jetty_grp->jetty_list = NULL; + } + + udma_dfx_ctx_print(udma_dev, "Jetty_grp", key->key, sizeof(*jetty_grpc) / sizeof(uint32_t), + (uint32_t *)jetty_grpc); + udma_free_cmd_mailbox(udma_dev, mailbox); + + return 0; +} + +static int udma_query_res_jfc(struct udma_dev *udma_dev, + struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + struct ubcore_res_jfc_val *res_jfc = (struct ubcore_res_jfc_val *)val->addr; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *jfcc; + uint32_t *jfc_id; + + if (key->key_cnt == 0) + return udma_query_res_list(udma_dev, &udma_dev->dfx_info->jfc, val, "jfc"); + + jfc_id = (uint32_t *)xa_load(&udma_dev->dfx_info->jfc.table, key->key); + if (!jfc_id) { + dev_err(udma_dev->dev, "failed to query jfc, jfc_id = %u.\n", + key->key); + return -EINVAL; + } + + mbox_attr.tag = key->key; + mbox_attr.op = UDMA_CMD_QUERY_JFC_CONTEXT; + mailbox = udma_mailbox_query_ctx(udma_dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfcc = (struct udma_jfc_ctx *)mailbox->buf; + res_jfc->jfc_id = key->key; + res_jfc->state = jfcc->state; + res_jfc->depth = 1 << (jfcc->shift + UDMA_JFC_DEPTH_SHIFT_BASE); + jfcc->cqe_va_l = 0; + jfcc->cqe_va_h = 0; + jfcc->token_en = 0; + jfcc->cqe_token_value = 0; + jfcc->record_db_addr_l = 0; + jfcc->record_db_addr_h = 0; + jfcc->remote_token_value = 0; + + udma_dfx_ctx_print(udma_dev, "JFC", key->key, sizeof(*jfcc) / sizeof(uint32_t), + (uint32_t *)jfcc); + udma_free_cmd_mailbox(udma_dev, mailbox); + + return 0; +} + static int udma_query_res_jfs(struct udma_dev *udma_dev, struct ubcore_res_key *key, struct ubcore_res_val *val) @@ -616,12 +720,19 @@ typedef int (*udma_query_res_handler)(struct udma_dev *udma_dev, static udma_query_res_handler g_udma_query_res_handlers[] = { [0] = NULL, + [UBCORE_RES_KEY_VTP] = NULL, + [UBCORE_RES_KEY_TP] = NULL, + 
[UBCORE_RES_KEY_TPG] = NULL, + [UBCORE_RES_KEY_UTP] = NULL, [UBCORE_RES_KEY_JFS] = udma_query_res_jfs, [UBCORE_RES_KEY_JFR] = udma_query_res_jfr, [UBCORE_RES_KEY_JETTY] = udma_query_res_jetty, + [UBCORE_RES_KEY_JETTY_GROUP] = udma_query_res_jetty_grp, + [UBCORE_RES_KEY_JFC] = udma_query_res_jfc, [UBCORE_RES_KEY_RC] = udma_query_res_rc, [UBCORE_RES_KEY_SEG] = udma_query_res_seg, [UBCORE_RES_KEY_DEV_TA] = udma_query_res_dev_ta, + [UBCORE_RES_KEY_DEV_TP] = NULL, }; int udma_query_res(struct ubcore_device *dev, struct ubcore_res_key *key, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5ee8f8f4403b..e1c578783a48 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -160,6 +160,13 @@ struct udma_jetty_ctx { uint32_t taack_nack_bm[32]; }; +struct udma_jetty_grp_ctx { + uint32_t start_jetty_id : 16; + uint32_t rsv : 11; + uint32_t jetty_number : 5; + uint32_t valid; +}; + static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index e225efdece4c..8cb7271739d7 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -7,6 +7,8 @@ #include "udma_dev.h" #include "udma_ctx.h" +#define UDMA_JFC_DEPTH_SHIFT_BASE 6 + struct udma_jfc { struct ubcore_jfc base; uint32_t jfcn; @@ -27,6 +29,75 @@ struct udma_jfc { uint32_t cq_shift; }; +struct udma_jfc_ctx { + /* DW0 */ + uint32_t state : 2; + uint32_t arm_st : 2; + uint32_t shift : 4; + uint32_t cqe_size : 1; + uint32_t record_db_en : 1; + uint32_t jfc_type : 1; + uint32_t inline_en : 1; + uint32_t cqe_va_l : 20; + /* DW1 */ + uint32_t cqe_va_h; + /* DW2 */ + uint32_t cqe_token_id : 20; + uint32_t cq_cnt_mode : 1; + uint32_t rsv0 : 3; + uint32_t ceqn : 8; + /* DW3 */ + uint32_t cqe_token_value : 24; + uint32_t rsv1 : 8; + /* DW4 */ + uint32_t pi : 
22; + uint32_t cqe_coalesce_cnt : 10; + /* DW5 */ + uint32_t ci : 22; + uint32_t cqe_coalesce_period : 3; + uint32_t rsv2 : 7; + /* DW6 */ + uint32_t record_db_addr_l; + /* DW7 */ + uint32_t record_db_addr_h : 26; + uint32_t rsv3 : 6; + /* DW8 */ + uint32_t push_usi_en : 1; + uint32_t push_cqe_en : 1; + uint32_t token_en : 1; + uint32_t rsv4 : 9; + uint32_t tpn : 20; + /* DW9 ~ DW12 */ + uint32_t rmt_eid[4]; + /* DW13 */ + uint32_t seid_idx : 10; + uint32_t rmt_token_id : 20; + uint32_t rsv5 : 2; + /* DW14 */ + uint32_t remote_token_value; + /* DW15 */ + uint32_t int_vector : 16; + uint32_t stars_en : 1; + uint32_t rsv6 : 15; + /* DW16 */ + uint32_t poll : 1; + uint32_t cqe_report_timer : 24; + uint32_t se : 1; + uint32_t arm_sn : 2; + uint32_t rsv7 : 4; + /* DW17 */ + uint32_t se_cqe_idx : 24; + uint32_t rsv8 : 8; + /* DW18 */ + uint32_t wr_cqe_idx : 22; + uint32_t rsv9 : 10; + /* DW19 */ + uint32_t cqe_cnt : 24; + uint32_t rsv10 : 8; + /* DW20 ~ DW31 */ + uint32_t rsv11[12]; +}; + static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) { return container_of(jfc, struct udma_jfc, base); -- Gitee From b5c5db8cdf1eb218f5c6d8a40d215013ffea4d9c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:16:51 +0800 Subject: [PATCH 007/126] ub: udma: Support create jfs. commit 30b22bb34e1059055f57a01f01520afe1b104545 openEuler This patch adds the ability to create jfs, During the creation process, driver will create jfs context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 2 +- drivers/ub/urma/hw/udma/udma_common.h | 16 ++ drivers/ub/urma/hw/udma/udma_dev.h | 4 + drivers/ub/urma/hw/udma/udma_jetty.c | 263 +++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 40 +++ drivers/ub/urma/hw/udma/udma_jfs.c | 357 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 26 ++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 8 files changed, 709 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_jetty.c create mode 100644 drivers/ub/urma/hw/udma/udma_jfs.c diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index a087da421b2e..7b1c82ab51dd 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -3,6 +3,6 @@ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o \ - udma_dfx.o + udma_dfx.o udma_jfs.o udma_jetty.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index aba7b4afddb3..b1b129ee4449 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -6,6 +6,7 @@ #include #include +#include "udma_ctx.h" #include "udma_dev.h" struct udma_jetty_grp { @@ -81,6 +82,21 @@ void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_ void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); + +static inline void udma_alloc_kernel_db(struct udma_dev *dev, + struct udma_jetty_queue *queue) +{ + queue->dwqe_addr = dev->k_db_base + JETTY_DSQE_OFFSET + + UDMA_HW_PAGE_SIZE * queue->id; + queue->db_addr = queue->dwqe_addr + UDMA_DOORBELL_OFFSET; +} + +static inline uint8_t 
to_ta_timeout(uint32_t err_timeout) +{ +#define TA_TIMEOUT_DIVISOR 8 + return err_timeout / TA_TIMEOUT_DIVISOR; +} + static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len) { return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 67e8847d66a6..469e55e93d7a 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -17,8 +17,12 @@ extern bool dump_aux_info; #define UBCORE_MAX_DEV_NAME 64 +#define WQE_BB_SIZE_SHIFT 6 + #define MAX_JETTY_IN_JETTY_GRP 32 +#define UDMA_USER_DATA_H_OFFSET 32U + #define MAX_WQEBB_IN_SQE 4 #define JETTY_DSQE_OFFSET 0x1000 diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c new file mode 100644 index 000000000000..a92fbc7d5d11 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include "udma_dev.h" +#include +#include "udma_cmd.h" +#include "udma_jfr.h" +#include "udma_jfs.h" +#include "udma_jfc.h" +#include "udma_jetty.h" + +bool well_known_jetty_pgsz_check = true; + +static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) +{ + struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table; + int id; + + id = ida_alloc_range(&ida_table->ida, cfg_id, cfg_id, GFP_KERNEL); + if (id < 0) { + dev_err(udma_dev->dev, "user specify id %u has been used, ret = %d.\n", cfg_id, id); + return id; + } + + return 0; +} + +static int udma_user_specify_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) +{ + if (cfg_id < udma_dev->caps.jetty.start_idx) + return udma_specify_rsvd_jetty_id(udma_dev, cfg_id); + + return udma_specify_adv_id(udma_dev, &udma_dev->jetty_table.bitmap_table, + cfg_id); +} + +int udma_alloc_jetty_id(struct udma_dev *udma_dev, uint32_t *idx, + struct udma_res *jetty_res) +{ + struct udma_group_bitmap *bitmap = &udma_dev->jetty_table.bitmap_table; + struct ida *ida = &udma_dev->rsvd_jetty_ida_table.ida; + uint32_t min = jetty_res->start_idx; + uint32_t next = jetty_res->next_idx; + uint32_t max; + int ret; + + if (jetty_res->max_cnt == 0) { + dev_err(udma_dev->dev, "ida alloc failed max_cnt is 0.\n"); + return -EINVAL; + } + + max = jetty_res->start_idx + jetty_res->max_cnt - 1; + + if (jetty_res != &udma_dev->caps.jetty) { + ret = ida_alloc_range(ida, next, max, GFP_KERNEL); + if (ret < 0) { + ret = ida_alloc_range(ida, min, max, GFP_KERNEL); + if (ret < 0) { + dev_err(udma_dev->dev, + "ida alloc failed %d.\n", ret); + return ret; + } + } + + *idx = (uint32_t)ret; + } else { + ret = udma_adv_id_alloc(udma_dev, bitmap, idx, false, next); + if (ret) { + ret = udma_adv_id_alloc(udma_dev, bitmap, idx, false, min); + if (ret) { + dev_err(udma_dev->dev, + "bitmap alloc failed %d.\n", ret); + 
return ret; + } + } + } + + jetty_res->next_idx = (*idx + 1) > max ? min : (*idx + 1); + + return 0; +} + +static int udma_alloc_normal_jetty_id(struct udma_dev *udma_dev, uint32_t *idx) +{ + int ret; + + ret = udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.jetty); + if (ret == 0) + return 0; + + ret = udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.user_ctrl_normal_jetty); + if (ret == 0) + return 0; + + return udma_alloc_jetty_id(udma_dev, idx, &udma_dev->caps.public_jetty); +} + +#define CFGID_CHECK(a, b) ((a) >= (b).start_idx && (a) < (b).start_idx + (b).max_cnt) + +static int udma_verify_jetty_type_dwqe(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.stars_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, cache lock st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.stars_jetty.start_idx, + udma_dev->caps.stars_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_ccu(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.ccu_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, ccu st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.ccu_jetty.start_idx, + udma_dev->caps.ccu_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_normal(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!CFGID_CHECK(cfg_id, udma_dev->caps.user_ctrl_normal_jetty)) { + dev_err(udma_dev->dev, + "user id %u error, user ctrl normal st idx %u cnt %u.\n", + cfg_id, + udma_dev->caps.user_ctrl_normal_jetty.start_idx, + udma_dev->caps.user_ctrl_normal_jetty.max_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type_urma_normal(struct udma_dev *udma_dev, + uint32_t cfg_id) +{ + if (!(CFGID_CHECK(cfg_id, udma_dev->caps.public_jetty) || + CFGID_CHECK(cfg_id, udma_dev->caps.hdc_jetty) || + CFGID_CHECK(cfg_id, udma_dev->caps.jetty))) { + dev_err(udma_dev->dev, + "user id %u error, ccu st idx %u cnt %u, 
stars st idx %u, normal st idx %u cnt %u.\n", + cfg_id, udma_dev->caps.ccu_jetty.start_idx, + udma_dev->caps.ccu_jetty.max_cnt, + udma_dev->caps.stars_jetty.start_idx, + udma_dev->caps.jetty.start_idx, + udma_dev->caps.jetty.max_cnt); + return -EINVAL; + } + + if (well_known_jetty_pgsz_check && PAGE_SIZE != UDMA_HW_PAGE_SIZE) { + dev_err(udma_dev->dev, "Does not support specifying Jetty ID on non-4KB page systems.\n"); + return -EINVAL; + } + + return 0; +} + +static int udma_verify_jetty_type(struct udma_dev *udma_dev, + enum udma_jetty_type jetty_type, uint32_t cfg_id) +{ + int (*udma_cfg_id_check[UDMA_JETTY_TYPE_MAX])(struct udma_dev *udma_dev, + uint32_t cfg_id) = { + udma_verify_jetty_type_dwqe, + udma_verify_jetty_type_ccu, + udma_verify_jetty_type_normal, + udma_verify_jetty_type_urma_normal + }; + + if (jetty_type < UDMA_JETTY_TYPE_MAX) { + if (!cfg_id) + return 0; + + return udma_cfg_id_check[jetty_type](udma_dev, cfg_id); + } + + dev_err(udma_dev->dev, "invalid jetty type 0x%x.\n", jetty_type); + return -EINVAL; +} + +static int udma_alloc_jetty_id_own(struct udma_dev *udma_dev, uint32_t *id, + enum udma_jetty_type jetty_type) +{ + int ret; + + switch (jetty_type) { + case UDMA_CACHE_LOCK_DWQE_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, + &udma_dev->caps.stars_jetty); + break; + case UDMA_NORMAL_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, + &udma_dev->caps.user_ctrl_normal_jetty); + break; + case UDMA_CCU_JETTY_TYPE: + ret = udma_alloc_jetty_id(udma_dev, id, &udma_dev->caps.ccu_jetty); + break; + default: + ret = udma_alloc_normal_jetty_id(udma_dev, id); + break; + } + + if (ret) + dev_err(udma_dev->dev, + "udma alloc jetty id own failed, type = %d, ret = %d.\n", + jetty_type, ret); + + return ret; +} + +int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp) +{ + int ret; + + if (udma_verify_jetty_type(udma_dev, sq->jetty_type, cfg_id)) + return -EINVAL; + + 
if (cfg_id > 0 && !jetty_grp) { + ret = udma_user_specify_jetty_id(udma_dev, cfg_id); + if (ret) + return ret; + + sq->id = cfg_id; + } else { + ret = udma_alloc_jetty_id_own(udma_dev, &sq->id, sq->jetty_type); + } + + return ret; +} + +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) +{ +#define UDMA_TA_TIMEOUT_MAX_INDEX 3 + uint32_t time[] = { + UDMA_TA_TIMEOUT_128MS, + UDMA_TA_TIMEOUT_1000MS, + UDMA_TA_TIMEOUT_8000MS, + UDMA_TA_TIMEOUT_64000MS, + }; + uint8_t index; + + index = to_ta_timeout(err_timeout); + if (index > UDMA_TA_TIMEOUT_MAX_INDEX) + index = UDMA_TA_TIMEOUT_MAX_INDEX; + + sq->ta_timeout = time[index]; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index e1c578783a48..fb70231278b7 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -6,7 +6,24 @@ #include "udma_common.h" +#define SQE_TOKEN_ID_L_MASK GENMASK(11, 0) +#define SQE_TOKEN_ID_H_OFFSET 12U +#define SQE_TOKEN_ID_H_MASK GENMASK(7, 0) +#define SQE_VA_L_OFFSET 12U +#define SQE_VA_L_VALID_BIT GENMASK(19, 0) +#define SQE_VA_H_OFFSET 32U +#define SQE_VA_H_VALID_BIT GENMASK(31, 0) #define JETTY_CTX_JFRN_H_OFFSET 12 +#define AVAIL_SGMT_OST_INIT 512 + +#define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) + +#define UDMA_TA_TIMEOUT_128MS 128 +#define UDMA_TA_TIMEOUT_1000MS 1000 +#define UDMA_TA_TIMEOUT_8000MS 8000 +#define UDMA_TA_TIMEOUT_64000MS 64000 + +#define UDMA_MAX_PRIORITY 16 enum jetty_state { JETTY_RESET, @@ -27,6 +44,11 @@ struct udma_jetty { bool ue_rx_closed; }; +enum jfsc_mode { + JFS, + JETTY, +}; + enum jetty_type { JETTY_RAW_OR_NIC, JETTY_UM, @@ -167,6 +189,20 @@ struct udma_jetty_grp_ctx { uint32_t valid; }; +static inline uint32_t to_udma_type(uint32_t trans_mode) +{ + switch (trans_mode) { + case UBCORE_TP_RM: + return JETTY_RM; + case UBCORE_TP_RC: + return JETTY_RC; + case UBCORE_TP_UM: + return JETTY_UM; + default: + return JETTY_TYPE_RESERVED; + } +} + static inline 
struct udma_jetty *to_udma_jetty(struct ubcore_jetty *jetty) { return container_of(jetty, struct udma_jetty, ubcore_jetty); @@ -182,4 +218,8 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu return container_of(queue, struct udma_jetty, sq); } +int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); + #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c new file mode 100644 index 000000000000..002636a03c21 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "UDMA: " fmt +#define pr_fmt(fmt) "UDMA: " fmt + +#include +#include +#include +#include +#include +#include +#include "udma_common.h" +#include "udma_dev.h" +#include +#include "udma_cmd.h" +#include "udma_jetty.h" +#include "udma_segment.h" +#include "udma_jfs.h" + +int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct udma_create_jetty_ucmd *ucmd) +{ + int ret; + + if (ucmd->sqe_bb_cnt == 0 || ucmd->buf_len == 0) { + dev_err(dev->dev, "invalid param, sqe_bb_cnt=%u, buf_len=%u.\n", + ucmd->sqe_bb_cnt, ucmd->buf_len); + return -EINVAL; + } + + sq->sqe_bb_cnt = ucmd->sqe_bb_cnt; + sq->buf.entry_cnt = ucmd->buf_len >> WQE_BB_SIZE_SHIFT; + if (sq->non_pin) { + sq->buf.addr = ucmd->buf_addr; + } else { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &sq->buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jetty/jfs queue addr, ret = %d.\n", + ret); + return ret; + } + } + + return 0; +} + +int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_cfg *jfs_cfg) +{ + uint32_t wqe_bb_depth; + uint32_t sqe_bb_cnt; + uint32_t 
size; + int ret; + + if (!jfs_cfg->flag.bs.lock_free) + spin_lock_init(&sq->lock); + + sq->max_inline_size = jfs_cfg->max_inline_data; + sq->max_sge_num = jfs_cfg->max_sge; + sq->tid = dev->tid; + sq->lock_free = jfs_cfg->flag.bs.lock_free; + + sqe_bb_cnt = sq_cal_wqebb_num(SQE_WRITE_NOTIFY_CTL_LEN, jfs_cfg->max_sge); + sq->sqe_bb_cnt = sqe_bb_cnt > (uint32_t)MAX_WQEBB_NUM ? (uint32_t)MAX_WQEBB_NUM : + sqe_bb_cnt; + + wqe_bb_depth = roundup_pow_of_two(sq->sqe_bb_cnt * jfs_cfg->depth); + sq->buf.entry_size = UDMA_JFS_WQEBB_SIZE; + size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); + sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; + + ret = udma_k_alloc_buf(dev, size, &sq->buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size); + return ret; + } + + sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!sq->wrid) { + udma_k_free_buf(dev, size, &sq->buf); + dev_err(dev->dev, + "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", + sq->id, sq->buf.entry_cnt); + return -ENOMEM; + } + + udma_alloc_kernel_db(dev, sq); + sq->kva_curr = sq->buf.kva; + + return 0; +} + +void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + uint32_t size; + + if (sq->buf.kva) { + size = sq->buf.entry_cnt * sq->buf.entry_size; + udma_k_free_buf(dev, size, &sq->buf); + kfree(sq->wrid); + return; + } + if (sq->non_pin) + return; + + unpin_queue_addr(sq->buf.umem); +} + +void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, void *mb_buf) +{ + struct udma_jetty_ctx *ctx = (struct udma_jetty_ctx *)mb_buf; + uint8_t i; + + ctx->state = JETTY_READY; + ctx->jfs_mode = JFS; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->sl = dev->udma_sl[UDMA_DEFAULT_SL_NUM]; + if (ctx->type == JETTY_RM || ctx->type == JETTY_RC) { + for (i = 0; i < dev->udma_total_sl_num; i++) + if (cfg->priority == dev->udma_sl[i]) + ctx->sl = cfg->priority; + } 
else if (ctx->type == JETTY_UM) { + ctx->sl = dev->unic_sl[UDMA_DEFAULT_SL_NUM]; + for (i = 0; i < dev->unic_sl_num; i++) + if (cfg->priority == dev->unic_sl[i]) + ctx->sl = cfg->priority; + } + ctx->sqe_base_addr_l = (jfs->sq.buf.addr >> SQE_VA_L_OFFSET) & + (uint32_t)SQE_VA_L_VALID_BIT; + ctx->sqe_base_addr_h = (jfs->sq.buf.addr >> SQE_VA_H_OFFSET) & + (uint32_t)SQE_VA_H_VALID_BIT; + ctx->sqe_token_id_l = jfs->sq.tid & (uint32_t)SQE_TOKEN_ID_L_MASK; + ctx->sqe_token_id_h = (jfs->sq.tid >> SQE_TOKEN_ID_H_OFFSET) & + (uint32_t)SQE_TOKEN_ID_H_MASK; + ctx->sqe_bb_shift = ilog2(roundup_pow_of_two(jfs->sq.buf.entry_cnt)); + ctx->tx_jfcn = cfg->jfc->id; + ctx->ta_timeout = to_ta_timeout(cfg->err_timeout); + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_RNR_RETRY)) + ctx->rnr_retry_num = cfg->rnr_retry; + + ctx->user_data_l = jfs->jfs_addr; + ctx->user_data_h = jfs->jfs_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->seid_idx = cfg->eid_index; + ctx->err_mode = cfg->flag.bs.error_suspend; + ctx->cmp_odr = cfg->flag.bs.outorder_comp; + ctx->avail_sgmt_ost = AVAIL_SGMT_OST_INIT; + ctx->pi_type = jfs->pi_type; + ctx->sqe_pld_tokenid = jfs->sq.tid & (uint32_t)SQE_PLD_TOKEN_ID_MASK; + ctx->next_send_ssn = get_random_u16(); + ctx->next_rcv_ssn = ctx->next_send_ssn; +} + +void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs) +{ + struct udma_dfx_jfs *jfs; + int ret; + + jfs = (struct udma_dfx_jfs *)xa_load(&udma_dev->dfx_info->jfs.table, + udma_jfs->sq.id); + if (jfs) { + dev_warn(udma_dev->dev, "jfs_id(%u) already exists in DFX.\n", + udma_jfs->sq.id); + return; + } + + jfs = kzalloc(sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return; + + jfs->id = udma_jfs->sq.id; + jfs->depth = udma_jfs->sq.buf.entry_cnt / udma_jfs->sq.sqe_bb_cnt; + + write_lock(&udma_dev->dfx_info->jfs.rwlock); + ret = xa_err(xa_store(&udma_dev->dfx_info->jfs.table, udma_jfs->sq.id, + jfs, GFP_KERNEL)); + if (ret) { + write_unlock(&udma_dev->dfx_info->jfs.rwlock); + dev_err(udma_dev->dev, 
"store jfs_id(%u) to table failed in DFX.\n", + udma_jfs->sq.id); + kfree(jfs); + return; + } + + ++udma_dev->dfx_info->jfs.cnt; + write_unlock(&udma_dev->dfx_info->jfs.rwlock); +} + +static int udma_create_hw_jfs_ctx(struct udma_dev *dev, struct udma_jfs *jfs, + struct ubcore_jfs_cfg *cfg) +{ + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + int ret; + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jfs priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; + } + + udma_init_jfsc(dev, cfg, jfs, &ctx); + attr.tag = jfs->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr); + if (ret) { + dev_err(dev->dev, "failed to upgrade JFSC, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + struct udma_context *uctx; + unsigned long byte; + + if (udata) { + if (!udata->udrv_data) { + dev_err(dev->dev, "udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + dev_err(dev->dev, "jfs in_len %u or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jfs udata, ret = %lu.\n", byte); + return -EFAULT; + } + + uctx = to_udma_context(udata->uctx); + jfs->sq.tid = uctx->tid; + jfs->jfs_addr = ucmd->jetty_addr; + jfs->pi_type = ucmd->pi_type; + jfs->sq.non_pin = ucmd->non_pin; + jfs->sq.jetty_type = (enum udma_jetty_type)ucmd->jetty_type; + jfs->sq.id = ucmd->jfs_id; + } else { + jfs->jfs_addr = (uintptr_t)&jfs->sq; + jfs->sq.jetty_type = (enum udma_jetty_type)UDMA_URMA_NORMAL_JETTY_TYPE; + } + + return 0; +} + +static int udma_alloc_jfs_sq(struct udma_dev *dev, 
struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + int ret; + + ret = udma_get_user_jfs_cmd(dev, jfs, udata, &ucmd); + if (ret) + goto err_get_user_cmd; + + ret = alloc_jetty_id(dev, &jfs->sq, jfs->sq.id, NULL); + if (ret) { + dev_err(dev->dev, "failed to alloc_id.\n"); + goto err_alloc_id; + } + jfs->ubcore_jfs.jfs_id.id = jfs->sq.id; + jfs->ubcore_jfs.jfs_cfg = *cfg; + udma_set_query_flush_time(&jfs->sq, cfg->err_timeout); + + ret = xa_err(xa_store(&dev->jetty_table.xa, jfs->sq.id, &jfs->sq, GFP_KERNEL)); + if (ret) { + dev_err(dev->dev, "failed to store_sq(%u), ret=%d.", jfs->sq.id, ret); + goto err_store_sq; + } + + ret = udata ? udma_alloc_u_sq_buf(dev, &jfs->sq, &ucmd) : + udma_alloc_k_sq_buf(dev, &jfs->sq, cfg); + if (ret) + goto err_alloc_sq_buf; + + jfs->sq.trans_mode = cfg->trans_mode; + + return ret; + +err_alloc_sq_buf: + xa_erase(&dev->jetty_table.xa, jfs->sq.id); +err_store_sq: + if (jfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, jfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + jfs->sq.id, false); +err_alloc_id: +err_get_user_cmd: + return ret; +} + +struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *dev = to_udma_dev(ub_dev); + struct udma_jfs *jfs; + int ret; + + if (cfg->trans_mode == UBCORE_TP_RC) { + dev_err(dev->dev, "jfs not support RC transmode.\n"); + return NULL; + } + + jfs = kcalloc(1, sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return NULL; + + ret = udma_alloc_jfs_sq(dev, cfg, jfs, udata); + if (ret) { + dev_err(dev->dev, "failed to alloc_jfs_sq, ret = %d.\n", ret); + goto err_alloc_sq; + } + + ret = udma_create_hw_jfs_ctx(dev, jfs, cfg); + if (ret) { + dev_err(dev->dev, + "post mailbox create jfs ctx failed, ret = %d.\n", ret); + goto err_create_hw_jfs; + } + + jfs->mode = UDMA_NORMAL_JFS_TYPE; + jfs->sq.state 
= UBCORE_JETTY_STATE_READY; + refcount_set(&jfs->ae_refcount, 1); + init_completion(&jfs->ae_comp); + if (dfx_switch) + udma_dfx_store_jfs_id(dev, jfs); + + return &jfs->ubcore_jfs; + +err_create_hw_jfs: + udma_free_sq_buf(dev, &jfs->sq); + xa_erase(&dev->jetty_table.xa, jfs->sq.id); + if (jfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, jfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + jfs->sq.id, false); +err_alloc_sq: + kfree(jfs); + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 39a7b5d1bfc4..425c87400fec 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -6,6 +6,17 @@ #include "udma_common.h" +#define MAX_WQEBB_NUM 4 +#define UDMA_JFS_WQEBB_SIZE 64 +#define UDMA_JFS_SGE_SIZE 16 + +#define SQE_WRITE_NOTIFY_CTL_LEN 80 + +enum udma_jfs_type { + UDMA_NORMAL_JFS_TYPE, + UDMA_KERNEL_STARS_JFS_TYPE, +}; + struct udma_jfs { struct ubcore_jfs ubcore_jfs; struct udma_jetty_queue sq; @@ -27,4 +38,19 @@ static inline struct udma_jfs *to_udma_jfs_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfs, sq); } +static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) +{ + return (sqe_ctl_len + (sge_num - 1) * UDMA_JFS_SGE_SIZE) / + UDMA_JFS_WQEBB_SIZE + 1; +} + +struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata); +int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct udma_create_jetty_ucmd *ucmd); +int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_cfg *jfs_cfg); +void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); + #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index dcf0ae79d583..bf1ffe10367f 100644 --- 
a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -19,6 +19,7 @@ #include "udma_dev.h" #include "udma_eq.h" #include "udma_segment.h" +#include "udma_jfs.h" #include "udma_cmd.h" #include "udma_ctx.h" #include "udma_rct.h" @@ -173,6 +174,7 @@ static struct ubcore_ops g_dev_ops = { .unregister_seg = udma_unregister_seg, .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, + .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, -- Gitee From cc7db36903bcd26a404d2769d1a6a7fea8d271e5 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 10:18:46 +0800 Subject: [PATCH 008/126] ub: udma: Support destroy jfs. commit 717d2a2e8d2b4539b400f0f4f04833cab2fdc4e3 openEuler This patch adds the ability to destroy jfs, During the destruction process, driver will destroy jfs context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_jetty.c | 200 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 4 + drivers/ub/urma/hw/udma/udma_jfs.c | 54 ++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 6 files changed, 262 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 469e55e93d7a..bc6ad5c509fd 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -37,6 +37,8 @@ extern bool dump_aux_info; #define UDMA_MAX_SL_NUM 16 #define UDMA_DEFAULT_SL_NUM 0 +#define UDMA_RCV_SEND_MAX_DIFF 512U + #define UDMA_CQE_SIZE 64 #define UDMA_MAX_GRANT_SIZE 0xFFFFFFFFF000 diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index a92fbc7d5d11..b012010c0e74 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -261,3 +261,203 @@ void 
udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) sq->ta_timeout = time[index]; } + +int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id) +{ + struct ubase_mbx_attr attr = {}; + int ret; + + attr.tag = jetty_id; + attr.op = UDMA_CMD_DESTROY_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, NULL, 0, &attr); + if (ret) + dev_err(dev->dev, + "post mailbox destroy jetty ctx failed, ret = %d.\n", ret); + + return ret; +} + +int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, + enum jetty_state state) +{ + struct udma_jetty_ctx *ctx, *ctx_mask; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for jettyc.\n"); + return -EINVAL; + } + + ctx = (struct udma_jetty_ctx *)mailbox->buf; + + /* Optimize chip access performance. */ + ctx_mask = (struct udma_jetty_ctx *)((char *)ctx + UDMA_JFS_MASK_OFFSET); + memset(ctx_mask, 0xff, sizeof(struct udma_jetty_ctx)); + ctx->state = state; + ctx_mask->state = 0; + + mbox_attr.tag = jetty_id; + mbox_attr.op = UDMA_CMD_MODIFY_JFS_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to upgrade jettyc, ret = %d.\n", ret); + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_query_jetty_ctx(struct udma_dev *dev, + struct udma_jetty_ctx *jfs_ctx, + uint32_t jetty_id) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + + mbox_attr.tag = jetty_id; + mbox_attr.op = UDMA_CMD_QUERY_JFS_CONTEXT; + mailbox = udma_mailbox_query_ctx(dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + memcpy((void *)jfs_ctx, mailbox->buf, sizeof(*jfs_ctx)); + + udma_free_cmd_mailbox(dev, mailbox); + + return 0; +} + +static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_timeout) +{ + uint32_t wait_time; + + if (*sum_times > ta_timeout) + return 
true; + + wait_time = 1 << times; + msleep(wait_time); + *sum_times += wait_time; + + return false; +} + +static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + struct udma_jetty_ctx ctx = {}; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t times = 0; + + while (true) { + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) + return false; + + if (ctx.flush_cqe_done) + return true; + + if (udma_wait_timeout(&sum_times, times, UDMA_TA_TIMEOUT_64000MS)) + break; + + times++; + } + + /* In the flip scenario, ctx.next_rcv_ssn - ctx.next_send_ssn value is less than 512. */ + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.flush_ssn_vld && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF) + return true; + + udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), + (uint32_t *)&ctx); + + return false; +} + +int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ + struct udma_jetty_ctx ctx = {}; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t times = 0; + int ret; + + while (true) { + ret = udma_query_jetty_ctx(dev, &ctx, sq->id); + if (ret) { + dev_err(dev->dev, "query jetty ctx failed, id = %u, ret = %d.\n", + sq->id, ret); + return ret; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.PI == ctx.CI && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_READY) + break; + + if (rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_ERROR) + break; + + if (udma_wait_timeout(&sum_times, times, sq->ta_timeout)) { + dev_warn(dev->dev, "TA timeout, id = %u. 
PI = %d, CI = %d, nxt_send_ssn = %d nxt_rcv_ssn = %d state = %d.\n", + sq->id, ctx.PI, ctx.CI, ctx.next_send_ssn, + ctx.next_rcv_ssn, ctx.state); + break; + } + times++; + } + + return 0; +} + +static bool udma_destroy_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq) +{ +#define UDMA_DESTROY_JETTY_DELAY_TIME 100U + + if (sq->state != UBCORE_JETTY_STATE_READY && sq->state != UBCORE_JETTY_STATE_SUSPENDED) + goto query_jetty_fd; + + if (dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE) + goto modify_to_err; + + if (udma_modify_jetty_precondition(dev, sq)) + return false; + +modify_to_err: + if (udma_set_jetty_state(dev, sq->id, JETTY_ERROR)) { + dev_err(dev->dev, "modify jetty to error failed, id: %u.\n", + sq->id); + return false; + } + + sq->state = UBCORE_JETTY_STATE_ERROR; + +query_jetty_fd: + if (!udma_query_jetty_fd(dev, sq)) + return false; + + udelay(UDMA_DESTROY_JETTY_DELAY_TIME); + + return true; +} + +int udma_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue *sq) +{ + int ret; + + if (!udma_destroy_jetty_precondition(dev, sq)) + return -EFAULT; + + if (sq->state != UBCORE_JETTY_STATE_RESET) { + ret = udma_destroy_hw_jetty_ctx(dev, sq->id); + if (ret) { + dev_err(dev->dev, "jetty destroyed failed, id: %u.\n", + sq->id); + return ret; + } + } + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index fb70231278b7..63d7073b8631 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -15,6 +15,7 @@ #define SQE_VA_H_VALID_BIT GENMASK(31, 0) #define JETTY_CTX_JFRN_H_OFFSET 12 #define AVAIL_SGMT_OST_INIT 512 +#define UDMA_JFS_MASK_OFFSET 128 #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) @@ -220,6 +221,9 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +int 
udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue *sq); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 002636a03c21..cb00cec5ccfd 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -355,3 +355,57 @@ struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, kfree(jfs); return NULL; } + +static void udma_free_jfs(struct ubcore_jfs *jfs) +{ + struct udma_dev *dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *ujfs = to_udma_jfs(jfs); + + xa_erase(&dev->jetty_table.xa, ujfs->sq.id); + + if (refcount_dec_and_test(&ujfs->ae_refcount)) + complete(&ujfs->ae_comp); + wait_for_completion(&ujfs->ae_comp); + + if (dfx_switch) + udma_dfx_delete_id(dev, &dev->dfx_info->jfs, jfs->jfs_id.id); + + if (ujfs->mode == UDMA_NORMAL_JFS_TYPE) + udma_free_sq_buf(dev, &ujfs->sq); + else + kfree(ujfs->sq.wrid); + + if (ujfs->sq.id < dev->caps.jetty.start_idx) + udma_id_free(&dev->rsvd_jetty_ida_table, ujfs->sq.id); + else + udma_adv_id_free(&dev->jetty_table.bitmap_table, + ujfs->sq.id, false); + + kfree(ujfs); +} + +int udma_destroy_jfs(struct ubcore_jfs *jfs) +{ + struct udma_dev *dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *ujfs = to_udma_jfs(jfs); + int ret; + + if (!ujfs->ue_rx_closed && udma_close_ue_rx(dev, true, true, false, 0)) { + dev_err(dev->dev, "close ue rx failed when destroying jfs.\n"); + return -EINVAL; + } + + ret = udma_modify_and_destroy_jetty(dev, &ujfs->sq); + if (ret) { + dev_info(dev->dev, "udma modify error and destroy jfs failed, id: %u.\n", + jfs->jfs_id.id); + if (!ujfs->ue_rx_closed) + udma_open_ue_rx(dev, true, true, false, 0); + return ret; + } + + udma_free_jfs(jfs); + udma_open_ue_rx(dev, true, true, false, 0); + + return 0; +} diff --git 
a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 425c87400fec..ed1ff16e4573 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -47,6 +47,7 @@ static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfs(struct ubcore_jfs *jfs); int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd); int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index bf1ffe10367f..2d0b1b0f7332 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -176,6 +176,7 @@ static struct ubcore_ops g_dev_ops = { .unimport_seg = udma_unimport_seg, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, + .destroy_jfs = udma_destroy_jfs, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; -- Gitee From 430d5b8fa4f6c05706c4de424d78b0abe9a9d41f Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 14:49:06 +0800 Subject: [PATCH 009/126] ub: udma: Support create jfr. commit 4d99b1a2909140212152b956ae8969405ba3bb30 openEuler This patch adds the ability to create jfr, During the creation process, driver will create jfr context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Makefile | 2 +- drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.c | 447 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 63 ++++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 515 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/hw/udma/udma_jfr.c diff --git a/drivers/ub/urma/hw/udma/Makefile b/drivers/ub/urma/hw/udma/Makefile index 7b1c82ab51dd..8eddc0984ac7 100644 --- a/drivers/ub/urma/hw/udma/Makefile +++ b/drivers/ub/urma/hw/udma/Makefile @@ -3,6 +3,6 @@ udma-$(CONFIG_UB_UDMA) := udma_main.o udma_cmd.o udma_common.o udma_ctx.o udma_db.o \ udma_rct.o udma_tid.o udma_debugfs.o udma_eq.o udma_jfc.o \ udma_ctrlq_tp.o udma_eid.o udma_ctl.o udma_segment.o \ - udma_dfx.o udma_jfs.o udma_jetty.o + udma_dfx.o udma_jfs.o udma_jetty.o udma_jfr.o obj-m := udma.o diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index bc6ad5c509fd..7656124f875a 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -19,6 +19,8 @@ extern bool dump_aux_info; #define WQE_BB_SIZE_SHIFT 6 +#define UDMA_CTX_NUM 2 + #define MAX_JETTY_IN_JETTY_GRP 32 #define UDMA_USER_DATA_H_OFFSET 32U diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c new file mode 100644 index 000000000000..9783907f4602 --- /dev/null +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "UDMA: " fmt + +#include +#include "udma_cmd.h" +#include +#include "udma_jetty.h" +#include "udma_common.h" +#include "udma_db.h" +#include "udma_jfc.h" +#include "udma_jfr.h" + +const char *state_str[] = { + "RESET", + "READY", + "ERROR", + "INVALID" +}; + +static int udma_verify_jfr_param(struct udma_dev *dev, + struct ubcore_jfr_cfg *cfg) +{ + if (!cfg->max_sge || !cfg->depth || cfg->depth > dev->caps.jfr.depth || + cfg->max_sge > dev->caps.jfr_sge) { + dev_err(dev->dev, "Invalid jfr param, depth = %u, max_sge = %u.\n", + cfg->depth, cfg->max_sge); + return -EINVAL; + } + + if (cfg->flag.bs.token_policy > UBCORE_TOKEN_PLAIN_TEXT) { + dev_err(dev->dev, "jfr key policy = %d is not supported now.\n", + cfg->flag.bs.token_policy); + return -EINVAL; + } + + return 0; +} + +static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) +{ + uint32_t rqe_buf_size; + uint32_t idx_buf_size; + uint32_t sge_per_wqe; + int ret; + + sge_per_wqe = min(jfr->max_sge, dev->caps.jfr_sge); + jfr->rq.buf.entry_size = UDMA_SGE_SIZE * sge_per_wqe; + jfr->rq.buf.entry_cnt = jfr->wqe_cnt; + rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, rqe_buf_size, &jfr->rq.buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc rq buffer for jfr when buffer size = %u.\n", + rqe_buf_size); + return ret; + } + + jfr->idx_que.buf.entry_size = UDMA_IDX_QUE_ENTRY_SZ; + jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; + idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, idx_buf_size, &jfr->idx_que.buf); + if (ret) { + dev_err(dev->dev, + "failed to alloc idx que buffer for jfr when buffer size = %u.\n", + idx_buf_size); + goto err_idx_que; + } + + jfr->rq.wrid = kcalloc(1, jfr->rq.buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!jfr->rq.wrid) + goto err_wrid; + + jfr->jetty_addr = (uintptr_t)&jfr->rq; + + if (udma_alloc_sw_db(dev, &jfr->sw_db, UDMA_JFR_TYPE_DB)) { + 
dev_err(dev->dev, "failed to alloc sw db for jfr(%u).\n", jfr->rq.id); + goto err_alloc_db; + } + + udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0); + + jfr->rq.tid = dev->tid; + + return 0; + +err_alloc_db: + kfree(jfr->rq.wrid); +err_wrid: + udma_k_free_buf(dev, idx_buf_size, &jfr->idx_que.buf); +err_idx_que: + udma_k_free_buf(dev, rqe_buf_size, &jfr->rq.buf); + + return -ENOMEM; +} + +static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + unsigned long byte; + int ret; + + if (!udata->udrv_data) { + dev_err(dev->dev, "jfr udata udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + dev_err(dev->dev, "jfr in_len %u or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jfr udata, byte = %lu.\n", byte); + return -EFAULT; + } + + if (!ucmd->non_pin) { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfr->rq.buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr rqe buf addr, ret = %d.\n", ret); + return ret; + } + + ret = pin_queue_addr(dev, ucmd->idx_addr, ucmd->idx_len, + &jfr->idx_que.buf); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr idx que addr, ret = %d.\n", ret); + goto err_pin_idx_buf; + } + } else { + jfr->rq.buf.addr = ucmd->buf_addr; + jfr->idx_que.buf.addr = ucmd->idx_addr; + } + + jfr->udma_ctx = to_udma_context(udata->uctx); + jfr->sw_db.db_addr = ucmd->db_addr; + jfr->jfr_sleep_buf.db_addr = ucmd->jfr_sleep_buf; + + if (!ucmd->non_pin) { + ret = udma_pin_sw_db(jfr->udma_ctx, &jfr->sw_db); + if (ret) { + dev_err(dev->dev, + "failed to pin jfr sw db addr, ret = %d.\n", ret); + goto err_pin_sw_db; + } + + ret = udma_pin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); 
+ if (ret) { + dev_err(dev->dev, + "failed to pin jfr sleep time buf, ret = %d.\n", ret); + goto err_pin_jfr_sleep_buf; + } + } + + jfr->jetty_addr = ucmd->jetty_addr; + jfr->rq.tid = jfr->udma_ctx->tid; + + return ret; + +err_pin_jfr_sleep_buf: + udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); +err_pin_sw_db: + unpin_queue_addr(jfr->idx_que.buf.umem); +err_pin_idx_buf: + unpin_queue_addr(jfr->rq.buf.umem); + return ret; +} + +static int udma_get_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + + if (udata == NULL) + return udma_get_k_jfr_buf(dev, jfr); + else + return udma_get_u_jfr_buf(dev, jfr, udata, &ucmd); +} + +static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) +{ + uint32_t size; + + if (!jfr->rq.buf.kva && !jfr->idx_que.buf.kva && + jfr->sw_db.page && jfr->jfr_sleep_buf.page) { + udma_unpin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); + udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); + unpin_queue_addr(jfr->idx_que.buf.umem); + unpin_queue_addr(jfr->rq.buf.umem); + return; + } + + if (jfr->rq.buf.kva) { + size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size; + udma_k_free_buf(dev, size, &jfr->rq.buf); + udma_free_sw_db(dev, &jfr->sw_db); + } + + if (jfr->idx_que.buf.kva) { + size = jfr->idx_que.buf.entry_cnt * jfr->idx_que.buf.entry_size; + udma_k_free_buf(dev, size, &jfr->idx_que.buf); + udma_destroy_udma_table(dev, &jfr->idx_que.jfr_idx_table, "JFR_IDX"); + } + + kfree(jfr->rq.wrid); +} + +static enum udma_rx_limit_wl to_udma_limit_wl(uint32_t rx_threshold) +{ + if (rx_threshold >= LIMIT_WL_4096_V) + return UDMA_RX_LIMIT_WL_4096; + if (rx_threshold >= LIMIT_WL_512_V) + return UDMA_RX_LIMIT_WL_512; + if (rx_threshold >= LIMIT_WL_64_V) + return UDMA_RX_LIMIT_WL_64; + + return UDMA_RX_LIMIT_WL_0; +} + +static void udma_init_jfrc(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg, + struct udma_jfr *jfr, void *mb_buf, + uint32_t rx_threshold) +{ + struct 
udma_jfr_ctx *ctx = (struct udma_jfr_ctx *)mb_buf; + struct udma_jfc *jfc = to_udma_jfc(cfg->jfc); + uint32_t tid = jfr->rq.tid; + uint64_t db_addr; + + db_addr = jfr->sw_db.db_addr; + + memset(ctx, 0, sizeof(struct udma_jfr_ctx) * UDMA_CTX_NUM); + ctx->state = UDMA_JFR_STATE_READY; + ctx->record_db_en = 1; + ctx->rqe_base_addr_l = (jfr->rq.buf.addr >> RQE_VA_L_PAGE_4K_OFFSET) & + (uint32_t)RQE_VA_L_VALID_BIT; + ctx->rqe_base_addr_h = (jfr->rq.buf.addr >> (uint32_t)RQE_VA_H_PAGE_4K_OFFSET) & + (uint32_t)RQE_VA_H_VALID_BIT; + ctx->idx_que_addr_l = (jfr->idx_que.buf.addr >> JFR_IDX_VA_L_PAGE_4K_OFFSET) & + (uint32_t)JFR_IDX_VA_L_VALID_BIT; + ctx->idx_que_addr_h = (jfr->idx_que.buf.addr >> (uint32_t)JFR_IDX_VA_H_PAGE_4K_OFFSET) & + (uint32_t)JFR_IDX_VA_H_VALID_BIT; + ctx->record_db_addr_l = (db_addr >> JFR_DB_VA_L_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_L_VALID_BIT; + ctx->record_db_addr_m = (db_addr >> (uint32_t)JFR_DB_VA_M_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_M_VALID_BIT; + ctx->record_db_addr_h = (db_addr >> (uint32_t)JFR_DB_VA_H_PAGE_64_OFFSET) & + (uint32_t)JFR_DB_VA_H_VALID_BIT; + ctx->rqe_token_id_l = tid & (uint32_t)RQE_TOKEN_ID_L_MASK; + ctx->rqe_token_id_h = (tid >> RQE_TOKEN_ID_H_OFFSET) & (uint32_t)RQE_TOKEN_ID_H_MASK; + ctx->jfcn_l = cfg->jfc->id & (uint32_t)JFR_JFCN_L_VALID_BIT; + ctx->jfcn_h = (cfg->jfc->id >> JFR_JFCN_H_OFFSET) & (uint32_t)JFR_JFCN_H_VALID_BIT; + if (cfg->min_rnr_timer > UDMA_RNR_MAX) { + dev_warn(dev->dev, + "min_rnr_timer is out of range, max_value(%d) is applied.\n", + UDMA_RNR_MAX); + ctx->rnr_timer = UDMA_RNR_MAX; + } else { + ctx->rnr_timer = cfg->min_rnr_timer; + } + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) + ctx->token_en = 1; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->token_value = cfg->token_value.token; + ctx->user_data_l = jfr->jetty_addr; + ctx->user_data_h = jfr->jetty_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->rqe_size_shift = ilog2(jfr->max_sge); + ctx->rqe_shift = ilog2(jfr->wqe_cnt); + if 
(!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->cqeie = jfc->inline_en; + + ctx->limit_wl = (uint32_t)to_udma_limit_wl(rx_threshold); + ctx->pld_token_id = tid & (uint32_t)JFR_PLD_TOKEN_ID_MASK; +} + +static void udma_reset_sw_k_jfr_queue(struct udma_jfr *jfr) +{ + ida_destroy(&jfr->idx_que.jfr_idx_table.ida_table.ida); + ida_init(&jfr->idx_que.jfr_idx_table.ida_table.ida); + jfr->rq.pi = 0; + jfr->rq.ci = 0; + *jfr->sw_db.db_record = 0; +} + +static int udma_hw_init_jfrc(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg, + struct udma_jfr *jfr, uint32_t rx_threshold) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfr_ctx *ctx = NULL; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFRC.\n"); + return -ENOMEM; + } + + udma_init_jfrc(dev, cfg, jfr, mailbox->buf, rx_threshold); + + mbox_attr.tag = jfr->rq.id; + mbox_attr.op = UDMA_CMD_CREATE_JFR_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post mbox cmd of create JFRC, ret = %d.\n", + ret); + + if (jfr->rq.buf.kva) + udma_reset_sw_k_jfr_queue(jfr); + + ctx = (struct udma_jfr_ctx *)mailbox->buf; + ctx->token_value = 0; + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static void set_jfr_param(struct udma_jfr *jfr, struct ubcore_jfr_cfg *cfg) +{ + if (cfg->depth < UDMA_MIN_JFR_DEPTH) + jfr->wqe_cnt = UDMA_MIN_JFR_DEPTH; + else + jfr->wqe_cnt = roundup_pow_of_two(cfg->depth); + + jfr->ubcore_jfr.jfr_id.id = jfr->rq.id; + jfr->ubcore_jfr.jfr_cfg = *cfg; + jfr->max_sge = roundup_pow_of_two(cfg->max_sge); + jfr->ubcore_jfr.jfr_cfg.max_sge = jfr->max_sge; + jfr->ubcore_jfr.jfr_cfg.depth = jfr->wqe_cnt; + jfr->state = UBCORE_JFR_STATE_READY; + + if (!cfg->flag.bs.lock_free) + spin_lock_init(&jfr->lock); +} + +static int udma_alloc_jfr_id(struct udma_dev *udma_dev, uint32_t cfg_id, uint32_t *idx) +{ + struct udma_ida *ida_table 
= &udma_dev->jfr_table.ida_table; + uint32_t min; + uint32_t max; + int id; + + if (cfg_id && (cfg_id < ida_table->min || cfg_id > ida_table->max)) { + dev_err(udma_dev->dev, + "user specify id %u error, min %u max %u.\n", + cfg_id, ida_table->min, ida_table->max); + return -EINVAL; + } + + spin_lock(&ida_table->lock); + min = cfg_id ? cfg_id : ida_table->next; + max = cfg_id ? cfg_id : ida_table->max; + id = ida_alloc_range(&ida_table->ida, min, max, GFP_ATOMIC); + if (id < 0) { + if (!cfg_id) + id = ida_alloc_range(&ida_table->ida, min = ida_table->min, + max, GFP_ATOMIC); + if (id < 0) { + dev_err(udma_dev->dev, + "alloc jfr id range (%u - %u) failed, ret = %d.\n", + min, max, id); + spin_unlock(&ida_table->lock); + + return id; + } + } + + *idx = (uint32_t)id; + + if (!cfg_id) + ida_table->next = (uint32_t)id + 1 > ida_table->max ? + ida_table->min : (uint32_t)id + 1; + spin_unlock(&ida_table->lock); + + return 0; +} + +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, + struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct udma_jfr *udma_jfr; + int ret; + + ret = udma_verify_jfr_param(udma_dev, cfg); + if (ret) { + dev_err(udma_dev->dev, "verify jfr param failed.\n"); + return NULL; + } + + udma_jfr = kzalloc(sizeof(*udma_jfr), GFP_KERNEL); + if (!udma_jfr) + return NULL; + + ret = udma_alloc_jfr_id(udma_dev, cfg->id, &udma_jfr->rq.id); + if (ret) + goto err_alloc_jfr_id; + + set_jfr_param(udma_jfr, cfg); + + ret = udma_get_jfr_buf(udma_dev, udma_jfr, udata); + if (ret) + goto err_get_jfr_buf; + + ret = xa_err(xa_store(&udma_dev->jfr_table.xa, udma_jfr->rq.id, + udma_jfr, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, "store jfr to jfr_table failed.\n"); + goto err_xa_store; + } + + ret = udma_hw_init_jfrc(udma_dev, cfg, udma_jfr, 0); + if (ret) { + dev_err(udma_dev->dev, "failed to init JFRC, ret = %d.\n", ret); + goto err_hw_init_jfrc; + } + + 
refcount_set(&udma_jfr->ae_refcount, 1); + init_completion(&udma_jfr->ae_comp); + + if (dfx_switch) + udma_dfx_store_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id, "jfr"); + + return &udma_jfr->ubcore_jfr; + +err_hw_init_jfrc: + xa_erase(&udma_dev->jfr_table.xa, udma_jfr->rq.id); +err_xa_store: + udma_put_jfr_buf(udma_dev, udma_jfr); +err_get_jfr_buf: + udma_id_free(&udma_dev->jfr_table.ida_table, udma_jfr->rq.id); +err_alloc_jfr_id: + kfree(udma_jfr); + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index bffb68b3cdbd..bd29f9c4a526 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -8,7 +8,67 @@ #include "udma_ctx.h" #include "udma_common.h" +#define RQE_VA_L_PAGE_4K_OFFSET 12U +#define RQE_VA_L_VALID_BIT GENMASK(19, 0) +#define RQE_VA_H_OFFSET 20 +#define RQE_VA_H_PAGE_4K_OFFSET (RQE_VA_H_OFFSET + RQE_VA_L_PAGE_4K_OFFSET) +#define RQE_VA_H_VALID_BIT GENMASK(31, 0) + +#define RQE_TOKEN_ID_L_MASK GENMASK(13, 0) +#define RQE_TOKEN_ID_H_OFFSET 14U +#define RQE_TOKEN_ID_H_MASK GENMASK(5, 0) + +#define JFR_IDX_VA_L_PAGE_4K_OFFSET 12U +#define JFR_IDX_VA_L_VALID_BIT GENMASK(31, 0) +#define JFR_IDX_VA_H_OFFSET 32 +#define JFR_IDX_VA_H_PAGE_4K_OFFSET \ + (JFR_IDX_VA_H_OFFSET + JFR_IDX_VA_L_PAGE_4K_OFFSET) +#define JFR_IDX_VA_H_VALID_BIT GENMASK(19, 0) + +#define JFR_DB_VA_L_PAGE_64_OFFSET 6U +#define JFR_DB_VA_L_VALID_BIT GENMASK(23, 0) +#define JFR_DB_VA_M_OFFSET 24 +#define JFR_DB_VA_M_PAGE_64_OFFSET \ + (JFR_DB_VA_M_OFFSET + JFR_DB_VA_L_PAGE_64_OFFSET) +#define JFR_DB_VA_M_VALID_BIT GENMASK(31, 0) +#define JFR_DB_VA_H_OFFSET 32 +#define JFR_DB_VA_H_PAGE_64_OFFSET \ + (JFR_DB_VA_H_OFFSET + JFR_DB_VA_M_PAGE_64_OFFSET) +#define JFR_DB_VA_H_VALID_BIT GENMASK(1, 0) + +#define JFR_JFCN_L_VALID_BIT GENMASK(11, 0) #define JFR_JFCN_H_OFFSET 12U +#define JFR_JFCN_H_VALID_BIT GENMASK(7, 0) + +#define UDMA_JFR_DB_PI_M GENMASK(15, 0) + +#define JFR_PLD_TOKEN_ID_MASK GENMASK(19, 
0) + +#define UDMA_MIN_JFR_DEPTH 64 +#define UDMA_SGE_SIZE 16U +#define UDMA_IDX_QUE_ENTRY_SZ 4 +#define UDMA_RNR_MAX 19 + +enum jfr_state { + UDMA_JFR_STATE_RESET = 0, + UDMA_JFR_STATE_READY, + UDMA_JFR_STATE_ERROR, + JFR_STATE_NUM, +}; + +enum udma_rx_limit_wl { + UDMA_RX_LIMIT_WL_0 = 0, + UDMA_RX_LIMIT_WL_64, + UDMA_RX_LIMIT_WL_512, + UDMA_RX_LIMIT_WL_4096 +}; + +enum { + LIMIT_WL_0_V = 0, + LIMIT_WL_64_V = 64, + LIMIT_WL_512_V = 512, + LIMIT_WL_4096_V = 4096 +}; struct udma_jfr_idx_que { struct udma_buf buf; @@ -92,4 +152,7 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfr, rq); } +struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata); + #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2d0b1b0f7332..2b6cbcb0bd20 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -20,6 +20,7 @@ #include "udma_eq.h" #include "udma_segment.h" #include "udma_jfs.h" +#include "udma_jfr.h" #include "udma_cmd.h" #include "udma_ctx.h" #include "udma_rct.h" @@ -177,6 +178,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, + .create_jfr = udma_create_jfr, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; -- Gitee From 304526461d06e0bc5de0dac307b30325f96d4163 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 15:22:55 +0800 Subject: [PATCH 010/126] ub: udma: Support destroy jfr. commit c9ab1d26bebe7b020b7b0f938cd76b8157133f92 openEuler This patch adds the ability to destroy jfr, During the destruction process, driver will destroy jfr context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 1 + drivers/ub/urma/hw/udma/udma_jfr.c | 205 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 5 + drivers/ub/urma/hw/udma/udma_main.c | 6 + 4 files changed, 217 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 7656124f875a..9f2a20eea557 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -12,6 +12,7 @@ #include extern bool dfx_switch; +extern uint32_t jfr_sleep_time; extern uint32_t jfc_arm_mode; extern bool dump_aux_info; diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 9783907f4602..f15ca6b26d42 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -445,3 +445,208 @@ struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, kfree(udma_jfr); return NULL; } + +static int modify_jfr_context(struct udma_dev *dev, uint32_t jfrn, + bool state_flag, bool rx_threshold_flag, + struct ubcore_jfr_attr *attr) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jfr_ctx *ctx, *ctx_mask; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFRC.\n"); + return -EINVAL; + } + + ctx = (struct udma_jfr_ctx *)mailbox->buf; + ctx_mask = ctx + 1; + memset(ctx_mask, 0xff, sizeof(struct udma_jfr_ctx)); + if (state_flag) { + ctx->state = attr->state; + ctx_mask->state = 0; + } + + if (rx_threshold_flag) { + ctx->limit_wl = (uint32_t)to_udma_limit_wl(attr->rx_threshold); + ctx_mask->limit_wl = 0; + } + + mbox_attr.tag = jfrn; + mbox_attr.op = UDMA_CMD_MODIFY_JFR_CONTEXT; + + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post mbox cmd of modify JFRC, ret = %d.\n", ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + 
+static int udma_modify_jfr_to_error(struct ubcore_jfr *jfr, bool *need_sleep) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubcore_jfr_attr attr; + int ret = 0; + + if (udma_jfr->state == UBCORE_JFR_STATE_READY) { + attr.state = UBCORE_JFR_STATE_ERROR; + attr.mask = UBCORE_JFR_STATE; + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, true, false, &attr); + if (ret) { + dev_err(udma_dev->dev, "failed to modify jfr state to error, id: %u.\n", + udma_jfr->rq.id); + return ret; + } + + udma_jfr->state = UBCORE_JFR_STATE_ERROR; + + *need_sleep = true; + } + + return ret; +} + +static int udma_modify_jfr_to_reset(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + struct ubase_mbx_attr mbox_attr = {}; + int ret = 0; + + if (udma_jfr->state != UBCORE_JFR_STATE_RESET) { + mbox_attr.tag = udma_jfr->rq.id; + mbox_attr.op = UDMA_CMD_DESTROY_JFR_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, NULL, 0, &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, "failed to post jfr destroy cmd, id: %u.\n", + udma_jfr->rq.id); + return ret; + } + + udma_jfr->state = UBCORE_JFR_STATE_RESET; + } + + return ret; +} + +static int udma_modify_and_del_jfr(struct udma_dev *udma_dev, struct udma_jfr *udma_jfr) +{ + bool large_payload = false; + bool need_sleep = false; + uint32_t sleep_time = 0; + int ret = 0; + + ret = udma_modify_jfr_to_error(&udma_jfr->ubcore_jfr, &need_sleep); + if (ret) + return ret; + if (!udma_jfr->rq.buf.kva && udma_jfr->jfr_sleep_buf.page) + large_payload = !!(*(bool *)udma_jfr->jfr_sleep_buf.virt_addr); + if (need_sleep) { + sleep_time = large_payload ? 
jfr_sleep_time : UDMA_DEF_JFR_SLEEP_TIME; + dev_info_ratelimited(udma_dev->dev, "jfr sleep time = %u us.\n", sleep_time); + usleep_range(sleep_time, sleep_time + UDMA_SLEEP_DELAY_TIME); + } + + return udma_modify_jfr_to_reset(&udma_jfr->ubcore_jfr); +} + +static void udma_free_jfr(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id); + + xa_erase(&udma_dev->jfr_table.xa, udma_jfr->rq.id); + + if (refcount_dec_and_test(&udma_jfr->ae_refcount)) + complete(&udma_jfr->ae_comp); + wait_for_completion(&udma_jfr->ae_comp); + + udma_put_jfr_buf(udma_dev, udma_jfr); + udma_id_free(&udma_dev->jfr_table.ida_table, udma_jfr->rq.id); + jfr->jfr_cfg.token_value.token = 0; + kfree(udma_jfr); +} + +int udma_destroy_jfr(struct ubcore_jfr *jfr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + int ret; + + ret = udma_modify_and_del_jfr(udma_dev, udma_jfr); + if (ret) { + dev_err(udma_dev->dev, + "failed to modify and delete jfr, id: %u, ret = %d.\n", + udma_jfr->rq.id, ret); + return ret; + } + + udma_free_jfr(jfr); + + return 0; +} + +int udma_destroy_jfr_batch(struct ubcore_jfr **jfr, int jfr_cnt, int *bad_jfr_index) +{ + bool large_payload = false; + struct udma_dev *udma_dev; + struct udma_jfr *udma_jfr; + bool need_sleep = false; + uint32_t sleep_time = 0; + uint32_t i; + int ret; + + if (!jfr) { + pr_info("jfr array is null.\n"); + return -EINVAL; + } + + if (!jfr_cnt) { + pr_info("jfr cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jfr[0]->ub_dev); + + for (i = 0; i < jfr_cnt; i++) { + ret = udma_modify_jfr_to_error(jfr[i], &need_sleep); + if (ret) { + *bad_jfr_index = 0; + return ret; + } + + if (unlikely(large_payload)) + continue; + udma_jfr = to_udma_jfr(jfr[i]); + if (!udma_jfr->rq.buf.kva && udma_jfr->jfr_sleep_buf.page) + 
large_payload = !!(*(bool *)udma_jfr->jfr_sleep_buf.virt_addr); + } + + if (need_sleep) { + sleep_time = large_payload ? jfr_sleep_time : UDMA_DEF_JFR_SLEEP_TIME; + dev_info(udma_dev->dev, "jfr sleep time = %u us.\n", sleep_time); + usleep_range(sleep_time, sleep_time + UDMA_SLEEP_DELAY_TIME); + } + + for (i = 0; i < jfr_cnt; i++) { + ret = udma_modify_jfr_to_reset(jfr[i]); + if (ret) { + *bad_jfr_index = 0; + return ret; + } + } + + for (i = 0; i < jfr_cnt; i++) + udma_free_jfr(jfr[i]); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index bd29f9c4a526..43ee96cea746 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -49,6 +49,9 @@ #define UDMA_IDX_QUE_ENTRY_SZ 4 #define UDMA_RNR_MAX 19 +#define UDMA_DEF_JFR_SLEEP_TIME 1000 +#define UDMA_SLEEP_DELAY_TIME 10 + enum jfr_state { UDMA_JFR_STATE_RESET = 0, UDMA_JFR_STATE_READY, @@ -154,5 +157,7 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfr(struct ubcore_jfr *jfr); +int udma_destroy_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, int *bad_jfr_index); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2b6cbcb0bd20..c5911b027ba6 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -33,6 +33,7 @@ bool is_rmmod; static DEFINE_MUTEX(udma_reset_mutex); +uint32_t jfr_sleep_time = 1000; uint32_t jfc_arm_mode; bool dump_aux_info; @@ -179,6 +180,8 @@ static struct ubcore_ops g_dev_ops = { .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, + .destroy_jfr = udma_destroy_jfr, + .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, .query_jetty = udma_query_jetty, }; @@ -1092,6 +1095,9 @@ 
module_init(udma_init); module_exit(udma_exit); MODULE_LICENSE("GPL"); +module_param(jfr_sleep_time, uint, 0444); +MODULE_PARM_DESC(jfr_sleep_time, "Set the destroy jfr sleep time, default: 1000 us.\n"); + module_param(jfc_arm_mode, uint, 0444); MODULE_PARM_DESC(jfc_arm_mode, "Set the ARM mode of the JFC, default: 0(0:Always ARM, other: NO ARM."); -- Gitee From 1ec3f6ac287e032ef6264bef408de0fad36731f0 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 15:51:47 +0800 Subject: [PATCH 011/126] ub: udma: Support create jfc. commit 14ca6ade755ce2333f8f9c1524bdd24ff5577c8b openEuler This patch adds the ability to create jfc. During the creation process, driver will create jfc context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 7 + drivers/ub/urma/hw/udma/udma_jfc.c | 381 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 36 +++ drivers/ub/urma/hw/udma/udma_main.c | 6 + 4 files changed, 430 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 9f2a20eea557..d9b10ab28028 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -12,6 +12,7 @@ #include extern bool dfx_switch; +extern bool cqe_mode; extern uint32_t jfr_sleep_time; extern uint32_t jfc_arm_mode; extern bool dump_aux_info; @@ -85,6 +86,11 @@ struct udma_mailbox_cmd { struct rw_semaphore udma_mb_rwsem; }; +struct udma_ex_jfc_addr { + uint64_t cq_addr; + uint32_t cq_len; +}; + struct udma_dev { struct ubase_adev_com comdev; struct ubcore_device ub_dev; @@ -124,6 +130,7 @@ struct udma_dev { uint32_t status; struct udma_dev_debugfs *dbgfs; uint32_t ue_num; + struct udma_ex_jfc_addr cq_addr_array[UDMA_JFC_TYPE_NUM]; uint32_t ue_id; struct page *db_page; u8 udma_tp_sl_num; diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index ee223bb923f6..9d86d9003593 100644 --- 
a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -18,6 +18,387 @@ #include #include "udma_jfc.h" +static void udma_construct_jfc_ctx(struct udma_dev *dev, + struct udma_jfc *jfc, + struct udma_jfc_ctx *ctx) +{ + memset(ctx, 0, sizeof(struct udma_jfc_ctx)); + + ctx->state = UDMA_JFC_STATE_VALID; + if (jfc_arm_mode) + ctx->arm_st = UDMA_CTX_NO_ARMED; + else + ctx->arm_st = UDMA_CTX_ALWAYS_ARMED; + ctx->shift = jfc->cq_shift - UDMA_JFC_DEPTH_SHIFT_BASE; + ctx->jfc_type = UDMA_NORMAL_JFC_TYPE; + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->inline_en = jfc->inline_en; + ctx->cqe_va_l = jfc->buf.addr >> CQE_VA_L_OFFSET; + ctx->cqe_va_h = jfc->buf.addr >> CQE_VA_H_OFFSET; + ctx->cqe_token_id = jfc->tid; + + if (cqe_mode) + ctx->cq_cnt_mode = UDMA_CQE_CNT_MODE_BY_CI_PI_GAP; + else + ctx->cq_cnt_mode = UDMA_CQE_CNT_MODE_BY_COUNT; + + ctx->ceqn = jfc->ceqn; + if (jfc->stars_en) { + ctx->stars_en = UDMA_STARS_SWITCH; + ctx->record_db_en = UDMA_NO_RECORD_EN; + } else { + ctx->record_db_en = UDMA_RECORD_EN; + ctx->record_db_addr_l = jfc->db.db_addr >> UDMA_DB_L_OFFSET; + ctx->record_db_addr_h = jfc->db.db_addr >> UDMA_DB_H_OFFSET; + } +} + +void udma_init_jfc_param(struct ubcore_jfc_cfg *cfg, + struct udma_jfc *jfc) +{ + jfc->base.id = jfc->jfcn; + jfc->base.jfc_cfg = *cfg; + jfc->ceqn = cfg->ceqn; + jfc->lock_free = cfg->flag.bs.lock_free; + jfc->inline_en = cfg->flag.bs.jfc_inline; + jfc->cq_shift = ilog2(jfc->buf.entry_cnt); +} + +int udma_check_jfc_cfg(struct udma_dev *dev, struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg) +{ + if (!jfc->buf.entry_cnt || jfc->buf.entry_cnt > dev->caps.jfc.depth) { + dev_err(dev->dev, "invalid jfc depth = %u, cap depth = %u.\n", + jfc->buf.entry_cnt, dev->caps.jfc.depth); + return -EINVAL; + } + + if (jfc->buf.entry_cnt < UDMA_JFC_DEPTH_MIN) + jfc->buf.entry_cnt = UDMA_JFC_DEPTH_MIN; + + if (cfg->ceqn >= dev->caps.comp_vector_cnt) { + dev_err(dev->dev, "invalid ceqn = %u, cap ceq cnt = 
%u.\n", + cfg->ceqn, dev->caps.comp_vector_cnt); + return -EINVAL; + } + + return 0; +} + +static int udma_get_cmd_from_user(struct udma_create_jfc_ucmd *ucmd, + struct udma_dev *dev, + struct ubcore_udata *udata, + struct udma_jfc *jfc) +{ +#define UDMA_JFC_CQE_SHIFT 6 + unsigned long byte; + + if (!udata->udrv_data || !udata->udrv_data->in_addr) { + dev_err(dev->dev, "jfc udrv_data or in_addr is null.\n"); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + min(udata->udrv_data->in_len, + (uint32_t)sizeof(*ucmd))); + if (byte) { + dev_err(dev->dev, + "failed to copy udata from user, byte = %lu.\n", byte); + return -EFAULT; + } + + jfc->mode = ucmd->mode; + jfc->ctx = to_udma_context(udata->uctx); + if (jfc->mode > UDMA_NORMAL_JFC_TYPE && jfc->mode < UDMA_KERNEL_STARS_JFC_TYPE) { + jfc->buf.entry_cnt = ucmd->buf_len; + return 0; + } + + jfc->db.db_addr = ucmd->db_addr; + jfc->buf.entry_cnt = ucmd->buf_len >> UDMA_JFC_CQE_SHIFT; + + return 0; +} + +static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, + struct ubcore_udata *udata, struct udma_jfc *jfc) +{ + struct udma_context *uctx; + uint32_t size; + int ret = 0; + + if (udata) { + ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfc->buf); + if (ret) { + dev_err(dev->dev, "failed to pin queue for jfc, ret = %d.\n", ret); + return ret; + } + uctx = to_udma_context(udata->uctx); + jfc->tid = uctx->tid; + ret = udma_pin_sw_db(uctx, &jfc->db); + if (ret) { + dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); + unpin_queue_addr(jfc->buf.umem); + } + + return ret; + } + + if (!jfc->lock_free) + spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; + jfc->tid = dev->tid; + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + + ret = udma_k_alloc_buf(dev, size, &jfc->buf); + if (ret) { + dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); + return ret; + } + + ret = udma_alloc_sw_db(dev, 
&jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + udma_k_free_buf(dev, size, &jfc->buf); + return -ENOMEM; + } + + return ret; +} + +static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct udma_context *uctx; + uint32_t size; + + if (jfc->buf.kva) { + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + udma_k_free_buf(dev, size, &jfc->buf); + } else if (jfc->buf.umem) { + uctx = to_udma_context(jfc->base.uctx); + unpin_queue_addr(jfc->buf.umem); + } + + if (jfc->db.page) { + uctx = to_udma_context(jfc->base.uctx); + udma_unpin_sw_db(uctx, &jfc->db); + } else if (jfc->db.kpage) { + udma_free_sw_db(dev, &jfc->db); + } +} + +int udma_post_create_jfc_mbox(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFCC.\n"); + return -ENOMEM; + } + + if (jfc->mode == UDMA_STARS_JFC_TYPE || jfc->mode == UDMA_CCU_JFC_TYPE || + jfc->mode == UDMA_KERNEL_STARS_JFC_TYPE) + jfc->stars_en = true; + udma_construct_jfc_ctx(dev, jfc, (struct udma_jfc_ctx *)mailbox->buf); + + mbox_attr.tag = jfc->jfcn; + mbox_attr.op = UDMA_CMD_CREATE_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post create JFC mailbox, ret = %d.\n", ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_verify_stars_jfc_param(struct udma_dev *dev, + struct udma_ex_jfc_addr *jfc_addr, + struct udma_jfc *jfc) +{ + uint32_t size; + + if (!jfc_addr->cq_addr) { + dev_err(dev->dev, "CQE addr is wrong.\n"); + return -ENOMEM; + } + if (!jfc_addr->cq_len) { + dev_err(dev->dev, "CQE len is wrong.\n"); + return -EINVAL; + } + + size = jfc->buf.entry_cnt * dev->caps.cqe_size; + + if (size != jfc_addr->cq_len) { + dev_err(dev->dev, "cqe buff size is wrong, buf 
size = %u.\n", size); + return -EINVAL; + } + + return 0; +} + +static int udma_get_stars_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +{ + struct udma_ex_jfc_addr *jfc_addr = &dev->cq_addr_array[jfc->mode]; + int ret; + + jfc->tid = dev->tid; + + ret = udma_verify_stars_jfc_param(dev, jfc_addr, jfc); + if (ret) + return ret; + + jfc->buf.addr = (dma_addr_t)(uintptr_t)jfc_addr->cq_addr; + + ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + return -ENOMEM; + } + + return ret; +} + +static int udma_create_stars_jfc(struct udma_dev *dev, + struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata, + struct udma_create_jfc_ucmd *ucmd) +{ + unsigned long flags_store; + unsigned long flags_erase; + int ret; + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, &jfc->jfcn); + if (ret) { + dev_err(dev->dev, "failed to alloc id for stars JFC.\n"); + return -ENOMEM; + } + + udma_init_jfc_param(cfg, jfc); + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored stars jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_stars_jfc_buf(dev, jfc); + if (ret) + goto err_alloc_cqc; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_get_jfc_buf; + + refcount_set(&jfc->event_refcount, 1); + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return 0; + +err_get_jfc_buf: + udma_free_sw_db(dev, &jfc->db); +err_alloc_cqc: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); + 
+ return -ENOMEM; +} + +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *dev = to_udma_dev(ubcore_dev); + struct udma_create_jfc_ucmd ucmd = {}; + unsigned long flags_store; + unsigned long flags_erase; + struct udma_jfc *jfc; + int ret; + + jfc = kzalloc(sizeof(struct udma_jfc), GFP_KERNEL); + if (!jfc) + return NULL; + + if (udata) { + ret = udma_get_cmd_from_user(&ucmd, dev, udata, jfc); + if (ret) + goto err_get_cmd; + } else { + jfc->arm_sn = 1; + jfc->buf.entry_cnt = cfg->depth ? roundup_pow_of_two(cfg->depth) : cfg->depth; + } + + ret = udma_check_jfc_cfg(dev, jfc, cfg); + if (ret) + goto err_get_cmd; + + if (jfc->mode == UDMA_STARS_JFC_TYPE || jfc->mode == UDMA_CCU_JFC_TYPE) { + if (udma_create_stars_jfc(dev, jfc, cfg, udata, &ucmd)) + goto err_get_cmd; + return &jfc->base; + } + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, + &jfc->jfcn); + if (ret) + goto err_get_cmd; + + udma_init_jfc_param(cfg, jfc); + + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_jfc_buf(dev, &ucmd, udata, jfc); + if (ret) + goto err_get_jfc_buf; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_alloc_cqc; + + refcount_set(&jfc->event_refcount, 1); + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return &jfc->base; + +err_alloc_cqc: + jfc->base.uctx = (udata == NULL ? 
NULL : udata->uctx); + udma_free_jfc_buf(dev, jfc); +err_get_jfc_buf: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); +err_get_cmd: + kfree(jfc); + return NULL; +} + int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data) { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 8cb7271739d7..eba31242050c 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -7,10 +7,43 @@ #include "udma_dev.h" #include "udma_ctx.h" +#define UDMA_JFC_DEPTH_MIN 64 #define UDMA_JFC_DEPTH_SHIFT_BASE 6 +#define CQE_VA_L_OFFSET 12 +#define CQE_VA_H_OFFSET 32 + +#define UDMA_DB_L_OFFSET 6 +#define UDMA_DB_H_OFFSET 38 + +#define UDMA_STARS_SWITCH 1 + +enum udma_jfc_state { + UDMA_JFC_STATE_INVALID, + UDMA_JFC_STATE_VALID, + UDMA_JFC_STATE_ERROR, +}; + +enum udma_armed_jfc { + UDMA_CTX_NO_ARMED, + UDMA_CTX_ALWAYS_ARMED, + UDMA_CTX_REG_NEXT_CEQE, + UDMA_CTX_REG_NEXT_SOLICITED_CEQE, +}; + +enum udma_record_db { + UDMA_NO_RECORD_EN, + UDMA_RECORD_EN, +}; + +enum udma_cq_cnt_mode { + UDMA_CQE_CNT_MODE_BY_COUNT, + UDMA_CQE_CNT_MODE_BY_CI_PI_GAP, +}; + struct udma_jfc { struct ubcore_jfc base; + struct udma_context *ctx; uint32_t jfcn; uint32_t ceqn; uint32_t tid; @@ -103,6 +136,9 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) return container_of(jfc, struct udma_jfc, base); } +struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata); int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index c5911b027ba6..72457ae13878 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ 
-20,6 +20,7 @@ #include "udma_eq.h" #include "udma_segment.h" #include "udma_jfs.h" +#include "udma_jfc.h" #include "udma_jfr.h" #include "udma_cmd.h" #include "udma_ctx.h" @@ -31,6 +32,7 @@ #include "udma_common.h" #include "udma_ctrlq_tp.h" +bool cqe_mode = true; bool is_rmmod; static DEFINE_MUTEX(udma_reset_mutex); uint32_t jfr_sleep_time = 1000; @@ -176,6 +178,7 @@ static struct ubcore_ops g_dev_ops = { .unregister_seg = udma_unregister_seg, .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, + .create_jfc = udma_create_jfc, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, @@ -1095,6 +1098,9 @@ module_init(udma_init); module_exit(udma_exit); MODULE_LICENSE("GPL"); +module_param(cqe_mode, bool, 0444); +MODULE_PARM_DESC(cqe_mode, "Set cqe reporting mode, default: 1 (0:BY_COUNT, 1:BY_CI_PI_GAP)"); + module_param(jfr_sleep_time, uint, 0444); MODULE_PARM_DESC(jfr_sleep_time, "Set the destroy jfr sleep time, default: 1000 us.\n"); -- Gitee From d86448c761d299d76c24c4e13a45a4cc2bae67e8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:06:10 +0800 Subject: [PATCH 012/126] ub: udma: Support destroy jfc. commit 10408c91c73de7803de5f335804d8c85c6b6ddcf openEuler This patch adds the ability to destroy jfc, During the destruction process, driver will destroy jfc context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 105 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 107 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 9d86d9003593..bfbf479ec06f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -399,6 +399,111 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, return NULL; } +static int udma_post_destroy_jfc_mbox(struct udma_dev *dev, uint32_t jfcn) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *ctx; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for JFCC.\n"); + return -ENOMEM; + } + + ctx = (struct udma_jfc_ctx *)mailbox->buf; + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_DESTROY_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to post destroy JFC mailbox, ret = %d.\n", + ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_query_jfc_destroy_done(struct udma_dev *dev, uint32_t jfcn) +{ + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + struct udma_jfc_ctx *jfc_ctx; + int ret; + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_QUERY_JFC_CONTEXT; + mailbox = udma_mailbox_query_ctx(dev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + jfc_ctx = (struct udma_jfc_ctx *)mailbox->buf; + ret = jfc_ctx->pi == jfc_ctx->wr_cqe_idx ? 
0 : -EAGAIN; + + jfc_ctx->cqe_token_value = 0; + jfc_ctx->remote_token_value = 0; + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +static int udma_destroy_and_flush_jfc(struct udma_dev *dev, uint32_t jfcn) +{ +#define QUERY_MAX_TIMES 5 + uint32_t wait_times = 0; + int ret; + + ret = udma_post_destroy_jfc_mbox(dev, jfcn); + if (ret) { + dev_err(dev->dev, "failed to post mbox to destroy jfc, id: %u.\n", jfcn); + return ret; + } + + while (true) { + if (udma_query_jfc_destroy_done(dev, jfcn) == 0) + return 0; + if (wait_times > QUERY_MAX_TIMES) + break; + msleep(1 << wait_times); + wait_times++; + } + dev_err(dev->dev, "jfc flush timed out, id: %u.\n", jfcn); + + return -EFAULT; +} + +int udma_destroy_jfc(struct ubcore_jfc *jfc) +{ + struct udma_dev *dev = to_udma_dev(jfc->ub_dev); + struct udma_jfc *ujfc = to_udma_jfc(jfc); + unsigned long flags; + int ret; + + ret = udma_destroy_and_flush_jfc(dev, ujfc->jfcn); + if (ret) + return ret; + + xa_lock_irqsave(&dev->jfc_table.xa, flags); + __xa_erase(&dev->jfc_table.xa, ujfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags); + + if (refcount_dec_and_test(&ujfc->event_refcount)) + complete(&ujfc->event_comp); + wait_for_completion(&ujfc->event_comp); + + if (dfx_switch) + udma_dfx_delete_id(dev, &dev->dfx_info->jfc, jfc->id); + + udma_free_jfc_buf(dev, ujfc); + udma_id_free(&dev->jfc_table.ida_table, ujfc->jfcn); + kfree(ujfc); + + return 0; +} + int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data) { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index eba31242050c..21f4016a42cd 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -139,6 +139,7 @@ static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, struct ubcore_jfc_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jfc(struct ubcore_jfc *jfc); int 
udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 72457ae13878..d1d45200b585 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -179,6 +179,7 @@ static struct ubcore_ops g_dev_ops = { .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, .create_jfc = udma_create_jfc, + .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, -- Gitee From 7e66684419e5810295fa5a8cef0b8debaa049ab3 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:35:29 +0800 Subject: [PATCH 013/126] ub: udma: Support create jetty. commit 4d0a86f5836c68cb1303313cd3b7eed1876864b2 openEuler This patch adds the ability to create jetty. During the creation process, driver will create jetty context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 300 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 304 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index b012010c0e74..63e5eae5f7b7 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -18,6 +18,144 @@ bool well_known_jetty_pgsz_check = true; +static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jetty, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + struct udma_context *uctx; + unsigned long byte; + + if (!udata) { + jetty->sq.jetty_type = (enum udma_jetty_type)UDMA_URMA_NORMAL_JETTY_TYPE; + return 0; + } + + if (!udata->udrv_data) { + dev_err(dev->dev, "jetty udata udrv_data is null.\n"); + return -EINVAL; + } + + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < 
sizeof(*ucmd)) { + dev_err(dev->dev, "jetty in_len (%u) or addr is invalid.\n", + udata->udrv_data->in_len); + return -EINVAL; + } + + byte = copy_from_user(ucmd, (void *)(uintptr_t)udata->udrv_data->in_addr, + sizeof(*ucmd)); + if (byte) { + dev_err(dev->dev, + "failed to copy jetty udata, byte = %lu.\n", byte); + return -EFAULT; + } + + uctx = to_udma_context(udata->uctx); + jetty->sq.tid = uctx->tid; + jetty->jetty_addr = ucmd->jetty_addr; + jetty->pi_type = ucmd->pi_type; + jetty->sq.jetty_type = (enum udma_jetty_type)ucmd->jetty_type; + jetty->sq.non_pin = ucmd->non_pin; + + return 0; +} + +static int udma_get_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty, + struct ubcore_udata *udata, + struct ubcore_jetty_cfg *cfg, + struct udma_create_jetty_ucmd *ucmd) +{ + struct ubcore_jfs_cfg jfs_cfg = { + .depth = cfg->jfs_depth, + .trans_mode = cfg->trans_mode, + .priority = cfg->priority, + .max_sge = cfg->max_send_sge, + .max_rsge = cfg->max_send_rsge, + .max_inline_data = cfg->max_inline_data, + .rnr_retry = cfg->rnr_retry, + .err_timeout = cfg->err_timeout, + .jfs_context = cfg->jetty_context, + .jfc = cfg->send_jfc, + }; + int ret; + + jfs_cfg.flag.bs.lock_free = cfg->flag.bs.lock_free; + if (!udata) + jetty->jetty_addr = (uintptr_t)&jetty->sq; + + jetty->jfr = to_udma_jfr(cfg->jfr); + + ret = udata ? 
udma_alloc_u_sq_buf(dev, &jetty->sq, ucmd) : + udma_alloc_k_sq_buf(dev, &jetty->sq, &jfs_cfg); + if (ret) { + dev_err(dev->dev, "failed to get sq buf, ret = %d.\n", ret); + return ret; + } + jetty->sq.trans_mode = jfs_cfg.trans_mode; + jetty->sq.is_jetty = true; + + return ret; +} + +static void udma_init_jettyc(struct udma_dev *dev, struct ubcore_jetty_cfg *cfg, + struct udma_jetty *jetty, void *mb_buf) +{ + struct udma_jetty_ctx *ctx = (struct udma_jetty_ctx *)mb_buf; + struct udma_jfc *receive_jfc = to_udma_jfc(cfg->recv_jfc); + uint8_t i; + + ctx->state = JETTY_READY; + ctx->jfs_mode = JETTY; + ctx->type = to_udma_type(cfg->trans_mode); + ctx->sl = dev->udma_sl[UDMA_DEFAULT_SL_NUM]; + if (ctx->type == JETTY_RM || ctx->type == JETTY_RC) { + for (i = 0; i < dev->udma_total_sl_num; i++) { + if (cfg->priority == dev->udma_sl[i]) { + ctx->sl = cfg->priority; + break; + } + } + } else if (ctx->type == JETTY_UM) { + ctx->sl = dev->unic_sl[UDMA_DEFAULT_SL_NUM]; + for (i = 0; i < dev->unic_sl_num; i++) { + if (cfg->priority == dev->unic_sl[i]) { + ctx->sl = cfg->priority; + break; + } + } + } + ctx->sqe_base_addr_l = (jetty->sq.buf.addr >> SQE_VA_L_OFFSET) & + (uint32_t)SQE_VA_L_VALID_BIT; + ctx->sqe_base_addr_h = (jetty->sq.buf.addr >> SQE_VA_H_OFFSET) & + (uint32_t)SQE_VA_H_VALID_BIT; + ctx->sqe_token_id_l = jetty->sq.tid & (uint32_t)SQE_TOKEN_ID_L_MASK; + ctx->sqe_token_id_h = (jetty->sq.tid >> SQE_TOKEN_ID_H_OFFSET) & + (uint32_t)SQE_TOKEN_ID_H_MASK; + ctx->sqe_bb_shift = ilog2(roundup_pow_of_two(jetty->sq.buf.entry_cnt)); + ctx->tx_jfcn = cfg->send_jfc->id; + ctx->ta_timeout = to_ta_timeout(cfg->err_timeout); + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_RNR_RETRY)) + ctx->rnr_retry_num = cfg->rnr_retry; + + ctx->jfrn_l = jetty->jfr->rq.id; + ctx->jfrn_h = jetty->jfr->rq.id >> JETTY_CTX_JFRN_H_OFFSET; + ctx->rx_jfcn = cfg->recv_jfc->id; + ctx->user_data_l = jetty->jetty_addr; + ctx->user_data_h = jetty->jetty_addr >> UDMA_USER_DATA_H_OFFSET; + ctx->seid_idx = 
cfg->eid_index; + ctx->pi_type = jetty->pi_type ? 1 : 0; + + if (!!(dev->caps.feature & UDMA_CAP_FEATURE_JFC_INLINE)) + ctx->cqe_ie = receive_jfc->inline_en; + + ctx->err_mode = cfg->flag.bs.error_suspend; + ctx->cmp_odr = cfg->flag.bs.outorder_comp; + ctx->avail_sgmt_ost = AVAIL_SGMT_OST_INIT; + ctx->sqe_pld_tokenid = jetty->sq.tid & (uint32_t)SQE_PLD_TOKEN_ID_MASK; + ctx->next_send_ssn = get_random_u16(); + ctx->next_rcv_ssn = ctx->next_send_ssn; +} + static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) { struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table; @@ -244,6 +382,114 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, return ret; } +static void free_jetty_id(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty, bool is_grp) +{ + if (udma_jetty->sq.id < udma_dev->caps.jetty.start_idx) + udma_id_free(&udma_dev->rsvd_jetty_ida_table, udma_jetty->sq.id); + else + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + udma_jetty->sq.id, false); +} + +static void udma_dfx_store_jetty_id(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty) +{ + struct udma_dfx_jetty *jetty; + int ret; + + jetty = (struct udma_dfx_jetty *)xa_load(&udma_dev->dfx_info->jetty.table, + udma_jetty->sq.id); + if (jetty) { + dev_warn(udma_dev->dev, "jetty_id(%u) already exists in dfx.\n", + udma_jetty->sq.id); + return; + } + + jetty = kzalloc(sizeof(*jetty), GFP_KERNEL); + if (!jetty) + return; + + jetty->id = udma_jetty->sq.id; + jetty->jfs_depth = udma_jetty->sq.buf.entry_cnt / udma_jetty->sq.sqe_bb_cnt; + + write_lock(&udma_dev->dfx_info->jetty.rwlock); + ret = xa_err(xa_store(&udma_dev->dfx_info->jetty.table, udma_jetty->sq.id, + jetty, GFP_KERNEL)); + if (ret) { + write_unlock(&udma_dev->dfx_info->jetty.rwlock); + dev_err(udma_dev->dev, "store jetty_id(%u) to jetty_table failed in dfx.\n", + udma_jetty->sq.id); + kfree(jetty); + return; + } + + ++udma_dev->dfx_info->jetty.cnt; + 
write_unlock(&udma_dev->dfx_info->jetty.rwlock); +} + +static int +udma_alloc_jetty_sq(struct udma_dev *udma_dev, struct udma_jetty *jetty, + struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata) +{ + struct udma_create_jetty_ucmd ucmd = {}; + int ret; + + ret = udma_get_user_jetty_cmd(udma_dev, jetty, udata, &ucmd); + if (ret) { + dev_err(udma_dev->dev, + "udma get user jetty ucmd failed, ret = %d.\n", ret); + return ret; + } + + ret = alloc_jetty_id(udma_dev, &jetty->sq, cfg->id, cfg->jetty_grp); + if (ret) { + dev_err(udma_dev->dev, "alloc jetty id failed, ret = %d.\n", ret); + return ret; + } + jetty->ubcore_jetty.jetty_id.id = jetty->sq.id; + jetty->ubcore_jetty.jetty_cfg = *cfg; + + ret = udma_get_jetty_buf(udma_dev, jetty, udata, cfg, &ucmd); + if (ret) + free_jetty_id(udma_dev, jetty, !!cfg->jetty_grp); + + return ret; +} + +static void udma_free_jetty_id_buf(struct udma_dev *udma_dev, + struct udma_jetty *udma_jetty, + struct ubcore_jetty_cfg *cfg) +{ + udma_free_sq_buf(udma_dev, &udma_jetty->sq); + free_jetty_id(udma_dev, udma_jetty, !!cfg->jetty_grp); +} + +static int udma_create_hw_jetty_ctx(struct udma_dev *dev, struct udma_jetty *udma_jetty, + struct ubcore_jetty_cfg *cfg) +{ + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + int ret; + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jetty priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; + } + + udma_init_jettyc(dev, cfg, udma_jetty, &ctx); + + attr.tag = udma_jetty->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr); + if (ret) + dev_err(dev->dev, + "post mailbox create jetty ctx failed, ret = %d.\n", ret); + + return ret; +} + void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) { #define UDMA_TA_TIMEOUT_MAX_INDEX 3 @@ -262,6 +508,60 @@ void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) 
sq->ta_timeout = time[index]; } +struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_jetty *udma_jetty; + int ret; + + udma_jetty = kzalloc(sizeof(*udma_jetty), GFP_KERNEL); + if (!udma_jetty) + return NULL; + + ret = udma_alloc_jetty_sq(udma_dev, udma_jetty, cfg, udata); + if (ret) { + dev_err(udma_dev->dev, + "udma alloc jetty id buf failed, ret = %d.\n", ret); + goto err_alloc_jetty; + } + + ret = xa_err(xa_store(&udma_dev->jetty_table.xa, udma_jetty->sq.id, + &udma_jetty->sq, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, + "store jetty sq(%u) to sq table failed, ret = %d.\n", + udma_jetty->sq.id, ret); + goto err_store_jetty_sq; + } + + ret = udma_create_hw_jetty_ctx(udma_dev, udma_jetty, cfg); + if (ret) { + dev_err(udma_dev->dev, + "post mailbox create jetty ctx failed, ret = %d.\n", ret); + goto err_create_hw_jetty; + } + + udma_set_query_flush_time(&udma_jetty->sq, cfg->err_timeout); + udma_jetty->sq.state = UBCORE_JETTY_STATE_READY; + refcount_set(&udma_jetty->ae_refcount, 1); + init_completion(&udma_jetty->ae_comp); + + if (dfx_switch) + udma_dfx_store_jetty_id(udma_dev, udma_jetty); + + return &udma_jetty->ubcore_jetty; +err_create_hw_jetty: + xa_erase(&udma_dev->jetty_table.xa, udma_jetty->sq.id); +err_store_jetty_sq: + udma_free_jetty_id_buf(udma_dev, udma_jetty, cfg); +err_alloc_jetty: + kfree(udma_jetty); + + return NULL; +} + int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id) { struct ubase_mbx_attr attr = {}; diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 63d7073b8631..0c6d409520eb 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -221,6 +221,9 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu int alloc_jetty_id(struct udma_dev *udma_dev, struct 
udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); +struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index d1d45200b585..6d2832081630 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -187,6 +187,7 @@ static struct ubcore_ops g_dev_ops = { .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, + .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, }; -- Gitee From f683ae090bd548017c963d41827347cf2a786a2a Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 16:50:18 +0800 Subject: [PATCH 014/126] ub: udma: Support destroy jetty. commit 70df47775eb5fec27c8b0fccfadeb97d9f0e210a openEuler This patch adds the ability to destroy jetty, During the destruction process, driver will destroy jetty context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 46 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 48 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 63e5eae5f7b7..bc35512c57b6 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -761,3 +761,49 @@ int udma_modify_and_destroy_jetty(struct udma_dev *dev, return 0; } + +static void udma_free_jetty(struct ubcore_jetty *jetty) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty, + udma_jetty->sq.id); + + xa_erase(&udma_dev->jetty_table.xa, udma_jetty->sq.id); + + if (refcount_dec_and_test(&udma_jetty->ae_refcount)) + complete(&udma_jetty->ae_comp); + wait_for_completion(&udma_jetty->ae_comp); + + udma_free_sq_buf(udma_dev, &udma_jetty->sq); + free_jetty_id(udma_dev, udma_jetty, !!udma_jetty->sq.jetty_grp); + kfree(udma_jetty); +} + +int udma_destroy_jetty(struct ubcore_jetty *jetty) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + if (!udma_jetty->ue_rx_closed && udma_close_ue_rx(udma_dev, true, true, false, 0)) { + dev_err(udma_dev->dev, "close ue rx failed when destroying jetty.\n"); + return -EINVAL; + } + + ret = udma_modify_and_destroy_jetty(udma_dev, &udma_jetty->sq); + if (ret) { + dev_err(udma_dev->dev, "udma modify error and destroy jetty failed, id: %u.\n", + jetty->jetty_id.id); + if (!udma_jetty->ue_rx_closed) + udma_open_ue_rx(udma_dev, true, true, false, 0); + return ret; + } + + udma_free_jetty(jetty); + udma_open_ue_rx(udma_dev, true, true, false, 0); + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h 
b/drivers/ub/urma/hw/udma/udma_jetty.h index 0c6d409520eb..a37c9a9ff54f 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -224,6 +224,7 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); +int udma_destroy_jetty(struct ubcore_jetty *jetty); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 6d2832081630..a1f3bcb5dde3 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -189,6 +189,7 @@ static struct ubcore_ops g_dev_ops = { .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, + .destroy_jetty = udma_destroy_jetty, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 704d5ac2f31867e4ca2f05de0b115bdccc8561ba Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 17:05:48 +0800 Subject: [PATCH 015/126] ub: udma: Support create jetty group. commit fdebd311c4b97415cdc307c322944dbc691f32d0 openEuler This patch adds the ability to create jetty group. During The creation process, driver will create jetty group context and send it to the hardware. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 112 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 116 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index bc35512c57b6..7cbffa81bf1d 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -807,3 +807,115 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } + +static int udma_alloc_group_start_id(struct udma_dev *udma_dev, + struct udma_group_bitmap *bitmap_table, + uint32_t *start_jetty_id) +{ + int ret; + + ret = udma_adv_id_alloc(udma_dev, bitmap_table, start_jetty_id, true, + bitmap_table->grp_next); + if (ret) { + ret = udma_adv_id_alloc(udma_dev, bitmap_table, start_jetty_id, + true, bitmap_table->min); + if (ret) + return ret; + } + + bitmap_table->grp_next = (*start_jetty_id + NUM_JETTY_PER_GROUP) > + bitmap_table->max ? 
bitmap_table->min : + (*start_jetty_id + NUM_JETTY_PER_GROUP); + + return 0; +} + +static int udma_alloc_jetty_grp_id(struct udma_dev *udma_dev, + struct udma_jetty_grp *jetty_grp) +{ + int ret; + + ret = udma_alloc_group_start_id(udma_dev, &udma_dev->jetty_table.bitmap_table, + &jetty_grp->start_jetty_id); + if (ret) { + dev_err(udma_dev->dev, + "alloc jetty id for grp failed, ret = %d.\n", ret); + return ret; + } + + ret = udma_id_alloc_auto_grow(udma_dev, &udma_dev->jetty_grp_table.ida_table, + &jetty_grp->jetty_grp_id); + if (ret) { + dev_err(udma_dev->dev, + "alloc jetty grp id failed, ret = %d.\n", ret); + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + jetty_grp->start_jetty_id, true); + return ret; + } + + jetty_grp->ubcore_jetty_grp.jetty_grp_id.id = jetty_grp->jetty_grp_id; + + return 0; +} + +struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + struct ubase_mbx_attr mbox_attr = {}; + struct udma_jetty_grp_ctx ctx = {}; + struct udma_jetty_grp *jetty_grp; + int ret; + + if (cfg->policy != UBCORE_JETTY_GRP_POLICY_HASH_HINT) { + dev_err(udma_dev->dev, "policy %u not support.\n", cfg->policy); + return NULL; + } + + jetty_grp = kzalloc(sizeof(*jetty_grp), GFP_KERNEL); + if (!jetty_grp) + return NULL; + + ret = udma_alloc_jetty_grp_id(udma_dev, jetty_grp); + if (ret) + goto err_alloc_jetty_grp_id; + + ctx.start_jetty_id = jetty_grp->start_jetty_id; + + ret = xa_err(xa_store(&udma_dev->jetty_grp_table.xa, jetty_grp->jetty_grp_id, + jetty_grp, GFP_KERNEL)); + if (ret) { + dev_err(udma_dev->dev, "store jetty group(%u) failed, ret = %d.\n", + jetty_grp->jetty_grp_id, ret); + goto err_store_jetty_grp; + } + + mbox_attr.tag = jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_CREATE_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, &ctx, sizeof(ctx), &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, + 
"post mailbox update jetty ctx failed, ret = %d.\n", ret); + goto err_post_mailbox; + } + + mutex_init(&jetty_grp->valid_lock); + refcount_set(&jetty_grp->ae_refcount, 1); + init_completion(&jetty_grp->ae_comp); + + if (dfx_switch) + udma_dfx_store_id(udma_dev, &udma_dev->dfx_info->jetty_grp, + jetty_grp->jetty_grp_id, "jetty_grp"); + + return &jetty_grp->ubcore_jetty_grp; +err_post_mailbox: + xa_erase(&udma_dev->jetty_grp_table.xa, jetty_grp->jetty_grp_id); +err_store_jetty_grp: + udma_id_free(&udma_dev->jetty_grp_table.ida_table, + jetty_grp->jetty_grp_id); +err_alloc_jetty_grp_id: + kfree(jetty_grp); + + return NULL; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index a37c9a9ff54f..b2e65a8c8f86 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -225,6 +225,9 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, + struct ubcore_jetty_grp_cfg *cfg, + struct ubcore_udata *udata); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index a1f3bcb5dde3..baf9a970fed8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -190,6 +190,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty = udma_create_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .create_jetty_grp = udma_create_jetty_grp, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 7e7998388a292b54d590271522dae8f1dc106739 Mon Sep 17 00:00:00 2001 From: 
Wei Qin Date: Wed, 20 Aug 2025 17:24:30 +0800 Subject: [PATCH 016/126] ub: udma: Support destroy jetty group. commit a9fe853ef4163a534f78f2b380bdf51f845b0242 openEuler This patch adds the ability to destroy jetty group. During the destruction process, driver will destroy jetty group context and send it to the hardware. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 41 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 +- drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 7cbffa81bf1d..67534599f23d 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -919,3 +919,44 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, return NULL; } + +int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) +{ + struct udma_jetty_grp *udma_jetty_grp = to_udma_jetty_grp(jetty_grp); + struct udma_dev *udma_dev = to_udma_dev(jetty_grp->ub_dev); + struct ubase_mbx_attr mbox_attr = {}; + int ret; + + mbox_attr.tag = udma_jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_DESTROY_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, NULL, 0, &mbox_attr); + if (ret) { + dev_err(udma_dev->dev, + "post mailbox destroy jetty group failed, ret = %d.\n", ret); + return ret; + } + + xa_erase(&udma_dev->jetty_grp_table.xa, udma_jetty_grp->jetty_grp_id); + + if (refcount_dec_and_test(&udma_jetty_grp->ae_refcount)) + complete(&udma_jetty_grp->ae_comp); + wait_for_completion(&udma_jetty_grp->ae_comp); + + if (dfx_switch) + udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty_grp, + udma_jetty_grp->jetty_grp_id); + + if (udma_jetty_grp->valid != 0) + dev_err(udma_dev->dev, + "jetty group been used, jetty valid is 0x%x.\n", + udma_jetty_grp->valid); + + mutex_destroy(&udma_jetty_grp->valid_lock); + 
udma_id_free(&udma_dev->jetty_grp_table.ida_table, + udma_jetty_grp->jetty_grp_id); + udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, + udma_jetty_grp->start_jetty_id, true); + kfree(udma_jetty_grp); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index b2e65a8c8f86..8f23621a58f6 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -228,8 +228,9 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); -int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); +int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index baf9a970fed8..2491aea7f9e2 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -191,6 +191,7 @@ static struct ubcore_ops g_dev_ops = { .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, .create_jetty_grp = udma_create_jetty_grp, + .delete_jetty_grp = udma_delete_jetty_grp, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 63aa2afe71f9bcd5aaf4deee1d21a0482b415544 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 19:52:57 +0800 Subject: [PATCH 017/126] ub: udma: Support modify jfs. commit 0ea69661e7ac19a85dcad8c250275d7f9582731b openEuler This patch adds the ability to modify jfs. During the modify jfs process, the driver will post mailbox to notify the hardware to modify. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 58 +++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 11 +++- drivers/ub/urma/hw/udma/udma_jfs.c | 77 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 5 files changed, 148 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 67534599f23d..74f9a30e2be3 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -18,6 +18,22 @@ bool well_known_jetty_pgsz_check = true; +const char *state_name[] = { + "RESET", + "READY", + "SUSPENDED", + "ERROR", + "INVALID" +}; + +const char *to_state_name(enum ubcore_jetty_state state) +{ + if ((int)state >= (int)STATE_NUM) + return state_name[STATE_NUM]; + + return state_name[state]; +} + static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jetty, struct ubcore_udata *udata, struct udma_create_jetty_ucmd *ucmd) @@ -465,6 +481,14 @@ static void udma_free_jetty_id_buf(struct udma_dev *udma_dev, free_jetty_id(udma_dev, udma_jetty, !!cfg->jetty_grp); } +void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq) +{ + sq->kva_curr = sq->buf.kva; + sq->pi = 0; + sq->ci = 0; + sq->flush_flag = false; +} + static int udma_create_hw_jetty_ctx(struct udma_dev *dev, struct udma_jetty *udma_jetty, struct ubcore_jetty_cfg *cfg) { @@ -808,6 +832,40 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, + enum ubcore_jetty_state attr_state) +{ + switch (jetty_state) { + case UBCORE_JETTY_STATE_RESET: + return attr_state == UBCORE_JETTY_STATE_READY; + case UBCORE_JETTY_STATE_READY: + return attr_state == UBCORE_JETTY_STATE_ERROR || + attr_state == UBCORE_JETTY_STATE_SUSPENDED; + case UBCORE_JETTY_STATE_SUSPENDED: + return attr_state == UBCORE_JETTY_STATE_ERROR; + 
case UBCORE_JETTY_STATE_ERROR: + return attr_state == UBCORE_JETTY_STATE_RESET; + default: + break; + } + + return false; +} + +enum jetty_state to_jetty_state(enum ubcore_jetty_state state) +{ + switch (state) { + case UBCORE_JETTY_STATE_ERROR: + return JETTY_ERROR; + case UBCORE_JETTY_STATE_SUSPENDED: + return JETTY_SUSPEND; + default: + break; + } + + return STATE_NUM; +} + static int udma_alloc_group_start_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, uint32_t *start_jetty_id) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 8f23621a58f6..013fb8ddd17f 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -219,6 +219,10 @@ static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queu return container_of(queue, struct udma_jetty, sq); } +enum jetty_state to_jetty_state(enum ubcore_jetty_state state); +const char *to_state_name(enum ubcore_jetty_state state); +bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, + enum ubcore_jetty_state attr_state); int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, uint32_t cfg_id, struct ubcore_jetty_group *jetty_grp); struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, @@ -229,9 +233,14 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); -void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); +int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, + enum jetty_state state); + +void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); +void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct 
udma_jetty_queue *sq); +int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index cb00cec5ccfd..e770bc5f6a2f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -409,3 +409,80 @@ int udma_destroy_jfs(struct ubcore_jfs *jfs) return 0; } + +static int udma_modify_jfs_state(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs, + struct ubcore_jfs_attr *attr) +{ + int ret; + + switch (attr->state) { + case UBCORE_JETTY_STATE_RESET: + ret = udma_destroy_hw_jetty_ctx(udma_dev, udma_jfs->sq.id); + break; + case UBCORE_JETTY_STATE_READY: + ret = udma_create_hw_jfs_ctx(udma_dev, udma_jfs, &udma_jfs->ubcore_jfs.jfs_cfg); + if (ret) + break; + + udma_reset_sw_k_jetty_queue(&udma_jfs->sq); + break; + default: + ret = udma_close_ue_rx(udma_dev, true, true, false, 0); + if (ret) + break; + + if (!(udma_dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) { + if (udma_modify_jetty_precondition(udma_dev, &udma_jfs->sq)) { + ret = -ENOMEM; + udma_open_ue_rx(udma_dev, true, true, false, 0); + break; + } + } + + ret = udma_set_jetty_state(udma_dev, udma_jfs->sq.id, to_jetty_state(attr->state)); + if (ret) + udma_open_ue_rx(udma_dev, true, true, false, 0); + else + udma_jfs->ue_rx_closed = true; + break; + } + + return ret; +} + +int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + int ret = 0; + + if (!(attr->mask & UBCORE_JFS_STATE)) { + dev_err(udma_dev->dev, "modify jfs mask is error or not set, jfs_id = %u.\n", + udma_jfs->sq.id); + return -EINVAL; + } + + if (udma_jfs->sq.state == attr->state) { + dev_info(udma_dev->dev, "jfs state has been %s.\n", + to_state_name(attr->state)); + return 0; + } + + if 
(!verify_modify_jetty(udma_jfs->sq.state, attr->state)) { + dev_err(udma_dev->dev, "not support modify jfs state from %s to %s.\n", + to_state_name(udma_jfs->sq.state), to_state_name(attr->state)); + return -EINVAL; + } + + ret = udma_modify_jfs_state(udma_dev, udma_jfs, attr); + if (ret) { + dev_err(udma_dev->dev, "modify jfs %u state to %u failed.\n", + udma_jfs->sq.id, attr->state); + return ret; + } + + udma_jfs->sq.state = attr->state; + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index ed1ff16e4573..6cdc281e53c3 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -53,5 +53,7 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct ubcore_jfs_cfg *jfs_cfg); void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); +int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 2491aea7f9e2..c726a231e4f8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -181,6 +181,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfc = udma_create_jfc, .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, + .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, -- Gitee From 31b59d6cb65baa837802aa0764a3309c59d5f48d Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 20 Aug 2025 20:09:44 +0800 Subject: [PATCH 018/126] ub: udma: Support modify jetty. commit f9d5d20b756c9484513f0c0a0493b560f98d9546 openEuler This patch adds the ability to modify jetty. During the modify jetty process, the driver will post mailbox to notify the hardware to modify. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 77 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 80 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 74f9a30e2be3..914ef33b81d9 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -866,6 +866,83 @@ enum jetty_state to_jetty_state(enum ubcore_jetty_state state) return STATE_NUM; } +static int udma_modify_jetty_state(struct udma_dev *udma_dev, struct udma_jetty *udma_jetty, + struct ubcore_jetty_attr *attr) +{ + int ret; + + switch (attr->state) { + case UBCORE_JETTY_STATE_RESET: + ret = udma_destroy_hw_jetty_ctx(udma_dev, udma_jetty->sq.id); + break; + case UBCORE_JETTY_STATE_READY: + ret = udma_create_hw_jetty_ctx(udma_dev, udma_jetty, + &udma_jetty->ubcore_jetty.jetty_cfg); + if (ret) + break; + + udma_reset_sw_k_jetty_queue(&udma_jetty->sq); + break; + default: + ret = udma_close_ue_rx(udma_dev, true, true, false, 0); + if (ret) + break; + + if (!(udma_dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE)) { + if (udma_modify_jetty_precondition(udma_dev, &udma_jetty->sq)) { + ret = -ENOMEM; + udma_open_ue_rx(udma_dev, true, true, false, 0); + break; + } + } + + ret = udma_set_jetty_state(udma_dev, udma_jetty->sq.id, + to_jetty_state(attr->state)); + if (ret) + udma_open_ue_rx(udma_dev, true, true, false, 0); + else + udma_jetty->ue_rx_closed = true; + break; + } + + return ret; +} + +int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + if (!(attr->mask & UBCORE_JETTY_STATE)) { + dev_err(udma_dev->dev, "modify jetty mask is error or not set, jetty_id = %u.\n", + udma_jetty->sq.id); + 
return -EINVAL; + } + + if (udma_jetty->sq.state == attr->state) { + dev_info(udma_dev->dev, "jetty state has been %s.\n", to_state_name(attr->state)); + return 0; + } + + if (!verify_modify_jetty(udma_jetty->sq.state, attr->state)) { + dev_err(udma_dev->dev, "not support modify jetty state from %s to %s.\n", + to_state_name(udma_jetty->sq.state), to_state_name(attr->state)); + return -EINVAL; + } + + ret = udma_modify_jetty_state(udma_dev, udma_jetty, attr); + if (ret) { + dev_err(udma_dev->dev, "modify jetty %u state to %s failed.\n", + udma_jetty->sq.id, to_state_name(attr->state)); + return ret; + } + udma_jetty->sq.state = attr->state; + + return 0; +} + static int udma_alloc_group_start_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bitmap_table, uint32_t *start_jetty_id) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 013fb8ddd17f..5b428e999ff1 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -229,6 +229,8 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index c726a231e4f8..93ca98ef248e 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -189,6 +189,7 @@ static struct ubcore_ops g_dev_ops = { .destroy_jfr_batch = udma_destroy_jfr_batch, .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, + .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, .create_jetty_grp = udma_create_jetty_grp, -- 
Gitee From c7629dccbaa2df6a6c6762cb0d1dc1f5991e5b51 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 09:26:22 +0800 Subject: [PATCH 019/126] ub: udma: Support modify jfr. commit d7b858224867b71d55b7ad700bb86da388ec2d91 openEuler This patch adds the ability to modify jfr. During the modify jfr process, the driver will post mailbox to notify the hardware to modify. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfr.c | 140 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 3 +- 3 files changed, 144 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index f15ca6b26d42..953fcffc5001 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -19,6 +19,14 @@ const char *state_str[] = { "INVALID" }; +static const char *to_state_str(enum ubcore_jfr_state state) +{ + if ((int)state >= (int)JFR_STATE_NUM) + return state_str[JFR_STATE_NUM]; + + return state_str[state]; +} + static int udma_verify_jfr_param(struct udma_dev *dev, struct ubcore_jfr_cfg *cfg) { @@ -650,3 +658,135 @@ int udma_destroy_jfr_batch(struct ubcore_jfr **jfr, int jfr_cnt, int *bad_jfr_in return 0; } + +static bool verify_modify_jfr_state(enum ubcore_jfr_state jfr_state, + enum ubcore_jfr_state attr_state) +{ + switch (jfr_state) { + case UBCORE_JFR_STATE_RESET: + return attr_state == UBCORE_JFR_STATE_READY; + case UBCORE_JFR_STATE_READY: + return attr_state == UBCORE_JFR_STATE_ERROR; + case UBCORE_JFR_STATE_ERROR: + return attr_state == UBCORE_JFR_STATE_RESET; + default: + break; + } + + return false; +} + +static int verify_modify_jfr(struct udma_dev *udma_dev, struct udma_jfr *udma_jfr, + struct ubcore_jfr_attr *attr, bool *state_flag, + bool *rx_threshold_flag) +{ + *rx_threshold_flag = false; + *state_flag = false; + + if (!(attr->mask & (UBCORE_JFR_RX_THRESHOLD | UBCORE_JFR_STATE))) 
{ + dev_err(udma_dev->dev, + "modify jfr mask is error or not set, jfrn = %u.\n", + udma_jfr->rq.id); + return -EINVAL; + } + + if (attr->mask & UBCORE_JFR_RX_THRESHOLD) { + if (attr->rx_threshold >= udma_jfr->wqe_cnt) { + dev_err(udma_dev->dev, + "JFR rx_threshold(%u) must less than wqe num(%u).\n", + attr->rx_threshold, udma_jfr->wqe_cnt); + return -EINVAL; + } + *rx_threshold_flag = true; + } + + if (attr->mask & UBCORE_JFR_STATE) { + if (udma_jfr->state == attr->state) { + dev_info(udma_dev->dev, + "jfr(%u) state has been %s, keep it unchanged.\n", + udma_jfr->rq.id, to_state_str(attr->state)); + return 0; + } else if (!verify_modify_jfr_state(udma_jfr->state, + attr->state)) { + dev_err(udma_dev->dev, + "jfr(%u) not support modify jfr state from %s to %s.\n", + udma_jfr->rq.id, to_state_str(udma_jfr->state), + to_state_str(attr->state)); + return -EINVAL; + } else if ((attr->state == UBCORE_JFR_STATE_RESET || + attr->state == UBCORE_JFR_STATE_ERROR) && + *rx_threshold_flag) { + dev_err(udma_dev->dev, + "jfr(%u) not support set rx threshold when change state to %s.\n", + udma_jfr->rq.id, to_state_str(attr->state)); + return -EINVAL; + } + *state_flag = true; + } + + return 0; +} + +static int udma_destroy_hw_jfr_ctx(struct udma_dev *dev, uint32_t jfr_id) +{ + struct ubase_mbx_attr attr = {}; + int ret; + + attr.tag = jfr_id; + attr.op = UDMA_CMD_DESTROY_JFR_CONTEXT; + ret = post_mailbox_update_ctx(dev, NULL, 0, &attr); + if (ret) + dev_err(dev->dev, + "post mailbox destroy jfr ctx failed, ret = %d.\n", ret); + + return ret; +} + +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); + struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + bool rx_threshold_flag = false; + bool state_flag = false; + int ret = 0; + + ret = verify_modify_jfr(udma_dev, udma_jfr, attr, &state_flag, + &rx_threshold_flag); + if (ret) + return ret; + + if (!(rx_threshold_flag || 
state_flag)) + return 0; + + if (rx_threshold_flag && !state_flag) { + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, state_flag, + rx_threshold_flag, attr); + } else { + switch (attr->state) { + case UBCORE_JFR_STATE_RESET: + ret = udma_destroy_hw_jfr_ctx(udma_dev, udma_jfr->rq.id); + break; + case UBCORE_JFR_STATE_READY: + ret = udma_hw_init_jfrc(udma_dev, &jfr->jfr_cfg, udma_jfr, + rx_threshold_flag ? + attr->rx_threshold : udma_jfr->rx_threshold); + break; + default: + ret = modify_jfr_context(udma_dev, udma_jfr->rq.id, state_flag, + rx_threshold_flag, attr); + break; + } + } + + if (ret) + return ret; + + if (state_flag) + udma_jfr->state = attr->state; + + if (rx_threshold_flag) + udma_jfr->rx_threshold = attr->rx_threshold; + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index 43ee96cea746..ae6d0d97f460 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -155,6 +155,8 @@ static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfr, rq); } +int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); struct ubcore_jfr *udma_create_jfr(struct ubcore_device *dev, struct ubcore_jfr_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jfr(struct ubcore_jfr *jfr); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 93ca98ef248e..3a478a468c6f 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -185,9 +185,10 @@ static struct ubcore_ops g_dev_ops = { .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, .create_jfr = udma_create_jfr, + .modify_jfr = udma_modify_jfr, + .query_jfr = udma_query_jfr, .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, - .query_jfr = udma_query_jfr, .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, 
.query_jetty = udma_query_jetty, -- Gitee From 77a59d4c995c3f70c5b20a15fe3bd3eb75152289 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 09:37:37 +0800 Subject: [PATCH 020/126] ub: udma: Support modify jfc. commit ab014f21ec7d348ce728443be3d8af41ab6d8d40 openEuler This patch adds the ability to modify jfc. During the modify jfc process, the driver will post mailbox to notify the hardware to modify. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 109 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 112 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index bfbf479ec06f..5067b3c52104 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -536,3 +536,112 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, return 0; } + +static int udma_get_cqe_period(uint16_t cqe_period) +{ + uint16_t period[] = { + UDMA_CQE_PERIOD_0, + UDMA_CQE_PERIOD_4, + UDMA_CQE_PERIOD_16, + UDMA_CQE_PERIOD_64, + UDMA_CQE_PERIOD_256, + UDMA_CQE_PERIOD_1024, + UDMA_CQE_PERIOD_4096, + UDMA_CQE_PERIOD_16384 + }; + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(period); ++i) { + if (cqe_period == period[i]) + return i; + } + + return -EINVAL; +} + +static int udma_check_jfc_attr(struct udma_dev *udma_dev, struct ubcore_jfc_attr *attr) +{ + if (!(attr->mask & (UBCORE_JFC_MODERATE_COUNT | UBCORE_JFC_MODERATE_PERIOD))) { + dev_err(udma_dev->dev, + "udma modify jfc mask is not set or invalid.\n"); + return -EINVAL; + } + + if ((attr->mask & UBCORE_JFC_MODERATE_COUNT) && + (attr->moderate_count >= UDMA_CQE_COALESCE_CNT_MAX)) { + dev_err(udma_dev->dev, "udma cqe coalesce cnt %u is invalid.\n", + attr->moderate_count); + return -EINVAL; + } + + if ((attr->mask & UBCORE_JFC_MODERATE_PERIOD) && + (udma_get_cqe_period(attr->moderate_period) == -EINVAL)) { 
+ dev_err(udma_dev->dev, "udma cqe coalesce period %u is invalid.\n", + attr->moderate_period); + return -EINVAL; + } + + return 0; +} + +static int udma_modify_jfc_attr(struct udma_dev *dev, uint32_t jfcn, + struct ubcore_jfc_attr *attr) +{ + struct udma_jfc_ctx *jfc_context, *ctx_mask; + struct ubase_mbx_attr mbox_attr = {}; + struct ubase_cmd_mailbox *mailbox; + int ret; + + mailbox = udma_alloc_cmd_mailbox(dev); + if (!mailbox) { + dev_err(dev->dev, "failed to alloc mailbox for modify jfc.\n"); + return -ENOMEM; + } + + jfc_context = &((struct udma_jfc_ctx *)mailbox->buf)[0]; + ctx_mask = &((struct udma_jfc_ctx *)mailbox->buf)[1]; + memset(ctx_mask, 0xff, sizeof(struct udma_jfc_ctx)); + + if (attr->mask & UBCORE_JFC_MODERATE_COUNT) { + jfc_context->cqe_coalesce_cnt = attr->moderate_count; + ctx_mask->cqe_coalesce_cnt = 0; + } + + if (attr->mask & UBCORE_JFC_MODERATE_PERIOD) { + jfc_context->cqe_coalesce_period = + udma_get_cqe_period(attr->moderate_period); + ctx_mask->cqe_coalesce_period = 0; + } + + mbox_attr.tag = jfcn; + mbox_attr.op = UDMA_CMD_MODIFY_JFC_CONTEXT; + ret = udma_post_mbox(dev, mailbox, &mbox_attr); + if (ret) + dev_err(dev->dev, + "failed to send post mbox in modify JFCC, ret = %d.\n", + ret); + + udma_free_cmd_mailbox(dev, mailbox); + + return ret; +} + +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_device = to_udma_dev(ubcore_jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(ubcore_jfc); + int ret; + + ret = udma_check_jfc_attr(udma_device, attr); + if (ret) + return ret; + + ret = udma_modify_jfc_attr(udma_device, udma_jfc->jfcn, attr); + if (ret) + dev_err(udma_device->dev, + "failed to modify JFC, jfcn = %u, ret = %d.\n", + udma_jfc->jfcn, ret); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 21f4016a42cd..29db1243623e 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ 
b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -142,5 +142,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, int udma_destroy_jfc(struct ubcore_jfc *jfc); int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); +int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 3a478a468c6f..b1fad9e31f38 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -179,6 +179,7 @@ static struct ubcore_ops g_dev_ops = { .import_seg = udma_import_seg, .unimport_seg = udma_unimport_seg, .create_jfc = udma_create_jfc, + .modify_jfc = udma_modify_jfc, .destroy_jfc = udma_destroy_jfc, .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, -- Gitee From fd0c8eabdea48d61baa8374d5681c9942cca6b34 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:43:50 +0800 Subject: [PATCH 021/126] ub: cdma: support kernel resource reclamation commit 82ffc9d85146f7dbf53de48a04c7d56ee9fce95a openEuler This patch implements kernel resource reclamation functionality within the CDMA driver. The implementation includes reclaiming the corresponding context and the resources under that context, such as queues, jfs, ctp, jfc, and segments, after the user-space process has exited. 
Signed-off-by: Zhipeng Lu Signed-off-by: Bangwei Zhang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_chardev.c | 60 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_chardev.h | 2 ++ drivers/ub/cdma/cdma_context.c | 46 ++++++++++++++++++++++++++ drivers/ub/cdma/cdma_context.h | 1 + drivers/ub/cdma/cdma_event.c | 2 ++ drivers/ub/cdma/cdma_types.h | 6 ++++ 6 files changed, 117 insertions(+) diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 124b5701b253..a1a289eb0e91 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -176,11 +176,61 @@ static int cdma_mmap(struct file *file, struct vm_area_struct *vma) return 0; } +static void cdma_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct cdma_mn *mn_notifier = container_of(mn, struct cdma_mn, mn); + struct cdma_file *cfile = container_of(mn_notifier, struct cdma_file, mn_notifier); + + if (mn_notifier->mm != mm || mn_notifier->mm == NULL) { + pr_info("mm already released.\n"); + return; + } + mn_notifier->mm = NULL; + + mutex_lock(&cfile->ctx_mutex); + cdma_cleanup_context_uobj(cfile); + if (cfile->uctx) + cdma_cleanup_context_res(cfile->uctx); + cfile->uctx = NULL; + mutex_unlock(&cfile->ctx_mutex); +} + +static const struct mmu_notifier_ops cdma_mm_notifier_ops = { + .release = cdma_mmu_release +}; + +static int cdma_register_mmu(struct cdma_file *file) +{ + struct cdma_mn *mn_notifier = &file->mn_notifier; + int ret; + + mn_notifier->mm = current->mm; + mn_notifier->mn.ops = &cdma_mm_notifier_ops; + ret = mmu_notifier_register(&mn_notifier->mn, current->mm); + if (ret) + mn_notifier->mm = NULL; + + return ret; +} + +static void cdma_unregister_mmu(struct cdma_file *cfile) +{ + struct cdma_mn *mn_notifier = &cfile->mn_notifier; + struct mm_struct *mm = mn_notifier->mm; + + if (!mm) + return; + + cfile->mn_notifier.mm = NULL; + mmu_notifier_unregister(&cfile->mn_notifier.mn, mm); +} + static int 
cdma_open(struct inode *inode, struct file *file) { struct cdma_chardev *chardev; struct cdma_file *cfile; struct cdma_dev *cdev; + int ret; chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); @@ -189,6 +239,13 @@ static int cdma_open(struct inode *inode, struct file *file) if (!cfile) return -ENOMEM; + ret = cdma_register_mmu(cfile); + if (ret) { + dev_err(cdev->dev, "register mmu failed, ret = %d.\n", ret); + kfree(cfile); + return ret; + } + cdma_init_uobj_idr(cfile); mutex_lock(&cdev->file_mutex); cfile->cdev = cdev; @@ -216,6 +273,8 @@ static int cdma_close(struct inode *inode, struct file *file) mutex_lock(&cfile->ctx_mutex); cdma_cleanup_context_uobj(cfile); + if (cfile->uctx) + cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; mutex_unlock(&cfile->ctx_mutex); @@ -302,6 +361,7 @@ void cdma_release_file(struct kref *ref) { struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + cdma_unregister_mmu(cfile); mutex_destroy(&cfile->ctx_mutex); idr_destroy(&cfile->idr); kfree(cfile); diff --git a/drivers/ub/cdma/cdma_chardev.h b/drivers/ub/cdma/cdma_chardev.h index 5366dd77ea54..0bd4fcc654ff 100644 --- a/drivers/ub/cdma/cdma_chardev.h +++ b/drivers/ub/cdma/cdma_chardev.h @@ -4,6 +4,8 @@ #ifndef __CDMA_CHARDEV_H__ #define __CDMA_CHARDEV_H__ +#include + #define CDMA_TEST_NAME "cdma_dev" #define CDMA_MAX_DEVICES 1 #define CDMA_JETTY_DSQE_OFFSET 0x1000 diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c index e3b3e13d8a4e..c95ccb0c28b4 100644 --- a/drivers/ub/cdma/cdma_context.c +++ b/drivers/ub/cdma/cdma_context.c @@ -6,6 +6,11 @@ #include #include #include "cdma.h" +#include "cdma_queue.h" +#include "cdma_jfc.h" +#include "cdma_jfs.h" +#include "cdma_tp.h" +#include "cdma_segment.h" #include "cdma_context.h" static void cdma_ctx_handle_free(struct cdma_dev *cdev, @@ -133,3 +138,44 @@ void cdma_free_context(struct cdma_dev *cdev, struct cdma_context 
*ctx) mutex_destroy(&ctx->pgdir_mutex); kfree(ctx); } + +static void cdma_cleanup_queue_res(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + struct cdma_table *queue_tbl = &cdev->queue_table; + struct cdma_queue *queue, *next_queue; + + list_for_each_entry_safe(queue, next_queue, &ctx->queue_list, list) { + list_del(&queue->list); + + if (queue->jfs) + cdma_delete_jfs(cdev, queue->jfs->id); + + if (queue->tp) + cdma_delete_ctp(cdev, queue->tp->tp_id); + + if (queue->jfc) + cdma_delete_jfc(cdev, queue->jfc->id, NULL); + + spin_lock(&queue_tbl->lock); + idr_remove(&queue_tbl->idr_tbl.idr, queue->id); + spin_unlock(&queue_tbl->lock); + kfree(queue); + } +} + +static void cdma_cleanup_segment_res(struct cdma_dev *cdev, struct cdma_context *ctx) +{ + struct cdma_segment *segment, *next_segment; + + list_for_each_entry_safe(segment, next_segment, &ctx->seg_list, list) { + list_del(&segment->list); + cdma_unregister_seg(cdev, segment); + } +} + +void cdma_cleanup_context_res(struct cdma_context *ctx) +{ + cdma_cleanup_queue_res(ctx->cdev, ctx); + cdma_cleanup_segment_res(ctx->cdev, ctx); + cdma_free_context(ctx->cdev, ctx); +} diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 590bffb14cce..47736a281257 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -35,5 +35,6 @@ struct cdma_ctx_res { struct cdma_context *cdma_find_ctx_by_handle(struct cdma_dev *cdev, int handle); struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel); void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx); +void cdma_cleanup_context_res(struct cdma_context *ctx); #endif /* CDMA_CONTEXT_H */ diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index f887c52a0479..f2c51d4833ee 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -409,6 +409,7 @@ struct cdma_jfce *cdma_alloc_jfce(struct cdma_file *cfile) jfce->fd = new_fd; jfce->file = file; 
jfce->cfile = cfile; + kref_get(&cfile->ref); fd_install(new_fd, file); return jfce; @@ -655,6 +656,7 @@ struct cdma_jfae *cdma_alloc_jfae(struct cdma_file *cfile) jfae->fd = fd; jfae->file = file; jfae->cfile = cfile; + kref_get(&cfile->ref); fd_install(fd, file); return jfae; diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index e4c2f3fd7b52..afd59c2c4731 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -121,6 +121,11 @@ struct cdma_base_jfc { struct cdma_jfc_event jfc_event; }; +struct cdma_mn { + struct mmu_notifier mn; + struct mm_struct *mm; +}; + struct cdma_file { struct cdma_dev *cdev; struct list_head list; @@ -128,6 +133,7 @@ struct cdma_file { struct cdma_context *uctx; struct idr idr; spinlock_t idr_lock; + struct cdma_mn mn_notifier; struct kref ref; }; -- Gitee From e64eb7a92c426eee226df43459e7cfd4536aeebd Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:46:33 +0800 Subject: [PATCH 022/126] ub: cdma: support dma write semantic configuration commit 81b92c3f5cf6162d1fda6853750e6b0031390d80 openEuler This patch implements functionality related to DMA write semantic configuration within the CDMA driver. The implementation includes support for the dma_write interface. 
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 3 +- drivers/ub/cdma/cdma_api.c | 62 ++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 3 ++ drivers/ub/cdma/cdma_handle.c | 72 +++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 14 +++++++ drivers/ub/cdma/cdma_jfs.h | 70 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_types.h | 9 +++++ include/ub/cdma/cdma_api.h | 8 ++++ 8 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/cdma/cdma_handle.c create mode 100644 drivers/ub/cdma/cdma_handle.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 714e0542f387..cb3ea219f9e2 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -2,6 +2,7 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ - cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o + cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \ + cdma_handle.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 89d01159f797..c3656793abb2 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -11,6 +11,7 @@ #include "cdma_queue.h" #include "cdma_jfc.h" #include "cdma.h" +#include "cdma_handle.h" #include struct dma_device *dma_get_device_list(u32 *num_devices) @@ -437,6 +438,67 @@ void dma_unimport_seg(struct dma_seg *dma_seg) } EXPORT_SYMBOL_GPL(dma_unimport_seg); +static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id, + struct cdma_dev **cdev, + struct cdma_queue **cdma_queue) +{ + struct cdma_queue *tmp_q; + struct cdma_dev *tmp_dev; + u32 eid; + + eid = dma_dev->attr.eid.dw0; + tmp_dev = get_cdma_dev_by_eid(eid); + if (!tmp_dev) { + pr_err("get cdma dev failed, eid = 0x%x.\n", 
eid); + return -EINVAL; + } + + if (tmp_dev->status == CDMA_SUSPEND) { + pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); + return -EINVAL; + } + + tmp_q = cdma_find_queue(tmp_dev, queue_id); + if (!tmp_q) { + dev_err(tmp_dev->dev, "get resource failed.\n"); + return -EINVAL; + } + + if (!tmp_q->tp || !tmp_q->jfs || !tmp_q->jfc) { + dev_err(tmp_dev->dev, "get jetty parameters failed.\n"); + return -EFAULT; + } + + *cdev = tmp_dev; + *cdma_queue = tmp_q; + + return 0; +} + +enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("write input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_write); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index f0370bea2861..3d45f64f5926 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -23,6 +23,9 @@ #define CDMA_RANGE_INDEX_ENTRY_CNT 0x100000 #define CDMA_SEGMENT_ENTRY_CNT 0x10000 +#define CDMA_ENABLE_FLAG 1 +#define CDMA_DISABLE_FLAG 0 + #define CDMA_DB_SIZE 64 #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c new file mode 100644 index 000000000000..aa4274aac27b --- /dev/null +++ b/drivers/ub/cdma/cdma_handle.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include "cdma_jfs.h" +#include "cdma_common.h" +#include "cdma_handle.h" + +static int cdma_rw_check(struct cdma_dev *cdev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg) +{ + if (!rmt_seg->len || !local_seg->len) { + dev_err(cdev->dev, "invalid len.\n"); + return -EINVAL; + } + + if (!rmt_seg->sva || !local_seg->sva) { + dev_err(cdev->dev, "invalid address.\n"); + return -EINVAL; + } + + return 0; +} + +static inline void cdma_fill_comm_wr(struct cdma_jfs_wr *wr, + struct cdma_queue *queue) +{ + wr->flag.bs.complete_enable = CDMA_ENABLE_FLAG; + wr->flag.bs.inline_flag = CDMA_DISABLE_FLAG; + wr->flag.bs.fence = CDMA_ENABLE_FLAG; + wr->tpn = queue->tp->tpn; + wr->rmt_eid = queue->cfg.rmt_eid.dw0; + wr->next = NULL; +} + +static inline void cdma_fill_sge(struct cdma_sge_info *rmt_sge, + struct cdma_sge_info *local_sge, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg) +{ + local_sge->addr = local_seg->sva; + local_sge->len = local_seg->len; + local_sge->seg = local_seg; + + rmt_sge->addr = rmt_seg->sva; + rmt_sge->len = rmt_seg->len; + rmt_sge->seg = rmt_seg; +} + +int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; + struct cdma_sge_info rmt_sge, local_sge; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "write param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.rw.src.num_sge = 1; + wr.rw.src.sge = &local_sge; + wr.rw.dst.num_sge = 1; + wr.rw.dst.sge = &rmt_sge; + + return 0; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h new file mode 100644 index 000000000000..27a3f9495d18 --- /dev/null +++ b/drivers/ub/cdma/cdma_handle.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., 
Ltd. All rights reserved. */ + +#ifndef __CDMA_HANDLE_H__ +#define __CDMA_HANDLE_H__ + +#include "cdma_segment.h" +#include "cdma_queue.h" +#include "cdma.h" + +int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg); + +#endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index e4dcaa765a89..b34821701511 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -20,6 +20,74 @@ #define CDMA_RCV_SEND_MAX_DIFF 512U +union cdma_jfs_wr_flag { + struct { + /* 0: There is no order with other WR. + * 1: relax order. + * 2: strong order. + * 3: reserve. + */ + u32 place_order : 2; + /* 0: There is no completion order with other WR + * 1: Completion order with previous WR. + */ + u32 comp_order : 1; + /* 0: There is no fence. + * 1: Fence with previous read and atomic WR + */ + u32 fence : 1; + /* 0: not solicited. + * 1: solicited. It will trigger an event + * on remote side + */ + u32 solicited_enable : 1; + /* 0: Do not notify local process + * after the task is complete. + * 1: Notify local process + * after the task is completed. + */ + u32 complete_enable : 1; + /* 0: No inline. + * 1: Inline data. 
+ */ + u32 inline_flag : 1; + u32 reserved : 25; + } bs; + u32 value; +}; + +struct cdma_sge_info { + u64 addr; + u32 len; + struct dma_seg *seg; +}; + +struct cdma_sg { + struct cdma_sge_info *sge; + u32 num_sge; +}; + +struct cdma_rw_wr { + struct cdma_sg src; + struct cdma_sg dst; + u8 target_hint; /* hint of jetty in a target jetty group */ + u64 notify_data; /* notify data or immeditate data in host byte order */ + u64 notify_addr; + u32 notify_tokenid; + u32 notify_tokenvalue; +}; + +struct cdma_jfs_wr { + enum cdma_wr_opcode opcode; + union cdma_jfs_wr_flag flag; + u32 tpn; + u32 rmt_eid; + union { + struct cdma_rw_wr rw; + }; + struct cdma_jfs_wr *next; +}; + struct cdma_jfs { struct cdma_base_jfs base_jfs; struct cdma_dev *dev; @@ -155,5 +223,7 @@ struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg, struct cdma_udata *udata); int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); +int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, + struct cdma_jfs_wr **bad_wr); #endif diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index afd59c2c4731..0b861c891558 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -121,6 +121,15 @@ struct cdma_base_jfc { struct cdma_jfc_event jfc_event; }; +enum cdma_wr_opcode { + CDMA_WR_OPC_WRITE = 0x00, + CDMA_WR_OPC_WRITE_NOTIFY = 0x02, + CDMA_WR_OPC_READ = 0x10, + CDMA_WR_OPC_CAS = 0x20, + CDMA_WR_OPC_FADD = 0x22, + CDMA_WR_OPC_LAST +}; + struct cdma_mn { struct mmu_notifier mn; struct mm_struct *mm; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index ff69c268b569..bc30586a5c4f 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -72,6 +72,11 @@ struct dma_context { u32 tid; /* data valid only in bit 0-19 */ }; +enum dma_status { + DMA_STATUS_OK, + DMA_STATUS_INVAL, +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, 
u32 num_devices); @@ -96,6 +101,9 @@ struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg); void dma_unimport_seg(struct dma_seg *dma_seg); +enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From 5847ef31b8c81d8b5431405efe893d36e34083fd Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:53:44 +0800 Subject: [PATCH 023/126] ub: cdma: support dma write semantic delivery commit 43aea2fc121a93eb5c3f95992018763b7860b0c9 openEuler This patch implements the functionality of issuing dma write semantics in the CDMA driver. The implementation includes executing the issuance of semantics after the configuration of semantics in the dma_write interface is completed. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_common.h | 6 + drivers/ub/cdma/cdma_handle.c | 8 +- drivers/ub/cdma/cdma_jfs.c | 301 ++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 49 ++++++ 4 files changed, 363 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 3d45f64f5926..a0bb3758ae3a 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -30,6 +30,12 @@ #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) +/* thanks to include/rdma/ib_verbs.h */ +enum cdma_sq_opcode { + CDMA_OPC_WRITE = 0x3, + CDMA_OPC_INVALID = 0x12, +}; + enum cdma_jfsc_mode { CDMA_JFS_MODE, CDMA_JETTY_MODE, diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index aa4274aac27b..11446f2a8307 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -53,6 +53,8 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, { struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; struct cdma_sge_info rmt_sge, local_sge; + struct 
cdma_jfs_wr *bad_wr = NULL; + int ret; if (cdma_rw_check(cdev, rmt_seg, local_seg)) { dev_err(cdev->dev, "write param check failed.\n"); @@ -68,5 +70,9 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, wr.rw.dst.num_sge = 1; wr.rw.dst.sge = &rmt_sge; - return 0; + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for write failed, ret = %d.\n", ret); + + return ret; } diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index abc05c44432b..86bc71851731 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -558,3 +558,304 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return 0; } + +static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) +{ + switch (opcode) { + case CDMA_WR_OPC_WRITE: + return CDMA_OPC_WRITE; + default: + return CDMA_OPC_INVALID; + } +} + +static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq) +{ + return tmp_sq->sge_num; +} + +static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, + struct cdma_jfs_wr *wr) +{ + return wr->rw.src.num_sge > sq->max_sge_num; +} + +static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr, + struct cdma_normal_sge *sge) +{ + struct cdma_sge_info *sge_info; + u32 sge_num = 0; + u32 num_sge; + u32 i; + + switch (wr->opcode) { + case CDMA_WR_OPC_WRITE: + sge_info = wr->rw.src.sge; + num_sge = wr->rw.src.num_sge; + break; + default: + return -EINVAL; + } + + for (i = 0; i < num_sge; i++) { + if (sge_info[i].len == 0) + continue; + sge->va = sge_info[i].addr; + sge->length = sge_info[i].len; + sge->token_id = sge_info[i].seg->tid; + sge++; + sge_num++; + } + sqe_ctl->sge_num = sge_num; + + return 0; +} + +static inline u32 cdma_get_ctl_len(u8 opcode) +{ + return SQE_NORMAL_CTL_LEN; +} + +static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info 
*sge_info; + struct cdma_normal_sge *sge; + u32 ctrl_len; + + ctrl_len = cdma_get_ctl_len(sqe_ctl->opcode); + sge = (struct cdma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + if (cdma_fill_sw_sge(sqe_ctl, wr, sge)) + return -EINVAL; + + sge_info = wr->rw.dst.sge; + + sqe_ctl->toid = sge_info[0].seg->tid; + sqe_ctl->token_en = sge_info[0].seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info[0].seg->token_value; + sqe_ctl->target_hint = wr->rw.target_hint; + sqe_ctl->rmt_addr_l_or_token_id = + sge_info[0].addr & (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + +static int cdma_fill_normal_sge(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + switch (wr->opcode) { + case CDMA_WR_OPC_WRITE: + return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); + default: + dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", + (u8)wr->opcode); + return -EINVAL; + } +} + +static int cdma_set_sqe(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr, struct cdma_jetty_queue *sq, + u8 opcode) +{ + int ret; + + sqe_ctl->cqe = wr->flag.bs.complete_enable; + sqe_ctl->owner = (sq->pi & sq->buf.entry_cnt) == 0 ? 
1 : 0; + sqe_ctl->opcode = opcode; + sqe_ctl->tpn = wr->tpn; + sqe_ctl->place_odr = wr->flag.bs.place_order; + sqe_ctl->fence = wr->flag.bs.fence; + sqe_ctl->comp_order = wr->flag.bs.comp_order; + sqe_ctl->se = wr->flag.bs.solicited_enable; + sqe_ctl->inline_en = 0; + memcpy(sqe_ctl->rmt_eid, &wr->rmt_eid, sizeof(wr->rmt_eid)); + + ret = cdma_fill_normal_sge(cdev, sqe_ctl, wr); + if (ret) + dev_err(cdev->dev, + "cdma fill normal sge failed, wr opcode = %u.\n", + (u8)wr->opcode); + + return ret; +} + +static u32 cdma_cal_wqebb_num(struct cdma_jfs_wr *wr, u8 opcode, + struct cdma_sqe_ctl *tmp_sq) +{ + u32 normal_sge_num; + u32 sqe_ctl_len; + u32 wqebb_cnt; + + sqe_ctl_len = cdma_get_ctl_len(opcode); + + normal_sge_num = cdma_get_normal_sge_num(opcode, tmp_sq); + wqebb_cnt = cdma_sq_cal_wqebb_num(sqe_ctl_len, normal_sge_num); + + return wqebb_cnt; +} + +static inline bool to_check_sq_overflow(struct cdma_jetty_queue *sq, + u32 wqebb_cnt) +{ + return (sq->pi - sq->ci + wqebb_cnt) > sq->buf.entry_cnt; +} + +static int cdma_copy_to_sq(struct cdma_jetty_queue *sq, u32 wqebb_cnt, + struct cdma_jfs_wqebb *tmp_sq) +{ + u32 remain = sq->buf.entry_cnt - (sq->pi & (sq->buf.entry_cnt - 1)); + u32 tail_cnt; + u32 head_cnt; + + if (to_check_sq_overflow(sq, wqebb_cnt)) + return -ENOMEM; + + tail_cnt = remain > wqebb_cnt ? wqebb_cnt : remain; + head_cnt = wqebb_cnt - tail_cnt; + + memcpy(sq->kva_curr, tmp_sq, tail_cnt * sizeof(*tmp_sq)); + if (head_cnt) + memcpy(sq->buf.kva, tmp_sq + tail_cnt, + head_cnt * sizeof(*tmp_sq)); + + return 0; +} + +static void *cdma_k_update_ptr(u32 total_size, u32 wqebb_size, u8 *base_addr, + u8 *curr_addr) +{ + u8 *end_addr; + + end_addr = base_addr + total_size; + curr_addr = ((curr_addr + wqebb_size) < end_addr) ? 
+ (curr_addr + wqebb_size) : + base_addr + (curr_addr + wqebb_size - end_addr); + + return curr_addr; +} + +static int cdma_post_one_wr(struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr, + struct cdma_dev *cdev, + struct cdma_sqe_ctl **dwqe_addr, u8 *dwqe_enable) +{ + struct cdma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = { 0 }; + u32 wqebb_cnt; + u8 opcode; + int ret; + + opcode = cdma_get_jfs_opcode(wr->opcode); + if (opcode == CDMA_OPC_INVALID) { + dev_err(cdev->dev, "cdma invalid opcode = %u.\n", wr->opcode); + return -EINVAL; + } + + if (cdma_k_check_sge_num(opcode, sq, wr)) { + dev_err(cdev->dev, "cdma sge num invalid, opcode = %u.\n", + opcode); + return -EINVAL; + } + + ret = cdma_set_sqe(cdev, (struct cdma_sqe_ctl *)tmp_sq, wr, sq, opcode); + if (ret) + return ret; + + wqebb_cnt = + cdma_cal_wqebb_num(wr, opcode, (struct cdma_sqe_ctl *)tmp_sq); + if (wqebb_cnt == 1 && + !!(cdev->caps.feature & CDMA_CAP_FEATURE_DIRECT_WQE)) + *dwqe_enable = 1; + + ret = cdma_copy_to_sq(sq, wqebb_cnt, tmp_sq); + if (ret) { + dev_err(cdev->dev, "cdma jfs overflow, wqebb_cnt = %u.\n", + wqebb_cnt); + return ret; + } + + *dwqe_addr = sq->kva_curr; + + sq->kva_curr = cdma_k_update_ptr(sq->buf.entry_cnt * sq->buf.entry_size, + wqebb_cnt * sq->buf.entry_size, + (u8 *)sq->buf.kva, (u8 *)sq->kva_curr); + + sq->pi += wqebb_cnt; + + return 0; +} + +static void cdma_write_dsqe(struct cdma_jetty_queue *sq, + struct cdma_sqe_ctl *ctrl) +{ +#define DWQE_SIZE 8 + int i; + + ctrl->sqe_bb_idx = sq->pi; + for (i = 0; i < DWQE_SIZE; i++) + writeq_relaxed(*((u64 *)ctrl + i), (u64 *)sq->dwqe_addr + i); +} + +static inline void cdma_k_update_sq_db(struct cdma_jetty_queue *sq) +{ + u32 *db_addr = (u32 *)sq->db_addr; + *db_addr = sq->pi; +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +static int cdma_post_sq_wr(struct cdma_dev *cdev, struct cdma_jetty_queue *sq, + struct cdma_jfs_wr *wr, struct cdma_jfs_wr **bad_wr) +{ + struct cdma_sqe_ctl *dwqe_addr; + struct cdma_jfs_wr *it; + u8 
dwqe_enable = 0; + int wr_cnt = 0; + int ret = 0; + + spin_lock(&sq->lock); + + for (it = wr; it != NULL; it = it->next) { + ret = cdma_post_one_wr(sq, it, cdev, &dwqe_addr, &dwqe_enable); + if (ret) { + dev_err(cdev->dev, "cdma post one wr failed.\n"); + *bad_wr = it; + goto post_wr; + } + wr_cnt++; + } + +post_wr: + if (wr_cnt) { + if (cdev->status != CDMA_SUSPEND) { + /* Ensure the order of write memory operations */ + wmb(); + if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) + cdma_write_dsqe(sq, dwqe_addr); + else + cdma_k_update_sq_db(sq); + } + } + + spin_unlock(&sq->lock); + + return ret; +} + +int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, + struct cdma_jfs_wr **bad_wr) +{ + struct cdma_dev *cdev = jfs->dev; + int ret; + + ret = cdma_post_sq_wr(cdev, &jfs->sq, wr, bad_wr); + if (ret) + dev_err(cdev->dev, + "cdma post jfs wr failed, sq_id = %u.\n", jfs->sq.id); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index b34821701511..98374b278737 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -9,9 +9,13 @@ #include "cdma_segment.h" #define MAX_WQEBB_NUM 4 +#define CDMA_SQE_RMT_EID_SIZE 4 #define CDMA_JFS_WQEBB_SIZE 64 +#define SQE_NORMAL_CTL_LEN 48 #define CDMA_JFS_SGE_SIZE 16 #define SQE_WRITE_NOTIFY_CTL_LEN 80 +#define SQE_CTL_RMA_ADDR_OFFSET 32 +#define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) #define CDMA_TA_TIMEOUT_128MS 128 #define CDMA_TA_TIMEOUT_1000MS 1000 @@ -20,6 +24,45 @@ #define CDMA_RCV_SEND_MAX_DIFF 512U +struct cdma_jfs_wqebb { + u32 value[16]; +}; + +struct cdma_sqe_ctl { + /* DW0 */ + u32 sqe_bb_idx : 16; + u32 place_odr : 2; + u32 comp_order : 1; + u32 fence : 1; + u32 se : 1; + u32 cqe : 1; + u32 inline_en : 1; + u32 rsv : 5; + u32 token_en : 1; + u32 rmt_jetty_type : 2; + u32 owner : 1; + /* DW1 */ + u32 target_hint : 8; + u32 opcode : 8; + u32 rsv1 : 6; + u32 inline_msg_len : 10; + /* DW2 */ + u32 tpn : 24; + u32 sge_num : 8; + /* DW3 */ + u32 toid : 
20; + u32 rsv2 : 12; + /* DW4~7 */ + u32 rmt_eid[CDMA_SQE_RMT_EID_SIZE]; + /* DW8 */ + u32 rmt_token_value; + /* DW9~11 */ + u32 rsv3; + u32 rmt_addr_l_or_token_id; + u32 rmt_addr_h_or_token_value; +}; + + union cdma_jfs_wr_flag { struct { /* 0: There is no order with other WR. @@ -62,6 +105,12 @@ struct cdma_sge_info { struct dma_seg *seg; }; +struct cdma_normal_sge { + u32 length; + u32 token_id; + u64 va; +}; + struct cdma_sg { struct cdma_sge_info *sge; u32 num_sge; -- Gitee From 4a7ed4bec6a2106c62fb33cddc89b149ace5e22d Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:56:41 +0800 Subject: [PATCH 024/126] ub: cdma: support dma write with notify semantic commit 26598fa61f1a005665e4dba668fbe5c0844072fa openEuler This patch implements functionality related to the configuration and issuance of DMA write with notify semantics in the CDMA driver. The implementation includes support for the dma_write_with_notify interface and the process of configuring and issuing semantics within this interface. 
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 28 +++++++++++++++++++++++++++- drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 11 ++++++++++- drivers/ub/cdma/cdma_handle.h | 3 ++- drivers/ub/cdma/cdma_jfs.c | 28 +++++++++++++++++++++++++++- drivers/ub/cdma/cdma_jfs.h | 11 ++++++++++- include/ub/cdma/cdma_api.h | 10 ++++++++++ 7 files changed, 87 insertions(+), 5 deletions(-) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index c3656793abb2..33f9ce38bb98 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -491,7 +491,7 @@ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, if (ret) return DMA_STATUS_INVAL; - ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg); + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg, NULL); if (ret) return DMA_STATUS_INVAL; @@ -499,6 +499,32 @@ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_write); +enum dma_status dma_write_with_notify(struct dma_device *dma_dev, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_notify_data *data) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg || !data || !data->notify_seg) { + pr_err("write with notify input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_write(cdev, cdma_queue, local_seg, rmt_seg, data); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_write_with_notify); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index a0bb3758ae3a..cd4f48c2dce4 100644 --- 
a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -33,6 +33,7 @@ /* thanks to include/rdma/ib_verbs.h */ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, + CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index 11446f2a8307..ca6daeb03f4c 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -49,7 +49,8 @@ static inline void cdma_fill_sge(struct cdma_sge_info *rmt_sge, } int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, - struct dma_seg *local_seg, struct dma_seg *rmt_seg) + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_notify_data *data) { struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_WRITE }; struct cdma_sge_info rmt_sge, local_sge; @@ -61,6 +62,14 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, return -EINVAL; } + if (data) { + wr.opcode = CDMA_WR_OPC_WRITE_NOTIFY; + wr.rw.notify_addr = data->notify_seg->sva; + wr.rw.notify_data = data->notify_data; + wr.rw.notify_tokenid = data->notify_seg->tid; + wr.rw.notify_tokenvalue = data->notify_seg->token_value; + } + cdma_fill_comm_wr(&wr, queue); cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 27a3f9495d18..e9394e2c321a 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -9,6 +9,7 @@ #include "cdma.h" int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, - struct dma_seg *local_seg, struct dma_seg *rmt_seg); + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_notify_data *data); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 86bc71851731..e8a1677a1f2e 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -564,6 +564,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) switch (opcode) { case CDMA_WR_OPC_WRITE: 
return CDMA_OPC_WRITE; + case CDMA_WR_OPC_WRITE_NOTIFY: + return CDMA_OPC_WRITE_WITH_NOTIFY; default: return CDMA_OPC_INVALID; } @@ -577,7 +579,13 @@ static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { - return wr->rw.src.num_sge > sq->max_sge_num; + switch (opcode) { + case CDMA_OPC_WRITE_WITH_NOTIFY: + return wr->rw.src.num_sge > CDMA_JFS_MAX_SGE_NOTIFY || + wr->rw.src.num_sge > sq->max_sge_num; + default: + return wr->rw.src.num_sge > sq->max_sge_num; + } } static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, @@ -591,6 +599,7 @@ static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, switch (wr->opcode) { case CDMA_WR_OPC_WRITE: + case CDMA_WR_OPC_WRITE_NOTIFY: sge_info = wr->rw.src.sge; num_sge = wr->rw.src.num_sge; break; @@ -614,6 +623,9 @@ static int cdma_fill_sw_sge(struct cdma_sqe_ctl *sqe_ctl, static inline u32 cdma_get_ctl_len(u8 opcode) { + if (opcode == CDMA_OPC_WRITE_WITH_NOTIFY) + return SQE_WRITE_NOTIFY_CTL_LEN; + return SQE_NORMAL_CTL_LEN; } @@ -621,6 +633,7 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) { + struct cdma_token_info *token_info; struct cdma_sge_info *sge_info; struct cdma_normal_sge *sge; u32 ctrl_len; @@ -643,6 +656,18 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & (u32)SQE_CTL_RMA_ADDR_BIT; + if (sqe_ctl->opcode == CDMA_OPC_WRITE_WITH_NOTIFY) { + token_info = (struct cdma_token_info *) + ((void *)sqe_ctl + SQE_NOTIFY_TOKEN_ID_FIELD); + token_info->token_id = wr->rw.notify_tokenid; + token_info->token_value = wr->rw.notify_tokenvalue; + + memcpy((void *)sqe_ctl + SQE_NOTIFY_ADDR_FIELD, + &wr->rw.notify_addr, sizeof(u64)); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->rw.notify_data, sizeof(u64)); + } + return 0; } @@ -652,6 +677,7 @@ static int 
cdma_fill_normal_sge(struct cdma_dev *cdev, { switch (wr->opcode) { case CDMA_WR_OPC_WRITE: + case CDMA_WR_OPC_WRITE_NOTIFY: return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 98374b278737..b94f8aca2d99 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -12,10 +12,14 @@ #define CDMA_SQE_RMT_EID_SIZE 4 #define CDMA_JFS_WQEBB_SIZE 64 #define SQE_NORMAL_CTL_LEN 48 +#define CDMA_JFS_MAX_SGE_NOTIFY 11 #define CDMA_JFS_SGE_SIZE 16 #define SQE_WRITE_NOTIFY_CTL_LEN 80 #define SQE_CTL_RMA_ADDR_OFFSET 32 #define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) +#define SQE_NOTIFY_TOKEN_ID_FIELD 48 +#define SQE_NOTIFY_ADDR_FIELD 56 +#define SQE_ATOMIC_DATA_FIELD 64 #define CDMA_TA_TIMEOUT_128MS 128 #define CDMA_TA_TIMEOUT_1000MS 1000 @@ -28,6 +32,12 @@ struct cdma_jfs_wqebb { u32 value[16]; }; +struct cdma_token_info { + u32 token_id : 20; + u32 rsv : 12; + u32 token_value; +}; + struct cdma_sqe_ctl { /* DW0 */ u32 sqe_bb_idx : 16; @@ -62,7 +72,6 @@ struct cdma_sqe_ctl { u32 rmt_addr_h_or_token_value; }; - union cdma_jfs_wr_flag { struct { /* 0: There is no order with other WR. 
diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index bc30586a5c4f..39dff8f6378f 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -77,6 +77,11 @@ enum dma_status { DMA_STATUS_INVAL, }; +struct dma_notify_data { + struct dma_seg *notify_seg; + u64 notify_data; +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); @@ -104,6 +109,11 @@ void dma_unimport_seg(struct dma_seg *dma_seg); enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id); +enum dma_status dma_write_with_notify(struct dma_device *dma_dev, + struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_notify_data *data); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From 18838a350bc30f108984750a976c13af0fb35668 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Fri, 29 Aug 2025 15:59:42 +0800 Subject: [PATCH 025/126] ub: cdma: support dma read semantic commit 75367599a7c1a46e8705a01b176762bb8715d233 openEuler This patch implements functionality related to the configuration and issuance of DMA read semantics in the CDMA driver. The implementation includes support for the dma_read interface and the process of configuring and issuing semantics within this interface. 
Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 24 ++++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 29 ++++++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 2 ++ drivers/ub/cdma/cdma_jfs.c | 42 +++++++++++++++++++++++++++++++++++ include/ub/cdma/cdma_api.h | 3 +++ 6 files changed, 101 insertions(+) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 33f9ce38bb98..c4038ad740c5 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -525,6 +525,30 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, } EXPORT_SYMBOL_GPL(dma_write_with_notify); +enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("read input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_read(cdev, cdma_queue, local_seg, rmt_seg); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_read); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index cd4f48c2dce4..3858756a9e5b 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -34,6 +34,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, + CDMA_OPC_READ = 0x6, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index ca6daeb03f4c..8646e2b08519 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -85,3 +85,32 @@ int cdma_write(struct cdma_dev *cdev, 
struct cdma_queue *queue, return ret; } + +int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_READ }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "read param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.rw.src.num_sge = 1; + wr.rw.src.sge = &rmt_sge; + wr.rw.dst.num_sge = 1; + wr.rw.dst.sge = &local_sge; + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for read failed, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index e9394e2c321a..aaf7ad61044f 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -11,5 +11,7 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, struct dma_notify_data *data); +int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index e8a1677a1f2e..a505a00361cb 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -566,6 +566,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) return CDMA_OPC_WRITE; case CDMA_WR_OPC_WRITE_NOTIFY: return CDMA_OPC_WRITE_WITH_NOTIFY; + case CDMA_WR_OPC_READ: + return CDMA_OPC_READ; default: return CDMA_OPC_INVALID; } @@ -580,6 +582,8 @@ static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { switch (opcode) { + case CDMA_OPC_READ: + return wr->rw.dst.num_sge > sq->max_sge_num; case CDMA_OPC_WRITE_WITH_NOTIFY: return 
wr->rw.src.num_sge > CDMA_JFS_MAX_SGE_NOTIFY || wr->rw.src.num_sge > sq->max_sge_num; @@ -671,6 +675,42 @@ static int cdma_k_fill_write_sqe(struct cdma_dev *cdev, return 0; } +static int cdma_k_fill_read_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + u32 sge_num = 0; + u32 num; + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge_info = wr->rw.dst.sge; + + for (num = 0; num < wr->rw.dst.num_sge; num++) { + if (!sge_info[num].len) + continue; + sge->va = sge_info[num].addr; + sge->length = sge_info[num].len; + sge->token_id = sge_info[num].seg->tid; + sge++; + sge_num++; + } + + sge_info = wr->rw.src.sge; + sqe_ctl->sge_num = sge_num; + sqe_ctl->toid = sge_info[0].seg->tid; + sqe_ctl->token_en = sge_info[0].seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info[0].seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = + sge_info[0].addr & (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -679,6 +719,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, case CDMA_WR_OPC_WRITE: case CDMA_WR_OPC_WRITE_NOTIFY: return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_READ: + return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", (u8)wr->opcode); diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 39dff8f6378f..eb425553d6ac 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -114,6 +114,9 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, struct dma_seg *local_seg, int queue_id, struct dma_notify_data *data); +enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, 
+ struct dma_seg *local_seg, int queue_id); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From dcde3c3577dce8787de9aabc35a38fb7b555b22e Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 21:43:46 +0800 Subject: [PATCH 026/126] ub: cdma: support dma cas semantic commit 89f3c2b942a6cc9812e123be53644044d201037a openEuler This patch implements functionality related to the configuration and issuance of DMA cas semantics in the CDMA driver. The implementation includes support for the dma_cas interface and the process of configuring and issuing semantics within this interface. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 25 ++++++++++++ drivers/ub/cdma/cdma_common.h | 5 +++ drivers/ub/cdma/cdma_handle.c | 36 +++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 3 ++ drivers/ub/cdma/cdma_jfs.c | 73 ++++++++++++++++++++++++++++++++++- drivers/ub/cdma/cdma_jfs.h | 23 +++++++++++ include/ub/cdma/cdma_api.h | 9 +++++ 7 files changed, 173 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index c4038ad740c5..e9fec2437cd5 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -549,6 +549,31 @@ enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_read); +enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_cas_data *data) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg || !data) { + pr_err("cas input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_cas(cdev, cdma_queue, local_seg, rmt_seg, data); + if (ret) + return 
DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_cas); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 3858756a9e5b..54d0e3e43af4 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -28,6 +28,10 @@ #define CDMA_DB_SIZE 64 +#define CDMA_ATOMIC_LEN_4 4 +#define CDMA_ATOMIC_LEN_8 8 +#define CDMA_ATOMIC_LEN_16 16 + #define SQE_PLD_TOKEN_ID_MASK GENMASK(19, 0) /* thanks to include/rdma/ib_verbs.h */ @@ -35,6 +39,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE = 0x3, CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_READ = 0x6, + CDMA_OPC_CAS, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index 8646e2b08519..b172383e10e6 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -114,3 +114,39 @@ int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, return ret; } + +int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_cas_data *data) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_CAS }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "cas param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.cas.src = &local_sge; + wr.cas.dst = &rmt_sge; + + if (local_sge.len <= CDMA_ATOMIC_LEN_8) { + wr.cas.cmp_data = data->compare_data; + wr.cas.swap_data = data->swap_data; + } else { + wr.cas.cmp_addr = data->compare_data; + wr.cas.swap_addr = data->swap_data; + } + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for cas failed, ret = %d.\n", ret); + + return ret; +} diff --git 
a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index aaf7ad61044f..8c99a0a0cb32 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -13,5 +13,8 @@ int cdma_write(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_notify_data *data); int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg); +int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, + struct dma_cas_data *data); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index a505a00361cb..79d5074ff4ef 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -568,6 +568,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) return CDMA_OPC_WRITE_WITH_NOTIFY; case CDMA_WR_OPC_READ: return CDMA_OPC_READ; + case CDMA_WR_OPC_CAS: + return CDMA_OPC_CAS; default: return CDMA_OPC_INVALID; } @@ -575,13 +577,20 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq) { - return tmp_sq->sge_num; + switch (opcode) { + case CDMA_OPC_CAS: + return CDMA_ATOMIC_SGE_NUM_ATOMIC; + default: + return tmp_sq->sge_num; + } } static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, struct cdma_jfs_wr *wr) { switch (opcode) { + case CDMA_OPC_CAS: + return sq->max_sge_num == 0; case CDMA_OPC_READ: return wr->rw.dst.num_sge > sq->max_sge_num; case CDMA_OPC_WRITE_WITH_NOTIFY: @@ -711,6 +720,66 @@ static int cdma_k_fill_read_sqe(struct cdma_dev *cdev, return 0; } +static bool cdma_check_atomic_len(u32 len, u8 opcode) +{ + switch (len) { + case CDMA_ATOMIC_LEN_4: + case CDMA_ATOMIC_LEN_8: + return true; + case CDMA_ATOMIC_LEN_16: + if (opcode == CDMA_WR_OPC_CAS) + return true; + return false; + default: + return false; + } +} + +static int cdma_k_fill_cas_sqe(struct cdma_dev *cdev, + struct 
cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + + sge_info = wr->cas.src; + if (!cdma_check_atomic_len(sge_info->len, wr->opcode)) { + dev_err(cdev->dev, "cdma cas sge len invalid, len = %u.\n", + sge_info->len); + return -EINVAL; + } + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge->va = sge_info->addr; + sge->length = sge_info->len; + sge->token_id = sge_info->seg->tid; + + sge_info = wr->cas.dst; + sqe_ctl->sge_num = CDMA_ATOMIC_SGE_NUM; + sqe_ctl->toid = sge_info->seg->tid; + sqe_ctl->token_en = sge_info->seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info->seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & + (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info->addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= CDMA_ATOMIC_LEN_8) { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->cas.swap_data, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + &wr->cas.cmp_data, sge->length); + } else { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (char *)wr->cas.swap_addr, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + (char *)wr->cas.cmp_addr, sge->length); + } + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -721,6 +790,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, return cdma_k_fill_write_sqe(cdev, sqe_ctl, wr); case CDMA_WR_OPC_READ: return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_CAS: + return cdma_k_fill_cas_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", (u8)wr->opcode); diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index b94f8aca2d99..8637c2a80074 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -15,6 +15,8 @@ 
#define CDMA_JFS_MAX_SGE_NOTIFY 11 #define CDMA_JFS_SGE_SIZE 16 #define SQE_WRITE_NOTIFY_CTL_LEN 80 +#define CDMA_ATOMIC_SGE_NUM 1 +#define CDMA_ATOMIC_SGE_NUM_ATOMIC 2 #define SQE_CTL_RMA_ADDR_OFFSET 32 #define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) #define SQE_NOTIFY_TOKEN_ID_FIELD 48 @@ -135,6 +137,26 @@ struct cdma_rw_wr { u32 notify_tokenvalue; }; +struct cdma_cas_wr { + struct cdma_sge_info *dst; /* len in the sge is the length of CAS + * operation, only support 4/8/16B + */ + struct cdma_sge_info *src; /* local address for destination original + * value written back + */ + union { + u64 cmp_data; /* when the len is 4/8B, it indicates the compare value. */ + u64 cmp_addr; /* when the len is 16B, it indicates the data address. */ + }; + union { + /* if destination value is the same as cmp_data, + * destination value will be changed to swap_data. + */ + u64 swap_data; + u64 swap_addr; + }; +}; + struct cdma_jfs_wr { enum cdma_wr_opcode opcode; union cdma_jfs_wr_flag flag; @@ -142,6 +164,7 @@ struct cdma_jfs_wr { u32 rmt_eid; union { struct cdma_rw_wr rw; + struct cdma_cas_wr cas; }; struct cdma_jfs_wr *next; }; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index eb425553d6ac..8a70bbb4d49c 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -77,6 +77,11 @@ enum dma_status { DMA_STATUS_INVAL, }; +struct dma_cas_data { + u64 compare_data; + u64 swap_data; +}; + struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; @@ -117,6 +122,10 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id); +enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, + struct dma_cas_data *data); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From 
1e49396420e3eea51e5e8a3cd78bbea6b2ca2b05 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 25 Aug 2025 21:48:38 +0800 Subject: [PATCH 027/126] ub: cdma: support dma faa semantic commit 6022413537d64ed9c15cb308617ddb27f6e128ce openEuler This patch implements functionality related to the configuration and issuance of DMA faa semantics in the CDMA driver. The implementation includes support for the dma_faa interface and the process of configuring and issuing semantics within this interface. Signed-off-by: Zhipeng Lu Signed-off-by: Jinjie Cui Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_api.c | 24 ++++++++++++++++++ drivers/ub/cdma/cdma_common.h | 1 + drivers/ub/cdma/cdma_handle.c | 28 +++++++++++++++++++++ drivers/ub/cdma/cdma_handle.h | 2 ++ drivers/ub/cdma/cdma_jfs.c | 46 +++++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_jfs.h | 14 +++++++++++ include/ub/cdma/cdma_api.h | 3 +++ 7 files changed, 118 insertions(+) diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index e9fec2437cd5..8a30d20a1a09 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -574,6 +574,30 @@ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_cas); +enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, u64 add) +{ + struct cdma_queue *cdma_queue = NULL; + struct cdma_dev *cdev = NULL; + int ret; + + if (!dma_dev || !rmt_seg || !local_seg) { + pr_err("faa input parameters error.\n"); + return DMA_STATUS_INVAL; + } + + ret = cdma_param_transfer(dma_dev, queue_id, &cdev, &cdma_queue); + if (ret) + return DMA_STATUS_INVAL; + + ret = cdma_faa(cdev, cdma_queue, local_seg, rmt_seg, add); + if (ret) + return DMA_STATUS_INVAL; + + return DMA_STATUS_OK; +} +EXPORT_SYMBOL_GPL(dma_faa); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { diff --git 
a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 54d0e3e43af4..58855991647d 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -40,6 +40,7 @@ enum cdma_sq_opcode { CDMA_OPC_WRITE_WITH_NOTIFY = 0x5, CDMA_OPC_READ = 0x6, CDMA_OPC_CAS, + CDMA_OPC_FAA = 0xb, CDMA_OPC_INVALID = 0x12, }; diff --git a/drivers/ub/cdma/cdma_handle.c b/drivers/ub/cdma/cdma_handle.c index b172383e10e6..183f802cfdbf 100644 --- a/drivers/ub/cdma/cdma_handle.c +++ b/drivers/ub/cdma/cdma_handle.c @@ -150,3 +150,31 @@ int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, return ret; } + +int cdma_faa(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add) +{ + struct cdma_jfs_wr wr = { .opcode = CDMA_WR_OPC_FADD }; + struct cdma_sge_info rmt_sge, local_sge; + struct cdma_jfs_wr *bad_wr = NULL; + int ret; + + if (cdma_rw_check(cdev, rmt_seg, local_seg)) { + dev_err(cdev->dev, "faa param check failed.\n"); + return -EINVAL; + } + + cdma_fill_comm_wr(&wr, queue); + + cdma_fill_sge(&rmt_sge, &local_sge, rmt_seg, local_seg); + + wr.faa.src = &local_sge; + wr.faa.dst = &rmt_sge; + wr.faa.operand = add; + + ret = cdma_post_jfs_wr((struct cdma_jfs *)queue->jfs, &wr, &bad_wr); + if (ret) + dev_err(cdev->dev, "post jfs for faa failed, ret = %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 8c99a0a0cb32..00cb8049778e 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -16,5 +16,7 @@ int cdma_read(struct cdma_dev *cdev, struct cdma_queue *queue, int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, struct dma_cas_data *data); +int cdma_faa(struct cdma_dev *cdev, struct cdma_queue *queue, + struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add); #endif /* CDMA_HANDLE_H */ diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c 
index 79d5074ff4ef..cbb47a7f56db 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -570,6 +570,8 @@ static u8 cdma_get_jfs_opcode(enum cdma_wr_opcode opcode) return CDMA_OPC_READ; case CDMA_WR_OPC_CAS: return CDMA_OPC_CAS; + case CDMA_WR_OPC_FADD: + return CDMA_OPC_FAA; default: return CDMA_OPC_INVALID; } @@ -579,6 +581,7 @@ static inline u32 cdma_get_normal_sge_num(u8 opcode, struct cdma_sqe_ctl *tmp_sq { switch (opcode) { case CDMA_OPC_CAS: + case CDMA_OPC_FAA: return CDMA_ATOMIC_SGE_NUM_ATOMIC; default: return tmp_sq->sge_num; @@ -590,6 +593,7 @@ static bool cdma_k_check_sge_num(u8 opcode, struct cdma_jetty_queue *sq, { switch (opcode) { case CDMA_OPC_CAS: + case CDMA_OPC_FAA: return sq->max_sge_num == 0; case CDMA_OPC_READ: return wr->rw.dst.num_sge > sq->max_sge_num; @@ -780,6 +784,46 @@ static int cdma_k_fill_cas_sqe(struct cdma_dev *cdev, return 0; } +static int cdma_k_fill_faa_sqe(struct cdma_dev *cdev, + struct cdma_sqe_ctl *sqe_ctl, + struct cdma_jfs_wr *wr) +{ + struct cdma_sge_info *sge_info; + struct cdma_normal_sge *sge; + + sge_info = wr->faa.src; + if (!cdma_check_atomic_len(sge_info->len, wr->opcode)) { + dev_err(cdev->dev, "cdma faa sge len invalid, len = %u.\n", + sge_info->len); + return -EINVAL; + } + + sge = (struct cdma_normal_sge *)(sqe_ctl + 1); + sge->va = sge_info->addr; + sge->length = sge_info->len; + sge->token_id = sge_info->seg->tid; + + sge_info = wr->faa.dst; + sqe_ctl->sge_num = CDMA_ATOMIC_SGE_NUM; + sqe_ctl->toid = sge_info->seg->tid; + sqe_ctl->token_en = sge_info->seg->token_value_valid; + sqe_ctl->rmt_token_value = sge_info->seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & + (u32)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info->addr >> (u32)SQE_CTL_RMA_ADDR_OFFSET) & + (u32)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= CDMA_ATOMIC_LEN_8) + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->faa.operand, sge->length); + else + memcpy((void *)sqe_ctl + 
SQE_ATOMIC_DATA_FIELD, + (void *)wr->faa.operand_addr, sge->length); + + return 0; +} + static int cdma_fill_normal_sge(struct cdma_dev *cdev, struct cdma_sqe_ctl *sqe_ctl, struct cdma_jfs_wr *wr) @@ -792,6 +836,8 @@ static int cdma_fill_normal_sge(struct cdma_dev *cdev, return cdma_k_fill_read_sqe(cdev, sqe_ctl, wr); case CDMA_WR_OPC_CAS: return cdma_k_fill_cas_sqe(cdev, sqe_ctl, wr); + case CDMA_WR_OPC_FADD: + return cdma_k_fill_faa_sqe(cdev, sqe_ctl, wr); default: dev_err(cdev->dev, "cdma wr opcode invalid, opcode = %u.\n", (u8)wr->opcode); diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 8637c2a80074..fe46955c925b 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -157,6 +157,19 @@ struct cdma_cas_wr { }; }; +struct cdma_faa_wr { + struct cdma_sge_info *dst; /* len in the sge is the length of FAA + * operation, only support 4/8B + */ + struct cdma_sge_info *src; /* local address for destination original + * value written back + */ + union { + u64 operand; /* Addend */ + u64 operand_addr; + }; +}; + struct cdma_jfs_wr { enum cdma_wr_opcode opcode; union cdma_jfs_wr_flag flag; @@ -165,6 +178,7 @@ struct cdma_jfs_wr { union { struct cdma_rw_wr rw; struct cdma_cas_wr cas; + struct cdma_faa_wr faa; }; struct cdma_jfs_wr *next; }; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 8a70bbb4d49c..6809ba074c05 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -126,6 +126,9 @@ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id, struct dma_cas_data *data); +enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, + struct dma_seg *local_seg, int queue_id, u64 add); + int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); -- Gitee From eb4f828255b214c74f76002589a395856b295236 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Tue, 26 Aug 2025 09:32:48 
+0800 Subject: [PATCH 028/126] ub: cdma: support debugfs interface commit 45ae057d001f39e6e83556d75f18da5aea2b0186 openEuler This patch implements functionality related to debugfs in the CDMA driver. The implementation includes the registration of debugfs and file registration, allowing users to view DFX information of devices and resources by reading the corresponding files. Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 2 + drivers/ub/cdma/cdma_debugfs.c | 783 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_debugfs.h | 58 +++ drivers/ub/cdma/cdma_jfs.h | 5 + drivers/ub/cdma/cdma_main.c | 7 + drivers/ub/cdma/cdma_queue.h | 3 +- 7 files changed, 858 insertions(+), 2 deletions(-) create mode 100644 drivers/ub/cdma/cdma_debugfs.c create mode 100644 drivers/ub/cdma/cdma_debugfs.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index cb3ea219f9e2..2ce4eefa2d84 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -3,6 +3,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \ - cdma_handle.o + cdma_handle.o cdma_debugfs.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index 8ed8fdb4d6fa..e782b0229943 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -9,6 +9,7 @@ #include #include #include +#include "cdma_debugfs.h" #include extern u32 jfc_arm_mode; @@ -163,6 +164,7 @@ struct cdma_dev { struct auxiliary_device *adev; struct cdma_chardev chardev; struct cdma_caps caps; + struct cdma_dbgfs cdbgfs; u32 eid; u32 upi; diff --git a/drivers/ub/cdma/cdma_debugfs.c b/drivers/ub/cdma/cdma_debugfs.c new file mode 100644 index 000000000000..d0de451e92ca --- /dev/null +++ 
b/drivers/ub/cdma/cdma_debugfs.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define dev_fmt(fmt) "CDMA: " fmt + +#include +#include +#include +#include +#include +#include "cdma_queue.h" +#include "cdma.h" +#include "cdma_jfc.h" +#include "cdma_jfs.h" +#include "cdma_mbox.h" +#include "cdma_cmd.h" +#include "cdma_debugfs.h" + +#define CDMA_DBG_READ_LEN 65536 +#define BUF_10_BASE 10 +#define BUF_SIZE 8 + +/* ctx debugfs start */ +static void cdma_get_ctx_info(struct cdma_dev *cdev, + struct cdma_queue *queue, + enum cdma_dbg_ctx_type ctx_type, + struct cdma_ctx_info *ctx_info) +{ + struct auxiliary_device *adev = cdev->adev; + +#define CDMA_DBG_CTX_SIZE_256 256 +#define UBASE_CTX_SIZE_128 128 + switch (ctx_type) { + case CDMA_DBG_JFS_CTX: + ctx_info->start_idx = queue->jfs_id; + ctx_info->ctx_size = CDMA_DBG_CTX_SIZE_256; + ctx_info->op = UBASE_MB_QUERY_JFS_CONTEXT; + ctx_info->ctx_name = "jfs"; + break; + case CDMA_DBG_SQ_JFC_CTX: + ctx_info->start_idx = queue->jfc_id; + ctx_info->ctx_size = UBASE_CTX_SIZE_128; + ctx_info->op = UBASE_MB_QUERY_JFC_CONTEXT; + ctx_info->ctx_name = "sq_jfc"; + break; + default: + dev_err(&adev->dev, "get ctx info failed, ctx_type = %d.\n", + ctx_type); + break; + } +} + +static void cdma_print_ctx_hw_bytype(struct seq_file *s, + enum cdma_dbg_ctx_type ctx_type, + struct cdma_ctx_info *ctx_info, + struct ubase_cmd_mailbox *mailbox) +{ + struct cdma_jfs_ctx *jfs_ctx; + struct cdma_jfc_ctx *jfc_ctx; + + seq_printf(s, "offset\t%s%u\n", ctx_info->ctx_name, ctx_info->start_idx); + + if (ctx_type == CDMA_DBG_JFS_CTX) { + jfs_ctx = (struct cdma_jfs_ctx *)mailbox->buf; + jfs_ctx->sqe_base_addr_l = 0; + jfs_ctx->sqe_base_addr_h = 0; + jfs_ctx->user_data_l = 0; + jfs_ctx->user_data_h = 0; + ubase_print_context_hw(s, jfs_ctx, ctx_info->ctx_size); + } else if (ctx_type == CDMA_DBG_SQ_JFC_CTX) { + jfc_ctx = (struct cdma_jfc_ctx *)mailbox->buf; + 
jfc_ctx->cqe_va_l = 0; + jfc_ctx->cqe_va_h = 0; + jfc_ctx->cqe_token_value = 0; + jfc_ctx->record_db_addr_l = 0; + jfc_ctx->record_db_addr_h = 0; + jfc_ctx->remote_token_value = 0; + ubase_print_context_hw(s, jfc_ctx, ctx_info->ctx_size); + } + + seq_puts(s, "\n"); +} + +static int cdma_dbg_dump_ctx_hw(struct seq_file *s, enum cdma_dbg_ctx_type ctx_type) +{ + struct cdma_dev *cdev = dev_get_drvdata(s->private); + struct auxiliary_device *adev = cdev->adev; + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_ctx_info ctx_info = { 0 }; + struct ubase_cmd_mailbox *mailbox; + struct ubase_mbx_attr attr; + struct cdma_queue *queue; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&adev->dev, "find queue[%u] for dump context hw failed.\n", queue_id); + return -EINVAL; + } + + if (!queue->jfs_id) { + spin_unlock(&cdev->queue_table.lock); + dev_warn(&adev->dev, "queue resource is not initialized.\n"); + return -EINVAL; + } + + cdma_get_ctx_info(cdev, queue, ctx_type, &ctx_info); + spin_unlock(&cdev->queue_table.lock); + + cdma_fill_mbx_attr(&attr, ctx_info.start_idx, ctx_info.op, 0); + mailbox = cdma_mailbox_query_ctx(cdev, &attr); + if (!mailbox) { + dev_err(&adev->dev, "cdma dbg post query %s ctx mbx failed.\n", + ctx_info.ctx_name); + return -ENOMEM; + } + + cdma_print_ctx_hw_bytype(s, ctx_type, &ctx_info, mailbox); + + cdma_free_cmd_mailbox(cdev, mailbox); + + return 0; +} + +static int cdma_dbg_dump_jfs_ctx_hw(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx_hw(s, CDMA_DBG_JFS_CTX); +} + +static int cdma_dbg_dump_sq_jfc_ctx_hw(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx_hw(s, CDMA_DBG_SQ_JFC_CTX); +} + +static void cdma_get_jfs_cfg(struct cdma_queue *queue, struct seq_file *s) +{ + struct cdma_jfs_cfg *cfg; + + if 
(!queue->jfs) + return; + + cfg = &queue->jfs->cfg; + seq_printf(s, "%-13u", cfg->depth); + seq_printf(s, "%-12u", cfg->flag.value); + seq_printf(s, "%-17u", cfg->eid_index); + seq_printf(s, "%-10u", cfg->priority); + seq_printf(s, "%-9u", cfg->max_sge); + seq_printf(s, "%-10u", cfg->max_rsge); + seq_printf(s, "%-11u", cfg->rnr_retry); + seq_printf(s, "%-13u", cfg->err_timeout); + seq_printf(s, "%-14u", cfg->jfc_id); + seq_printf(s, "%-15u", cfg->sqe_pos); + seq_printf(s, "%-11u", cfg->tpn); + seq_printf(s, "%-15u", cfg->pld_pos); + seq_printf(s, "%-16u", cfg->queue_id); +} + +static void cdma_get_jfc_cfg(struct cdma_queue *queue, struct seq_file *s) +{ + struct cdma_jfc_cfg *cfg; + + if (!queue->jfc) + return; + + cfg = &queue->jfc->jfc_cfg; + seq_printf(s, "%-13u", cfg->depth); + seq_printf(s, "%-12u", cfg->ceqn); + seq_printf(s, "%-16u", cfg->queue_id); +} + +static void cdma_get_jfs_title(struct seq_file *s) +{ + seq_puts(s, "depth "); + seq_puts(s, "flag "); + seq_puts(s, "eid_index "); + seq_puts(s, "priority "); + seq_puts(s, "max_sge "); + seq_puts(s, "max_rsge "); + seq_puts(s, "rnr_retry "); + seq_puts(s, "err_timeout "); + seq_puts(s, "jfc_id "); + seq_puts(s, "sqe_pos "); + seq_puts(s, "tpn "); + seq_puts(s, "pld_pos "); + seq_puts(s, "queue_id "); + seq_puts(s, "\n"); +} + +static void cdma_get_jfc_title(struct seq_file *s) +{ + seq_puts(s, "depth "); + seq_puts(s, "flag "); + seq_puts(s, "ceqn "); + seq_puts(s, "queue_id "); + seq_puts(s, "\n"); +} + +static int cdma_dbg_dump_ctx(struct seq_file *s, enum cdma_dbg_ctx_type ctx_type) +{ + struct cdma_dbg_context { + void (*get_title)(struct seq_file *s); + void (*get_cfg)(struct cdma_queue *queue, struct seq_file *s); + } dbg_ctx[] = { + {cdma_get_jfs_title, cdma_get_jfs_cfg}, + {cdma_get_jfc_title, cdma_get_jfc_cfg}, + }; + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_queue *queue; + + dbg_ctx[ctx_type].get_title(s); + + 
spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump context failed.\n", queue_id); + return -EINVAL; + } + + dbg_ctx[ctx_type].get_cfg(queue, s); + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +int cdma_dbg_dump_jfs_ctx(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx(s, CDMA_DBG_JFS_CTX); +} + +int cdma_dbg_dump_sq_jfc_ctx(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + return cdma_dbg_dump_ctx(s, CDMA_DBG_SQ_JFC_CTX); +} +/* ctx debugfs end */ + +/* resource debugfs start */ +static int cdma_dbg_dump_dev_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u8 eu_num = cdev->base.attr.eu_num; + u32 seid_idx, seid, upi, i; + + seq_printf(s, "EU_ENTRY_NUM: %u\n", eu_num); + for (i = 0; i < eu_num; i++) { + seid_idx = cdev->base.attr.eus[i].eid_idx; + seid = cdev->base.attr.eus[i].eid.dw0; + upi = cdev->base.attr.eus[i].upi; + seq_printf(s, "SEID_IDX: %u, SEID: %u, UPI: %u\n", seid_idx, seid, upi); + } + + return 0; +} + +static int cdma_dbg_dump_cap_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + struct cdma_caps *caps = &cdev->caps; + + seq_printf(s, "MAX_JFC: %u\n", caps->jfc.max_cnt); + seq_printf(s, "MAX_JFS: %u\n", caps->jfs.max_cnt); + seq_printf(s, "MAX_JFC_DEPTH: %u\n", caps->jfc.depth); + seq_printf(s, "MAX_JFS_DEPTH: %u\n", caps->jfs.depth); + seq_printf(s, "MAX_JFS_SGE: %u\n", caps->jfs_sge); + seq_printf(s, "MAX_JFS_RSGE: %u\n", caps->jfs_rsge); + seq_printf(s, "MAX_MSG_SIZE: %u\n", caps->max_msg_len); + seq_printf(s, "TRANS_MODE: %u\n", caps->trans_mode); + seq_printf(s, "CEQ_CNT: %u\n", caps->comp_vector_cnt); + + 
return 0; +} + +static int cdma_dbg_dump_queue_info(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + struct cdma_queue *queue; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump queue info failed.\n", queue_id); + return -EINVAL; + } + + seq_printf(s, "QUEUE_DEPTH: %u\n", queue->cfg.queue_depth); + seq_printf(s, "DST CNA: 0x%x\n", queue->cfg.dcna); + seq_printf(s, "RMT EID: 0x%x\n", queue->cfg.rmt_eid.dw0); + seq_printf(s, "PRIORITY: %u\n", queue->cfg.priority); + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} +/* resource debugfs end */ + +/* entry info start */ +static void cdma_dbg_dump_sqe_info(struct cdma_sqe_ctl *sqe_ctl, struct seq_file *s) +{ + seq_printf(s, "sqe bb idx: %u\n", sqe_ctl->sqe_bb_idx); + seq_printf(s, "place odr: %u\n", sqe_ctl->place_odr); + seq_printf(s, "comp order: %u\n", sqe_ctl->comp_order); + seq_printf(s, "fence: %u\n", sqe_ctl->fence); + seq_printf(s, "se: %u\n", sqe_ctl->se); + seq_printf(s, "cqe: %u\n", sqe_ctl->cqe); + seq_printf(s, "owner: %u\n", sqe_ctl->owner); + seq_printf(s, "opcode: %u\n", sqe_ctl->opcode); + seq_printf(s, "tpn: %u\n", sqe_ctl->tpn); + seq_printf(s, "sge num: %u\n", sqe_ctl->sge_num); + seq_printf(s, "rmt eid: %u\n", sqe_ctl->rmt_eid[0]); +} + +static void cdma_dbg_dump_cqe_info(struct cdma_jfc_cqe *cqe, struct seq_file *s) +{ + seq_printf(s, "sr: %u\n", cqe->s_r); + seq_printf(s, "owner: %u\n", cqe->owner); + seq_printf(s, "opcode: %u\n", cqe->opcode); + seq_printf(s, "fd: %u\n", cqe->fd); + seq_printf(s, "substatus: %u\n", cqe->substatus); + seq_printf(s, "status: %u\n", cqe->status); + seq_printf(s, "entry idx: %u\n", cqe->entry_idx); + seq_printf(s, "tpn: %u\n", cqe->tpn); + seq_printf(s, "rmt eid: %u\n", 
cqe->rmt_eid[0]); + seq_printf(s, "byte cnt: %u\n", cqe->byte_cnt); +} + +static void cdma_dbg_dum_eu(struct cdma_dev *cdev, int i, struct seq_file *s) +{ + struct eu_info *eu = &cdev->base.attr.eus[i]; + + seq_printf(s, "%d: ", i); + seq_printf(s, "idx[0x%x] ", eu->eid_idx); + seq_printf(s, "eid[0x%x] ", eu->eid.dw0); + seq_printf(s, "upi[0x%x]\n", eu->upi); +} + +static int cdma_dbg_dump_sqe(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + u32 entry_pi = cdev->cdbgfs.cfg.entry_pi; + struct cdma_sqe_ctl *sqe_ctl; + struct cdma_queue *queue; + struct cdma_jfs *jfs; + + spin_lock(&cdev->queue_table.lock); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump sqe failed.\n", queue_id); + return -EINVAL; + } + + if (queue->jfs && queue->is_kernel) { + jfs = to_cdma_jfs(queue->jfs); + if (entry_pi >= jfs->base_jfs.cfg.depth) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "pi [%u] overflow for dump sqe.\n", entry_pi); + return -EINVAL; + } + + spin_lock(&jfs->sq.lock); + sqe_ctl = (struct cdma_sqe_ctl *)(jfs->sq.buf.kva + + (entry_pi & (jfs->sq.buf.entry_cnt - 1)) * + jfs->sq.buf.entry_size); + cdma_dbg_dump_sqe_info(sqe_ctl, s); + spin_unlock(&jfs->sq.lock); + } else { + dev_warn(&cdev->adev->dev, "not support queue[%u] for dump sqe.\n", queue_id); + } + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +static int cdma_dbg_dump_cqe(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + u32 queue_id = cdev->cdbgfs.cfg.queue_id; + u32 entry_ci = cdev->cdbgfs.cfg.entry_ci; + struct cdma_queue *queue; + struct cdma_jfc_cqe *cqe; + struct cdma_jfc *jfc; + + spin_lock(&cdev->queue_table.lock); + queue = 
idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); + if (!queue) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "find queue[%u] for dump cqe failed.\n", queue_id); + return -EINVAL; + } + + if (queue->jfc && queue->is_kernel) { + jfc = to_cdma_jfc(queue->jfc); + if (entry_ci >= jfc->base.jfc_cfg.depth) { + spin_unlock(&cdev->queue_table.lock); + dev_err(&cdev->adev->dev, "ci [%u] overflow for dump cqe.\n", entry_ci); + return -EINVAL; + } + + spin_lock(&jfc->lock); + cqe = (struct cdma_jfc_cqe *)(jfc->buf.kva + + (entry_ci & (jfc->buf.entry_cnt - 1)) * + jfc->buf.entry_size); + cdma_dbg_dump_cqe_info(cqe, s); + spin_unlock(&jfc->lock); + } else { + dev_warn(&cdev->adev->dev, "not support queue[%u] for dump cqe.\n", queue_id); + } + + spin_unlock(&cdev->queue_table.lock); + + return 0; +} + +/* Dump eu info */ +static int cdma_dbg_dump_eu(struct seq_file *s, void *data) +{ + if (!s || !s->private) + return -EINVAL; + + struct cdma_dev *cdev = dev_get_drvdata(s->private); + int ret, i; + + ret = cdma_ctrlq_query_eu(cdev); + if (ret) + return ret; + + for (i = 0; i < cdev->base.attr.eu_num; i++) + cdma_dbg_dum_eu(cdev, i, s); + + return 0; +} +/* entry info end */ + +static bool cdma_dbg_dentry_support(struct device *dev, u32 property) +{ + struct cdma_dev *cdev = dev_get_drvdata(dev); + + return ubase_dbg_dentry_support(cdev->adev, property); +} + +static struct ubase_dbg_dentry_info cdma_dbg_dentry[] = { + { + .name = "context", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, { + .name = "resource_info", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, { + .name = "entry_info", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, + /* keep "cdma" at the bottom and add new directory above */ + { + .name = "cdma", + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + }, +}; + +static struct 
ubase_dbg_cmd_info cdma_dbg_cmd[] = { + { + .name = "jfs_context", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_jfs_ctx, + }, { + .name = "sq_jfc_context", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sq_jfc_ctx, + }, { + .name = "jfs_context_hw", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_jfs_ctx_hw, + }, { + .name = "sq_jfc_context_hw", + .dentry_index = CDMA_DBG_DENTRY_CONTEXT, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sq_jfc_ctx_hw, + }, { + .name = "dev_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_dev_info, + }, { + .name = "cap_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_cap_info, + }, { + .name = "queue_info", + .dentry_index = CDMA_DBG_DENTRY_RES_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_queue_info, + }, { + .name = "sqe", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_sqe, + }, { + .name = "cqe", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = 
UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_cqe, + }, { + .name = "eu", + .dentry_index = CDMA_DBG_DENTRY_ENTRY_INFO, + .property = UBASE_SUP_CDMA | UBASE_SUP_UBL, + .support = cdma_dbg_dentry_support, + .init = ubase_dbg_seq_file_init, + .read_func = cdma_dbg_dump_eu, + }, +}; + +static ssize_t cdma_dbgfs_cfg_write_val(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, + enum cdma_dbgfs_cfg_type type) +{ + struct cdma_dbgfs_cfg *cfg = (struct cdma_dbgfs_cfg *)filp->private_data; + char buf[BUF_SIZE] = { 0 }; + ssize_t len, ret; + u32 value; + + len = simple_write_to_buffer(buf, BUF_SIZE - 1, ppos, buffer, count); + if (len < 0) + return len; + + ret = kstrtouint(buf, BUF_10_BASE, &value); + if (ret) + return ret; + + switch (type) { + case CDMA_QUEUE_ID: + cfg->queue_id = value; + break; + case CDMA_ENTRY_PI: + cfg->entry_pi = value; + break; + case CDMA_ENTRY_CI: + cfg->entry_ci = value; + break; + default: + return -EINVAL; + } + + return len; +} + +static ssize_t cdma_dbgfs_cfg_read_val(struct file *filp, + char *buffer, size_t count, loff_t *ppos, + enum cdma_dbgfs_cfg_type type) +{ + struct cdma_dbgfs_cfg *cfg = (struct cdma_dbgfs_cfg *)filp->private_data; + char buf[BUF_SIZE] = { 0 }; + u32 value = 0; + size_t len; + + switch (type) { + case CDMA_QUEUE_ID: + value = cfg->queue_id; + break; + case CDMA_ENTRY_PI: + value = cfg->entry_pi; + break; + case CDMA_ENTRY_CI: + value = cfg->entry_ci; + break; + default: + break; + } + + len = scnprintf(buf, sizeof(buf), "%u\n", value); + + return simple_read_from_buffer(buffer, count, ppos, buf, len); +} + +static ssize_t cdma_dbgfs_cfg_write_queue_id(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_QUEUE_ID); +} + +static ssize_t cdma_dbgfs_cfg_read_queue_id(struct file *filp, + char *buffer, size_t count, 
+ loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_QUEUE_ID); +} + +static ssize_t cdma_dbgfs_cfg_write_entry_pi(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_ENTRY_PI); +} + +static ssize_t cdma_dbgfs_cfg_read_entry_pi(struct file *filp, + char *buffer, size_t count, + loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_ENTRY_PI); +} + +static ssize_t cdma_dbgfs_cfg_write_entry_ci(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return cdma_dbgfs_cfg_write_val(filp, buffer, count, ppos, CDMA_ENTRY_CI); +} + +static ssize_t cdma_dbgfs_cfg_read_entry_ci(struct file *filp, + char *buffer, size_t count, + loff_t *ppos) +{ + return cdma_dbgfs_cfg_read_val(filp, buffer, count, ppos, CDMA_ENTRY_CI); +} + +static struct cdma_dbgfs_cfg_info cdma_dbg_cfg[] = { + { + .name = "queue_id", + {true, true, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_queue_id, + .write = cdma_dbgfs_cfg_write_queue_id, + .open = simple_open, }, + }, { + .name = "entry_pi", + {false, false, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_entry_pi, + .write = cdma_dbgfs_cfg_write_entry_pi, + .open = simple_open, }, + }, { + .name = "entry_ci", + {false, false, true}, + {.owner = THIS_MODULE, + .read = cdma_dbgfs_cfg_read_entry_ci, + .write = cdma_dbgfs_cfg_write_entry_ci, + .open = simple_open, }, + }, +}; + +static int cdma_dbg_create_cfg_file(struct cdma_dev *cdev, + struct ubase_dbg_dentry_info *dentry_info, + u8 array_size) +{ + struct dentry *debugfs_file; + struct dentry *cur_dir; + size_t i, j; + + for (i = 0; i < array_size - 1; i++) { + cur_dir = dentry_info[i].dentry; + for (j = 0; j < ARRAY_SIZE(cdma_dbg_cfg); j++) { + if (!cdma_dbg_cfg[j].dentry_valid[i]) + continue; + debugfs_file = debugfs_create_file(cdma_dbg_cfg[j].name, + 0400, cur_dir, &cdev->cdbgfs.cfg, + 
&cdma_dbg_cfg[j].file_ops); + if (!debugfs_file) + return -ENOMEM; + } + } + + return 0; +} + +int cdma_dbg_init(struct auxiliary_device *adev) +{ + struct ubase_dbg_dentry_info dbg_dentry[CDMA_DBG_DENTRY_ROOT + 1] = {0}; + struct dentry *ubase_root_dentry = ubase_diag_debugfs_root(adev); + struct device *dev = &adev->dev; + struct cdma_dev *cdev; + int ret; + + cdev = dev_get_drvdata(dev); + + if (!ubase_root_dentry) { + dev_err(dev, "dbgfs root dentry does not exist.\n"); + return -ENOENT; + } + + memcpy(dbg_dentry, cdma_dbg_dentry, sizeof(cdma_dbg_dentry)); + cdev->cdbgfs.dbgfs.dentry = debugfs_create_dir( + dbg_dentry[ARRAY_SIZE(dbg_dentry) - 1].name, ubase_root_dentry); + if (IS_ERR(cdev->cdbgfs.dbgfs.dentry)) { + dev_err(dev, "create cdma debugfs root dir failed.\n"); + return PTR_ERR(cdev->cdbgfs.dbgfs.dentry); + } + + dbg_dentry[CDMA_DBG_DENTRY_ROOT].dentry = cdev->cdbgfs.dbgfs.dentry; + cdev->cdbgfs.dbgfs.cmd_info = cdma_dbg_cmd; + cdev->cdbgfs.dbgfs.cmd_info_size = ARRAY_SIZE(cdma_dbg_cmd); + + ret = ubase_dbg_create_dentry(dev, &cdev->cdbgfs.dbgfs, dbg_dentry, + ARRAY_SIZE(dbg_dentry) - 1); + if (ret) { + dev_err(dev, "create cdma debugfs dentry failed, ret = %d.\n", ret); + goto create_dentry_err; + } + + ret = cdma_dbg_create_cfg_file(cdev, dbg_dentry, ARRAY_SIZE(dbg_dentry)); + if (ret) { + dev_err(dev, "create cdma debugfs cfg file failed, ret = %d.\n", ret); + goto create_dentry_err; + } + + return 0; + +create_dentry_err: + debugfs_remove_recursive(cdev->cdbgfs.dbgfs.dentry); + cdev->cdbgfs.dbgfs.dentry = NULL; + + return ret; +} + +void cdma_dbg_uninit(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev = dev_get_drvdata(&adev->dev); + + if (!cdev->cdbgfs.dbgfs.dentry) + return; + + debugfs_remove_recursive(cdev->cdbgfs.dbgfs.dentry); + cdev->cdbgfs.dbgfs.dentry = NULL; +} diff --git a/drivers/ub/cdma/cdma_debugfs.h b/drivers/ub/cdma/cdma_debugfs.h new file mode 100644 index 000000000000..1cd0f2ada9dc --- /dev/null +++ 
b/drivers/ub/cdma/cdma_debugfs.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_DEBUGFS_H__ +#define __CDMA_DEBUGFS_H__ + +#include +#include + +enum cdma_dbg_dentry_type { + CDMA_DBG_DENTRY_CONTEXT, + CDMA_DBG_DENTRY_RES_INFO, + CDMA_DBG_DENTRY_ENTRY_INFO, + /* must be the last entry. */ + CDMA_DBG_DENTRY_ROOT, +}; + +/* ctx debugfs start */ +struct cdma_ctx_info { + u32 start_idx; + u32 ctx_size; + u8 op; + const char *ctx_name; +}; + +enum cdma_dbg_ctx_type { + CDMA_DBG_JFS_CTX = 0, + CDMA_DBG_SQ_JFC_CTX = 1, +}; +/* ctx debugfs end */ + +struct cdma_dbgfs_cfg_info { + const char *name; + bool dentry_valid[CDMA_DBG_DENTRY_ROOT]; + const struct file_operations file_ops; +}; + +struct cdma_dbgfs_cfg { + u32 queue_id; + u32 entry_pi; + u32 entry_ci; +}; + +enum cdma_dbgfs_cfg_type { + CDMA_QUEUE_ID = 0, + CDMA_ENTRY_PI, + CDMA_ENTRY_CI +}; + +struct cdma_dbgfs { + struct ubase_dbgfs dbgfs; + struct cdma_dbgfs_cfg cfg; +}; + +int cdma_dbg_init(struct auxiliary_device *adev); +void cdma_dbg_uninit(struct auxiliary_device *adev); + +#endif /* __CDMA_DEBUGFS_H__ */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index fe46955c925b..3d0391b03d97 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -314,6 +314,11 @@ struct cdma_jfs_ctx { u32 taack_nack_bm[32]; }; +static inline struct cdma_jfs *to_cdma_jfs(struct cdma_base_jfs *jfs) +{ + return container_of(jfs, struct cdma_jfs, base_jfs); +} + struct cdma_base_jfs *cdma_create_jfs(struct cdma_dev *cdev, struct cdma_jfs_cfg *cfg, struct cdma_udata *udata); diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 82dc5ab40cf8..cfdb1869e176 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -11,6 +11,7 @@ #include "cdma_chardev.h" #include #include "cdma_eq.h" +#include "cdma_debugfs.h" #include "cdma_cmd.h" /* Enabling 
jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. */ @@ -64,6 +65,11 @@ static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev * if (ret) dev_warn(&auxdev->dev, "query eu failed, ret = %d.\n", ret); + ret = cdma_dbg_init(auxdev); + if (ret) + dev_warn(&auxdev->dev, "init cdma debugfs failed, ret = %d.\n", + ret); + return 0; } @@ -108,6 +114,7 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) return; } + cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); cdma_destroy_dev(cdev); diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 5b434ae66bb9..08b24cb0b3fc 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ -4,9 +4,10 @@ #ifndef __CDMA_QUEUE_H__ #define __CDMA_QUEUE_H__ +#include + struct cdma_dev; struct cdma_context; -struct queue_cfg; enum cdma_queue_res_type { QUEUE_RES_TP, -- Gitee From 552ec709df8f209a51e948e33c866dbc2041ded1 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 29 Sep 2025 18:44:01 +0800 Subject: [PATCH 029/126] ub: cdma: support RX stop flow function commit 9184f7dd7e6ad431f6d076c1b95e0ab7fad255cb openEuler This patch implements the functionality of stopping RX flow during the UE deregistration process in the CDMA driver and intercepts user-space interfaces during the reset process. 
Signed-off-by: Zhipeng Lu Signed-off-by: Xinchi Ma Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/Makefile | 2 +- drivers/ub/cdma/cdma.h | 14 ++++ drivers/ub/cdma/cdma_api.c | 82 ++++++++++++++++++ drivers/ub/cdma/cdma_chardev.c | 55 +++++++++++- drivers/ub/cdma/cdma_cmd.c | 18 ++++ drivers/ub/cdma/cdma_cmd.h | 3 + drivers/ub/cdma/cdma_context.h | 1 + drivers/ub/cdma/cdma_dev.c | 30 +++++-- drivers/ub/cdma/cdma_dev.h | 2 +- drivers/ub/cdma/cdma_event.c | 47 +++++++---- drivers/ub/cdma/cdma_jfc.c | 8 +- drivers/ub/cdma/cdma_jfs.c | 8 +- drivers/ub/cdma/cdma_main.c | 100 +++++++++++++++++++++- drivers/ub/cdma/cdma_mmap.c | 149 +++++++++++++++++++++++++++++++++ drivers/ub/cdma/cdma_mmap.h | 14 ++++ drivers/ub/cdma/cdma_types.h | 16 ++++ drivers/ub/cdma/cdma_uobj.c | 6 +- drivers/ub/cdma/cdma_uobj.h | 2 +- include/ub/cdma/cdma_api.h | 12 +++ 19 files changed, 529 insertions(+), 40 deletions(-) create mode 100644 drivers/ub/cdma/cdma_mmap.c create mode 100644 drivers/ub/cdma/cdma_mmap.h diff --git a/drivers/ub/cdma/Makefile b/drivers/ub/cdma/Makefile index 2ce4eefa2d84..88dc9946a092 100644 --- a/drivers/ub/cdma/Makefile +++ b/drivers/ub/cdma/Makefile @@ -3,6 +3,6 @@ cdma-$(CONFIG_UB_CDMA) := cdma_main.o cdma_dev.o cdma_chardev.o cdma_cmd.o cdma_tid.o cdma_ioctl.o \ cdma_api.o cdma_context.o cdma_queue.o cdma_uobj.o cdma_jfc.o cdma_common.o \ cdma_db.o cdma_mbox.o cdma_tp.o cdma_jfs.o cdma_eq.o cdma_event.o cdma_segment.o \ - cdma_handle.o cdma_debugfs.o + cdma_handle.o cdma_debugfs.o cdma_mmap.o obj-m += cdma.o diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index e782b0229943..b7d00bcf39ac 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -14,6 +14,10 @@ extern u32 jfc_arm_mode; extern bool cqe_mode; +extern struct list_head g_client_list; +extern struct rw_semaphore g_clients_rwsem; +extern struct rw_semaphore g_device_rwsem; +extern struct mutex g_cdma_reset_mutex; #define CDMA_HW_PAGE_SHIFT 12 #define 
CDMA_HW_PAGE_SIZE (1 << CDMA_HW_PAGE_SHIFT) @@ -24,6 +28,8 @@ extern bool cqe_mode; #define CDMA_UPI_MASK 0x7FFF +#define DMA_MAX_DEV_NAME 64 + enum cdma_cqe_size { CDMA_64_CQE_SIZE, CDMA_128_CQE_SIZE, @@ -34,6 +40,12 @@ enum cdma_status { CDMA_SUSPEND, }; +enum cdma_client_ops { + CDMA_CLIENT_STOP, + CDMA_CLIENT_REMOVE, + CDMA_CLIENT_ADD, +}; + enum { CDMA_CAP_FEATURE_AR = BIT(0), CDMA_CAP_FEATURE_JFC_INLINE = BIT(4), @@ -195,6 +207,8 @@ struct cdma_dev { struct mutex file_mutex; struct list_head file_list; struct page *arm_db_page; + atomic_t cmdcnt; + struct completion cmddone; }; struct cdma_jfs_event { diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 8a30d20a1a09..cc3aa6ce4921 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -4,6 +4,8 @@ #define pr_fmt(fmt) "CDMA: " fmt #define dev_fmt pr_fmt +#include +#include #include "cdma_segment.h" #include "cdma_dev.h" #include "cdma_cmd.h" @@ -14,6 +16,10 @@ #include "cdma_handle.h" #include +LIST_HEAD(g_client_list); +DECLARE_RWSEM(g_clients_rwsem); +DECLARE_RWSEM(g_device_rwsem); + struct dma_device *dma_get_device_list(u32 *num_devices) { struct cdma_device_attr *attr; @@ -632,3 +638,79 @@ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, return cdma_poll_jfc(cdma_queue->jfc, cr_cnt, cr); } EXPORT_SYMBOL_GPL(dma_poll_queue); + +int dma_register_client(struct dma_client *client) +{ + struct cdma_dev *cdev = NULL; + struct xarray *cdma_devs_tbl; + unsigned long index = 0; + u32 devs_num; + + if (client == NULL || client->client_name == NULL || + client->add == NULL || client->remove == NULL || + client->stop == NULL) { + pr_err("invalid parameter.\n"); + return -EINVAL; + } + + if (strnlen(client->client_name, DMA_MAX_DEV_NAME) >= DMA_MAX_DEV_NAME) { + pr_err("invalid parameter, client name.\n"); + return -EINVAL; + } + + down_write(&g_device_rwsem); + + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + + xa_for_each(cdma_devs_tbl, index, cdev) { 
+ if (client->add && client->add(cdev->eid)) + pr_info("dma client: %s add failed.\n", + client->client_name); + } + down_write(&g_clients_rwsem); + list_add_tail(&client->list_node, &g_client_list); + up_write(&g_clients_rwsem); + up_write(&g_device_rwsem); + + pr_info("dma client: %s register success.\n", client->client_name); + return 0; +} +EXPORT_SYMBOL_GPL(dma_register_client); + +void dma_unregister_client(struct dma_client *client) +{ + struct cdma_dev *cdev = NULL; + struct xarray *cdma_devs_tbl; + unsigned long index = 0; + u32 devs_num; + + if (client == NULL || client->client_name == NULL || + client->add == NULL || client->remove == NULL || + client->stop == NULL) { + pr_err("Invalid parameter.\n"); + return; + } + + if (strnlen(client->client_name, DMA_MAX_DEV_NAME) >= DMA_MAX_DEV_NAME) { + pr_err("invalid parameter, client name.\n"); + return; + } + + down_write(&g_device_rwsem); + cdma_devs_tbl = get_cdma_dev_tbl(&devs_num); + + xa_for_each(cdma_devs_tbl, index, cdev) { + if (client->stop && client->remove) { + client->stop(cdev->eid); + client->remove(cdev->eid); + } + } + + down_write(&g_clients_rwsem); + list_del(&client->list_node); + up_write(&g_clients_rwsem); + up_write(&g_device_rwsem); + + pr_info("dma client: %s unregister success.\n", client->client_name); +} +EXPORT_SYMBOL_GPL(dma_unregister_client); diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index a1a289eb0e91..3614609d683e 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -6,6 +6,7 @@ #include #include +#include "cdma_cmd.h" #include "cdma_ioctl.h" #include "cdma_context.h" #include "cdma_chardev.h" @@ -13,6 +14,7 @@ #include "cdma_types.h" #include "cdma_uobj.h" #include "cdma.h" +#include "cdma_mmap.h" #define CDMA_DEVICE_NAME "cdma/dev" @@ -65,18 +67,27 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct cdma_ioctl_hdr hdr = { 0 }; int ret; + if (!cfile->cdev || 
cfile->cdev->status == CDMA_SUSPEND) { + pr_info("ioctl cdev is invalid.\n"); + return -ENODEV; + } + cdma_cmd_inc(cfile->cdev); + if (cmd == CDMA_SYNC) { ret = copy_from_user(&hdr, (void *)arg, sizeof(hdr)); if (ret || hdr.args_len > CDMA_MAX_CMD_SIZE) { pr_err("copy user ret = %d, input parameter len = %u.\n", ret, hdr.args_len); + cdma_cmd_dec(cfile->cdev); return -EINVAL; } ret = cdma_cmd_parse(cfile, &hdr); + cdma_cmd_dec(cfile->cdev); return ret; } pr_err("invalid ioctl command, command = %u.\n", cmd); + cdma_cmd_dec(cfile->cdev); return -ENOIOCTLCMD; } @@ -115,6 +126,11 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * u32 jfs_id; u32 cmd; + if (cdev->status == CDMA_SUSPEND) { + dev_warn(cdev->dev, "cdev is resetting.\n"); + return -EBUSY; + } + db_addr = cdev->db_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -158,21 +174,37 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * static int cdma_mmap(struct file *file, struct vm_area_struct *vma) { struct cdma_file *cfile = (struct cdma_file *)file->private_data; + struct cdma_umap_priv *priv; int ret; + if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + pr_info("mmap cdev is invalid.\n"); + return -ENODEV; + } + if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { pr_err("mmap failed, expect vm area size to be an integer multiple of page size.\n"); return -EINVAL; } + priv = kzalloc(sizeof(struct cdma_umap_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + vma->vm_ops = cdma_get_umap_ops(); + vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK | VM_IO); + mutex_lock(&cfile->ctx_mutex); ret = cdma_remap_pfn_range(cfile, vma); if (ret) { mutex_unlock(&cfile->ctx_mutex); + kfree(priv); return ret; } mutex_unlock(&cfile->ctx_mutex); + cdma_umap_priv_init(priv, vma); + return 0; } @@ -188,7 +220,7 @@ static void cdma_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm) mn_notifier->mm = NULL; 
mutex_lock(&cfile->ctx_mutex); - cdma_cleanup_context_uobj(cfile); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_CLOSE); if (cfile->uctx) cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; @@ -235,6 +267,11 @@ static int cdma_open(struct inode *inode, struct file *file) chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); + if (cdev->status == CDMA_SUSPEND) { + dev_warn(cdev->dev, "cdev is resetting.\n"); + return -EBUSY; + } + cfile = kzalloc(sizeof(struct cdma_file), GFP_KERNEL); if (!cfile) return -ENOMEM; @@ -254,6 +291,8 @@ static int cdma_open(struct inode *inode, struct file *file) file->private_data = cfile; mutex_init(&cfile->ctx_mutex); list_add_tail(&cfile->list, &cdev->file_list); + mutex_init(&cfile->umap_mutex); + INIT_LIST_HEAD(&cfile->umaps_list); nonseekable_open(inode, file); mutex_unlock(&cdev->file_mutex); @@ -265,19 +304,28 @@ static int cdma_close(struct inode *inode, struct file *file) struct cdma_file *cfile = (struct cdma_file *)file->private_data; struct cdma_dev *cdev; + mutex_lock(&g_cdma_reset_mutex); + cdev = cfile->cdev; + if (!cdev) { + mutex_unlock(&g_cdma_reset_mutex); + kref_put(&cfile->ref, cdma_release_file); + inode->i_cdev = NULL; + return 0; + } mutex_lock(&cdev->file_mutex); list_del(&cfile->list); mutex_unlock(&cdev->file_mutex); mutex_lock(&cfile->ctx_mutex); - cdma_cleanup_context_uobj(cfile); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_CLOSE); if (cfile->uctx) cdma_cleanup_context_res(cfile->uctx); cfile->uctx = NULL; mutex_unlock(&cfile->ctx_mutex); + mutex_unlock(&g_cdma_reset_mutex); kref_put(&cfile->ref, cdma_release_file); pr_debug("cdma close success.\n"); @@ -361,7 +409,10 @@ void cdma_release_file(struct kref *ref) { struct cdma_file *cfile = container_of(ref, struct cdma_file, ref); + if (cfile->fault_page) + __free_pages(cfile->fault_page, 0); cdma_unregister_mmu(cfile); + mutex_destroy(&cfile->umap_mutex); 
mutex_destroy(&cfile->ctx_mutex); idr_destroy(&cfile->idr); kfree(cfile); diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c index 74e6b32a58c7..c8bf01d930ad 100644 --- a/drivers/ub/cdma/cdma_cmd.c +++ b/drivers/ub/cdma/cdma_cmd.c @@ -214,3 +214,21 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev) return 0; } + +void cdma_cmd_inc(struct cdma_dev *cdev) +{ + atomic_inc(&cdev->cmdcnt); +} + +void cdma_cmd_dec(struct cdma_dev *cdev) +{ + if (atomic_dec_and_test(&cdev->cmdcnt)) + complete(&cdev->cmddone); +} + +void cdma_cmd_flush(struct cdma_dev *cdev) +{ + cdma_cmd_dec(cdev); + pr_info("cmd flush cmdcnt is %d\n", atomic_read(&cdev->cmdcnt)); + wait_for_completion(&cdev->cmddone); +} diff --git a/drivers/ub/cdma/cdma_cmd.h b/drivers/ub/cdma/cdma_cmd.h index 550f60640b36..f85331c8c51b 100644 --- a/drivers/ub/cdma/cdma_cmd.h +++ b/drivers/ub/cdma/cdma_cmd.h @@ -76,4 +76,7 @@ struct eu_query_out { int cdma_init_dev_caps(struct cdma_dev *cdev); int cdma_ctrlq_query_eu(struct cdma_dev *cdev); +void cdma_cmd_inc(struct cdma_dev *cdev); +void cdma_cmd_dec(struct cdma_dev *cdev); +void cdma_cmd_flush(struct cdma_dev *cdev); #endif diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 47736a281257..0eb40763c29d 100644 --- a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -24,6 +24,7 @@ struct cdma_context { atomic_t ref_cnt; struct list_head queue_list; struct list_head seg_list; + bool invalid; }; struct cdma_ctx_res { diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index f08e60716edc..2b69a44b346e 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -55,15 +55,18 @@ static int cdma_add_device_to_list(struct cdma_dev *cdev) return -EINVAL; } + down_write(&g_device_rwsem); ret = xa_err(xa_store(&cdma_devs_tbl, adev->id, cdev, GFP_KERNEL)); if (ret) { dev_err(cdev->dev, "store cdma device to table failed, adev id = %u.\n", adev->id); + up_write(&g_device_rwsem); 
return ret; } atomic_inc(&cdma_devs_num); + up_write(&g_device_rwsem); return 0; } @@ -77,8 +80,10 @@ static void cdma_del_device_from_list(struct cdma_dev *cdev) return; } + down_write(&g_device_rwsem); atomic_dec(&cdma_devs_num); xa_erase(&cdma_devs_tbl, adev->id); + up_write(&g_device_rwsem); } static void cdma_tbl_init(struct cdma_table *table, u32 max, u32 min) @@ -393,6 +398,8 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) idr_init(&cdev->ctx_idr); spin_lock_init(&cdev->ctx_lock); + atomic_set(&cdev->cmdcnt, 1); + init_completion(&cdev->cmddone); dev_dbg(&adev->dev, "cdma.%u init succeeded.\n", adev->id); @@ -411,7 +418,7 @@ struct cdma_dev *cdma_create_dev(struct auxiliary_device *adev) return NULL; } -void cdma_destroy_dev(struct cdma_dev *cdev) +void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove) { struct cdma_context *tmp; int id; @@ -421,21 +428,26 @@ void cdma_destroy_dev(struct cdma_dev *cdev) ubase_virt_unregister(cdev->adev); - cdma_release_table_res(cdev); + if (is_remove) { + cdma_release_table_res(cdev); - idr_for_each_entry(&cdev->ctx_idr, tmp, id) - cdma_free_context(cdev, tmp); - idr_destroy(&cdev->ctx_idr); + idr_for_each_entry(&cdev->ctx_idr, tmp, id) + cdma_free_context(cdev, tmp); + idr_destroy(&cdev->ctx_idr); + } cdma_destroy_arm_db_page(cdev); ubase_ctrlq_unregister_crq_event(cdev->adev, UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, CDMA_CTRLQ_EU_UPDATE); - cdma_free_dev_tid(cdev); - cdma_del_device_from_list(cdev); - cdma_uninit_dev_param(cdev); - kfree(cdev); + if (is_remove) { + cdma_free_dev_tid(cdev); + + cdma_del_device_from_list(cdev); + cdma_uninit_dev_param(cdev); + kfree(cdev); + } } bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index 85d41cbe0773..d433218934f1 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -24,7 +24,7 @@ enum cdma_ctrlq_eu_op { }; struct cdma_dev 
*cdma_create_dev(struct auxiliary_device *adev); -void cdma_destroy_dev(struct cdma_dev *cdev); +void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove); struct cdma_dev *get_cdma_dev_by_eid(u32 eid); struct xarray *get_cdma_dev_tbl(u32 *devices_num); bool cdma_find_seid_in_eus(struct eu_info *eus, u8 eu_num, struct dev_eid *eid, diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index f2c51d4833ee..057bf2daefc3 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -520,28 +520,40 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, return -EINVAL; } - INIT_LIST_HEAD(&event_list); - ret = cdma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, - &event_cnt, &event_list); - if (ret < 0) { - pr_err("wait event failed, ret = %d.\n", ret); - return ret; - } - event = list_first_entry(&event_list, struct cdma_jfe_event, node); - if (event == NULL) - return -EIO; - - cdma_set_async_event(&async_event, event); - list_del(&event->node); - kfree(event); - - if (event_cnt > 0) { + if (!jfae->cfile->cdev || jfae->cfile->cdev->status == CDMA_SUSPEND) { + pr_info("wait dev invalid event success.\n"); + async_event.event_data = 0; + async_event.event_type = CDMA_EVENT_DEV_INVALID; ret = (int)copy_to_user((void *)arg, &async_event, sizeof(async_event)); if (ret) { pr_err("dev copy to user failed, ret = %d\n", ret); return -EFAULT; } + } else { + INIT_LIST_HEAD(&event_list); + ret = cdma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, + &event_cnt, &event_list); + if (ret < 0) { + pr_err("wait event failed, ret = %d.\n", ret); + return ret; + } + event = list_first_entry(&event_list, struct cdma_jfe_event, node); + if (event == NULL) + return -EIO; + + cdma_set_async_event(&async_event, event); + list_del(&event->node); + kfree(event); + + if (event_cnt > 0) { + ret = (int)copy_to_user((void *)arg, &async_event, + sizeof(async_event)); + if (ret) { + pr_err("dev copy to user failed, ret = %d\n", 
ret); + return -EFAULT; + } + } } return 0; @@ -554,6 +566,9 @@ static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait if (!jfae || !jfae->cfile || !jfae->cfile->cdev) return POLLERR; + if (jfae->cfile->cdev->status == CDMA_SUSPEND) + return POLLIN | POLLRDNORM; + return cdma_jfe_poll(&jfae->jfe, filp, wait); } diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index cd92f90461ff..0b3611c3d27d 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -555,9 +555,11 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, return -EINVAL; } - ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); - if (ret) - dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + if (!(jfc->base.ctx && jfc->base.ctx->invalid)) { + ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); + if (ret) + dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); + } if (refcount_dec_and_test(&jfc->event_refcount)) complete(&jfc->event_comp); diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index cbb47a7f56db..8a62e2a2fd6b 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -538,9 +538,11 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return -EINVAL; } - ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); - if (ret) - dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + if (!(jfs->base_jfs.ctx && jfs->base_jfs.ctx->invalid)) { + ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); + if (ret) + dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); + } if (refcount_dec_and_test(&jfs->ae_ref_cnt)) complete(&jfs->ae_comp); diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index cfdb1869e176..817bcd6232e3 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -13,6 +13,14 @@ #include "cdma_eq.h" #include "cdma_debugfs.h" #include "cdma_cmd.h" +#include "cdma_types.h" +#include "cdma_mmap.h" +#include "cdma_context.h" 
+#include "cdma_uobj.h" +#include "cdma_event.h" + +static bool is_rmmod; +DEFINE_MUTEX(g_cdma_reset_mutex); /* Enabling jfc_arm_mode will cause jfc to report cqe; otherwise, it will not. */ uint jfc_arm_mode; @@ -52,6 +60,47 @@ static inline void cdma_unregister_event(struct auxiliary_device *adev) cdma_unreg_ae_event(adev); } +static void cdma_reset_unmap_vma_pages(struct cdma_dev *cdev, bool is_reset) +{ + struct cdma_file *cfile; + + mutex_lock(&cdev->file_mutex); + list_for_each_entry(cfile, &cdev->file_list, list) { + mutex_lock(&cfile->ctx_mutex); + cdma_unmap_vma_pages(cfile); + if (is_reset && cfile->uctx != NULL) + cfile->uctx->invalid = true; + mutex_unlock(&cfile->ctx_mutex); + } + mutex_unlock(&cdev->file_mutex); +} + +static void cdma_client_handler(struct cdma_dev *cdev, + enum cdma_client_ops client_ops) +{ + struct dma_client *client; + + down_write(&g_clients_rwsem); + list_for_each_entry(client, &g_client_list, list_node) { + switch (client_ops) { + case CDMA_CLIENT_STOP: + if (client->stop) + client->stop(cdev->eid); + break; + case CDMA_CLIENT_REMOVE: + if (client->remove) + client->remove(cdev->eid); + break; + case CDMA_CLIENT_ADD: + if (client->add && client->add(cdev->eid)) + dev_warn(&cdev->adev->dev, "add eid:0x%x, cdev for client:%s failed.\n", + cdev->eid, client->client_name); + break; + } + } + up_write(&g_clients_rwsem); +} + static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) { int ret; @@ -73,9 +122,33 @@ static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev * return 0; } +static void cdma_free_cfile_uobj(struct cdma_dev *cdev) +{ + struct cdma_file *cfile, *next_cfile; + struct cdma_jfae *jfae; + + mutex_lock(&cdev->file_mutex); + list_for_each_entry_safe(cfile, next_cfile, &cdev->file_list, list) { + list_del(&cfile->list); + mutex_lock(&cfile->ctx_mutex); + cdma_cleanup_context_uobj(cfile, CDMA_REMOVE_DRIVER_REMOVE); + cfile->cdev = NULL; + if (cfile->uctx) { + jfae = 
(struct cdma_jfae *)cfile->uctx->jfae; + if (jfae) + wake_up_interruptible(&jfae->jfe.poll_wait); + cdma_cleanup_context_res(cfile->uctx); + } + cfile->uctx = NULL; + mutex_unlock(&cfile->ctx_mutex); + } + mutex_unlock(&cdev->file_mutex); +} + static int cdma_init_dev(struct auxiliary_device *auxdev) { struct cdma_dev *cdev; + bool is_remove = true; int ret; dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", @@ -87,37 +160,56 @@ static int cdma_init_dev(struct auxiliary_device *auxdev) ret = cdma_create_chardev(cdev); if (ret) { - cdma_destroy_dev(cdev); + cdma_destroy_dev(cdev, is_remove); return ret; } ret = cdma_init_dev_info(auxdev, cdev); if (ret) { cdma_destroy_chardev(cdev); - cdma_destroy_dev(cdev); + cdma_destroy_dev(cdev, is_remove); return ret; } + cdma_client_handler(cdev, CDMA_CLIENT_ADD); return ret; } static void cdma_uninit_dev(struct auxiliary_device *auxdev) { struct cdma_dev *cdev; + int ret; dev_dbg(&auxdev->dev, "%s called, matched aux dev(%s.%u).\n", __func__, auxdev->name, auxdev->id); + mutex_lock(&g_cdma_reset_mutex); cdev = dev_get_drvdata(&auxdev->dev); if (!cdev) { dev_err(&auxdev->dev, "get drvdata from ubase failed.\n"); + ubase_reset_unregister(auxdev); + mutex_unlock(&g_cdma_reset_mutex); return; } + cdev->status = CDMA_SUSPEND; + cdma_cmd_flush(cdev); + cdma_client_handler(cdev, CDMA_CLIENT_STOP); + cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); + cdma_reset_unmap_vma_pages(cdev, false); + + if (!is_rmmod) { + ret = ubase_deactivate_dev(auxdev); + dev_info(&auxdev->dev, "ubase deactivate dev ret = %d.\n", ret); + } + + ubase_reset_unregister(auxdev); cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); - cdma_destroy_dev(cdev); + cdma_free_cfile_uobj(cdev); + cdma_destroy_dev(cdev, true); + mutex_unlock(&g_cdma_reset_mutex); } static int cdma_probe(struct auxiliary_device *auxdev, @@ -135,6 +227,7 @@ static int cdma_probe(struct auxiliary_device *auxdev, static void cdma_remove(struct 
auxiliary_device *auxdev) { cdma_uninit_dev(auxdev); + pr_info("cdma device remove success.\n"); } static const struct auxiliary_device_id cdma_id_table[] = { @@ -178,6 +271,7 @@ static int __init cdma_init(void) static void __exit cdma_exit(void) { + is_rmmod = true; auxiliary_driver_unregister(&cdma_driver); class_destroy(cdma_cdev_class); } diff --git a/drivers/ub/cdma/cdma_mmap.c b/drivers/ub/cdma/cdma_mmap.c new file mode 100644 index 000000000000..eaef6a9a4152 --- /dev/null +++ b/drivers/ub/cdma/cdma_mmap.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#define pr_fmt(fmt) "CDMA: " fmt + +#include +#include +#include "cdma_mmap.h" + +void cdma_umap_priv_init(struct cdma_umap_priv *priv, + struct vm_area_struct *vma) +{ + struct cdma_file *cfile = (struct cdma_file *)vma->vm_file->private_data; + + priv->vma = vma; + vma->vm_private_data = priv; + + mutex_lock(&cfile->umap_mutex); + list_add(&priv->node, &cfile->umaps_list); + mutex_unlock(&cfile->umap_mutex); +} + +/* thanks to drivers/infiniband/core/ib_core_uverbs.c */ +void cdma_unmap_vma_pages(struct cdma_file *cfile) +{ + struct cdma_umap_priv *priv, *next_priv; + struct mm_struct *mm = NULL; + struct vm_area_struct *vma; + int ret; + + while (1) { + mm = NULL; + mutex_lock(&cfile->umap_mutex); + list_for_each_entry_safe(priv, next_priv, &cfile->umaps_list, node) { + mm = priv->vma->vm_mm; + ret = mmget_not_zero(mm); + if (!ret) { + list_del_init(&priv->node); + mm = NULL; + continue; + } + break; + } + mutex_unlock(&cfile->umap_mutex); + if (!mm) + return; + + mutex_lock(&cfile->umap_mutex); + list_for_each_entry_safe(priv, next_priv, &cfile->umaps_list, node) { + vma = priv->vma; + if (vma->vm_mm != mm) + continue; + list_del_init(&priv->node); + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); + } + mutex_unlock(&cfile->umap_mutex); + + mmput(mm); + } +} + +static void cdma_umap_open(struct 
vm_area_struct *vma) +{ + struct cdma_umap_priv *priv; + + priv = kzalloc(sizeof(struct cdma_umap_priv), GFP_KERNEL); + if (!priv) + goto out_zap; + + cdma_umap_priv_init(priv, vma); + + return; + +out_zap: + vma->vm_private_data = NULL; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void cdma_umap_close(struct vm_area_struct *vma) +{ + struct cdma_umap_priv *priv = (struct cdma_umap_priv *)vma->vm_private_data; + struct cdma_file *cfile = (struct cdma_file *)vma->vm_file->private_data; + + if (!priv) + return; + + mutex_lock(&cfile->umap_mutex); + list_del(&priv->node); + mutex_unlock(&cfile->umap_mutex); + kfree(priv); + vma->vm_private_data = NULL; + + pr_info("cdma umap close success.\n"); +} + +static vm_fault_t cdma_umap_fault(struct vm_fault *vmf) +{ + struct cdma_umap_priv *priv = (struct cdma_umap_priv *)vmf->vma->vm_private_data; + struct cdma_file *cfile = (struct cdma_file *)vmf->vma->vm_file->private_data; + vm_fault_t ret = 0; + + if (!priv) + return VM_FAULT_SIGBUS; + + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { + vmf->page = ZERO_PAGE(0); + get_page(vmf->page); + return 0; + } + + mutex_lock(&cfile->umap_mutex); + if (!cfile->fault_page) + cfile->fault_page = alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); + + if (cfile->fault_page) { + vmf->page = cfile->fault_page; + get_page(vmf->page); + } else { + ret = VM_FAULT_SIGBUS; + } + mutex_unlock(&cfile->umap_mutex); + + return ret; +} + +static int cdma_umap_remap(struct vm_area_struct *vma) +{ + pr_err("cdma umap remap is not permitted.\n"); + return -EINVAL; +} + +static int cdma_umap_can_split(struct vm_area_struct *vma, unsigned long addr) +{ + pr_err("cdma umap split is not permitted.\n"); + return -EINVAL; +} + +static const struct vm_operations_struct g_cdma_umap_ops = { + .open = cdma_umap_open, + .close = cdma_umap_close, + .fault = cdma_umap_fault, + .mremap = cdma_umap_remap, + .may_split = cdma_umap_can_split, +}; + +const struct vm_operations_struct 
*cdma_get_umap_ops(void) +{ + return (const struct vm_operations_struct *)&g_cdma_umap_ops; +} diff --git a/drivers/ub/cdma/cdma_mmap.h b/drivers/ub/cdma/cdma_mmap.h new file mode 100644 index 000000000000..0dd6c609a85e --- /dev/null +++ b/drivers/ub/cdma/cdma_mmap.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. */ + +#ifndef __CDMA_MMAP_H__ +#define __CDMA_MMAP_H__ + +#include +#include "cdma_types.h" + +void cdma_unmap_vma_pages(struct cdma_file *cfile); +const struct vm_operations_struct *cdma_get_umap_ops(void); +void cdma_umap_priv_init(struct cdma_umap_priv *priv, struct vm_area_struct *vma); + +#endif /* CDMA_MMAP_H */ diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 0b861c891558..947c360ba2ef 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -12,6 +12,14 @@ enum cdma_event_type { CDMA_EVENT_JFC_ERR, CDMA_EVENT_JFS_ERR, + CDMA_EVENT_DEV_INVALID, +}; + +enum cdma_remove_reason { + /* Context deletion. This call should delete the actual object itself */ + CDMA_REMOVE_CLOSE, + /* Driver is being hot-unplugged. This call should delete the actual object itself */ + CDMA_REMOVE_DRIVER_REMOVE, }; struct cdma_ucontext { @@ -142,8 +150,16 @@ struct cdma_file { struct cdma_context *uctx; struct idr idr; spinlock_t idr_lock; + struct mutex umap_mutex; + struct list_head umaps_list; + struct page *fault_page; struct cdma_mn mn_notifier; struct kref ref; }; +struct cdma_umap_priv { + struct vm_area_struct *vma; + struct list_head node; +}; + #endif diff --git a/drivers/ub/cdma/cdma_uobj.c b/drivers/ub/cdma/cdma_uobj.c index 3e6e1f9ad1b6..92fe4da441ea 100644 --- a/drivers/ub/cdma/cdma_uobj.c +++ b/drivers/ub/cdma/cdma_uobj.c @@ -2,6 +2,7 @@ /* Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
*/ #include +#include "cdma_mmap.h" #include "cdma_uobj.h" #include "cdma_chardev.h" @@ -104,11 +105,14 @@ struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, return uobj; } -void cdma_cleanup_context_uobj(struct cdma_file *cfile) +void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why) { struct cdma_uobj *uobj; int id; + if (why == CDMA_REMOVE_DRIVER_REMOVE) + cdma_unmap_vma_pages(cfile); + spin_lock(&cfile->idr_lock); idr_for_each_entry(&cfile->idr, uobj, id) cdma_uobj_remove(uobj); diff --git a/drivers/ub/cdma/cdma_uobj.h b/drivers/ub/cdma/cdma_uobj.h index 505a66911960..f343559a33ce 100644 --- a/drivers/ub/cdma/cdma_uobj.h +++ b/drivers/ub/cdma/cdma_uobj.h @@ -28,7 +28,7 @@ struct cdma_uobj *cdma_uobj_create(struct cdma_file *cfile, void cdma_uobj_delete(struct cdma_uobj *uobj); struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, enum UOBJ_TYPE type); -void cdma_cleanup_context_uobj(struct cdma_file *cfile); +void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why); void cdma_close_uobj_fd(struct cdma_file *cfile); #endif diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 6809ba074c05..b90a64f128b9 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -87,6 +87,14 @@ struct dma_notify_data { u64 notify_data; }; +struct dma_client { + struct list_head list_node; + char *client_name; + int (*add)(u32 eid); + void (*remove)(u32 eid); + void (*stop)(u32 eid); +}; + struct dma_device *dma_get_device_list(u32 *num_devices); void dma_free_device_list(struct dma_device *dev_list, u32 num_devices); @@ -132,4 +140,8 @@ enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr); +int dma_register_client(struct dma_client *client); + +void dma_unregister_client(struct dma_client *client); + #endif -- Gitee From 
4080f382d9a543501b0996e4923a9648709c35cb Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 29 Sep 2025 18:47:20 +0800 Subject: [PATCH 030/126] ub: cdma: support reset function commit 710a287ef643833af2d9ac6a4892bb8829a77983 openEuler This patch implements the RX stop flow function during the driver unload or UE reset process in the CDMA driver, the RX resume flow function during the UE reset process, and the process of notifying the control plane to delete the corresponding UE connection information during the UE reset process. Signed-off-by: Zhipeng Lu Signed-off-by: Xinchi Ma Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_main.c | 104 ++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 817bcd6232e3..8ec5849ade39 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -101,6 +101,48 @@ static void cdma_client_handler(struct cdma_dev *cdev, up_write(&g_clients_rwsem); } +static void cdma_reset_down(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev || cdev->status == CDMA_SUSPEND) { + dev_warn(&adev->dev, "cdma device is not ready.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + cdev->status = CDMA_SUSPEND; + cdma_cmd_flush(cdev); + cdma_reset_unmap_vma_pages(cdev, true); + cdma_client_handler(cdev, CDMA_CLIENT_STOP); + cdma_unregister_event(adev); + cdma_dbg_uninit(adev); + mutex_unlock(&g_cdma_reset_mutex); +} + +static void cdma_reset_uninit(struct auxiliary_device *adev) +{ + enum ubase_reset_stage stage; + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev) { + dev_info(&adev->dev, "cdma device is not exist.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + stage = ubase_get_reset_stage(adev); + if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == 
CDMA_SUSPEND) { + cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); + cdma_destroy_dev(cdev, is_rmmod); + } + mutex_unlock(&g_cdma_reset_mutex); +} + static int cdma_init_dev_info(struct auxiliary_device *auxdev, struct cdma_dev *cdev) { int ret; @@ -212,6 +254,66 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) mutex_unlock(&g_cdma_reset_mutex); } +static void cdma_reset_init(struct auxiliary_device *adev) +{ + struct cdma_dev *cdev; + + mutex_lock(&g_cdma_reset_mutex); + cdev = get_cdma_dev(adev); + if (!cdev) { + dev_err(&adev->dev, "cdma device is not exist.\n"); + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + if (cdma_register_crq_event(adev)) { + mutex_unlock(&g_cdma_reset_mutex); + return; + } + + if (cdma_create_arm_db_page(cdev)) + goto unregister_crq; + + if (cdma_init_dev_info(adev, cdev)) + goto destory_arm_db_page; + + idr_init(&cdev->ctx_idr); + spin_lock_init(&cdev->ctx_lock); + atomic_set(&cdev->cmdcnt, 1); + cdev->status = CDMA_NORMAL; + cdma_client_handler(cdev, CDMA_CLIENT_ADD); + mutex_unlock(&g_cdma_reset_mutex); + return; + +destory_arm_db_page: + cdma_destroy_arm_db_page(cdev); +unregister_crq: + cdma_unregister_crq_event(adev); + mutex_unlock(&g_cdma_reset_mutex); +} + +static void cdma_reset_handler(struct auxiliary_device *adev, + enum ubase_reset_stage stage) +{ + if (!adev) + return; + + switch (stage) { + case UBASE_RESET_STAGE_DOWN: + cdma_reset_down(adev); + break; + case UBASE_RESET_STAGE_UNINIT: + cdma_reset_uninit(adev); + break; + case UBASE_RESET_STAGE_INIT: + if (!is_rmmod) + cdma_reset_init(adev); + break; + default: + break; + } +} + static int cdma_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { @@ -221,6 +323,8 @@ static int cdma_probe(struct auxiliary_device *auxdev, if (ret) return ret; + ubase_reset_register(auxdev, cdma_reset_handler); + return 0; } -- Gitee From a38634d282f86e10160c4c4033f4aba4910e28f9 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Tue, 11 Nov 
2025 15:00:50 +0800 Subject: [PATCH 031/126] ub: cdma: support for cdma kernelspace north-south compatibility requirements commit 34c67ed8f4c1070bd35c18026a36845b26f89b55 openEuler This patch adds north-south compatibility for CDMA. Signed-off-by: Zhipeng Lu Signed-off-by: Lin Yuan Signed-off-by: zhaolichang <943677312@qq.com> --- include/uapi/ub/cdma/cdma_abi.h | 62 +++++++++++++++++++++++++++++++++ include/ub/cdma/cdma_api.h | 16 +++++++++ 2 files changed, 78 insertions(+) diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index b32954f28636..681854ed9765 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -142,6 +142,8 @@ struct cdma_cmd_create_jfs_args { __u32 tpn; __u64 dma_jfs; /* dma jfs pointer */ __u32 trans_mode; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 id; @@ -149,6 +151,8 @@ struct cdma_cmd_create_jfs_args { __u8 max_sge; __u8 max_rsge; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; struct cdma_cmd_udrv_priv udata; }; @@ -163,8 +167,12 @@ struct cdma_cmd_delete_jfs_args { __u32 jfs_id; __u64 handle; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -178,10 +186,14 @@ struct cdma_cmd_create_ctp_args { __u32 seid; __u32 deid; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 tpn; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -190,15 +202,25 @@ struct cdma_cmd_delete_ctp_args { __u32 tpn; __u64 handle; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; struct cdma_cmd_create_jfce_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { int fd; int id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -209,11 +231,15 @@ struct cdma_cmd_create_jfc_args { int jfce_id; __u32 ceqn; __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 id; __u32 depth; __u64 handle; /* 
handle of the allocated jfc obj in kernel */ + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; struct cdma_cmd_udrv_priv udata; }; @@ -223,10 +249,14 @@ struct cdma_cmd_delete_jfc_args { __u32 jfcn; __u64 handle; /* handle of jfc */ __u32 queue_id; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u32 comp_events_reported; __u32 async_events_reported; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -234,16 +264,26 @@ struct cdma_cmd_register_seg_args { struct { __u64 addr; __u64 len; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; struct cdma_cmd_unregister_seg_args { struct { __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } out; }; struct dev_eid { @@ -285,16 +325,28 @@ struct cdma_device_attr { }; struct cdma_cmd_query_device_attr_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { struct cdma_device_attr attr; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; struct cdma_create_context_args { + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } in; struct { __u8 cqe_size; __u8 dwqe_enable; int async_fd; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -324,10 +376,14 @@ struct cdma_cmd_create_queue_args { __u8 priority; __u64 user_ctx; __u32 trans_mode; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; struct { int queue_id; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } out; }; @@ -335,7 +391,13 @@ struct cdma_cmd_delete_queue_args { struct { __u32 queue_id; __u64 handle; + __u32 rsv_bitmap; + __u32 rsvd[4]; } in; + struct { + __u32 rsv_bitmap; + __u32 rsvd[4]; + } out; }; struct cdma_cmd_jfce_wait_args { diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index b90a64f128b9..61449ab9ee26 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -11,6 +11,8 @@ struct dma_device { struct cdma_device_attr attr; atomic_t ref_cnt; void *private_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; enum 
dma_cr_opcode { @@ -40,6 +42,8 @@ struct dma_cr { u32 local_id; u32 remote_id; u32 tpn; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct queue_cfg { @@ -49,6 +53,8 @@ struct queue_cfg { u32 dcna; struct dev_eid rmt_eid; u32 trans_mode; + u32 rsv_bitmap; + u32 rsvd[6]; }; struct dma_seg { @@ -58,6 +64,8 @@ struct dma_seg { u32 tid; /* data valid only in bit 0-19 */ u32 token_value; bool token_value_valid; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_seg_cfg { @@ -65,6 +73,8 @@ struct dma_seg_cfg { u64 len; u32 token_value; bool token_value_valid; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_context { @@ -80,11 +90,15 @@ enum dma_status { struct dma_cas_data { u64 compare_data; u64 swap_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_client { @@ -93,6 +107,8 @@ struct dma_client { int (*add)(u32 eid); void (*remove)(u32 eid); void (*stop)(u32 eid); + u32 rsv_bitmap; + u32 rsvd[4]; }; struct dma_device *dma_get_device_list(u32 *num_devices); -- Gitee From bf2e599b4622ff3e9e828cf176f8aead3c29e131 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Thu, 27 Nov 2025 09:27:24 +0800 Subject: [PATCH 032/126] ub:ubus: Add ubus and ubfi opensource document commit e40360b68c42fb79a086799873391e728c29c0de openEuler Add ubus and ubfi opensource document to introduce ubus and ubfi related functions and capabilities for users in opensource society. 
Signed-off-by: Yahui Liu Signed-off-by: Yuhao Xiang Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: zhaolichang <943677312@qq.com> --- .../ABI/testing/debugfs-ub-hisi-ubus | 37 ++ Documentation/ABI/testing/sysfs-bus-ub | 427 ++++++++++++++++++ Documentation/driver-api/ub/index.rst | 6 +- Documentation/driver-api/ub/ubfi.rst | 7 + Documentation/driver-api/ub/ubus.rst | 7 + Documentation/ub/index.rst | 4 +- Documentation/ub/ubfi/index.rst | 11 + Documentation/ub/ubfi/ubfi.rst | 178 ++++++++ Documentation/ub/ubus/hisi_ubus.rst | 95 ++++ Documentation/ub/ubus/index.rst | 13 + Documentation/ub/ubus/ubus-service.rst | 60 +++ Documentation/ub/ubus/ubus.rst | 312 +++++++++++++ 12 files changed, 1154 insertions(+), 3 deletions(-) create mode 100644 Documentation/ABI/testing/debugfs-ub-hisi-ubus create mode 100644 Documentation/ABI/testing/sysfs-bus-ub create mode 100644 Documentation/driver-api/ub/ubfi.rst create mode 100644 Documentation/driver-api/ub/ubus.rst create mode 100644 Documentation/ub/ubfi/index.rst create mode 100644 Documentation/ub/ubfi/ubfi.rst create mode 100644 Documentation/ub/ubus/hisi_ubus.rst create mode 100644 Documentation/ub/ubus/index.rst create mode 100644 Documentation/ub/ubus/ubus-service.rst create mode 100644 Documentation/ub/ubus/ubus.rst diff --git a/Documentation/ABI/testing/debugfs-ub-hisi-ubus b/Documentation/ABI/testing/debugfs-ub-hisi-ubus new file mode 100644 index 000000000000..69ba558bfc02 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-ub-hisi-ubus @@ -0,0 +1,37 @@ +What: /sys/kernel/debug/UB_BUS_CTL/eu_table +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the contents of the EID-UPI entry. + By default, the EID and UPI key-value pair for entry 0 is displayed. + By writing an entry index to the properties file, you can retrieve + the content of the corresponding entry. 
+ + Example:: + + Display the content of entry5: + # echo 5 > /sys/kernel/debug/UB_BUS_CTL/eu_table + # cat /sys/kernel/debug/UB_BUS_CTL/eu_table + +What: /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/reg_info +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the register information for the specified queue of the designated + UB Bus controller. + +What: /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info +Date: Oct 2025 +Contact: Junlong Zheng +Description: Display the SQE and CQE contents of the specified MSQG for the designated + UB Bus controller. By default, the content of SQ entry 0 is displayed. + By writing the queue type and entry index to the properties file, you can + retrieve the content of the corresponding entry. + + Example:: + + Output the content of SQ entry3: + # echo 0 3 > /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + # cat /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + + Output the content of CQ entry5: + # echo 2 5 > /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info + # cat /sys/kernel/debug/UB_BUS_CTL/hi_msgq-/q_entry_info \ No newline at end of file diff --git a/Documentation/ABI/testing/sysfs-bus-ub b/Documentation/ABI/testing/sysfs-bus-ub new file mode 100644 index 000000000000..f7b3193958b7 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-ub @@ -0,0 +1,427 @@ +What: /sys/bus/ub/cluster +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates the current system operating mode: + 1 for cluster mode, 0 for Standalone mode. + +What: /sys/bus/ub/instance +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the list of bus instances created in the current system. + By default, it starts from the first one. Writing numbers into + the file can change the starting position of the output bus + instance. + +What: /sys/bus/ub/drivers/.../bind +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing an entity number to this file will cause the driver + to attempt to bind to the entity. 
This is useful for + overriding default bindings. The entity number is + the same as found in /sys/bus/ub/devices/. + For example:: + + # echo 00002 > /sys/bus/ub/drivers/sample/bind + +What: /sys/bus/ub/drivers/.../unbind +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing an entity number to this file will cause the + driver to attempt to unbind from the entity. This may be + useful when overriding default bindings. The entity + number is the same as found in /sys/bus/ub/devices/. + For example:: + + # echo 00002 > /sys/bus/ub/drivers/sample/unbind + +What: /sys/bus/ub/drivers/.../new_id +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing a device ID to this file will attempt to + dynamically add a new device ID to a UB device driver. + This may allow the driver to support more hardware than + was included in the driver's static device ID support + table at compile time. The format for the device ID is: + VVVV DDDD MVVV MMMM CCCC MMMM PPPP. That is Vendor ID, + Device ID, Module Vendor ID, Module ID, Class, Class Mask + and Private Driver Data. The Vendor ID and Device ID fields + are required, the rest are optional. Upon successfully + adding an ID, the driver will probe for the device and + attempt to bind to it. + For example:: + + # echo cc08 a001 > /sys/bus/ub/drivers/sample/new_id + +What: /sys/bus/ub/drivers/.../remove_id +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing a device ID to this file will remove an ID + that was dynamically added via the new_id sysfs entry. + The format for the device ID is: + VVVV DDDD MVVV MMMM CCCC MMMM. That is Vendor ID, Device + ID, Module Vendor ID, Module ID, Class and Class Mask. + The Vendor ID and Device ID fields are required, the rest + are optional. After successfully removing an ID, + the driver will no longer support the device. + This is useful to ensure auto probing won't + match the driver to the device. 
+ For example:: + + # echo cc08 a001 > /sys/bus/ub/drivers/sample/remove_id + +What: /sys/bus/ub/devices/.../class_code +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the class code type of the entity. + +What: /sys/bus/ub/devices/.../config +Date: Oct 2025 +Contact: Junlong Zheng +Description: + A channel is provided for user-mode programs to access the + entity configuration space. User programs can open the file + using the open system call and then perform read/write + operations on the configuration space using the pread/pwrite + system calls. + For details, please refer to the implementation of ubutils + . + +What: /sys/bus/ub/devices/.../device +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the Device ID of the entity. + +What: /sys/bus/ub/devices/.../device_reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing 1 to this file can trigger a device-level reset. All + entities below it will be reset. + Supported only by Entity0. + +What: /sys/bus/ub/devices/.../direct_link +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the link connection relationships and the peer + information of the ports connected to this entity. + +What: /sys/bus/ub/devices/.../driver_override +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This file allows the driver for a device to be specified which + will override standard static and dynamic ID matching. When + specified, only a driver with a name matching the value written + to driver_override will have an opportunity to bind to the + device. The override is specified by writing a string to the + driver_override file (echo sample > driver_override) and + may be cleared with an empty string (echo > driver_override). + This returns the device to standard matching rules binding. + Writing to driver_override does not automatically unbind the + device from its current driver or make any attempt to + automatically load the specified driver. 
If no driver with a + matching name is currently loaded in the kernel, the device + will not bind to any driver. This also allows devices to + opt-out of driver binding using a driver_override name such as + "none". Only a single driver may be specified in the override, + there is no support for parsing delimiters. + +What: /sys/bus/ub/devices/.../eid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity's EID. + +What: /sys/bus/ub/devices/.../entity_idx +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the index of the entity, numbered starting from 0. + +What: /sys/bus/ub/devices/.../guid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the GUID of the entity. + +What: /sys/bus/ub/devices/.../instance +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the EID of the bus instance bound to the entity. + +What: /sys/bus/ub/devices/.../kref +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the current reference count of the entity. + +What: /sys/bus/ub/devices/.../match_driver +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the current entity is ready for driver + matching. Some entities require additional initialization work, + so this entry is provided to control the entity separately. + In this case, it is necessary to ensure a certain timing + sequence; For example, the driver should be loaded only after + this status of the entity is set to 1 to ensure that the driver + probe is correctly initiated. + +What: /sys/bus/ub/devices/.../numa +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This file contains the NUMA node to which the UB Entity is + attached, or -1 if the node is unknown. The initial value + comes from UBRT table, defined in the UB firmware specification. + +What: /sys/bus/ub/devices/.../primary_cna +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the primary compact network address of the entity. 
+ +What: /sys/bus/ub/devices/.../primary_entity +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity number of the entity0 to which this entity + belongs. + +What: /sys/bus/ub/devices/.../mue_list +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display a list of all MUEs under this entity, excluding itself. + Only Entity0 has this attribute file. + +What: /sys/bus/ub/devices/.../reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Writing 1 to this file can trigger an entity-level reset, only + reset this entity, it will not affect other entities. + +What: /sys/bus/ub/devices/.../resource +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Information about the resource space of the entity is displayed, + with a total of 3 entries, each consisting of the following + three components: start_address, end_address, flags. + If all values are 0, it indicates that the resource space is + not supported. + +What: /sys/bus/ub/devices/.../resource +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Provide attribute files to the user-mode driver. Through the + open and mmap system calls, the resource space of an entity can + be mapped into the process space for direct access, thereby + improving the efficiency of cross-mode resource space access. + The memory attribute mapped by this interface is the device + attribute. + +What: /sys/bus/ub/devices/.../resource_wc +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Provide attribute files to the user-mode driver. Through the + open and mmap system calls, the resource space of an entity can + be mapped into the process space for direct access, thereby + improving the efficiency of cross-mode resource space access. + The memory attribute mapped by this interface is the + write-combine attribute. + +What: /sys/bus/ub/devices/.../sw_cap +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display whether forwarding capability is supported. + Only UBC supports it. 
+ +What: /sys/bus/ub/devices/.../tid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the Token ID of the entity. The entity uses this + Token ID to access system memory. + +What: /sys/bus/ub/devices/.../type +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the type of the entity. The type is a subdomain segment + of GUID. + +What: /sys/bus/ub/devices/.../ubc +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the entity number of the UB controller associated with + the entity. + +What: /sys/bus/ub/devices/.../ub_numues +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of UEs that have been enabled for this + entity. Writing a value to the file enables the UEs. The written + value must be within the range of ub_totalues. Writing 0 + disables all UEs. + Only MUE supports this file. + +What: /sys/bus/ub/devices/.../ub_total_entities +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of all entities supported by this entity. + Only Entity0 supports this file. + +What: /sys/bus/ub/devices/.../ub_totalues +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the number of UEs owned by this entity. + Only MUE supports this file. + +What: /sys/bus/ub/devices/.../ue_list +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display a list of all UEs under this entity, excluding itself. + Only MUE has this attribute file. + +What: /sys/bus/ub/devices/.../upi +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display UPI of the entity. + +What: /sys/bus/ub/devices/.../vendor +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display Vendor ID of the entity. + +What: /sys/bus/ub/devices/.../msi_irqs/ +Date: Oct 2025 +Contact: Junlong Zheng +Description: + The /sys/bus/ub/devices/.../msi_irqs/ directory contains a + variable set of files, with each file being named after a + corresponding msi irq vector allocated to that entity. 
+ +What: /sys/bus/ub/devices/.../msi_irqs/<N> +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This attribute indicates the mode that the irq vector named by + the file is in (msi vs. msix) + +What: /sys/bus/ub/devices/.../port/asy_link_width +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the port supports asymmetric link width. + Supported only on physical port. + + +What: /sys/bus/ub/devices/.../port/boundary +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Indicates whether the port is a boundary port. + +What: /sys/bus/ub/devices/.../port/cna +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display the CNA of this port. + +What: /sys/bus/ub/devices/.../port/glb_qdlws +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Write 1 to enable global dynamic lane adjustment, + write 0 to disable this function. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../port/linkup +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Display whether the port has established a connection. + +What: /sys/bus/ub/devices/.../port/neighbor +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Displays the entity number of the peer entity at the port. + If no link is established, it displays "No Neighbor". + +What: /sys/bus/ub/devices/.../port/neighbor_guid +Date: Oct 2025 +Contact: Junlong Zheng +Description: + If there is a peer entity, display the GUID of the peer entity. + Otherwise, display "No Neighbor". + +What: /sys/bus/ub/devices/.../port/neighbor_port_idx +Date: Oct 2025 +Contact: Junlong Zheng +Description: + If there is a peer entity, the port index of the peer entity is + displayed. Otherwise, display "No Neighbor". + +What: /sys/bus/ub/devices/.../port/port_reset +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Supports individual port reset, triggered by writing 1. + Supported only on physical port. 
+ +What: /sys/bus/ub/devices/.../port/qdlws_exec_state +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Obtain the hardware execution status of the current dynamically + adjustable lane. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../port/rx_qdlws +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Enable/Disable dynamic lane adjustment in the RX direction. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../port/tx_qdlws +Date: Oct 2025 +Contact: Junlong Zheng +Description: + Enable/Disable dynamic lane adjustment in the TX direction. + Supported only on physical port. + +What: /sys/bus/ub/devices/.../slot/power +Date: Oct 2025 +Contact: Junlong Zheng +Description: + This feature supports hot-plug notification. + Display the current slot status, the value can be "on", + "poweron", "poweroff", "off" or "unknown state". And can + write 1 to enable power on the slot, and write 0 to + power it off. + This file is supported only by entities that support + hot-plug features. diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst index 85be3e89ea81..d3a5969e6e94 100644 --- a/Documentation/driver-api/ub/index.rst +++ b/Documentation/driver-api/ub/index.rst @@ -11,6 +11,8 @@ The Linux UnifiedBus implementer's API guide Table of contents .. toctree:: - :maxdepth: 1 + :maxdepth: 2 - ubase + ubfi + ubus + ubase \ No newline at end of file diff --git a/Documentation/driver-api/ub/ubfi.rst b/Documentation/driver-api/ub/ubfi.rst new file mode 100644 index 000000000000..a7b0466bdca4 --- /dev/null +++ b/Documentation/driver-api/ub/ubfi.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +UBFI Driver Support Library +--------------------------- + +.. 
kernel-doc:: include/ub/ubfi/ubfi.h + :functions: diff --git a/Documentation/driver-api/ub/ubus.rst b/Documentation/driver-api/ub/ubus.rst new file mode 100644 index 000000000000..4e39b79e9e5a --- /dev/null +++ b/Documentation/driver-api/ub/ubus.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0 + +UBUS Driver Support Library +----------------------------- + +.. kernel-doc:: include/ub/ubus/ubus.h + :functions: diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 34fd8d871f19..8e939a2ba8fe 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -10,4 +10,6 @@ UnifiedBus Subsystem .. toctree:: :maxdepth: 2 - ubase/index \ No newline at end of file + ubase/index + ubfi/index + ubus/index diff --git a/Documentation/ub/ubfi/index.rst b/Documentation/ub/ubfi/index.rst new file mode 100644 index 000000000000..2dd11600f4f7 --- /dev/null +++ b/Documentation/ub/ubfi/index.rst @@ -0,0 +1,11 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======================= +UB Firmware Spec Driver +======================= + +.. toctree:: + :maxdepth: 2 + :numbered: + + ubfi diff --git a/Documentation/ub/ubfi/ubfi.rst b/Documentation/ub/ubfi/ubfi.rst new file mode 100644 index 000000000000..efea335726b8 --- /dev/null +++ b/Documentation/ub/ubfi/ubfi.rst @@ -0,0 +1,178 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========== +UBFI Driver +=========== + +What is UBFI +============ + +When BIOS boots the OS with UB firmware, it should report the UB-related +information in the system so that the OS can obtain the UB-related information, +including UBC, UMMU, and all other information required for UB enabling. + +Startup information is related to chip specifications and is static information +that can be reported through a static information table. There are three +reporting methods: UBIOS, ACPI, and Device Tree. The only difference among these +three methods lies in the entry points for obtaining the UB-related information +tables. 
The contents of each information table remain consistent. + +UnifiedBus Firmware Interface (UBFI) driver supports obtaining UB-related +information from the BIOS via the ACPI table or device tree. And create software +instances of UBCs and UMMUs in the OS. + +UBFI driver is one of the fundamental drivers of UB. It has achieved the +aforementioned functions. + +.. code-block:: none + + +--------------------------------------------------------------+ + | BIOS | + +--------------------------------------------------------------+ + ^ ^ + |acpi of| + v v + +--------------------------------------------------------------+ + | kernel | + +--------------------------------------------------------------+ + ^ + | + v + +--------------------------------------------------------------+ + | ubfi | + +--------------------------------------------------------------+ + ^ ^ + | | + v v + +-----------------+ +-----------------+ + | ubus | | ummu | + +-----------------+ +-----------------+ + +What does UBFI do +================= + +When loading the ubfi driver, it detects the current OS boot mode and retrieves +the UBRT (UB root table) physical address from the BIOS. + + - ACPI (UBRT table) + - device tree (node: chosen: ubios-information-table) + +For the structure of UBRT, please refer to https://www.unifiedbus.com/ + +Create UBC +---------- + +BIOS may report information about multiple UBCs, some of which is shared among +multiple UBCs and is reported in ``struct ubrt_ubc_table`` + +.. kernel-doc:: drivers/ub/ubfi/ubc.h + :functions: ubrt_ubc_table + +As ``ubc_cna_start``, ``ubc_cna_end``, ``ubc_eid_start``, ``ubc_eid_end``, +``ubc_feature``, ``cluster_mode``, these attributes belong to the entire UBPU +node and are shared by all UBCs. + +For a single UBC, its information is reported in the ``struct ubc_node`` + +.. kernel-doc:: drivers/ub/ubfi/ubc.h + :functions: ubc_node + +We have performed the following work on a single UBC. 
+ + - Create the UBC structure and record the UBC information + - Register the UBC irq with the kernel + - Initialize UBC and register the UBC device with the kernel + - Register the MMIO address space of UBC with the kernel + - Set the MSI domain for all UBCs + +After completing these steps, ``struct list_head ubc_list`` will be provided +externally, which records all UBCs within the node for subsequent +interconnection and communication purposes. + +Set MSI domain for UBC +~~~~~~~~~~~~~~~~~~~~~~ + +UBFI driver requests interrupts from the interrupt management subsystem on +behalf of the entity and delivers the interrupt configuration to the entity. +When reporting an interrupt, the entity writes the interrupt information into +the interrupt controller, which then calls back the interrupt management +subsystem. The interrupt management subsystem subsequently invokes the UB driver +to handle the corresponding interrupt. + +UB created a new Message Signaled Interrupt domain called USI (UB Signaled +Interrupt). + +UB will add a platform device in the DSDT and IORT tables to associate UBC +with the USI domain. If booting with device tree, we will add a new UBC node in +DTS for binding the USI domain. For each UBC, a corresponding number of platform +devices should be created. We will set the USI domain of these platform devices +to the USI domain of each UBC. + +Example in DTS for UBC:: + + ubc@N { + compatible = "ub,ubc"; + #interrupt-cells = <0x3>; + interrupt-parent = <0x01>; + interrupts = <0x0 0xa 0x4>; + index = <0x00>; + msi-parent = <0x1 0xabcd>; + }; + +Parse UMMU and PMU +------------------ + +Both UMMU and UMMU-PMU devices are platform devices and support creation via +ACPI and DTS. + +ACPI method: + - The device information for UMMU and UMMU-PMU has been added to DSDT and + IORT tables. 
+ - When the OS enables ACPI functionality, the ACPI system will recognize + the device information in the DSDT and IORT tables and automatically + create platform devices for UMMU and UMMU-PMU. + - The number of platform devices for UMMU and UMMU-PMU depends on the + number of device information nodes described in the DSDT and IORT tables. + +DTS method: + - The DTB file has added device tree nodes for UMMU and UMMU-PMU. + - When the OS enables the device tree functionality, the DTS system will + recognize the device tree nodes for UMMU and UMMU-PMU, and then + automatically create platform devices for them. + - The number of platform devices for UMMU and UMMU-PMU depends on the + number of corresponding device tree nodes described in the device tree. + + Example in DTS for UMMU and UMMU-PMU:: + + ummu@N { + compatible = "ub,ummu"; + index = <0x0>; + msi-parent = <&its>; + }; + + ummu-pmu@N { + compatible = "ub,ummu_pmu"; + index = <0x0>; + msi-parent = <&its>; + }; + +Obtain UMMU nodes from the UBRT table: + - The UBRT table can be parsed to extract the UMMU sub-table, which contains + several UMMU nodes. Each UMMU node describes the hardware information of an + UMMU device and its corresponding UMMU-PMU device. The specific content of + UMMU nodes can be found in ``struct ummu_node``. + + - The number of UMMU platform devices created via ACPI or DTS should match the + number of UMMU nodes in the UBRT table, as they have a one-to-one + correspondence. The same one-to-one correspondence applies to UMMU-PMU + devices and UMMU nodes. + +Configure UMMU and PMU devices: + - For each UMMU node parsed from the UBRT table, the register information and + NUMA affinity described in the UMMU node can be configured for the + corresponding UMMU and UMMU-PMU devices. + - Each UMMU node's content is stored in the ``ubrt_fwnode_list`` linked list. 
+ Subsequently, the corresponding UMMU node can be found by using the fwnode + property of the UMMU and UMMU-PMU devices, making it convenient to obtain the + hardware information during the initialization of the UMMU and UMMU-PMU + drivers. \ No newline at end of file diff --git a/Documentation/ub/ubus/hisi_ubus.rst b/Documentation/ub/ubus/hisi_ubus.rst new file mode 100644 index 000000000000..b384b058129f --- /dev/null +++ b/Documentation/ub/ubus/hisi_ubus.rst @@ -0,0 +1,95 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================== +Hisilicon UBUS Driver +===================== + +Hisilicon UBUS Driver (abbreviated as Hisi UBUS) is a UnifiedBus (UB) +specification management subsystem specifically implemented for Hisi chips. It +provides a subsystem operation interfaces implementation:: + + static const struct ub_manage_subsystem_ops hisi_ub_manage_subsystem_ops = { + .vendor = HISI_VENDOR_ID, + .controller_probe = ub_bus_controller_probe, + .controller_remove = ub_bus_controller_remove, + .ras_handler_probe = ub_ras_handler_probe, + .ras_handler_remove = ub_ras_handler_remove + }; + +including probe/remove methods for the UB bus controller and ub ras handler. +Each specification management subsystem has a unique vendor id to identify the +provider. This vendor id is set to the vendor field of +``ub_manage_subsystem_ops`` implementation. During UB bus controller probe, a +ub_bus_controller_ops will be set to the UB bus controller, message device and +debug file system will be initialized. During UB bus controller remove, ops +will be unset, message device will be removed and debug file system will be +uninitialized. 
+ +During module init, hisi_ub_manage_subsystem_ops is registered to Ubus driver +via the ``register_ub_manage_subsystem_ops()`` method provided by Ubus driver:: + + int register_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) + +When module is being unloaded, Ubus driver's +``unregister_ub_manage_subsystem_ops()`` is called to unregister the subsystem +operation interfaces:: + + void unregister_ub_manage_subsystem_ops(const struct ub_manage_subsystem_ops *ops) + +Hisi UBUS Controller Driver +=========================== +Hisi UBUS provides a ub bus controller operation interfaces implementation:: + + static struct ub_bus_controller_ops hi_ubc_ops = { + .eu_table_init = hi_eu_table_init, + .eu_table_uninit = hi_eu_table_uninit, + .eu_cfg = hi_eu_cfg, + .mem_decoder_create = hi_mem_decoder_create, + .mem_decoder_remove = hi_mem_decoder_remove, + .register_ubmem_irq = hi_register_ubmem_irq, + .unregister_ubmem_irq = hi_unregister_ubmem_irq, + .register_decoder_base_addr = hi_register_decoder_base_addr, + .entity_enable = hi_send_entity_enable_msg, + }; + +including init/uninit method for EID/UPI table, create/remove method for UB +memory decoder, register/unregister method for UB memory decoder interrupts +and so on. + +UB Message Core Driver +====================== +Hisi UBUS implements a message device that provides a set of operations:: + + static struct message_ops hi_message_ops = { + .probe_dev = hi_message_probe_dev, + .remove_dev = hi_message_remove_dev, + .sync_request = hi_message_sync_request, + .response = hi_message_response, + .sync_enum = hi_message_sync_enum, + .vdm_rx_handler = hi_vdm_rx_msg_handler, + .send = hi_message_send, + }; + +including synchronous message sending, synchronous enumeration message +sending, response message sending, vendor-defined message reception handling +and so on. 
After device creation, the ``message_device_register()`` method of the Ubus +driver is called to register the device to the Ubus driver message framework:: + + int message_device_register(struct message_device *mdev) + +This framework provides a unified interface for message transmission and +reception externally. + +Hisi UBUS Local Ras Error Handler +================================= +Hisi UBUS provides a local RAS handling module to detect and process errors +reported on the UB bus. It offers error printing and register dump, determines +whether recovery is needed based on error type and severity, and can reset +ports for port issues in a cluster environment. + +UB Vendor-Defined Messages Manager +================================== +Hisi UBUS defines several vendor-defined messages, and implements their +transmission and processing. These private messages are mainly used for +managing the registration, release, and state control of physical and +virtual devices. \ No newline at end of file diff --git a/Documentation/ub/ubus/index.rst b/Documentation/ub/ubus/index.rst new file mode 100644 index 000000000000..a4c2a58324cf --- /dev/null +++ b/Documentation/ub/ubus/index.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============= +UB BUS Driver +============= + +.. toctree:: + :maxdepth: 2 + :numbered: + + ubus + ubus-service + hisi_ubus \ No newline at end of file diff --git a/Documentation/ub/ubus/ubus-service.rst b/Documentation/ub/ubus/ubus-service.rst new file mode 100644 index 000000000000..fd347fff959a --- /dev/null +++ b/Documentation/ub/ubus/ubus-service.rst @@ -0,0 +1,60 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================================== +UnifiedBus Bus Driver (UBUS Driver) Service +=========================================== + +The UnifiedBus (UB) specification describes RAS-related error handling and +notification-based hot-plug functionalities. 
The UBUS driver implements these +two types of functionalities as two independent services in software. This +article will separately introduce these two services. + +UB Device Driver Error Service +============================== +The UB specification defines three categories of protocol errors: A, B, and C. +Among these, A and B category protocol errors are directly handled by the +UB device driver, and thus will not be further discussed in this document. +C category protocol errors are reported to the UBUS Driver via the APEI +mechanism. The UBUS Driver provides a set of mechanisms for handling C category +protocol errors, which work in conjunction with the UB device driver to +complete the error handling process. + +The UBUS driver provides the ``struct ub_error_handlers`` structure, which +includes multiple callback functions related to error handling. The UB device +driver needs to implement these callback functions:: + + struct ub_error_handlers { + void (*ub_reset_prepare)(struct ub_entity *uent); + void (*ub_reset_done)(struct ub_entity *uent); + ub_ers_result_t (*ub_error_detected)(struct ub_entity *uent, ub_channel_state_t state); + ub_ers_result_t (*ub_resource_enabled)(struct ub_entity *uent); + }; + +For UB device driver: + + - ub_reset_prepare is called before ELR, serving to notify the device driver to + prepare for the work before ELR + - ub_reset_done is called after ELR, serving to notify the device driver that + ELR has completed and services can be resumed + - ub_error_detected is called when the UB bus driver detects an error, serving + to notify the UB device driver of the occurrence of an error + - ub_resource_enabled is called after the UB bus driver has completed error + handling, serving to notify the UB device driver that error handling has + completed + +Hot-Plug Service +================ +The UB specification defines the hot-plug functionality for devices, which +requires coordination between software and hardware. 
The UBUS driver implements +the hot removal and hot insertion of external devices on a per-slot basis. +For detailed procedures, please refer to the UB specification document. The main +functional points implemented by the UBUS driver include: + + - Button event handling, completing the processing of hot-plug and + hot-unplug button messages + - Indicator control, switching different knowledge points based on the + device status + - Power control, performing power on/off operations for slots based on the + device status + - Providing a user-space sysfs interface to simulate button effects + according to user commands diff --git a/Documentation/ub/ubus/ubus.rst b/Documentation/ub/ubus/ubus.rst new file mode 100644 index 000000000000..e7176e98732e --- /dev/null +++ b/Documentation/ub/ubus/ubus.rst @@ -0,0 +1,312 @@ +.. SPDX-License-Identifier: GPL-2.0 + +====================================== +How To Write Linux UB Device Drivers +====================================== + +UnifiedBus (abbreviated as UB) is an interconnection technology and +architecture designed for computing systems. It unifies the communication +between IO, memory access, and various processing units within the same +interconnection technology framework, enabling high-performance data transfer, +unified resource management, efficient collaboration, and effective programming +in computing systems. Resource management is one of its key features, +implemented through a combination of software and hardware. The UB Bus Driver +(referred to as the UBUS Driver) implements the software portion of this +feature. This document provides a brief overview of the components within the +UBUS Driver framework and how to develop UB device drivers within this driver +framework. See more on the UB spec . + +Composition of the UBUS Driver +============================== +The UBUS Driver consists of two parts. 
The first part is the common +implementation section, which will be developed according to the UB +specification requirements. The second part is the proprietary implementation by +each manufacturer, which is based on the specific circuit designs of each host +manufacturer. Each host manufacturer can provide differentiated functionalities +in this part of the code. + +If the UBUS subsystem is not configured (CONFIG_UB_UBUS is not set), most of +the UBUS functions described below are defined as inline functions either +completely empty or just returning an appropriate error codes to avoid +lots of ifdefs in the drivers. + +The figure below illustrates the internal composition and system boundaries of +the UBUS Driver. + +.. code-block:: none + + +----------------------------------------------------------+ + | ub device driver | + +----------------------------------------------------------+ + ^ + | + v + +----------------------------------------------------------+ + | ubus driver | + | | + | +--------------------------------------------------+ | + | | ubus driver vendor-specific | | + | +--------------------------------------------------+ | + | | + | +--------------------------------------------------+ | + | | ubus driver common | | + | | | | + | | +------+ +--------+ +------+ +-------+ +-----+ | | +---------+ + | | | enum | | config | | port | | route | | msg | | | <-> | GIC/ITS | + | | +------+ +--------+ +------+ +-------+ +-----+ | | +---------+ + | | +------------+ +--------+ +-----------+ | | + | | | controller | | entity | | interrupt | | | +------------+ + | | +------------+ +--------+ +-----------+ | | <-> | IOMMU/UMMU | + | | +---------+ +----------+ +------+ +----------+ | | +------------+ + | | | decoder | | resource | | pool | | instance | | | + | | +---------+ +----------+ +------+ +----------+ | | + | | +-----+ +------+ +-------+ +---------+ +-------+ | | + | | | ras | | link | | reset | | hotplug | | sysfs | | | + | | +-----+ +------+ +-------+ 
+---------+ +-------+ | | + | | +------+ +---------------+ | | + | | | ubfi | | bus framework | | | + | | +------+ +---------------+ | | + | +--------------------------------------------------+ | + +----------------------------------------------------------+ + ^ + | + v + +----------------------------------------------------------+ + | hardware/firmware | + +----------------------------------------------------------+ + +The following briefly describes the functions of each submodule within the +UBUS driver: + + - enum: implement network topology scanning and device enumeration + functionality + - config: enable access to the device configuration space + - port: manage device ports + - route: implement the configuration of the routing table + - msg: implement message assembly and transmission/reception processing + for management messages + - controller: initialization and de-initialization of the UB controller + - entity: enable device configuration, multi-entity management, and other + functionalities + - interrupt: implement USI interrupt functionality + - decoder: implement address decoding functionality for MMIO access to + device resource space + - resource: manage the MMIO address space allocated by the user host to + the device + - pool: implementation of pooled message processing + - instance: implement bus instance management + - ras: implement handling for RAS exceptions + - link: implement processing of link messages + - reset: implement the reset function + - hotplug: enable hot-plug functionality for the device + - sysfs: implement sysfs attribute files + - ubfi: implement parsing of the UBRT table + - bus framework: implementation of the Ubus Driver Framework + +Structure of UB device driver +============================= +In Linux, the ``ub_driver`` structure is used to describe a UB device driver. +The `struct ub_driver` is employed to represent a UB device driver, and +the structure definition is as follows. + +.. 
kernel-doc:: include/ub/ubus/ubus.h + :functions: ub_driver + +This structure includes a matchable device table (`id_table`), a probe function, +a remove function, a shutdown function, error handling, and other functionalities. +The following content will provide a reference for the implementation of these +features. + +Rules for Device and Driver Matching +------------------------------------ +The matching rules for UnifiedBus devices and drivers are relatively flexible, +allowing for any combination of the following five matching entries in the +`struct ub_device_id` within the device driver to achieve the target matching rule: + + - GUID's Vendor ID + - GUID's Device ID + - Configuration Space Module Vendor ID + - Configuration Space Module ID + - Configuration Space Class Code + +The ID table is an array of ``struct ub_device_id`` entries ending with an +all-zero entry. Definitions with static const are generally preferred. + +.. kernel-doc:: include/linux/mod_devicetable.h + :functions: ub_device_id + +Most drivers only need ``UB_ENTITY()`` or ``UB_ENTITY_MODULE`` or +``UB_ENTITY_CLASS()`` to set up a ub_device_id table. + +The following is an example:: + + static const struct ub_device_id sample_tbl[] = { + { 0xCC08, 0xA001, UB_ANY_ID, UB_ANY_ID, 0, 0 }, + { UB_ENTITY(0xCC08, 0xA001), 0, 0 }, + { UB_ENTITY_MODULE(0xCC08, 0xA001, 0xCC08, 0xA001), 0, 0 }, + { UB_ENTITY_CLASS(0x0200, 0xffff) }, + }; + +New UB IDs may be added to a device driver ub_ids table at runtime +as shown below:: + + echo "vendor device modulevendor moduleid class class_mask driver_data" > \ + /sys/bus/ub/drivers/sample/new_id + +All fields are passed in as hexadecimal values (no leading 0x). +The vendor and device fields are mandatory, the others are optional. Users +need pass only as many optional fields as necessary: + + - modulevendor and moduledevice fields default to UB_ANY_ID (FFFFFFFF) + - class and classmask fields default to 0 + - driver_data defaults to 0UL. 
+ - override_only field defaults to 0. + +Note that driver_data must match the value used by any of the ub_device_id +entries defined in the driver. This makes the driver_data field mandatory +if all the ub_device_id entries have a non-zero driver_data value. + +Once added, the driver probe routine will be invoked for any unclaimed +UB devices listed in its (newly updated) ub_ids list. + +Register UB Device Driver +------------------------- +The UB device driver uses `ub_register_driver` to register the device driver. +During the registration process, the matching between the device and the +driver will be triggered, with the matching rules referenced in the previous +section. + +UB Device Driver Probe Process Reference +---------------------------------------- +- Call `ub_set_user_info` to configure the user host information into the entity + Each entity's configuration space has corresponding user register + information, such as user EID, token ID, etc. Before the device driver + starts using the device, it needs to configure the user host information + for the device. + +- Call `ub_entity_enable` to configure the access path between the host and the device + Before using the device, you need to enable the bidirectional channel + switch for accessing the device from the user host and vice versa. + This is achieved by configuring the device configuration space registers. + +- Set the DMA mask size + The device driver can reconfigure this field segment based on the + device's DMA addressing capability. The default configuration is 32-bit. + +- Call the kernel DMA interface to request DMA memory + The device driver requests DMA memory through the DMA interface provided + by the kernel to prepare for subsequent device DMA operations. + +- Call `ub_iomap` to complete the MMIO access mapping for the resource space + The device resource space stores private configurations related to device + driver capabilities. 
Before accessing the device resource space, you need + to call the ioremap interface to complete address mapping. The ub_iomap + interface uses the device attribute, while the ub_iomap_wc interface + uses the writecombine attribute. + +- Call `ub_alloc_irq_vectors` or `ub_alloc_irq_vectors_affinity` to complete + the interrupt request, and then call the kernel's interrupt registration API. + +- Initiate specific business functions + +UB Device Driver Removal Process Reference +------------------------------------------ +- Stop specific business functions +- Invoke the kernel's interrupt unregistration API, call ub_disable_intr, to + complete the unregistration of the interrupt handler and release the interrupt +- Call ub_iounmap to demap the MMIO access space +- Invoke the kernel's DMA interface to release DMA memory +- Call ub_entity_enable to close the access path between the host and the device +- Call ub_unset_user_info to clear the user host information configured to the + entity + +UB Device Driver Shutdown +------------------------- +The UB device shutdown is triggered during the system shutdown or restart +process, and the UB device driver needs to stop the service flow in the shutdown +interface. + +UB Device Driver Virtual configure +---------------------------------- + +If the MUE supports multiple UEs, the device driver needs to provide +`virt_configure` callback. the UEs can be enabled or disabled to facilitate +direct connection to virtual machines for use. The bus driver will cyclically +call the virt_configure callback of the device driver to enable and disable +each UE in sequence. Within the virt_configure function of the device driver, +it needs to call `ub_enable_ue` and `ub_disable_ue` provided by the bus driver +to create and destroy UEs, at the same time, private processing logic can +also be executed. + +UE can be enabled and disabled through sysfs. The process is as follows:: + + 1. 
Check the number of UEs currently supported by the MUE + # cat /sys/bus/ub/devices/.../ub_totalues + 2. Specify the number of enabled UEs within the maximum UE quantity range + # echo 3 > /sys/bus/ub/devices/.../ub_numues + 3. Disable UEs + # echo 0 > /sys/bus/ub/devices/.../ub_numues + +UB Device Driver Virtual notify +------------------------------- + +If the device supports multiple UEs and the MUE device driver wants to be +aware of UE state changes, `virt_notify` hook function can be implemented to +capture the UE state. + +UB Device Driver Activate and Deactivate +---------------------------------------- + +The bus driver supports maintaining the working status of entities, indicating +whether an entity is in operation. It also provides corresponding interfaces +for controlling devices to enter or exit the working state, such as +`ub_activate_entity` and `ub_deactivate_entity`. If the device driver needs +to perform any special procedures, it must implement the corresponding activate +and deactivate hook functions. + +UB Device Driver RAS handler +---------------------------- + +The bus driver provides a set of hooks for RAS processing, creating an +opportunity window to notify device drivers when handling events such as +resets and RAS, allowing them to execute corresponding processing measures. +Currently implemented hooks include `reset_prepare`, `reset_done`, +`error_detected`, and `resource_enabled`. Device drivers can optionally +provide corresponding implementations to execute their own private processing. + +Uninstall UB Device Driver +-------------------------- +The UB device driver uses `ub_unregister_driver` to unregister the driver. This +interface call will perform a remove operation on all devices matched by the +driver, ultimately removing the UB device driver from the system. + +How to find UB devices manually +=============================== + +UBUS provides several interfaces to obtain ub_entities. 
You can search for them +using keywords such as GUID, EID, or entity number. Or you can find an entire +class of devices using vendor ID and device ID. + +How to access UB Configuration space +==================================== + +You can use `ub_config_(read|write)_(byte|word|dword)` to access the config +space of an entity represented by `struct ub_entity *`. All these functions return +0 when successful or an error code. Most drivers expect that accesses to valid UB +entities don't fail. + +The macros for configuration space registers are defined in the header file +include/uapi/ub/ubus/ubus_regs.h. + +Vendor and device identifications +================================= + +Do not add new device or vendor IDs to include/ub/ubus/ubus_ids.h unless they +are shared across multiple drivers. You can add private definitions in +your driver if they're helpful, or just use plain hex constants. + +The device IDs are arbitrary hex numbers (vendor controlled) and normally used +only in a single location, the ub_device_id table. + +Please DO submit new vendor/device IDs to . +There's a mirror of the ub.ids file at https://gitee.com/openeuler/ubutils/ub.ids. -- Gitee From 562a0106fefe3681b870313aa35926fa58dfbd02 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Thu, 27 Nov 2025 09:28:01 +0800 Subject: [PATCH 033/126] ub:ubus: Add ubus devicetree file commit 575bfc910e2219b710f60f49ea1fc22543e85b46 openEuler Define the attributes and attribute values of device nodes such as UB Controller to ensure that the kernel can correctly parse and use these attributes. 
Signed-off-by: Yahui Liu Signed-off-by: Yuhao Xiang Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: zhaolichang <943677312@qq.com> --- .../devicetree/bindings/ub/hisi,ubc.yaml | 35 ++++++++++++ .../devicetree/bindings/ub/ub,ubc.yaml | 55 +++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 Documentation/devicetree/bindings/ub/hisi,ubc.yaml create mode 100644 Documentation/devicetree/bindings/ub/ub,ubc.yaml diff --git a/Documentation/devicetree/bindings/ub/hisi,ubc.yaml b/Documentation/devicetree/bindings/ub/hisi,ubc.yaml new file mode 100644 index 000000000000..2219dd7902c1 --- /dev/null +++ b/Documentation/devicetree/bindings/ub/hisi,ubc.yaml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/ub/hisi,ubc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon UBC (Unified Bus Controller) platform device + +maintainers: + - Yuhao Xiang + +description: |+ + This platform device was added to enable the automatic loading of the + hisi_ubus driver. If this feature is not needed, you can omit adding this + device and manually load the driver instead. + +properties: + $nodename: + pattern: "^hisi-ubc$" + description: | + The node name should be "hisi-ubc". + + compatible: + const: "hisi,ubc" + description: | + The compatible property should be "hisi,ubc" to identify the device as a + HiSilicon UBC platform device. 
+ +unevaluatedProperties: false + +examples: + - |+ + hisi-ubc { + compatible = "hisi,ubc"; + }; diff --git a/Documentation/devicetree/bindings/ub/ub,ubc.yaml b/Documentation/devicetree/bindings/ub/ub,ubc.yaml new file mode 100644 index 000000000000..012a293cf9e2 --- /dev/null +++ b/Documentation/devicetree/bindings/ub/ub,ubc.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/ub/ub,ubc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: UBC (Unified Bus Controller) platform device + +maintainers: + - Yuhao Xiang + +description: | + The UBC platform device reports the UBC interrupt number and the association + between the UBC and the interrupt controller. + +properties: + $nodename: + pattern: "^ubc@[0-9a-f]*" + + compatible: + const: "ub,ubc" + + interrupts: + maxItems: 1 + description: | + The interrupt specifier for the UBC. Used by the msgq of the ub controller. + + index: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + The index of the UBC. This is used to identify the specific UBC + in a system with multiple UBC devices. Starts from 0. + + msi-parent: + description: The msi interrupt for the UBC. Used by the ub entity connected + to UBC. 
+ +required: + - compatible + - interrupts + - index + - msi-parent + +unevaluatedProperties: true + +examples: + - |+ + ubc@0 { + compatible = "ub,ubc"; + #interrupt-cells = <0x3>; + interrupt-parent = <0x01>; + interrupts = <0x00 0xcb 0x4>; + index = <0x00>; + msi-parent = <&its 0x54c0>; + }; -- Gitee From d32f50d33bc776182fbfe61231f3a6b5fa134a05 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 24 Nov 2025 14:03:22 +0800 Subject: [PATCH 034/126] ub: cdma: add CDMA driver-api documentation description commit 27d2c29e4512954c7efcee4fa62b13fd67ce0f93 openEuler This patch add CDMA driver-api documentation description Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/driver-api/ub/cdma.rst | 190 ++++++++++++++++++++++ Documentation/driver-api/ub/index.rst | 5 +- drivers/ub/cdma/cdma_api.c | 219 ++++++++++++++++++++++++++ include/ub/cdma/cdma_api.h | 84 +++++++++- 4 files changed, 492 insertions(+), 6 deletions(-) create mode 100644 Documentation/driver-api/ub/cdma.rst diff --git a/Documentation/driver-api/ub/cdma.rst b/Documentation/driver-api/ub/cdma.rst new file mode 100644 index 000000000000..784962a71af5 --- /dev/null +++ b/Documentation/driver-api/ub/cdma.rst @@ -0,0 +1,190 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============================ +CDMA Driver Support Library +============================ + +Overview +========= +CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory read +and write operations between hosts or between host and devices. + +The key features are described as follows: + ++ 1. Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write. ++ 2. Asynchronous memory read and write between host and devices via DMA. ++ 3. Asynchronous memory read and write between devices and host via DMA. 
+ +This document aims to provide a guide for device driver developers on the CDMA +driver API, as well as how to use it for asynchronous memory read and write +operations between hosts in CDMA. + +CDMA Interface Operation +========================== +The API of the CDMA framework does not support arbitrary concurrent calls. +For example, using a Queue object and destroying the Queue concurrently can lead +to unexpected exceptions. +Users are required to ensure the correctness of the call logic. These objects +include context, segment, queue, etc. + +.. kernel-doc:: include/ub/cdma/cdma_api.h + :functions: + +.. kernel-doc:: drivers/ub/cdma/cdma_api.c + :export: + +CDMA API Sample +================= + +DMA Resource Sample +----------------------- +.. code-block:: c + + #define POLL_LOOP_EXAMP 100 + #define POLL_MSLEEP_EXAMP 1 + #define QUEUE_DEPTH_EXAMP 512 + #define QUEUE_RMT_EID_EXAMP 2 + #define QUEUE_DCAN_EXAMP 1 + + struct dma_seg_cfg local_seg_cfg = {}; + struct dma_seg_cfg rmt_seg_cfg = {}; + struct dma_seg *local_seg, *rmt_seg; + struct queue_cfg queue_cfg = {}; + int ctx_handle, queue_handle; + struct dma_device *dev_list; + struct dma_device *dma_dev; + u32 loop = POLL_LOOP_EXAMP; + struct dma_cr ret_cr = {}; + dma_status status; + int ret = -EINVAL; + u32 dev_num = 0; + + dev_list = dma_get_device_list(&dev_num); + if (!dev_list || !dev_num) { + printk("get device list failed\n"); + return; + } + dma_dev = &dev_list[0]; + + ctx_handle = dma_create_context(dma_dev); + if (ctx_handle < 0) { + printk("create context failed, ctx_handle: %d.\n", ctx_handle); + goto free_dev_list; + } + + queue_cfg.queue_depth = QUEUE_DEPTH_EXAMP; + queue_cfg.rmt_eid.dw0 = QUEUE_RMT_EID_EXAMP; + queue_cfg.dcna = QUEUE_DCAN_EXAMP; + queue_handle = dma_alloc_queue(dma_dev, ctx_handle, &queue_cfg); + if (queue_handle < 0) { + printk("allocate queue failed, queue_handle: %d.\n", queue_handle); + goto delete_ctx; + } + + /* Input parameter, local payload address */ + 
local_seg_cfg.sva = (u64)local_buf_addr; + /* Input parameter, local payload memory length */ + local_seg_cfg.len = local_buf_len; + + local_seg = dma_register_seg(dma_dev, ctx_handle, &local_seg_cfg); + if (!local_seg) { + printk("register local segment failed.\n"); + goto free_queue; + } + + /* Input parameter, remote payload address */ + rmt_seg_cfg.sva = (u64)rmt_buf_addr; + /* Input parameter, remote payload memory length */ + rmt_seg_cfg.len = rmt_buf_len; + + rmt_seg = dma_import_seg(&rmt_seg_cfg); + if (!rmt_seg) { + printk("import rmt segment failed.\n"); + goto unregister_seg; + } + + status = dma_write(dma_dev, rmt_seg, local_seg, queue_handle); + if (status != DMA_STATUS_OK) { + printk("write failed, status = %d.\n", status); + goto unimport_seg; + } + + while (loop > 0) { + ret = dma_poll_queue(dma_dev, queue_handle, 1, &ret_cr); + if (ret == 1) + break; + msleep(POLL_MSLEEP_EXAMP); + loop --; + } + ... + + unimport_seg: + dma_unimport_seg(rmt_seg); + unregister_seg: + dma_unregister_seg(dma_dev, local_seg); + free_queue: + dma_free_queue(dma_dev, queue_handle); + delete_ctx: + dma_delete_context(dma_dev, ctx_handle); + free_dev_list: + dma_free_device_list(dev_list, dev_num); + ... + +/* Register the virtual kernel online interface to notify users that + * the kernel-mode CDMA driver is online. + */ DMA Client Sample ------------------- + +.. code-block:: c + + /* After the driver is loaded or restarted upon reset, the add + * interface is called to allow users to request resources + * required for DMA. + */ + static int example_add(u32 eid) + { + /* Refer to DMA Resource Sample, create context, queue, segment + * dma_get_device_list, dma_create_context, dma_alloc_queue etc. + */ + return 0; + } + + /* The remove interface is used to notify users to delete resources + * under DMA. 
+ */ + static void example_remove(u32 eid) + { + /* Refer to DMA Resource Sample, delete context, queue, segment + * dma_free_queue dma_delete_context dma_free_device_list etc. + */ + } + + /* The stop interface is used to notify users to stop using the + * DMA channel. + */ + static void example_stop(u32 eid) + { + /* Stop read and write operations through status control */ + } + + static struct dma_client example_client = { + .client_name = "example", + .add = example_add, + .remove = example_remove, + .stop = example_stop, + }; + + static void example_register_client(u32 eid) + { + ... + dma_register_client(&example_client); + ... + } + +Support +======== +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst index d3a5969e6e94..5738694649be 100644 --- a/Documentation/driver-api/ub/index.rst +++ b/Documentation/driver-api/ub/index.rst @@ -11,8 +11,9 @@ The Linux UnifiedBus implementer's API guide Table of contents .. toctree:: - :maxdepth: 2 + :maxdepth: 4 ubfi ubus - ubase \ No newline at end of file + ubase + cdma diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index cc3aa6ce4921..ae84210c1f97 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -20,6 +20,16 @@ LIST_HEAD(g_client_list); DECLARE_RWSEM(g_clients_rwsem); DECLARE_RWSEM(g_device_rwsem); +/** + * dma_get_device_list - Get DMA device list + * @num_devices: DMA device number + * + * Users can perform subsequent resource creation operations using a pointer + * to a DMA device in the list. + * + * Context: Process context. 
+ * Return: address of the first device in the list + */ struct dma_device *dma_get_device_list(u32 *num_devices) { struct cdma_device_attr *attr; @@ -73,6 +83,16 @@ struct dma_device *dma_get_device_list(u32 *num_devices) } EXPORT_SYMBOL_GPL(dma_get_device_list); +/** + * dma_free_device_list - Free DMA device list + * @dev_list: DMA device list + * @num_devices: DMA device number + * + * It can be called after using dev_list and must be called. + * + * Context: Process context. + * Return: NA + */ void dma_free_device_list(struct dma_device *dev_list, u32 num_devices) { int ref_cnt; @@ -97,6 +117,15 @@ void dma_free_device_list(struct dma_device *dev_list, u32 num_devices) } EXPORT_SYMBOL_GPL(dma_free_device_list); +/** + * dma_get_device_by_eid - Get the specified EID DMA device + * @eid: Device eid pointer + * + * Choose one to use with the dma_get_device_list function. + * + * Context: Process context. + * Return: DMA device structure pointer + */ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) { struct cdma_device_attr *attr; @@ -146,6 +175,16 @@ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) } EXPORT_SYMBOL_GPL(dma_get_device_by_eid); +/** + * dma_create_context - Create DMA context + * @dma_dev: DMA device pointer + * + * The context is used to store resources such as Queue and Segment, and + * returns a pointer to the context information. + * + * Context: Process context. + * Return: DMA context ID value + */ int dma_create_context(struct dma_device *dma_dev) { struct cdma_ctx_res *ctx_res; @@ -189,6 +228,13 @@ int dma_create_context(struct dma_device *dma_dev) } EXPORT_SYMBOL_GPL(dma_create_context); +/** + * dma_delete_context - Delete DMA context + * @dma_dev: DMA device pointe + * @handle: DMA context ID value + * Context: Process context. 
+ * Return: NA + */ void dma_delete_context(struct dma_device *dma_dev, int handle) { struct cdma_ctx_res *ctx_res; @@ -227,6 +273,17 @@ void dma_delete_context(struct dma_device *dma_dev, int handle) } EXPORT_SYMBOL_GPL(dma_delete_context); +/** + * dma_alloc_queue - Alloc DMA queue + * @dma_dev: DMA device pointer + * @ctx_id: DMA context ID + * @cfg: DMA queue configuration information pointer + * + * The user uses the queue for DMA read and write operations. + * + * Context: Process context. + * Return: DMA queue ID value + */ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cfg) { struct cdma_ctx_res *ctx_res; @@ -285,6 +342,13 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cf } EXPORT_SYMBOL_GPL(dma_alloc_queue); +/** + * dma_free_queue - Free DMA queue + * @dma_dev: DMA device pointer + * @queue_id: DMA queue ID + * Context: Process context. + * Return: NA + */ void dma_free_queue(struct dma_device *dma_dev, int queue_id) { struct cdma_ctx_res *ctx_res; @@ -318,6 +382,18 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) } EXPORT_SYMBOL_GPL(dma_free_queue); +/** + * dma_register_seg - Register local segment + * @dma_dev: DMA device pointer + * @ctx_id: DMA context ID + * @cfg: DMA segment configuration information pointer + * + * The segment stores local payload information for operations such as DMA + * read and write, and returns a pointer to the segment information. + * + * Context: Process context. + * Return: DMA segment structure pointer + */ struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id, struct dma_seg_cfg *cfg) { @@ -390,6 +466,13 @@ struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id, } EXPORT_SYMBOL_GPL(dma_register_seg); +/** + * dma_unregister_seg - Unregister local segment + * @dma_dev: DMA device pointer + * @dma_seg: DMA segment pointer + * Context: Process context. 
+ * Return: NA + */ void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg) { struct cdma_ctx_res *ctx_res; @@ -426,6 +509,16 @@ void dma_unregister_seg(struct dma_device *dma_dev, struct dma_seg *dma_seg) } EXPORT_SYMBOL_GPL(dma_unregister_seg); +/** + * dma_import_seg - Import the remote segment + * @cfg: DMA segment configuration information pointer + * + * The segment stores the remote payload information for operations such as + * DMA read and write, and returns the segment information pointer. + * + * Context: Process context. + * Return: DMA segment structure pointer + */ struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg) { if (!cfg || !cfg->sva || !cfg->len) @@ -435,6 +528,12 @@ struct dma_seg *dma_import_seg(struct dma_seg_cfg *cfg) } EXPORT_SYMBOL_GPL(dma_import_seg); +/** + * dma_unimport_seg - Unimport the remote segment + * @dma_seg: DMA segment pointer + * Context: Process context. + * Return: NA + */ void dma_unimport_seg(struct dma_seg *dma_seg) { if (!dma_seg) @@ -481,6 +580,22 @@ static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id, return 0; } +/** + * dma_write - DMA write operation + * @dma_dev: DMA device pointer + * @rmt_seg: the remote segment pointer + * @local_seg: the local segment pointer + * @queue_id: DMA queue ID + * + * Invoke this interface to initiate a unilateral write operation request, + * sending the specified number of bytes of data from the designated local + * memory starting position to the specified destination address. + * Once the data is successfully written to the remote node, the application + * can poll the queue to obtain the completion message. + * + * Context: Process context. Takes and releases the spin_lock. 
+ * Return: operation result, DMA_STATUS_OK on success + */ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id) { @@ -505,6 +620,23 @@ enum dma_status dma_write(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_write); +/** + * dma_write_with_notify - DMA write with notify operation + * @dma_dev: DMA device pointer + * @rmt_seg: the remote segment pointer + * @local_seg: the local segment pointer + * @queue_id: DMA queue ID + * @data: notify data for write with notify operation + * + * Invoke this interface to initiate a write notify operation request for a + * unilateral operation, which sends a specified number of bytes of data from a + * designated starting position in local memory to a specified destination address. + * Once the data is successfully written to the remote node, + * the application can poll the queue to obtain the completion message. + * + * Context: Process context. Takes and releases the spin_lock. + * Return: operation result, DMA_STATUS_OK on success + */ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id, @@ -531,6 +663,22 @@ enum dma_status dma_write_with_notify(struct dma_device *dma_dev, } EXPORT_SYMBOL_GPL(dma_write_with_notify); +/** + * dma_read - DMA read operation + * @dma_dev: DMA device pointer + * @rmt_seg: the remote segment pointer + * @local_seg: the local segment pointer + * @queue_id: DMA queue ID + * + * Invoke this interface to initiate a unidirectional read operation request, + * reading data from the specified remote address to the designated local cache + * starting position. + * Once the data is successfully read from the remote node to the local memory, + * the application can poll the queue to obtain the completion message. + * + * Context: Process context. Takes and releases the spin_lock. 
+ * Return: operation result, DMA_STATUS_OK on success + */ enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id) { @@ -555,6 +703,21 @@ enum dma_status dma_read(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_read); +/** + * dma_cas - DMA cas operation + * @dma_dev: DMA device pointer + * @rmt_seg: the remote segment pointer + * @local_seg: the local segment pointer + * @queue_id: DMA queue ID + * @data: compare data and swap data for cas operation + * + * Initiate a request for a unilateral atomic CAS operation. Once the operation + * is successful, the application can poll the queue to obtain the completion + * message. + * + * Context: Process context. Takes and releases the spin_lock. + * Return: operation result, DMA_STATUS_OK on success + */ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id, struct dma_cas_data *data) @@ -580,6 +743,21 @@ enum dma_status dma_cas(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_cas); +/** + * dma_faa - DMA faa operation + * @dma_dev: DMA device pointer + * @rmt_seg: the remote segment pointer + * @local_seg: the local segment pointer + * @queue_id: DMA queue ID + * @add: add data for faa operation + * + * Initiate a request for a unilateral atomic FAA operation. Once the operation + * is successful, the application can poll the queue to obtain the completion + * message. + * + * Context: Process context. Takes and releases the spin_lock. 
+ * Return: operation result, DMA_STATUS_OK on success + */ enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, struct dma_seg *local_seg, int queue_id, u64 add) { @@ -604,6 +782,23 @@ enum dma_status dma_faa(struct dma_device *dma_dev, struct dma_seg *rmt_seg, } EXPORT_SYMBOL_GPL(dma_faa); +/** + * dma_poll_queue - DMA polling queue + * @dma_dev: DMA device pointer + * @queue_id : DMA queue ID + * @cr_cnt: number of completion record + * @cr: completion record pointer + * + * Poll the DMA channel completion event, and the polling result is returned to + * the address specified by the parameter cr. + * The cr data structure includes information such as the result of the request + * execution, the length of data transferred, and the type of error. + * The caller must ensure that the number of parameters cr_cnt matches the number + * of addresses specified by cr. + * + * Context: Process context. + * Return: Polling operation results >0 on success, others on failed + */ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, struct dma_cr *cr) { @@ -639,6 +834,21 @@ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, } EXPORT_SYMBOL_GPL(dma_poll_queue); +/** + * dma_register_client - DMA register client + * @client: DMA device client pointer + * + * Register the management software interface to notify the management software + * that the DMA driver is online. After loading or resetting and restarting the + * driver, call the add interface to notify the management software to request + * the resources required by DMA. When the driver is reset, deregistered, or + * unloaded, call the stop interface to notify the management software to stop + * using the DMA channel, and then call the remove interface to notify the + * management software to delete the DMA resources. + * + * Context: Process context. 
+ * Return: operation result, 0 on success, others on failed + */ int dma_register_client(struct dma_client *client) { struct cdma_dev *cdev = NULL; @@ -677,6 +887,15 @@ int dma_register_client(struct dma_client *client) } EXPORT_SYMBOL_GPL(dma_register_client); +/** + * dma_unregister_client - DMA unregister client + * @client: DMA device client pointer + * + * Unregister the management software interface, and delete client resources + * + * Context: Process context. + * Return: NA + */ void dma_unregister_client(struct dma_client *client) { struct cdma_dev *cdev = NULL; diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 61449ab9ee26..51acd722a74d 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -7,6 +7,14 @@ #include #include +/** + * struct dma_device - DMA device structure + * @attr: CDMA device attribute info: EID, UPI etc + * @ref_cnt: reference count for adding a context to device + * @private_data: cdma context resoucres pointer + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_device { struct cdma_device_attr attr; atomic_t ref_cnt; @@ -22,17 +30,35 @@ enum dma_cr_opcode { DMA_CR_OPC_WRITE_WITH_IMM, }; +/** + * union dma_cr_flag - DMA completion record flag + * @bs: flag bit value structure + * @value: flag value + */ union dma_cr_flag { struct { - u8 s_r : 1; - u8 jetty : 1; - u8 suspend_done : 1; - u8 flush_err_done : 1; + u8 s_r : 1; /* indicate CR stands for sending or receiving */ + u8 jetty : 1; /* indicate id in the CR stands for jetty or JFS */ + u8 suspend_done : 1; /* suspend done flag */ + u8 flush_err_done : 1; /* flush error done flag */ u8 reserved : 4; } bs; u8 value; }; +/** + * struct dma_cr - DMA completion record structure + * @status: completion record status + * @user_ctx: user private data information, optional + * @opcode: DMA operation code + * @flag: completion record flag + * @completion_len: the number of bytes transferred + * @local_id: local 
JFS ID + * @remote_id: remote JFS ID, not in use for now + * @tpn: transport number + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_cr { enum dma_cr_status status; u64 user_ctx; @@ -46,6 +72,16 @@ struct dma_cr { u32 rsvd[4]; }; +/** + * struct queue_cfg - DMA queue config structure + * @queue_depth: queue depth + * @priority: the priority of JFS, ranging from [0, 15] + * @user_ctx: user private data information, optional + * @dcna: remote device CNA + * @rmt_eid: remote device EID + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct queue_cfg { u32 queue_depth; u8 priority; @@ -57,6 +93,17 @@ struct queue_cfg { u32 rsvd[6]; }; +/** + * struct dma_seg - DMA segment structure + * @handle: segment resource handle + * @sva: payload virtual address + * @len: payload data length + * @tid: payload token id + * @token_value: not used for now + * @token_value_valid: not used for now + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_seg { u64 handle; u64 sva; @@ -77,6 +124,11 @@ struct dma_seg { u32 rsvd[4]; }; +/** + * struct dma_context - DMA context structure + * @dma_dev: DMA device pointer + * @tid: token id for segment + */ struct dma_context { struct dma_device *dma_dev; u32 tid; /* data valid only in bit 0-19 */ @@ -87,6 +139,13 @@ enum dma_status { DMA_STATUS_INVAL, }; +/** + * struct dma_cas_data - DMA CAS data structure + * @compare_data: compare data, length <= 8B: CMP value, length > 8B: data address + * @swap_data: swap data, length <= 8B: swap value, length > 8B: data address + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_cas_data { u64 compare_data; u64 swap_data; @@ -94,6 +153,13 @@ struct dma_cas_data { u32 rsvd[4]; }; +/** + * struct dma_notify_data - DMA write with notify data structure + * @notify_seg: notify segment pointer + * @notify_data: notify data value + * @rsv_bitmap: reserved field bitmap + * 
@rsvd: reserved field array + */ struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; @@ -101,6 +167,16 @@ struct dma_notify_data { u32 rsvd[4]; }; +/** + * struct dma_client - DMA register client structure + * @list_node: client list + * @client_name: client name pointer + * @add: add DMA resource function pointer + * @remove: remove DMA resource function pointer + * @stop: stop DMA operation function pointer + * @rsv_bitmap: reserved field bitmap + * @rsvd: reserved field array + */ struct dma_client { struct list_head list_node; char *client_name; -- Gitee From f130e9683478261a0505015cca5f6f971fc75a46 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 24 Nov 2025 14:11:48 +0800 Subject: [PATCH 035/126] ub: cdma: add CDMA kernel driver design document specification commit 7daf8d5283858f1a484e8bc7ef510f3e205001e2 openEuler This patch add CDMA kernel driver design document specification Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/cdma/cdma.rst | 312 ++++++++++++++++++++++++++++++++ Documentation/ub/cdma/index.rst | 14 ++ Documentation/ub/index.rst | 3 +- 3 files changed, 328 insertions(+), 1 deletion(-) create mode 100644 Documentation/ub/cdma/cdma.rst create mode 100644 Documentation/ub/cdma/index.rst diff --git a/Documentation/ub/cdma/cdma.rst b/Documentation/ub/cdma/cdma.rst new file mode 100644 index 000000000000..39be57652426 --- /dev/null +++ b/Documentation/ub/cdma/cdma.rst @@ -0,0 +1,312 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +====================================== +Crystal Direct Memory Access (CDMA) +====================================== + +Overview +========= +CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory read +and write operations between hosts or between host and devices. + +The key features are described as follows: + ++ 1. 
Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write. ++ 2. Asynchronous memory read and write between host and devices via DMA. ++ 3. Asynchronous memory read and write between devices and host via DMA. + +Overall Structure +=================== + +Driver Modules +--------------- + +The CDMA driver is divided into three modules: UBASE, K-DMA, and U-DMA: + +.. code-block:: none + + +---------------------------+ + | APP | + +---------------------------+ + | + +---------------------------+ + | U-DMA | + +---------------------------+ + | | + | +-------------------+ + | | K-DMA | + | +-------------------+ + | | | + | +----------------+ | + | | Auxiliary Bus | | + | +----------------+ | + | | | + | +-------------------+ + | | UBASE | + | +-------------------+ + | | + +---------------------------+ + | CDMA Hardware | + +---------------------------+ + ++ Figure 1: CDMA Module Relationship Diagram + +UBASE provides management of hardware public resources, including CMD, mailbox +management, event management, and device reset. +It also provides a device and driver matching interface for the CDMA driver based +on the kernel auxiliary bus. + +Within the K-DMA module, functional blocks are divided according to different data +objects: Device Management is responsible for device attribute configuration +(such as EID, UPI, etc.) and device capability queries (such as Jetty specifications); +Event Management handles events reported by the controller, including completion +events and asynchronous events; +Queue Management is responsible for JFS(Jetty For Send)/JFC(Jetty For Completion) +resource management. + +Within the U-DMA module, functional blocks are divided according to data plane +functions: Memory verbs, which are unidirectional operations including read, +write, and atomic operations. +Event verbs register callback functions with K-DMA for post-processing of +asynchronous events. 
+ +Interaction Timing +------------------- + +.. code-block:: none + + +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ + | APP | | U-DMA | | K-DMA | | UBASE | | MS | | HW | + +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ + | CDMA API | | | | | + |---------->| ioctl | | | | + | |---------->| UBASE Func| | | + | | |----------->| | | + | | |<-----------| | | + | | | HW Interface | | + | | |----------------------------------->| + | | |<-----------------------------------| + | | | UBASE Func | | | + | | |----------->| MS MSG | | + | | | |---------->| | + | | | |<----------| | + | | |<-----------| | | + | |<----------| | | | + |<----------| | | | | + | | | | | | + | CDMA API | | | | | + |---------->| HW Interface | | | + | DMA OPT |----------------------------------------------->| + | |<-----------------------------------------------| + |<----------| | | | | + | | | | | | + ++ Figure 2: CDMA Interaction Timing + +The 'Figure 2' shows the runtime sequence of interactions between the CDMA driver, +the UBASE driver, the MS(Management Software), and the hardware. + +Functionality +=============== + +CDMA device creation and reset +--------------------------------- +The CDMA devices are dynamically created by the resource management on the +management software, and the reset operation is also performed by the management +software. +Files involved: cdma_main; + +CDMA device and context management +------------------------------------ +The CDMA driver supports lifecycle management of CDMA devices and enables +applications to create device contexts based on these devices. +Files involved: cdma_context, cdma_main; + +CDMA queue management +--------------------------- +The CDMA queue includes the CDMA JFS and JFC defined on the chip, and encompasses +the management of JFS, JFC, and CTP(Compact Transport) resources. 
+When a remote memory read/write request is initiated, the JFS is used to fill the +corresponding WQE(Work Queue Entry), and the request execution result is received +through the JFC. +Files involved: cdma_queue, cdma_jfs, cdma_jfc, cdma_tp, cdma_db; + +CDMA segment management +----------------------------- +The CDMA driver uses local and remote segment resources for read and write operations. +These operations primarily include the register and unregister functions for +local segment resources, as well as the import and export functions for remote +segment resources. +Files involved: cdma_segment; + +CDMA read/write semantics +--------------------------- +The CDMA communication capability is implemented on the chip side as CTP mode +communication, supporting transaction operations including: +write, write with notify, read, CAS(Compare And Swap), and FAA(Fetch And Add). +Files involved: cdma_handle; + +Processing and reporting of EQE events +--------------------------------------- +The CDMA communication device supports the reporting of transaction operation +results in interrupt mode. The reported events are classified into two types: +CE(Completion Event) and AE(Asynchronous Event). +The two types of events trigger the event callback processing function registered +by the CDMA driver in advance in the interrupt context. +Files involved: cdma_event, cdma_eq; + +Supported Hardware +==================== + +CDMA driver supported hardware: + +=========== ============= +Vendor ID Device ID +=========== ============= +0xCC08 0xA003 +0xCC08 0xA004 +0xCC08 0xD804 +0xCC08 0xD805 +=========== ============= + +You can use the ``lsub`` command on your host OS to query devices. +Below is an example output: + +.. code-block:: shell + + Class <000X>: Device : + <00004> Class <0002>: Device : + +Debugging +========== + +Device Info +----------- + +.. code-block:: none + + Query CDMA device information. 
+ Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/dev_info + The 'CDMA_ENO' value represents the ENO (Entity Number) information for + CDMA devices. You can use the 'lsub' command on your host OS to query devices. + +Capability Info +---------------- + +.. code-block:: none + + Query CDMA device capability information. + Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/cap_info + +Queue Info +----------- + +.. code-block:: none + + Query current queue configuration information. + Example: + $ cat /sys/kernel/debug/ubase//cdma/resource_info/queue_info + Set the queue ID value for the current query using 'queue_id' command, like + $ echo 0 > /sys/kernel/debug/ubase//cdma/resource_info/queue_id. + +Reset Info +------------ + +.. code-block:: none + + Query CDMA device reset operation records. + Example: + $ cat /sys/kernel/debug/ubase//reset_info + +JFS Context +-------------- + +.. code-block:: none + + Query the current JFS channel context information on the software side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/jfs_context + The channel ID is configured by setting the queue ID command, like + $ echo 0 > /sys/kernel/debug/ubase//cdma/context/queue_id. + +JFS Context HW +--------------- + +.. code-block:: none + + Query the current JFS channel context information on the hardware side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/jfs_context_hw + +JFC Context +--------------- + +.. code-block:: none + + Query the current channel JFC context information on the software side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/sq_jfc_context + +JFC Context HW +------------------ + +.. code-block:: none + + Query the current JFC channel context information on the hardware side. + Example: + $ cat /sys/kernel/debug/ubase//cdma/context/sq_jfc_context_hw + +JFS Entity PI +------------------ + +.. 
code-block:: none + + Set or query the PI value of the current JFS channel, used for querying + specific SQE information of the JFS. + Example: + $ echo 0 > /sys/kernel/debug/ubase//cdma/entry_info/entry_pi + $ cat /sys/kernel/debug/ubase//cdma/entry_info/entry_pi + +JFS Entity Info +---------------- + +.. code-block:: none + + Query the information of a specific SQE for the current channel JFS. + Example: + $ cat /sys/kernel/debug/ubase//cdma/entry_info/sqe + The channel ID is configured through the queue ID command. + The SQE ID is set by configuring the 'entry_pi' as described above. + Supports kernel-space resources only. + +JFC Entity CI +---------------- + +.. code-block:: none + + Set or query the CI value of the current JFC channel, used for querying + specific CQE information of the JFC. + Example: + $ echo 0 > /sys/kernel/debug/ubase//cdma/entry_info/entry_ci + $ cat /sys/kernel/debug/ubase//cdma/entry_info/entry_ci + +JFC Entity Info +---------------- + +.. code-block:: none + + Query the information of a specific CQE for the current channel JFC. + Example: + $ cat /sys/kernel/debug/ubase//cdma/entry_info/cqe + The channel ID is configured through the Queue ID command. + The CQE ID is set by configuring the 'entry_ci' as described above. + Supports kernel-space resources only. + +Support +======== +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. \ No newline at end of file diff --git a/Documentation/ub/cdma/index.rst b/Documentation/ub/cdma/index.rst new file mode 100644 index 000000000000..368403170e0d --- /dev/null +++ b/Documentation/ub/cdma/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============== +CDMA Driver +=============== + +.. 
toctree:: + :maxdepth: 2 + :numbered: + + cdma \ No newline at end of file diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 8e939a2ba8fe..c59089129f12 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -8,8 +8,9 @@ UnifiedBus Subsystem ===================== .. toctree:: - :maxdepth: 2 + :maxdepth: 4 ubase/index ubfi/index ubus/index + cdma/index -- Gitee From 08c290ccf257ce5fda3e93e6933298664b066428 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 24 Nov 2025 14:18:52 +0800 Subject: [PATCH 036/126] ub: cdma: add CDMA userspace-api documentation description commit d328ba25db0ec022a3e4885e770a3dcc3a101f0a openEuler This patch add CDMA userspace-api documentation description Signed-off-by: Zhipeng Lu Signed-off-by: Sunyi Nan Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/userspace-api/index.rst | 1 + Documentation/userspace-api/ub/cdma.rst | 51 ++++++++++++++++++++++++ Documentation/userspace-api/ub/index.rst | 13 ++++++ 3 files changed, 65 insertions(+) create mode 100644 Documentation/userspace-api/ub/cdma.rst create mode 100644 Documentation/userspace-api/ub/index.rst diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 2125bb520e52..c02b2bc235db 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -33,6 +33,7 @@ place where this information is gathered. sysfs-platform_profile vduse futex2 + ub/index .. only:: subproject and html diff --git a/Documentation/userspace-api/ub/cdma.rst b/Documentation/userspace-api/ub/cdma.rst new file mode 100644 index 000000000000..e5b9a1e9de76 --- /dev/null +++ b/Documentation/userspace-api/ub/cdma.rst @@ -0,0 +1,51 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ +=============================== +CDMA Userspace Support Library +=============================== + +Overview +========= +CDMA (Crystal Direct Memory Access) is used to provide asynchronous memory read +and write operations between hosts or between host and devices. + +The key features are described as follows: + ++ 1. Peer-to-peer communication between hosts, enabling bidirectional asynchronous memory read or write. ++ 2. Asynchronous memory read and write between host and devices via DMA. ++ 3. Asynchronous memory read and write between devices and host via DMA. + +Char Device +============= +The driver creates one char device per CDMA found on the physical device. +Char devices can be found in /dev/cdma/ and are named as: +/dev/cdma/dev. + +User API +========= + +ioctl +------ +========================= ==================================================== +CDMA_CMD_QUERY_DEV_INFO Query CDMA device information. +CDMA_CMD_CREATE_CTX Create user context resource. +CDMA_CMD_DELETE_CTX Delete user context resource. +CDMA_CMD_CREATE_CTP Create CTP(Compact Transport) channel resource. +CDMA_CMD_DELETE_CTP Delete CTP channel resource. +CDMA_CMD_CREATE_JFS Create JFS(Jetty For Send) resource. +CDMA_CMD_DELETE_JFS Delete JFS resource. +CDMA_CMD_REGISTER_SEG Register local segment resource. +CDMA_CMD_UNREGISTER_SEG Unregister local segment resource. +CDMA_CMD_CREATE_QUEUE Create queue resource. +CDMA_CMD_DELETE_QUEUE Delete queue resource. +CDMA_CMD_CREATE_JFC Create JFC(Jetty For Completion) resource. +CDMA_CMD_DELETE_JFC Delete JFC resource. +CDMA_CMD_CREATE_JFCE Create JFCE(Jetty For Completion Event) resource. +========================= ==================================================== + +Support +======== +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. 
\ No newline at end of file diff --git a/Documentation/userspace-api/ub/index.rst b/Documentation/userspace-api/ub/index.rst new file mode 100644 index 000000000000..3206a2cf64c7 --- /dev/null +++ b/Documentation/userspace-api/ub/index.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +===================== +UnifiedBus Subsystem +===================== + +.. toctree:: + :maxdepth: 1 + + cdma \ No newline at end of file -- Gitee From 6f8c4977c3f91749e390fbddfe46bd2953484a33 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Wed, 26 Nov 2025 19:44:05 +0800 Subject: [PATCH 037/126] ub: ub_fwctl: add ub_fwctl driver-api documentation description commit f632e7e84ac26e324f4c4a43bc72947d44590d1d openEuler This patch add ub_fwctl driver-api documentation description Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 1 + Documentation/ub/ub_fwctl/index.rst | 11 ++ Documentation/ub/ub_fwctl/ub_fwctl.rst | 112 +++++++++++ Documentation/userspace-api/fwctl/fwctl.rst | 1 + Documentation/userspace-api/fwctl/index.rst | 1 + .../userspace-api/fwctl/ub_fwctl.rst | 51 +++++ drivers/fwctl/ub/ub_common.h | 54 +++++ include/uapi/fwctl/ub_fwctl.h | 184 ++++++++++++++++++ 8 files changed, 415 insertions(+) create mode 100644 Documentation/ub/ub_fwctl/index.rst create mode 100644 Documentation/ub/ub_fwctl/ub_fwctl.rst create mode 100644 Documentation/userspace-api/fwctl/ub_fwctl.rst diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index c59089129f12..0a3973b98512 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -10,6 +10,7 @@ UnifiedBus Subsystem .. 
toctree:: :maxdepth: 4 + ub_fwctl/index ubase/index ubfi/index ubus/index diff --git a/Documentation/ub/ub_fwctl/index.rst b/Documentation/ub/ub_fwctl/index.rst new file mode 100644 index 000000000000..4274b33be65a --- /dev/null +++ b/Documentation/ub/ub_fwctl/index.rst @@ -0,0 +1,11 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============== +UB FWCTL Driver +=============== + +.. toctree:: + :maxdepth: 2 + :numbered: + + ub_fwctl \ No newline at end of file diff --git a/Documentation/ub/ub_fwctl/ub_fwctl.rst b/Documentation/ub/ub_fwctl/ub_fwctl.rst new file mode 100644 index 000000000000..5256ff8d122f --- /dev/null +++ b/Documentation/ub/ub_fwctl/ub_fwctl.rst @@ -0,0 +1,112 @@ +.. SPDX-License-Identifier: GPL-2.0 + +====================== +UB FWCTL Kernel Design +====================== + +Overview +======== + +UB_FWCTL: Auxiliary bus device driver based on PMU IDEV. +It isolates user-mode debug (operation and maintenance information) functions from chip implementation details, +converts user debug commands into CMDQ commands, and sends them to the +software through the CMDQ channel of the PMU IDEV device to implement debug functions. + +Description of the Design +========================= + +The public debug tool, namely the newly added ub_fwctl tool in this document, +is primarily designed to provide functions such as querying UB public function configurations, +querying the status and statistics of many modules, and querying die-level information. + +The debug functions provided by this module are shared among multiple subsystems of UB and are not suitable +for being included in any single feature. Ub_fwctl interfaces with the open-source fwctl framework and +provides a user-defined command format for UB, supporting the public DFX functions of the UB system. + +Currently, ub_fwctl only provides querying functions and does not support configuration functions. +The DFX tools for each feature are described in detail in the corresponding feature design documents. 
+This design document focuses on the design of the ub_fwctl tool:: + + Purpose: As Auxiliary device driver, it provides the specific implementation of debug functions + OPS as provided by the fwctl module, and calls the CMDQ interface to pass debug messages to the software. + + Function List: + 1) Serve as an Auxiliary device driver to match Auxiliary devices. + 2) Register the fwctl device and the specific function implementation of ub_fwctl. + 3) Provide CMD queue management interfaces. + +Data structure design of UB FWCTL +================================= + +.. kernel-doc:: drivers/fwctl/ub/ub_common.h + + +System Function Design Description +================================== + +Loading and unloading the ub_fwctl driver +----------------------------------------- + +Feature Introduction:: + + FWCTL is a debug framework scheduled for integration into the mainline Linux kernel. + It provides a command pathway from userspace to kernelspace, + requiring device manufacturers to implement their + own driver plugins registered with the FWCTL kernel framework. + UB has implemented a driver called ub_fwctl, which consists of both a userspace + command-line tool (ubctl) and a kernel-space driver (ub_fwctl). After loading the ub_fwctl driver, + the sysfs system exposes a device file (such as /dev/ubcl) in the OS's /dev directory. + The userspace program ubctl can then open this device file via open(/dev/ubcl) + to obtain a file descriptor, and subsequently communicate with the driver through ioctl calls. + +Implementation Method of Function:: + + 1. Ub_fwctl registers itself with the fwctl framework. + 2. As a secondary device, ub_fwctl connects to ubase through the secondary + bus and uses the CMDQ (command queue) of The PMU IDEV to call the software + programming interface for reading and writing registers. + 3. ubctl provides command-line commands for users to invoke. + During operation, ubctl first opens the /dev/fwctl/fwctlNN device file. 
+ It then assembles a corresponding data structure based on user input. + Next, it invokes the ioctl() system call to enter kernel mode. + Upon receiving a command from ubctl, the ub_fwctl driver first validates the command. + It then communicates with the ubase software module by calling its interface to access the CMDQ. + The software returns the register access result to ub_fwctl via the CMDQ. + ub_fwctl subsequently returns this data to user space. + Finally, after completing its operation, ubctl closes the opened /dev/ubcl file descriptor. + +.. code-block:: none + + +-------+ +----------+ +-------+ +-----+ + | ubctl | --ioctl--> | ub_fwctl | --ubase_send_cmd--> | ubase | --cmdq--> | imp | + +-------+ +----------+ +-------+ +-----+ + +Querying UB link and chip info by ub_fwctl +----------------------------------------- + +Feature Introduction:: + + After a failure occurs in the production environment, + further troubleshooting is required to identify the root cause, + including information checks such as abnormal interrupts, statistical counters, key FIFO status, + and key state machine status. The ubctl needs to support users to query the chip's + debug information through the command-line tool and output the chip's debug information + in a form that is understandable to users. + +Implementation Method of Function:: + + ubctl receives input from the command line, assembles it into corresponding commands, + and invokes ioctl to enter kernel space. The fwctl driver copies the data into the kernel space, + assembles it into the corresponding opcode, and sends the command to the software for processing via + the CMDQ of the PMU IDEV. After reading the corresponding registers according to the chip's rules, + the software returns the data to ub_fwctl, which then returns the data to user space. + Finally, ubctl displays the data. + + The following types of registers are supported for query: + 1. Querying information about the UB link. + 2. 
Querying QoS memory access information. + 3. Querying port link status. + 4. Querying DL layer service packet statistics. + 5. Querying NL layer service packet statistics. + 6. Querying SSU packet statistics. + 7. Querying BA layer packet statistics. diff --git a/Documentation/userspace-api/fwctl/fwctl.rst b/Documentation/userspace-api/fwctl/fwctl.rst index 8c586a8f677d..8c4472f98065 100644 --- a/Documentation/userspace-api/fwctl/fwctl.rst +++ b/Documentation/userspace-api/fwctl/fwctl.rst @@ -149,6 +149,7 @@ fwctl User API ============== .. kernel-doc:: include/uapi/fwctl/fwctl.h +.. kernel-doc:: include/uapi/fwctl/ub_fwctl.h sysfs Class ----------- diff --git a/Documentation/userspace-api/fwctl/index.rst b/Documentation/userspace-api/fwctl/index.rst index 06959fbf1547..be74da876cae 100644 --- a/Documentation/userspace-api/fwctl/index.rst +++ b/Documentation/userspace-api/fwctl/index.rst @@ -10,3 +10,4 @@ to securely construct and execute RPCs inside device firmware. :maxdepth: 1 fwctl + ub_fwctl diff --git a/Documentation/userspace-api/fwctl/ub_fwctl.rst b/Documentation/userspace-api/fwctl/ub_fwctl.rst new file mode 100644 index 000000000000..bdc0d3a5a7b6 --- /dev/null +++ b/Documentation/userspace-api/fwctl/ub_fwctl.rst @@ -0,0 +1,51 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================ +fwctl ub driver +================ + +Overview +======== + +The ub_fwctl tool is primarily designed to provide functions including querying +the configuration of UB common functions, the status and statistics of common modules, +and information at the Die level. Ub_fwctl is integrated with the open-source fwctl framework, +providing a custom user-mode command format for UB and supporting the common functionality of UB systems. + +The implemented driver is ub_fwctl, which includes the user-mode command line +tool ubctl and kernel-mode driver ub_fwctl. 
After the ub_fwctl driver is loaded, +a file such as ub_ctl is displayed in the /dev directory of the OS through the +sysfs system. The user-mode program ubctl obtains file descriptors by calling +open (/dev/fwctl/fwctlNN), and then communicates with the driver by calling ioctl. + +Function implementation scheme:: + + 1. Ub_fwctl registers itself with the fwctl framework. + + 2. As an auxiliary device, ub_fwctl is connected to ubase through an + auxiliary bus and uses pmu idev's CMDQ to call the software programming + interface to read and write registers. + + 3. Ubctl provides command-line commands for users to call. At startup, + ubctl opens the ubctl device file, assembles the corresponding data + structure based on input, and calls ioctl to enter kernel state. After + receiving the ubctl command, ub_fwctl first checks the legality of the + command, and then communicates with the software by calling the interface + provided by ubase to access CMDQ. The software returns the result of accessing + the register to ub_fwctl through CMDQ, and ub_fwctl then returns the + data to user state. Finally, close the opened ubctl file. + +ub_fwctl User API +================== + +First step for the app is to issue the ioctl(UBCTL_IOCTL_CMDRPC). Each RPC +request includes the operation id, and in and out buffer lengths and pointers. +The driver verifies the operations, then checks the request scope against the +required scope of the operation. The request is then put together with the +request data and sent through the software's message queue to the firmware, and the +results are returned to the caller. + +The RPC endpoints, operations, and buffer contents are defined by the +particular firmware package in the device, which varies across the +available product configurations. The details are available in the +specific product SDK documentation. 
diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index ab6761ffaad8..ab10576e3914 100644 --- a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -30,12 +30,27 @@ #define UBCTL_EXTRACT_BITS(value, start, end) \ (((value) >> (start)) & ((1UL << ((end) - (start) + 1)) - 1)) +/** + * struct ubctl_dev - Device struct of framework + * @fwctl: The device of fwctl + * @data_size: Length of @data + * @adev: data transmitted to users + */ struct ubctl_dev { struct fwctl_device fwctl; DECLARE_KFIFO_PTR(ioctl_fifo, unsigned long); struct auxiliary_device *adev; }; +/** + * struct ubctl_query_cmd_param - Parameters of userspace RPC + * @in_len: Length of @in + * @in: Data of input + * @out_len: Length of @out + * @out: Data of output + * + * Used to receive parameters passed from userspace RPC + */ struct ubctl_query_cmd_param { size_t in_len; struct fwctl_rpc_ub_in *in; @@ -43,6 +58,17 @@ struct ubctl_query_cmd_param { struct fwctl_rpc_ub_out *out; }; +/** + * struct ubctl_cmd - Parameters of query command + * @op_code: The operation code + * @is_read: Read-only or read-write + * @in_len: Length of @in_data + * @out_len: Length of @out_data + * @in: Data of input + * @out: Data of output + * + * Used for sending and receiving software communication + */ struct ubctl_cmd { u32 op_code; u32 is_read; @@ -76,16 +102,44 @@ struct ubctl_query_cmd_dp { void *cmd_out; }; +/** + * ubctl_ubase_cmd_send - The ubase interface for issuing cmdq + * @adev: The auxiliary framework device + * @cmd: Command information of ubctl + */ int ubctl_ubase_cmd_send(struct auxiliary_device *adev, struct ubctl_cmd *cmd); int ubctl_fill_cmd(struct ubctl_cmd *cmd, void *cmd_in, void *cmd_out, u32 out_len, u32 is_read); + +/** + * ubctl_query_data - Packaging and delivering parameters of cmdq + * @ucdev: Ubctl device + * @query_cmd_param: Parameters passed from userspace RPC + * @query_func: Callback functions for issuing and processing data + * @query_dp: Parameters 
related to cmdq + * @query_dp_num: Number of elements in @query_dp + * + */ int ubctl_query_data(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_func_dispatch *query_func, struct ubctl_query_dp *query_dp, u32 query_dp_num); + +/** + * ubctl_query_data_deal - Default callback function for processing returned data + * @ucdev: Ubctl device + * @query_cmd_param: Parameters passed from userspace RPC and IMP + * @cmd: Command information of ubctl + * @out_len: Data length of the 'out' in @query_cmd_param + * @offset: Data offset of the 'out' in @query_cmd_param + * + * On return the device is visible through sysfs and /dev, driver ops may be + * called. + */ int ubctl_query_data_deal(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_cmd *cmd, u32 out_len, u32 offset); #endif + diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index 05d7be4d7f8f..38787e5cc8ca 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -36,95 +36,279 @@ struct fwctl_rpc_ub_out { __u32 data[]; }; +/** + * enum ub_fwctl_cmdrpc_type - Type of access for the RPC + * + * Refer to fwctl.rst for a more detailed discussion of these scopes. 
+ */ enum ub_fwctl_cmdrpc_type { + /** + * @UTOOL_CMD_QUERY_NL: Query all registers at the NL layer + */ UTOOL_CMD_QUERY_NL = 0x0001, + /** + * @UTOOL_CMD_QUERY_NL_PKT_STATS: Query NL layer PKT_STATE related registers + */ UTOOL_CMD_QUERY_NL_PKT_STATS = 0x0002, + /** + * @UTOOL_CMD_QUERY_NL_SSU_STATS: Query NL layer SSU_STATS related registers + */ UTOOL_CMD_QUERY_NL_SSU_STATS = 0x0003, + /** + * @UTOOL_CMD_QUERY_NL_ABN: Query NL layer NL_ABN related registers + */ UTOOL_CMD_QUERY_NL_ABN = 0x0004, + /** + * @UTOOL_CMD_QUERY_TP: Query all registers at the TP layer + */ UTOOL_CMD_QUERY_TP = 0x0021, + /** + * @UTOOL_CMD_QUERY_TP_PKT_STATS: Query TP layer PKT_STATE related registers + */ UTOOL_CMD_QUERY_TP_PKT_STATS = 0x0022, + /** + * @UTOOL_CMD_QUERY_TP_TX_ROUTE: Query TP layer TX_ROUTE related registers + */ UTOOL_CMD_QUERY_TP_TX_ROUTE = 0x0023, + /** + * @UTOOL_CMD_QUERY_TP_ABN_STATS: Query TP layer ABN_STATS related registers + */ UTOOL_CMD_QUERY_TP_ABN_STATS = 0x0024, + /** + * @UTOOL_CMD_QUERY_TP_RX_BANK: Query TP layer RX_BANK related registers + */ UTOOL_CMD_QUERY_TP_RX_BANK = 0x0025, + /** + * @UTOOL_CMD_QUERY_DL: Query all registers at the DL layer + */ UTOOL_CMD_QUERY_DL = 0x0011, + /** + * @UTOOL_CMD_QUERY_DL_PKT_STATS: Query DL layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_DL_PKT_STATS = 0x0012, + /** + * @UTOOL_CMD_QUERY_DL_LINK_STATUS: Query DL layer LINK_STATUS related registers + */ UTOOL_CMD_QUERY_DL_LINK_STATUS = 0x0013, + /** + * @UTOOL_CMD_QUERY_DL_LANE: Query DL layer LANE related registers + */ UTOOL_CMD_QUERY_DL_LANE = 0x0014, + /** + * @UTOOL_CMD_QUERY_DL_BIT_ERR: Query DL layer BIT_ERR related registers + */ UTOOL_CMD_QUERY_DL_BIT_ERR = 0x0015, + /** + * @UTOOL_CMD_QUERY_DL_LINK_TRACE: Query DL layer LINK_TRACE related registers + */ UTOOL_CMD_QUERY_DL_LINK_TRACE = 0x0016, + /** + * @UTOOL_CMD_QUERY_DL_BIST: Query DL layer BIST related registers + */ UTOOL_CMD_QUERY_DL_BIST = 0x0017, + /** + * @UTOOL_CMD_CONF_DL_BIST: Config DL layer 
BIST related registers + */ UTOOL_CMD_CONF_DL_BIST = 0x0018, + /** + * @UTOOL_CMD_QUERY_DL_BIST_ERR: Query DL layer BIST_ERR related registers + */ UTOOL_CMD_QUERY_DL_BIST_ERR = 0x0019, + /** + * @UTOOL_CMD_QUERY_TA: Query all registers at the TA layer + */ UTOOL_CMD_QUERY_TA = 0x0031, + /** + * @UTOOL_CMD_QUERY_TA_PKT_STATS: Query TA layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_TA_PKT_STATS = 0x0032, + /** + * @UTOOL_CMD_QUERY_TA_ABN_STATS: Query TA layer ABN_STATS related registers + */ UTOOL_CMD_QUERY_TA_ABN_STATS = 0x0033, + /** + * @UTOOL_CMD_QUERY_BA: Query all registers at the BA layer + */ UTOOL_CMD_QUERY_BA = 0x0041, + /** + * @UTOOL_CMD_QUERY_BA_PKT_STATS: Query BA layer PKT_STATS related registers + */ UTOOL_CMD_QUERY_BA_PKT_STATS = 0x0042, + /** + * @UTOOL_CMD_QUERY_BA_MAR: Query BA layer MAR related registers + */ UTOOL_CMD_QUERY_BA_MAR = 0x0043, + /** + * @UTOOL_CMD_QUERY_BA_MAR_TABLE: Query BA layer MAR_TABLE related registers + */ UTOOL_CMD_QUERY_BA_MAR_TABLE = 0x0044, + /** + * @UTOOL_CMD_QUERY_BA_MAR_CYC_EN: Query BA layer MAR_CYC_EN related registers + */ UTOOL_CMD_QUERY_BA_MAR_CYC_EN = 0x0045, + /** + * @UTOOL_CMD_CONF_BA_MAR_CYC_EN: Config BA layer MAR_CYC_EN related registers + */ UTOOL_CMD_CONF_BA_MAR_CYC_EN = 0x0046, + /** + * @UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS: Config BA layer MAR_PEFR_STATS related registers + */ UTOOL_CMD_CONFIG_BA_MAR_PEFR_STATS = 0x0047, + /** + * @UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS: Query BA layer MAR_PEFR_STATS related registers + */ UTOOL_CMD_QUERY_BA_MAR_PEFR_STATS = 0x0048, + /** + * @UTOOL_CMD_QUERY_QOS: Query QOS related registers + */ UTOOL_CMD_QUERY_QOS = 0x0051, + /** + * @UTOOL_CMD_QUERY_SCC_VERSION: Query the scc version + */ UTOOL_CMD_QUERY_SCC_VERSION = 0x0061, + /** + * @UTOOL_CMD_QUERY_SCC_LOG: Query the scc log + */ UTOOL_CMD_QUERY_SCC_LOG = 0x0062, + /** + * @UTOOL_CMD_QUERY_SCC_DEBUG_EN: Query the scc debug switch + */ UTOOL_CMD_QUERY_SCC_DEBUG_EN = 0x0063, + /** + * 
@UTOOL_CMD_CONF_SCC_DEBUG_EN: Config the scc debug switch + */ UTOOL_CMD_CONF_SCC_DEBUG_EN = 0x0064, + /** + * @UTOOL_CMD_QUERY_MSGQ_QUE_STATS: Query MSGQ layer QUE_STATS related registers + */ UTOOL_CMD_QUERY_MSGQ_QUE_STATS = 0x0071, + /** + * @UTOOL_CMD_QUERY_MSGQ_ENTRY: Query MSGQ layer ENTRY related registers + */ UTOOL_CMD_QUERY_MSGQ_ENTRY = 0x0072, + + /** + * @UTOOL_CMD_QUERY_QUEUE: Query QUEUE information + */ UTOOL_CMD_QUERY_QUEUE = 0x0073, + /** + * @UTOOL_CMD_QUERY_PORT_INFO: Query information about the specified port + */ UTOOL_CMD_QUERY_PORT_INFO = 0x0081, + /** + * @UTOOL_CMD_QUERY_IO_DIE_PORT_INFO: Query port-related information about the specified + * io die + */ UTOOL_CMD_QUERY_IO_DIE_PORT_INFO = 0x0082, + /** + * @UTOOL_CMD_QUERY_UBOMMU: Query UBOMMU related information + */ UTOOL_CMD_QUERY_UBOMMU = 0x0091, + /** + * @UTOOL_CMD_QUERY_UMMU_ALL: Query all information of UMMU + */ UTOOL_CMD_QUERY_UMMU_ALL = 0x00A1, + /** + * @UTOOL_CMD_QUERY_UMMU_SYNC: Query information of UMMU SYNC + */ UTOOL_CMD_QUERY_UMMU_SYNC = 0x00A2, + /** + * @UTOOL_CMD_CONFIG_UMMU_SYNC: Config information of UMMU SYNC + */ UTOOL_CMD_CONFIG_UMMU_SYNC = 0x00A3, + /** + * @UTOOL_CMD_QUERY_ECC_2B: Query information of ECC 2B + */ UTOOL_CMD_QUERY_ECC_2B = 0x00B1, + /** + * @UTOOL_CMD_QUERY_LOOPBACK: Query information of loopback + */ UTOOL_CMD_QUERY_LOOPBACK = 0x00D1, + /** + * @UTOOL_CMD_CONF_LOOPBACK: Configure specified loopback mode + */ UTOOL_CMD_CONF_LOOPBACK = 0x00D2, + /** + * @UTOOL_CMD_QUERY_PRBS_EN: Query PRBS switch status + */ UTOOL_CMD_QUERY_PRBS_EN = 0x00D3, + /** + * @UTOOL_CMD_CONF_PRBS_EN: Config PRBS switch + */ UTOOL_CMD_CONF_PRBS_EN = 0x00D4, + /** + * @UTOOL_CMD_QUERY_PRBS_RESULT: Query PRBS error count result + */ UTOOL_CMD_QUERY_PRBS_RESULT = 0x00D5, + /** + * @UTOOL_CMD_QUERY_DUMP: Dump all register data + */ UTOOL_CMD_QUERY_DUMP = 0xFFFE, + /** + * @UTOOL_CMD_QUERY_MAX: Maximum Command Code + */ UTOOL_CMD_QUERY_MAX, }; +/** + * struct fwctl_pkt_in_enable 
- ioctl(FWCTL_RPC) input + * @enable: The value of param '-e' + */ struct fwctl_pkt_in_enable { __u8 enable; }; +/** + * struct fwctl_pkt_in_table - ioctl(FWCTL_RPC) input + * @port_id: The value of param '-p' + * @table_num: Length of the table + * @index: The value of param '-i' + */ struct fwctl_pkt_in_table { __u32 port_id; __u32 table_num; __u32 index; }; +/** + * struct fwctl_pkt_in_port - ioctl(FWCTL_RPC) input + * @port_id: The value of param '-p' + */ struct fwctl_pkt_in_port { __u32 port_id; }; +/** + * struct fwctl_pkt_in_index - ioctl(FWCTL_RPC) input + * @index: The value of param '-i' + */ struct fwctl_pkt_in_index { __u32 index; }; +/** + * struct fwctl_pkt_in_ummuid_value - ioctl(FWCTL_RPC) input + * @ummu_id: The value of param '-u' + * @value: The value of param '-e' + */ struct fwctl_pkt_in_ummuid_value { __u32 ummu_id; __u32 value; }; #endif + -- Gitee From e3d01e1f400f587f4d6e10fe6012d0aa2694032c Mon Sep 17 00:00:00 2001 From: Liming An Date: Thu, 27 Nov 2025 19:12:05 +0800 Subject: [PATCH 038/126] iommu/ummu: Add UMMU documentation description commit 3a2ebcdd4d6c72fcd64521ffcfe496b8950be8d0 openEuler This patch adds ummu documentation description Signed-off-by: Sihui Jiang Signed-off-by: Jingbin Wu Signed-off-by: Yanlong Zhu Signed-off-by: Liming An --- .../sysfs-class-iommu-ummu-bypass-mpam | 31 +++ .../ABI/testing/sysfs-class-iommu-ummu-iommu | 113 ++++++++++ .../testing/sysfs-class-iommu-ummu-uotr-mpam | 31 +++ .../testing/sysfs-devices-platform-ummu_vdev | 19 ++ Documentation/admin-guide/perf/ummu-pmu.rst | 112 ++++++++++ Documentation/driver-api/ub/index.rst | 1 + Documentation/driver-api/ub/ummu-core.rst | 7 + Documentation/ub/index.rst | 1 + Documentation/ub/ummu/index.rst | 12 + Documentation/ub/ummu/ummu-core.rst | 128 +++++++++++ Documentation/ub/ummu/ummu.rst | 134 ++++++++++++ Documentation/userspace-api/ummu_core.rst | 103 +++++++++ include/linux/ummu_core.h | 206 +++++++++++++++--- 13 files changed, 863 insertions(+), 35 deletions(-)
create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu create mode 100644 Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam create mode 100644 Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev create mode 100644 Documentation/admin-guide/perf/ummu-pmu.rst create mode 100644 Documentation/driver-api/ub/ummu-core.rst create mode 100644 Documentation/ub/ummu/index.rst create mode 100644 Documentation/ub/ummu/ummu-core.rst create mode 100644 Documentation/ub/ummu/ummu.rst create mode 100644 Documentation/userspace-api/ummu_core.rst diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam b/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam new file mode 100644 index 000000000000..c28753fb8b7d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-bypass-mpam @@ -0,0 +1,31 @@ +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_partid +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The partID value used by the MPAM function in the bypass UMMU + scenario. Format: %x. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_pmg +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PMG value used by the MPAM function in the bypass UMMU scenario. + Format: %x. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_run +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + After setting the bp_partid and bp_pmg values, write 1 to bp_run to + apply these values to the UMMU device. These values define the IO + regions that bypass the UMMU device in the bypass UMMU scenario. + +What: /sys/class/iommu/ummu./ummu_bypass_mpam/bp_mpam_info +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Retrieve the currently active MPAM configuration from the UMMU device. 
diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu new file mode 100644 index 000000000000..48ba4d6d4c60 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu @@ -0,0 +1,113 @@ +What: /sys/class/iommu/ummu./ummu-iommu/eid_list +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + List of all EIDs registered to UMMU. + +What: /sys/class/iommu/ummu./ummu-iommu/evtq_log2num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of Event Queues in the non-secure state of the + UMMU device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/evtq_log2size +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of each Event Queue in the non-secure state of the + UMMU device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/features +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + UMMU device capabilities. + +What: /sys/class/iommu/ummu./ummu-iommu/mcmdq_log2num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of Command Queues in the non-secure state of the + UMMU device in kernel mode. + +What: /sys/class/iommu/ummu./ummu-iommu/mcmdq_log2size +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of each command queue in the non-secure state of the + UMMU device in kernel mode. + +What: /sys/class/iommu/ummu./ummu-iommu/permq_ent_num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The depth of the permission queue in user mode of the UMMU + device (in log2). + +What: /sys/class/iommu/ummu./ummu-iommu/permq_num +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The number of permission queues in user mode of the UMMU + device (in log2). 
+ +What: /sys/class/iommu/ummu./ummu-iommu/ias +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The bit width of the input address supported by the UMMU + device. + +What: /sys/class/iommu/ummu./ummu-iommu/oas +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The bit width of the output address of the UMMU device. + +What: /sys/class/iommu/ummu./ummu-iommu/options +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Configurable features of the UMMU device. + +What: /sys/class/iommu/ummu./ummu-iommu/pgsize_bitmap +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Supported page size bitmap of the UMMU translation table. + +What: /sys/class/iommu/ummu./ummu-iommu/ptsize_bitmap +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Supported page size bitmap of the UMMU MAPT table + (permission check). + +What: /sys/class/iommu/ummu./ummu-iommu/tid_bits +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Maximum TokenID bit width supported in non-secure state. + +What: /sys/class/iommu/ummu./ummu-iommu/tid_type +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The domain_type corresponding to the TokenID, which requires + the TokenID value as input. Format: %x. diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam b/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam new file mode 100644 index 000000000000..8bbe9d65c7d9 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-uotr-mpam @@ -0,0 +1,31 @@ +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_partid +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PartID value used by the MPAM function to tag UMMU-initiated + traffic. Format: %x. 
+ +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_pmg +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + The PMG value used by the MPAM function to tag UMMU-initiated traffic. + Format: %x. + +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_run +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + After setting the uotr_partid and uotr_pmg values, write 1 to uotr_run + to apply them to the UMMU device. These values tag I/O traffic initiated + by the UMMU itself. + +What: /sys/class/iommu/ummu./ummu_uotr_mpam/uotr_mpam_info +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Jingbin Wu +Description: + Retrieve the MPAM configuration last applied to the UMMU device. diff --git a/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev b/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev new file mode 100644 index 000000000000..2812b512c20e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-ummu_vdev @@ -0,0 +1,19 @@ +What: /sys/devices/platform/ummu_tid_root/logic_ummu/ummu_vdev..auto/ummu-vdev-attr/tid_mode +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Yanlong Zhu +Description: + (RO) Displays the MAPT mode of Token ID. Format: %d. Allowed values: + + == =========================================== + 0 Token ID is table mode. + 1 Token ID is entry mode. + 2 Fatal error occurred. + == =========================================== + +What: /sys/devices/platform/ummu_tid_root/logic_ummu/ummu_vdev..auto/ummu-vdev-attr/tid_val +Date: Oct 2025 +KernelVersion: 6.6 +Contact: Yanlong Zhu +Description: + (RO) Displays the value of Token ID. Format: %u. diff --git a/Documentation/admin-guide/perf/ummu-pmu.rst b/Documentation/admin-guide/perf/ummu-pmu.rst new file mode 100644 index 000000000000..5447f8c61ac6 --- /dev/null +++ b/Documentation/admin-guide/perf/ummu-pmu.rst @@ -0,0 +1,112 @@ +.. 
SPDX-License-Identifier: GPL-2.0+ + +====================================== +UMMU Performance Monitoring Unit (PMU) +====================================== + +The UMMU includes a Performance Monitor Unit (PMU) to track and collect +statistics on key hardware events, such as TLB/PLB cache hit rates and +lookup latencies. By leveraging the Linux kernel's perf subsystem, the +collected event data can be efficiently processed, analyzed, and +visualized to drive targeted optimizations for UMMU performance. + +Usage +===== + +Basic usage follows the standard Linux kernel perf interface. The UMMU +device supports the following PMU events, which are exposed under the perf +event directory: + +.. code-block:: bash + + ls -l /sys/bus/event_source/devices/ummu_pmcg_0/events + +Constraints +=========== + +- No more than 8 events can be monitored at the same time. + +UMMU PMU Events +=============== + +.. table:: PMU Events in UMMU and Their Meanings + + +---------------------------------+-------------------------------------------------------------+ + | Event | Meaning | + +=================================+=============================================================+ + | kv_table_rd_average_latency | Average bus latency for reading key-value (KV) and | + | | content-addressable memory (CAM) tables during the | + | | conversion from DstEID to tecte_tag. | + +---------------------------------+-------------------------------------------------------------+ + | swif_cmd_send_num | Command count generated by SWIF. | + +---------------------------------+-------------------------------------------------------------+ + | swif_dvm_sync_latency | Average latency during execution of Sync commands issued | + | | by SWIF DVM. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_ns_sync_latency | Average latency during execution of Sync commands issued | + | | by the SWIF KCMD non-secure queue. 
| + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_s_sync_latency | Average latency during execution of Sync commands issued | + | | by the SWIF KCMD secure queue. | + +---------------------------------+-------------------------------------------------------------+ + | swif_ucmd_sync_latency | Average latency during execution of Sync commands issued | + | | by SWIF UCMD. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_plb_cache_hit_rate | The hit rate observed during table lookups in the TBU PLB | + | | table. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_tlb_cache_hit_rate | The hit rate observed during table lookups in the TBU TLB | + | | table. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_cntx_cache_miss_num | The number of cache misses observed in the TCU context | + | | cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_gpc_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU GPC cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_gpc_req_latency | The latency observed during GPC lookup operations in the | + | | TCU GPC module. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU PPTW cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_req_latency | The latency observed during permission-based PTW lookup | + | | operations in the TCU PPTW module. 
| + +---------------------------------+-------------------------------------------------------------+ + | tcu_pptw_req_num | PPTW Request Count. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_tptw_cache_hit_rate | The hit rate observed during access operations in the | + | | TCU TPTW cache. | + +---------------------------------+-------------------------------------------------------------+ + | tcu_tptw_req_latency | The latency observed during PTW lookup operations in the | + | | TCU TPTW module. | + +---------------------------------+-------------------------------------------------------------+ + | ubif_kv_cache_hit_rate | The hit rate observed during access operations in the | + | | UBIF KV cache. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_req_average_latency | The average latency of table lookup requests during system | + | | operation. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_req_rate | The rate of table lookup requests during system operation. | + +---------------------------------+-------------------------------------------------------------+ + | ummu_rsp_rate | The rate of table lookup results during system operation. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_ptw_pack_rate | The rate of address translation table lookup requests sent | + | | by TBU RAB to TCU. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_pptw_pack_rate | The rate of permission table lookup requests sent by TBU | + | | RAB to TCU. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_ptw_latency | Average end-to-end latency of PTW requests from TBU RAB. 
| + +---------------------------------+-------------------------------------------------------------+ + | tbu_pptw_latency | Average end-to-end latency of PPTW requests from TBU RAB. | + +---------------------------------+-------------------------------------------------------------+ + | tbu_rab_buf_use_rate | Buffer utilization rate of TBU RAB. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_gpc_sync_latency | Average execution latency of Sync commands issued by the | + | | SWIF KCMD GPC queue, excluding those via the DVM interface. | + +---------------------------------+-------------------------------------------------------------+ + | swif_kcmd_realm_sync_latency | Average execution latency of Sync commands issued by the | + | | SWIF KCMD REALM queue, excluding those via the DVM | + | | interface. | + +---------------------------------+-------------------------------------------------------------+ diff --git a/Documentation/driver-api/ub/index.rst b/Documentation/driver-api/ub/index.rst index 5738694649be..0f9472ba6451 100644 --- a/Documentation/driver-api/ub/index.rst +++ b/Documentation/driver-api/ub/index.rst @@ -15,5 +15,6 @@ The Linux UnifiedBus implementer's API guide ubfi ubus + ummu-core ubase cdma diff --git a/Documentation/driver-api/ub/ummu-core.rst b/Documentation/driver-api/ub/ummu-core.rst new file mode 100644 index 000000000000..7bd07e0e1aff --- /dev/null +++ b/Documentation/driver-api/ub/ummu-core.rst @@ -0,0 +1,7 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +UMMU Core Support Library +--------------------------- + +.. 
kernel-doc:: include/linux/ummu_core.h + :functions: diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 0a3973b98512..c9366b0608dc 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -14,4 +14,5 @@ UnifiedBus Subsystem ubase/index ubfi/index ubus/index + ummu-core cdma/index diff --git a/Documentation/ub/ummu/index.rst b/Documentation/ub/ummu/index.rst new file mode 100644 index 000000000000..21360586e1f4 --- /dev/null +++ b/Documentation/ub/ummu/index.rst @@ -0,0 +1,12 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +======================= +UB UMMU +======================= + +.. toctree:: + :maxdepth: 2 + :numbered: + + ummu + ummu-core diff --git a/Documentation/ub/ummu/ummu-core.rst b/Documentation/ub/ummu/ummu-core.rst new file mode 100644 index 000000000000..6a16bbaa641f --- /dev/null +++ b/Documentation/ub/ummu/ummu-core.rst @@ -0,0 +1,128 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +====================================== +UMMU-CORE +====================================== + +:Authors: - Yanlong Zhu + +Introduction +============ +The Unified Bus Memory Management Unit (abbreviated as UMMU) is a component +that provides memory address mapping and access permission verification +during memory access processes. +It supports the sharing of memory resources between UBPU (UB Processing Units) +and ensures legitimate access to memory. + +The UMMU-Core is designed to work with the Linux IOMMU framework, as an +extension, providing the necessary interfaces to integrate with the system. +To maintain flexibility in deployment, the UMMU-Core can be compiled as a +loadable kernel module or built-in kernel image. + +EID Management +-------------- + +UMMU uses the following inputs — DstEID, TokenID and UBA (Unified Bus Address) — +to determine whether the entity is valid and which address domain it should access. + +Every UB entity must register its EID (Entity ID) with the UB domain to +communicate with other entities. 
UMMU-Core provides :c:func:`ummu_core_add_eid()` +and :c:func:`ummu_core_del_eid()` functions to manage EID. + +In some cases, UB devices may register before all UMMU devices. To handle +this, we designed an EID cached list to temporarily save EIDs. When an UMMU +device registers as the global core device, the UMMU-Core flushes the EID +cached list to it. Thread safety is guaranteed by the UMMU-Core. For +detailed information, refer to the `UB-Base-Specification-2.0`_. + +.. _UB-Base-Specification-2.0: https://www.unifiedbus.com/ + +TokenID Management +------------------ + +Each UB entity has multiple address spaces, such as DMA space, SVA space, +and others. The TokenID identifies the address space associated with each entity. + +The UMMU-Core introduces tdev (TID Device), a pseudo-device used to abstract +the concept of TID. It also supports UMMU driver functionality, enabling driver +management. The tdev can be used to allocate and grant memory address spaces. +When tdev is released, all associated resources will be freed. + +UMMU-Core acts as the TID manager in the UB system, offering TID allocation +strategies and TID allocation APIs to the UMMU driver. + +UMMU-Core supports multiple TID allocation strategies: + +- TRANSPARENT: + The TID is compatible with the global PASID (Process Address Space ID), + enabling seamless integration with system-wide address space management. +- ASSIGNED: + A pre-allocated TID, assigned from an external framework or management system. +- NORMAL: + The default TID allocation strategy, suitable for the majority of use cases. + +UMMU Device Registration +------------------------ + +The UMMU device registration is performed in two steps. An UMMU device +must implement the `ummu_core_device` interface and initialize it using +the :c:func:`ummu_core_device_init()` function. This function initializes +the core device and allocates a dedicated TID manager to handle TID operations.
+ +Multiple UMMU devices can register to UMMU-Core by :c:func:`ummu_core_device_register()` +function. However, only global core device can take the charge of all UB device requests, +such as :c:func:`add_eid()` and :c:func:`del_eid()` functions. + +.. code-block:: none + + +-------------------+ + | IOMMU Framework | + +---------+---------+ + | + +----------+---------+ + | Global Core Device | + +----------+---------+ + | + +------------------------+-----------+-----------+------------------------+ + | | | | + +-------------------+ +-------------------+ +-------------------+ +-------------------+ + | Core Device 0 | | Core Device 1 | | Core Device 2 | ... | Core Device x | + +-------------------+ +-------------------+ +-------------------+ +-------------------+ + +Support KSVA mode +----------------- + +The KSVA (Kernel-space Shared Virtual Addressing) is not supported in the +current IOMMU framework, as it maps the entire kernel address space to +devices, which may cause critical errors. + +By leveraging isolated address space IDs and fine-grained permission controls, +we can restrict each device only access to the authorized address space +with KSVA mode. + +To manage the access permissions of each PASID, the IOMMU can implement a +permission checking mechanism. We abstract the permission management +operations into four fundamental types: + +- grant: + Grant access to a specified memory address range with defined + permissions (e.g., read, write, execute). +- ungrant: + Revoke previously granted access to a memory address range, invalidating + the device's permissions for that region. +- plb_sync_all: + Synchronize the PLB (Permission Lookaside Buffer) for all registered + PASIDs, ensuring global consistency of permission state across the IOMMU. +- plb_sync: + Synchronize the PLB for a specific PASID and memory range, minimizing + latency while maintaining access control integrity. 
+ +These operations are integrated into the `iommu_domain` as part of the +`iommu_perm_ops` interface. + +UMMU SVA maintains a set of permission tables and page tables for each TID. +These resources can be allocated via the :c:func:`alloc_tid()` operation. +Once a TID is assigned, read and write permissions for the specific virtual +memory address ranges can be granted or ungranted. + +To access granted memory address ranges, permission verification is required. diff --git a/Documentation/ub/ummu/ummu.rst b/Documentation/ub/ummu/ummu.rst new file mode 100644 index 000000000000..b4f39749ff79 --- /dev/null +++ b/Documentation/ub/ummu/ummu.rst @@ -0,0 +1,134 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +=========== +UMMU Driver +=========== + +UMMU Functionality +================== + +The UMMU driver implements IOMMU functionality, enabling address +translation and access control for DMA transactions initiated by +peripheral devices. + +UMMU plays a critical role in system virtualization, device isolation, +and secure DMA address translation. + +In Shared Virtual Addressing (SVA) scenarios, UMMU enforces permission +checks to protect data within the shared address space, ensuring access +integrity and confidentiality. + +UMMU performs address translation and permission checking using input +parameters derived from the UB Memory Descriptor (EID + TokenID + UBA). + +For detailed information on the UB Memory Descriptor format and semantics, +refer to the `UB-Base-Specification-2.0`_. + +.. _UB-Base-Specification-2.0: https://www.unifiedbus.com/ + +The functionality of UMMU is primarily organized into the following three +core components: + +Configuration Table Lookup +-------------------------- + +The configuration data for address translation and permission checking in +UMMU is stored in memory and organized into two levels of configuration +tables: TECT (Target Entity Configuration Table) and TCT (Target Context +Table). 
+ +- **TECT (Target Entity Configuration Table)**: + UMMU uses the DstEID to locate the corresponding TECT entry. This entry + primarily contains local entity information and serves as a storage + location for the entry points of the TCT and the Stage 2 address + translation tables. + +- **TCT (Target Context Table)**: + UMMU uses the TokenID to locate the corresponding TCT entry. This entry + describes the address space-level information, which may have a + granularity equal to or finer than that of the process level. The TCT + entry primarily stores the base addresses of the Stage 1 address + translation table and the MAPT (Memory Address Permission Table) used for + SVA mode permission checking. + +Address Translation +------------------- + +UMMU uses the EID and TokenID to locate the corresponding entries in the +TECT (Target Entity Configuration Table) and TCT (Target Context Table). +Based on the configuration table entries, it determines the base address +of the page table. It then uses the UBA and the page table base address to +perform the page table entry lookup and complete the address translation. + +In DMA scenarios, UMMU uses separate Stage 1 and Stage 2 translation +tables to support multiple-stage address translation. + +In user-space SVA scenarios, UMMU enables the device to directly access +the process's virtual address space. Similarly, kernel-space SVA allows +the device to access kernel-level virtual memory, enabling efficient data +sharing between the device and the kernel. + +Permission Checking +------------------- + +In SVA scenarios, UMMU performs permission checks to ensure the security +of the address space. + +UMMU performs permission checking in parallel with address translation. +After retrieving the TECT and TCT entries, if permission checking is +enabled for the currently accessed TECT entity, UMMU can obtain the MAPT +(Memory Address Permission Table) entry from the TCT entry. 
UMMU then +retrieves the permission information for the target memory from the MAPT, +compares it with the permissions specified in the memory access request, +and determines whether the access passes the permission check. + +The permission checking feature enables fine-grained control over memory +segment access, allowing the system to authorize or deauthorize specific +memory regions. It is recommended to enable the permission checking +feature to enforce security policies and protect the SVA address space +from unauthorized access. + +UMMU Driver Initialization +========================== + +When the UMMU driver detects an UMMU-capable platform device, it invokes +the probe function `ummu_device_probe()`. This function identifies the +device's hardware capabilities, allocates queues, configuration tables, +and interrupt handlers, and initializes the associated resources. + +UMMU Device Registration +======================== + +After the UMMU device completes its initialization, it is registered with +the UMMU framework. The UB system supports multiple UMMU devices within a +single chip. The UMMU framework abstracts a Logic UMMU device to uniformly +manage multiple physical UMMU devices. Once wrapped by the framework, the +Logic UMMU is ultimately registered with the IOMMU framework. + +In addition to calling the `struct iommu_ops` registered by individual UMMU +devices, the Logic UMMU leverages the extended operation set `struct +ummu_core_ops` provided by the UMMU framework to uniformly manage all +underlying UMMU device instances. This includes sharing configuration and +page table information across devices, and synchronizing invalidation +operations to ensure consistent table lookup results across the entire +device set. + +.. 
code-block:: none + + +-------------------+ + | IOMMU Framework | + +-------------------+ + ^ + | + Register + | + +--------------------+ + | UMMU-CORE Framework| + +--------------------+ + ^ + | + Register + | + +----------------+ +----------------+ +----------------+ + | ummu device 0 | | ummu device 1 | ... | ummu device x | + +----------------+ +----------------+ +----------------+ diff --git a/Documentation/userspace-api/ummu_core.rst b/Documentation/userspace-api/ummu_core.rst new file mode 100644 index 000000000000..79d9dd7a5740 --- /dev/null +++ b/Documentation/userspace-api/ummu_core.rst @@ -0,0 +1,103 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. ummu_core: + +======================= +UMMU_CORE Userspace API +======================= + +The UMMU UAPI provides APIs that enable communication between user-space components +and kernel-space components.The primary use case is Shared Virtual Address (SVA). + +.. contents:: :local: + +Functionalities +=============== +Only kernel-mode process expose the APIs. The supported user-kernel APIs +are as follows: + +1. Allocate/Free a TID +2. Send one or more PLBI commands +3. Map or unmap resources, including MAPT block and command queues + +Interfaces +========== +Although the data structures defined in UMMU_CORE UAPI are self-contained, +no user-facing API functions are provided. Instead, UMMU_CORE UAPI is +designed to work with UMMU_CORE driver. + +Upon loading, the UMMU_CORE driver registers a TID device, and sets up its operation function table. +The supported operations include open, release, map, and ioctl. + +Datastructures and Definitions +------------------------------ +1. struct ummu_token_info: stores token information for a shared-memory segment. + + - input: specifies the token generation mode.If input is 0, the tokenVal field is used as the token value. + If input is 1, the UMMU library generates a random token value, and tokenVal is ignored. + - tokenVal: the token value to use when input is 0. + +2. 
enum ummu_mapt_perm: access permissions for a shared-memory segment + + - MAPT_PERM_W: write only + - MAPT_PERM_R: read only + - MAPT_PERM_RW: read and write + - MAPT_PERM_ATOMIC_W: atomic write only + - MAPT_PERM_ATOMIC_R: atomic read only + - MAPT_PERM_ATOMIC_RW: atomic read and write + +3. enum ummu_mapt_mode: Memory Address Permission Table mode + + - MAPT_MODE_ENTRY: only one memory address segment can be managed per TID. + - MAPT_MODE_TABLE: multiple memory address segments can be managed per TID. + +4. enum ummu_ebit_state: + + - UMMU_EBIT_OFF: disable ebit check + - UMMU_EBIT_ON: enable ebit check + +5. definitions: + + - TID_DEVICE_NAME: a character device that enables user-mode processes to interact + with hardware or software through system calls. + - UMMU_IOCALLOC_TID: operation code for allocating a TID. + - UMMU_IOCFREE_TID: operation code for freeing a TID. + - UMMU_IOCPLBI_VA: operation code to flush the PLB cache for a specific virtual address. + - UMMU_IOCPLBI_ALL: operation code to flush the PLB cache for all virtual addresses. + +Descriptions and Examples +------------------------- +1. allocate/free tid + +The input parameter is *struct ummu_tid_info*. Below is an example: +:: + struct ummu_tid_info info = {}; + + int fd = open("/dev/ummu/tid", O_RDWR | O_CLOEXEC); + ioctl(fd, UMMU_IOCALLOC_TID, &info); + ioctl(fd, UMMU_IOCFREE_TID, &info); + +The PLBI command operation is performed via the ioctl interface, +using the operation codes UMMU_IOCPLBI_VA or UMMU_IOCPLBI_ALL. + +2. map resources + +This interface is used in two scenarios: +(1) Creating a new MAPT block +(2) Initializing user-mode queues + +For example: +:: + mmap(NULL, size, prot, flags, fd, PA); + +On success, this returns a virtual address. + +3. unmap resources + +This interface is used in two scenarios: +(1) Clearing MAPT blocks +(2) When a user-mode process exits, all associated MAPT blocks and user-mode queue resources +are cleared. 
+ +For example: +:: + munmap(buf, BLOCK_SIZE_4K); diff --git a/include/linux/ummu_core.h b/include/linux/ummu_core.h index eda283f7b524..29d0952e35e7 100644 --- a/include/linux/ummu_core.h +++ b/include/linux/ummu_core.h @@ -25,18 +25,41 @@ #define UMMU_DEV_READ 2 #define UMMU_DEV_ATOMIC 4 +/** + * enum eid_type - the eid type + * + * @EID_NONE: normal EID type + * @EID_BYPASS: ummu address translations are bypassed + * @EID_TYPE_MAX: max of eid type + */ enum eid_type { EID_NONE = 0, EID_BYPASS, EID_TYPE_MAX, }; +/** + * enum tid_alloc_mode - tid different allocated mode + * + * @TID_ALLOC_TRANSPARENT: use pasid as tid, no need to assign again + * @TID_ALLOC_ASSIGNED: pre-allocated tid, no need to assign again + * @TID_ALLOC_NORMAL: alloc tid normal + */ enum tid_alloc_mode { TID_ALLOC_TRANSPARENT = 0, TID_ALLOC_ASSIGNED = 1, TID_ALLOC_NORMAL = 2, }; +/** + * enum ummu_resource_type - SVA resource type + * + * @UMMU_BLOCK: mapt block + * @UMMU_QUEUE: permission queue + * @UMMU_QUEUE_LIST: permission queue for multi ummu + * @UMMU_CNT: ummu count + * @UMMU_TID_RES: tid resource + */ enum ummu_resource_type { UMMU_BLOCK, UMMU_QUEUE, @@ -51,6 +74,13 @@ enum default_tid_ops_types { TID_OPS_MAX, }; +/** + * enum ummu_register_type - ummu device register type + * + * @REGISTER_TYPE_GLOBAL: register as the global iommu device + * @REGISTER_TYPE_NORMAL: register to the iommu framework + * @REGISTER_TYPE_MAX: max of ummu device register type + */ enum ummu_register_type { REGISTER_TYPE_GLOBAL, REGISTER_TYPE_NORMAL, @@ -62,6 +92,12 @@ struct ummu_tid_manager; struct ummu_base_domain; struct ummu_core_device; +/** + * struct block_args - param related to mapt block + * @index: mapt block index + * @block_size_order: block size in PAGE_SIZE + * @out_addr: allocated physical address + */ struct block_args { u32 index; int block_size_order; @@ -75,6 +111,12 @@ struct block_args { KABI_RESERVE(6) }; +/** + * struct queue_args - param related to queue + * @pcmdq_base: base 
address of command queue + * @pcplq_base: base address of completion queue + * @ctrl_page: base address of permission queue + */ struct queue_args { phys_addr_t pcmdq_base; phys_addr_t pcplq_base; @@ -87,6 +129,13 @@ struct queue_args { KABI_RESERVE(5) }; +/** + * struct tid_args - param related to tid + * @pcmdq_order: base address of command queue + * @pcplq_order: base address of completion queue + * @blk_exp_size: block size in PAGE_SIZE + * @hw_cap: cap of hardware + */ struct tid_args { u8 pcmdq_order; u8 pcplq_order; @@ -100,6 +149,16 @@ struct tid_args { KABI_RESERVE(5) }; +/** + * struct resource_args - SVA resource related args + * @type: SVA resource type + * @block: arg related to mapt block + * @queue: arg related to mapt queue for UMMU_QUEUE + * @queues: arg related to mapt queue for UMMU_QUEUE_LIST in multi ummu mode + * @tid_res: tid resource + * @ummu_cnt: return value number of ummu + * @block_index: block index for release + */ struct resource_args { enum ummu_resource_type type; union { @@ -117,6 +176,10 @@ struct resource_args { KABI_RESERVE(3) }; +/** + * struct ummu_param - param related to tid + * @mode: mapt mode: table mode or entry mode + */ struct ummu_param { enum ummu_mapt_mode mode; @@ -129,6 +192,14 @@ struct ummu_param { KABI_RESERVE(7) }; +/** + * struct ummu_tid_param - param related to alloc tid + * @device: device pointer + * @mode: mapt mode: table mode or entry mode + * @alloc_mode: tid alloc mode + * @assign_tid: assigned tid, for TID_ALLOC_TRANSPARENT or TID_ALLOC_ASSIGNED + * @domain_type: more about domain-types in iommu.h + */ struct ummu_tid_param { struct device *device; enum ummu_mapt_mode mode; @@ -143,6 +214,13 @@ struct ummu_tid_param { KABI_RESERVE(5) }; +/** + * struct tdev_attr - attr for tdev + * @name: tdev name + * @dma_attr: dma mode + * @priv: private data pointer + * @priv_len: private data length + */ struct tdev_attr { const char *name; enum dev_dma_attr dma_attr; @@ -189,7 +267,7 @@ struct ummu_core_ops 
{ }; /** - * ummu-core defined iommu device type + * struct ummu_core_device - ummu-core defined iommu device type * @list: used to link all ummu-core devices * @tid_manager: tid domain manager. * @iommu: iommu prototype @@ -212,6 +290,14 @@ struct ummu_core_device { KABI_RESERVE(8) }; +/** + * struct ummu_base_domain - domain info + * @domain: iommu domain + * @core_dev: ummu device + * @parent: point to father domain + * @list: base address of domain list + * @tid: token id + */ struct ummu_base_domain { struct iommu_domain domain; struct ummu_core_device *core_dev; @@ -224,6 +310,14 @@ struct ummu_base_domain { KABI_RESERVE(3) KABI_RESERVE(4) }; + +/** + * struct tid_ops - ummu ops for normal use, expand from iommu_ops + * @alloc_tid_manager: alloc manager for tid + * @free_tid_manager: free all tid and manager for tid + * @alloc_tid: alloc tid func + * @free_tid: free tid func + */ struct tid_ops { struct ummu_tid_manager *(*alloc_tid_manager)( struct ummu_core_device *core_device, u32 min_tid, @@ -239,6 +333,13 @@ struct tid_ops { KABI_RESERVE(4) }; +/** + * struct ummu_tid_manager - assigned tid manager + * @ops: ummu tid ops for normal use, expand from iommu_ops + * @token_ids: xarray of assigned tid + * @min_tid: min tid range for alloc + * @max_tid: max tid range for alloc + */ struct ummu_tid_manager { const struct tid_ops *ops; struct xarray token_ids; @@ -252,6 +353,12 @@ struct ummu_tid_manager { KABI_RESERVE(4) }; +/** + * struct ummu_core_tid_args - tid related args + * @tid_ops: ummu tid ops for normal use, expand from iommu_ops + * @max_tid: max tid range for alloc + * @min_tid: min tid range for alloc + */ struct ummu_core_tid_args { const struct tid_ops *tid_ops; u32 max_tid; @@ -265,6 +372,13 @@ struct ummu_core_tid_args { KABI_RESERVE(6) }; +/** + * struct ummu_core_init_args - ummu core init args + * @core_ops: the ummu device need ummu core ops capability + * @tid_args: parameters related to tid + * @iommu_ops: iommu_ops is mandatory + * 
@hwdev: related hwdev + */ struct ummu_core_init_args { const struct ummu_core_ops *core_ops; struct ummu_core_tid_args tid_args; @@ -276,7 +390,16 @@ struct ummu_core_init_args { KABI_RESERVE(3) }; -/* Memory traffic monitoring of the UB device */ +/** + * struct ummu_mpam - Memory traffic monitoring of the UB device + * @flags: flags, see constants above + * @eid: entity id + * @tid: tid + * @partid: mpam partition id + * @pmg: mpam pmg + * @s1mpam: 0 for ste mpam, 1 for cd mpam + * @user_mpam_en: 0 for ummu mpam, 1 for user mpam + */ struct ummu_mpam { #define UMMU_DEV_SET_MPAM (1 << 0) #define UMMU_DEV_GET_MPAM (1 << 1) @@ -326,7 +449,7 @@ static inline void tdev_attr_init(struct tdev_attr *attr) #ifdef CONFIG_UB_UMMU_CORE /* EID API */ /** - * Add a new EID to the UMMU. + * ummu_core_add_eid() - Add a new EID to the UMMU. * @guid: entity/device identity. * @eid: entity id to be added. * @type: eid type. @@ -335,7 +458,7 @@ static inline void tdev_attr_init(struct tdev_attr *attr) */ int ummu_core_add_eid(guid_t *guid, eid_t eid, enum eid_type type); /** - * Delete an EID from the UMMU. + * ummu_core_del_eid() - Delete an EID from the UMMU. * @guid: entity/device identity. * @eid: entity id to be deleted. * @type: eid type. @@ -344,7 +467,7 @@ void ummu_core_del_eid(guid_t *guid, eid_t eid, enum eid_type type); /* UMMU IOVA API */ /** - * Allocate a range of IOVA. The input iova size might be aligned. + * dma_alloc_iova() - Allocate a range of IOVA. The input iova size might be aligned. * @dev: related device. * @size: iova size. * @attrs: dma attributes. @@ -358,14 +481,14 @@ struct iova_slot *dma_alloc_iova(struct device *dev, size_t size, size_t *sizep); /** - * Free a range of IOVA. + * dma_free_iova() - Free a range of IOVA. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. */ void dma_free_iova(struct iova_slot *slot); /** - * Fill a range of IOVA. It allocates pages and maps pages to the iova. 
+ * ummu_fill_pages() - Fill a range of IOVA. It allocates pages and maps pages to the iova. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. * @iova: iova start. @@ -376,7 +499,7 @@ void dma_free_iova(struct iova_slot *slot); int ummu_fill_pages(struct iova_slot *slot, dma_addr_t iova, unsigned long nr_pages); /** - * Drain a range of IOVA. It unmaps iova and releases pages. + * ummu_drain_pages() - Drain a range of IOVA. It unmaps iova and releases pages. * The API is not thread-safe. * @slot: iova slot, generated from dma_alloc_iova. * @iova: iova start. @@ -422,12 +545,15 @@ static inline int ummu_drain_pages(struct iova_slot *slot, dma_addr_t iova, #if IS_ENABLED(CONFIG_UB_UMMU_CORE_DRIVER) /* UMMU SVA API */ /** - * Grant va range permission to sva. + * ummu_sva_grant_range() - Grant va range permission to sva. * @sva: related sva handle. * @va: va start * @size: va size * @perm: permission * @cookie: struct ummu_token_info* + * + * .. code-block:: c + * * if (!cookie) { * do not use cookie check. * } else if (cookie->input == 0) { @@ -437,18 +563,20 @@ static inline int ummu_drain_pages(struct iova_slot *slot, dma_addr_t iova, * } else { * invalid para * } - * * Return: 0 on success, or an error. */ int ummu_sva_grant_range(struct iommu_sva *sva, void *va, size_t size, int perm, void *cookie); /** - * Ungrant va range permission from sva. + * ummu_sva_ungrant_range() - Ungrant va range permission from sva. * @sva: related sva handle. * @va: va start * @size: va size * @cookie: va related cookie,struct ummu_token_info* + * + * .. code-block:: c + * * if (!cookie) { * do not use cookie check. * } else { @@ -461,7 +589,7 @@ int ummu_sva_ungrant_range(struct iommu_sva *sva, void *va, size_t size, void *cookie); /** - * Get tid from dev or sva. + * ummu_get_tid() - Get tid from dev or sva. * @dev: related device. * @sva: if sva is set, return sva mode related tid; otherwise * return the dma mode tid. 
@@ -472,7 +600,7 @@ int ummu_sva_ungrant_range(struct iommu_sva *sva, void *va, size_t size, int ummu_get_tid(struct device *dev, struct iommu_sva *sva, u32 *tidp); /** - * Get iommu_domain by tid and dev. + * ummu_core_get_domain_by_tid() - Get iommu_domain by tid and dev. * @dev: related device. * @tid: tid * @@ -482,7 +610,7 @@ struct iommu_domain *ummu_core_get_domain_by_tid(struct device *dev, u32 tid); /** - * Check whether the UMMU works in ksva mode. + * ummu_is_ksva() - Check whether the UMMU works in ksva mode. * @domain: related iommu domain * * Return: true or false. @@ -490,7 +618,7 @@ struct iommu_domain *ummu_core_get_domain_by_tid(struct device *dev, bool ummu_is_ksva(struct iommu_domain *domain); /** - * Check whether the UMMU works in sva mode. + * ummu_is_sva() - Check whether the UMMU works in sva mode. * @domain: related iommu domain * * Return: true or false. @@ -498,10 +626,13 @@ bool ummu_is_ksva(struct iommu_domain *domain); bool ummu_is_sva(struct iommu_domain *domain); /** - * Bind device to a process mm. + * ummu_sva_bind_device() - Bind device to a process mm. * @dev: related device. * @mm: process memory management. * @drvdata: ummu_param related to tid. + * + * .. code-block:: c + * * if (!drvdata) { * sva is in the bypass mapt mode. * } else { @@ -514,7 +645,7 @@ struct iommu_sva *ummu_sva_bind_device(struct device *dev, struct mm_struct *mm, struct ummu_param *drvdata); /** - * Bind device to kernel mm. + * ummu_ksva_bind_device() - Bind device to kernel mm. * @dev: related device. * @drvdata: ummu_param related to tid. ksva doesn't support bypass mapt. * @@ -527,50 +658,55 @@ void ummu_ksva_unbind_device(struct iommu_sva *handle); /* UMMU CORE API */ /** - * Initialiase ummu core device. + * ummu_core_device_init() - Initialize ummu core device. * @ummu_core: ummu core device. * @args: ummu core init args. + * * UMMU driver should carefully choose the args based on its requirement. * iommu_ops is mandatory. * a. 
the ummu device need tid allocation capability. + * * a.1 default tid strategies satisfy the ummu device * -> set tid_ops form ummu_core_tid_ops[TID_OPS_MAX] * a.2 default tid strategies do not satisfy the ummu device * -> implement a new tid_ops in the driver. + * * b. the ummu device need ummu core ops capability. * -> set core_ops. + * * c. the ummu device has related hwdev. * -> set hwdev. */ int ummu_core_device_init(struct ummu_core_device *ummu_core, struct ummu_core_init_args *args); /** - * Deinitialiase ummu core device. + * ummu_core_device_deinit() - Deinitialiase ummu core device. * @ummu_core: ummu core device. */ void ummu_core_device_deinit(struct ummu_core_device *ummu_core); /** - * Register ummu core device to the ummu framework. + * ummu_core_device_register() - Register ummu core device to the ummu framework. * @ummu_core: ummu core device. * @type: register type. - REGISTER_TYPE_GLOBAL: register the ummu device as the global device, - The ummu device will be the device handle all request. - e.g. 1. add_eid/del_eid 2. provide ubus iommu ops. etc. - - REGISTER_TYPE_NORMAL: follow the iommu_device register. will not be - related to the global device. it work as a normal iommu device. + * + * REGISTER_TYPE_GLOBAL: register the ummu device as the global device, + * The ummu device will be the device handle all request. + * e.g. 1. add_eid/del_eid 2. provide ubus iommu ops. etc. + * + * REGISTER_TYPE_NORMAL: follow the iommu_device register. will not be + * related to the global device. it work as a normal iommu device. */ int ummu_core_device_register(struct ummu_core_device *ummu_core, enum ummu_register_type type); /** - * Unregister ummu core device from the ummu framework. + * ummu_core_device_unregister() - Unregister ummu core device from the ummu framework. * @dev: the ummu_core device tid belongs to. */ void ummu_core_device_unregister(struct ummu_core_device *dev); /** - * Invalidate ummu global configuration by tid. 
+ * ummu_core_invalidate_cfg_table() - Invalidate ummu global configuration by tid. * @tid: tid * Return: 0 on success, or an error. */ @@ -578,7 +714,7 @@ int ummu_core_invalidate_cfg_table(u32 tid); /* UMMU TID API */ /** - * Alloc a tid from ummu framework, and alloc related pasid. + * ummu_core_alloc_tid() - Alloc a tid from ummu framework, and alloc related pasid. * @dev: the allocated tid will be attached to. * @drvdata: ummu_tid_param related to tid * @tidp: the allocated tid returned here. @@ -589,14 +725,14 @@ int ummu_core_alloc_tid(struct ummu_core_device *dev, struct ummu_tid_param *drvdata, u32 *tidp); /** - * Free a tid to ummu framework. + * ummu_core_free_tid() - Free a tid to ummu framework. * @dev: the ummu_core device tid belongs to. * @tid: token id. */ void ummu_core_free_tid(struct ummu_core_device *dev, u32 tid); /** - * Get mapt_mode related to the tid. + * ummu_core_get_mapt_mode() - Get mapt_mode related to the tid. * @dev: the ummu_core device tid belongs to. * @tid: token id. * @@ -606,7 +742,7 @@ enum ummu_mapt_mode ummu_core_get_mapt_mode(struct ummu_core_device *dev, u32 tid); /** - * Get device related to the tid. + * ummu_core_get_device() - Get device related to the tid. * It will increase the ref count of the device. * @dev: the ummu_core device tid belongs to. * @tid: token id. @@ -617,7 +753,7 @@ struct device *ummu_core_get_device(struct ummu_core_device *dev, u32 tid); void ummu_core_put_device(struct device *dev); /** - * Allocate a virtual device to hold a tid. + * ummu_core_alloc_tdev() - Allocate a virtual device to hold a tid. * @attr: attributes of tdev * @ptid: tid pointer * Return: device on success or NULL error. 
@@ -625,7 +761,7 @@ void ummu_core_put_device(struct device *dev); struct device *ummu_core_alloc_tdev(struct tdev_attr *attr, u32 *ptid); /** - * Free the virtual device + * ummu_core_free_tdev() - Free the virtual device * @dev: Return value allocated by ummu_core_alloc_tdev * * Return: 0 on success or an error. @@ -633,7 +769,7 @@ struct device *ummu_core_alloc_tdev(struct tdev_attr *attr, u32 *ptid); int ummu_core_free_tdev(struct device *dev); /** - * Get ummu_tid_type related to the tid. + * ummu_core_get_tid_type() - Get ummu_tid_type related to the tid. * @dev: the ummu_core device tid belongs to. * @tid: token id. * @tid_type: out param, ummu_tid_type -- Gitee From ee515f8fb5198cee00b1d07600114def95bc5bfc Mon Sep 17 00:00:00 2001 From: Liming An Date: Thu, 27 Nov 2025 19:13:07 +0800 Subject: [PATCH 039/126] iommu/ummu: Add UMMU devicetree file commit 04493490f0224dd57538943f92ccc108ad3af38c openEuler This patch add ummu devicetree file Signed-off-by: Jingbin Wu Signed-off-by: Liming An --- .../devicetree/bindings/iommu/hisi,ummu.yaml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 Documentation/devicetree/bindings/iommu/hisi,ummu.yaml diff --git a/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml b/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml new file mode 100644 index 000000000000..61d0074f7c4c --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/hisi,ummu.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iommu/hisi,ummu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon UMMU Architecture Implementation + +maintainers: + - Jingbin Wu + +description: |+ + UMMU is an IOMMU device that performs address translation and permission checking + using DstEID, TokenID, and UBA as input parameters. 
+ +properties: + $nodename: + pattern: "^ummu@[0-9a-f]*" + compatible: + const: ub,ummu + index: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + UMMU device index. Used to identify a specific UMMU instance in systems + with multiple UMMU devices. + msi-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: | + MSI parent device phandle. Required for MSI interrupt handling. + +required: + - compatible + - index + - msi-parent + +additionalProperties: false + +examples: + - |+ + ummu@0 { + compatible = "ub,ummu"; + index = <0x0>; + msi-parent = <&its>; + }; -- Gitee From 2eeb834665302e307ce3812b87ab2ec73116ac1a Mon Sep 17 00:00:00 2001 From: Liming An Date: Thu, 27 Nov 2025 19:16:22 +0800 Subject: [PATCH 040/126] iommu/ummu: Add UMMU-PMU devicetree file commit 48ebec01b515a3e8f665bbbd415e721c658d30d8 openEuler This patch add ummu-pmu devicetree file Signed-off-by: Lizhi He Wu Signed-off-by: Liming An --- .../bindings/perf/hisi,ummu-pmu.yaml | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml diff --git a/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml b/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml new file mode 100644 index 000000000000..c16fad1c35fe --- /dev/null +++ b/Documentation/devicetree/bindings/perf/hisi,ummu-pmu.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/perf/hisi,ummu-pmu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HiSilicon UMMU Performance Monitor Unit (PMU) + +maintainers: + - Jingbin Wu + +description: | + The UMMU includes a PMU(Performance Monitor Unit ) to monitor and collect + statistics on key hardware events, such as TLB/PLB cache hit rates and + lookup latencies. 
+ +properties: + $nodename: + pattern: "^ummu-pmu@[0-9a-f]*" + compatible: + const: ub,ummu_pmu + index: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + PMU device index. Identifies a specific UMMU-PMU instance in multi-UMMU-PMU + systems. + msi-parent: + $ref: /schemas/types.yaml#/definitions/phandle + description: | + MSI parent device phandle. Required for MSI interrupt handling. + +required: + - compatible + - index + - msi-parent + +additionalProperties: false + +examples: + - | + ummu-pmu@0 { + compatible = "ub,ummu_pmu"; + index = <0x0>; + msi-parent = <&its>; + }; -- Gitee From 3860879fb2db6ea0597168fc7541348e9aade2b2 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 10:48:36 +0800 Subject: [PATCH 041/126] ub: udma: Support import and unimport jfr and jetty. commit 5ebd5f51fdc2e797a8fc1cb185bbce1aa062a8bf openEuler This patch adds the ability to import and unimport jfr and jetty. During the chain establishment process, urma software stack will invoke the import interface to establish the chain. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 72 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 17 +++++++ drivers/ub/urma/hw/udma/udma_jfr.c | 35 ++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 5 ++ drivers/ub/urma/hw/udma/udma_main.c | 4 ++ 5 files changed, 133 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 914ef33b81d9..385bc9b5605b 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -832,6 +832,44 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +static int udma_check_jetty_grp_info(struct ubcore_tjetty_cfg *cfg, struct udma_dev *dev) +{ + if (cfg->type == UBCORE_JETTY_GROUP) { + if (cfg->trans_mode != UBCORE_TP_RM) { + dev_err(dev->dev, "import jg only support RM, transmode is %u.\n", + cfg->trans_mode); + return -EINVAL; + } + + if (cfg->policy != UBCORE_JETTY_GRP_POLICY_HASH_HINT) { + dev_err(dev->dev, "import jg only support hint, policy is %u.\n", + cfg->policy); + return -EINVAL; + } + } + + return 0; +} + +int udma_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct udma_target_jetty *udma_tjetty = to_udma_tjetty(tjetty); + struct udma_dev *udma_dev = to_udma_dev(tjetty->ub_dev); + + if (!IS_ERR_OR_NULL(tjetty->vtpn)) { + dev_err(udma_dev->dev, + "the target jetty is still being used, id = %u.\n", + tjetty->cfg.id.id); + return -EINVAL; + } + + udma_tjetty->token_value = 0; + tjetty->cfg.token_value.token = 0; + kfree(udma_tjetty); + + return 0; +} + bool verify_modify_jetty(enum ubcore_jetty_state jetty_state, enum ubcore_jetty_state attr_state) { @@ -1095,3 +1133,37 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } + +struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + 
struct udma_dev *udma_dev = to_udma_dev(ub_dev); + struct udma_target_jetty *tjetty; + int ret = 0; + + if (cfg->type != UBCORE_JETTY_GROUP && cfg->type != UBCORE_JETTY) { + dev_err(udma_dev->dev, + "the jetty of the type %u cannot be imported in exp.\n", + cfg->type); + return NULL; + } + + ret = udma_check_jetty_grp_info(cfg, udma_dev); + if (ret) + return NULL; + + tjetty = kzalloc(sizeof(*tjetty), GFP_KERNEL); + if (!tjetty) + return NULL; + + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) { + tjetty->token_value = cfg->token_value.token; + tjetty->token_value_valid = true; + } + + udma_swap_endian(cfg->id.eid.raw, tjetty->le_eid.raw, UBCORE_EID_SIZE); + + return &tjetty->ubcore_tjetty; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5b428e999ff1..dba8fa2a05a5 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -45,6 +45,13 @@ struct udma_jetty { bool ue_rx_closed; }; +struct udma_target_jetty { + struct ubcore_tjetty ubcore_tjetty; + union ubcore_eid le_eid; + uint32_t token_value; + bool token_value_valid; +}; + enum jfsc_mode { JFS, JETTY, @@ -214,6 +221,11 @@ static inline struct udma_jetty_grp *to_udma_jetty_grp(struct ubcore_jetty_group return container_of(jetty_grp, struct udma_jetty_grp, ubcore_jetty_grp); } +static inline struct udma_target_jetty *to_udma_tjetty(struct ubcore_tjetty *tjetty) +{ + return container_of(tjetty, struct udma_target_jetty, ubcore_tjetty); +} + static inline struct udma_jetty *to_udma_jetty_from_queue(struct udma_jetty_queue *queue) { return container_of(queue, struct udma_jetty, sq); @@ -229,6 +241,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_unimport_jetty(struct ubcore_tjetty *tjetty); int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct 
ubcore_udata *udata); struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, @@ -244,5 +257,9 @@ void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout) int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); +struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 953fcffc5001..7462d75f1fba 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -790,3 +790,38 @@ int udma_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, return 0; } + +int udma_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct udma_target_jetty *udma_tjfr = to_udma_tjetty(tjfr); + + udma_tjfr->token_value = 0; + tjfr->cfg.token_value.token = 0; + + kfree(udma_tjfr); + + return 0; +} + +struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct udma_target_jetty *udma_tjfr; + + udma_tjfr = kzalloc(sizeof(*udma_tjfr), GFP_KERNEL); + if (!udma_tjfr) + return NULL; + + if (!udata) { + if (cfg->flag.bs.token_policy != UBCORE_TOKEN_NONE) { + udma_tjfr->token_value = cfg->token_value.token; + udma_tjfr->token_value_valid = true; + } + } + + udma_swap_endian(cfg->id.eid.raw, udma_tjfr->le_eid.raw, UBCORE_EID_SIZE); + + return &udma_tjfr->ubcore_tjetty; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index ae6d0d97f460..9a90e60bd391 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -161,5 +161,10 @@ struct ubcore_jfr *udma_create_jfr(struct 
ubcore_device *dev, struct ubcore_jfr_ struct ubcore_udata *udata); int udma_destroy_jfr(struct ubcore_jfr *jfr); int udma_destroy_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, int *bad_jfr_index); +int udma_unimport_jfr(struct ubcore_tjetty *tjfr); +struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b1fad9e31f38..b116b514ee3b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -190,10 +190,14 @@ static struct ubcore_ops g_dev_ops = { .query_jfr = udma_query_jfr, .destroy_jfr = udma_destroy_jfr, .destroy_jfr_batch = udma_destroy_jfr_batch, + .import_jfr_ex = udma_import_jfr_ex, + .unimport_jfr = udma_unimport_jfr, .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .import_jetty_ex = udma_import_jetty_ex, + .unimport_jetty = udma_unimport_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, }; -- Gitee From 26ed18f42be04d762773c3ce78c3fc011a789d91 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 11:17:15 +0800 Subject: [PATCH 042/126] ub: udma: Add and remove jetty to jetty group. commit e5a126ae4f2494f8f60f14db00500714490f715b openEuler This patch adds the ability to Add and remove jetty to jetty group. During the process of creating/destroying Jetty, Jetty can be added or removed from the Jetty group. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_dev.h | 2 + drivers/ub/urma/hw/udma/udma_jetty.c | 115 +++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index d9b10ab28028..58c5da4a2234 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -23,6 +23,8 @@ extern bool dump_aux_info; #define UDMA_CTX_NUM 2 +#define UDMA_BITS_PER_INT 32 + #define MAX_JETTY_IN_JETTY_GRP 32 #define UDMA_USER_DATA_H_OFFSET 32U diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 385bc9b5605b..4b4e924f9111 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -172,6 +172,112 @@ static void udma_init_jettyc(struct udma_dev *dev, struct ubcore_jetty_cfg *cfg, ctx->next_rcv_ssn = ctx->next_send_ssn; } +static int update_jetty_grp_ctx_valid(struct udma_dev *udma_dev, + struct udma_jetty_grp *jetty_grp) +{ + struct udma_jetty_grp_ctx ctx[UDMA_CTX_NUM]; + struct ubase_mbx_attr mbox_attr = {}; + int ret; + + ctx[0].valid = jetty_grp->valid; + /* jetty number indicates the location of the jetty with the largest ID. 
*/ + ctx[0].jetty_number = fls(jetty_grp->valid) - 1; + memset(ctx + 1, 0xff, sizeof(ctx[1])); + ctx[1].valid = 0; + ctx[1].jetty_number = 0; + + mbox_attr.tag = jetty_grp->jetty_grp_id; + mbox_attr.op = UDMA_CMD_MODIFY_JETTY_GROUP_CONTEXT; + ret = post_mailbox_update_ctx(udma_dev, ctx, sizeof(ctx), &mbox_attr); + if (ret) + dev_err(udma_dev->dev, + "post mailbox update jetty grp ctx failed, ret = %d.\n", + ret); + + return ret; +} + +static uint32_t udma_get_jetty_grp_jetty_id(uint32_t *valid, uint32_t *next) +{ + uint32_t bit_idx; + + bit_idx = find_next_zero_bit((unsigned long *)valid, UDMA_BITS_PER_INT, *next); + if (bit_idx >= UDMA_BITS_PER_INT) + bit_idx = find_next_zero_bit((unsigned long *)valid, UDMA_BITS_PER_INT, 0); + + *next = (*next + 1) >= UDMA_BITS_PER_INT ? 0 : *next + 1; + + return bit_idx; +} + +static int add_jetty_to_grp(struct udma_dev *udma_dev, struct ubcore_jetty_group *jetty_grp, + struct udma_jetty_queue *sq, uint32_t cfg_id) +{ + struct udma_jetty_grp *udma_jetty_grp = to_udma_jetty_grp(jetty_grp); + uint32_t bit_idx = cfg_id - udma_jetty_grp->start_jetty_id; + int ret = 0; + + mutex_lock(&udma_jetty_grp->valid_lock); + + if (cfg_id == 0) + bit_idx = udma_get_jetty_grp_jetty_id(&udma_jetty_grp->valid, + &udma_jetty_grp->next_jetty_id); + + if (bit_idx >= UDMA_BITS_PER_INT || (udma_jetty_grp->valid & BIT(bit_idx))) { + dev_err(udma_dev->dev, + "jg(%u.%u) vallid %u is full or user id(%u) error", + udma_jetty_grp->jetty_grp_id, udma_jetty_grp->start_jetty_id, + udma_jetty_grp->valid, cfg_id); + ret = -ENOMEM; + goto out; + } + + udma_jetty_grp->valid |= BIT(bit_idx); + sq->id = udma_jetty_grp->start_jetty_id + bit_idx; + sq->jetty_grp = udma_jetty_grp; + + ret = update_jetty_grp_ctx_valid(udma_dev, udma_jetty_grp); + if (ret) { + dev_err(udma_dev->dev, + "update jetty grp ctx valid failed, jetty_grp id is %u.\n", + udma_jetty_grp->jetty_grp_id); + + udma_jetty_grp->valid &= ~BIT(bit_idx); + } +out: + 
mutex_unlock(&udma_jetty_grp->valid_lock); + + return ret; +} + +static void remove_jetty_from_grp(struct udma_dev *udma_dev, + struct udma_jetty *jetty) +{ + struct udma_jetty_grp *jetty_grp = jetty->sq.jetty_grp; + uint32_t bit_idx; + int ret; + + bit_idx = jetty->sq.id - jetty_grp->start_jetty_id; + if (bit_idx >= UDMA_BITS_PER_INT) { + dev_err(udma_dev->dev, + "jetty_id(%u) is not in jetty grp, start_jetty_id(%u).\n", + jetty->sq.id, jetty_grp->start_jetty_id); + return; + } + + mutex_lock(&jetty_grp->valid_lock); + jetty_grp->valid &= ~BIT(bit_idx); + jetty->sq.jetty_grp = NULL; + + ret = update_jetty_grp_ctx_valid(udma_dev, jetty_grp); + if (ret) + dev_err(udma_dev->dev, + "update jetty grp ctx valid failed, jetty_grp id is %u.\n", + jetty_grp->jetty_grp_id); + + mutex_unlock(&jetty_grp->valid_lock); +} + static int udma_specify_rsvd_jetty_id(struct udma_dev *udma_dev, uint32_t cfg_id) { struct udma_ida *ida_table = &udma_dev->rsvd_jetty_ida_table; @@ -391,6 +497,13 @@ int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, return ret; sq->id = cfg_id; + } else if (jetty_grp) { + ret = add_jetty_to_grp(udma_dev, jetty_grp, sq, cfg_id); + if (ret) { + dev_err(udma_dev->dev, + "add jetty to grp failed, ret = %d.\n", ret); + return ret; + } } else { ret = udma_alloc_jetty_id_own(udma_dev, &sq->id, sq->jetty_type); } @@ -403,6 +516,8 @@ static void free_jetty_id(struct udma_dev *udma_dev, { if (udma_jetty->sq.id < udma_dev->caps.jetty.start_idx) udma_id_free(&udma_dev->rsvd_jetty_ida_table, udma_jetty->sq.id); + else if (is_grp) + remove_jetty_from_grp(udma_dev, udma_jetty); else udma_adv_id_free(&udma_dev->jetty_table.bitmap_table, udma_jetty->sq.id, false); -- Gitee From 242ade6c412bf9f1df740fc3303b0a635e5bc90e Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 12:00:24 +0800 Subject: [PATCH 043/126] ub: udma: Support post jfs work request. 
commit a13bf671c3242a5a4d215e12b9a01525d3ef238a openEuler This patch adds the ability to post jfs work request. After user post a request, driver will assemble the wqe and update doorbell. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfs.c | 584 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 90 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 675 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index e770bc5f6a2f..978465d77672 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -486,3 +486,587 @@ int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, return 0; } + +static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) +{ + uint32_t opcode = sqe_ctl->opcode; + uint32_t sqe_ctl_len = get_ctl_len(opcode); + + switch (opcode) { + case UDMA_OPC_SEND: + case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + case UDMA_OPC_WRITE: + case UDMA_OPC_WRITE_WITH_IMM: + if (sqe_ctl->inline_en) + return (sqe_ctl_len + sqe_ctl->inline_msg_len - 1) / + UDMA_JFS_WQEBB_SIZE + 1; + break; + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + return ATOMIC_WQEBB_CNT; + case UDMA_OPC_NOP: + return NOP_WQEBB_CNT; + default: + break; + } + + return sq_cal_wqebb_num(sqe_ctl_len, sqe_ctl->sge_num); +} + +static uint8_t udma_get_jfs_opcode(enum ubcore_opcode opcode) +{ + switch (opcode) { + case UBCORE_OPC_SEND: + return UDMA_OPC_SEND; + case UBCORE_OPC_SEND_IMM: + return UDMA_OPC_SEND_WITH_IMM; + case UBCORE_OPC_SEND_INVALIDATE: + return UDMA_OPC_SEND_WITH_INVALID; + case UBCORE_OPC_WRITE: + return UDMA_OPC_WRITE; + case UBCORE_OPC_WRITE_IMM: + return UDMA_OPC_WRITE_WITH_IMM; + case UBCORE_OPC_READ: + return UDMA_OPC_READ; + case UBCORE_OPC_CAS: + return UDMA_OPC_CAS; + case UBCORE_OPC_FADD: + return UDMA_OPC_FAA; + case UBCORE_OPC_NOP: + return UDMA_OPC_NOP; + default: + return 
UDMA_OPC_INVALID; + } +} + +static int +udma_fill_sw_sge(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, uint32_t max_inline_size, + struct udma_normal_sge *sge) +{ + struct ubcore_sge *sge_info; + uint32_t total_len = 0; + uint32_t sge_num = 0; + uint32_t num_sge; + uint32_t i; + + switch (wr->opcode) { + case UBCORE_OPC_SEND: + case UBCORE_OPC_SEND_IMM: + case UBCORE_OPC_SEND_INVALIDATE: + sge_info = wr->send.src.sge; + num_sge = wr->send.src.num_sge; + break; + case UBCORE_OPC_WRITE: + case UBCORE_OPC_WRITE_IMM: + sge_info = wr->rw.src.sge; + num_sge = wr->rw.src.num_sge; + break; + default: + return -EINVAL; + } + + if (wr->flag.bs.inline_flag) { + for (i = 0; i < num_sge; i++) { + if (total_len + sge_info[i].len > max_inline_size) { + dev_info(dev->dev, "inline_size %u is over max_size %u.\n", + total_len + sge_info[i].len, max_inline_size); + return -EINVAL; + } + + memcpy((void *)(uintptr_t)sge + total_len, + (void *)(uintptr_t)sge_info[i].addr, + sge_info[i].len); + total_len += sge_info[i].len; + } + sqe_ctl->inline_msg_len = total_len; + } else { + for (i = 0; i < num_sge; i++) { + if (sge_info[i].len == 0) + continue; + sge->va = sge_info[i].addr; + sge->length = sge_info[i].len; + sge++; + sge_num++; + } + sqe_ctl->sge_num = sge_num; + } + + return 0; +} + +static int +udma_k_fill_send_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, struct ubcore_tjetty *tjetty, + uint32_t max_inline_size) +{ + struct udma_target_jetty *udma_tjetty; + struct udma_token_info *token_info; + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + if (udma_fill_sw_sge(dev, sqe_ctl, wr, max_inline_size, sge)) + return -EINVAL; + + udma_tjetty = to_udma_tjetty(tjetty); + sqe_ctl->target_hint = wr->send.target_hint; + sqe_ctl->rmt_obj_id = tjetty->cfg.id.id; + sqe_ctl->token_en = udma_tjetty->token_value_valid; + sqe_ctl->rmt_token_value = 
udma_tjetty->token_value; + + if (wr->opcode == UBCORE_OPC_SEND_IMM) { + memcpy((void *)sqe_ctl + SQE_SEND_IMM_FIELD, &wr->send.imm_data, + sizeof(uint64_t)); + } else if (wr->opcode == UBCORE_OPC_SEND_INVALIDATE) { + udma_seg = to_udma_seg(wr->send.tseg); + token_info = (struct udma_token_info *)&sqe_ctl->rmt_addr_l_or_token_id; + token_info->token_id = udma_seg->tid; + token_info->token_value = udma_seg->token_value; + } + + return 0; +} + +static int +udma_k_fill_write_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr, struct ubcore_tjetty *tjetty, + uint32_t max_inline_size) +{ + struct udma_token_info *token_info; + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + uint32_t ctrl_len; + + ctrl_len = get_ctl_len(sqe_ctl->opcode); + sge = (struct udma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + if (udma_fill_sw_sge(dev, sqe_ctl, wr, max_inline_size, sge)) + return -EINVAL; + + sge_info = wr->rw.dst.sge; + udma_seg = to_udma_seg(sge_info[0].tseg); + + sqe_ctl->target_hint = wr->rw.target_hint; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info[0].addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sqe_ctl->opcode == UDMA_OPC_WRITE_WITH_IMM) { + memcpy((void *)sqe_ctl + SQE_WRITE_IMM_FIELD, &wr->rw.notify_data, + sizeof(uint64_t)); + token_info = (struct udma_token_info *) + ((void *)sqe_ctl + WRITE_IMM_TOKEN_FIELD); + token_info->token_id = tjetty->cfg.id.id; + token_info->token_value = tjetty->cfg.token_value.token; + } + + return 0; +} + +static int udma_k_fill_read_sqe(struct udma_sqe_ctl *sqe_ctl, struct ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + 
uint32_t sge_num = 0; + uint32_t num; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + sge_info = wr->rw.dst.sge; + + for (num = 0; num < wr->rw.dst.num_sge; num++) { + if (sge_info[num].len == 0) + continue; + sge->va = sge_info[num].addr; + sge->length = sge_info[num].len; + sge++; + sge_num++; + } + + sge_info = wr->rw.src.sge; + udma_seg = to_udma_seg(sge_info[0].tseg); + + sqe_ctl->sge_num = sge_num; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info[0].addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = + (sge_info[0].addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + return 0; +} + +static bool +udma_k_check_atomic_len(struct udma_dev *dev, uint32_t len, uint8_t opcode) +{ + switch (len) { + case UDMA_ATOMIC_LEN_4: + case UDMA_ATOMIC_LEN_8: + return true; + case UDMA_ATOMIC_LEN_16: + if (opcode == UBCORE_OPC_CAS) + return true; + dev_err(dev->dev, "the atomic opcode must be CAS when len is 16.\n"); + return false; + default: + dev_err(dev->dev, "invalid atomic len %u.\n", len); + return false; + } +} + +static int +udma_k_fill_cas_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + + sge_info = wr->cas.src; + if (!udma_k_check_atomic_len(dev, sge_info->len, wr->opcode)) + return -EINVAL; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + sge->va = sge_info->addr; + sge->length = sge_info->len; + + sge_info = wr->cas.dst; + udma_seg = to_udma_seg(sge_info->tseg); + + sqe_ctl->sge_num = UDMA_ATOMIC_SGE_NUM; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + 
sqe_ctl->rmt_addr_h_or_token_value = (sge_info->addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= UDMA_ATOMIC_LEN_8) { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + &wr->cas.swap_data, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + &wr->cas.cmp_data, sge->length); + } else { + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (void *)(uintptr_t)wr->cas.swap_addr, sge->length); + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD + sge->length, + (void *)(uintptr_t)wr->cas.cmp_addr, sge->length); + } + + return 0; +} + +static int +udma_k_fill_faa_sqe(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + struct ubcore_jfs_wr *wr) +{ + struct udma_segment *udma_seg; + struct udma_normal_sge *sge; + struct ubcore_sge *sge_info; + + sge_info = wr->faa.src; + if (!udma_k_check_atomic_len(dev, sge_info->len, wr->opcode)) + return -EINVAL; + + sge = (struct udma_normal_sge *)(sqe_ctl + 1); + + sge->va = sge_info->addr; + sge->length = sge_info->len; + + sge_info = wr->faa.dst; + udma_seg = to_udma_seg(sge_info->tseg); + + sqe_ctl->sge_num = UDMA_ATOMIC_SGE_NUM; + sqe_ctl->rmt_obj_id = udma_seg->tid; + sqe_ctl->token_en = udma_seg->token_value_valid; + sqe_ctl->rmt_token_value = udma_seg->token_value; + sqe_ctl->rmt_addr_l_or_token_id = sge_info->addr & (uint32_t)SQE_CTL_RMA_ADDR_BIT; + sqe_ctl->rmt_addr_h_or_token_value = (sge_info->addr >> (uint32_t)SQE_CTL_RMA_ADDR_OFFSET) & + (uint32_t)SQE_CTL_RMA_ADDR_BIT; + + if (sge->length <= UDMA_ATOMIC_LEN_8) + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, &wr->faa.operand, + sge->length); + else + memcpy((void *)sqe_ctl + SQE_ATOMIC_DATA_FIELD, + (void *)(uintptr_t)wr->faa.operand_addr, sge->length); + + return 0; +} + +static int udma_fill_normal_sge(struct udma_dev *dev, struct udma_sqe_ctl *sqe_ctl, + uint32_t max_inline_size, struct ubcore_jfs_wr *wr, + struct ubcore_tjetty *tjetty) +{ + switch (sqe_ctl->opcode) { + case UDMA_OPC_SEND: + 
case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + return udma_k_fill_send_sqe(dev, sqe_ctl, wr, tjetty, + max_inline_size); + case UDMA_OPC_WRITE: + return udma_k_fill_write_sqe(dev, sqe_ctl, wr, tjetty, max_inline_size); + case UDMA_OPC_WRITE_WITH_IMM: + return udma_k_fill_write_sqe(dev, sqe_ctl, wr, tjetty, + max_inline_size > SQE_WRITE_IMM_INLINE_SIZE ? + SQE_WRITE_IMM_INLINE_SIZE : max_inline_size); + case UDMA_OPC_READ: + return udma_k_fill_read_sqe(sqe_ctl, wr); + case UDMA_OPC_CAS: + return udma_k_fill_cas_sqe(dev, sqe_ctl, wr); + case UDMA_OPC_FAA: + return udma_k_fill_faa_sqe(dev, sqe_ctl, wr); + default: + return -EINVAL; + } +} + +static int udma_k_set_sqe(struct udma_sqe_ctl *sqe_ctl, struct ubcore_jfs_wr *wr, + struct udma_jetty_queue *sq, uint8_t opcode, + struct udma_dev *dev) +{ + struct udma_target_jetty *udma_tjetty; + struct ubcore_tjetty *tjetty; + int ret = 0; + + sqe_ctl->cqe = wr->flag.bs.complete_enable; + sqe_ctl->owner = (sq->pi & sq->buf.entry_cnt) == 0 ? 
1 : 0; + sqe_ctl->opcode = opcode; + sqe_ctl->place_odr = wr->flag.bs.place_order; + + if (opcode == UDMA_OPC_NOP) + return 0; + + if (sq->trans_mode == UBCORE_TP_RC) + tjetty = sq->rc_tjetty; + else + tjetty = wr->tjetty; + + udma_tjetty = to_udma_tjetty(tjetty); + + sqe_ctl->tpn = tjetty->vtpn->vtpn; + sqe_ctl->fence = wr->flag.bs.fence; + sqe_ctl->comp_order = wr->flag.bs.comp_order; + sqe_ctl->se = wr->flag.bs.solicited_enable; + sqe_ctl->inline_en = wr->flag.bs.inline_flag; + sqe_ctl->rmt_jetty_type = tjetty->cfg.type; + memcpy(sqe_ctl->rmt_eid, &udma_tjetty->le_eid.raw, sizeof(uint8_t) * + UDMA_SQE_RMT_EID_SIZE); + + ret = udma_fill_normal_sge(dev, sqe_ctl, sq->max_inline_size, wr, tjetty); + if (ret) + dev_err(dev->dev, "Failed to fill normal sge, opcode :%u in wr.\n", + (uint8_t)wr->opcode); + + return ret; +} + +static bool udma_k_check_sge_num(uint8_t opcode, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr) +{ + switch (opcode) { + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + return sq->max_sge_num == 0; + case UDMA_OPC_READ: + return wr->rw.dst.num_sge > UDMA_JFS_MAX_SGE_READ || + wr->rw.dst.num_sge > sq->max_sge_num; + case UDMA_OPC_WRITE_WITH_IMM: + return wr->rw.src.num_sge > UDMA_JFS_MAX_SGE_WRITE_IMM || + wr->rw.src.num_sge > sq->max_sge_num; + case UDMA_OPC_SEND: + case UDMA_OPC_SEND_WITH_IMM: + case UDMA_OPC_SEND_WITH_INVALID: + return wr->send.src.num_sge > sq->max_sge_num; + default: + return wr->rw.src.num_sge > sq->max_sge_num; + } +} + +static void udma_copy_to_sq(struct udma_jetty_queue *sq, uint32_t wqebb_cnt, + struct udma_jfs_wqebb *tmp_sq) +{ + uint32_t remain = sq->buf.entry_cnt - (sq->pi & (sq->buf.entry_cnt - 1)); + uint32_t field_h; + uint32_t field_l; + + field_h = remain > wqebb_cnt ? wqebb_cnt : remain; + field_l = wqebb_cnt > field_h ? 
wqebb_cnt - field_h : 0; + + memcpy(sq->kva_curr, tmp_sq, field_h * sizeof(*tmp_sq)); + + if (field_l) + memcpy(sq->buf.kva, tmp_sq + field_h, field_l * sizeof(*tmp_sq)); +} + +static void *udma_k_inc_ptr_wrap(uint32_t sq_buf_size, uint32_t wqebb_size, + uint8_t *sq_base_addr, uint8_t *sq_buf_curr) +{ + uint8_t *sq_buf_end; + + sq_buf_end = (uint8_t *)(sq_buf_size + sq_base_addr); + + sq_buf_curr = ((sq_buf_curr + wqebb_size) < sq_buf_end) ? + (sq_buf_curr + wqebb_size) : sq_base_addr + (sq_buf_curr + + wqebb_size - sq_buf_end); + + return sq_buf_curr; +} + +static int udma_post_one_wr(struct udma_jetty_queue *sq, struct ubcore_jfs_wr *wr, + struct udma_dev *udma_dev, struct udma_sqe_ctl **wqe_addr, + bool *dwqe_enable) +{ + struct udma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = {}; + uint32_t wqebb_cnt; + uint8_t opcode; + uint32_t i; + int ret; + + opcode = udma_get_jfs_opcode(wr->opcode); + if (unlikely(opcode == UDMA_OPC_INVALID)) { + dev_err(udma_dev->dev, "Invalid opcode :%u.\n", wr->opcode); + return -EINVAL; + } + + if (unlikely(udma_k_check_sge_num(opcode, sq, wr))) { + dev_err(udma_dev->dev, "WR sge num invalid.\n"); + return -EINVAL; + } + + ret = udma_k_set_sqe((struct udma_sqe_ctl *)(void *)tmp_sq, wr, sq, + opcode, udma_dev); + if (ret) + return ret; + + wqebb_cnt = get_wqebb_num((struct udma_sqe_ctl *)(void *)tmp_sq); + if (wqebb_cnt == 1 && !!(udma_dev->caps.feature & UDMA_CAP_FEATURE_DIRECT_WQE)) + *dwqe_enable = true; + + if (to_check_sq_overflow(sq, wqebb_cnt)) { + dev_err(udma_dev->dev, "JFS overflow, wqebb_cnt:%u.\n", wqebb_cnt); + return -ENOMEM; + } + + udma_copy_to_sq(sq, wqebb_cnt, tmp_sq); + + *wqe_addr = (struct udma_sqe_ctl *)sq->kva_curr; + + sq->kva_curr = udma_k_inc_ptr_wrap(sq->buf.entry_cnt * sq->buf.entry_size, + wqebb_cnt * sq->buf.entry_size, + (uint8_t *)sq->buf.kva, + (uint8_t *)sq->kva_curr); + + for (i = 0; i < wqebb_cnt; i++) + sq->wrid[(sq->pi + i) & (sq->buf.entry_cnt - 1)] = wr->user_ctx; + + sq->pi += wqebb_cnt; + + return 0; +} 
+ +static inline void udma_k_update_sq_db(struct udma_jetty_queue *sq) +{ + uint32_t *db_addr = sq->db_addr; + *db_addr = sq->pi; +} + +#ifdef ST64B +static void st64b(uint64_t *src, uint64_t *dst) +{ + asm volatile ( + "mov x9, %0\n" + "mov x10, %1\n" + "ldr x0, [x9]\n" + "ldr x1, [x9, #8]\n" + "ldr x2, [x9, #16]\n" + "ldr x3, [x9, #24]\n" + "ldr x4, [x9, #32]\n" + "ldr x5, [x9, #40]\n" + "ldr x6, [x9, #48]\n" + "ldr x7, [x9, #56]\n" + ".inst 0xf83f9140\n" + ::"r" (src), "r"(dst):"cc", "memory" + ); +} +#endif + +static void udma_write_dsqe(struct udma_jetty_queue *sq, + struct udma_sqe_ctl *ctrl) +{ +#define DWQE_SIZE 8 + int i; + + ctrl->sqe_bb_idx = sq->pi; + +#ifdef ST64B + st64b(((uint64_t *)ctrl), (uint64_t *)sq->dwqe_addr); +#else + for (i = 0; i < DWQE_SIZE; i++) + writeq_relaxed(*((uint64_t *)ctrl + i), + (uint64_t *)sq->dwqe_addr + i); +#endif +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) +{ + struct udma_sqe_ctl *wqe_addr; + bool dwqe_enable = false; + struct ubcore_jfs_wr *it; + int wr_cnt = 0; + int ret = 0; + + if (!sq->lock_free) + spin_lock(&sq->lock); + + for (it = wr; it != NULL; it = (struct ubcore_jfs_wr *)(void *)it->next) { + ret = udma_post_one_wr(sq, it, udma_dev, &wqe_addr, &dwqe_enable); + if (ret) { + *bad_wr = it; + goto err_post_wr; + } + wr_cnt++; + } + +err_post_wr: + if (likely(wr_cnt && udma_dev->status != UDMA_SUSPEND)) { + wmb(); /* set sqe before doorbell */ + if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) + udma_write_dsqe(sq, wqe_addr); + else + udma_k_update_sq_db(sq); + } + + if (!sq->lock_free) + spin_unlock(&sq->lock); + + return ret; +} + +int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + int ret; + + 
ret = udma_post_sq_wr(udma_dev, &udma_jfs->sq, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, "Failed to post jfs wr, sq_id = %u.\n", + udma_jfs->sq.id); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 6cdc281e53c3..65d8e2ac52f2 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -7,10 +7,28 @@ #include "udma_common.h" #define MAX_WQEBB_NUM 4 +#define UDMA_SQE_RMT_EID_SIZE 16 +#define SQE_WRITE_IMM_CTL_LEN 64 +#define SQE_NORMAL_CTL_LEN 48 +#define ATOMIC_WQEBB_CNT 2 +#define NOP_WQEBB_CNT 1 #define UDMA_JFS_WQEBB_SIZE 64 #define UDMA_JFS_SGE_SIZE 16 +#define UDMA_JFS_MAX_SGE_READ 6 +#define UDMA_JFS_MAX_SGE_WRITE_IMM 12 +#define UDMA_ATOMIC_SGE_NUM 1 +#define UDMA_ATOMIC_LEN_4 4 +#define UDMA_ATOMIC_LEN_8 8 +#define UDMA_ATOMIC_LEN_16 16 +#define SQE_CTL_RMA_ADDR_OFFSET 32 +#define SQE_CTL_RMA_ADDR_BIT GENMASK(31, 0) +#define SQE_ATOMIC_DATA_FIELD 64 +#define SQE_SEND_IMM_FIELD 40 +#define WRITE_IMM_TOKEN_FIELD 56 +#define SQE_WRITE_IMM_FIELD 48 #define SQE_WRITE_NOTIFY_CTL_LEN 80 +#define SQE_WRITE_IMM_INLINE_SIZE 192 enum udma_jfs_type { UDMA_NORMAL_JFS_TYPE, @@ -28,6 +46,63 @@ struct udma_jfs { bool ue_rx_closed; }; +/* thanks to include/rdma/ib_verbs.h */ +enum udma_sq_opcode { + UDMA_OPC_SEND, + UDMA_OPC_SEND_WITH_IMM, + UDMA_OPC_SEND_WITH_INVALID, + UDMA_OPC_WRITE, + UDMA_OPC_WRITE_WITH_IMM, + UDMA_OPC_READ = 0x6, + UDMA_OPC_CAS, + UDMA_OPC_FAA = 0xb, + UDMA_OPC_NOP = 0x11, + UDMA_OPC_INVALID = 0x12, +}; + +struct udma_jfs_wqebb { + uint32_t value[16]; +}; + +struct udma_sqe_ctl { + uint32_t sqe_bb_idx : 16; + uint32_t place_odr : 2; + uint32_t comp_order : 1; + uint32_t fence : 1; + uint32_t se : 1; + uint32_t cqe : 1; + uint32_t inline_en : 1; + uint32_t rsv : 5; + uint32_t token_en : 1; + uint32_t rmt_jetty_type : 2; + uint32_t owner : 1; + uint32_t target_hint : 8; + uint32_t opcode : 8; + uint32_t rsv1 : 6; + uint32_t inline_msg_len : 10; + uint32_t 
tpn : 24; + uint32_t sge_num : 8; + uint32_t rmt_obj_id : 20; + uint32_t rsv2 : 12; + uint8_t rmt_eid[UDMA_SQE_RMT_EID_SIZE]; + uint32_t rmt_token_value; + uint32_t rsv3; + uint32_t rmt_addr_l_or_token_id; + uint32_t rmt_addr_h_or_token_value; +}; + +struct udma_normal_sge { + uint32_t length; + uint32_t token_id; + uint64_t va; +}; + +struct udma_token_info { + uint32_t token_id : 20; + uint32_t rsv : 12; + uint32_t token_value; +}; + static inline struct udma_jfs *to_udma_jfs(struct ubcore_jfs *jfs) { return container_of(jfs, struct udma_jfs, ubcore_jfs); @@ -38,12 +113,23 @@ static inline struct udma_jfs *to_udma_jfs_from_queue(struct udma_jetty_queue *q return container_of(queue, struct udma_jfs, sq); } +static inline bool to_check_sq_overflow(struct udma_jetty_queue *sq, + uint32_t wqebb_cnt) +{ + return sq->pi - sq->ci + wqebb_cnt > sq->buf.entry_cnt; +} + static inline uint32_t sq_cal_wqebb_num(uint32_t sqe_ctl_len, uint32_t sge_num) { return (sqe_ctl_len + (sge_num - 1) * UDMA_JFS_SGE_SIZE) / UDMA_JFS_WQEBB_SIZE + 1; } +static inline uint32_t get_ctl_len(uint8_t opcode) +{ + return opcode == UDMA_OPC_WRITE_WITH_IMM ? 
SQE_WRITE_IMM_CTL_LEN : SQE_NORMAL_CTL_LEN; +} + struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); @@ -55,5 +141,9 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); +int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, + struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); +int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b116b514ee3b..bfbaf9d07908 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -200,6 +200,7 @@ static struct ubcore_ops g_dev_ops = { .unimport_jetty = udma_unimport_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, + .post_jfs_wr = udma_post_jfs_wr, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 22855e37bde25454c0a6e83c77c2c68dc6e7e045 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 14:20:51 +0800 Subject: [PATCH 044/126] ub: udma: Support post jfr work request. commit 45af039f81ccb00d0189bf03f875c2870deccb55 openEuler This patch adds the ability to post jfr work request. After user post a request, driver will assemble the wqe and update doorbell. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 7 ++ drivers/ub/urma/hw/udma/udma_jfr.c | 100 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfr.h | 19 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 127 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index b1b129ee4449..f8bab657aa6a 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -91,6 +91,13 @@ static inline void udma_alloc_kernel_db(struct udma_dev *dev, queue->db_addr = queue->dwqe_addr + UDMA_DOORBELL_OFFSET; } +static inline void *get_buf_entry(struct udma_buf *buf, uint32_t n) +{ + uint32_t entry_index = n & (buf->entry_cnt - 1); + + return (char *)buf->kva + (entry_index * buf->entry_size); +} + static inline uint8_t to_ta_timeout(uint32_t err_timeout) { #define TA_TIMEOUT_DIVISOR 8 diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 7462d75f1fba..5e01e6a8f141 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -803,6 +803,106 @@ int udma_unimport_jfr(struct ubcore_tjetty *tjfr) return 0; } +static void fill_wqe_idx(struct udma_jfr *jfr, uint32_t wqe_idx) +{ + uint32_t *idx_buf; + + idx_buf = (uint32_t *)get_buf_entry(&jfr->idx_que.buf, jfr->rq.pi); + *idx_buf = cpu_to_le32(wqe_idx); + + jfr->rq.pi++; +} + +static void fill_recv_sge_to_wqe(struct ubcore_jfr_wr *wr, void *wqe, + uint32_t max_sge) +{ + struct udma_wqe_sge *sge = (struct udma_wqe_sge *)wqe; + uint32_t i, cnt; + + for (i = 0, cnt = 0; i < wr->src.num_sge; i++) { + if (!wr->src.sge[i].len) + continue; + set_data_of_sge(sge + cnt, wr->src.sge + i); + ++cnt; + } + + if (cnt < max_sge) + memset(sge + cnt, 0, (max_sge - cnt) * UDMA_SGE_SIZE); +} + +static int post_recv_one(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_jfr_wr *wr) +{ + uint32_t wqe_idx; + int ret = 
0; + void *wqe; + + if (unlikely(wr->src.num_sge > jfr->max_sge)) { + dev_err(dev->dev, + "failed to check sge, wr_num_sge = %u, max_sge = %u, jfrn = %u.\n", + wr->src.num_sge, jfr->max_sge, jfr->rq.id); + return -EINVAL; + } + + if (udma_jfrwq_overflow(jfr)) { + dev_err(dev->dev, "failed to check jfrwq, jfrwq is full, jfrn = %u.\n", + jfr->rq.id); + return -ENOMEM; + } + + ret = udma_id_alloc(dev, &jfr->idx_que.jfr_idx_table.ida_table, + &wqe_idx); + if (ret) { + dev_err(dev->dev, "failed to get jfr wqe idx.\n"); + return ret; + } + wqe = get_buf_entry(&jfr->rq.buf, wqe_idx); + + fill_recv_sge_to_wqe(wr, wqe, jfr->max_sge); + + fill_wqe_idx(jfr, wqe_idx); + + jfr->rq.wrid[wqe_idx] = wr->user_ctx; + + return ret; +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct udma_dev *dev = to_udma_dev(ubcore_jfr->ub_dev); + struct udma_jfr *jfr = to_udma_jfr(ubcore_jfr); + uint32_t nreq; + int ret = 0; + + if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) + spin_lock(&jfr->lock); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + ret = post_recv_one(dev, jfr, wr); + if (ret) { + *bad_wr = wr; + break; + } + } + + if (likely(nreq)) { + /* + * Ensure that the pipeline fills all RQEs into the RQ queue, + * then updating the PI pointer. 
+ */ + wmb(); + *jfr->sw_db.db_record = jfr->rq.pi & + (uint32_t)UDMA_JFR_DB_PI_M; + } + + if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) + spin_unlock(&jfr->lock); + + return ret; +} + struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jfr.h b/drivers/ub/urma/hw/udma/udma_jfr.h index 9a90e60bd391..c446eaedee1d 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.h +++ b/drivers/ub/urma/hw/udma/udma_jfr.h @@ -95,6 +95,12 @@ struct udma_jfr { struct completion ae_comp; }; +struct udma_wqe_sge { + uint32_t length; + uint32_t token_id; + uint64_t va; +}; + struct udma_jfr_ctx { /* DW0 */ uint32_t state : 2; @@ -150,6 +156,17 @@ static inline struct udma_jfr *to_udma_jfr(struct ubcore_jfr *jfr) return container_of(jfr, struct udma_jfr, ubcore_jfr); } +static inline bool udma_jfrwq_overflow(struct udma_jfr *jfr) +{ + return (jfr->rq.pi - jfr->rq.ci) >= jfr->wqe_cnt; +} + +static inline void set_data_of_sge(struct udma_wqe_sge *sge, struct ubcore_sge *sg) +{ + sge->va = cpu_to_le64(sg->addr); + sge->length = cpu_to_le32(sg->len); +} + static inline struct udma_jfr *to_udma_jfr_from_queue(struct udma_jetty_queue *queue) { return container_of(queue, struct udma_jfr, rq); @@ -166,5 +183,7 @@ struct ubcore_tjetty *udma_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata); +int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); #endif /* __UDMA_JFR_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index bfbaf9d07908..5f8de12c21ea 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -201,6 +201,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, 
.post_jfs_wr = udma_post_jfs_wr, + .post_jfr_wr = udma_post_jfr_wr, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 245a4c68448ea9b784a4aa134f3ae5c78629bcf5 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 15:15:38 +0800 Subject: [PATCH 045/126] ub: udma: Support post jetty work request. commit 6465886ae8e08630d4f8fd5b440347f8c3bdafc0 openEuler This patch adds the ability to post jetty work request. After user post a request, driver will assemble the wqe and update doorbell. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 34 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 5 +++- drivers/ub/urma/hw/udma/udma_main.c | 2 ++ 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 4b4e924f9111..61fc94c0898c 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -1249,6 +1249,40 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } +int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + int ret; + + ret = udma_post_sq_wr(udma_dev, &udma_jetty->sq, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, + "jetty post sq wr failed, ret = %d, jetty id = %u.\n", + ret, udma_jetty->sq.id); + + return ret; +} + +int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct ubcore_jfr *jfr; + int ret; + + jfr = &udma_jetty->jfr->ubcore_jfr; + ret = udma_post_jfr_wr(jfr, wr, bad_wr); + if (ret) + dev_err(udma_dev->dev, + "jetty 
post jfr wr failed, ret = %d, jetty id = %u.\n", + ret, udma_jetty->sq.id); + + return ret; +} + struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index dba8fa2a05a5..4b9749afb64e 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -250,7 +250,10 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, enum jetty_state state); - +int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5f8de12c21ea..ba5d2b7996f8 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -202,6 +202,8 @@ static struct ubcore_ops g_dev_ops = { .delete_jetty_grp = udma_delete_jetty_grp, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, + .post_jetty_send_wr = udma_post_jetty_send_wr, + .post_jetty_recv_wr = udma_post_jetty_recv_wr, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From fb0f0f67f35800eed2fc53364232bf2ddd2d75dd Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 15:55:24 +0800 Subject: [PATCH 046/126] ub: udma: Support poll jfc. 
commit d72435589dce4f196c7993c40a20b5ff7bf5cb45 openEuler This patch adds the ability to poll jfc. When the hardware completes the sending task, it generates a completion event (CQE), which needs to be polled by the driver. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 163 ++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.c | 385 ++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 44 ++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 593 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 8b709dc10a20..b06ff3ea61cf 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,169 @@ #include #include "udma_def.h" +static int send_cmd_query_cqe_aux_info(struct udma_dev *udma_dev, + struct udma_cmd_query_cqe_aux_info *info) +{ + struct ubase_cmd_buf cmd_in, cmd_out; + int ret; + + udma_fill_buf(&cmd_in, UDMA_CMD_GET_CQE_AUX_INFO, true, + sizeof(struct udma_cmd_query_cqe_aux_info), info); + udma_fill_buf(&cmd_out, UDMA_CMD_GET_CQE_AUX_INFO, true, + sizeof(struct udma_cmd_query_cqe_aux_info), info); + + ret = ubase_cmd_send_inout(udma_dev->comdev.adev, &cmd_in, &cmd_out); + if (ret) + dev_err(udma_dev->dev, + "failed to query cqe aux info, ret = %d.\n", ret); + + return ret; +} + +static void free_kernel_cqe_aux_info(struct udma_cqe_aux_info_out *user_aux_info_out, + struct udma_cqe_aux_info_out *aux_info_out) +{ + if (!user_aux_info_out->aux_info_type) + return; + + kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; + + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_value = NULL; +} + +static int copy_out_cqe_data_from_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_cqe_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_cqe_aux_info_out *user_aux_info_out) +{ + if (out->addr != 0 && out->len == 
sizeof(struct udma_cqe_aux_info_out)) { + memcpy(aux_info_out, (void *)(uintptr_t)out->addr, + sizeof(struct udma_cqe_aux_info_out)); + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + if (aux_info_out->aux_info_num > MAX_CQE_AUX_INFO_TYPE_NUM) { + dev_err(udma_dev->dev, + "invalid cqe aux info num %u.\n", + aux_info_out->aux_info_num); + return -EINVAL; + } + + user_aux_info_out->aux_info_type = aux_info_out->aux_info_type; + user_aux_info_out->aux_info_value = aux_info_out->aux_info_value; + aux_info_out->aux_info_type = + kcalloc(aux_info_out->aux_info_num, + sizeof(enum udma_cqe_aux_info_type), GFP_KERNEL); + if (!aux_info_out->aux_info_type) + return -ENOMEM; + + aux_info_out->aux_info_value = + kcalloc(aux_info_out->aux_info_num, + sizeof(uint32_t), GFP_KERNEL); + if (!aux_info_out->aux_info_value) { + kfree(aux_info_out->aux_info_type); + return -ENOMEM; + } + } + } + + return 0; +} + +static int copy_out_cqe_data_to_user(struct udma_dev *udma_dev, + struct ubcore_user_ctl_out *out, + struct udma_cqe_aux_info_out *aux_info_out, + struct ubcore_ucontext *uctx, + struct udma_cqe_aux_info_out *user_aux_info_out) +{ + unsigned long byte; + + if (out->addr != 0 && out->len == sizeof(struct udma_cqe_aux_info_out)) { + if (uctx && aux_info_out->aux_info_num > 0 && + aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL) { + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_type, + (void *)aux_info_out->aux_info_type, + aux_info_out->aux_info_num * + sizeof(enum udma_cqe_aux_info_type)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info type failed, byte = %lu.\n", byte); + return -EFAULT; + } + + byte = copy_to_user((void __user *)user_aux_info_out->aux_info_value, + (void *)aux_info_out->aux_info_value, + aux_info_out->aux_info_num * + sizeof(uint32_t)); + if (byte) { + dev_err(udma_dev->dev, + "copy resp to aux info value failed, 
byte = %lu.\n", byte); + return -EFAULT; + } + + kfree(aux_info_out->aux_info_type); + kfree(aux_info_out->aux_info_value); + aux_info_out->aux_info_type = user_aux_info_out->aux_info_type; + aux_info_out->aux_info_value = user_aux_info_out->aux_info_value; + } + memcpy((void *)(uintptr_t)out->addr, aux_info_out, + sizeof(struct udma_cqe_aux_info_out)); + } + + return 0; +} + +int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_cqe_aux_info_out user_aux_info_out = {}; + struct udma_cqe_aux_info_out aux_info_out = {}; + struct udma_cmd_query_cqe_aux_info info = {}; + struct udma_cqe_info_in cqe_info_in = {}; + struct udma_dev *udev = to_udma_dev(dev); + int ret; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_cqe_info_in))) { + dev_err(udev->dev, "parameter invalid in query cqe aux info, in_len = %u.\n", + in->len); + return -EINVAL; + } + memcpy(&cqe_info_in, (void *)(uintptr_t)in->addr, + sizeof(struct udma_cqe_info_in)); + + ret = copy_out_cqe_data_from_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udev->dev, + "copy out data from user failed, ret = %d.\n", ret); + return ret; + } + + info.status = cqe_info_in.status; + info.is_client = !(cqe_info_in.s_r & 1); + + ret = send_cmd_query_cqe_aux_info(udev, &info); + if (ret) { + dev_err(udev->dev, + "send cmd query aux info failed, ret = %d.\n", + ret); + free_kernel_cqe_aux_info(&user_aux_info_out, &aux_info_out); + return ret; + } + + ret = copy_out_cqe_data_to_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); + if (ret) { + dev_err(udev->dev, + "copy out data to user failed, ret = %d.\n", ret); + free_kernel_cqe_aux_info(&user_aux_info_out, &aux_info_out); + } + + return ret; +} + static int to_hw_ae_event_type(struct udma_dev *udma_dev, uint32_t event_type, struct udma_cmd_query_ae_aux_info *info) { diff --git 
a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 5067b3c52104..d6a3b53cfe79 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -645,3 +645,388 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, return ret; } + +static enum jfc_poll_state udma_get_cr_status(struct udma_dev *dev, + uint8_t src_status, + uint8_t substatus, + enum ubcore_cr_status *dst_status) +{ +#define UDMA_SRC_STATUS_NUM 7 +#define UDMA_SUB_STATUS_NUM 5 + +struct udma_cr_status { + bool is_valid; + enum ubcore_cr_status cr_status; +}; + + static struct udma_cr_status map[UDMA_SRC_STATUS_NUM][UDMA_SUB_STATUS_NUM] = { + {{true, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, UBCORE_CR_UNSUPPORTED_OPCODE_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{false, UBCORE_CR_SUCCESS}, {true, UBCORE_CR_LOC_LEN_ERR}, + {true, UBCORE_CR_LOC_ACCESS_ERR}, {true, UBCORE_CR_REM_RESP_LEN_ERR}, + {true, UBCORE_CR_LOC_DATA_POISON}}, + {{false, UBCORE_CR_SUCCESS}, {true, UBCORE_CR_REM_UNSUPPORTED_REQ_ERR}, + {true, UBCORE_CR_REM_ACCESS_ABORT_ERR}, {false, UBCORE_CR_SUCCESS}, + {true, UBCORE_CR_REM_DATA_POISON}}, + {{true, UBCORE_CR_RNR_RETRY_CNT_EXC_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, UBCORE_CR_ACK_TIMEOUT_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}}, + {{true, UBCORE_CR_FLUSH_ERR}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}, {false, UBCORE_CR_SUCCESS}, + {false, UBCORE_CR_SUCCESS}} + }; + + if ((src_status < UDMA_SRC_STATUS_NUM) && (substatus < UDMA_SUB_STATUS_NUM) && + map[src_status][substatus].is_valid) { + *dst_status = 
map[src_status][substatus].cr_status; + return JFC_OK; + } + + dev_err(dev->dev, "cqe status is error, status = %u, substatus = %u.\n", + src_status, substatus); + + return JFC_POLL_ERR; +} + +static void udma_handle_inline_cqe(struct udma_jfc_cqe *cqe, uint8_t opcode, + struct udma_jetty_queue *queue, + struct ubcore_cr *cr) +{ + struct udma_jfr *jfr = to_udma_jfr_from_queue(queue); + uint32_t rqe_idx, data_len, sge_idx, size; + struct udma_wqe_sge *sge_list; + void *cqe_inline_buf; + + rqe_idx = cqe->entry_idx; + sge_list = (struct udma_wqe_sge *)(jfr->rq.buf.kva + + rqe_idx * jfr->rq.buf.entry_size); + data_len = cqe->byte_cnt; + cqe_inline_buf = opcode == HW_CQE_OPC_SEND ? + (void *)&cqe->data_l : (void *)&cqe->inline_data; + + for (sge_idx = 0; (sge_idx < jfr->max_sge) && data_len; sge_idx++) { + size = sge_list[sge_idx].length < data_len ? + sge_list[sge_idx].length : data_len; + memcpy((void *)(uintptr_t)sge_list[sge_idx].va, + cqe_inline_buf, size); + data_len -= size; + cqe_inline_buf += size; + } + cr->completion_len = cqe->byte_cnt - data_len; + + if (data_len) { + cqe->status = UDMA_CQE_LOCAL_OP_ERR; + cqe->substatus = UDMA_CQE_LOCAL_LENGTH_ERR; + } +} + +static void udma_parse_opcode_for_res(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + uint8_t opcode = cqe->opcode; + struct udma_inv_tid *inv_tid; + + switch (opcode) { + case HW_CQE_OPC_SEND: + cr->opcode = UBCORE_CR_OPC_SEND; + break; + case HW_CQE_OPC_SEND_WITH_IMM: + cr->imm_data = (uint64_t)cqe->data_h << UDMA_IMM_DATA_SHIFT | + cqe->data_l; + cr->opcode = UBCORE_CR_OPC_SEND_WITH_IMM; + break; + case HW_CQE_OPC_SEND_WITH_INV: + cr->invalid_token.token_id = cqe->data_l & (uint32_t)UDMA_CQE_INV_TOKEN_ID; + cr->invalid_token.token_id <<= UDMA_TID_SHIFT; + cr->invalid_token.token_value.token = cqe->data_h; + cr->opcode = UBCORE_CR_OPC_SEND_WITH_INV; + + inv_tid = kzalloc(sizeof(*inv_tid), GFP_ATOMIC); + if (!inv_tid) + return; + + 
inv_tid->tid = cr->invalid_token.token_id >> UDMA_TID_SHIFT; + list_add(&inv_tid->list, tid_list); + + break; + case HW_CQE_OPC_WRITE_WITH_IMM: + cr->imm_data = (uint64_t)cqe->data_h << UDMA_IMM_DATA_SHIFT | + cqe->data_l; + cr->opcode = UBCORE_CR_OPC_WRITE_WITH_IMM; + break; + default: + cr->opcode = (enum ubcore_cr_opcode)HW_CQE_OPC_ERR; + dev_err(dev->dev, "receive invalid opcode :%u.\n", opcode); + cr->status = UBCORE_CR_UNSUPPORTED_OPCODE_ERR; + break; + } +} + +static struct udma_jfr *udma_get_jfr(struct udma_dev *udma_dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr) +{ + struct udma_jetty_queue *udma_sq; + struct udma_jetty *jetty = NULL; + struct udma_jfr *jfr = NULL; + uint32_t local_id; + + local_id = cr->local_id; + if (cqe->is_jetty) { + udma_sq = (struct udma_jetty_queue *)xa_load(&udma_dev->jetty_table.xa, local_id); + if (!udma_sq) { + dev_warn(udma_dev->dev, + "get jetty failed, jetty_id = %u.\n", local_id); + return NULL; + } + jetty = to_udma_jetty_from_queue(udma_sq); + jfr = jetty->jfr; + cr->user_data = (uintptr_t)&jetty->ubcore_jetty; + } else { + jfr = (struct udma_jfr *)xa_load(&udma_dev->jfr_table.xa, local_id); + if (!jfr) { + dev_warn(udma_dev->dev, + "get jfr failed jfr id = %u.\n", local_id); + return NULL; + } + cr->user_data = (uintptr_t)&jfr->ubcore_jfr; + } + + return jfr; +} + +static bool udma_update_jfr_idx(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + bool is_clean) +{ + struct udma_jetty_queue *queue; + uint8_t opcode = cqe->opcode; + struct udma_jfr *jfr; + uint32_t entry_idx; + + jfr = udma_get_jfr(dev, cqe, cr); + if (!jfr) + return true; + + queue = &jfr->rq; + entry_idx = cqe->entry_idx; + cr->user_ctx = queue->wrid[entry_idx & (queue->buf.entry_cnt - (uint32_t)1)]; + + if (!is_clean && cqe->inline_en) + udma_handle_inline_cqe(cqe, opcode, queue, cr); + + if (!jfr->ubcore_jfr.jfr_cfg.flag.bs.lock_free) + spin_lock(&jfr->lock); + + udma_id_free(&jfr->idx_que.jfr_idx_table.ida_table, 
entry_idx); + queue->ci++; + + if (!jfr->ubcore_jfr.jfr_cfg.flag.bs.lock_free) + spin_unlock(&jfr->lock); + + return false; +} + +static enum jfc_poll_state udma_parse_cqe_for_send(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr) +{ + struct udma_jetty_queue *queue; + struct udma_jetty *jetty; + struct udma_jfs *jfs; + + queue = (struct udma_jetty_queue *)(uintptr_t)( + (uint64_t)cqe->user_data_h << UDMA_ADDR_SHIFT | + cqe->user_data_l); + if (!queue) { + dev_err(dev->dev, "jetty queue addr is null, jetty_id = %u.\n", cr->local_id); + return JFC_POLL_ERR; + } + + if (unlikely(udma_get_cr_status(dev, cqe->status, cqe->substatus, &cr->status))) + return JFC_POLL_ERR; + + if (!!cqe->fd) { + cr->status = UBCORE_CR_WR_FLUSH_ERR_DONE; + queue->flush_flag = true; + } else { + queue->ci += (cqe->entry_idx - queue->ci) & (queue->buf.entry_cnt - 1); + cr->user_ctx = queue->wrid[queue->ci & (queue->buf.entry_cnt - 1)]; + queue->ci++; + } + + if (!!cr->flag.bs.jetty) { + jetty = to_udma_jetty_from_queue(queue); + cr->user_data = (uintptr_t)&jetty->ubcore_jetty; + } else { + jfs = container_of(queue, struct udma_jfs, sq); + cr->user_data = (uintptr_t)&jfs->ubcore_jfs; + } + + return JFC_OK; +} + +static enum jfc_poll_state udma_parse_cqe_for_recv(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + uint8_t substatus; + uint8_t status; + + if (unlikely(udma_update_jfr_idx(dev, cqe, cr, false))) + return JFC_POLL_ERR; + + udma_parse_opcode_for_res(dev, cqe, cr, tid_list); + status = cqe->status; + substatus = cqe->substatus; + if (unlikely(udma_get_cr_status(dev, status, substatus, &cr->status))) + return JFC_POLL_ERR; + + return JFC_OK; +} + +static enum jfc_poll_state parse_cqe_for_jfc(struct udma_dev *dev, + struct udma_jfc_cqe *cqe, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + enum jfc_poll_state ret; + + cr->flag.bs.s_r = cqe->s_r; + cr->flag.bs.jetty = cqe->is_jetty; + 
cr->completion_len = cqe->byte_cnt; + cr->tpn = cqe->tpn; + cr->local_id = cqe->local_num_h << UDMA_SRC_IDX_SHIFT | cqe->local_num_l; + cr->remote_id.id = cqe->rmt_idx; + udma_swap_endian((uint8_t *)(cqe->rmt_eid), cr->remote_id.eid.raw, UBCORE_EID_SIZE); + + if (cqe->s_r == CQE_FOR_RECEIVE) + ret = udma_parse_cqe_for_recv(dev, cqe, cr, tid_list); + else + ret = udma_parse_cqe_for_send(dev, cqe, cr); + + return ret; +} + +static struct udma_jfc_cqe *get_next_cqe(struct udma_jfc *jfc, uint32_t n) +{ + struct udma_jfc_cqe *cqe; + uint32_t valid_owner; + + cqe = (struct udma_jfc_cqe *)get_buf_entry(&jfc->buf, n); + + valid_owner = (n >> jfc->cq_shift) & UDMA_JFC_DB_VALID_OWNER_M; + if (!(cqe->owner ^ valid_owner)) + return NULL; + + return cqe; +} + +static void dump_cqe_aux_info(struct udma_dev *dev, struct ubcore_cr *cr) +{ + struct ubcore_user_ctl_out out = {}; + struct ubcore_user_ctl_in in = {}; + struct udma_cqe_info_in info_in; + + info_in.status = cr->status; + info_in.s_r = cr->flag.bs.s_r; + in.addr = (uint64_t)&info_in; + in.len = sizeof(struct udma_cqe_info_in); + in.opcode = UDMA_USER_CTL_QUERY_CQE_AUX_INFO; + + (void)udma_query_cqe_aux_info(&dev->ub_dev, NULL, &in, &out); +} + +static enum jfc_poll_state udma_poll_one(struct udma_dev *dev, + struct udma_jfc *jfc, + struct ubcore_cr *cr, + struct list_head *tid_list) +{ + struct udma_jfc_cqe *cqe; + + cqe = get_next_cqe(jfc, jfc->ci); + if (!cqe) + return JFC_EMPTY; + + ++jfc->ci; + /* Memory barrier */ + rmb(); + + if (parse_cqe_for_jfc(dev, cqe, cr, tid_list)) + return JFC_POLL_ERR; + + if (unlikely(cr->status != UBCORE_CR_SUCCESS) && dump_aux_info) + dump_cqe_aux_info(dev, cr); + + return JFC_OK; +} + +static void udma_inv_tid(struct udma_dev *dev, struct list_head *tid_list) +{ + struct udma_inv_tid *tid_node; + struct udma_inv_tid *tmp; + struct iommu_sva *ksva; + uint32_t tid; + + mutex_lock(&dev->ksva_mutex); + list_for_each_entry_safe(tid_node, tmp, tid_list, list) { + tid = tid_node->tid; + ksva 
= (struct iommu_sva *)xa_load(&dev->ksva_table, tid); + if (!ksva) { + dev_warn(dev->dev, "tid may have been released.\n"); + } else { + ummu_ksva_unbind_device(ksva); + __xa_erase(&dev->ksva_table, tid); + } + + list_del(&tid_node->list); + kfree(tid_node); + } + mutex_unlock(&dev->ksva_mutex); +} + +/* thanks to drivers/infiniband/hw/bnxt_re/ib_verbs.c */ +int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *dev = to_udma_dev(jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + enum jfc_poll_state err = JFC_OK; + struct list_head tid_list; + uint32_t ci; + int npolled; + + INIT_LIST_HEAD(&tid_list); + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_lock(&udma_jfc->lock); + + for (npolled = 0; npolled < cr_cnt; ++npolled) { + err = udma_poll_one(dev, udma_jfc, cr + npolled, &tid_list); + if (err != JFC_OK) + break; + } + + if (npolled) { + ci = udma_jfc->ci; + *udma_jfc->db.db_record = ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + } + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_unlock(&udma_jfc->lock); + + if (!list_empty(&tid_list)) + udma_inv_tid(dev, &tid_list); + + return err == JFC_POLL_ERR ? 
-UDMA_INTER_ERR : npolled; +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 29db1243623e..1b9476c1206a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -18,6 +18,9 @@ #define UDMA_STARS_SWITCH 1 +#define UDMA_JFC_DB_CI_IDX_M GENMASK(21, 0) +#define UDMA_CQE_INV_TOKEN_ID GENMASK(19, 0) + enum udma_jfc_state { UDMA_JFC_STATE_INVALID, UDMA_JFC_STATE_VALID, @@ -131,6 +134,46 @@ struct udma_jfc_ctx { uint32_t rsv11[12]; }; +struct udma_jfc_cqe { + /* DW0 */ + uint32_t s_r : 1; + uint32_t is_jetty : 1; + uint32_t owner : 1; + uint32_t inline_en : 1; + uint32_t opcode : 3; + uint32_t fd : 1; + uint32_t rsv : 8; + uint32_t substatus : 8; + uint32_t status : 8; + /* DW1 */ + uint32_t entry_idx : 16; + uint32_t local_num_l : 16; + /* DW2 */ + uint32_t local_num_h : 4; + uint32_t rmt_idx : 20; + uint32_t rsv1 : 8; + /* DW3 */ + uint32_t tpn : 24; + uint32_t rsv2 : 8; + /* DW4 */ + uint32_t byte_cnt; + /* DW5 ~ DW6 */ + uint32_t user_data_l; + uint32_t user_data_h; + /* DW7 ~ DW10 */ + uint32_t rmt_eid[4]; + /* DW11 ~ DW12 */ + uint32_t data_l; + uint32_t data_h; + /* DW13 ~ DW15 */ + uint32_t inline_data[3]; +}; + +struct udma_inv_tid { + uint32_t tid; + struct list_head list; +}; + static inline struct udma_jfc *to_udma_jfc(struct ubcore_jfc *jfc) { return container_of(jfc, struct udma_jfc, base); @@ -144,5 +187,6 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); +int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index ba5d2b7996f8..d2467aef2f47 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -204,6 +204,7 @@ static struct ubcore_ops g_dev_ops = { .post_jfr_wr 
= udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, .post_jetty_recv_wr = udma_post_jetty_recv_wr, + .poll_jfc = udma_poll_jfc, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 58cf74183185a2ebd2f6412f869a1362d9ff35c6 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 16:55:43 +0800 Subject: [PATCH 047/126] ub: udma: Support rearm jfc and clean jfc. commit 1d17cd5f0a17c47a983524cea1677386e9f20bf3 openEuler This patch adds the ability to rearm jfc and clean jfc. When the user uses the interrupt mode for poll jfc, it needs to be rearmed after each poll. When destroying jetty/jfs/jfr, it is necessary to clean jfc. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 6 +++ drivers/ub/urma/hw/udma/udma_jetty.c | 16 ++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 ++ drivers/ub/urma/hw/udma/udma_jfc.c | 71 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 2 + drivers/ub/urma/hw/udma/udma_jfr.c | 3 ++ drivers/ub/urma/hw/udma/udma_jfs.c | 2 + drivers/ub/urma/hw/udma/udma_main.c | 1 + 8 files changed, 104 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index f8bab657aa6a..c38cb43b2ec6 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -83,6 +83,12 @@ void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); +static inline void udma_write64(struct udma_dev *udma_dev, + uint64_t *val, void __iomem *dest) +{ + writeq(*val, dest); +} + static inline void udma_alloc_kernel_db(struct udma_dev *dev, struct udma_jetty_queue *queue) { diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 61fc94c0898c..87174be534ba 100644 --- 
a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -768,6 +768,19 @@ static int udma_query_jetty_ctx(struct udma_dev *dev, return 0; } +void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfc *send_jfc, + struct ubcore_jfc *recv_jfc) +{ + if (sq->buf.kva) { + if (send_jfc) + udma_clean_jfc(send_jfc, sq->id, dev); + + if (recv_jfc && recv_jfc != send_jfc) + udma_clean_jfc(recv_jfc, sq->id, dev); + } +} + static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_timeout) { uint32_t wait_time; @@ -906,6 +919,9 @@ static void udma_free_jetty(struct ubcore_jetty *jetty) struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + udma_clean_cqe_for_jetty(udma_dev, &udma_jetty->sq, jetty->jetty_cfg.send_jfc, + jetty->jetty_cfg.recv_jfc); + if (dfx_switch) udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jetty, udma_jetty->sq.id); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 4b9749afb64e..f9b3b8f60885 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -264,5 +264,8 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata); +void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct ubcore_jfc *send_jfc, + struct ubcore_jfc *recv_jfc); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index d6a3b53cfe79..12c2f143a376 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -646,6 +646,24 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, return ret; } +int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only) +{ + struct udma_dev *dev = 
to_udma_dev(jfc->ub_dev); + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + struct udma_jfc_db db; + + db.ci = udma_jfc->ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + db.notify = solicited_only; + db.arm_sn = udma_jfc->arm_sn; + db.type = UDMA_CQ_ARM_DB; + db.jfcn = udma_jfc->jfcn; + + udma_write64(dev, (uint64_t *)&db, (void __iomem *)(dev->k_db_base + + UDMA_JFC_HW_DB_OFFSET)); + + return 0; +} + static enum jfc_poll_state udma_get_cr_status(struct udma_dev *dev, uint8_t src_status, uint8_t substatus, @@ -1030,3 +1048,56 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) return err == JFC_POLL_ERR ? -UDMA_INTER_ERR : npolled; } + +void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev) +{ + struct udma_jfc *udma_jfc = to_udma_jfc(jfc); + struct udma_jfc_cqe *dest; + struct udma_jfc_cqe *cqe; + struct ubcore_cr cr; + uint32_t nfreed = 0; + uint32_t local_id; + uint8_t owner_bit; + uint32_t pi; + + if (udma_jfc->mode != (uint32_t)UDMA_NORMAL_JFC_TYPE) + return; + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_lock(&udma_jfc->lock); + + for (pi = udma_jfc->ci; get_next_cqe(udma_jfc, pi) != NULL; ++pi) { + if (pi > udma_jfc->ci + udma_jfc->buf.entry_cnt) + break; + } + while ((int) --pi - (int) udma_jfc->ci >= 0) { + cqe = get_buf_entry(&udma_jfc->buf, pi); + /* make sure cqe buffer is valid */ + rmb(); + local_id = (cqe->local_num_h << UDMA_SRC_IDX_SHIFT) | cqe->local_num_l; + if (local_id == jetty_id) { + if (cqe->s_r == CQE_FOR_RECEIVE) { + cr.local_id = local_id; + (void)udma_update_jfr_idx(udma_dev, cqe, &cr, true); + } + + ++nfreed; + } else if (!!nfreed) { + dest = get_buf_entry(&udma_jfc->buf, pi + nfreed); + /* make sure owner bit is valid */ + rmb(); + owner_bit = dest->owner; + (void)memcpy(dest, cqe, udma_dev->caps.cqe_size); + dest->owner = owner_bit; + } + } + + if (!!nfreed) { + udma_jfc->ci += nfreed; + wmb(); /* be sure software get cqe data before update doorbell */ + *udma_jfc->db.db_record = 
udma_jfc->ci & (uint32_t)UDMA_JFC_DB_CI_IDX_M; + } + + if (!jfc->jfc_cfg.flag.bs.lock_free) + spin_unlock(&udma_jfc->lock); +} diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 1b9476c1206a..02b17b6011d2 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -187,6 +187,8 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, void *data); int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); +int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); +void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev); #endif /* __UDMA_JFC_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 5e01e6a8f141..5de0fc62c6e7 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -569,6 +569,9 @@ static void udma_free_jfr(struct ubcore_jfr *jfr) struct udma_dev *udma_dev = to_udma_dev(jfr->ub_dev); struct udma_jfr *udma_jfr = to_udma_jfr(jfr); + if (udma_jfr->rq.buf.kva && jfr->jfr_cfg.jfc) + udma_clean_jfc(jfr->jfr_cfg.jfc, udma_jfr->rq.id, udma_dev); + if (dfx_switch) udma_dfx_delete_id(udma_dev, &udma_dev->dfx_info->jfr, udma_jfr->rq.id); diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 978465d77672..a7b9576ea87e 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -361,6 +361,8 @@ static void udma_free_jfs(struct ubcore_jfs *jfs) struct udma_dev *dev = to_udma_dev(jfs->ub_dev); struct udma_jfs *ujfs = to_udma_jfs(jfs); + udma_clean_cqe_for_jetty(dev, &ujfs->sq, jfs->jfs_cfg.jfc, NULL); + xa_erase(&dev->jetty_table.xa, ujfs->sq.id); if (refcount_dec_and_test(&ujfs->ae_refcount)) diff --git a/drivers/ub/urma/hw/udma/udma_main.c 
b/drivers/ub/urma/hw/udma/udma_main.c index d2467aef2f47..b6bb3e240d1e 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -181,6 +181,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfc = udma_create_jfc, .modify_jfc = udma_modify_jfc, .destroy_jfc = udma_destroy_jfc, + .rearm_jfc = udma_rearm_jfc, .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, -- Gitee From 289a19c4fd9fac2991a350b2514b71d7770ac584 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 19:35:14 +0800 Subject: [PATCH 048/126] ub: udma: Support get tp list. commit 534649e2be8ee2134415780cefa3a0bd0a5c3e1e openEuler This patch adds the ability to get tp list. During the chain construction process, the driver will obtain the tp list from UBEngine mgmt driver. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 231 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 33 ++++ drivers/ub/urma/hw/udma/udma_dev.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 3 + 4 files changed, 268 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index af1732e1629b..f08830f06fa5 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -115,6 +115,237 @@ int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn) return ret; } +static int udma_ctrlq_get_trans_type(struct udma_dev *dev, + enum ubcore_transport_mode trans_mode, + enum udma_ctrlq_trans_type *tp_type) +{ +#define UDMA_TRANS_MODE_NUM 5 + +struct udma_ctrlq_trans_map { + bool is_valid; + enum udma_ctrlq_trans_type tp_type; +}; + static struct udma_ctrlq_trans_map ctrlq_trans_map[UDMA_TRANS_MODE_NUM] = { + {false, UDMA_CTRLQ_TRANS_TYPE_MAX}, + {true, UDMA_CTRLQ_TRANS_TYPE_TP_RM}, + {true, UDMA_CTRLQ_TRANS_TYPE_TP_RC}, + {false, UDMA_CTRLQ_TRANS_TYPE_MAX}, + {true, 
UDMA_CTRLQ_TRANS_TYPE_TP_UM}, + }; + uint8_t transport_mode = (uint8_t)trans_mode; + + if ((transport_mode < UDMA_TRANS_MODE_NUM) && + ctrlq_trans_map[transport_mode].is_valid) { + *tp_type = ctrlq_trans_map[transport_mode].tp_type; + return 0; + } + + dev_err(dev->dev, "the trans_mode %u is not support.\n", trans_mode); + + return -EINVAL; +} + +static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq_tpid_table, + struct udma_ctrlq_tpid *tpid) +{ + struct udma_ctrlq_tpid *tpid_entity; + int ret; + + if (debug_switch) + dev_info(udev->dev, "udma ctrlq store one tpid start. tpid %u\n", tpid->tpid); + + if (xa_load(ctrlq_tpid_table, tpid->tpid)) { + dev_warn(udev->dev, + "the tpid already exists in ctrlq tpid table, tpid = %u.\n", + tpid->tpid); + return 0; + } + + tpid_entity = kzalloc(sizeof(*tpid_entity), GFP_KERNEL); + if (!tpid_entity) + return -ENOMEM; + + memcpy(tpid_entity, tpid, sizeof(*tpid)); + + ret = xa_err(xa_store(ctrlq_tpid_table, tpid->tpid, tpid_entity, GFP_KERNEL)); + if (ret) { + dev_err(udev->dev, + "store tpid entity failed, ret = %d, tpid = %u.\n", + ret, tpid->tpid); + kfree(tpid_entity); + } + + return ret; +} + +static void udma_ctrlq_erase_one_tpid(struct xarray *ctrlq_tpid_table, + uint32_t tpid) +{ + struct udma_ctrlq_tpid *tpid_entity; + + xa_lock(ctrlq_tpid_table); + tpid_entity = xa_load(ctrlq_tpid_table, tpid); + if (!tpid_entity) { + xa_unlock(ctrlq_tpid_table); + return; + } + __xa_erase(ctrlq_tpid_table, tpid); + kfree(tpid_entity); + xa_unlock(ctrlq_tpid_table); +} + +static int udma_ctrlq_get_tpid_list(struct udma_dev *udev, + struct udma_ctrlq_get_tp_list_req_data *tp_cfg_req, + struct ubcore_get_tp_cfg *tpid_cfg, + struct udma_ctrlq_tpid_list_rsp *tpid_list_resp) +{ + enum udma_ctrlq_trans_type trans_type; + struct ubase_ctrlq_msg msg = {}; + int ret; + + if (!tpid_cfg->flag.bs.ctp) { + if (udma_ctrlq_get_trans_type(udev, tpid_cfg->trans_mode, &trans_type) != 0) { + dev_err(udev->dev, "udma get ctrlq 
trans_type failed, trans_mode = %d.\n", + tpid_cfg->trans_mode); + return -EINVAL; + } + + tp_cfg_req->trans_type = (uint32_t)trans_type; + } else { + tp_cfg_req->trans_type = UDMA_CTRLQ_TRANS_TYPE_CTP; + } + + udma_swap_endian(tpid_cfg->local_eid.raw, tp_cfg_req->seid, + UDMA_EID_SIZE); + udma_swap_endian(tpid_cfg->peer_eid.raw, tp_cfg_req->deid, + UDMA_EID_SIZE); + + udma_ctrlq_set_tp_msg(&msg, (void *)tp_cfg_req, sizeof(*tp_cfg_req), + (void *)tpid_list_resp, sizeof(*tpid_list_resp)); + msg.opcode = UDMA_CMD_CTRLQ_GET_TP_LIST; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "ctrlq send msg failed, ret = %d.\n", ret); + + return ret; +} + +static int udma_ctrlq_store_tpid_list(struct udma_dev *udev, + struct xarray *ctrlq_tpid_table, + struct udma_ctrlq_tpid_list_rsp *tpid_list_resp) +{ + int ret; + int i; + + if (debug_switch) + dev_info(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", + tpid_list_resp->tp_list_cnt); + + for (i = 0; i < (int)tpid_list_resp->tp_list_cnt; i++) { + ret = udma_ctrlq_store_one_tpid(udev, ctrlq_tpid_table, + &tpid_list_resp->tpid_list[i]); + if (ret) + goto err_store_one_tpid; + } + + return 0; + +err_store_one_tpid: + for (i--; i >= 0; i--) + udma_ctrlq_erase_one_tpid(ctrlq_tpid_table, tpid_list_resp->tpid_list[i].tpid); + + return ret; +} + +int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata) +{ + struct udma_ctrlq_get_tp_list_req_data tp_cfg_req = {}; + struct udma_ctrlq_tpid_list_rsp tpid_list_resp = {}; + struct udma_dev *udev = to_udma_dev(dev); + int ret; + int i; + + if (!udata) + tp_cfg_req.flag = UDMA_DEFAULT_PID; + else + tp_cfg_req.flag = (uint32_t)current->tgid & UDMA_PID_MASK; + + ret = udma_ctrlq_get_tpid_list(udev, &tp_cfg_req, tpid_cfg, &tpid_list_resp); + if (ret) { + dev_err(udev->dev, "udma ctrlq get tpid list failed, ret = %d.\n", ret); + return ret; 
+ } + + if (tpid_list_resp.tp_list_cnt == 0 || tpid_list_resp.tp_list_cnt > *tp_cnt) { + dev_err(udev->dev, + "check tp list count failed, count = %u.\n", + tpid_list_resp.tp_list_cnt); + return -EINVAL; + } + + for (i = 0; i < tpid_list_resp.tp_list_cnt; i++) { + tp_list[i].tp_handle.bs.tpid = tpid_list_resp.tpid_list[i].tpid; + tp_list[i].tp_handle.bs.tpn_start = tpid_list_resp.tpid_list[i].tpn_start; + tp_list[i].tp_handle.bs.tp_cnt = + tpid_list_resp.tpid_list[i].tpn_cnt & UDMA_TPN_CNT_MASK; + } + *tp_cnt = tpid_list_resp.tp_list_cnt; + + ret = udma_ctrlq_store_tpid_list(udev, &udev->ctrlq_tpid_table, &tpid_list_resp); + if (ret) + dev_err(udev->dev, "udma ctrlq store list failed, ret = %d.\n", ret); + + return ret; +} + +void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, + bool is_need_flush) +{ + struct udma_ctrlq_tpid *tpid_entity = NULL; + unsigned long tpid = 0; + + xa_lock(ctrlq_tpid_table); + if (!xa_empty(ctrlq_tpid_table)) { + xa_for_each(ctrlq_tpid_table, tpid, tpid_entity) { + __xa_erase(ctrlq_tpid_table, tpid); + kfree(tpid_entity); + } + } + xa_unlock(ctrlq_tpid_table); + xa_destroy(ctrlq_tpid_table); +} + +int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) +{ + struct udma_req_msg *req_msg; + struct ubase_cmd_buf in; + uint32_t msg_len; + int ret; + + msg_len = sizeof(*req_msg) + req->len; + req_msg = kzalloc(msg_len, GFP_KERNEL); + if (!req_msg) + return -ENOMEM; + + req_msg->resp_code = opcode; + + (void)memcpy(&req_msg->req, req, sizeof(*req)); + (void)memcpy(req_msg->req.data, req->data, req->len); + udma_fill_buf(&in, UBASE_OPC_UE_TO_MUE, false, msg_len, req_msg); + + ret = ubase_cmd_send_in(udma_dev->comdev.adev, &in); + if (ret) + dev_err(udma_dev->dev, + "send req msg cmd failed, ret is %d.\n", ret); + + kfree(req_msg); + + return ret; +} + int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode) { diff --git 
a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 6672f8ea01ec..170597ae4b20 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -8,7 +8,12 @@ #define UDMA_EID_SIZE 16 #define UDMA_CNA_SIZE 16 +#define UDMA_PID_MASK 24 +#define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 +#define UDMA_MAX_TPID_NUM 5 + +#define UDMA_TPN_CNT_MASK 0x1F enum udma_ctrlq_cmd_code_type { UDMA_CMD_CTRLQ_REMOVE_SINGLE_TP = 0x13, @@ -36,6 +41,19 @@ enum udma_ctrlq_tpid_status { UDMA_CTRLQ_TPID_IDLE, }; +struct udma_ctrlq_tpid { + uint32_t tpid : 24; + uint32_t tpn_cnt : 8; + uint32_t tpn_start : 24; + uint32_t rsv : 8; +}; + +struct udma_ctrlq_tpid_list_rsp { + uint32_t tp_list_cnt : 16; + uint32_t rsv : 16; + struct udma_ctrlq_tpid tpid_list[UDMA_MAX_TPID_NUM]; +}; + struct udma_ctrlq_tp_flush_done_req_data { uint32_t tpn : 24; uint32_t rsv : 8; @@ -79,6 +97,14 @@ struct udma_ctrlq_check_tp_active_rsp_info { struct udma_ctrlq_check_tp_active_rsp_data data[]; }; +struct udma_ctrlq_get_tp_list_req_data { + uint8_t seid[UDMA_EID_SIZE]; + uint8_t deid[UDMA_EID_SIZE]; + uint32_t trans_type : 4; + uint32_t rsv : 4; + uint32_t flag : 24; +}; + enum udma_cmd_ue_opcode { UDMA_CMD_UBCORE_COMMAND = 0x1, UDMA_CMD_NOTIFY_MUE_SAVE_TP = 0x2, @@ -101,7 +127,14 @@ struct udma_notify_flush_done { int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn); int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status); +int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata); + +void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, + bool is_need_flush); int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode); +int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode); 
#endif /* __UDMA_CTRLQ_TP_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index 58c5da4a2234..f4ddf294b769 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -115,6 +115,7 @@ struct udma_dev { struct xarray crq_nb_table; struct xarray npu_nb_table; struct mutex npu_nb_mutex; + struct xarray ctrlq_tpid_table; struct xarray tpn_ue_idx_table; struct ubase_event_nb *ae_event_addr[UBASE_EVENT_TYPE_MAX]; resource_size_t db_base; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b6bb3e240d1e..e1c2f1ab359d 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -201,6 +201,7 @@ static struct ubcore_ops g_dev_ops = { .unimport_jetty = udma_unimport_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, + .get_tp_list = udma_get_tp_list, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, @@ -238,6 +239,7 @@ static void udma_destroy_tp_ue_idx_table(struct udma_dev *udma_dev) void udma_destroy_tables(struct udma_dev *udma_dev) { + udma_ctrlq_destroy_tpid_list(udma_dev, &udma_dev->ctrlq_tpid_table, false); udma_destroy_eid_table(udma_dev); mutex_destroy(&udma_dev->disable_ue_rx_mutex); if (!ida_is_empty(&udma_dev->rsvd_jetty_ida_table.ida)) @@ -299,6 +301,7 @@ static void udma_init_managed_by_ctrl_cpu_table(struct udma_dev *udma_dev) { mutex_init(&udma_dev->eid_mutex); xa_init(&udma_dev->eid_table); + xa_init(&udma_dev->ctrlq_tpid_table); } int udma_init_tables(struct udma_dev *udma_dev) -- Gitee From 1b1f0c71f951b9e02426d771b06711a138b3e957 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 21:27:14 +0800 Subject: [PATCH 049/126] ub: udma: Support active tp. commit a315631fec986888615f469189c30799aab3c610 openEuler This patch adds the ability to active tp. 
During the chain construction process, the driver will post ctrlq command to active tp. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 96 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 21 ++++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 118 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index f08830f06fa5..df84c748a866 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -145,6 +145,30 @@ struct udma_ctrlq_trans_map { return -EINVAL; } +static int udma_send_req_to_mue(struct udma_dev *dev, union ubcore_tp_handle *tp_handle) +{ + uint32_t data_len = (uint32_t)sizeof(struct udma_ue_tp_info); + struct udma_ue_tp_info *data; + struct ubcore_req *req_msg; + int ret; + + req_msg = kzalloc(sizeof(*req_msg) + data_len, GFP_KERNEL); + if (!req_msg) + return -ENOMEM; + + data = (struct udma_ue_tp_info *)req_msg->data; + data->start_tpn = tp_handle->bs.tpn_start; + data->tp_cnt = tp_handle->bs.tp_cnt; + req_msg->len = data_len; + ret = send_req_to_mue(dev, req_msg, UDMA_CMD_NOTIFY_MUE_SAVE_TP); + if (ret) + dev_err(dev->dev, "fail to notify mue save tp, ret %d.\n", ret); + + kfree(req_msg); + + return ret; +} + static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq_tpid_table, struct udma_ctrlq_tpid *tpid) { @@ -318,6 +342,62 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi xa_destroy(ctrlq_tpid_table); } +static int udma_k_ctrlq_create_active_tp_msg(struct udma_dev *udev, + struct ubcore_active_tp_cfg *active_cfg, + uint32_t *tp_id) +{ + struct udma_ctrlq_active_tp_resp_data active_tp_resp = {}; + struct udma_ctrlq_active_tp_req_data active_tp_req = {}; + struct ubase_ctrlq_msg msg = {}; + int ret; + + active_tp_req.local_tp_id = active_cfg->tp_handle.bs.tpid; + active_tp_req.local_tpn_cnt = 
active_cfg->tp_handle.bs.tp_cnt; + active_tp_req.local_tpn_start = active_cfg->tp_handle.bs.tpn_start; + active_tp_req.local_psn = active_cfg->tp_attr.tx_psn; + + active_tp_req.remote_tp_id = active_cfg->peer_tp_handle.bs.tpid; + active_tp_req.remote_tpn_cnt = active_cfg->peer_tp_handle.bs.tp_cnt; + active_tp_req.remote_tpn_start = active_cfg->peer_tp_handle.bs.tpn_start; + active_tp_req.remote_psn = active_cfg->tp_attr.rx_psn; + + if (debug_switch) + udma_dfx_ctx_print(udev, "udma create active tp msg info", + active_tp_req.local_tp_id, + sizeof(struct udma_ctrlq_active_tp_req_data) / sizeof(uint32_t), + (uint32_t *)&active_tp_req); + + msg.opcode = UDMA_CMD_CTRLQ_ACTIVE_TP; + udma_ctrlq_set_tp_msg(&msg, (void *)&active_tp_req, sizeof(active_tp_req), + (void *)&active_tp_resp, sizeof(active_tp_resp)); + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "udma active tp send failed, ret = %d.\n", ret); + + *tp_id = active_tp_resp.local_tp_id; + + return ret; +} + +int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, + struct ubcore_active_tp_cfg *active_cfg) +{ + uint32_t tp_id = active_cfg->tp_handle.bs.tpid; + int ret; + + ret = udma_k_ctrlq_create_active_tp_msg(dev, active_cfg, &tp_id); + if (ret) + return ret; + + active_cfg->tp_handle.bs.tpid = tp_id; + + if (dev->is_ue) + (void)udma_send_req_to_mue(dev, &(active_cfg->tp_handle)); + + return 0; +} + int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) { struct udma_req_msg *req_msg; @@ -376,3 +456,19 @@ int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, return ret; } + +int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + int ret; + + if (debug_switch) + udma_dfx_ctx_print(udma_dev, "udma active tp ex", active_cfg->tp_handle.bs.tpid, + sizeof(struct ubcore_active_tp_cfg) / sizeof(uint32_t), + (uint32_t *)active_cfg); + ret = 
udma_ctrlq_set_active_tp_ex(udma_dev, active_cfg); + if (ret) + dev_err(udma_dev->dev, "Failed to set active tp msg, ret %d.\n", ret); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 170597ae4b20..248d20a272d1 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -54,6 +54,26 @@ struct udma_ctrlq_tpid_list_rsp { struct udma_ctrlq_tpid tpid_list[UDMA_MAX_TPID_NUM]; }; +struct udma_ctrlq_active_tp_req_data { + uint32_t local_tp_id : 24; + uint32_t local_tpn_cnt : 8; + uint32_t local_tpn_start : 24; + uint32_t rsv : 8; + uint32_t remote_tp_id : 24; + uint32_t remote_tpn_cnt : 8; + uint32_t remote_tpn_start : 24; + uint32_t rsv1 : 8; + uint32_t local_psn; + uint32_t remote_psn; +}; + +struct udma_ctrlq_active_tp_resp_data { + uint32_t local_tp_id : 24; + uint32_t local_tpn_cnt : 8; + uint32_t local_tpn_start : 24; + uint32_t rsv : 8; +}; + struct udma_ctrlq_tp_flush_done_req_data { uint32_t tpn : 24; uint32_t rsv : 8; @@ -136,5 +156,6 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode); int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode); +int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg); #endif /* __UDMA_CTRLQ_TP_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index e1c2f1ab359d..f9bac04ba1a2 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -202,6 +202,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, .get_tp_list = udma_get_tp_list, + .active_tp = udma_active_tp, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = 
udma_post_jetty_send_wr, -- Gitee From 896d75523cc786321128846a953d0c3d9f48aea8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 21 Aug 2025 21:50:18 +0800 Subject: [PATCH 050/126] ub: udma: Support deactivate tp. commit 9545e058c4f723a72d434b7ddd0d137b9ff012b6 openEuler This patch adds the ability to deactivate tp. During the chain construction process, the driver will post ctrlq command to deactivate tp. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 2 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 54 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 +++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 4 files changed, 68 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index c38cb43b2ec6..d7f7312d2b4c 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -9,6 +9,8 @@ #include "udma_ctx.h" #include "udma_dev.h" +#define UDMA_TPHANDLE_TPID_SHIFT 0xFFFFFF + struct udma_jetty_grp { struct ubcore_jetty_group ubcore_jetty_grp; uint32_t start_jetty_id; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index df84c748a866..396880ddb7d7 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -398,6 +398,49 @@ int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, return 0; } +static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata) +{ +#define UDMA_RSP_TP_MUL 2 + uint32_t tp_id = tp_handle.bs.tpid & UDMA_TPHANDLE_TPID_SHIFT; + struct udma_ctrlq_deactive_tp_req_data deactive_tp_req = {}; + uint32_t tp_num = tp_handle.bs.tp_cnt; + struct ubase_ctrlq_msg msg = {}; + int ret; + + if (tp_num) { + ret = udma_close_ue_rx(udev, true, false, false, tp_num * UDMA_RSP_TP_MUL); + if (ret) { + dev_err(udev->dev, "close ue rx failed in deactivate tp.\n"); 
+ return ret; + } + } + + msg.opcode = UDMA_CMD_CTRLQ_DEACTIVE_TP; + deactive_tp_req.tp_id = tp_id; + deactive_tp_req.tpn_cnt = tp_handle.bs.tp_cnt; + deactive_tp_req.start_tpn = tp_handle.bs.tpn_start; + if (!udata) + deactive_tp_req.pid_flag = UDMA_DEFAULT_PID; + else + deactive_tp_req.pid_flag = (uint32_t)current->tgid & UDMA_PID_MASK; + + udma_ctrlq_set_tp_msg(&msg, (void *)&deactive_tp_req, sizeof(deactive_tp_req), NULL, 0); + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret != -EAGAIN && ret) { + dev_err(udev->dev, "deactivate tp send msg failed, tp_id = %u, ret = %d.\n", + tp_id, ret); + if (tp_num) + udma_open_ue_rx(udev, true, false, false, tp_num * UDMA_RSP_TP_MUL); + return ret; + } + + udma_ctrlq_erase_one_tpid(&udev->ctrlq_tpid_table, tp_id); + + return (ret == -EAGAIN) ? 0 : ret; +} + int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) { struct udma_req_msg *req_msg; @@ -472,3 +515,14 @@ int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *activ return ret; } + +int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata) +{ + struct udma_dev *udma_dev = to_udma_dev(dev); + + if (debug_switch) + dev_info(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", tp_handle.bs.tpid); + + return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata); +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 248d20a272d1..5eb470a9e3d7 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -74,6 +74,15 @@ struct udma_ctrlq_active_tp_resp_data { uint32_t rsv : 8; }; +struct udma_ctrlq_deactive_tp_req_data { + uint32_t tp_id : 24; + uint32_t tpn_cnt : 8; + uint32_t start_tpn : 24; + uint32_t rsv : 8; + uint32_t pid_flag : 24; + uint32_t rsv1 : 8; +}; + struct udma_ctrlq_tp_flush_done_req_data { uint32_t tpn : 24; uint32_t rsv : 8; @@ -157,5 +166,7 @@ 
int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, uint16_t opcode); int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode); int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *active_cfg); +int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata); #endif /* __UDMA_CTRLQ_TP_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index f9bac04ba1a2..f7a5adcd281b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -203,6 +203,7 @@ static struct ubcore_ops g_dev_ops = { .delete_jetty_grp = udma_delete_jetty_grp, .get_tp_list = udma_get_tp_list, .active_tp = udma_active_tp, + .deactive_tp = udma_deactive_tp, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, -- Gitee From ddcd2273f26027109951b2d7366b035675e2a1a9 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 10:28:49 +0800 Subject: [PATCH 051/126] ub: udma: Support query NPU info. commit 4b13fcb18fad1bea907631524a08c3b59d5a903f openEuler This patch adds the ability to query NPU (Neural network Processing Unit)info. Users can send a ctrlq message through user control to query. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 22 ++++ drivers/ub/urma/hw/udma/udma_common.h | 1 + drivers/ub/urma/hw/udma/udma_ctl.c | 129 ++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 151 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 ++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 6 files changed, 316 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 375ed4826f6a..017216169ea3 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -430,6 +430,28 @@ void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex) mutex_init(udma_mutex); } +void udma_destroy_npu_cb_table(struct udma_dev *dev) +{ + struct udma_ctrlq_event_nb *nb = NULL; + unsigned long index = 0; + + mutex_lock(&dev->npu_nb_mutex); + if (!xa_empty(&dev->npu_nb_table)) { + xa_for_each(&dev->npu_nb_table, index, nb) { + ubase_ctrlq_unregister_crq_event(dev->comdev.adev, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + nb->opcode); + __xa_erase(&dev->npu_nb_table, index); + kfree(nb); + nb = NULL; + } + } + + mutex_unlock(&dev->npu_nb_mutex); + xa_destroy(&dev->npu_nb_table); + mutex_destroy(&dev->npu_nb_mutex); +} + void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name) { diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index d7f7312d2b4c..300357af8895 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -72,6 +72,7 @@ struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); +void udma_destroy_npu_cb_table(struct udma_dev *dev); 
void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); void udma_destroy_eid_table(struct udma_dev *udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index b06ff3ea61cf..ac2e573fa8db 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -371,3 +371,132 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc return ret; } + +static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { + [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, + [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, + [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, + [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, +}; + +static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { + [UDMA_USER_CTL_CREATE_JFS_EX] = NULL, + [UDMA_USER_CTL_DELETE_JFS_EX] = NULL, + [UDMA_USER_CTL_CREATE_JFC_EX] = NULL, + [UDMA_USER_CTL_DELETE_JFC_EX] = NULL, + [UDMA_USER_CTL_SET_CQE_ADDR] = NULL, + [UDMA_USER_CTL_QUERY_UE_INFO] = NULL, + [UDMA_USER_CTL_GET_DEV_RES_RATIO] = NULL, + [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, + [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_UBMEM_INFO] = NULL, + [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = NULL, +}; + +static int udma_user_data(struct ubcore_device *dev, + struct ubcore_user_ctl *k_user_ctl) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubcore_user_ctl_out out = {}; + struct ubcore_user_ctl_in in = {}; + unsigned long byte; + int ret; + + if (k_user_ctl->in.len >= UDMA_HW_PAGE_SIZE || k_user_ctl->out.len >= UDMA_HW_PAGE_SIZE) { + dev_err(udev->dev, "The len exceeds the maximum value in user ctrl.\n"); + return -EINVAL; + } + + in.opcode = k_user_ctl->in.opcode; + if (!g_udma_user_ctl_u_ops[in.opcode]) { + dev_err(udev->dev, 
"invalid user opcode: 0x%x.\n", in.opcode); + return -EINVAL; + } + + if (k_user_ctl->in.len) { + in.addr = (uint64_t)kzalloc(k_user_ctl->in.len, GFP_KERNEL); + if (!in.addr) + return -ENOMEM; + + in.len = k_user_ctl->in.len; + byte = copy_from_user((void *)(uintptr_t)in.addr, + (void __user *)(uintptr_t)k_user_ctl->in.addr, + k_user_ctl->in.len); + if (byte) { + dev_err(udev->dev, + "failed to copy user data in user ctrl, byte = %lu.\n", byte); + kfree((void *)in.addr); + return -EFAULT; + } + } + + if (k_user_ctl->out.len) { + out.addr = (uint64_t)kzalloc(k_user_ctl->out.len, GFP_KERNEL); + if (!out.addr) { + kfree((void *)in.addr); + + return -ENOMEM; + } + out.len = k_user_ctl->out.len; + + if (k_user_ctl->out.addr) { + byte = copy_from_user((void *)(uintptr_t)out.addr, + (void __user *)(uintptr_t)k_user_ctl->out.addr, + k_user_ctl->out.len); + if (byte) { + dev_err(udev->dev, + "failed to copy user data out user ctrl, byte = %lu.\n", + byte); + kfree((void *)out.addr); + kfree((void *)in.addr); + + return -EFAULT; + } + } + } + + ret = g_udma_user_ctl_u_ops[in.opcode](dev, k_user_ctl->uctx, &in, &out); + kfree((void *)in.addr); + + if (out.addr) { + byte = copy_to_user((void __user *)(uintptr_t)k_user_ctl->out.addr, + (void *)(uintptr_t)out.addr, min(out.len, k_user_ctl->out.len)); + if (byte) { + dev_err(udev->dev, + "copy resp to user failed in user ctrl, byte = %lu.\n", byte); + ret = -EFAULT; + } + + kfree((void *)out.addr); + } + + return ret; +} + +int udma_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *k_user_ctl) +{ + struct udma_dev *udev; + + if (dev == NULL || k_user_ctl == NULL) + return -EINVAL; + + udev = to_udma_dev(dev); + + if (k_user_ctl->in.opcode >= UDMA_USER_CTL_MAX) { + dev_err(udev->dev, "invalid opcode: 0x%x.\n", k_user_ctl->in.opcode); + return -EINVAL; + } + + if (k_user_ctl->uctx) + return udma_user_data(dev, k_user_ctl); + + if (!g_udma_user_ctl_k_ops[k_user_ctl->in.opcode]) { + dev_err(udev->dev, "invalid user opcode: 
0x%x.\n", k_user_ctl->in.opcode); + return -EINVAL; + } + + return g_udma_user_ctl_k_ops[k_user_ctl->in.opcode](dev, k_user_ctl->uctx, &k_user_ctl->in, + &k_user_ctl->out); +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 396880ddb7d7..d28e206fb277 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -115,6 +115,157 @@ int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn) return ret; } +int udma_get_dev_resource_ratio(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev_resource_ratio dev_res = {}; + struct udma_dev_pair_info dev_res_out = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + int ret = 0; + + if (udma_check_base_param(in->addr, in->len, sizeof(dev_res.index))) { + dev_err(udev->dev, "parameter invalid in get dev res, len = %u.\n", in->len); + return -EINVAL; + } + + if (out->addr == 0 || out->len != sizeof(dev_res_out)) { + dev_err(udev->dev, "get dev resource ratio, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + memcpy(&dev_res.index, (void *)(uintptr_t)in->addr, sizeof(dev_res.index)); + + ret = ubase_get_bus_eid(udev->comdev.adev, &dev_res.eid); + if (ret) { + dev_err(udev->dev, "get dev bus eid failed, ret is %d.\n", ret); + return ret; + } + + ctrlq_msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(dev_res); + ctrlq_msg.in = (void *)&dev_res; + ctrlq_msg.out_size = sizeof(dev_res_out); + ctrlq_msg.out = &dev_res_out; + ctrlq_msg.opcode = UDMA_CTRLQ_GET_DEV_RESOURCE_RATIO; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + memcpy((void 
*)(uintptr_t)out->addr, &dev_res_out, sizeof(dev_res_out)); + + return ret; +} + +static int udma_dev_res_ratio_ctrlq_handler(struct auxiliary_device *adev, + uint8_t service_ver, void *data, + uint16_t len, uint16_t seq) +{ + struct udma_dev *udev = (struct udma_dev *)get_udma_dev(adev); + struct udma_ctrlq_event_nb *udma_cb; + int ret; + + mutex_lock(&udev->npu_nb_mutex); + udma_cb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + if (!udma_cb) { + dev_err(udev->dev, "failed to query npu info cb while xa_load.\n"); + mutex_unlock(&udev->npu_nb_mutex); + return -EINVAL; + } + + ret = udma_cb->crq_handler(&udev->ub_dev, data, len); + if (ret) + dev_err(udev->dev, "npu crq handler failed, ret = %d.\n", ret); + mutex_unlock(&udev->npu_nb_mutex); + + return ret; +} + +int udma_register_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct ubase_ctrlq_event_nb ubase_cb = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ctrlq_event_nb *udma_cb; + int ret; + + if (udma_check_base_param(in->addr, in->len, sizeof(udma_cb->crq_handler))) { + dev_err(udev->dev, "parameter invalid in register npu cb, len = %u.\n", in->len); + return -EINVAL; + } + + udma_cb = kzalloc(sizeof(*udma_cb), GFP_KERNEL); + if (!udma_cb) + return -ENOMEM; + + udma_cb->opcode = UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO; + udma_cb->crq_handler = (void *)(uintptr_t)in->addr; + + mutex_lock(&udev->npu_nb_mutex); + if (xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO)) { + dev_err(udev->dev, "query npu info callback exist.\n"); + ret = -EINVAL; + goto err_release_udma_cb; + } + ret = xa_err(__xa_store(&udev->npu_nb_table, udma_cb->opcode, udma_cb, GFP_KERNEL)); + if (ret) { + dev_err(udev->dev, + "save crq nb entry failed, opcode is %u, ret is %d.\n", + udma_cb->opcode, ret); + goto err_release_udma_cb; + } + + ubase_cb.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + 
ubase_cb.opcode = UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO; + ubase_cb.back = udev->comdev.adev; + ubase_cb.crq_handler = udma_dev_res_ratio_ctrlq_handler; + ret = ubase_ctrlq_register_crq_event(udev->comdev.adev, &ubase_cb); + if (ret) { + __xa_erase(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + dev_err(udev->dev, "ubase register npu crq event failed, ret is %d.\n", ret); + goto err_release_udma_cb; + } + mutex_unlock(&udev->npu_nb_mutex); + + return 0; + +err_release_udma_cb: + mutex_unlock(&udev->npu_nb_mutex); + kfree(udma_cb); + return ret; +} + +int udma_unregister_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ctrlq_event_nb *nb; + + ubase_ctrlq_unregister_crq_event(udev->comdev.adev, + UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, + UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + + mutex_lock(&udev->npu_nb_mutex); + nb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + if (!nb) { + dev_warn(udev->dev, "query npu info cb not exist.\n"); + goto err_find_npu_nb; + } + + __xa_erase(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); + kfree(nb); + nb = NULL; + +err_find_npu_nb: + mutex_unlock(&udev->npu_nb_mutex); + return 0; +} + static int udma_ctrlq_get_trans_type(struct udma_dev *dev, enum ubcore_transport_mode trans_mode, enum udma_ctrlq_trans_type *tp_type) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 5eb470a9e3d7..ba43f3590417 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -11,6 +11,7 @@ #define UDMA_PID_MASK 24 #define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 +#define UDMA_MAX_UE_IDX 256 #define UDMA_MAX_TPID_NUM 5 #define UDMA_TPN_CNT_MASK 0x1F @@ -154,6 +155,16 @@ struct udma_notify_flush_done { uint32_t tpn; }; +struct udma_dev_resource_ratio { + struct ubase_bus_eid eid; + 
uint32_t index; +}; + +int udma_register_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + +int udma_unregister_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); int udma_ctrlq_tp_flush_done(struct udma_dev *udev, uint32_t tpn); int udma_ctrlq_remove_single_tp(struct udma_dev *udev, uint32_t tpn, int status); int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index f7a5adcd281b..3111c535b549 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -204,6 +204,7 @@ static struct ubcore_ops g_dev_ops = { .get_tp_list = udma_get_tp_list, .active_tp = udma_active_tp, .deactive_tp = udma_deactive_tp, + .user_ctl = udma_user_ctl, .post_jfs_wr = udma_post_jfs_wr, .post_jfr_wr = udma_post_jfr_wr, .post_jetty_send_wr = udma_post_jetty_send_wr, @@ -254,6 +255,7 @@ void udma_destroy_tables(struct udma_dev *udma_dev) xa_destroy(&udma_dev->crq_nb_table); udma_destroy_tp_ue_idx_table(udma_dev); + udma_destroy_npu_cb_table(udma_dev); if (!xa_empty(&udma_dev->ksva_table)) dev_err(udma_dev->dev, "ksva table is not empty.\n"); -- Gitee From 4f415568686a5ec9bebdce585ca5d873b85b3c5c Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 11:10:44 +0800 Subject: [PATCH 052/126] ub: udma: Support dump ae aux info. commit 57fd1bc217e5c6026902e8c0f0a3b46946a09825 openEuler This patch adds the ability to dump ae aux info. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 113 +++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index ac2e573fa8db..8147a784dd2f 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,117 @@ #include #include "udma_def.h" +const char *udma_ae_aux_info_type_str[] = { + "TP_RRP_FLUSH_TIMER_PKT_CNT", + "TPP_DFX5", + "TWP_AE_DFX", + "TP_RRP_ERR_FLG_0", + "TP_RRP_ERR_FLG_1", + "TP_RWP_INNER_ALM", + "TP_RCP_INNER_ALM", + "LQC_TA_TQEP_WQE_ERR", + "LQC_TA_CQM_CQE_INNER_ALARM", +}; + +static void dump_fill_aux_info(struct udma_dev *dev, struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info, + enum udma_ae_aux_info_type *type, uint32_t aux_info_num) +{ + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->ae_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", udma_ae_aux_info_type_str[type[i]], + info->ae_aux_info[type[i]]); +} + +static void dump_ae_tp_flush_done_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TP_RRP_FLUSH_TIMER_PKT_CNT, + TPP_DFX5, + }; + + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_tp_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TWP_AE_DFX_FOR_AE, + TP_RRP_ERR_FLG_0_FOR_AE, + }; + 
uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_jetty_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + TP_RRP_ERR_FLG_0_FOR_AE, + TP_RRP_ERR_FLG_1, + TP_RWP_INNER_ALM_FOR_AE, + TP_RCP_INNER_ALM_FOR_AE, + LQC_TA_TQEP_WQE_ERR, + LQC_TA_CQM_CQE_INNER_ALARM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_jfc_err_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + enum udma_ae_aux_info_type type[] = { + LQC_TA_CQM_CQE_INNER_ALARM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + + dump_fill_aux_info(dev, aux_info_out, info, type, aux_info_num); +} + +static void dump_ae_aux_info(struct udma_dev *dev, + struct udma_ae_aux_info_out *aux_info_out, + struct udma_cmd_query_ae_aux_info *info) +{ + switch (info->event_type) { + case UBASE_EVENT_TYPE_TP_FLUSH_DONE: + dump_ae_tp_flush_done_aux_info(dev, aux_info_out, info); + break; + case UBASE_EVENT_TYPE_TP_LEVEL_ERROR: + dump_ae_tp_err_aux_info(dev, aux_info_out, info); + break; + case UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR: + if (info->sub_type == UBASE_SUBEVENT_TYPE_JFS_CHECK_ERROR) + dump_ae_jetty_err_aux_info(dev, aux_info_out, info); + else + dump_ae_jfc_err_aux_info(dev, aux_info_out, info); + break; + default: + break; + } +} + static int send_cmd_query_cqe_aux_info(struct udma_dev *udma_dev, struct udma_cmd_query_cqe_aux_info *info) { @@ -362,6 +473,8 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc return ret; } + dump_ae_aux_info(udma_dev, &aux_info_out, &info); + ret = copy_out_ae_data_to_user(udma_dev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udma_dev->dev, -- Gitee From 
a4f7ea636f05872caabab0e92b1ad2490838e3e5 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 11:40:04 +0800 Subject: [PATCH 053/126] ub: udma: Support dump ce aux info. commit ea37ae296d7c96d306197138e09a870bc3008c16 openEuler This patch adds the ability to dump ce aux info. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 360 ++++++++++++++++++++++++++++- 1 file changed, 356 insertions(+), 4 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 8147a784dd2f..49cb1ebb0895 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -18,6 +18,51 @@ #include #include "udma_def.h" +const char *udma_cqe_aux_info_type_str[] = { + "TPP2TQEM_WR_CNT", + "DEVICE_RAS_STATUS_2", + "RXDMA_WR_PAYL_AXI_ERR", + "RXDMA_HEAD_SPLIT_ERR_FLAG0", + "RXDMA_HEAD_SPLIT_ERR_FLAG1", + "RXDMA_HEAD_SPLIT_ERR_FLAG2", + "RXDMA_HEAD_SPLIT_ERR_FLAG3", + "TP_RCP_INNER_ALM", + "TWP_AE_DFX", + "PA_OUT_PKT_ERR_CNT", + "TP_DAM_AXI_ALARM", + "TP_DAM_VFT_BT_ALARM", + "TP_EUM_AXI_ALARM", + "TP_EUM_VFT_BT_ALARM", + "TP_TPMM_AXI_ALARM", + "TP_TPMM_VFT_BT_ALARM", + "TP_TPGCM_AXI_ALARM", + "TP_TPGCM_VFT_BT_ALARM", + "TWP_ALM", + "TP_RWP_INNER_ALM", + "TWP_DFX21", + "LQC_TA_RNR_TANACK_CNT", + "FVT", + "RQMT0", + "RQMT1", + "RQMT2", + "RQMT3", + "RQMT4", + "RQMT5", + "RQMT6", + "RQMT7", + "RQMT8", + "RQMT9", + "RQMT10", + "RQMT11", + "RQMT12", + "RQMT13", + "RQMT14", + "RQMT15", + "PROC_ERROR_ALM", + "LQC_TA_TIMEOUT_TAACK_CNT", + "TP_RRP_ERR_FLG_0", +}; + const char *udma_ae_aux_info_type_str[] = { "TP_RRP_FLUSH_TIMER_PKT_CNT", "TPP_DFX5", @@ -30,6 +75,305 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static void dump_cqe_client_loc_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TPP2TQEM_WR_CNT, + 
DEVICE_RAS_STATUS_2, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_loc_access_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TWP_AE_DFX_FOR_CQE, + PA_OUT_PKT_ERR_CNT, + TP_DAM_AXI_ALARM, + TP_DAM_VFT_BT_ALARM, + TP_EUM_AXI_ALARM, + TP_EUM_VFT_BT_ALARM, + TP_TPMM_AXI_ALARM, + TP_TPMM_VFT_BT_ALARM, + TP_TPGCM_AXI_ALARM, + TP_TPGCM_VFT_BT_ALARM, + DEVICE_RAS_STATUS_2, + TWP_ALM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_rem_resp_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + }; + 
uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void +dump_cqe_client_rem_access_abort_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TP_RRP_ERR_FLG_0_FOR_CQE, + TPP2TQEM_WR_CNT, + TWP_DFX21 + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_client_ack_timeout_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + LQC_TA_TIMEOUT_TAACK_CNT, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; 
i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void +dump_cqe_client_rnr_retry_cnt_exc_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + LQC_TA_RNR_TANACK_CNT, + FVT, + RQMT0, + RQMT1, + RQMT2, + RQMT3, + RQMT4, + RQMT5, + RQMT6, + RQMT7, + RQMT8, + RQMT9, + RQMT10, + RQMT11, + RQMT12, + RQMT13, + RQMT14, + RQMT15, + PROC_ERROR_ALM, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_server_loc_access_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + RXDMA_WR_PAYL_AXI_ERR, + RXDMA_HEAD_SPLIT_ERR_FLAG0, + RXDMA_HEAD_SPLIT_ERR_FLAG1, + RXDMA_HEAD_SPLIT_ERR_FLAG2, + RXDMA_HEAD_SPLIT_ERR_FLAG3, + TP_RCP_INNER_ALM_FOR_CQE, + TP_RRP_ERR_FLG_0_FOR_CQE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + 
aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_server_loc_len_err_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + enum udma_cqe_aux_info_type type[] = { + TP_RWP_INNER_ALM_FOR_CQE, + }; + uint32_t aux_info_num = ARRAY_SIZE(type); + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= aux_info_num) { + for (i = 0; i < aux_info_num; i++) { + aux_info_out->aux_info_type[i] = type[i]; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[type[i]]; + } + aux_info_out->aux_info_num = aux_info_num; + } + + for (i = 0; i < aux_info_num; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[type[i]], info->cqe_aux_info[type[i]]); +} + +static void dump_cqe_all_aux_info(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) +{ + int i; + + if (aux_info_out->aux_info_type != NULL && + aux_info_out->aux_info_value != NULL && + aux_info_out->aux_info_num >= MAX_CQE_AUX_INFO_TYPE_NUM) { + for (i = 0; i < MAX_CQE_AUX_INFO_TYPE_NUM; i++) { + aux_info_out->aux_info_type[i] = i; + aux_info_out->aux_info_value[i] = info->cqe_aux_info[i]; + } + aux_info_out->aux_info_num = MAX_CQE_AUX_INFO_TYPE_NUM; + } + + for (i = 0; i < MAX_CQE_AUX_INFO_TYPE_NUM; i++) + dev_info(dev->dev, "%s\t0x%08x\n", + udma_cqe_aux_info_type_str[i], info->cqe_aux_info[i]); +} + +static void (*udma_cqe_aux_info_dump[14][2])(struct udma_dev *dev, + struct udma_cqe_aux_info_out *aux_info_out, + struct udma_cmd_query_cqe_aux_info *info) = { + {NULL, NULL}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, + {dump_cqe_server_loc_len_err_aux_info, + 
dump_cqe_client_loc_len_err_aux_info}, + {NULL, NULL}, + {dump_cqe_server_loc_access_err_aux_info, + dump_cqe_client_loc_access_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_rem_resp_len_err_aux_info}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, + {NULL, NULL}, + {dump_cqe_all_aux_info, + dump_cqe_client_rem_access_abort_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_ack_timeout_err_aux_info}, + {dump_cqe_all_aux_info, + dump_cqe_client_rnr_retry_cnt_exc_err_aux_info}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, + {NULL, NULL}, + {dump_cqe_all_aux_info, dump_cqe_all_aux_info}, +}; + static void dump_fill_aux_info(struct udma_dev *dev, struct udma_ae_aux_info_out *aux_info_out, struct udma_cmd_query_ae_aux_info *info, enum udma_ae_aux_info_type *type, uint32_t aux_info_num) @@ -246,7 +590,7 @@ static int copy_out_cqe_data_to_user(struct udma_dev *udma_dev, } int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, - struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { struct udma_cqe_aux_info_out user_aux_info_out = {}; struct udma_cqe_aux_info_out aux_info_out = {}; @@ -263,6 +607,15 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u memcpy(&cqe_info_in, (void *)(uintptr_t)in->addr, sizeof(struct udma_cqe_info_in)); + info.status = cqe_info_in.status; + info.is_client = !(cqe_info_in.s_r & 1); + if (cqe_info_in.status >= ARRAY_SIZE(udma_cqe_aux_info_dump) || + udma_cqe_aux_info_dump[info.status][info.is_client] == NULL) { + dev_err(udev->dev, "status %u is invalid or does not need to be queried.\n", + cqe_info_in.status); + return -EINVAL; + } + ret = copy_out_cqe_data_from_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udev->dev, @@ -270,9 +623,6 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u return ret; } - info.status = 
cqe_info_in.status; - info.is_client = !(cqe_info_in.s_r & 1); - ret = send_cmd_query_cqe_aux_info(udev, &info); if (ret) { dev_err(udev->dev, @@ -282,6 +632,8 @@ int udma_query_cqe_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *u return ret; } + udma_cqe_aux_info_dump[info.status][info.is_client](udev, &aux_info_out, &info); + ret = copy_out_cqe_data_to_user(udev, out, &aux_info_out, uctx, &user_aux_info_out); if (ret) { dev_err(udev->dev, -- Gitee From 4f5b0d60426818108349c38946786ceb5c769b78 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 14:29:52 +0800 Subject: [PATCH 054/126] ub: udma: Support bind and unbind jetty. commit 03e3f930e3b9639951964a810b59ccfee3d4d73c openEuler This patch adds the ability to bind and unbind jetty. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 21 +++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 5 +++++ drivers/ub/urma/hw/udma/udma_main.c | 2 ++ 3 files changed, 28 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 87174be534ba..8dffd0c43721 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -1299,6 +1299,15 @@ int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr return ret; } +int udma_unbind_jetty(struct ubcore_jetty *jetty) +{ + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + + udma_jetty->sq.rc_tjetty = NULL; + + return 0; +} + struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, @@ -1332,3 +1341,15 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, return &tjetty->ubcore_tjetty; } + +int udma_bind_jetty_ex(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct udma_jetty 
*udma_jetty = to_udma_jetty(jetty); + + udma_jetty->sq.rc_tjetty = tjetty; + + return 0; +} diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index f9b3b8f60885..7b5975dbcf14 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -254,6 +254,7 @@ int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr struct ubcore_jfs_wr **bad_wr); int udma_post_jetty_recv_wr(struct ubcore_jetty *jetty, struct ubcore_jfr_wr *wr, struct ubcore_jfr_wr **bad_wr); +int udma_unbind_jetty(struct ubcore_jetty *jetty); void udma_reset_sw_k_jetty_queue(struct udma_jetty_queue *sq); int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); @@ -264,6 +265,10 @@ struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata); +int udma_bind_jetty_ex(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, struct ubcore_jfc *send_jfc, struct ubcore_jfc *recv_jfc); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 3111c535b549..1a143f8ca3c3 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -199,6 +199,8 @@ static struct ubcore_ops g_dev_ops = { .destroy_jetty = udma_destroy_jetty, .import_jetty_ex = udma_import_jetty_ex, .unimport_jetty = udma_unimport_jetty, + .bind_jetty_ex = udma_bind_jetty_ex, + .unbind_jetty = udma_unbind_jetty, .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, .get_tp_list = udma_get_tp_list, -- Gitee From 64becddef7ad34a233daac1c3e470b2784362557 Mon Sep 17 00:00:00 2001 From: Wei Qin 
Date: Fri, 22 Aug 2025 14:58:52 +0800 Subject: [PATCH 055/126] ub: udma: Support destroy jfs and jetty batch. commit 49aa505c529d656712b64d471c59503a3897e7ac openEuler This patch adds the ability to destroy jfs and jetty batch. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 307 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 4 + drivers/ub/urma/hw/udma/udma_jfs.c | 42 ++++ drivers/ub/urma/hw/udma/udma_jfs.h | 1 + drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 356 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 8dffd0c43721..a3e29776ad4e 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -963,6 +963,309 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty) return 0; } +static int udma_batch_jetty_get_ack(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, bool *jetty_flag, + int *bad_jetty_index) +{ + struct udma_jetty_ctx ctx = {}; + struct udma_jetty_queue *sq; + uint16_t rcv_send_diff = 0; + uint32_t i; + int ret; + + for (i = 0; i < jetty_cnt; i++) { + sq = sq_list[i]; + if (sq->state != UBCORE_JETTY_STATE_READY && + sq->state != UBCORE_JETTY_STATE_SUSPENDED) + continue; + + if (jetty_flag[i]) + continue; + + ret = udma_query_jetty_ctx(dev, &ctx, sq->id); + if (ret) { + dev_err(dev->dev, + "query jetty ctx failed, id = %u, ret = %d.\n", + sq->id, ret); + *bad_jetty_index = 0; + return ret; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.PI == ctx.CI && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_READY) { + jetty_flag[i] = true; + continue; + } + + if (rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF && + ctx.state == JETTY_ERROR) { + jetty_flag[i] = true; + continue; + } + + *bad_jetty_index = 0; + break; + } + + return (i == jetty_cnt) ? 
0 : -EAGAIN; +} + +static uint32_t get_max_jetty_ta_timeout(struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt) +{ + uint32_t max_timeout = 0; + uint32_t i; + + for (i = 0; i < jetty_cnt; i++) { + if (sq_list[i]->ta_timeout > max_timeout) + max_timeout = sq_list[i]->ta_timeout; + } + + return max_timeout; +} + +static bool udma_batch_query_jetty_fd(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t ta_timeout = get_max_jetty_ta_timeout(sq_list, jetty_cnt); + struct udma_jetty_ctx ctx = {}; + struct udma_jetty_queue *sq; + uint16_t rcv_send_diff = 0; + uint32_t sum_times = 0; + uint32_t flush_cnt = 0; + bool all_query_done; + uint32_t times = 0; + bool *jetty_flag; + uint32_t i; + + jetty_flag = kcalloc(jetty_cnt, sizeof(bool), GFP_KERNEL); + if (!jetty_flag) { + *bad_jetty_index = 0; + return false; + } + + while (true) { + for (i = 0; i < jetty_cnt; i++) { + if (jetty_flag[i]) + continue; + + sq = sq_list[i]; + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) { + kfree(jetty_flag); + *bad_jetty_index = 0; + return false; + } + + if (!ctx.flush_cqe_done) + continue; + + flush_cnt++; + jetty_flag[i] = true; + } + + if (flush_cnt == jetty_cnt) { + kfree(jetty_flag); + return true; + } + + if (udma_wait_timeout(&sum_times, times, ta_timeout)) + break; + + times++; + } + + all_query_done = true; + + for (i = 0; i < jetty_cnt; i++) { + if (jetty_flag[i]) + continue; + + sq = sq_list[i]; + if (udma_query_jetty_ctx(dev, &ctx, sq->id)) { + kfree(jetty_flag); + *bad_jetty_index = 0; + return false; + } + + rcv_send_diff = ctx.next_rcv_ssn - ctx.next_send_ssn; + if (ctx.flush_cqe_done || (ctx.flush_ssn_vld && + rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF)) + continue; + + *bad_jetty_index = 0; + all_query_done = false; + udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, + sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); + break; + } + + kfree(jetty_flag); + + return all_query_done; +} + +static int 
batch_modify_jetty_to_error(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + struct udma_jetty_queue *sq; + uint32_t i; + int ret; + + for (i = 0; i < jetty_cnt; i++) { + sq = sq_list[i]; + if (sq->state == UBCORE_JETTY_STATE_ERROR || + sq->state == UBCORE_JETTY_STATE_RESET) + continue; + + ret = udma_set_jetty_state(dev, sq->id, JETTY_ERROR); + if (ret) { + dev_err(dev->dev, "modify jetty to error failed, id: %u.\n", + sq->id); + *bad_jetty_index = 0; + return ret; + } + + sq->state = UBCORE_JETTY_STATE_ERROR; + } + + return 0; +} + +static int udma_batch_modify_jetty_precondition(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t ta_timeout = get_max_jetty_ta_timeout(sq_list, jetty_cnt); + uint32_t sum_times = 0; + uint32_t times = 0; + bool *jetty_flag; + int ret; + + jetty_flag = kcalloc(jetty_cnt, sizeof(bool), GFP_KERNEL); + if (!jetty_flag) { + *bad_jetty_index = 0; + return -ENOMEM; + } + + while (true) { + ret = udma_batch_jetty_get_ack(dev, sq_list, jetty_cnt, + jetty_flag, bad_jetty_index); + if (ret != -EAGAIN) { + kfree(jetty_flag); + return ret; + } + + if (udma_wait_timeout(&sum_times, times, ta_timeout)) { + dev_warn(dev->dev, + "timeout after %u ms, not all jetty get ack.\n", + sum_times); + break; + } + times++; + } + + kfree(jetty_flag); + + return 0; +} + +static bool udma_batch_destroy_jetty_precondition(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + if (!(dev->caps.feature & UDMA_CAP_FEATURE_UE_RX_CLOSE) && + udma_batch_modify_jetty_precondition(dev, sq_list, jetty_cnt, bad_jetty_index)) + return false; + + if (batch_modify_jetty_to_error(dev, sq_list, jetty_cnt, bad_jetty_index)) { + dev_err(dev->dev, "batch md jetty err failed.\n"); + return false; + } + + if (!udma_batch_query_jetty_fd(dev, sq_list, jetty_cnt, bad_jetty_index)) + return false; + + 
udelay(UDMA_DESTROY_JETTY_DELAY_TIME); + + return true; +} + +int udma_batch_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index) +{ + uint32_t i; + int ret; + + if (!udma_batch_destroy_jetty_precondition(dev, sq_list, jetty_cnt, bad_jetty_index)) + return -EFAULT; + + for (i = 0; i < jetty_cnt; i++) { + if (sq_list[i]->state != UBCORE_JETTY_STATE_RESET) { + ret = udma_destroy_hw_jetty_ctx(dev, sq_list[i]->id); + if (ret) { + dev_err(dev->dev, + "jetty destroyed failed, id: %u.\n", + sq_list[i]->id); + *bad_jetty_index = 0; + return ret; + } + + sq_list[i]->state = UBCORE_JETTY_STATE_RESET; + } + } + + return 0; +} + +int udma_destroy_jetty_batch(struct ubcore_jetty **jetty, int jetty_cnt, int *bad_jetty_index) +{ + struct udma_jetty_queue **sq_list; + struct udma_dev *udma_dev; + uint32_t i; + int ret; + + if (!jetty) { + pr_err("jetty array is null.\n"); + return -EINVAL; + } + + if (!jetty_cnt) { + pr_err("jetty cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jetty[0]->ub_dev); + + sq_list = kcalloc(1, sizeof(*sq_list) * jetty_cnt, GFP_KERNEL); + if (!sq_list) { + *bad_jetty_index = 0; + return -ENOMEM; + } + + for (i = 0; i < jetty_cnt; i++) + sq_list[i] = &(to_udma_jetty(jetty[i])->sq); + + ret = udma_batch_modify_and_destroy_jetty(udma_dev, sq_list, jetty_cnt, bad_jetty_index); + + kfree(sq_list); + + if (ret) { + dev_err(udma_dev->dev, + "udma batch modify error and destroy jetty failed.\n"); + return ret; + } + + for (i = 0; i < jetty_cnt; i++) + udma_free_jetty(jetty[i]); + + return 0; +} + static int udma_check_jetty_grp_info(struct ubcore_tjetty_cfg *cfg, struct udma_dev *dev) { if (cfg->type == UBCORE_JETTY_GROUP) { @@ -1353,3 +1656,7 @@ int udma_bind_jetty_ex(struct ubcore_jetty *jetty, return 0; } + +module_param(well_known_jetty_pgsz_check, bool, 0444); +MODULE_PARM_DESC(well_known_jetty_pgsz_check, + "Whether check the system page size. 
default: true(true:check; false: not check)"); diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 7b5975dbcf14..5558e4ca68e1 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -241,6 +241,7 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *ub_dev, struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jetty(struct ubcore_jetty *jetty); +int udma_destroy_jetty_batch(struct ubcore_jetty **jetty_arr, int jetty_num, int *bad_jetty_index); int udma_unimport_jetty(struct ubcore_tjetty *tjetty); int udma_modify_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_attr *attr, struct ubcore_udata *udata); @@ -272,5 +273,8 @@ int udma_bind_jetty_ex(struct ubcore_jetty *jetty, void udma_clean_cqe_for_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq, struct ubcore_jfc *send_jfc, struct ubcore_jfc *recv_jfc); +int udma_batch_modify_and_destroy_jetty(struct udma_dev *dev, + struct udma_jetty_queue **sq_list, + uint32_t jetty_cnt, int *bad_jetty_index); #endif /* __UDMA_JETTY_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index a7b9576ea87e..c1a4999128cb 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -412,6 +412,48 @@ int udma_destroy_jfs(struct ubcore_jfs *jfs) return 0; } +int udma_destroy_jfs_batch(struct ubcore_jfs **jfs, int jfs_cnt, int *bad_jfs_index) +{ + struct udma_jetty_queue **sq_list; + struct udma_dev *udma_dev; + uint32_t i; + int ret; + + if (!jfs) { + pr_err("jfs array is null.\n"); + return -EINVAL; + } + + if (!jfs_cnt) { + pr_err("jfs cnt is 0.\n"); + return -EINVAL; + } + + udma_dev = to_udma_dev(jfs[0]->ub_dev); + + sq_list = kcalloc(jfs_cnt, sizeof(*sq_list), GFP_KERNEL); + if (!sq_list) + return -ENOMEM; + + for (i = 0; i < jfs_cnt; i++) + sq_list[i] = &(to_udma_jfs(jfs[i])->sq); + + ret = udma_batch_modify_and_destroy_jetty(udma_dev, 
sq_list, jfs_cnt, bad_jfs_index); + + kfree(sq_list); + + if (ret) { + dev_err(udma_dev->dev, + "udma batch modify error and destroy jfs failed.\n"); + return ret; + } + + for (i = 0; i < jfs_cnt; i++) + udma_free_jfs(jfs[i]); + + return 0; +} + static int udma_modify_jfs_state(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs, struct ubcore_jfs_attr *attr) { diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 65d8e2ac52f2..887c8e3b0127 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -134,6 +134,7 @@ struct ubcore_jfs *udma_create_jfs(struct ubcore_device *ub_dev, struct ubcore_jfs_cfg *cfg, struct ubcore_udata *udata); int udma_destroy_jfs(struct ubcore_jfs *jfs); +int udma_destroy_jfs_batch(struct ubcore_jfs **jfs_arr, int jfs_num, int *bad_jfs_index); int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd); int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 1a143f8ca3c3..b3d7c065ddda 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -186,6 +186,7 @@ static struct ubcore_ops g_dev_ops = { .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, .destroy_jfs = udma_destroy_jfs, + .destroy_jfs_batch = udma_destroy_jfs_batch, .create_jfr = udma_create_jfr, .modify_jfr = udma_modify_jfr, .query_jfr = udma_query_jfr, @@ -197,6 +198,7 @@ static struct ubcore_ops g_dev_ops = { .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, .destroy_jetty = udma_destroy_jetty, + .destroy_jetty_batch = udma_destroy_jetty_batch, .import_jetty_ex = udma_import_jetty_ex, .unimport_jetty = udma_unimport_jetty, .bind_jetty_ex = udma_bind_jetty_ex, -- Gitee From 7b26cd5b6edd5a9b10905eacd26779f570034d3a Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 15:42:20 
+0800 Subject: [PATCH 056/126] ub: udma: Support flush jfs and jetty. commit c858b41548fec87c2c817bba979b80b9c2259074 openEuler This patch adds the ability to flush jfs and jetty. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 25 ++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 1 + drivers/ub/urma/hw/udma/udma_jfs.c | 129 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfs.h | 5 ++ drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 162 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index a3e29776ad4e..2bc86bdb2421 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -1568,6 +1568,31 @@ int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) return ret; } +int udma_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *udma_dev = to_udma_dev(jetty->ub_dev); + struct udma_jetty *udma_jetty = to_udma_jetty(jetty); + struct udma_jetty_queue *sq = &udma_jetty->sq; + int n_flushed; + + if (!sq->flush_flag) + return 0; + + if (!sq->lock_free) + spin_lock(&sq->lock); + + for (n_flushed = 0; n_flushed < cr_cnt; n_flushed++) { + if (sq->ci == sq->pi) + break; + udma_flush_sq(udma_dev, sq, cr + n_flushed); + } + + if (!sq->lock_free) + spin_unlock(&sq->lock); + + return n_flushed; +} + int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr) { diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 5558e4ca68e1..64ec81bdc2b8 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -249,6 +249,7 @@ struct ubcore_jetty_group *udma_create_jetty_grp(struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, struct ubcore_udata *udata); int udma_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); +int 
udma_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, struct ubcore_cr *cr); int udma_set_jetty_state(struct udma_dev *dev, uint32_t jetty_id, enum jetty_state state); int udma_post_jetty_send_wr(struct ubcore_jetty *jetty, struct ubcore_jfs_wr *wr, diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index c1a4999128cb..d96f1caf016a 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -531,6 +531,94 @@ int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, return 0; } +static void fill_imm_data_or_token_for_cr(struct udma_dev *udma_dev, + struct udma_sqe_ctl *sqe_ctl, + struct ubcore_cr *cr, + uint32_t opcode) +{ + switch (opcode) { + case UDMA_OPC_SEND: + case UDMA_OPC_WRITE: + case UDMA_OPC_READ: + case UDMA_OPC_CAS: + case UDMA_OPC_FAA: + break; + case UDMA_OPC_SEND_WITH_IMM: + memcpy(&cr->imm_data, (void *)sqe_ctl + SQE_SEND_IMM_FIELD, + sizeof(uint64_t)); + break; + case UDMA_OPC_SEND_WITH_INVALID: + cr->invalid_token.token_id = sqe_ctl->rmt_addr_l_or_token_id; + cr->invalid_token.token_value.token = sqe_ctl->rmt_addr_h_or_token_value; + break; + case UDMA_OPC_WRITE_WITH_IMM: + memcpy(&cr->imm_data, (void *)sqe_ctl + SQE_WRITE_IMM_FIELD, + sizeof(uint64_t)); + break; + default: + dev_err(udma_dev->dev, "Flush invalid opcode :%u.\n", opcode); + break; + } +} + +static void fill_cr_by_sqe_ctl(struct udma_dev *udma_dev, + struct udma_sqe_ctl *sqe_ctl, + struct ubcore_cr *cr) +{ + uint32_t opcode = sqe_ctl->opcode; + struct udma_normal_sge *sge; + uint32_t src_sge_num = 0; + uint64_t total_len = 0; + uint32_t ctrl_len; + uint32_t i; + + fill_imm_data_or_token_for_cr(udma_dev, sqe_ctl, cr, opcode); + + cr->tpn = sqe_ctl->tpn; + cr->remote_id.id = sqe_ctl->rmt_obj_id; + memcpy(cr->remote_id.eid.raw, sqe_ctl->rmt_eid, UBCORE_EID_SIZE); + + if (sqe_ctl->inline_en) { + cr->completion_len = sqe_ctl->inline_msg_len; + return; + } + + src_sge_num = sqe_ctl->sge_num; + ctrl_len = 
get_ctl_len(opcode); + sge = (struct udma_normal_sge *)((void *)sqe_ctl + ctrl_len); + + for (i = 0; i < src_sge_num; i++) { + total_len += sge->length; + sge++; + } + + if (total_len > UINT32_MAX) { + cr->completion_len = UINT32_MAX; + dev_warn(udma_dev->dev, "total len %llu is overflow.\n", total_len); + } else { + cr->completion_len = total_len; + } +} + +static void udma_copy_from_sq(struct udma_jetty_queue *sq, uint32_t wqebb_cnt, + struct udma_jfs_wqebb *tmp_sq) +{ + uint32_t field_h; + uint32_t field_l; + uint32_t offset; + uint32_t remain; + + remain = sq->buf.entry_cnt - (sq->ci & (sq->buf.entry_cnt - 1)); + offset = (sq->ci & (sq->buf.entry_cnt - 1)) * UDMA_JFS_WQEBB_SIZE; + field_h = remain > wqebb_cnt ? wqebb_cnt : remain; + field_l = wqebb_cnt > field_h ? wqebb_cnt - field_h : 0; + + memcpy(tmp_sq, sq->buf.kva + offset, field_h * sizeof(*tmp_sq)); + + if (field_l) + memcpy(tmp_sq + field_h, sq->buf.kva, field_l * sizeof(*tmp_sq)); +} + static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) { uint32_t opcode = sqe_ctl->opcode; @@ -558,6 +646,47 @@ static uint32_t get_wqebb_num(struct udma_sqe_ctl *sqe_ctl) return sq_cal_wqebb_num(sqe_ctl_len, sqe_ctl->sge_num); } +void udma_flush_sq(struct udma_dev *udma_dev, + struct udma_jetty_queue *sq, struct ubcore_cr *cr) +{ + struct udma_jfs_wqebb tmp_sq[MAX_WQEBB_NUM] = {}; + + udma_copy_from_sq(sq, MAX_WQEBB_NUM, tmp_sq); + fill_cr_by_sqe_ctl(udma_dev, (struct udma_sqe_ctl *)tmp_sq, cr); + cr->status = UBCORE_CR_WR_UNHANDLED; + cr->user_ctx = sq->wrid[sq->ci & (sq->buf.entry_cnt - 1)]; + /* Fill in UINT8_MAX for send direction */ + cr->opcode = UINT8_MAX; + cr->local_id = sq->id; + + sq->ci += get_wqebb_num((struct udma_sqe_ctl *)tmp_sq); +} + +int udma_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr) +{ + struct udma_dev *udma_dev = to_udma_dev(jfs->ub_dev); + struct udma_jfs *udma_jfs = to_udma_jfs(jfs); + struct udma_jetty_queue *sq = &udma_jfs->sq; + int n_flushed; + + if 
(!sq->flush_flag) + return 0; + + if (!jfs->jfs_cfg.flag.bs.lock_free) + spin_lock(&sq->lock); + + for (n_flushed = 0; n_flushed < cr_cnt; n_flushed++) { + if (sq->ci == sq->pi) + break; + udma_flush_sq(udma_dev, sq, cr + n_flushed); + } + + if (!jfs->jfs_cfg.flag.bs.lock_free) + spin_unlock(&sq->lock); + + return n_flushed; +} + static uint8_t udma_get_jfs_opcode(enum ubcore_opcode opcode) { switch (opcode) { diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index 887c8e3b0127..b030150af88f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -30,6 +30,8 @@ #define SQE_WRITE_NOTIFY_CTL_LEN 80 #define SQE_WRITE_IMM_INLINE_SIZE 192 +#define UINT8_MAX 0xff + enum udma_jfs_type { UDMA_NORMAL_JFS_TYPE, UDMA_KERNEL_STARS_JFS_TYPE, @@ -142,9 +144,12 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq); int udma_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, struct ubcore_udata *udata); +int udma_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); +void udma_flush_sq(struct udma_dev *udma_dev, + struct udma_jetty_queue *sq, struct ubcore_cr *cr); #endif /* __UDMA_JFS_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index b3d7c065ddda..8845ac4cc661 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -185,6 +185,7 @@ static struct ubcore_ops g_dev_ops = { .create_jfs = udma_create_jfs, .modify_jfs = udma_modify_jfs, .query_jfs = udma_query_jfs, + .flush_jfs = udma_flush_jfs, .destroy_jfs = udma_destroy_jfs, .destroy_jfs_batch = udma_destroy_jfs_batch, 
.create_jfr = udma_create_jfr, @@ -197,6 +198,7 @@ static struct ubcore_ops g_dev_ops = { .create_jetty = udma_create_jetty, .modify_jetty = udma_modify_jetty, .query_jetty = udma_query_jetty, + .flush_jetty = udma_flush_jetty, .destroy_jetty = udma_destroy_jetty, .destroy_jetty_batch = udma_destroy_jetty_batch, .import_jetty_ex = udma_import_jetty_ex, -- Gitee From af8e302ca80fe202a9b07bb14d8cf8158c6e06d8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 19:15:30 +0800 Subject: [PATCH 057/126] ub: udma: Support device status inquiry. commit a857af1498bbb8b1602308d2880b49f6ef8b64ce openEuler This patch adds the ability to query device status. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_cmd.c | 21 ++++++++++++++++++++- drivers/ub/urma/hw/udma/udma_main.c | 26 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_cmd.c b/drivers/ub/urma/hw/udma/udma_cmd.c index 6e4c66af0537..0e3c18c81e25 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.c +++ b/drivers/ub/urma/hw/udma/udma_cmd.c @@ -6,7 +6,11 @@ #include #include #include +#include "udma_eid.h" #include "udma_cmd.h" +#include "udma_jfc.h" +#include "udma_jfr.h" +#include "udma_jetty.h" bool debug_switch = true; @@ -71,6 +75,19 @@ void udma_free_cmd_mailbox(struct udma_dev *dev, kfree(mailbox); } +static void udma_set_mb_flag_or_fd(uint8_t op, struct udma_mbx_op_match *match, + void *buf) +{ + struct udma_jetty_ctx *jfs_ctx; + + if (op == UDMA_CMD_QUERY_JFS_CONTEXT) { + jfs_ctx = (struct udma_jetty_ctx *)buf; + jfs_ctx->flush_cqe_done = 1; + jfs_ctx->state = 1; + jfs_ctx->flush_ssn_vld = 1; + } +} + static bool udma_op_ignore_eagain(uint8_t op, void *buf) { struct udma_mbx_op_match matches[] = { @@ -100,8 +117,10 @@ static bool udma_op_ignore_eagain(uint8_t op, void *buf) uint32_t i; for (i = 0; i < ARRAY_SIZE(matches); i++) { - if (op == matches[i].op) + if (op == matches[i].op) { + 
udma_set_mb_flag_or_fd(op, &matches[i], buf); return matches[i].ignore_ret; + } } return false; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 8845ac4cc661..5123cc0c071b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -162,6 +162,31 @@ static int udma_query_device_attr(struct ubcore_device *dev, return 0; } +static int udma_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val) +{ + struct ubcore_stats_com_val *com_val = (struct ubcore_stats_com_val *)val->addr; + struct udma_dev *udma_dev = to_udma_dev(dev); + struct ubase_ub_dl_stats dl_stats = {}; + int ret; + + ret = ubase_get_ub_port_stats(udma_dev->comdev.adev, + udma_dev->port_logic_id, &dl_stats); + if (ret) { + dev_err(udma_dev->dev, "failed to query port stats, ret = %d.\n", ret); + return ret; + } + + com_val->tx_pkt = dl_stats.dl_tx_busi_pkt_num; + com_val->rx_pkt = dl_stats.dl_rx_busi_pkt_num; + com_val->rx_pkt_err = 0; + com_val->tx_pkt_err = 0; + com_val->tx_bytes = 0; + com_val->rx_bytes = 0; + + return ret; +} + static struct ubcore_ops g_dev_ops = { .owner = THIS_MODULE, .abi_version = 0, @@ -216,6 +241,7 @@ static struct ubcore_ops g_dev_ops = { .post_jetty_send_wr = udma_post_jetty_send_wr, .post_jetty_recv_wr = udma_post_jetty_recv_wr, .poll_jfc = udma_poll_jfc, + .query_stats = udma_query_stats, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From abcbc931113b549be7e74db70d5e830d30a3899f Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 22 Aug 2025 19:45:52 +0800 Subject: [PATCH 058/126] ub: udma: Support entity index inquiry. commit 1bbf139669f04e7b523b30b6258d851276820d3d openEuler This patch adds the ability to query entity index. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 29 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_common.h | 8 ++++++++ drivers/ub/urma/hw/udma/udma_main.c | 1 + 3 files changed, 38 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 017216169ea3..3ec53595bcf8 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -650,6 +650,35 @@ void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_ dma_free_iova(slot); } +int udma_query_ue_idx(struct ubcore_device *ubcore_dev, struct ubcore_devid *devid, + uint16_t *ue_idx) +{ + struct udma_dev *dev = to_udma_dev(ubcore_dev); + struct udma_ue_index_cmd cmd = {}; + struct ubase_cmd_buf out; + struct ubase_cmd_buf in; + int ret; + + if (!devid) { + dev_err(dev->dev, "failed to query ue idx, devid is NULL.\n"); + return -EINVAL; + } + + (void)memcpy(cmd.guid, devid->raw, sizeof(devid->raw)); + + udma_fill_buf(&in, UDMA_CMD_QUERY_UE_INDEX, true, sizeof(cmd), &cmd); + udma_fill_buf(&out, UDMA_CMD_QUERY_UE_INDEX, true, sizeof(cmd), &cmd); + + ret = ubase_cmd_send_inout(dev->comdev.adev, &in, &out); + if (ret) { + dev_err(dev->dev, "failed to query ue idx, ret = %d.\n", ret); + return ret; + } + *ue_idx = cmd.ue_idx; + + return 0; +} + void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, uint32_t len, uint32_t *ctx) { diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 300357af8895..f09fffc5d50c 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -68,6 +68,12 @@ struct udma_umem_param { bool is_kernel; }; +struct udma_ue_index_cmd { + uint16_t ue_idx; + uint8_t rsv[2]; + uint8_t guid[16]; +}; + struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool 
is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); @@ -118,6 +124,8 @@ static inline uint64_t udma_cal_npages(uint64_t va, uint64_t len) return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / PAGE_SIZE; } +int udma_query_ue_idx(struct ubcore_device *ub_dev, struct ubcore_devid *devid, + uint16_t *ue_idx); void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, uint32_t len, uint32_t *ctx); void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 5123cc0c071b..f33943d4c2e0 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -242,6 +242,7 @@ static struct ubcore_ops g_dev_ops = { .post_jetty_recv_wr = udma_post_jetty_recv_wr, .poll_jfc = udma_poll_jfc, .query_stats = udma_query_stats, + .query_ue_idx = udma_query_ue_idx, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 851f239570ac1d844625b704f1b26102ae40e630 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 09:25:15 +0800 Subject: [PATCH 059/126] ub: udma: Support tp context inquiry. commit 1e63a34814fddce6d7f02744a9ab17acd8fcb84f openEuler This patch adds the ability to query tp context. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.h | 238 ++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctl.c | 45 +++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 63 +++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 30 ++- drivers/ub/urma/hw/udma/udma_main.c | 2 + 5 files changed, 376 insertions(+), 2 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index f09fffc5d50c..2bfededa6203 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -9,6 +9,7 @@ #include "udma_ctx.h" #include "udma_dev.h" +#define TP_ACK_UDP_SPORT_H_OFFSET 8 #define UDMA_TPHANDLE_TPID_SHIFT 0xFFFFFF struct udma_jetty_grp { @@ -74,6 +75,243 @@ struct udma_ue_index_cmd { uint8_t guid[16]; }; +struct udma_tp_ctx { + /* Byte4 */ + uint32_t version : 1; + uint32_t tp_mode : 1; + uint32_t trt : 1; + uint32_t wqe_bb_shift : 4; + uint32_t oor_en : 1; + uint32_t tempid : 6; + uint32_t portn : 6; + uint32_t rsvd1 : 12; + /* Byte8 */ + uint32_t wqe_ba_l; + /* Byte12 */ + uint32_t wqe_ba_h : 20; + uint32_t udp_srcport_range : 4; + uint32_t cng_alg_sel : 3; + uint32_t lbi : 1; + uint32_t rsvd4 : 1; + uint32_t vlan_en : 1; + uint32_t mtu : 2; + /* Byte16 */ + uint32_t route_addr_idx : 20; + uint32_t rsvd6 : 12; + /* Byte20 */ + u32 tpn_vtpn : 24; + u32 rsvd7 : 8; + /* Byte24 to Byte28 */ + u32 rsvd8[2]; + /* Byte 32 */ + u32 seid_idx : 16; + u32 sjetty_l : 16; + /* Byte 36 */ + u32 sjetty_h : 4; + u32 tp_wqe_token_id : 20; + u32 tp_wqe_position : 1; + u32 rsv9_l : 7; + /* Byte 40 */ + u32 rsvd9_h : 6; + u32 taack_tpn : 24; + u32 rsvd10 : 2; + /* Byte 44 */ + u32 spray_en : 1; + u32 sr_en : 1; + u32 ack_freq_mode : 1; + u32 route_type : 2; + u32 vl : 4; + u32 dscp : 6; + u32 switch_mp_en : 1; + u32 at_times : 5; + u32 retry_num_init : 3; + u32 at : 5; + u32 rsvd13 : 3; + /* Byte 48 */ + u32 on_flight_size : 16; + u32 hpln : 8; + u32 fl_l : 8; + /* 
Byte 52 */ + u32 fl_h : 12; + u32 dtpn : 20; + /* Byte 56 */ + u32 rc_tpn : 24; + u32 rc_vl : 4; + u32 tpg_vld : 1; + u32 reorder_cap : 3; + /* Byte 60 */ + u32 reorder_q_shift : 4; + u32 reorder_q_addr_l : 28; + /* Byte 64 */ + u32 reorder_q_addr_h : 24; + u32 tpg_l : 8; + /* Byte 68 */ + u32 tpg_h : 12; + u32 jettyn : 20; + /* Byte 72 */ + u32 dyn_timeout_mode : 1; + u32 base_time : 23; + u32 rsvd15 : 8; + /* Byte 76 */ + u32 tpack_psn : 24; + u32 tpack_rspst : 3; + u32 tpack_rspinfo : 5; + /* Byte 80 */ + u32 tpack_msn : 24; + u32 ack_udp_srcport_l : 8; + /* Byte 84 */ + u32 ack_udp_srcport_h : 8; + u32 max_rcv_psn : 24; + /* Byte 88 */ + u32 scc_token : 19; + u32 poll_db_wait_do : 1; + u32 msg_rty_lp_flg : 1; + u32 retry_cnt : 3; + u32 sq_invld_flg : 1; + u32 wait_ack_timeout : 1; + u32 tx_rtt_caling : 1; + u32 cnp_tx_flag : 1; + u32 sq_db_doing : 1; + u32 tpack_doing : 1; + u32 sack_wait_do : 1; + u32 tpack_wait_do : 1; + /* Byte 92 */ + u16 post_max_idx; + u16 wqe_max_bb_idx; + /* Byte 96 */ + u16 wqe_bb_pi; + u16 wqe_bb_ci; + /* Byte 100 */ + u16 data_udp_srcport; + u16 wqe_msn; + /* Byte 104 */ + u32 cur_req_psn : 24; + u32 tx_ack_psn_err : 1; + u32 poll_db_type : 2; + u32 tx_ack_flg : 1; + u32 tx_sq_err_flg : 1; + u32 scc_retry_type : 2; + u32 flush_cqe_wait_do : 1; + /* Byte 108 */ + u32 wqe_max_psn : 24; + u32 ssc_token_l : 4; + u32 rsvd16 : 4; + /* Byte 112 */ + u32 tx_sq_timer; + /* Byte 116 */ + u32 rtt_timestamp_psn : 24; + u32 rsvd17 : 8; + /* Byte 120 */ + u32 rtt_timestamp : 24; + u32 cnp_timer_l : 8; + /* Byte 124 */ + u32 cnp_timer_h : 16; + u32 max_reorder_id : 16; + /* Byte 128 */ + u16 cur_reorder_id; + u16 wqe_max_msn; + /* Byte 132 */ + u16 post_bb_pi; + u16 post_bb_ci; + /* Byte 136 */ + u32 lr_ae_ind : 1; + u32 rx_cqe_cnt : 16; + u32 reorder_q_si : 13; + u32 rq_err_type_l : 2; + /* Byte 140 */ + u32 rq_err_type_h : 3; + u32 rsvd18 : 2; + u32 rsvd19 : 27; + /* Byte 144 */ + u32 req_seq; + /* Byte 148 */ + uint32_t req_ce_seq; + /* Byte 152 
*/ + u32 req_cmp_lrb_indx : 12; + u32 req_lrb_indx : 12; + u32 req_lrb_indx_vld : 1; + u32 rx_req_psn_err : 1; + u32 rx_req_last_optype : 3; + u32 rx_req_fake_flg : 1; + u32 rsvd20 : 2; + /* Byte 156 */ + uint16_t jfr_wqe_idx; + uint16_t rx_req_epsn_l; + /* Byte 160 */ + uint32_t rx_req_epsn_h : 8; + uint32_t rx_req_reduce_code : 8; + uint32_t rx_req_msn_l : 16; + /* Byte 164 */ + uint32_t rx_req_msn_h : 8; + uint32_t jfr_wqe_rnr : 1; + uint32_t jfr_wqe_rnr_timer : 5; + uint32_t rsvd21 : 2; + uint32_t jfr_wqe_cnt : 16; + /* Byte 168 */ + uint32_t max_reorder_q_idx : 13; + uint32_t rsvd22 : 3; + uint32_t reorder_q_ei : 13; + uint32_t rx_req_last_elr_flg : 1; + uint32_t rx_req_last_elr_err_type_l : 2; + /* Byte172 */ + uint32_t rx_req_last_elr_err_type_h : 3; + uint32_t rx_req_last_op : 1; + uint32_t jfrx_jetty : 1; + uint32_t jfrx_jfcn_l : 16; + uint32_t jfrx_jfcn_h : 4; + uint32_t jfrx_jfrn_l : 7; + /* Byte176 */ + u32 jfrx_jfrn_h1 : 9; + u32 jfrx_jfrn_h2 : 4; + u32 rq_timer_l : 19; + /* Byte180 */ + u32 rq_timer_h : 13; + u32 rq_at : 5; + u32 wait_cqe_timeout : 1; + u32 rsvd23 : 13; + /* Byte184 */ + u32 rx_sq_timer; + /* Byte188 */ + u32 tp_st : 3; + u32 rsvd24 : 4; + u32 ls_ae_ind : 1; + u32 retry_msg_psn : 24; + /* Byte192 */ + u32 retry_msg_fpsn : 24; + u32 rsvd25 : 8; + /* Byte196 */ + u16 retry_wqebb_idx; + u16 retry_msg_msn; + /* Byte200 */ + u32 ack_rcv_seq; + /* Byte204 */ + u32 rtt : 24; + u32 dup_sack_cnt : 8; + /* Byte208 */ + u32 sack_max_rcv_psn : 24; + u32 rsvd26 : 7; + u32 rx_ack_flg : 1; + /* Byte212 */ + u32 rx_ack_msn : 16; + u32 sack_lrb_indx : 12; + u32 rx_fake_flg : 1; + u32 rx_rtt_caling : 1; + u32 rx_ack_psn_err : 1; + u32 sack_lrb_indx_vld : 1; + /* Byte216 */ + u32 rx_ack_epsn : 24; + u32 rsvd27 : 8; + /* Byte220 */ + u32 max_retry_psn : 24; + u32 retry_reorder_id_l : 8; + /* Byte224 */ + u32 retry_reorder_id_h : 8; + u32 rsvd28 : 8; + u32 rsvd29 : 16; + /* Byte228 to Byte256 */ + u32 scc_data[8]; +}; + struct ubcore_umem 
*udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 49cb1ebb0895..764a29b9b24b 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -75,6 +75,49 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_tp_sport_out tp_sport_out = {}; + struct udma_tp_sport_in tp_sport_in = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_cmd_mailbox *mailbox = NULL; + struct ubase_mbx_attr mbox_attr = {}; + struct udma_tp_ctx *tpc; + + if (udma_check_base_param(out->addr, out->len, sizeof(struct udma_tp_sport_out)) || + udma_check_base_param(in->addr, in->len, sizeof(struct udma_tp_sport_in))) { + dev_err(udev->dev, "parameter invalid in query tp sport, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + if (udev->is_ue) { + dev_err(udev->dev, "ue is not supported.\n"); + return -EINVAL; + } + + memcpy(&tp_sport_in, (void *)(uintptr_t)in->addr, sizeof(struct udma_tp_sport_in)); + + mbox_attr.tag = tp_sport_in.tpn; + mbox_attr.op = UDMA_CMD_QUERY_TP_CONTEXT; + mailbox = udma_mailbox_query_ctx(udev, &mbox_attr); + if (!mailbox) + return -ENOMEM; + + tpc = (struct udma_tp_ctx *)mailbox->buf; + + tp_sport_out.ack_udp_srcport = tpc->ack_udp_srcport_h << TP_ACK_UDP_SPORT_H_OFFSET | + tpc->ack_udp_srcport_l; + tp_sport_out.data_udp_srcport = tpc->data_udp_srcport; + + memcpy((void *)(uintptr_t)out->addr, &tp_sport_out, out->len); + + udma_free_cmd_mailbox(udev, mailbox); + + return 0; +} + static void dump_cqe_client_loc_len_err_aux_info(struct udma_dev *dev, struct 
udma_cqe_aux_info_out *aux_info_out, struct udma_cmd_query_cqe_aux_info *info) @@ -840,6 +883,7 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, + [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, }; @@ -854,6 +898,7 @@ static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { [UDMA_USER_CTL_GET_DEV_RES_RATIO] = NULL, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = NULL, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = NULL, + [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, [UDMA_USER_CTL_QUERY_UBMEM_INFO] = NULL, diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index d28e206fb277..fd499a89e131 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -592,6 +592,69 @@ static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handl return (ret == -EAGAIN) ? 
0 : ret; } +int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) +{ + struct udma_ctrlq_set_tp_attr_req tp_attr_req = {}; + struct udma_dev *udev = to_udma_dev(dev); + union ubcore_tp_handle tp_handle_val; + struct ubase_ctrlq_msg msg = {}; + int ret; + + tp_handle_val.value = tp_handle; + tp_attr_req.tpid = tp_handle_val.bs.tpid; + tp_attr_req.tpn_cnt = tp_handle_val.bs.tp_cnt; + tp_attr_req.tpn_start = tp_handle_val.bs.tpn_start; + tp_attr_req.tp_attr_cnt = tp_attr_cnt; + tp_attr_req.tp_attr.tp_attr_bitmap = tp_attr_bitmap; + memcpy(&tp_attr_req.tp_attr.tp_attr_value, (void *)tp_attr, sizeof(*tp_attr)); + + udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), NULL, 0); + msg.opcode = UDMA_CMD_CTRLQ_SET_TP_ATTR; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) + dev_err(udev->dev, "set tp attr failed, tpid = %u, ret = %d.\n", + tp_attr_req.tpid, ret); + + return ret; +} + +int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) +{ + struct udma_ctrlq_get_tp_attr_resp tp_attr_resp = {}; + struct udma_ctrlq_get_tp_attr_req tp_attr_req = {}; + struct udma_dev *udev = to_udma_dev(dev); + union ubcore_tp_handle tp_handle_val; + struct ubase_ctrlq_msg msg = {}; + int ret; + + tp_handle_val.value = tp_handle; + tp_attr_req.tpid.tpid = tp_handle_val.bs.tpid; + tp_attr_req.tpid.tpn_cnt = tp_handle_val.bs.tp_cnt; + tp_attr_req.tpid.tpn_start = tp_handle_val.bs.tpn_start; + udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), &tp_attr_resp, + sizeof(tp_attr_resp)); + msg.opcode = UDMA_CMD_CTRLQ_GET_TP_ATTR; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &msg); + if (ret) { + dev_err(udev->dev, "get tp attr failed, tpid = %u, ret = %d.\n", + 
tp_attr_req.tpid.tpid, ret); + return ret; + } + + *tp_attr_cnt = tp_attr_resp.tp_attr_cnt; + *tp_attr_bitmap = tp_attr_resp.tp_attr.tp_attr_bitmap; + memcpy((void *)tp_attr, &tp_attr_resp.tp_attr.tp_attr_value, + sizeof(tp_attr_resp.tp_attr.tp_attr_value)); + + return 0; +} + int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode) { struct udma_req_msg *req_msg; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index ba43f3590417..e83cd3e94c56 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -151,8 +151,27 @@ struct udma_ue_idx_table { uint8_t ue_idx[UDMA_UE_NUM]; }; -struct udma_notify_flush_done { - uint32_t tpn; +struct udma_ctrlq_tp_attr { + uint32_t tp_attr_bitmap; + struct ubcore_tp_attr_value tp_attr_value; +}; + +struct udma_ctrlq_get_tp_attr_req { + struct udma_ctrlq_tpid tpid; +}; + +struct udma_ctrlq_set_tp_attr_req { + uint32_t tpid : 24; + uint32_t tpn_cnt : 8; + uint32_t tpn_start : 24; + uint32_t tp_attr_cnt : 8; + struct udma_ctrlq_tp_attr tp_attr; +}; + +struct udma_ctrlq_get_tp_attr_resp { + uint32_t tpid : 24; + uint32_t tp_attr_cnt : 8; + struct udma_ctrlq_tp_attr tp_attr; }; struct udma_dev_resource_ratio { @@ -173,6 +192,13 @@ int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_c void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, bool is_need_flush); + +int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata); +int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata); int send_resp_to_ue(struct udma_dev *udma_dev, struct ubcore_resp *req_host, uint8_t dst_ue_idx, 
uint16_t opcode); int send_req_to_mue(struct udma_dev *udma_dev, struct ubcore_req *req, uint16_t opcode); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index f33943d4c2e0..7726d371476b 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -233,6 +233,8 @@ static struct ubcore_ops g_dev_ops = { .create_jetty_grp = udma_create_jetty_grp, .delete_jetty_grp = udma_delete_jetty_grp, .get_tp_list = udma_get_tp_list, + .set_tp_attr = udma_set_tp_attr, + .get_tp_attr = udma_get_tp_attr, .active_tp = udma_active_tp, .deactive_tp = udma_deactive_tp, .user_ctl = udma_user_ctl, -- Gitee From 330237123923c3097cc70ccf8fc4efc790247d80 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 10:11:49 +0800 Subject: [PATCH 060/126] ub: udma: Support create and destroy stars jfs. commit 18f72c331dd8e53d2863c7875de149fce6417efb openEuler This patch adds the ability to create and destroy stars jfs. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 203 +++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jetty.h | 3 + drivers/ub/urma/hw/udma/udma_jfs.c | 34 +++++ drivers/ub/urma/hw/udma/udma_jfs.h | 5 + 4 files changed, 245 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 764a29b9b24b..7851a262e50a 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -11,6 +11,7 @@ #include #include "udma_cmd.h" #include "udma_jetty.h" +#include "udma_segment.h" #include "udma_jfs.h" #include "udma_jfc.h" #include "udma_db.h" @@ -75,6 +76,206 @@ const char *udma_ae_aux_info_type_str[] = { "LQC_TA_CQM_CQE_INNER_ALARM", }; +static int udma_get_sq_buf_ex(struct udma_dev *dev, struct udma_jetty_queue *sq, + struct udma_jfs_cfg_ex *cfg_ex) +{ + struct ubcore_jfs_cfg *jfs_cfg; + uint32_t wqe_bb_depth; + uint32_t sqe_bb_cnt; + uint32_t size; + + jfs_cfg = 
&cfg_ex->base_cfg; + + if (!jfs_cfg->flag.bs.lock_free) + spin_lock_init(&sq->lock); + sq->max_inline_size = jfs_cfg->max_inline_data; + sq->max_sge_num = jfs_cfg->max_sge; + sq->tid = dev->tid; + sq->lock_free = jfs_cfg->flag.bs.lock_free; + + sqe_bb_cnt = sq_cal_wqebb_num(SQE_WRITE_NOTIFY_CTL_LEN, jfs_cfg->max_sge); + if (sqe_bb_cnt > MAX_WQEBB_NUM) + sqe_bb_cnt = MAX_WQEBB_NUM; + sq->sqe_bb_cnt = sqe_bb_cnt; + + wqe_bb_depth = roundup_pow_of_two(sqe_bb_cnt * jfs_cfg->depth); + sq->buf.entry_size = UDMA_JFS_WQEBB_SIZE; + size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); + sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; + + if (size != cfg_ex->cstm_cfg.sq.buff_size) { + dev_err(dev->dev, "buff size is wrong, buf size = %u.\n", size); + return -EINVAL; + } + + if (cfg_ex->cstm_cfg.sq.buff == 0) { + dev_err(dev->dev, "cstm_cfg sq buff is wrong.\n"); + return -EINVAL; + } + + sq->buf.addr = (dma_addr_t)(uintptr_t)phys_to_virt((uint64_t) + (uintptr_t)cfg_ex->cstm_cfg.sq.buff); + if (sq->buf.addr == 0) { + dev_err(dev->dev, "sq buff addr is wrong.\n"); + return -EINVAL; + } + + sq->buf.kva = (void *)(uintptr_t)sq->buf.addr; + + sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); + if (!sq->wrid) { + sq->buf.kva = NULL; + sq->buf.addr = 0; + dev_err(dev->dev, + "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", + sq->id, sq->buf.entry_cnt); + return -ENOMEM; + } + + udma_alloc_kernel_db(dev, sq); + sq->kva_curr = sq->buf.kva; + + sq->trans_mode = jfs_cfg->trans_mode; + + return 0; +} + +static int udma_get_jfs_buf_ex(struct udma_dev *dev, struct udma_jfs *jfs, + struct udma_jfs_cfg_ex *cfg_ex) +{ + int ret; + + jfs->jfs_addr = (uintptr_t)&jfs->sq; + + ret = udma_get_sq_buf_ex(dev, &jfs->sq, cfg_ex); + if (ret) + dev_err(dev->dev, + "failed to get sq buf in jfs process, ret = %d.\n", ret); + + return ret; +} + +static struct ubcore_jfs *udma_create_jfs_ex(struct ubcore_device *ub_dev, + struct udma_jfs_cfg_ex *cfg_ex) +{ 
+ struct ubcore_jfs_cfg *cfg = &cfg_ex->base_cfg; + struct udma_dev *dev = to_udma_dev(ub_dev); + struct ubase_mbx_attr attr = {}; + struct udma_jetty_ctx ctx = {}; + struct udma_jfs *jfs; + int ret; + + ret = udma_verify_jfs_param(dev, cfg, true); + if (ret) + return NULL; + + jfs = kcalloc(1, sizeof(*jfs), GFP_KERNEL); + if (!jfs) + return NULL; + + dev_info(dev->dev, "start alloc id!\n"); + ret = udma_alloc_jetty_id(dev, &jfs->sq.id, &dev->caps.jetty); + if (ret) { + dev_err(dev->dev, "alloc JFS id failed, ret = %d.\n", ret); + goto err_alloc_jfsn; + } + jfs->ubcore_jfs.jfs_id.id = jfs->sq.id; + jfs->ubcore_jfs.jfs_cfg = *cfg; + jfs->ubcore_jfs.ub_dev = ub_dev; + jfs->ubcore_jfs.uctx = NULL; + jfs->ubcore_jfs.jfae_handler = cfg_ex->jfae_handler; + jfs->mode = UDMA_KERNEL_STARS_JFS_TYPE; + + ret = xa_err(xa_store(&dev->jetty_table.xa, jfs->sq.id, &jfs->sq, GFP_KERNEL)); + if (ret) { + dev_err(dev->dev, "store jfs sq(%u) failed, ret = %d.\n", + jfs->sq.id, ret); + goto err_store_jfs_sq; + } + + dev_info(dev->dev, "start get stars jfs buf!\n"); + ret = udma_get_jfs_buf_ex(dev, jfs, cfg_ex); + if (ret) + goto err_alloc_jfs_id; + + udma_set_query_flush_time(&jfs->sq, cfg->err_timeout); + jfs->sq.state = UBCORE_JETTY_STATE_READY; + udma_init_jfsc(dev, cfg, jfs, &ctx); + attr.tag = jfs->sq.id; + attr.op = UDMA_CMD_CREATE_JFS_CONTEXT; + ret = post_mailbox_update_ctx(dev, &ctx, sizeof(ctx), &attr); + if (ret) { + dev_err(dev->dev, "failed to upgrade JFSC, ret = %d.\n", ret); + goto err_update_ctx; + } + + refcount_set(&jfs->ae_refcount, 1); + init_completion(&jfs->ae_comp); + + if (dfx_switch) + udma_dfx_store_jfs_id(dev, jfs); + + dev_info(dev->dev, "create stars jfs success!\n"); + + return &jfs->ubcore_jfs; + +err_update_ctx: + kfree(jfs->sq.wrid); +err_alloc_jfs_id: + xa_erase(&dev->jetty_table.xa, jfs->sq.id); +err_store_jfs_sq: + udma_adv_id_free(&dev->jetty_table.bitmap_table, jfs->sq.id, false); +err_alloc_jfsn: + kfree(jfs); + return NULL; +} + +static int 
udma_create_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_jfs_cfg_ex cfg_ex; + struct ubcore_jfs *jfs; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_jfs_cfg_ex)) || + udma_check_base_param(out->addr, out->len, sizeof(struct ubcore_jfs *))) { + dev_err(udev->dev, "param invalid in create jfs, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + memcpy(&cfg_ex, (void *)(uintptr_t)in->addr, sizeof(struct udma_jfs_cfg_ex)); + + jfs = udma_create_jfs_ex(dev, &cfg_ex); + if (jfs == NULL) + return -EFAULT; + + memcpy((void *)(uintptr_t)out->addr, &jfs, sizeof(struct ubcore_jfs *)); + + return 0; +} + +static int udma_delete_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubcore_jfs *jfs; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct ubcore_jfs *))) { + dev_err(udev->dev, "parameter invalid in delete jfs, len = %u.\n", + in->len); + return -EFAULT; + } + memcpy(&jfs, (void *)(uintptr_t)in->addr, sizeof(struct ubcore_jfs *)); + if (jfs == NULL) + return -EINVAL; + + if (udma_destroy_jfs(jfs)) + return -EFAULT; + + return 0; +} + static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { @@ -881,6 +1082,8 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc } static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { + [UDMA_USER_CTL_CREATE_JFS_EX] = udma_create_jfs_ops_ex, + [UDMA_USER_CTL_DELETE_JFS_EX] = udma_delete_jfs_ops_ex, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = 
udma_ctrlq_query_tp_sport, diff --git a/drivers/ub/urma/hw/udma/udma_jetty.h b/drivers/ub/urma/hw/udma/udma_jetty.h index 64ec81bdc2b8..011711dc1926 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.h +++ b/drivers/ub/urma/hw/udma/udma_jetty.h @@ -262,7 +262,10 @@ int udma_destroy_hw_jetty_ctx(struct udma_dev *dev, uint32_t jetty_id); void udma_set_query_flush_time(struct udma_jetty_queue *sq, uint8_t err_timeout); int udma_modify_and_destroy_jetty(struct udma_dev *dev, struct udma_jetty_queue *sq); +int udma_alloc_jetty_id(struct udma_dev *udma_dev, uint32_t *idx, + struct udma_res *jetty_res); int udma_modify_jetty_precondition(struct udma_dev *dev, struct udma_jetty_queue *sq); + struct ubcore_tjetty *udma_import_jetty_ex(struct ubcore_device *ub_dev, struct ubcore_tjetty_cfg *cfg, struct ubcore_active_tp_cfg *active_tp_cfg, diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index d96f1caf016a..a1a36a0a136f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -155,6 +155,40 @@ void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, ctx->next_rcv_ssn = ctx->next_send_ssn; } +int udma_verify_jfs_param(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + bool enable_stars) +{ + if (!cfg->depth || cfg->depth > dev->caps.jfs.depth || + cfg->max_sge > dev->caps.jfs_sge || cfg->trans_mode == UBCORE_TP_RC) { + dev_err(dev->dev, + "jfs param is invalid, depth = %u, seg = %u, max_depth = %u, max_jfs_seg = %u, trans_mode = %u.\n", + cfg->depth, cfg->max_sge, dev->caps.jfs.depth, + dev->caps.jfs_sge, cfg->trans_mode); + return -EINVAL; + } + + if (enable_stars && cfg->max_inline_data != 0 && + cfg->max_inline_data > dev->caps.jfs_inline_sz) { + dev_err(dev->dev, "jfs param is invalid, inline_data:%u, max_inline_len:%u.\n", + cfg->max_inline_data, dev->caps.jfs_inline_sz); + return -EINVAL; + } + + if (enable_stars && cfg->max_rsge > dev->caps.jfs_rsge) { + dev_err(dev->dev, "jfs param is 
invalid, rsge:%u, max_rsge:%u.\n", + cfg->max_rsge, dev->caps.jfs_rsge); + return -EINVAL; + } + + if (cfg->priority >= UDMA_MAX_PRIORITY) { + dev_err(dev->dev, "kernel mode jfs priority is out of range, priority is %u.\n", + cfg->priority); + return -EINVAL; + } + + return 0; +} + void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs) { struct udma_dfx_jfs *jfs; diff --git a/drivers/ub/urma/hw/udma/udma_jfs.h b/drivers/ub/urma/hw/udma/udma_jfs.h index b030150af88f..d3a29f2a68a0 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.h +++ b/drivers/ub/urma/hw/udma/udma_jfs.h @@ -151,5 +151,10 @@ int udma_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, struct ubcore_jfs_wr **bad_wr); void udma_flush_sq(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct ubcore_cr *cr); +void udma_dfx_store_jfs_id(struct udma_dev *udma_dev, struct udma_jfs *udma_jfs); +void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + struct udma_jfs *jfs, void *mb_buf); +int udma_verify_jfs_param(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, + bool enable_stars); #endif /* __UDMA_JFS_H__ */ -- Gitee From 180d4eb5a4a1f2d699d80eab2aa6bcd4aac2a72e Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 11:31:26 +0800 Subject: [PATCH 061/126] ub: udma: Support create and destroy stars jfc. commit a388e048a2bdf74c3a699be40e12ca7bb317d707 openEuler This patch adds the ability to create and destroy stars jfc. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 205 +++++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_jfc.h | 4 + 2 files changed, 209 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 7851a262e50a..9b4c306d4887 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -276,6 +276,208 @@ static int udma_delete_jfs_ops_ex(struct ubcore_device *dev, struct ubcore_ucont return 0; } +static int udma_get_jfc_buf_ex(struct udma_dev *dev, + struct udma_jfc *jfc, + struct udma_jfc_cfg_ex *cfg_ex) +{ + uint32_t size; + int ret = 0; + + if (!jfc->lock_free) + spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; + jfc->tid = dev->tid; + size = jfc->buf.entry_size * jfc->buf.entry_cnt; + + if (size != cfg_ex->cstm_cfg.cq.buff_size) { + dev_err(dev->dev, "cqe buff size is wrong, buf size = %u.\n", size); + return -EINVAL; + } + + jfc->buf.addr = (dma_addr_t)(uintptr_t)cfg_ex->cstm_cfg.cq.buff; + + if (jfc->buf.addr == 0) { + dev_err(dev->dev, "cq buff addr is wrong.\n"); + return -EINVAL; + } + + jfc->buf.kva = (void *)(uintptr_t)jfc->buf.addr; + + ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); + if (ret) { + dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); + return -ENOMEM; + } + + return ret; +} + +static struct ubcore_jfc *udma_create_jfc_ex(struct ubcore_device *ubcore_dev, + struct udma_jfc_cfg_ex *cfg_ex) +{ + struct udma_dev *dev = to_udma_dev(ubcore_dev); + struct ubcore_jfc_cfg *cfg = &cfg_ex->base_cfg; + unsigned long flags_store; + unsigned long flags_erase; + struct udma_jfc *jfc; + int ret; + + jfc = kzalloc(sizeof(struct udma_jfc), GFP_KERNEL); + if (!jfc) + return NULL; + + jfc->arm_sn = 1; + jfc->buf.entry_cnt = cfg->depth ? 
roundup_pow_of_two(cfg->depth) : cfg->depth; + + ret = udma_check_jfc_cfg(dev, jfc, &cfg_ex->base_cfg); + if (ret) + goto err_check_cfg; + + ret = udma_id_alloc_auto_grow(dev, &dev->jfc_table.ida_table, &jfc->jfcn); + if (ret) + goto err_alloc_jfc_id; + + udma_init_jfc_param(cfg, jfc); + jfc->base.ub_dev = ubcore_dev; + jfc->base.uctx = NULL; + jfc->base.jfae_handler = cfg_ex->jfae_handler; + jfc->base.jfce_handler = cfg_ex->jfce_handler; + jfc->mode = UDMA_KERNEL_STARS_JFC_TYPE; + + xa_lock_irqsave(&dev->jfc_table.xa, flags_store); + ret = xa_err(__xa_store(&dev->jfc_table.xa, jfc->jfcn, jfc, GFP_ATOMIC)); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_store); + if (ret) { + dev_err(dev->dev, + "failed to stored jfc id to jfc_table, jfcn: %u.\n", + jfc->jfcn); + goto err_store_jfcn; + } + + ret = udma_get_jfc_buf_ex(dev, jfc, cfg_ex); + if (ret) + goto err_get_jfc_buf; + + ret = udma_post_create_jfc_mbox(dev, jfc); + if (ret) + goto err_alloc_cqc; + + refcount_set(&jfc->event_refcount, 1); + + init_completion(&jfc->event_comp); + + if (dfx_switch) + udma_dfx_store_id(dev, &dev->dfx_info->jfc, jfc->jfcn, "jfc"); + + return &jfc->base; + +err_alloc_cqc: + udma_free_sw_db(dev, &jfc->db); +err_get_jfc_buf: + xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); + __xa_erase(&dev->jfc_table.xa, jfc->jfcn); + xa_unlock_irqrestore(&dev->jfc_table.xa, flags_erase); +err_store_jfcn: + udma_id_free(&dev->jfc_table.ida_table, jfc->jfcn); +err_alloc_jfc_id: +err_check_cfg: + kfree(jfc); + return NULL; +} + +static int udma_create_jfc_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_jfc_cfg_ex cfg_ex; + struct ubcore_jfc *jfc; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_jfc_cfg_ex)) || + udma_check_base_param(out->addr, out->len, sizeof(struct ubcore_jfc *))) { + dev_err(udev->dev, "input parameter invalid in create 
jfc, in_len = %u, out_len = %u.\n", + in->len, out->len); + return -EINVAL; + } + + memcpy(&cfg_ex, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct udma_jfc_cfg_ex))); + + jfc = udma_create_jfc_ex(dev, &cfg_ex); + if (jfc == NULL) + return -EFAULT; + + memcpy((void *)(uintptr_t)out->addr, &jfc, sizeof(struct ubcore_jfc *)); + + return 0; +} + +static int udma_delete_jfc_ops_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubcore_jfc *jfc; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct ubcore_jfc *))) { + dev_err(udev->dev, "parameter invalid in delete jfc, len = %u.\n", + in->len); + return -EINVAL; + } + + memcpy(&jfc, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct ubcore_jfc *))); + if (jfc == NULL) + return -EINVAL; + + if (udma_destroy_jfc(jfc)) + return -EFAULT; + + return 0; +} + +static int udma_set_cqe_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ex_jfc_addr *jfc_addr; + struct udma_set_cqe_ex cqe_ex; + uint32_t cq_depth; + + if (udma_check_base_param(in->addr, in->len, sizeof(struct udma_set_cqe_ex))) { + dev_err(udev->dev, "parameter invalid in set cqe, len = %u.\n", + in->len); + return -EINVAL; + } + + memcpy(&cqe_ex, (void *)(uintptr_t)in->addr, + min(in->len, sizeof(struct udma_set_cqe_ex))); + + if (cqe_ex.jfc_type != UDMA_STARS_JFC_TYPE && + cqe_ex.jfc_type != UDMA_CCU_JFC_TYPE) { + dev_err(udev->dev, "invalid jfc type, mode = %u.\n", cqe_ex.jfc_type); + return -EINVAL; + } + + if (cqe_ex.addr == 0) { + dev_err(udev->dev, "cq addr is wrong in set cqe.\n"); + return -EINVAL; + } + + cq_depth = cqe_ex.len / udev->caps.cqe_size; + if (cq_depth < UDMA_JFC_DEPTH_MIN || cq_depth > udev->caps.jfc.depth || + (cqe_ex.len % udev->caps.cqe_size) != 0 || 
+ cq_depth != roundup_pow_of_two(cq_depth)) { + dev_err(udev->dev, "cq buff size is wrong in set cqe, size = %u.\n", + cqe_ex.len); + return -EINVAL; + } + + jfc_addr = &udev->cq_addr_array[cqe_ex.jfc_type]; + jfc_addr->cq_addr = cqe_ex.addr; + jfc_addr->cq_len = cqe_ex.len; + + return 0; +} + static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { @@ -1084,6 +1286,9 @@ int udma_query_ae_aux_info(struct ubcore_device *dev, struct ubcore_ucontext *uc static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_CREATE_JFS_EX] = udma_create_jfs_ops_ex, [UDMA_USER_CTL_DELETE_JFS_EX] = udma_delete_jfs_ops_ex, + [UDMA_USER_CTL_CREATE_JFC_EX] = udma_create_jfc_ops_ex, + [UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex, + [UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, diff --git a/drivers/ub/urma/hw/udma/udma_jfc.h b/drivers/ub/urma/hw/udma/udma_jfc.h index 02b17b6011d2..6f62f33eccdf 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.h +++ b/drivers/ub/urma/hw/udma/udma_jfc.h @@ -189,6 +189,10 @@ int udma_modify_jfc(struct ubcore_jfc *ubcore_jfc, struct ubcore_jfc_attr *attr, struct ubcore_udata *udata); int udma_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); +int udma_check_jfc_cfg(struct udma_dev *dev, struct udma_jfc *jfc, + struct ubcore_jfc_cfg *cfg); +void udma_init_jfc_param(struct ubcore_jfc_cfg *cfg, struct udma_jfc *jfc); +int udma_post_create_jfc_mbox(struct udma_dev *dev, struct udma_jfc *jfc); void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev *udma_dev); #endif /* __UDMA_JFC_H__ */ -- Gitee From ab4801d9659b042222c7217cc65ba56fb1184b01 Mon Sep 17 
00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 12:00:22 +0800 Subject: [PATCH 062/126] ub: udma: Support query entity information. commit 097d0f314ccb55b25ceef209efd174cafc590864 openEuler This patch adds the ability to query entity information. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 9b4c306d4887..1a5c8926f5bc 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -478,6 +478,31 @@ static int udma_set_cqe_ex(struct ubcore_device *dev, struct ubcore_ucontext *uc return 0; } +static int udma_query_ue_info_ex(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct udma_ue_info_ex info = {}; + + if (udma_check_base_param(out->addr, out->len, sizeof(struct udma_ue_info_ex))) { + dev_err(udev->dev, "parameter invalid in query ue, len = %u.\n", + out->len); + return -EINVAL; + } + + info.chip_id = udev->chip_id; + info.die_id = udev->die_id; + info.dwqe_addr = udev->db_base + JETTY_DSQE_OFFSET; + info.db_base_addr = info.dwqe_addr + UDMA_DOORBELL_OFFSET; + info.ue_id = udev->ue_id; + info.register_base_addr = udev->db_base; + info.offset_len = PAGE_SIZE; + + memcpy((void *)(uintptr_t)out->addr, &info, sizeof(struct udma_ue_info_ex)); + + return 0; +} + static int udma_ctrlq_query_tp_sport(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) { @@ -1289,6 +1314,7 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_CREATE_JFC_EX] = udma_create_jfc_ops_ex, [UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex, [UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex, + [UDMA_USER_CTL_QUERY_UE_INFO] = 
udma_query_ue_info_ex, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, -- Gitee From 77bb3de647cd1f7c198daad924373b6db7e5f809 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 14:31:04 +0800 Subject: [PATCH 063/126] ub: udma: Support query resource ratio. commit 8eaec1c8a063361668fffcd1a7d9ae6345a51ae5 openEuler This patch adds the ability to query resource ratio. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 2 ++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 41 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 8 +++++ 3 files changed, 51 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 1a5c8926f5bc..2b6f24c80a6a 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1315,11 +1315,13 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_DELETE_JFC_EX] = udma_delete_jfc_ops_ex, [UDMA_USER_CTL_SET_CQE_ADDR] = udma_set_cqe_ex, [UDMA_USER_CTL_QUERY_UE_INFO] = udma_query_ue_info_ex, + [UDMA_USER_CTL_GET_DEV_RES_RATIO] = udma_get_dev_resource_ratio, [UDMA_USER_CTL_NPU_REGISTER_INFO_CB] = udma_register_npu_cb, [UDMA_USER_CTL_NPU_UNREGISTER_INFO_CB] = udma_unregister_npu_cb, [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = udma_query_pair_dev_count, }; static udma_user_ctl_ops g_udma_user_ctl_u_ops[] = { diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index fd499a89e131..043786f39b3d 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -740,3 +740,44 @@ int 
udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata); } + +int udma_query_pair_dev_count(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + struct ubase_bus_eid eid = {}; + uint32_t pair_device_num = 0; + int ret; + + if (out->addr == 0 || out->len != sizeof(pair_device_num)) { + dev_err(udev->dev, "query pair dev count, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + ret = ubase_get_bus_eid(udev->comdev.adev, &eid); + if (ret) { + dev_err(udev->dev, "get dev bus eid failed, ret is %d.\n", ret); + return ret; + } + + ctrlq_msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(eid); + ctrlq_msg.in = (void *)&eid; + ctrlq_msg.out_size = sizeof(pair_device_num); + ctrlq_msg.out = &pair_device_num; + ctrlq_msg.opcode = UDMA_CTRLQ_GET_DEV_RESOURCE_COUNT; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + + memcpy((void *)(uintptr_t)out->addr, &pair_device_num, sizeof(pair_device_num)); + + return ret; +} diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index e83cd3e94c56..7c7d1ad39b92 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -179,6 +179,12 @@ struct udma_dev_resource_ratio { uint32_t index; }; +int udma_query_pair_dev_count(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); + +int udma_get_dev_resource_ratio(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in 
*in, struct ubcore_user_ctl_out *out); + int udma_register_npu_cb(struct ubcore_device *dev, struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); @@ -192,6 +198,8 @@ int udma_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *tpid_c void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpid_table, bool is_need_flush); +int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, + struct ubcore_active_tp_cfg *active_cfg); int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, -- Gitee From 92df0eee52b744b846479df82cc4145abc029678 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Sat, 23 Aug 2025 14:51:50 +0800 Subject: [PATCH 064/126] ub: udma: Support query ub memory info. commit d0c38b53548dcda5ed5fbf1dcc28065b3482e227 openEuler This patch adds the ability to query ub memory info. In addition, this patch adds disassociate ucontext ops for urma.
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctl.c | 1 + drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 36 +++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 11 ++++++++ drivers/ub/urma/hw/udma/udma_main.c | 5 ++++ 4 files changed, 53 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index 2b6f24c80a6a..af0568f3ce74 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1321,6 +1321,7 @@ static udma_user_ctl_ops g_udma_user_ctl_k_ops[] = { [UDMA_USER_CTL_QUERY_TP_SPORT] = udma_ctrlq_query_tp_sport, [UDMA_USER_CTL_QUERY_CQE_AUX_INFO] = udma_query_cqe_aux_info, [UDMA_USER_CTL_QUERY_AE_AUX_INFO] = udma_query_ae_aux_info, + [UDMA_USER_CTL_QUERY_UBMEM_INFO] = udma_ctrlq_query_ubmem_info, [UDMA_USER_CTL_QUERY_PAIR_DEVNUM] = udma_query_pair_dev_count, }; diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 043786f39b3d..966dc7a41d94 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -592,6 +592,42 @@ static int udma_k_ctrlq_deactive_tp(struct udma_dev *udev, union ubcore_tp_handl return (ret == -EAGAIN) ? 
0 : ret; } +int udma_ctrlq_query_ubmem_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out) +{ +#define UDMA_CTRLQ_SER_TYPE_UBMEM 0x5 + struct udma_ctrlq_ubmem_out_query ubmem_info_out = {}; + struct udma_dev *udev = to_udma_dev(dev); + struct ubase_ctrlq_msg ctrlq_msg = {}; + uint32_t input_buf = 0; + int ret; + + if (out->addr == 0 || out->len != sizeof(struct udma_ctrlq_ubmem_out_query)) { + dev_err(udev->dev, "query ubmem info failed, addr is NULL:%d, len:%u.\n", + out->addr == 0, out->len); + return -EINVAL; + } + + ctrlq_msg.service_type = UDMA_CTRLQ_SER_TYPE_UBMEM; + ctrlq_msg.service_ver = UBASE_CTRLQ_SER_VER_01; + ctrlq_msg.need_resp = 1; + ctrlq_msg.in_size = sizeof(input_buf); + ctrlq_msg.in = (void *)&input_buf; + ctrlq_msg.out_size = sizeof(ubmem_info_out); + ctrlq_msg.out = &ubmem_info_out; + ctrlq_msg.opcode = UDMA_CTRLQ_QUERY_UBMEM_INFO; + + ret = ubase_ctrlq_send_msg(udev->comdev.adev, &ctrlq_msg); + if (ret) { + dev_err(udev->dev, "get dev res send ctrlq msg failed, ret is %d.\n", ret); + return ret; + } + + memcpy((void *)(uintptr_t)out->addr, &ubmem_info_out, sizeof(ubmem_info_out)); + + return ret; +} + int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, const struct ubcore_tp_attr_value *tp_attr, struct ubcore_udata *udata) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index 7c7d1ad39b92..bfa3ed44c381 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -14,6 +14,7 @@ #define UDMA_MAX_UE_IDX 256 #define UDMA_MAX_TPID_NUM 5 +#define UDMA_CTRLQ_UBMEM_INFO_NUM (96) #define UDMA_TPN_CNT_MASK 0x1F enum udma_ctrlq_cmd_code_type { @@ -28,6 +29,10 @@ enum udma_ctrlq_cmd_code_type { UDMA_CMD_CTRLQ_MAX }; +enum udma_ctrlq_ubmem_opcode { + UDMA_CTRLQ_QUERY_UBMEM_INFO = 0x1, +}; + enum udma_ctrlq_trans_type 
{ UDMA_CTRLQ_TRANS_TYPE_TP_RM = 0, UDMA_CTRLQ_TRANS_TYPE_CTP, @@ -151,6 +156,10 @@ struct udma_ue_idx_table { uint8_t ue_idx[UDMA_UE_NUM]; }; +struct udma_ctrlq_ubmem_out_query { + uint32_t data[UDMA_CTRLQ_UBMEM_INFO_NUM]; +}; + struct udma_ctrlq_tp_attr { uint32_t tp_attr_bitmap; struct ubcore_tp_attr_value tp_attr_value; @@ -200,6 +209,8 @@ void udma_ctrlq_destroy_tpid_list(struct udma_dev *dev, struct xarray *ctrlq_tpi bool is_need_flush); int udma_ctrlq_set_active_tp_ex(struct udma_dev *dev, struct ubcore_active_tp_cfg *active_cfg); +int udma_ctrlq_query_ubmem_info(struct ubcore_device *dev, struct ubcore_ucontext *uctx, + struct ubcore_user_ctl_in *in, struct ubcore_user_ctl_out *out); int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 7726d371476b..a6964f3ab878 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -187,6 +187,10 @@ static int udma_query_stats(struct ubcore_device *dev, struct ubcore_stats_key * return ret; } +static void udma_disassociate_ucontext(struct ubcore_ucontext *uctx) +{ +} + static struct ubcore_ops g_dev_ops = { .owner = THIS_MODULE, .abi_version = 0, @@ -245,6 +249,7 @@ static struct ubcore_ops g_dev_ops = { .poll_jfc = udma_poll_jfc, .query_stats = udma_query_stats, .query_ue_idx = udma_query_ue_idx, + .disassociate_ucontext = udma_disassociate_ucontext, }; static void udma_uninit_group_table(struct udma_dev *dev, struct udma_group_table *table) -- Gitee From 591ffb9afae0b366be53045da2db3003630358a5 Mon Sep 17 00:00:00 2001 From: JiaWei Kang Date: Tue, 9 Sep 2025 19:32:21 +0800 Subject: [PATCH 065/126] ub: udma: Support 2M hugepage function. commit 1ae22d037be802e750a85d5de87836795de2f37f openEuler This patch adds the ability to alloc and destroy huge page. 
Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 26 ++++- drivers/ub/urma/hw/udma/udma_common.h | 8 +- drivers/ub/urma/hw/udma/udma_ctx.c | 150 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_ctx.h | 6 ++ drivers/ub/urma/hw/udma/udma_db.c | 4 +- drivers/ub/urma/hw/udma/udma_def.h | 20 ++++ drivers/ub/urma/hw/udma/udma_dev.h | 5 + drivers/ub/urma/hw/udma/udma_jfc.c | 6 +- drivers/ub/urma/hw/udma/udma_jfr.c | 12 +-- drivers/ub/urma/hw/udma/udma_jfs.c | 6 +- 10 files changed, 225 insertions(+), 18 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 3ec53595bcf8..31b4d504c6e4 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -561,7 +561,7 @@ static void udma_unpin_k_addr(struct ubcore_umem *umem) udma_umem_release(umem, true); } -int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, +int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf) { size_t aligned_memory_size; @@ -593,7 +593,7 @@ int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, return 0; } -void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, +void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf) { udma_unpin_k_addr(buf->umem); @@ -701,3 +701,25 @@ void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size) for (i = 0; i < res_size; i++) res[i] = arr[res_size - i - 1]; } + +void udma_init_hugepage(struct udma_dev *dev) +{ + INIT_LIST_HEAD(&dev->hugepage_list); + mutex_init(&dev->hugepage_lock); +} + +void udma_destroy_hugepage(struct udma_dev *dev) +{ + struct udma_hugepage_priv *priv; + + mutex_lock(&dev->hugepage_lock); + list_for_each_entry(priv, &dev->hugepage_list, list) { + dev_info(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", + priv->va_len >> UDMA_HUGEPAGE_SHIFT); + 
udma_unpin_k_addr(priv->umem); + vfree(priv->va_base); + kfree(priv); + } + mutex_unlock(&dev->hugepage_lock); + mutex_destroy(&dev->hugepage_lock); +} diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 2bfededa6203..11497248de57 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -5,6 +5,7 @@ #define __UDMA_COMM_H__ #include +#include #include #include "udma_ctx.h" #include "udma_dev.h" @@ -324,8 +325,8 @@ void udma_dfx_store_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity uint32_t id, const char *name); void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entity, uint32_t id); -int udma_k_alloc_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); -void udma_k_free_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); +int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); +void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); @@ -368,4 +369,7 @@ void udma_dfx_ctx_print(struct udma_dev *udev, const char *name, uint32_t id, ui uint32_t *ctx); void udma_swap_endian(uint8_t arr[], uint8_t res[], uint32_t res_size); +void udma_init_hugepage(struct udma_dev *dev); +void udma_destroy_hugepage(struct udma_dev *dev); + #endif /* __UDMA_COMM_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 985abb19929a..71ad304bfa80 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -176,3 +176,153 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) return 0; } + +int udma_alloc_u_hugepage(struct udma_context *ctx, struct vm_area_struct *vma) 
+{ + uint32_t page_num = (vma->vm_end - vma->vm_start) >> UDMA_HUGEPAGE_SHIFT; + struct udma_hugepage_priv *priv; + int ret = -ENOMEM; + int i; + + mutex_lock(&ctx->dev->hugepage_lock); + if (page_num > ctx->dev->total_hugepage_num) { + dev_err(ctx->dev->dev, "insufficient resources for mmap.\n"); + mutex_unlock(&ctx->dev->hugepage_lock); + return -EINVAL; + } + ctx->dev->total_hugepage_num -= page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto err_alloc_priv; + + priv->page_num = page_num; + priv->pages = kcalloc(priv->page_num, sizeof(*priv->pages), GFP_KERNEL); + if (!priv->pages) + goto err_alloc_arr; + + for (i = 0; i < priv->page_num; i++) { + priv->pages[i] = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(UDMA_HUGEPAGE_SIZE)); + if (!priv->pages[i]) { + dev_err(ctx->dev->dev, "failed to alloc 2M pages.\n"); + goto err_alloc_pages; + } + ret = remap_pfn_range(vma, vma->vm_start + i * UDMA_HUGEPAGE_SIZE, + page_to_pfn(priv->pages[i]), UDMA_HUGEPAGE_SIZE, + vma->vm_page_prot); + if (ret) { + dev_err(ctx->dev->dev, "failed to remap_pfn_range, ret=%d.\n", ret); + goto err_remap_pfn_range; + } + } + + priv->va_base = (void *)vma->vm_start; + priv->va_len = priv->page_num << UDMA_HUGEPAGE_SHIFT; + priv->left_va_len = priv->va_len; + refcount_set(&priv->refcnt, 1); + + mutex_lock(&ctx->hugepage_lock); + list_add(&priv->list, &ctx->hugepage_list); + mutex_unlock(&ctx->hugepage_lock); + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "map_hugepage, 2m_page_num=%u.\n", + priv->page_num); + return 0; + +err_remap_pfn_range: +err_alloc_pages: + for (i = 0; i < priv->page_num; i++) { + if (priv->pages[i]) + __free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE)); + else + break; + } + kfree(priv->pages); +err_alloc_arr: + kfree(priv); +err_alloc_priv: + mutex_lock(&ctx->dev->hugepage_lock); + ctx->dev->total_hugepage_num += page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + + return ret; 
+} + +static struct udma_hugepage_priv *udma_list_find_before(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + + list_for_each_entry(priv, &ctx->hugepage_list, list) { + if (va >= priv->va_base && va < priv->va_base + priv->va_len) + return priv; + } + + return NULL; +} + +int udma_occupy_u_hugepage(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + + mutex_lock(&ctx->hugepage_lock); + priv = udma_list_find_before(ctx, va); + if (priv) { + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "occupy_hugepage.\n"); + refcount_inc(&priv->refcnt); + } + mutex_unlock(&ctx->hugepage_lock); + + return priv ? 0 : -EFAULT; +} + +void udma_return_u_hugepage(struct udma_context *ctx, void *va) +{ + struct udma_hugepage_priv *priv; + struct vm_area_struct *vma; + uint32_t i; + + mutex_lock(&ctx->hugepage_lock); + priv = udma_list_find_before(ctx, va); + if (!priv) { + mutex_unlock(&ctx->hugepage_lock); + dev_warn(ctx->dev->dev, "va is invalid addr.\n"); + return; + } + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "return_hugepage.\n"); + refcount_dec(&priv->refcnt); + if (!refcount_dec_if_one(&priv->refcnt)) { + mutex_unlock(&ctx->hugepage_lock); + return; + } + + list_del(&priv->list); + mutex_unlock(&ctx->hugepage_lock); + + if (current->mm) { + mmap_write_lock(current->mm); + vma = find_vma(current->mm, (unsigned long)priv->va_base); + if (vma != NULL && vma->vm_start <= (unsigned long)priv->va_base && + vma->vm_end >= (unsigned long)(priv->va_base + priv->va_len)) + zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len); + mmap_write_unlock(current->mm); + } else { + dev_warn(ctx->dev->dev, "current mm released.\n"); + } + + if (dfx_switch) + dev_info_ratelimited(ctx->dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", + priv->page_num); + mutex_lock(&ctx->dev->hugepage_lock); + for (i = 0; i < priv->page_num; i++) + __free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE)); + ctx->dev->total_hugepage_num 
+= priv->page_num; + mutex_unlock(&ctx->dev->hugepage_lock); + kfree(priv->pages); + kfree(priv); +} diff --git a/drivers/ub/urma/hw/udma/udma_ctx.h b/drivers/ub/urma/hw/udma/udma_ctx.h index a93aab94c1e9..2521d2de3108 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.h +++ b/drivers/ub/urma/hw/udma/udma_ctx.h @@ -16,6 +16,8 @@ struct udma_context { struct mutex pgdir_mutex; struct iommu_sva *sva; uint32_t tid; + struct mutex hugepage_lock; + struct list_head hugepage_list; }; static inline struct udma_context *to_udma_context(struct ubcore_ucontext *uctx) @@ -39,4 +41,8 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, int udma_free_ucontext(struct ubcore_ucontext *ucontext); int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma); +int udma_alloc_u_hugepage(struct udma_context *ctx, struct vm_area_struct *vma); +int udma_occupy_u_hugepage(struct udma_context *ctx, void *va); +void udma_return_u_hugepage(struct udma_context *ctx, void *va); + #endif /* __UDMA_CTX_H__ */ diff --git a/drivers/ub/urma/hw/udma/udma_db.c b/drivers/ub/urma/hw/udma/udma_db.c index ea7b5d98ee6b..c66d6b23b2e8 100644 --- a/drivers/ub/urma/hw/udma/udma_db.c +++ b/drivers/ub/urma/hw/udma/udma_db.c @@ -115,7 +115,7 @@ static struct udma_k_sw_db_page *udma_alloc_db_page(struct udma_dev *dev, bitmap_fill(page->bitmap, page->num_db); - ret = udma_k_alloc_buf(dev, PAGE_SIZE, &page->db_buf); + ret = udma_alloc_normal_buf(dev, PAGE_SIZE, &page->db_buf); if (ret) { dev_err(dev->dev, "Failed alloc db page buf, ret is %d.\n", ret); goto err_kva; @@ -165,7 +165,7 @@ void udma_free_sw_db(struct udma_dev *dev, struct udma_sw_db *db) set_bit(db->index, db->kpage->bitmap); if (bitmap_full(db->kpage->bitmap, db->kpage->num_db)) { - udma_k_free_buf(dev, PAGE_SIZE, &db->kpage->db_buf); + udma_free_normal_buf(dev, PAGE_SIZE, &db->kpage->db_buf); bitmap_free(db->kpage->bitmap); list_del(&db->kpage->list); kfree(db->kpage); diff --git a/drivers/ub/urma/hw/udma/udma_def.h 
b/drivers/ub/urma/hw/udma/udma_def.h index ca107e34a37c..b8d80fa7a98d 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -109,6 +109,24 @@ struct udma_sw_db_page { refcount_t refcount; }; +struct udma_hugepage_priv { + struct list_head list; + struct page **pages; + uint32_t page_num; + struct ubcore_umem *umem; + void *va_base; + uint32_t va_len; + uint32_t left_va_offset; + uint32_t left_va_len; + refcount_t refcnt; +}; + +struct udma_hugepage { + void *va_start; + uint32_t va_len; + struct udma_hugepage_priv *priv; +}; + struct udma_buf { dma_addr_t addr; union { @@ -123,6 +141,8 @@ struct udma_buf { uint32_t cnt_per_page_shift; struct xarray id_table_xa; struct mutex id_table_mutex; + bool is_hugepage; + struct udma_hugepage *hugepage; }; struct udma_k_sw_db_page { diff --git a/drivers/ub/urma/hw/udma/udma_dev.h b/drivers/ub/urma/hw/udma/udma_dev.h index f4ddf294b769..1f76ccb84c30 100644 --- a/drivers/ub/urma/hw/udma/udma_dev.h +++ b/drivers/ub/urma/hw/udma/udma_dev.h @@ -35,6 +35,8 @@ extern bool dump_aux_info; #define UDMA_HW_PAGE_SHIFT 12 #define UDMA_HW_PAGE_SIZE (1 << UDMA_HW_PAGE_SHIFT) +#define UDMA_HUGEPAGE_SHIFT 21 +#define UDMA_HUGEPAGE_SIZE (1 << UDMA_HUGEPAGE_SHIFT) #define UDMA_DEV_UE_NUM 47 @@ -147,6 +149,9 @@ struct udma_dev { u8 udma_sl[UDMA_MAX_SL_NUM]; int disable_ue_rx_count; struct mutex disable_ue_rx_mutex; + struct mutex hugepage_lock; + struct list_head hugepage_list; + uint32_t total_hugepage_num; }; #define UDMA_ERR_MSG_LEN 128 diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 12c2f143a376..2f3cb33af300 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -150,7 +150,7 @@ static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *u jfc->tid = dev->tid; size = jfc->buf.entry_size * jfc->buf.entry_cnt; - ret = udma_k_alloc_buf(dev, size, &jfc->buf); + ret = udma_alloc_normal_buf(dev, size, 
&jfc->buf); if (ret) { dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); return ret; @@ -159,7 +159,7 @@ static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *u ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); if (ret) { dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); - udma_k_free_buf(dev, size, &jfc->buf); + udma_free_normal_buf(dev, size, &jfc->buf); return -ENOMEM; } @@ -173,7 +173,7 @@ static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) if (jfc->buf.kva) { size = jfc->buf.entry_size * jfc->buf.entry_cnt; - udma_k_free_buf(dev, size, &jfc->buf); + udma_free_normal_buf(dev, size, &jfc->buf); } else if (jfc->buf.umem) { uctx = to_udma_context(jfc->base.uctx); unpin_queue_addr(jfc->buf.umem); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 5de0fc62c6e7..2790ab87982c 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -58,7 +58,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) jfr->rq.buf.entry_cnt = jfr->wqe_cnt; rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; - ret = udma_k_alloc_buf(dev, rqe_buf_size, &jfr->rq.buf); + ret = udma_alloc_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); if (ret) { dev_err(dev->dev, "failed to alloc rq buffer for jfr when buffer size = %u.\n", @@ -70,7 +70,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; - ret = udma_k_alloc_buf(dev, idx_buf_size, &jfr->idx_que.buf); + ret = udma_alloc_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); if (ret) { dev_err(dev->dev, "failed to alloc idx que buffer for jfr when buffer size = %u.\n", @@ -98,9 +98,9 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) err_alloc_db: kfree(jfr->rq.wrid); err_wrid: - udma_k_free_buf(dev, 
idx_buf_size, &jfr->idx_que.buf); + udma_free_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); err_idx_que: - udma_k_free_buf(dev, rqe_buf_size, &jfr->rq.buf); + udma_free_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); return -ENOMEM; } @@ -211,13 +211,13 @@ static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) if (jfr->rq.buf.kva) { size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size; - udma_k_free_buf(dev, size, &jfr->rq.buf); + udma_free_normal_buf(dev, size, &jfr->rq.buf); udma_free_sw_db(dev, &jfr->sw_db); } if (jfr->idx_que.buf.kva) { size = jfr->idx_que.buf.entry_cnt * jfr->idx_que.buf.entry_size; - udma_k_free_buf(dev, size, &jfr->idx_que.buf); + udma_free_normal_buf(dev, size, &jfr->idx_que.buf); udma_destroy_udma_table(dev, &jfr->idx_que.jfr_idx_table, "JFR_IDX"); } diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index a1a36a0a136f..fd4e6e025077 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -71,7 +71,7 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; - ret = udma_k_alloc_buf(dev, size, &sq->buf); + ret = udma_alloc_normal_buf(dev, size, &sq->buf); if (ret) { dev_err(dev->dev, "failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size); @@ -80,7 +80,7 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); if (!sq->wrid) { - udma_k_free_buf(dev, size, &sq->buf); + udma_free_normal_buf(dev, size, &sq->buf); dev_err(dev->dev, "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", sq->id, sq->buf.entry_cnt); @@ -99,7 +99,7 @@ void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) if (sq->buf.kva) { size = sq->buf.entry_cnt * sq->buf.entry_size; - udma_k_free_buf(dev, size, &sq->buf); + 
udma_free_normal_buf(dev, size, &sq->buf); kfree(sq->wrid); return; } -- Gitee From 4b0b7acbe3f4d55851552c82815e7d91f5962691 Mon Sep 17 00:00:00 2001 From: JiaWei Kang Date: Tue, 9 Sep 2025 20:37:38 +0800 Subject: [PATCH 066/126] ub: udma: jetty and rct support the hugepage buffer. commit 551b2ed34422a838904274d72789a2a99e29434a openEuler This patch adds the ability to jetty and rct can use the hugepage buffer. Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 147 ++++++++++++++++++++++++++ drivers/ub/urma/hw/udma/udma_common.h | 3 + drivers/ub/urma/hw/udma/udma_ctx.c | 138 ++++++++++++++++++------ drivers/ub/urma/hw/udma/udma_def.h | 2 + drivers/ub/urma/hw/udma/udma_jetty.c | 1 + drivers/ub/urma/hw/udma/udma_jfc.c | 87 ++++++++------- drivers/ub/urma/hw/udma/udma_jfr.c | 68 +++++++----- drivers/ub/urma/hw/udma/udma_jfs.c | 37 ++++--- drivers/ub/urma/hw/udma/udma_main.c | 31 +++++- drivers/ub/urma/hw/udma/udma_rct.c | 66 ++++++++---- include/uapi/ub/urma/udma/udma_abi.h | 10 +- 11 files changed, 451 insertions(+), 139 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 31b4d504c6e4..07d57a5ce96b 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -603,6 +603,153 @@ void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, buf->addr = 0; } +static struct udma_hugepage_priv * +udma_alloc_hugepage_priv(struct udma_dev *dev, uint32_t len) +{ + struct udma_hugepage_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + priv->va_len = ALIGN(len, UDMA_HUGEPAGE_SIZE); + if (priv->va_len >> UDMA_HUGEPAGE_SHIFT > dev->total_hugepage_num) { + dev_err(dev->dev, "insufficient resources for mmap.\n"); + goto err_vmalloc_huge; + } + + priv->left_va_len = priv->va_len; + priv->va_base = vmalloc_huge(priv->va_len, GFP_KERNEL); + if (!priv->va_base) { + 
dev_err(dev->dev, "failed to vmalloc_huge, size=%u.", priv->va_len); + goto err_vmalloc_huge; + } + memset(priv->va_base, 0, priv->va_len); + + priv->umem = udma_pin_k_addr(&dev->ub_dev, (uint64_t)priv->va_base, priv->va_len); + if (IS_ERR(priv->umem)) { + dev_err(dev->dev, "pin kernel buf failed.\n"); + goto err_pin; + } + + refcount_set(&priv->refcnt, 1); + list_add(&priv->list, &dev->hugepage_list); + dev->total_hugepage_num -= priv->va_len >> UDMA_HUGEPAGE_SHIFT; + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "map_hugepage, 2m_page_num=%u.\n", + priv->va_len >> UDMA_HUGEPAGE_SHIFT); + return priv; + +err_pin: + vfree(priv->va_base); +err_vmalloc_huge: + kfree(priv); + + return NULL; +} + +static struct udma_hugepage * +udma_alloc_hugepage(struct udma_dev *dev, uint32_t len) +{ + struct udma_hugepage_priv *priv = NULL; + struct udma_hugepage *hugepage; + bool b_reuse = false; + + hugepage = kzalloc(sizeof(*hugepage), GFP_KERNEL); + if (!hugepage) + return NULL; + + mutex_lock(&dev->hugepage_lock); + if (!list_empty(&dev->hugepage_list)) { + priv = list_first_entry(&dev->hugepage_list, struct udma_hugepage_priv, list); + b_reuse = len <= priv->left_va_len; + } + + if (b_reuse) { + refcount_inc(&priv->refcnt); + } else { + priv = udma_alloc_hugepage_priv(dev, len); + if (!priv) { + mutex_unlock(&dev->hugepage_lock); + kfree(hugepage); + return NULL; + } + } + + hugepage->va_start = priv->va_base + priv->left_va_offset; + hugepage->va_len = len; + hugepage->priv = priv; + priv->left_va_offset += len; + priv->left_va_len -= len; + mutex_unlock(&dev->hugepage_lock); + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "occupy_hugepage, 4k_page_num=%u.\n", + hugepage->va_len >> UDMA_HW_PAGE_SHIFT); + return hugepage; +} + +static void udma_free_hugepage(struct udma_dev *dev, struct udma_hugepage *hugepage) +{ + struct udma_hugepage_priv *priv = hugepage->priv; + + if (dfx_switch) + dev_info_ratelimited(dev->dev, "return_hugepage, 4k_page_num=%u.\n", + 
hugepage->va_len >> UDMA_HW_PAGE_SHIFT); + mutex_lock(&dev->hugepage_lock); + if (refcount_dec_and_test(&priv->refcnt)) { + if (dfx_switch) + dev_info_ratelimited(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", + priv->va_len >> UDMA_HUGEPAGE_SHIFT); + list_del(&priv->list); + dev->total_hugepage_num += priv->va_len >> UDMA_HUGEPAGE_SHIFT; + + udma_unpin_k_addr(priv->umem); + vfree(priv->va_base); + kfree(priv); + } else { + memset(hugepage->va_start, 0, hugepage->va_len); + } + mutex_unlock(&dev->hugepage_lock); + kfree(hugepage); +} + +int udma_k_alloc_buf(struct udma_dev *dev, struct udma_buf *buf) +{ + uint32_t size = buf->entry_size * buf->entry_cnt; + uint32_t hugepage_size; + int ret = 0; + + if (ubase_adev_prealloc_supported(dev->comdev.adev)) { + hugepage_size = ALIGN(size, UDMA_HW_PAGE_SIZE); + buf->hugepage = udma_alloc_hugepage(dev, hugepage_size); + if (buf->hugepage) { + buf->kva = buf->hugepage->va_start; + buf->addr = (uint64_t)buf->kva; + buf->is_hugepage = true; + } else { + dev_warn(dev->dev, + "failed to alloc hugepage buf, switch to alloc normal buf."); + ret = udma_alloc_normal_buf(dev, size, buf); + } + } else { + ret = udma_alloc_normal_buf(dev, size, buf); + } + + return ret; +} + +void udma_k_free_buf(struct udma_dev *dev, struct udma_buf *buf) +{ + uint32_t size = buf->entry_cnt * buf->entry_size; + + if (buf->is_hugepage) + udma_free_hugepage(dev, buf->hugepage); + else + udma_free_normal_buf(dev, size, buf); +} + void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr) { struct iova_slot *slot; diff --git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index 11497248de57..dee92a4186d3 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -46,6 +46,7 @@ struct udma_jetty_queue { uint32_t lock_free; /* Support kernel mode lock-free mode */ uint32_t ta_timeout; /* ms */ enum ubcore_jetty_state state; + struct udma_context *udma_ctx; 
bool non_pin; struct udma_jetty_grp *jetty_grp; enum udma_jetty_type jetty_type; @@ -327,6 +328,8 @@ void udma_dfx_delete_id(struct udma_dev *udma_dev, struct udma_dfx_entity *entit uint32_t id); int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); void udma_free_normal_buf(struct udma_dev *udma_dev, size_t memory_size, struct udma_buf *buf); +int udma_k_alloc_buf(struct udma_dev *dev, struct udma_buf *buf); +void udma_k_free_buf(struct udma_dev *dev, struct udma_buf *buf); void *udma_alloc_iova(struct udma_dev *udma_dev, size_t memory_size, dma_addr_t *addr); void udma_free_iova(struct udma_dev *udma_dev, size_t memory_size, void *kva_or_slot, dma_addr_t addr); diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 71ad304bfa80..5f60fca10d86 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -31,6 +31,7 @@ static int udma_init_ctx_resp(struct udma_dev *dev, struct ubcore_udrv_priv *udr resp.die_id = dev->die_id; resp.dump_aux_info = dump_aux_info; resp.jfr_sge = dev->caps.jfr_sge; + resp.hugepage_enable = ubase_adev_prealloc_supported(dev->comdev.adev); byte = copy_to_user((void *)(uintptr_t)udrv_data->out_addr, &resp, (uint32_t)sizeof(resp)); @@ -70,6 +71,8 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, ctx->dev = dev; INIT_LIST_HEAD(&ctx->pgdir_list); mutex_init(&ctx->pgdir_mutex); + INIT_LIST_HEAD(&ctx->hugepage_list); + mutex_init(&ctx->hugepage_lock); ret = udma_init_ctx_resp(dev, udrv_data); if (ret) { @@ -91,8 +94,11 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, int udma_free_ucontext(struct ubcore_ucontext *ucontext) { struct udma_dev *udma_dev = to_udma_dev(ucontext->ub_dev); + struct udma_hugepage_priv *priv; + struct vm_area_struct *vma; struct udma_context *ctx; int ret; + int i; ctx = to_udma_context(ucontext); @@ -103,20 +109,109 @@ int udma_free_ucontext(struct 
ubcore_ucontext *ucontext) mutex_destroy(&ctx->pgdir_mutex); ummu_sva_unbind_device(ctx->sva); + mutex_lock(&ctx->hugepage_lock); + list_for_each_entry(priv, &ctx->hugepage_list, list) { + if (current->mm) { + mmap_write_lock(current->mm); + vma = find_vma(current->mm, (unsigned long)priv->va_base); + if (vma != NULL && vma->vm_start <= (unsigned long)priv->va_base && + vma->vm_end >= (unsigned long)(priv->va_base + priv->va_len)) + zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len); + mmap_write_unlock(current->mm); + } + + dev_info(udma_dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", priv->page_num); + for (i = 0; i < priv->page_num; i++) + __free_pages(priv->pages[i], get_order(UDMA_HUGEPAGE_SIZE)); + kfree(priv->pages); + kfree(priv); + } + mutex_unlock(&ctx->hugepage_lock); + mutex_destroy(&ctx->hugepage_lock); + kfree(ctx); return 0; } -int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) +static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct ubcore_ucontext *uctx, + struct vm_area_struct *vma) { -#define JFC_DB_UNMAP_BOUND 1 - struct udma_dev *udma_dev = to_udma_dev(uctx->ub_dev); struct ubcore_ucontext *jetty_uctx; struct udma_jetty_queue *sq; - resource_size_t db_addr; uint64_t address; uint64_t j_id; + + j_id = get_mmap_idx(vma); + + xa_lock(&dev->jetty_table.xa); + sq = xa_load(&dev->jetty_table.xa, j_id); + if (!sq) { + dev_err(dev->dev, + "mmap failed, j_id: %llu not exist\n", j_id); + xa_unlock(&dev->jetty_table.xa); + return -EINVAL; + } + + if (sq->is_jetty) + jetty_uctx = to_udma_jetty_from_queue(sq)->ubcore_jetty.uctx; + else + jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; + + if (jetty_uctx != uctx) { + dev_err(dev->dev, + "mmap failed, j_id: %llu, uctx invalid\n", j_id); + xa_unlock(&dev->jetty_table.xa); + return -EINVAL; + } + xa_unlock(&dev->jetty_table.xa); + + address = (uint64_t)dev->db_base + JETTY_DSQE_OFFSET + j_id * UDMA_HW_PAGE_SIZE; + + if (io_remap_pfn_range(vma, vma->vm_start, address 
>> PAGE_SHIFT, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static int udma_mmap_hugepage(struct udma_dev *dev, struct ubcore_ucontext *uctx, + struct vm_area_struct *vma) +{ + uint32_t max_map_size = dev->caps.cqe_size * dev->caps.jfc.depth; + uint32_t map_size = vma->vm_end - vma->vm_start; + + if (!IS_ALIGNED(map_size, UDMA_HUGEPAGE_SIZE)) { + dev_err(dev->dev, "mmap size is not 2m alignment.\n"); + return -EINVAL; + } + + if (map_size == 0) { + dev_err(dev->dev, "mmap size is zero.\n"); + return -EINVAL; + } + + if (map_size > max_map_size) { + dev_err(dev->dev, "mmap size(%u) is greater than the max_size.\n", + map_size); + return -EINVAL; + } + + vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY); + vma->vm_page_prot = __pgprot(((~PTE_ATTRINDX_MASK) & vma->vm_page_prot.pgprot) | + PTE_ATTRINDX(MT_NORMAL)); + if (udma_alloc_u_hugepage(to_udma_context(uctx), vma)) { + dev_err(dev->dev, "failed to alloc hugepage.\n"); + return -ENOMEM; + } + + return 0; +} + +int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) +{ +#define JFC_DB_UNMAP_BOUND 1 + struct udma_dev *udma_dev = to_udma_dev(uctx->ub_dev); uint32_t cmd; if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0) { @@ -125,7 +220,6 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) return -EINVAL; } - db_addr = udma_dev->db_base; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); cmd = get_mmap_cmd(vma); @@ -133,41 +227,15 @@ int udma_mmap(struct ubcore_ucontext *uctx, struct vm_area_struct *vma) case UDMA_MMAP_JFC_PAGE: if (io_remap_pfn_range(vma, vma->vm_start, jfc_arm_mode > JFC_DB_UNMAP_BOUND ? 
- (uint64_t)db_addr >> PAGE_SHIFT : + (uint64_t)udma_dev->db_base >> PAGE_SHIFT : page_to_pfn(udma_dev->db_page), PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; break; case UDMA_MMAP_JETTY_DSQE: - j_id = get_mmap_idx(vma); - xa_lock(&udma_dev->jetty_table.xa); - sq = xa_load(&udma_dev->jetty_table.xa, j_id); - if (!sq) { - dev_err(udma_dev->dev, - "mmap failed, j_id: %llu not exist\n", j_id); - xa_unlock(&udma_dev->jetty_table.xa); - return -EINVAL; - } - - if (sq->is_jetty) - jetty_uctx = to_udma_jetty_from_queue(sq)->ubcore_jetty.uctx; - else - jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; - - if (jetty_uctx != uctx) { - dev_err(udma_dev->dev, - "mmap failed, j_id: %llu, uctx invalid\n", j_id); - xa_unlock(&udma_dev->jetty_table.xa); - return -EINVAL; - } - xa_unlock(&udma_dev->jetty_table.xa); - - address = (uint64_t)db_addr + JETTY_DSQE_OFFSET + j_id * UDMA_HW_PAGE_SIZE; - - if (io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) - return -EAGAIN; - break; + return udma_mmap_jetty_dsqe(udma_dev, uctx, vma); + case UDMA_MMAP_HUGEPAGE: + return udma_mmap_hugepage(udma_dev, uctx, vma); default: dev_err(udma_dev->dev, "mmap failed, cmd(%u) not support\n", cmd); diff --git a/drivers/ub/urma/hw/udma/udma_def.h b/drivers/ub/urma/hw/udma/udma_def.h index b8d80fa7a98d..0681f6dd950d 100644 --- a/drivers/ub/urma/hw/udma/udma_def.h +++ b/drivers/ub/urma/hw/udma/udma_def.h @@ -63,6 +63,8 @@ struct udma_caps { uint16_t rc_queue_num; uint16_t rc_queue_depth; uint8_t rc_entry_size; + uint64_t rc_dma_len; + dma_addr_t rc_dma_addr; uint8_t ack_queue_num; uint8_t port_num; uint8_t cqe_size; diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index 2bc86bdb2421..c3f3f9a90fb3 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -66,6 +66,7 @@ static int udma_get_user_jetty_cmd(struct udma_dev *dev, struct udma_jetty *jett } uctx = 
to_udma_context(udata->uctx); + jetty->sq.udma_ctx = uctx; jetty->sq.tid = uctx->tid; jetty->jetty_addr = ucmd->jetty_addr; jetty->pi_type = ucmd->pi_type; diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 2f3cb33af300..92c9fcbaae9f 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -120,70 +120,83 @@ static int udma_get_cmd_from_user(struct udma_create_jfc_ucmd *ucmd, return 0; } -static int udma_get_jfc_buf(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, - struct ubcore_udata *udata, struct udma_jfc *jfc) +static int udma_alloc_u_cq(struct udma_dev *dev, struct udma_create_jfc_ucmd *ucmd, + struct udma_jfc *jfc) { - struct udma_context *uctx; - uint32_t size; - int ret = 0; + int ret; - if (udata) { + if (ucmd->is_hugepage) { + jfc->buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(jfc->ctx, (void *)jfc->buf.addr)) { + dev_err(dev->dev, "failed to create cq, va not map.\n"); + return -EINVAL; + } + jfc->buf.is_hugepage = true; + } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfc->buf); if (ret) { dev_err(dev->dev, "failed to pin queue for jfc, ret = %d.\n", ret); return ret; } - uctx = to_udma_context(udata->uctx); - jfc->tid = uctx->tid; - ret = udma_pin_sw_db(uctx, &jfc->db); - if (ret) { - dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); - unpin_queue_addr(jfc->buf.umem); - } + } + jfc->tid = jfc->ctx->tid; - return ret; + ret = udma_pin_sw_db(jfc->ctx, &jfc->db); + if (ret) { + dev_err(dev->dev, "failed to pin sw db for jfc, ret = %d.\n", ret); + goto err_pin_db; } + return 0; +err_pin_db: + if (ucmd->is_hugepage) + udma_return_u_hugepage(jfc->ctx, (void *)jfc->buf.addr); + else + unpin_queue_addr(jfc->buf.umem); + + return ret; +} + +static int udma_alloc_k_cq(struct udma_dev *dev, struct udma_jfc *jfc) +{ + int ret; + if (!jfc->lock_free) spin_lock_init(&jfc->lock); + jfc->buf.entry_size = dev->caps.cqe_size; jfc->tid = 
dev->tid; - size = jfc->buf.entry_size * jfc->buf.entry_cnt; - - ret = udma_alloc_normal_buf(dev, size, &jfc->buf); + ret = udma_k_alloc_buf(dev, &jfc->buf); if (ret) { - dev_err(dev->dev, "failed to alloc buffer for jfc.\n"); + dev_err(dev->dev, "failed to alloc cq buffer, id=%u.\n", jfc->jfcn); return ret; } ret = udma_alloc_sw_db(dev, &jfc->db, UDMA_JFC_TYPE_DB); if (ret) { dev_err(dev->dev, "failed to alloc sw db for jfc(%u).\n", jfc->jfcn); - udma_free_normal_buf(dev, size, &jfc->buf); - return -ENOMEM; + udma_k_free_buf(dev, &jfc->buf); } return ret; } -static void udma_free_jfc_buf(struct udma_dev *dev, struct udma_jfc *jfc) +static void udma_free_cq(struct udma_dev *dev, struct udma_jfc *jfc) { - struct udma_context *uctx; - uint32_t size; - - if (jfc->buf.kva) { - size = jfc->buf.entry_size * jfc->buf.entry_cnt; - udma_free_normal_buf(dev, size, &jfc->buf); - } else if (jfc->buf.umem) { - uctx = to_udma_context(jfc->base.uctx); - unpin_queue_addr(jfc->buf.umem); + if (jfc->mode != UDMA_NORMAL_JFC_TYPE) { + udma_free_sw_db(dev, &jfc->db); + return; } - if (jfc->db.page) { - uctx = to_udma_context(jfc->base.uctx); - udma_unpin_sw_db(uctx, &jfc->db); - } else if (jfc->db.kpage) { + if (jfc->buf.kva) { + udma_k_free_buf(dev, &jfc->buf); udma_free_sw_db(dev, &jfc->db); + } else { + if (jfc->buf.is_hugepage) + udma_return_u_hugepage(jfc->ctx, (void *)jfc->buf.addr); + else + unpin_queue_addr(jfc->buf.umem); + udma_unpin_sw_db(jfc->ctx, &jfc->db); } } @@ -369,7 +382,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, goto err_store_jfcn; } - ret = udma_get_jfc_buf(dev, &ucmd, udata, jfc); + ret = udata ? udma_alloc_u_cq(dev, &ucmd, jfc) : udma_alloc_k_cq(dev, jfc); if (ret) goto err_get_jfc_buf; @@ -387,7 +400,7 @@ struct ubcore_jfc *udma_create_jfc(struct ubcore_device *ubcore_dev, err_alloc_cqc: jfc->base.uctx = (udata == NULL ? 
NULL : udata->uctx); - udma_free_jfc_buf(dev, jfc); + udma_free_cq(dev, jfc); err_get_jfc_buf: xa_lock_irqsave(&dev->jfc_table.xa, flags_erase); __xa_erase(&dev->jfc_table.xa, jfc->jfcn); @@ -497,7 +510,7 @@ int udma_destroy_jfc(struct ubcore_jfc *jfc) if (dfx_switch) udma_dfx_delete_id(dev, &dev->dfx_info->jfc, jfc->id); - udma_free_jfc_buf(dev, ujfc); + udma_free_cq(dev, ujfc); udma_id_free(&dev->jfc_table.ida_table, ujfc->jfcn); kfree(ujfc); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 2790ab87982c..6bfc135fa846 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -48,28 +48,20 @@ static int udma_verify_jfr_param(struct udma_dev *dev, static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) { - uint32_t rqe_buf_size; uint32_t idx_buf_size; - uint32_t sge_per_wqe; int ret; - sge_per_wqe = min(jfr->max_sge, dev->caps.jfr_sge); - jfr->rq.buf.entry_size = UDMA_SGE_SIZE * sge_per_wqe; + jfr->rq.buf.entry_size = UDMA_SGE_SIZE * min(jfr->max_sge, dev->caps.jfr_sge); jfr->rq.buf.entry_cnt = jfr->wqe_cnt; - rqe_buf_size = jfr->rq.buf.entry_size * jfr->rq.buf.entry_cnt; - - ret = udma_alloc_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); + ret = udma_k_alloc_buf(dev, &jfr->rq.buf); if (ret) { - dev_err(dev->dev, - "failed to alloc rq buffer for jfr when buffer size = %u.\n", - rqe_buf_size); + dev_err(dev->dev, "failed to alloc rq buffer, id=%u.\n", jfr->rq.id); return ret; } jfr->idx_que.buf.entry_size = UDMA_IDX_QUE_ENTRY_SZ; jfr->idx_que.buf.entry_cnt = jfr->wqe_cnt; idx_buf_size = jfr->idx_que.buf.entry_size * jfr->idx_que.buf.entry_cnt; - ret = udma_alloc_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); if (ret) { dev_err(dev->dev, @@ -100,24 +92,22 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) err_wrid: udma_free_normal_buf(dev, idx_buf_size, &jfr->idx_que.buf); err_idx_que: - udma_free_normal_buf(dev, rqe_buf_size, &jfr->rq.buf); + 
udma_k_free_buf(dev, &jfr->rq.buf); return -ENOMEM; } -static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, - struct ubcore_udata *udata, +static int udma_jfr_get_u_cmd(struct udma_dev *dev, struct ubcore_udata *udata, struct udma_create_jetty_ucmd *ucmd) { unsigned long byte; - int ret; if (!udata->udrv_data) { dev_err(dev->dev, "jfr udata udrv_data is null.\n"); return -EINVAL; } - if (!udata->udrv_data->in_addr || udata->udrv_data->in_len < sizeof(*ucmd)) { + if (!udata->udrv_data->in_addr || udata->udrv_data->in_len != sizeof(*ucmd)) { dev_err(dev->dev, "jfr in_len %u or addr is invalid.\n", udata->udrv_data->in_len); return -EINVAL; @@ -131,14 +121,41 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, return -EFAULT; } - if (!ucmd->non_pin) { + return 0; +} + +static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, + struct ubcore_udata *udata, + struct udma_create_jetty_ucmd *ucmd) +{ + int ret; + + ret = udma_jfr_get_u_cmd(dev, udata, ucmd); + if (ret) + return ret; + + jfr->udma_ctx = to_udma_context(udata->uctx); + if (ucmd->non_pin) { + jfr->rq.buf.addr = ucmd->buf_addr; + } else if (ucmd->is_hugepage) { + jfr->rq.buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr)) { + dev_err(dev->dev, "failed to create rq, va not map.\n"); + return -EINVAL; + } + jfr->rq.buf.is_hugepage = true; + } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &jfr->rq.buf); if (ret) { dev_err(dev->dev, "failed to pin jfr rqe buf addr, ret = %d.\n", ret); return ret; } + } + if (ucmd->non_pin) { + jfr->idx_que.buf.addr = ucmd->idx_addr; + } else { ret = pin_queue_addr(dev, ucmd->idx_addr, ucmd->idx_len, &jfr->idx_que.buf); if (ret) { @@ -146,12 +163,8 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, "failed to pin jfr idx que addr, ret = %d.\n", ret); goto err_pin_idx_buf; } - } else { - jfr->rq.buf.addr = ucmd->buf_addr; - 
jfr->idx_que.buf.addr = ucmd->idx_addr; } - jfr->udma_ctx = to_udma_context(udata->uctx); jfr->sw_db.db_addr = ucmd->db_addr; jfr->jfr_sleep_buf.db_addr = ucmd->jfr_sleep_buf; @@ -181,7 +194,10 @@ static int udma_get_u_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr, err_pin_sw_db: unpin_queue_addr(jfr->idx_que.buf.umem); err_pin_idx_buf: - unpin_queue_addr(jfr->rq.buf.umem); + if (ucmd->is_hugepage) + udma_return_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr); + else + unpin_queue_addr(jfr->rq.buf.umem); return ret; } @@ -205,13 +221,15 @@ static void udma_put_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) udma_unpin_sw_db(jfr->udma_ctx, &jfr->jfr_sleep_buf); udma_unpin_sw_db(jfr->udma_ctx, &jfr->sw_db); unpin_queue_addr(jfr->idx_que.buf.umem); - unpin_queue_addr(jfr->rq.buf.umem); + if (jfr->rq.buf.is_hugepage) + udma_return_u_hugepage(jfr->udma_ctx, (void *)jfr->rq.buf.addr); + else + unpin_queue_addr(jfr->rq.buf.umem); return; } if (jfr->rq.buf.kva) { - size = jfr->rq.buf.entry_cnt * jfr->rq.buf.entry_size; - udma_free_normal_buf(dev, size, &jfr->rq.buf); + udma_k_free_buf(dev, &jfr->rq.buf); udma_free_sw_db(dev, &jfr->sw_db); } diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index fd4e6e025077..7277db44da12 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -21,7 +21,7 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, struct udma_create_jetty_ucmd *ucmd) { - int ret; + int ret = 0; if (ucmd->sqe_bb_cnt == 0 || ucmd->buf_len == 0) { dev_err(dev->dev, "invalid param, sqe_bb_cnt=%u, buf_len=%u.\n", @@ -33,17 +33,22 @@ int udma_alloc_u_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, sq->buf.entry_cnt = ucmd->buf_len >> WQE_BB_SIZE_SHIFT; if (sq->non_pin) { sq->buf.addr = ucmd->buf_addr; + } else if (ucmd->is_hugepage) { + sq->buf.addr = ucmd->buf_addr; + if (udma_occupy_u_hugepage(sq->udma_ctx, (void *)sq->buf.addr)) { + 
dev_err(dev->dev, "failed to create sq, va not map.\n"); + return -EINVAL; + } + sq->buf.is_hugepage = true; } else { ret = pin_queue_addr(dev, ucmd->buf_addr, ucmd->buf_len, &sq->buf); if (ret) { - dev_err(dev->dev, - "failed to pin jetty/jfs queue addr, ret = %d.\n", - ret); + dev_err(dev->dev, "failed to pin sq, ret = %d.\n", ret); return ret; } } - return 0; + return ret; } int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, @@ -71,19 +76,18 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, size = ALIGN(wqe_bb_depth * sq->buf.entry_size, UDMA_HW_PAGE_SIZE); sq->buf.entry_cnt = size >> WQE_BB_SIZE_SHIFT; - ret = udma_alloc_normal_buf(dev, size, &sq->buf); + ret = udma_k_alloc_buf(dev, &sq->buf); if (ret) { - dev_err(dev->dev, - "failed to alloc jetty (%u) sq buf when size = %u.\n", sq->id, size); + dev_err(dev->dev, "failed to alloc sq buffer, id=%u.\n", sq->id); return ret; } sq->wrid = kcalloc(1, sq->buf.entry_cnt * sizeof(uint64_t), GFP_KERNEL); if (!sq->wrid) { - udma_free_normal_buf(dev, size, &sq->buf); dev_err(dev->dev, "failed to alloc wrid for jfs id = %u when entry cnt = %u.\n", sq->id, sq->buf.entry_cnt); + udma_k_free_buf(dev, &sq->buf); return -ENOMEM; } @@ -95,18 +99,20 @@ int udma_alloc_k_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq, void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) { - uint32_t size; - if (sq->buf.kva) { - size = sq->buf.entry_cnt * sq->buf.entry_size; - udma_free_normal_buf(dev, size, &sq->buf); + udma_k_free_buf(dev, &sq->buf); kfree(sq->wrid); return; } + if (sq->non_pin) return; - unpin_queue_addr(sq->buf.umem); + if (sq->buf.is_hugepage) { + udma_return_u_hugepage(sq->udma_ctx, (void *)sq->buf.addr); + } else { + unpin_queue_addr(sq->buf.umem); + } } void udma_init_jfsc(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, @@ -277,6 +283,7 @@ static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, } uctx = 
to_udma_context(udata->uctx); + jfs->sq.udma_ctx = uctx; jfs->sq.tid = uctx->tid; jfs->jfs_addr = ucmd->jetty_addr; jfs->pi_type = ucmd->pi_type; @@ -292,7 +299,7 @@ static int udma_get_user_jfs_cmd(struct udma_dev *dev, struct udma_jfs *jfs, } static int udma_alloc_jfs_sq(struct udma_dev *dev, struct ubcore_jfs_cfg *cfg, - struct udma_jfs *jfs, struct ubcore_udata *udata) + struct udma_jfs *jfs, struct ubcore_udata *udata) { struct udma_create_jetty_ucmd ucmd = {}; int ret; diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index a6964f3ab878..cbf773d01c48 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -487,7 +487,6 @@ static void udma_get_jetty_id_range(struct udma_dev *udma_dev, static int query_caps_from_firmware(struct udma_dev *udma_dev) { -#define RC_QUEUE_ENTRY_SIZE 128 struct udma_cmd_ue_resource cmd = {}; int ret; @@ -514,10 +513,6 @@ static int query_caps_from_firmware(struct udma_dev *udma_dev) udma_get_jetty_id_range(udma_dev, &cmd); - udma_dev->caps.rc_queue_num = cmd.rc_queue_num; - udma_dev->caps.rc_queue_depth = cmd.rc_depth; - udma_dev->caps.rc_entry_size = RC_QUEUE_ENTRY_SIZE; - udma_dev->caps.feature = cmd.cap_info; udma_dev->caps.ue_cnt = cmd.ue_cnt >= UDMA_DEV_UE_NUM ? 
UDMA_DEV_UE_NUM - 1 : cmd.ue_cnt; @@ -581,9 +576,24 @@ static int udma_construct_qos_param(struct udma_dev *dev) return 0; } +static void cal_max_2m_num(struct udma_dev *dev) +{ + uint32_t jfs_pg = ALIGN(dev->caps.jfs.depth * MAX_WQEBB_IN_SQE * + UDMA_JFS_WQEBB_SIZE, UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + uint32_t jfr_pg = ALIGN(dev->caps.jfr.depth * dev->caps.jfr_sge * + UDMA_SGE_SIZE, UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + uint32_t jfc_pg = ALIGN(dev->caps.jfc.depth * dev->caps.cqe_size, + UDMA_HUGEPAGE_SIZE) >> UDMA_HUGEPAGE_SHIFT; + + dev->total_hugepage_num = + (dev->caps.jetty.start_idx + dev->caps.jetty.max_cnt) * jfs_pg + + dev->caps.jfr.max_cnt * jfr_pg + dev->caps.jfc.max_cnt * jfc_pg; +} + static int udma_set_hw_caps(struct udma_dev *udma_dev) { #define MAX_MSG_LEN 0x10000 +#define RC_QUEUE_ENTRY_SIZE 64 struct ubase_adev_caps *a_caps; uint32_t jetty_grp_cnt; int ret; @@ -609,6 +619,14 @@ static int udma_set_hw_caps(struct udma_dev *udma_dev) udma_dev->caps.jetty.start_idx = a_caps->jfs.start_idx; udma_dev->caps.jetty.next_idx = udma_dev->caps.jetty.start_idx; udma_dev->caps.cqe_size = UDMA_CQE_SIZE; + udma_dev->caps.rc_queue_num = a_caps->rc_max_cnt; + udma_dev->caps.rc_queue_depth = a_caps->rc_que_depth; + udma_dev->caps.rc_entry_size = RC_QUEUE_ENTRY_SIZE; + udma_dev->caps.rc_dma_len = a_caps->pmem.dma_len; + udma_dev->caps.rc_dma_addr = a_caps->pmem.dma_addr; + + cal_max_2m_num(udma_dev); + ret = udma_construct_qos_param(udma_dev); if (ret) return ret; @@ -657,11 +675,14 @@ static int udma_init_dev_param(struct udma_dev *udma_dev) for (i = 0; i < UDMA_DB_TYPE_NUM; i++) INIT_LIST_HEAD(&udma_dev->db_list[i]); + udma_init_hugepage(udma_dev); + return 0; } static void udma_uninit_dev_param(struct udma_dev *udma_dev) { + udma_destroy_hugepage(udma_dev); mutex_destroy(&udma_dev->db_mutex); dev_set_drvdata(&udma_dev->comdev.adev->dev, NULL); udma_destroy_tables(udma_dev); diff --git a/drivers/ub/urma/hw/udma/udma_rct.c 
b/drivers/ub/urma/hw/udma/udma_rct.c index 599c80c118fd..ee11d3ef3ee9 100644 --- a/drivers/ub/urma/hw/udma/udma_rct.c +++ b/drivers/ub/urma/hw/udma/udma_rct.c @@ -51,13 +51,50 @@ static int udma_destroy_rc_queue_ctx(struct udma_dev *dev, struct udma_rc_queue return ret; } +static int udma_alloc_rct_buffer(struct udma_dev *dev, struct ubcore_device_cfg *cfg, + struct udma_rc_queue *rcq) +{ + uint32_t rct_buffer_size = dev->caps.rc_entry_size * cfg->rc_cfg.depth; + uint32_t buf_num_per_hugepage; + + rcq->buf.entry_size = dev->caps.rc_entry_size; + rcq->buf.entry_cnt = cfg->rc_cfg.depth; + if (ubase_adev_prealloc_supported(dev->comdev.adev)) { + rct_buffer_size = ALIGN(rct_buffer_size, PAGE_SIZE); + if (rct_buffer_size > UDMA_HUGEPAGE_SIZE) { + rcq->buf.addr = dev->caps.rc_dma_addr + rcq->id * rct_buffer_size; + } else { + buf_num_per_hugepage = UDMA_HUGEPAGE_SIZE / rct_buffer_size; + rcq->buf.addr = dev->caps.rc_dma_addr + + rcq->id / buf_num_per_hugepage * UDMA_HUGEPAGE_SIZE + + rcq->id % buf_num_per_hugepage * rct_buffer_size; + } + } else { + rcq->buf.kva_or_slot = udma_alloc_iova(dev, rct_buffer_size, &rcq->buf.addr); + if (!rcq->buf.kva_or_slot) { + dev_err(dev->dev, "failed to alloc rct buffer.\n"); + return -ENOMEM; + } + } + + return 0; +} + +static void udma_free_rct_buffer(struct udma_dev *dev, struct udma_rc_queue *rcq) +{ + uint32_t rct_buffer_size = rcq->buf.entry_size * rcq->buf.entry_cnt; + + if (!ubase_adev_prealloc_supported(dev->comdev.adev)) { + udma_free_iova(dev, rct_buffer_size, rcq->buf.kva_or_slot, rcq->buf.addr); + rcq->buf.kva_or_slot = NULL; + rcq->buf.addr = 0; + } +} + static int udma_alloc_rc_queue(struct udma_dev *dev, struct ubcore_device_cfg *cfg, int rc_queue_id) { - uint32_t rcq_entry_size = dev->caps.rc_entry_size; - uint32_t rcq_entry_num = cfg->rc_cfg.depth; struct udma_rc_queue *rcq; - uint32_t size; int ret; rcq = kzalloc(sizeof(struct udma_rc_queue), GFP_KERNEL); @@ -65,15 +102,9 @@ static int udma_alloc_rc_queue(struct 
udma_dev *dev, return -ENOMEM; rcq->id = rc_queue_id; - size = rcq_entry_size * rcq_entry_num; - rcq->buf.kva_or_slot = udma_alloc_iova(dev, size, &rcq->buf.addr); - if (!rcq->buf.kva_or_slot) { - ret = -ENOMEM; - dev_err(dev->dev, "failed to alloc rc queue buffer.\n"); - goto err_alloc_rcq; - } - rcq->buf.entry_size = rcq_entry_size; - rcq->buf.entry_cnt = rcq_entry_num; + ret = udma_alloc_rct_buffer(dev, cfg, rcq); + if (ret) + goto err_alloc_rct_buffer; ret = udma_create_rc_queue_ctx(dev, rcq); if (ret) { @@ -101,10 +132,8 @@ static int udma_alloc_rc_queue(struct udma_dev *dev, dev_err(dev->dev, "udma destroy rc queue ctx failed when alloc rc queue.\n"); err_create_rcq_ctx: - udma_free_iova(dev, size, rcq->buf.kva_or_slot, rcq->buf.addr); - rcq->buf.kva_or_slot = NULL; - rcq->buf.addr = 0; -err_alloc_rcq: + udma_free_rct_buffer(dev, rcq); +err_alloc_rct_buffer: kfree(rcq); return ret; @@ -131,10 +160,7 @@ void udma_free_rc_queue(struct udma_dev *dev, int rc_queue_id) if (dfx_switch) udma_dfx_delete_id(dev, &dev->dfx_info->rc, rc_queue_id); - udma_free_iova(dev, rcq->buf.entry_size * rcq->buf.entry_cnt, - rcq->buf.kva_or_slot, rcq->buf.addr); - rcq->buf.kva_or_slot = NULL; - rcq->buf.addr = 0; + udma_free_rct_buffer(dev, rcq); kfree(rcq); } diff --git a/include/uapi/ub/urma/udma/udma_abi.h b/include/uapi/ub/urma/udma/udma_abi.h index 02440d162c8d..5859f5254b5e 100644 --- a/include/uapi/ub/urma/udma/udma_abi.h +++ b/include/uapi/ub/urma/udma/udma_abi.h @@ -74,7 +74,8 @@ struct udma_create_jetty_ucmd { __aligned_u64 jetty_addr; __u32 pi_type : 1; __u32 non_pin : 1; - __u32 rsv : 30; + __u32 is_hugepage : 1; + __u32 rsv : 29; __u32 jetty_type; __aligned_u64 jfr_sleep_buf; __u32 jfs_id; @@ -86,6 +87,9 @@ struct udma_create_jfc_ucmd { __u32 buf_len; __u32 mode; /* 0: normal, 1: user stars, 2: kernel stars */ __aligned_u64 db_addr; + __u32 is_hugepage : 1; + __u32 rsv : 31; + __u32 rsv1; }; struct udma_create_ctx_resp { @@ -93,7 +97,8 @@ struct udma_create_ctx_resp { 
__u32 dwqe_enable : 1; __u32 reduce_enable : 1; __u32 dump_aux_info : 1; - __u32 rsv : 21; + __u32 hugepage_enable : 1; + __u32 rsv : 20; __u32 ue_id; __u32 chip_id; __u32 die_id; @@ -109,6 +114,7 @@ struct udma_create_jfr_resp { enum db_mmap_type { UDMA_MMAP_JFC_PAGE, UDMA_MMAP_JETTY_DSQE, + UDMA_MMAP_HUGEPAGE, }; enum { -- Gitee From 5072a5ba524e1228438d41bdf3e8a7ff1d422146 Mon Sep 17 00:00:00 2001 From: JiaWei Kang Date: Wed, 12 Nov 2025 15:29:26 +0800 Subject: [PATCH 067/126] ub: udma: reset and segment permission issues are resolved. commit 524bc5788973aee8196c2cda37a63a2b38f2a863 openEuler This patch adds the bugfix to 1. fix ubase and udma locker circular dependency. 2. modify segment permission issue. Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/Kconfig | 2 +- drivers/ub/urma/hw/udma/udma_ctx.c | 1 + drivers/ub/urma/hw/udma/udma_main.c | 2 +- drivers/ub/urma/hw/udma/udma_segment.c | 26 ++++++++++++++++++++------ 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/drivers/ub/urma/hw/udma/Kconfig b/drivers/ub/urma/hw/udma/Kconfig index fd5d27ef9813..c6d5ca89e7ef 100644 --- a/drivers/ub/urma/hw/udma/Kconfig +++ b/drivers/ub/urma/hw/udma/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0+ # Copyright(c) 2025 HiSilicon Technologies CO., Ltd. All rights reserved. 
-menuconfig UB_UDMA +config UB_UDMA default n tristate "UB UDMA Driver" depends on UB_UBASE && UB_URMA && UB_UMMU_CORE diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 5f60fca10d86..6f7f1ecef0c5 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -83,6 +83,7 @@ struct ubcore_ucontext *udma_alloc_ucontext(struct ubcore_device *ub_dev, return &ctx->base; err_init_ctx_resp: + mutex_destroy(&ctx->hugepage_lock); mutex_destroy(&ctx->pgdir_mutex); err_unbind_dev: ummu_sva_unbind_device(ctx->sva); diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index cbf773d01c48..be76c20c1ff0 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1124,8 +1124,8 @@ void udma_remove(struct auxiliary_device *adev) { struct udma_dev *udma_dev; - mutex_lock(&udma_reset_mutex); ubase_reset_unregister(adev); + mutex_lock(&udma_reset_mutex); udma_dev = get_udma_dev(adev); if (!udma_dev) { mutex_unlock(&udma_reset_mutex); diff --git a/drivers/ub/urma/hw/udma/udma_segment.c b/drivers/ub/urma/hw/udma/udma_segment.c index 90615d1ae2b4..7bb52ed4d5a1 100644 --- a/drivers/ub/urma/hw/udma/udma_segment.c +++ b/drivers/ub/urma/hw/udma/udma_segment.c @@ -70,14 +70,28 @@ static void udma_init_seg_cfg(struct udma_segment *seg, struct ubcore_seg_cfg *c static int udma_u_get_seg_perm(struct ubcore_seg_cfg *cfg) { - if (cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_ONLY || - cfg->flag.bs.access & UBCORE_ACCESS_ATOMIC) + bool local_only_flag = cfg->flag.bs.access & UBCORE_ACCESS_LOCAL_ONLY; + bool atomic_flag = cfg->flag.bs.access & UBCORE_ACCESS_ATOMIC; + bool write_flag = cfg->flag.bs.access & UBCORE_ACCESS_WRITE; + bool read_flag = cfg->flag.bs.access & UBCORE_ACCESS_READ; + + /* After setting ACCESS_LOCAL, other operations cannot be configured. 
*/ + if (local_only_flag && !atomic_flag && !write_flag && !read_flag) + return UMMU_DEV_ATOMIC | UMMU_DEV_WRITE | UMMU_DEV_READ; + + /* Atomic require additional configuration of write and read. */ + if (!local_only_flag && atomic_flag && write_flag && read_flag) return UMMU_DEV_ATOMIC | UMMU_DEV_WRITE | UMMU_DEV_READ; - if (cfg->flag.bs.access & UBCORE_ACCESS_WRITE) + /* Write require additional configuration of read. */ + if (!local_only_flag && !atomic_flag && write_flag && read_flag) return UMMU_DEV_WRITE | UMMU_DEV_READ; - return UMMU_DEV_READ; + if (!local_only_flag && !atomic_flag && !write_flag && read_flag) + return UMMU_DEV_READ; + + /* All other configurations are illegal. */ + return 0; } static int udma_sva_grant(struct ubcore_seg_cfg *cfg, struct iommu_sva *ksva) @@ -245,8 +259,8 @@ struct ubcore_target_seg *udma_register_seg(struct ubcore_device *ub_dev, ret = udma_sva_grant(cfg, ksva); if (ret) { dev_err(udma_dev->dev, - "ksva grant failed with token policy %d, ret = %d.\n", - cfg->flag.bs.token_policy, ret); + "ksva grant failed token policy %d, access %d, ret = %d.\n", + cfg->flag.bs.token_policy, cfg->flag.bs.access, ret); goto err_load_ksva; } mutex_unlock(&udma_dev->ksva_mutex); -- Gitee From b541df604ed9d8544b94db7d45d6f6f1cf56944a Mon Sep 17 00:00:00 2001 From: JiaWei Kang Date: Wed, 12 Nov 2025 15:57:52 +0800 Subject: [PATCH 068/126] ub: udma: Resolve issues related to stream logout and interrupt lock. commit cfc1a43a32ef15ec77372b0695759cb6e17bc96d openEuler This patch adds the bugfix to 1. modify some review comments. 2. resolve the problem of deregistration with stream. 3. disabling interrupts when jetty is locked. 
Signed-off-by: JiaWei Kang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 2 +- drivers/ub/urma/hw/udma/udma_ctx.c | 2 +- drivers/ub/urma/hw/udma/udma_eq.c | 81 +++++++++++++------------ drivers/ub/urma/hw/udma/udma_jfc.c | 7 ++- drivers/ub/urma/hw/udma/udma_jfr.c | 5 +- drivers/ub/urma/hw/udma/udma_jfs.c | 5 +- drivers/ub/urma/hw/udma/udma_main.c | 19 +++--- 7 files changed, 65 insertions(+), 56 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index bfa3ed44c381..bdd4617cb4c4 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -8,7 +8,7 @@ #define UDMA_EID_SIZE 16 #define UDMA_CNA_SIZE 16 -#define UDMA_PID_MASK 24 +#define UDMA_PID_MASK 0xFFFFFF #define UDMA_DEFAULT_PID 1 #define UDMA_UE_NUM 64 #define UDMA_MAX_UE_IDX 256 diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 6f7f1ecef0c5..ccc3b4905af9 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -381,7 +381,7 @@ void udma_return_u_hugepage(struct udma_context *ctx, void *va) zap_vma_ptes(vma, (unsigned long)priv->va_base, priv->va_len); mmap_write_unlock(current->mm); } else { - dev_warn(ctx->dev->dev, "current mm released.\n"); + dev_warn_ratelimited(ctx->dev->dev, "current mm released.\n"); } if (dfx_switch) diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 53dfb2bdebd5..655714b872a5 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -629,6 +629,9 @@ static int udma_ctrlq_eid_update(struct auxiliary_device *adev, uint8_t service_ } udma_dev = get_udma_dev(adev); + if (udma_dev->status != UDMA_NORMAL) + return udma_ctrlq_send_eid_update_response(udma_dev, seq, 0); + if (len < sizeof(struct udma_ctrlq_eid_out_update)) { dev_err(udma_dev->dev, "msg len(%u) is invalid.\n", len); return 
udma_ctrlq_send_eid_update_response(udma_dev, seq, -EINVAL); @@ -656,15 +659,18 @@ static int udma_ctrlq_eid_update(struct auxiliary_device *adev, uint8_t service_ return udma_ctrlq_send_eid_update_response(udma_dev, seq, ret); } -static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, - uint16_t len, uint32_t tp_num, - struct udma_ctrlq_check_tp_active_rsp_info *rsp_info) +static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, uint16_t len, + struct udma_ctrlq_check_tp_active_rsp_info **rsp_info, + uint32_t *rsp_info_len) { +#define UDMA_CTRLQ_CHECK_TP_OFFSET 0xFF struct udma_ctrlq_check_tp_active_req_info *req_info = NULL; - uint32_t req_info_len = 0; + uint32_t req_info_len; + uint32_t tp_num; int i; - req_info_len = sizeof(uint32_t) + + tp_num = *((uint32_t *)data) & UDMA_CTRLQ_CHECK_TP_OFFSET; + req_info_len = sizeof(struct udma_ctrlq_check_tp_active_req_info) + sizeof(struct udma_ctrlq_check_tp_active_req_data) * tp_num; if (len < req_info_len) { dev_err(udev->dev, "msg param num(%u) is invalid.\n", tp_num); @@ -675,35 +681,45 @@ static int udma_ctrlq_check_tp_status(struct udma_dev *udev, void *data, return -ENOMEM; memcpy(req_info, data, req_info_len); + *rsp_info_len = sizeof(struct udma_ctrlq_check_tp_active_rsp_info) + + sizeof(struct udma_ctrlq_check_tp_active_rsp_data) * tp_num; + *rsp_info = kzalloc(*rsp_info_len, GFP_KERNEL); + if (!(*rsp_info)) { + *rsp_info_len = 0; + kfree(req_info); + req_info = NULL; + return -ENOMEM; + } + rcu_read_lock(); for (i = 0; i < req_info->num; i++) { if (find_vpid(req_info->data[i].pid_flag)) - rsp_info->data[i].result = UDMA_CTRLQ_TPID_IN_USE; + (*rsp_info)->data[i].result = UDMA_CTRLQ_TPID_IN_USE; else - rsp_info->data[i].result = UDMA_CTRLQ_TPID_EXITED; + (*rsp_info)->data[i].result = UDMA_CTRLQ_TPID_EXITED; - rsp_info->data[i].tp_id = req_info->data[i].tp_id; + (*rsp_info)->data[i].tp_id = req_info->data[i].tp_id; } - rsp_info->num = tp_num; + (*rsp_info)->num = tp_num; 
rcu_read_unlock(); + + if (debug_switch) + udma_dfx_ctx_print(udev, "udma check tp active", (*rsp_info)->data[0].tp_id, + *rsp_info_len / sizeof(uint32_t), (uint32_t *)(*rsp_info)); kfree(req_info); + req_info = NULL; return 0; } -static int udma_ctrlq_check_param(struct udma_dev *udev, void *data, uint16_t len) +static int udma_ctrlq_check_tp_active_param(struct udma_dev *udev, void *data, uint16_t len) { -#define UDMA_CTRLQ_HDR_LEN 12 -#define UDMA_CTRLQ_MAX_BB 32 -#define UDMA_CTRLQ_BB_LEN 32 - if (data == NULL) { dev_err(udev->dev, "data is NULL.\n"); return -EINVAL; } - if ((len < UDMA_CTRLQ_BB_LEN - UDMA_CTRLQ_HDR_LEN) || - len > (UDMA_CTRLQ_BB_LEN * UDMA_CTRLQ_MAX_BB - UDMA_CTRLQ_HDR_LEN)) { + if (len < sizeof(struct udma_ctrlq_check_tp_active_req_info)) { dev_err(udev->dev, "msg data len(%u) is invalid.\n", len); return -EINVAL; } @@ -715,29 +731,17 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, uint8_t service_ver, void *data, uint16_t len, uint16_t seq) { -#define UDMA_CTRLQ_CHECK_TP_OFFSET 0xFF struct udma_ctrlq_check_tp_active_rsp_info *rsp_info = NULL; struct udma_dev *udev = get_udma_dev(adev); struct ubase_ctrlq_msg msg = {}; uint32_t rsp_info_len = 0; - uint32_t tp_num = 0; - int ret_val; int ret; - ret_val = udma_ctrlq_check_param(udev, data, len); - if (ret_val == 0) { - tp_num = *((uint32_t *)data) & UDMA_CTRLQ_CHECK_TP_OFFSET; - rsp_info_len = sizeof(uint32_t) + - sizeof(struct udma_ctrlq_check_tp_active_rsp_data) * tp_num; - rsp_info = kzalloc(rsp_info_len, GFP_KERNEL); - if (!rsp_info) { - dev_err(udev->dev, "check tp mag malloc failed.\n"); - return -ENOMEM; - } - - ret_val = udma_ctrlq_check_tp_status(udev, data, len, tp_num, rsp_info); - if (ret_val) - dev_err(udev->dev, "check tp status failed, ret_val(%d).\n", ret_val); + ret = udma_ctrlq_check_tp_active_param(udev, data, len); + if (ret == 0) { + ret = udma_ctrlq_check_tp_status(udev, data, len, &rsp_info, &rsp_info_len); + if (ret) + dev_err(udev->dev, "check tp 
status failed, ret(%d).\n", ret); } msg.service_ver = UBASE_CTRLQ_SER_VER_01; @@ -748,17 +752,16 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, msg.in_size = (uint16_t)rsp_info_len; msg.in = (void *)rsp_info; msg.resp_seq = seq; - msg.resp_ret = (uint8_t)(-ret_val); + msg.resp_ret = (uint8_t)(-ret); ret = ubase_ctrlq_send_msg(adev, &msg); - if (ret) { - kfree(rsp_info); + if (ret) dev_err(udev->dev, "send check tp active ctrlq msg failed, ret(%d).\n", ret); - return ret; - } + kfree(rsp_info); + rsp_info = NULL; - return (ret_val) ? ret_val : 0; + return ret; } static struct ubase_ctrlq_event_nb udma_ctrlq_opts[] = { diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 92c9fcbaae9f..50ef624629df 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -528,9 +528,9 @@ int udma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, xa_lock(&udma_dev->jfc_table.xa); udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, jfcn); if (!udma_jfc) { + xa_unlock(&udma_dev->jfc_table.xa); dev_warn(udma_dev->dev, "Completion event for bogus jfcn %lu.\n", jfcn); - xa_unlock(&udma_dev->jfc_table.xa); return -EINVAL; } @@ -1034,13 +1034,14 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) struct udma_jfc *udma_jfc = to_udma_jfc(jfc); enum jfc_poll_state err = JFC_OK; struct list_head tid_list; + unsigned long flags; uint32_t ci; int npolled; INIT_LIST_HEAD(&tid_list); if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_lock(&udma_jfc->lock); + spin_lock_irqsave(&udma_jfc->lock, flags); for (npolled = 0; npolled < cr_cnt; ++npolled) { err = udma_poll_one(dev, udma_jfc, cr + npolled, &tid_list); @@ -1054,7 +1055,7 @@ int udma_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) } if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_unlock(&udma_jfc->lock); + spin_unlock_irqrestore(&udma_jfc->lock, flags); if (!list_empty(&tid_list)) 
udma_inv_tid(dev, &tid_list); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 6bfc135fa846..8e98319715e0 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -894,11 +894,12 @@ int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, { struct udma_dev *dev = to_udma_dev(ubcore_jfr->ub_dev); struct udma_jfr *jfr = to_udma_jfr(ubcore_jfr); + unsigned long flags; uint32_t nreq; int ret = 0; if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) - spin_lock(&jfr->lock); + spin_lock_irqsave(&jfr->lock, flags); for (nreq = 0; wr; ++nreq, wr = wr->next) { ret = post_recv_one(dev, jfr, wr); @@ -919,7 +920,7 @@ int udma_post_jfr_wr(struct ubcore_jfr *ubcore_jfr, struct ubcore_jfr_wr *wr, } if (!ubcore_jfr->jfr_cfg.flag.bs.lock_free) - spin_unlock(&jfr->lock); + spin_unlock_irqrestore(&jfr->lock, flags); return ret; } diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 7277db44da12..5d520a0cea00 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -1240,11 +1240,12 @@ int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, struct udma_sqe_ctl *wqe_addr; bool dwqe_enable = false; struct ubcore_jfs_wr *it; + unsigned long flags; int wr_cnt = 0; int ret = 0; if (!sq->lock_free) - spin_lock(&sq->lock); + spin_lock_irqsave(&sq->lock, flags); for (it = wr; it != NULL; it = (struct ubcore_jfs_wr *)(void *)it->next) { ret = udma_post_one_wr(sq, it, udma_dev, &wqe_addr, &dwqe_enable); @@ -1265,7 +1266,7 @@ int udma_post_sq_wr(struct udma_dev *udma_dev, struct udma_jetty_queue *sq, } if (!sq->lock_free) - spin_unlock(&sq->lock); + spin_unlock_irqrestore(&sq->lock, flags); return ret; } diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index be76c20c1ff0..44a93fd000b0 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ 
-1067,12 +1067,6 @@ void udma_reset_down(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - if (udma_close_ue_rx(udma_dev, false, false, true, 0)) { - mutex_unlock(&udma_reset_mutex); - dev_err(&adev->dev, "udma close ue rx failed in reset down process.\n"); - return; - } - udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); udma_dev->status = UDMA_SUSPEND; mutex_unlock(&udma_reset_mutex); @@ -1096,11 +1090,18 @@ void udma_reset_uninit(struct auxiliary_device *adev) return; } + if (udma_close_ue_rx(udma_dev, false, false, true, 0)) { + mutex_unlock(&udma_reset_mutex); + dev_err(&adev->dev, "udma close ue rx failed in reset process.\n"); + return; + } + + /* Event should unregister before unset ubcore dev. */ + udma_unregister_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); udma_open_ue_rx(udma_dev, false, false, true, 0); - udma_unregister_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); } @@ -1143,14 +1144,16 @@ void udma_remove(struct auxiliary_device *adev) udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); + /* Event should unregister before unset ubcore dev. */ + udma_unregister_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); (void)ubase_activate_dev(adev); - udma_unregister_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); + dev_info(&adev->dev, "udma device remove success.\n"); } static struct auxiliary_driver udma_drv = { -- Gitee From a6c57b429b2cf33760f0f160e8225d6ab1f6c870 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 14:42:06 +0800 Subject: [PATCH 069/126] ub: udma: add udma driver module doc. commit 51fa1dfca96022eaee4f3795557ebe11b74051cd openEuler This patch adds the document related to udma. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 1 + Documentation/ub/urma/udma/index.rst | 14 ++ Documentation/ub/urma/udma/udma.rst | 296 +++++++++++++++++++++++++++ 3 files changed, 311 insertions(+) create mode 100644 Documentation/ub/urma/udma/index.rst create mode 100644 Documentation/ub/urma/udma/udma.rst diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index c9366b0608dc..22276b791363 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -16,3 +16,4 @@ UnifiedBus Subsystem ubus/index ummu-core cdma/index + urma/udma/index diff --git a/Documentation/ub/urma/udma/index.rst b/Documentation/ub/urma/udma/index.rst new file mode 100644 index 000000000000..3a721ff1efcc --- /dev/null +++ b/Documentation/ub/urma/udma/index.rst @@ -0,0 +1,14 @@ +.. SPDX-License-Identifier: GPL-2.0+ +.. include:: + +:Copyright: |copy| 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +============ +UDMA Driver +============ + +.. toctree:: + :maxdepth: 2 + :numbered: + + udma diff --git a/Documentation/ub/urma/udma/udma.rst b/Documentation/ub/urma/udma/udma.rst new file mode 100644 index 000000000000..8e17734fa580 --- /dev/null +++ b/Documentation/ub/urma/udma/udma.rst @@ -0,0 +1,296 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + +=============================================== +UnifiedBus DIRECT MEMORY ACCESS DRIVER (UDMA) +=============================================== + +Overview +========= + +This document describes the context and capabilities of the UDMA driver. + +**UnifiedBus** is an interconnect protocol for SuperPoD, It unifies IO, +memory access, and communication between various processing units under +a single interconnect technology framework. +The UnifiedBus specifications are open source and available on the official +website: `UB Specification Documents `_. 
+ +**UDMA** (UnifiedBus Direct Memory Access), is a hardware I/O device that +provides direct memory access capabilities. + +**URMA(Unified Remote Memory Access)** is a component within the UnifiedBus +protocol stack, designed to abstract and facilitate communication between +different hardware and software entities. + +The UDMA driver integrates with the UnifiedBus protocol by implementing +the **URMA programming API**, through this API, the driver exposes the +UnifiedBus remote memory access programming model to application developers. + + +Device Driver Model +===================== + +The UDMA device is a UnifiedBus auxiliary device attached to the auxiliary bus. +The UDMA driver is developed based on the UBASE driver framework and uses +the auxiliary bus to perform device-driver binding. + +.. code-block:: none + + +---------------+ +-------+ + | UDMA Driver | | ... | + +-------+-------+ +-------+ + | + \|/ + +-------+-------------------+ + | auxiliary bus | + +-------+-------------------+ + /|\ + | + +-------+-------+ + | UDMA Device | + +----+----------+ + /|\ + | UBASE driver creates UDMA device + +----+------------------+ + | UBASE Driver | + +-----------+-----------+ + | + \|/ + +-----------+---------------+ + | ubus | + +---------------------------+ + +The figure above illustrates the hierarchy between the UDMA driver and the +UBASE driver. The UBASE driver is responsible for creating the UDMA auxiliary device +and registering it with the auxiliary bus. + + +Context & Submodules +======================= + +The UDMA driver depends on the ``Hardware programming interface``, +``UBASE driver``, and ``UMMU driver``. +It implements the URMA API and provides direct memory access capabilities. + +Below figure describe the UDMA driver's context and submodules. + +.. code-block:: none + + +-------------+ + | 5. 
URMA API | + +-----+-------+ + ^ + | + | + +-----------------+-----------------------+ + | UDMA Driver | + | | + | | + | +--------------------+ +-----------+ | + | | udma_main | | udma_comon| | + | +--------------------+ +-----------+ | + | +----------------++--------++--------+ | + | | udma_context ||udma_eid||udma_tid| | + | +----------------++--------++--------+ | + | +-----------------+ +----------------+ | +---------------+ + | | udma_jetty | | udma_segment | +----->| 4. UMMU Driver| + | +-----------------+ +----------------+ | +---------------+ + | +------------++---------++-----------+ | + | | udma_jfs ||udma_jfr || udma_jfc | | + | +------------++---------++-----------+ | + | +---------++---------------++--------+ | + | | udma_db || udma_ctrlq_tp ||udma_dfx| | + | +---------++---------------++--------+ | + | +-----------+ +---------+ +----------+ | + | | udma_cmd | |udma_ctl | | udma_eq | | + | +-----------+ +---------+ +----------+ | + +-----------------------------+-----------+ + | +---------------------+ + | | 3. Management Module| + \|/ +----------+----------+ + +--------+----------+ | + | 2. UBASE Driver +<---------------+ + +---------+---------+ + Software + -------------------------------+----------------------------------------- + \|/ Hardware + +-----------------------------+----------+ + | 1. Hardware programming interface | + +----------------------------------------+ + +Context +--------- + +1. Hardware programming interface: The UDMA driver encapsulates the + hardware programming interface, abstracting the hardware specifics. + +2. UBASE: UBASE driver responsible for managing UDMA auxiliary devices. + It also provides common management capabilities for auxiliary bus devices + and interacts with the Management module. + The UDMA device driver is built upon the UBASE driver and reuses its common utility functions. + +3. Management module: responsible for device management and configuration. + +4. 
UMMU: UnifiedBus Memory Management Unit, providing memory management + functionality(address translation, access permission, etc.) for UnifiedBus devices. + +5. URMA API: URMA programming interface, URMA API abstracts the memory operations, + and the UDMA driver implements it, so application developers do not need to be + aware of the details of the UDMA driver. + + +Submodules +------------ + +The UDMA driver submodules can be divided into 4 categories: +common utility and main functions, UDMA communication, device management and configuration, +UDMA device debugging. + +**Common Utility and Main Functions** + +* udma_main: Implements module_init/module_exit, and registers the UDMA driver + to the auxiliary bus. + +* udma_common: Provides common utility functions for UDMA driver. + +**UDMA Communication** + +Theses submodules handle UDMA communication setup and +processes(e.g read/write or send/recv operations). + +* udma_context: Manages UDMA communication context (allocates context, frees context, etc.). +* udma_eid: Manages UDMA Entity IDs (adds, removes, and queries UDMA entities). +* udma_tid: Manages TIDs (Token IDs) (allocates, frees token IDs). +* udma_segment: Manages memory segments, including local memory segment + registration and remote memory segment import. +* udma_jetty, udma_jfs, udma_jfr, udma_jfc: Manages UnifiedBus communication + jetty-related resources, including jetty, jfs, jfr, and jfc. + +**UDMA Device Management and Configuration** + +These submodules handle the UDMA device management and UDMA communication configuration. + +* udma_cmd: Encapsulates hardware configuration commands for UDMA communication, + e.g., create jfs, create jfc, etc. +* udma_db: Encapsulates UDMA hardware doorbell operations. +* udma_ctrlq_tp: Encapsulates control queue (CtrlQ) operations for UDMA hardware + configuration, e.g., get the transport channels. 
+* udma_ctl: Encapsulates UDMA hardware-specific configure operations, which are + not defined in the URMA API. Application developers should include the header file ``include/ub/urma/udma/udma_ctl.h`` separately. +* udma_eq: Encapsulates hardware event queue operations, e.g., register + CtrlQ event handle to receive CtrlQ events. + +**UDMA Device Debugging** + +* udma_dfx: Queries the UDMA hardware runtime configurations, e.g., + jetty state, transport mode, etc. + + +Supported Hardware +==================== + +UDMA driver supported hardware: + +=========== ============= +Vendor ID Device ID +=========== ============= +0xCC08 0xA001 +0xCC08 0xA002 +0xCC08 0xD802 +0xCC08 0xD803 +0xCC08 0xD80B +0xCC08 0xD80C +=========== ============= + +You can use the ``lsub`` command on your host OS to query UB devices. Below is an example output: + +.. code-block:: shell + + UB network controller <0002>: Huawei Technologies Co., Ltd. URMA management ub entity : + UB network controller <0082>: Huawei Technologies Co., Ltd. URMA management ub entity : + UB network controller <0002>: Huawei Technologies Co., Ltd. URMA management ub entity : + +The ``Vendor ID`` and ``Device ID`` are located at the end of each output line +with the format ``:``, e.g., ``:``. + +Note the ``lsub`` command is from ubutils; make sure it is installed on your host. + + +Module Parameters +=================== + +UDMA driver supports 4 parameters: **cqe_mode**, **jfc_arm_mode**, +**jfr_sleep_time**, **dump_aux_info**. +The default value represents the best practices; however, you may need to change +the default value in certain cases. + +cqe_mode +----------- + +``cqe_mode`` controls the method of **Completion Queue Entry (CQE)** event generation. + +In interrupt mode, UDMA provides two mechanisms for generating CQE events: +**Producer Index (PI)/Consumer Index (CI) difference comparison** +and **counter-based threshold**. 
+ +* PI/CI difference comparison: PI (Producer Index) and CI (Consumer Index) + respectively point to the next CQE to be written and read in the Completion Queue. + The device generates an interrupt to notify the upper layer when the + difference (the number of pending CQEs) exceeds a certain threshold. +* Counter-based threshold: An interrupt is generated when the total number of + CQEs written to the Completion Queue reaches a programmed threshold. + +**Parameter values:** + +* 0: Counter-based threshold +* 1: PI/CI difference comparison + +**Default value**: 1 + + +jfc_arm_mode +-------------- + +`jfc_arm_mode` controls the completion event interrupt mode. + +**Parameter Values:** + +* 0: Always ARM, interrupt always enabled +* 1: No ARM, interrupt is disabled and cannot be modified +* Other value (e.g., 2): Interrupt is disabled, but can be modified + +**Default value:** 0 + + +jfr_sleep_time +---------------- + +``jfr_sleep_time`` configures the maximum blocking wait time (in microseconds) +when deregistering a JFR (Jetty-related resource). The default value is 1000 us. +You can adjust this parameter value as needed. + +The allowed range is: ``[0,UINT32_MAX]`` + +dump_aux_info +--------------- + +``dump_aux_info`` controls whether to dump auxiliary information +(the hardware register values) into the event body when reporting asynchronous +or completion events. + + +**Parameter Values:** + +* false: Disables the dumping of auxiliary information. +* true: Enables the dumping of auxiliary information. + +**Default value**: false + + +Support +======= + +If there is any issue or question, please email the specific information related +to the issue or question to or vendor's support channel. -- Gitee From d77a2fe97df31d259fa41d452b725f54b67b72aa Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 15:00:52 +0800 Subject: [PATCH 070/126] ub: udma: mask jetty context addr info. 
commit 5b8885c72d25a4694b448a6085c1fd156a45ac29 openEuler For security reasons, the jetty context address will not be printed. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jetty.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_jetty.c b/drivers/ub/urma/hw/udma/udma_jetty.c index c3f3f9a90fb3..e41b85e71054 100644 --- a/drivers/ub/urma/hw/udma/udma_jetty.c +++ b/drivers/ub/urma/hw/udma/udma_jetty.c @@ -796,6 +796,14 @@ static bool udma_wait_timeout(uint32_t *sum_times, uint32_t times, uint32_t ta_t return false; } +static void udma_mask_jetty_ctx(struct udma_jetty_ctx *ctx) +{ + ctx->sqe_base_addr_l = 0; + ctx->sqe_base_addr_h = 0; + ctx->user_data_l = 0; + ctx->user_data_h = 0; +} + static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *sq) { struct udma_jetty_ctx ctx = {}; @@ -821,6 +829,7 @@ static bool udma_query_jetty_fd(struct udma_dev *dev, struct udma_jetty_queue *s if (ctx.flush_ssn_vld && rcv_send_diff < UDMA_RCV_SEND_MAX_DIFF) return true; + udma_mask_jetty_ctx(&ctx); udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); @@ -1098,6 +1107,8 @@ static bool udma_batch_query_jetty_fd(struct udma_dev *dev, *bad_jetty_index = 0; all_query_done = false; + + udma_mask_jetty_ctx(&ctx); udma_dfx_ctx_print(dev, "Flush Failed Jetty", sq->id, sizeof(ctx) / sizeof(uint32_t), (uint32_t *)&ctx); break; -- Gitee From 00cb32a416acb91f1ce71a57c25ae9a0a911f3e1 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 15:35:43 +0800 Subject: [PATCH 071/126] ub: udma: bugfix for set and get tp attr. commit 7b406a1a737767a4aec23c778f6f631453e77b6d openEuler This patch fix a bug about set and get tp attr. 
Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 966dc7a41d94..86d68ace7000 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -170,6 +170,11 @@ static int udma_dev_res_ratio_ctrlq_handler(struct auxiliary_device *adev, struct udma_ctrlq_event_nb *udma_cb; int ret; + if (service_ver != UBASE_CTRLQ_SER_VER_01) { + dev_err(udev->dev, "Unsupported server version (%u).\n", service_ver); + return -EOPNOTSUPP; + } + mutex_lock(&udev->npu_nb_mutex); udma_cb = xa_load(&udev->npu_nb_table, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO); if (!udma_cb) { @@ -646,6 +651,14 @@ int udma_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, tp_attr_req.tp_attr.tp_attr_bitmap = tp_attr_bitmap; memcpy(&tp_attr_req.tp_attr.tp_attr_value, (void *)tp_attr, sizeof(*tp_attr)); + udma_swap_endian((uint8_t *)tp_attr->sip, tp_attr_req.tp_attr.tp_attr_value.sip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr->dip, tp_attr_req.tp_attr.tp_attr_value.dip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr->sma, tp_attr_req.tp_attr.tp_attr_value.sma, + UBCORE_MAC_BYTES); + udma_swap_endian((uint8_t *)tp_attr->dma, tp_attr_req.tp_attr.tp_attr_value.dma, + UBCORE_MAC_BYTES); udma_ctrlq_set_tp_msg(&msg, &tp_attr_req, sizeof(tp_attr_req), NULL, 0); msg.opcode = UDMA_CMD_CTRLQ_SET_TP_ATTR; @@ -687,6 +700,14 @@ int udma_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, *tp_attr_bitmap = tp_attr_resp.tp_attr.tp_attr_bitmap; memcpy((void *)tp_attr, &tp_attr_resp.tp_attr.tp_attr_value, sizeof(tp_attr_resp.tp_attr.tp_attr_value)); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.sip, tp_attr->sip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t 
*)tp_attr_resp.tp_attr.tp_attr_value.dip, tp_attr->dip, + UBCORE_IP_ADDR_BYTES); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.sma, tp_attr->sma, + UBCORE_MAC_BYTES); + udma_swap_endian((uint8_t *)tp_attr_resp.tp_attr.tp_attr_value.dma, tp_attr->dma, + UBCORE_MAC_BYTES); return 0; } -- Gitee From 027f0619ce63c96ec233bf7367e02762006af0cb Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 16:55:54 +0800 Subject: [PATCH 072/126] ub: udma: Support eid and guid updates. commit 53d00d68d89395191c185ae7fa29a6a1d8ef7cd5 openEuler This patch support update eid and guid for udma. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubase/ubase_ctrlq.c | 1 + drivers/ub/urma/hw/udma/udma_cmd.h | 14 ++++++ drivers/ub/urma/hw/udma/udma_eq.c | 71 +++++++++++++++++++++++++++++ include/ub/ubase/ubase_comm_ctrlq.h | 1 + 4 files changed, 87 insertions(+) diff --git a/drivers/ub/ubase/ubase_ctrlq.c b/drivers/ub/ubase/ubase_ctrlq.c index 8fcc42a04a51..18f72cc68235 100644 --- a/drivers/ub/ubase/ubase_ctrlq.c +++ b/drivers/ub/ubase/ubase_ctrlq.c @@ -20,6 +20,7 @@ static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_unic[] = { static const struct ubase_ctrlq_event_nb ubase_ctrlq_wlist_udma[] = { {UBASE_CTRLQ_SER_TYPE_TP_ACL, UBASE_CTRLQ_OPC_CHECK_TP_ACTIVE, NULL, NULL}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_SEID, NULL, NULL}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_UPDATE_UE_SEID_GUID, NULL, NULL}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO, NULL, NULL}, }; diff --git a/drivers/ub/urma/hw/udma/udma_cmd.h b/drivers/ub/urma/hw/udma/udma_cmd.h index 6ec531913033..24f6d65bf1ad 100644 --- a/drivers/ub/urma/hw/udma/udma_cmd.h +++ b/drivers/ub/urma/hw/udma/udma_cmd.h @@ -49,9 +49,23 @@ enum udma_ctrlq_eid_update_op { UDMA_CTRLQ_EID_DEL, }; +enum udma_ctrlq_eid_guid_update_op { + UDMA_CTRLQ_EID_GUID_ADD = 0, + UDMA_CTRLQ_EID_GUID_DEL, +}; + +struct 
udma_ctrlq_ue_eid_guid_out { + struct udma_ctrlq_eid_info eid_info; + uint32_t op_type : 4; + uint32_t rsv : 28; + uint32_t ue_id; + guid_t ue_guid; +} __packed; + enum udma_ctrlq_dev_mgmt_opcode { UDMA_CTRLQ_GET_SEID_INFO = 0x1, UDMA_CTRLQ_UPDATE_SEID_INFO = 0x2, + UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID = 0x3, UDMA_CTRLQ_GET_DEV_RESOURCE_COUNT = 0x11, UDMA_CTRLQ_GET_DEV_RESOURCE_RATIO = 0x12, UDMA_CTRLQ_NOTIFY_DEV_RESOURCE_RATIO = 0x13, diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index 655714b872a5..d3b6813b1d55 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -764,11 +764,82 @@ static int udma_ctrlq_check_tp_active(struct auxiliary_device *adev, return ret; } +static int udma_ctrlq_send_eid_guid_response(struct udma_dev *udma_dev, + uint16_t seq, + int ret_val) +{ + struct ubase_ctrlq_msg msg = {}; + int in_buf = 0; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.opcode = UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID; + msg.need_resp = 0; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + msg.in = (void *)&in_buf; + msg.in_size = sizeof(in_buf); + + ret = ubase_ctrlq_send_msg(udma_dev->comdev.adev, &msg); + if (ret) + dev_err(udma_dev->dev, "send eid-guid rsp failed, ret = %d.\n", + ret); + + return ret; +} + +static int udma_ctrlq_notify_mue_eid_guid(struct auxiliary_device *adev, + uint8_t service_ver, + void *data, + uint16_t len, + uint16_t seq) +{ + struct udma_ctrlq_ue_eid_guid_out eid_guid_entry = {}; + struct udma_dev *udma_dev; + + if (adev == NULL || data == NULL) { + pr_err("adev is null : %d, data is null : %d.\n", + adev == NULL, data == NULL); + return -EINVAL; + } + + udma_dev = get_udma_dev(adev); + if (udma_dev->is_ue) + return 0; + + if (udma_dev->status != UDMA_NORMAL) + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, 0); + if (len < sizeof(struct 
udma_ctrlq_ue_eid_guid_out)) { + dev_err(udma_dev->dev, "eid-guid len(%u) is invalid.\n", len); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, -EINVAL); + } + memcpy(&eid_guid_entry, data, sizeof(eid_guid_entry)); + if (eid_guid_entry.op_type != UDMA_CTRLQ_EID_GUID_ADD && + eid_guid_entry.op_type != UDMA_CTRLQ_EID_GUID_DEL) { + dev_err(udma_dev->dev, "eid-guid type(%u) is invalid.\n", + eid_guid_entry.op_type); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, + -EINVAL); + } + if (eid_guid_entry.eid_info.eid_idx >= SEID_TABLE_SIZE) { + dev_err(udma_dev->dev, "invalid ue eid_idx = %u.\n", + eid_guid_entry.eid_info.eid_idx); + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, + -EINVAL); + } + + return udma_ctrlq_send_eid_guid_response(udma_dev, seq, 0); +} + static struct ubase_ctrlq_event_nb udma_ctrlq_opts[] = { {UBASE_CTRLQ_SER_TYPE_TP_ACL, UDMA_CMD_CTRLQ_CHECK_TP_ACTIVE, NULL, udma_ctrlq_check_tp_active}, {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UDMA_CTRLQ_UPDATE_SEID_INFO, NULL, udma_ctrlq_eid_update}, + {UBASE_CTRLQ_SER_TYPE_DEV_REGISTER, UDMA_CTRLQ_OPC_UPDATE_UE_SEID_GUID, NULL, + udma_ctrlq_notify_mue_eid_guid}, }; static int udma_register_one_ctrlq_event(struct auxiliary_device *adev, diff --git a/include/ub/ubase/ubase_comm_ctrlq.h b/include/ub/ubase/ubase_comm_ctrlq.h index 3e08a5ab5a4f..c50bfd60047f 100644 --- a/include/ub/ubase/ubase_comm_ctrlq.h +++ b/include/ub/ubase/ubase_comm_ctrlq.h @@ -53,6 +53,7 @@ enum ubase_ctrlq_opc_type_ip { enum ubase_ctrlq_opc_type_dev_register { UBASE_CTRLQ_OPC_UPDATE_SEID = 0x02, + UBASE_CTRLQ_OPC_UPDATE_UE_SEID_GUID = 0x03, UBASE_CTRLQ_OPC_NOTIFY_RES_RATIO = 0x13, UBASE_CTRLQ_OPC_CTRLQ_CTRL = 0x14, UBASE_CTRLQ_OPC_UE_RESET_CTRL = 0x15, -- Gitee From 273d5c0169aec9091fd65857358c6184205202b0 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Fri, 14 Nov 2025 11:54:54 +0800 Subject: [PATCH 073/126] ub:ubus: Bugfix of ubus and ubfi commit 7e992348e2c3a9064e165fbf5a075a2945790756 openEuler driver inclusion 
category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID34DG CVE: NA ----------------------------------------------------------- 1. Add the CONFIG_UB option to the ub directory in Makefile; 2. Add static for inner functions; 3. fix ubfi's ubc_list init bug. Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/Makefile | 2 +- drivers/irqchip/irq-gic-v3-its-ub-msi.c | 2 +- drivers/ub/ubfi/ubc.c | 4 +--- drivers/ub/ubus/ioctl.c | 1 + drivers/ub/ubus/reset.c | 3 ++- drivers/ub/ubus/resource.c | 6 +++--- drivers/ub/ubus/route.c | 1 + drivers/ub/ubus/services/ras.c | 4 ++-- drivers/ub/ubus/ubus_config.c | 12 ++++++------ drivers/ub/ubus/ubus_driver.c | 6 +++--- drivers/ub/ubus/ubus_entity.c | 2 +- 11 files changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/Makefile b/drivers/Makefile index 269267ac3b4f..f36e00dfd1bd 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_GPIOLIB) += gpio/ obj-y += pwm/ obj-y += pci/ -obj-y += ub/ +obj-$(CONFIG_UB) += ub/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ diff --git a/drivers/irqchip/irq-gic-v3-its-ub-msi.c b/drivers/irqchip/irq-gic-v3-its-ub-msi.c index 4caccd12fdc4..08274a57c5d5 100644 --- a/drivers/irqchip/irq-gic-v3-its-ub-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-ub-msi.c @@ -146,7 +146,7 @@ static int its_ub_of_msi_init(void) return 0; } -int __init its_ub_msi_init(void) +static int __init its_ub_msi_init(void) { its_ub_of_msi_init(); its_ub_acpi_msi_init(); diff --git a/drivers/ub/ubfi/ubc.c b/drivers/ub/ubfi/ubc.c index e89aeeafb913..a3f7bab8863f 100644 --- a/drivers/ub/ubfi/ubc.c +++ b/drivers/ub/ubfi/ubc.c @@ -28,7 +28,7 @@ #define to_ub_ubc(n) container_of(n, struct ub_bus_controller, dev) -struct list_head ubc_list; +LIST_HEAD(ubc_list); EXPORT_SYMBOL_GPL(ubc_list); u32 ubc_eid_start; @@ -593,8 +593,6 @@ int handle_ubc_table(u64 pointer) if (!info_node) return -EINVAL; - INIT_LIST_HEAD(&ubc_list); - ret = 
parse_ubc_table(info_node); if (ret) goto err_handle; diff --git a/drivers/ub/ubus/ioctl.c b/drivers/ub/ubus/ioctl.c index abcd4e878755..0825a87d8b81 100644 --- a/drivers/ub/ubus/ioctl.c +++ b/drivers/ub/ubus/ioctl.c @@ -10,6 +10,7 @@ #include "ubus.h" #include "instance.h" +#include "ioctl.h" #define UBUS_MAX_DEVICES 1 #define UBUS_DEVICE_NAME "unified_bus" diff --git a/drivers/ub/ubus/reset.c b/drivers/ub/ubus/reset.c index 4b7d86624e24..596d848c2c11 100644 --- a/drivers/ub/ubus/reset.c +++ b/drivers/ub/ubus/reset.c @@ -12,6 +12,7 @@ #include "route.h" #include "ubus_controller.h" #include "ubus_config.h" +#include "reset.h" enum elr_type { ELR_PREPARE = 0, @@ -30,7 +31,7 @@ static u32 saved_cfg_offset[] = { * ub_elr - Initiate an UB entity level reset * @dev: UB entity to reset */ -int ub_elr(struct ub_entity *dev) +static int ub_elr(struct ub_entity *dev) { u8 command; u8 val = 0; diff --git a/drivers/ub/ubus/resource.c b/drivers/ub/ubus/resource.c index b57117e02415..d4516d672bb5 100644 --- a/drivers/ub/ubus/resource.c +++ b/drivers/ub/ubus/resource.c @@ -97,7 +97,7 @@ static int _ub_assign_resource(struct ub_entity *uent, int idx, return -ENOMEM; } -int ub_assign_resource(struct ub_entity *uent, int idx) +static int ub_assign_resource(struct ub_entity *uent, int idx) { struct resource *res = &uent->zone[idx].res; resource_size_t size; @@ -123,7 +123,7 @@ int ub_assign_resource(struct ub_entity *uent, int idx) return 0; } -void ub_release_resource(struct ub_entity *uent, int idx) +static void ub_release_resource(struct ub_entity *uent, int idx) { int ret; @@ -292,7 +292,7 @@ int ub_insert_resource(struct ub_entity *dev, int idx) return 0; } -int ub_entity_alloc_mmio_idx(struct ub_entity *dev, int idx) +static int ub_entity_alloc_mmio_idx(struct ub_entity *dev, int idx) { if (is_ibus_controller(dev) || is_idev(dev)) return ub_insert_resource(dev, idx); diff --git a/drivers/ub/ubus/route.c b/drivers/ub/ubus/route.c index 73e0df5436dc..364bf78d93c3 100644 --- 
a/drivers/ub/ubus/route.c +++ b/drivers/ub/ubus/route.c @@ -12,6 +12,7 @@ #include "enum.h" #include "port.h" #include "ubus_driver.h" +#include "route.h" #define UB_ROUTE_TABLE_ENTRY_START (UB_ROUTE_TABLE_SLICE_START + (0x10 << 2)) #define EBW(port_nums) ((((port_nums) - 1) >> 5) + 1) /* Entry Bit Width */ diff --git a/drivers/ub/ubus/services/ras.c b/drivers/ub/ubus/services/ras.c index c38b300e1646..fc4365f819a3 100644 --- a/drivers/ub/ubus/services/ras.c +++ b/drivers/ub/ubus/services/ras.c @@ -29,7 +29,7 @@ enum ras_err_level { RAS_ERR_DEVICE_LEVEL, }; -int cper_severity_to_ub_ras(int cper_severity) +static int cper_severity_to_ub_ras(int cper_severity) { switch (cper_severity) { case CPER_SEV_FATAL: @@ -269,7 +269,7 @@ static inline void ras_recover_entry_init(struct ras_recover_entry *entry, static DEFINE_SPINLOCK(ub_ras_recover_ring_lock); static DECLARE_WORK(ub_ras_recover_work, ub_ras_recover_work_func); -void ub_ras_recover_queue(struct cper_sec_ubus *ubus_err, int severity) +static void ub_ras_recover_queue(struct cper_sec_ubus *ubus_err, int severity) { #define PORT_VALID_BIT 0b100ULL #define OVERFLOW_FLAG_BIT 0b10000ULL diff --git a/drivers/ub/ubus/ubus_config.c b/drivers/ub/ubus/ubus_config.c index a2d285da998a..2c1f068dc11a 100644 --- a/drivers/ub/ubus/ubus_config.c +++ b/drivers/ub/ubus/ubus_config.c @@ -308,7 +308,7 @@ int ub_send_cfg(struct ub_entity *uent, u8 size, u64 pos, u32 *val) req_pkt.header.msgetah.code); } -int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) +static int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -318,7 +318,7 @@ int __ub_cfg_read_byte(struct ub_entity *uent, u64 pos, u8 *val) return ub_sync_cfg(uent, (u8)sizeof(u8), pos, false, (u32 *)val); } -int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 *val) +static int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 
*val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -328,7 +328,7 @@ int __ub_cfg_read_word(struct ub_entity *uent, u64 pos, u16 *val) return ub_sync_cfg(uent, (u8)sizeof(u16), pos, false, (u32 *)val); } -int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) +static int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) { if (!uent || !uent->message || !uent->message->mdev || !val) { pr_err("uent or message or mdev is null\n"); @@ -338,7 +338,7 @@ int __ub_cfg_read_dword(struct ub_entity *uent, u64 pos, u32 *val) return ub_sync_cfg(uent, (u8)sizeof(u32), pos, false, val); } -int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) +static int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); @@ -348,7 +348,7 @@ int __ub_cfg_write_byte(struct ub_entity *uent, u64 pos, u8 val) return ub_sync_cfg(uent, (u8)sizeof(u8), pos, true, (u32 *)&val); } -int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) +static int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); @@ -358,7 +358,7 @@ int __ub_cfg_write_word(struct ub_entity *uent, u64 pos, u16 val) return ub_sync_cfg(uent, (u8)sizeof(u16), pos, true, (u32 *)&val); } -int __ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val) +static int __ub_cfg_write_dword(struct ub_entity *uent, u64 pos, u32 val) { if (!uent || !uent->message || !uent->message->mdev) { pr_err("uent or message or mdev is null\n"); diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index 9431bbccd3b0..974020bf3c38 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -134,7 +134,7 @@ ub_match_one_device(const struct ub_device_id *id, const struct ub_entity *dev) 
return NULL; } -const struct ub_device_id *ub_match_id(const struct ub_device_id *ids, +static const struct ub_device_id *ub_match_id(const struct ub_device_id *ids, struct ub_entity *dev) { if (ids && dev) { @@ -567,7 +567,7 @@ static int ub_bus_num_ue(struct device *dev) return ub_num_ue(to_ub_entity(dev)); } -void ub_bus_type_init(void) +static void ub_bus_type_init(void) { ub_bus_type.match = ub_bus_match; ub_bus_type.uevent = ub_uevent; @@ -580,7 +580,7 @@ void ub_bus_type_init(void) ub_bus_type.num_vf = ub_bus_num_ue; } -void ub_bus_type_uninit(void) +static void ub_bus_type_uninit(void) { ub_bus_type.match = NULL; ub_bus_type.uevent = NULL; diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index fcea27373ccb..6031469a27b9 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -63,7 +63,7 @@ struct ub_entity *ub_alloc_ent(void) EXPORT_SYMBOL_GPL(ub_alloc_ent); static DEFINE_IDA(uent_num_ida); -void ub_entity_num_free(struct ub_entity *uent) +static void ub_entity_num_free(struct ub_entity *uent) { ida_free(&uent_num_ida, uent->uent_num); } -- Gitee From a6f7c06767c8c3b2b9db6c7ee28c896e19529ff2 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Mon, 17 Nov 2025 14:25:58 +0800 Subject: [PATCH 074/126] ub:ubus: Add ummu_map attribute in sysfs commit d9875bf05b18a022f352201fec2e517e72236ca4 openEuler driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID77OO CVE: NA ----------------------------------------------------------- Add ummu_map attribute in sysfs Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/sysfs.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/ub/ubus/sysfs.c b/drivers/ub/ubus/sysfs.c index a583cf7efa97..2e997c5c5f1a 100644 --- a/drivers/ub/ubus/sysfs.c +++ b/drivers/ub/ubus/sysfs.c @@ -325,6 +325,15 @@ static ssize_t primary_cna_show(struct device *dev, } DEVICE_ATTR_RO(primary_cna); +static ssize_t 
ummu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ub_entity *uent = to_ub_entity(dev); + + return sysfs_emit(buf, "%#04x\n", uent->ubc->attr.ummu_map); +} +DEVICE_ATTR_RO(ummu_map); + static ssize_t instance_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -393,6 +402,7 @@ static struct attribute *ub_entity_attrs[] = { &dev_attr_tid.attr, &dev_attr_primary_entity.attr, &dev_attr_kref.attr, + &dev_attr_ummu_map.attr, NULL }; -- Gitee From 78501f6d324000b5b30730d5c5a37238a3871bb5 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Sat, 29 Nov 2025 10:43:16 +0800 Subject: [PATCH 075/126] ub: hisi-ubus: Fix ub memory decoder create commit 2f37e06abbc56da4affaa2620d07148052395b05 openEuler drivers: inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Fix ub memory decoder create cannot get data Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/vendor/hisilicon/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ub/ubus/vendor/hisilicon/memory.c b/drivers/ub/ubus/vendor/hisilicon/memory.c index fa9747171eea..e49a4a82322f 100644 --- a/drivers/ub/ubus/vendor/hisilicon/memory.c +++ b/drivers/ub/ubus/vendor/hisilicon/memory.c @@ -301,8 +301,8 @@ static u8 get_mem_decoder_number(struct hi_ubc_private_data *data) int hi_mem_decoder_create(struct ub_bus_controller *ubc) { + struct hi_ubc_private_data *data = ubc->data; struct ub_mem_device *mem_device; - struct hi_ubc_private_data *data; void *priv_data; int ret; -- Gitee From be013b80a70f088f1b32ecf8af94e9eeaf5b5c0e Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 26 Nov 2025 09:30:46 +0800 Subject: [PATCH 076/126] ub:hisi-ubus ub:hisi-ubus: Move the decoder's page table operations to hisi-ubus commit bd9e1c9e31192a1eabadca924fab339f91eec5ae openEuler drivers inclusion category: Bugfix bugzilla: 
https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ Move the decoder's page table operations to hisi-ubus Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubfi/irq.c | 4 +- drivers/ub/ubus/Makefile | 2 +- drivers/ub/ubus/decoder.c | 115 +++++++--------- drivers/ub/ubus/decoder.h | 43 +++--- drivers/ub/ubus/omm.h | 32 ----- drivers/ub/ubus/resource.c | 1 - drivers/ub/ubus/ubus_controller.h | 9 ++ drivers/ub/ubus/vendor/hisilicon/Makefile | 2 +- drivers/ub/ubus/vendor/hisilicon/controller.c | 14 +- .../hisilicon/hisi-decoder.c} | 128 +++++++++++++----- .../ub/ubus/vendor/hisilicon/hisi-decoder.h | 50 +++++++ drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 2 - 12 files changed, 232 insertions(+), 170 deletions(-) delete mode 100644 drivers/ub/ubus/omm.h rename drivers/ub/ubus/{omm.c => vendor/hisilicon/hisi-decoder.c} (86%) create mode 100644 drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index d47e4ce67bd6..846af8d6c5f1 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -61,8 +61,10 @@ int ubrt_register_gsi(u32 hwirq, int trigger, int polarity, const char *name, res->start = irq; res->end = irq; res->flags = IORESOURCE_IRQ; -#endif return 0; +#else + return -EINVAL; +#endif } EXPORT_SYMBOL_GPL(ubrt_register_gsi); diff --git a/drivers/ub/ubus/Makefile b/drivers/ub/ubus/Makefile index 59505977dd2f..96efd80d502f 100644 --- a/drivers/ub/ubus/Makefile +++ b/drivers/ub/ubus/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_UB_UBUS) += ub-driver.o controller.o config.o entity.o ras.o obj-$(CONFIG_UB_UBUS) += msi/ ubus-y := ubus_driver.o sysfs.o ubus_controller.o msg.o ubus_config.o port.o cc.o eid.o cna.o route.o -ubus-y += enum.o resource.o ubus_entity.o reset.o cap.o interrupt.o decoder.o omm.o ioctl.o eu.o link.o +ubus-y += enum.o resource.o ubus_entity.o reset.o cap.o interrupt.o decoder.o ioctl.o eu.o link.o ubus-y += 
instance.o pool.o memory.o ubus-y += services/ras.o services/service.o services/gucd.o diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index f5a8e69fd1cf..288e33a96038 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -13,7 +13,6 @@ #include "ubus.h" #include "ubus_controller.h" -#include "omm.h" #include "decoder.h" #define MMIO_SIZE_MASK GENMASK_ULL(18, 16) @@ -33,8 +32,6 @@ #define EVTQ_ENABLE 0x1 #define EVT_ENTRY_SIZE 16 -#define DECODER_PAGE_TABLE_ENTRY_SIZE 8 - #define DECODER_QUEUE_TIMEOUT_US 1000000 /* 1s */ static void ub_decoder_uninit_queue(struct ub_decoder *decoder) @@ -170,68 +167,24 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) return ret; } -static int ub_decoder_create_page_table(struct ub_decoder *decoder) +static int ub_decoder_create_page_table(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - struct page_table_desc *invalid_desc = &decoder->invalid_desc; - struct ub_entity *uent = decoder->uent; - struct page_table *pgtlb; - void *pgtlb_base; - size_t size; - - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - pgtlb = &decoder->pgtlb; - pgtlb_base = dmam_alloc_coherent(decoder->dev, size, - &pgtlb->pgtlb_dma, GFP_KERNEL); - if (!pgtlb_base) { - ub_err(uent, "allocate ub decoder page table fail\n"); - return -ENOMEM; - } - pgtlb->pgtlb_base = pgtlb_base; - - size = sizeof(*pgtlb->desc_base) * DECODER_PAGE_TABLE_SIZE; - pgtlb->desc_base = kzalloc(size, GFP_KERNEL); - if (!pgtlb->desc_base) { - ub_err(uent, "allocate ub decoder page table desc fail\n"); - goto release_pgtlb; - } - - invalid_desc->page_base = dmam_alloc_coherent(decoder->dev, - RANGE_TABLE_PAGE_SIZE, - &invalid_desc->page_dma, - GFP_KERNEL); - if (!invalid_desc->page_base) { - ub_err(uent, "decoder alloc free page fail\n"); - goto release_desc; - } - decoder->invalid_page_dma = (invalid_desc->page_dma & - DECODER_PGTBL_PGPRT_MASK) >> - DECODER_DMA_PAGE_ADDR_OFFSET; - - 
ub_decoder_init_page_table(decoder, pgtlb_base); + if (ubc->ops->create_decoder_table) + return ubc->ops->create_decoder_table(decoder); - return 0; - -release_desc: - kfree(pgtlb->desc_base); - pgtlb->desc_base = NULL; -release_pgtlb: - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - dmam_free_coherent(decoder->dev, size, pgtlb_base, pgtlb->pgtlb_dma); - return -ENOMEM; + ub_err(decoder->uent, "ub bus controller can't create decoder table\n"); + return -EPERM; } -static void ub_decoder_free_page_table(struct ub_decoder *decoder) +static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - struct page_table_desc *invalid_desc = &decoder->invalid_desc; - size_t size; - - dmam_free_coherent(decoder->dev, RANGE_TABLE_PAGE_SIZE, - invalid_desc->page_base, invalid_desc->page_dma); - kfree(decoder->pgtlb.desc_base); - - size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; - dmam_free_coherent(decoder->dev, size, decoder->pgtlb.pgtlb_base, - decoder->pgtlb.pgtlb_dma); + if (ubc->ops->free_decoder_table) + ubc->ops->free_decoder_table(decoder); + else + ub_err(decoder->uent, + "ub bus controller can't free decoder table\n"); } static void ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, @@ -302,7 +255,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) if (ret) goto release_decoder; - ret = ub_decoder_create_page_table(decoder); + ret = ub_decoder_create_page_table(ubc, decoder); if (ret) { ub_err(uent, "decoder create page table failed\n"); goto release_queue; @@ -321,7 +274,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) return ret; release_page_table: - ub_decoder_free_page_table(decoder); + ub_decoder_free_page_table(ubc, decoder); release_queue: ub_decoder_uninit_queue(decoder); release_decoder: @@ -397,7 +350,7 @@ static void ub_remove_decoder(struct ub_bus_controller *ubc) ub_decoder_device_unset(decoder); - ub_decoder_free_page_table(decoder); + 
ub_decoder_free_page_table(ubc, decoder); ub_decoder_uninit_queue(decoder); @@ -635,6 +588,7 @@ int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, ret = wait_for_cmdq_notify(decoder); return ret; } +EXPORT_SYMBOL_GPL(ub_decoder_cmd_request); static bool queue_empty(struct ub_decoder_queue *q) { @@ -839,3 +793,38 @@ void ub_decoder_uninit(struct ub_entity *uent) ub_remove_decoder(uent->ubc); } + +int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) +{ + struct ub_bus_controller *ubc; + + if (!decoder) { + pr_err("unmap mmio decoder ptr is null\n"); + return -EINVAL; + } + + ubc = decoder->uent->ubc; + if (!ubc->ops->decoder_unmap) { + pr_err("decoder_unmap ops not exist\n"); + return -EINVAL; + } + return ubc->ops->decoder_unmap(ubc->decoder, addr, size); +} + +int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) +{ + struct ub_bus_controller *ubc; + + if (!decoder || !info) { + pr_err("decoder or map info is null\n"); + return -EINVAL; + } + + ubc = decoder->uent->ubc; + if (!ubc->ops->decoder_map) { + pr_err("decoder_map ops not exist\n"); + return -EINVAL; + } + + return ubc->ops->decoder_map(ubc->decoder, info); +} diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 47710ead71db..37d628dc45e2 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -87,33 +87,20 @@ struct ub_decoder { struct mutex table_lock; }; -#define DECODER_PGTBL_PGPRT_MASK GENMASK_ULL(47, 12) -#define DECODER_DMA_PAGE_ADDR_OFFSET 12 - -#define PGTLB_CACHE_IR_NC 0b00 -#define PGTLB_CACHE_IR_WBRA 0b01 -#define PGTLB_CACHE_IR_WT 0b10 -#define PGTLB_CACHE_IR_WB 0b11 -#define PGTLB_CACHE_OR_NC 0b0000 -#define PGTLB_CACHE_OR_WBRA 0b0100 -#define PGTLB_CACHE_OR_WT 0b1000 -#define PGTLB_CACHE_OR_WB 0b1100 -#define PGTLB_CACHE_SH_NSH 0b000000 -#define PGTLB_CACHE_SH_OSH 0b100000 -#define PGTLB_CACHE_SH_ISH 0b110000 - -#define PGTLB_ATTR_DEFAULT (PGTLB_CACHE_IR_WBRA | \ - PGTLB_CACHE_OR_WBRA 
| \ - PGTLB_CACHE_SH_ISH) - -#define RGTLB_TO_PGTLB 8 -#define DECODER_PAGE_ENTRY_SIZE 64 -#define DECODER_PAGE_SIZE (1 << 12) -#define DECODER_PAGE_TABLE_SIZE (1 << 12) -#define PAGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * DECODER_PAGE_SIZE) -#define RANGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * \ - DECODER_PAGE_SIZE * \ - RGTLB_TO_PGTLB) +struct decoder_map_info { + phys_addr_t pa; + phys_addr_t uba; + u64 size; + u32 tpg_num; + u8 order_id; + u8 order_type; + u64 eid_low; + u64 eid_high; + u32 token_id; + u32 token_value; + u32 upi; + u32 src_eid; +}; void ub_decoder_init(struct ub_entity *uent); void ub_decoder_uninit(struct ub_entity *uent); @@ -121,4 +108,6 @@ void ub_init_decoder_usi(struct ub_entity *uent); void ub_uninit_decoder_usi(struct ub_entity *uent); int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, u64 size, enum ub_cmd_op_type op); +int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); +int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); #endif /* __DECODER_H__ */ diff --git a/drivers/ub/ubus/omm.h b/drivers/ub/ubus/omm.h deleted file mode 100644 index e90ce9cb1f7f..000000000000 --- a/drivers/ub/ubus/omm.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. 
- */ - -#ifndef __OMM_H__ -#define __OMM_H__ - -#include - -extern u8 ubc_feature; - -struct decoder_map_info { - phys_addr_t pa; - phys_addr_t uba; - u64 size; - u32 tpg_num; - u8 order_id; - u8 order_type; - u64 eid_low; - u64 eid_high; - u32 token_id; - u32 token_value; - u32 upi; - u32 src_eid; -}; - -void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base); -int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); -int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); - -#endif /* __OMM_H__ */ diff --git a/drivers/ub/ubus/resource.c b/drivers/ub/ubus/resource.c index d4516d672bb5..6e8ceeb9fa93 100644 --- a/drivers/ub/ubus/resource.c +++ b/drivers/ub/ubus/resource.c @@ -12,7 +12,6 @@ #include "ubus.h" #include "msg.h" #include "decoder.h" -#include "omm.h" #include "resource.h" struct query_token_msg_pld_req { diff --git a/drivers/ub/ubus/ubus_controller.h b/drivers/ub/ubus/ubus_controller.h index 4b3c7a74a414..04eb4a3d7648 100644 --- a/drivers/ub/ubus/ubus_controller.h +++ b/drivers/ub/ubus/ubus_controller.h @@ -6,6 +6,9 @@ #ifndef __UBUS_CONTROLLER_H__ #define __UBUS_CONTROLLER_H__ +#include +#include "decoder.h" + struct ub_bus_controller; struct ub_bus_controller_ops { int (*eu_table_init)(struct ub_bus_controller *ubc); @@ -18,6 +21,12 @@ struct ub_bus_controller_ops { void (*register_decoder_base_addr)(struct ub_bus_controller *ubc, u64 *cmd_queue, u64 *event_queue); int (*entity_enable)(struct ub_entity *uent, u8 enable); + int (*create_decoder_table)(struct ub_decoder *decoder); + void (*free_decoder_table)(struct ub_decoder *decoder); + int (*decoder_map)(struct ub_decoder *decoder, + struct decoder_map_info *info); + int (*decoder_unmap)(struct ub_decoder *decoder, phys_addr_t addr, + u64 size); KABI_RESERVE(1) KABI_RESERVE(2) diff --git a/drivers/ub/ubus/vendor/hisilicon/Makefile b/drivers/ub/ubus/vendor/hisilicon/Makefile index 998c0e09aeef..fec1dbe15796 100644 --- 
a/drivers/ub/ubus/vendor/hisilicon/Makefile +++ b/drivers/ub/ubus/vendor/hisilicon/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0+ hisi_ubus-objs := hisi-ubus.o controller.o vdm.o local-ras.o msg.o msg-core.o -hisi_ubus-objs += msg-debugfs.o eu-table.o memory.o +hisi_ubus-objs += msg-debugfs.o eu-table.o memory.o hisi-decoder.o obj-$(CONFIG_UB_HISI_UBUS) += hisi_ubus.o diff --git a/drivers/ub/ubus/vendor/hisilicon/controller.c b/drivers/ub/ubus/vendor/hisilicon/controller.c index d7ea5c118d32..6c9c8e320479 100644 --- a/drivers/ub/ubus/vendor/hisilicon/controller.c +++ b/drivers/ub/ubus/vendor/hisilicon/controller.c @@ -10,6 +10,7 @@ #include #include "../../ubus_controller.h" +#include "hisi-decoder.h" #include "hisi-ubus.h" #include "hisi-msg.h" @@ -23,17 +24,12 @@ static struct ub_bus_controller_ops hi_ubc_ops = { .unregister_ubmem_irq = hi_unregister_ubmem_irq, .register_decoder_base_addr = hi_register_decoder_base_addr, .entity_enable = hi_send_entity_enable_msg, + .create_decoder_table = hi_create_decoder_table, + .free_decoder_table = hi_free_decoder_table, + .decoder_map = hi_decoder_map, + .decoder_unmap = hi_decoder_unmap, }; -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, u64 *cmd_queue, - u64 *event_queue) -{ - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; - - *cmd_queue = data->io_decoder_cmdq; - *event_queue = data->io_decoder_evtq; -} - static void ub_bus_controller_debugfs_init(struct ub_bus_controller *ubc) { if (!debugfs_initialized()) diff --git a/drivers/ub/ubus/omm.c b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c similarity index 86% rename from drivers/ub/ubus/omm.c rename to drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c index b8b59a9da4f1..ac1fa0498ffc 100644 --- a/drivers/ub/ubus/omm.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c @@ -3,11 +3,13 @@ * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. 
*/ -#define pr_fmt(fmt) "ubus omm: " fmt +#define pr_fmt(fmt) "ubus hisi decoder: " fmt #include -#include "decoder.h" -#include "omm.h" +#include +#include "../../ubus.h" +#include "hisi-ubus.h" +#include "hisi-decoder.h" enum entry_type { INVALID_ENTRY = 0x0, @@ -95,6 +97,7 @@ struct range_table_entry { u64 reserve11 : 12; }; +#define DECODER_PAGE_TABLE_ENTRY_SIZE 8 #define UBA_ADDR_OFFSET 12 #define DECODER_PAGE_INDEX_LOC 20 @@ -107,6 +110,11 @@ struct range_table_entry { #define DECODER_RGTLB_ADDRESS_MASK GENMASK_ULL(34, 20) #define DECODER_RGTLB_ADDRESS_OFFSET 20 #define TOKEN_VALID_MASK GENMASK(0, 0) +#define MEM_LMT_MAX 0x7FFF +#define RANGE_UBA_LOW_MASK GENMASK_ULL(34, 20) +#define RANGE_UBA_HIGH_MASK GENMASK_ULL(63, 35) +#define UBA_CARRY 0x800000000 +#define UBA_NOCARRY 0x0 #define get_pgtlb_idx(decoder, pa) ((((pa) - (decoder)->mmio_base_addr) & \ DECODER_PAGE_TABLE_MASK) >> \ @@ -485,12 +493,6 @@ static int handle_page_table(struct ub_decoder *decoder, u64 *offset, return ret; } -#define MEM_LMT_MAX 0x7FFF -#define RANGE_UBA_LOW_MASK GENMASK_ULL(34, 20) -#define RANGE_UBA_HIGH_MASK GENMASK_ULL(63, 35) -#define UBA_CARRY 0x800000000 -#define UBA_NOCARRY 0x0 - static void fill_range_table(struct ub_decoder *decoder, struct range_table_entry *rg_entry, struct decoder_map_info *info, u64 *offset) @@ -593,7 +595,7 @@ static int handle_table(struct ub_decoder *decoder, ret); /* if it is map operation, revert it. 
unmap operation can't revert */ if (is_map) - (void)ub_decoder_unmap(decoder, info->pa, + (void)hi_decoder_unmap(decoder, info->pa, rollback_size); break; } @@ -601,7 +603,88 @@ static int handle_table(struct ub_decoder *decoder, return ret; } -int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) +static void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base) +{ + struct page_table_entry *pgtlb_entry; + int i; + + for (i = 0; i < DECODER_PAGE_TABLE_SIZE; i++) { + pgtlb_entry = (struct page_table_entry *)pgtlb_base + i; + pgtlb_entry->entry_type = PAGE_TABLE; + pgtlb_entry->next_lv_addr = decoder->invalid_page_dma; + pgtlb_entry->pgtlb_attr = PGTLB_ATTR_DEFAULT; + } +} + +void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, + u64 *cmd_queue, u64 *event_queue) +{ + struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; + + *cmd_queue = data->io_decoder_cmdq; + *event_queue = data->io_decoder_evtq; +} + +int hi_create_decoder_table(struct ub_decoder *decoder) +{ + struct page_table_desc *invalid_desc = &decoder->invalid_desc; + struct page_table *pgtlb; + void *pgtlb_base; + size_t size; + + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + pgtlb = &decoder->pgtlb; + pgtlb_base = dmam_alloc_coherent(decoder->dev, size, + &pgtlb->pgtlb_dma, GFP_KERNEL); + if (!pgtlb_base) + return -ENOMEM; + + pgtlb->pgtlb_base = pgtlb_base; + + size = sizeof(*pgtlb->desc_base) * DECODER_PAGE_TABLE_SIZE; + pgtlb->desc_base = kzalloc(size, GFP_KERNEL); + if (!pgtlb->desc_base) + goto release_pgtlb; + + invalid_desc->page_base = dmam_alloc_coherent(decoder->dev, + RANGE_TABLE_PAGE_SIZE, + &invalid_desc->page_dma, + GFP_KERNEL); + if (!invalid_desc->page_base) + goto release_desc; + + decoder->invalid_page_dma = (invalid_desc->page_dma & + DECODER_PGTBL_PGPRT_MASK) >> + DECODER_DMA_PAGE_ADDR_OFFSET; + + ub_decoder_init_page_table(decoder, pgtlb_base); + + return 0; + +release_desc: + 
kfree(pgtlb->desc_base); + pgtlb->desc_base = NULL; +release_pgtlb: + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + dmam_free_coherent(decoder->dev, size, pgtlb_base, pgtlb->pgtlb_dma); + return -ENOMEM; +} + +void hi_free_decoder_table(struct ub_decoder *decoder) +{ + struct page_table_desc *invalid_desc = &decoder->invalid_desc; + size_t size; + + dmam_free_coherent(decoder->dev, RANGE_TABLE_PAGE_SIZE, + invalid_desc->page_base, invalid_desc->page_dma); + kfree(decoder->pgtlb.desc_base); + + size = DECODER_PAGE_TABLE_ENTRY_SIZE * DECODER_PAGE_TABLE_SIZE; + dmam_free_coherent(decoder->dev, size, decoder->pgtlb.pgtlb_base, + decoder->pgtlb.pgtlb_dma); +} + +int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) { int ret; struct decoder_map_info info = { @@ -609,10 +692,6 @@ int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) .size = size, }; - if (!decoder) { - pr_err("unmap mmio decoder ptr is null\n"); - return -EINVAL; - } if (size < SZ_1M) size = SZ_1M; ret = handle_table(decoder, &info, false); @@ -621,12 +700,8 @@ int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) return ub_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); } -int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) +int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) { - if (!decoder || !info) { - pr_err("decoder or map info is null\n"); - return -EINVAL; - } if (info->size < SZ_1M) info->size = SZ_1M; ub_info(decoder->uent, @@ -637,16 +712,3 @@ int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) return handle_table(decoder, info, true); } - -void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_base) -{ - struct page_table_entry *pgtlb_entry; - int i; - - for (i = 0; i < DECODER_PAGE_TABLE_SIZE; i++) { - pgtlb_entry = (struct page_table_entry *)pgtlb_base + i; - pgtlb_entry->entry_type = PAGE_TABLE; - 
pgtlb_entry->next_lv_addr = decoder->invalid_page_dma; - pgtlb_entry->pgtlb_attr = PGTLB_ATTR_DEFAULT; - } -} diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h new file mode 100644 index 000000000000..50658ef7b9cb --- /dev/null +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) HiSilicon Technologies Co., Ltd. 2025. All rights reserved. + */ + +#ifndef __HISI_DECODER_H__ +#define __HISI_DECODER_H__ + +#include +#include +#include "../../decoder.h" + +#define DECODER_PGTBL_PGPRT_MASK GENMASK_ULL(47, 12) +#define DECODER_DMA_PAGE_ADDR_OFFSET 12 + +#define PGTLB_CACHE_IR_NC 0b00 +#define PGTLB_CACHE_IR_WBRA 0b01 +#define PGTLB_CACHE_IR_WT 0b10 +#define PGTLB_CACHE_IR_WB 0b11 +#define PGTLB_CACHE_OR_NC 0b0000 +#define PGTLB_CACHE_OR_WBRA 0b0100 +#define PGTLB_CACHE_OR_WT 0b1000 +#define PGTLB_CACHE_OR_WB 0b1100 +#define PGTLB_CACHE_SH_NSH 0b000000 +#define PGTLB_CACHE_SH_OSH 0b100000 +#define PGTLB_CACHE_SH_ISH 0b110000 + +#define PGTLB_ATTR_DEFAULT (PGTLB_CACHE_IR_WBRA | \ + PGTLB_CACHE_OR_WBRA | \ + PGTLB_CACHE_SH_ISH) + +#define RGTLB_TO_PGTLB 8 +#define DECODER_PAGE_ENTRY_SIZE 64 +#define DECODER_PAGE_SIZE (1 << 12) +#define DECODER_PAGE_TABLE_SIZE (1 << 12) +#define PAGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * DECODER_PAGE_SIZE) +#define RANGE_TABLE_PAGE_SIZE (DECODER_PAGE_ENTRY_SIZE * \ + DECODER_PAGE_SIZE * \ + RGTLB_TO_PGTLB) + +void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, + u64 *cmd_queue, u64 *event_queue); + +int hi_create_decoder_table(struct ub_decoder *decoder); +void hi_free_decoder_table(struct ub_decoder *decoder); + +int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); +int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); + +#endif /* __HISI_DECODER_H__ */ diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h 
b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 092695e9d43c..92e97c257302 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -43,8 +43,6 @@ int hi_mem_decoder_create(struct ub_bus_controller *ubc); void hi_mem_decoder_remove(struct ub_bus_controller *ubc); void hi_register_ubmem_irq(struct ub_bus_controller *ubc); void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc); -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, u64 *cmd_queue, - u64 *event_queue); int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable); int ub_bus_controller_probe(struct ub_bus_controller *ubc); -- Gitee From 2cd331cfba110633c56352c6e879ef1f561e5d01 Mon Sep 17 00:00:00 2001 From: Junlong Zheng Date: Mon, 1 Dec 2025 15:37:47 +0800 Subject: [PATCH 077/126] ub:ubus: fix bug of msg workqueue null commit 08ec74f94b034b0face4f8f0509bb43e1a1cd3be openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ 1.fix bug of msg workqueue null 2.don't cfg ubc0 route table during cluster mode Signed-off-by: Junlong Zheng Signed-off-by: Shi Yang --- drivers/ub/ubus/msg.c | 12 +++++++----- drivers/ub/ubus/route.c | 4 ++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/msg.c b/drivers/ub/ubus/msg.c index 1d1893a8f54d..54f77128ad2f 100644 --- a/drivers/ub/ubus/msg.c +++ b/drivers/ub/ubus/msg.c @@ -162,7 +162,7 @@ struct workqueue_struct *get_rx_msg_wq(u8 msg_code) return rx_msg_wq[msg_code]; } -static bool msg_rx_flag; +static atomic_t msg_rx_flag; int message_rx_init(void) { @@ -186,18 +186,20 @@ int message_rx_init(void) rx_msg_wq[i] = q; } - msg_rx_flag = true; + wmb(); /* Ensure the register is written correctly. 
*/ + atomic_set(&msg_rx_flag, 1); return 0; } void message_rx_uninit(void) { -#define MSG_RX_WAIT_US 1000 +#define MSG_RX_WAIT_US 15000 struct workqueue_struct *q; int i; - msg_rx_flag = false; + atomic_set(&msg_rx_flag, 0); + wmb(); /* Ensure the register is written correctly. */ /* For cpus still handle rx msg in interrupt context */ udelay(MSG_RX_WAIT_US); @@ -297,7 +299,7 @@ int message_rx_handler(struct ub_bus_controller *ubc, void *pkt, u16 len) struct msg_extended_header *msgetah = &header->msgetah; struct ub_rx_msg_task *task; - if (!msg_rx_flag) + if (!atomic_read(&msg_rx_flag)) return -EBUSY; if (len < MSG_PKT_HEADER_SIZE) { diff --git a/drivers/ub/ubus/route.c b/drivers/ub/ubus/route.c index 364bf78d93c3..ef3d462a90ee 100644 --- a/drivers/ub/ubus/route.c +++ b/drivers/ub/ubus/route.c @@ -505,6 +505,10 @@ static void ub_set_route_table_entry(struct ub_entity *uent, u32 dst_cna, if (uent->port_nums == 1) return; + /* In a cluster scenario, do not configure the UBC routing table. */ + if (is_ibus_controller(uent) && uent->ubc->cluster) + return; + pr_info("cna %#x uent set dstcna %#x route\n", uent->cna, dst_cna); for (i = 0; i < EBW(uent->port_nums); i++) { -- Gitee From 124c7ecc3f158a386ea05e8a9598c6bf70b543d2 Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Fri, 14 Nov 2025 15:59:01 +0800 Subject: [PATCH 078/126] vfio:ubus vfio-ub support ub entity enable commit 49e93aebab5dc6a0f1901977aafdf4fca30939b9 openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ vfio-ub support ub entity enable. 
Signed-off-by: Yahui Liu Signed-off-by: Shi Yang --- drivers/vfio/ubus/vfio_ub_config.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/vfio/ubus/vfio_ub_config.c b/drivers/vfio/ubus/vfio_ub_config.c index aaf398281d9e..0addb2526c16 100644 --- a/drivers/vfio/ubus/vfio_ub_config.c +++ b/drivers/vfio/ubus/vfio_ub_config.c @@ -588,6 +588,9 @@ static int vfio_ub_cfg1_basic_write(struct vfio_ub_core_device *vdev, u64 pos, if (count < 0) return count; + if (pos == UB_ENTITY_RS_ACCESS_EN) + ub_entity_enable(vdev->uent, val & 0x1); + buf = vfio_ub_find_cfg_buf(vdev, UB_CFG1_BASIC_CAP); if (!buf) return -EFAULT; -- Gitee From 6e7746951a3843a606d006ae6d564889b271b97b Mon Sep 17 00:00:00 2001 From: Anzhe Li Date: Sat, 29 Nov 2025 11:53:40 +0800 Subject: [PATCH 079/126] ub:ubus adapt port local ras commit 3764805d000a62144f97c87c2ecee5c918a0801e openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------------ Adapt port local ras, and add port reset function in cluster mode. 
Signed-off-by: Anzhe Li Signed-off-by: Shi Yang --- drivers/ub/ubus/link.c | 14 ++-- drivers/ub/ubus/pool.c | 4 +- drivers/ub/ubus/port.c | 79 ++++++++------------ drivers/ub/ubus/port.h | 7 +- drivers/ub/ubus/reset.c | 6 +- drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h | 2 +- drivers/ub/ubus/vendor/hisilicon/local-ras.c | 40 +++++++++- drivers/ub/ubus/vendor/hisilicon/vdm.c | 45 +++++++++++ drivers/ub/ubus/vendor/hisilicon/vdm.h | 11 +++ 9 files changed, 142 insertions(+), 66 deletions(-) diff --git a/drivers/ub/ubus/link.c b/drivers/ub/ubus/link.c index 266d8e828143..001139f3ad63 100644 --- a/drivers/ub/ubus/link.c +++ b/drivers/ub/ubus/link.c @@ -285,13 +285,13 @@ static void port_link_state_change(struct ub_port *port, struct ub_port *r_port) void ublc_link_up_handle(struct ub_port *port) { struct ub_entity *uent = port->uent; - struct ub_port *r_port; struct ub_entity *r_uent; + struct ub_port *r_port; int ret; if (port->r_uent) { - ub_err(uent, "port%u is already up\n", port->index); - return; + ub_warn(uent, "port%u is already up\n", port->index); + goto link_up_notify; } device_lock(&uent->dev); @@ -324,6 +324,8 @@ void ublc_link_up_handle(struct ub_port *port) ub_info(uent, "port%u link up\n", port->index); out: device_unlock(&uent->dev); +link_up_notify: + ub_notify_share_port(port, UB_PORT_EVENT_LINK_UP); } void ublc_link_down_handle(struct ub_port *port) @@ -332,8 +334,8 @@ void ublc_link_down_handle(struct ub_port *port) struct ub_port *r_port; if (!port->r_uent) { - ub_err(uent, "port%u is already down\n", port->index); - return; + ub_warn(uent, "port%u is already down\n", port->index); + goto link_down_notify; } device_lock(&uent->dev); @@ -355,6 +357,8 @@ void ublc_link_down_handle(struct ub_port *port) device_unlock(&uent->dev); ub_info(uent, "port%u link down\n", port->index); +link_down_notify: + ub_notify_share_port(port, UB_PORT_EVENT_LINK_DOWN); } void ub_link_change_handler(struct work_struct *work) diff --git a/drivers/ub/ubus/pool.c 
b/drivers/ub/ubus/pool.c index e86b19b58f63..414e9dba0c20 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -613,9 +613,9 @@ static void ub_port_reset_notify_handler(struct ub_bus_controller *ubc, void *ms port = ubc->uent->ports + pld->port_index; if (port->shareable && port->domain_boundary) { if (pld->type == RESET_PREPARE) - ub_notify_share_port(port, RESET_PREPARE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); else if (pld->type == RESET_DONE) - ub_notify_share_port(port, RESET_DONE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); } rsp: diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index 36950c24e344..a8a238df8cc4 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ -577,49 +577,46 @@ static DECLARE_RWSEM(ub_share_port_notify_list_rwsem); struct ub_share_port_notify_node { struct ub_entity *parent; - struct ub_entity *idev; + struct ub_entity *entity; u16 port_id; struct ub_share_port_ops *ops; struct list_head node; }; -int ub_register_share_port(struct ub_entity *idev, u16 port_id, +int ub_register_share_port(struct ub_entity *entity, u16 port_id, struct ub_share_port_ops *ops) { struct ub_share_port_notify_node *notify_node; struct ub_entity *parent; struct ub_port *port; - if (unlikely(!idev || !ops)) + if (unlikely(!entity || !ops)) return -EINVAL; - if (!is_idev(idev)) { - ub_err(idev, "don't support non-idev device with type %u register share port\n", - uent_type(idev)); + if (!is_idev(entity) && !is_ibus_controller(entity)) { + ub_err(entity, + "don't support device with type %u register share port\n", + uent_type(entity)); return -EINVAL; } /* get primary entity first */ - parent = idev; - while (!is_primary(parent)) - parent = parent->pue; - - /* check parent is controller */ - parent = to_ub_entity(parent->dev.parent); - if (!is_ibus_controller(parent)) { - ub_err(idev, "don't support register share port at non-controller device with type %u\n", - uent_type(parent)); - return 
-EINVAL; - } - - if (port_id >= parent->port_nums) { - ub_err(parent, "port id %u exceeds port num %u\n", port_id, - parent->port_nums); - return -EINVAL; + parent = entity; + if (is_idev(parent)) { + while (!is_primary(parent)) + parent = parent->pue; + + /* check parent is controller */ + parent = to_ub_entity(parent->dev.parent); + if (!is_ibus_controller(parent)) { + ub_err(entity, "don't support register share port at non-controller device with type %u\n", + uent_type(parent)); + return -EINVAL; + } } port = parent->ports + port_id; - if (!port->shareable) { + if (is_idev(entity) && !port->shareable) { ub_err(parent, "port%u isn't shareable\n", port_id); return -EINVAL; } @@ -629,7 +626,7 @@ int ub_register_share_port(struct ub_entity *idev, u16 port_id, return -ENOMEM; notify_node->parent = parent; - notify_node->idev = idev; + notify_node->entity = entity; notify_node->port_id = port_id; notify_node->ops = ops; INIT_LIST_HEAD(¬ify_node->node); @@ -638,33 +635,33 @@ int ub_register_share_port(struct ub_entity *idev, u16 port_id, list_add_tail(¬ify_node->node, &ub_share_port_notify_list); up_write(&ub_share_port_notify_list_rwsem); - ub_info(idev, "register share port at %u success\n", port_id); + ub_info(entity, "register share port at %u success\n", port_id); return 0; } EXPORT_SYMBOL_GPL(ub_register_share_port); -void ub_unregister_share_port(struct ub_entity *idev, u16 port_id, +void ub_unregister_share_port(struct ub_entity *entity, u16 port_id, struct ub_share_port_ops *ops) { struct ub_share_port_notify_node *notify_node; - if (unlikely(!idev)) + if (unlikely(!entity)) return; down_write(&ub_share_port_notify_list_rwsem); list_for_each_entry(notify_node, &ub_share_port_notify_list, node) { - if (notify_node->idev != idev || + if (notify_node->entity != entity || notify_node->port_id != port_id || notify_node->ops != ops) continue; list_del(¬ify_node->node); kfree(notify_node); - ub_info(idev, "unregister share port at %u success\n", port_id); + 
ub_info(entity, "unregister share port at %u success\n", port_id); goto unlock; } - ub_err(idev, "share port %u isn't registered, unregister failed\n", + ub_err(entity, "share port %u isn't registered, unregister failed\n", port_id); unlock: up_write(&ub_share_port_notify_list_rwsem); @@ -672,13 +669,13 @@ void ub_unregister_share_port(struct ub_entity *idev, u16 port_id, EXPORT_SYMBOL_GPL(ub_unregister_share_port); void ub_notify_share_port(struct ub_port *port, - enum ub_share_port_notify_type type) + enum ub_port_event type) { struct ub_share_port_notify_node *notify_node; struct ub_share_port_ops *ops; struct ub_entity *uent; - if (!port || type >= NOTIFY_TYPE_MAX) + if (!port || type > UB_PORT_EVENT_RESET_DONE) return; uent = port->uent; @@ -689,23 +686,13 @@ void ub_notify_share_port(struct ub_port *port, continue; ops = notify_node->ops; - switch (type) { - case RESET_PREPARE: - if (ops->reset_prepare) - ops->reset_prepare(notify_node->idev, - notify_node->port_id); - break; - case RESET_DONE: - if (ops->reset_done) - ops->reset_done(notify_node->idev, - notify_node->port_id); - break; - default: - break; - } + if (ops->event_notify) + ops->event_notify(notify_node->entity, + notify_node->port_id, type); } up_read(&ub_share_port_notify_list_rwsem); } +EXPORT_SYMBOL_GPL(ub_notify_share_port); bool ub_port_check_link_up(struct ub_port *port) { diff --git a/drivers/ub/ubus/port.h b/drivers/ub/ubus/port.h index 0350908c531a..21a2c7d33299 100644 --- a/drivers/ub/ubus/port.h +++ b/drivers/ub/ubus/port.h @@ -8,10 +8,9 @@ #define for_each_uent_port(p, d) \ for ((p) = (d)->ports; ((p) - (d)->ports) < (d)->port_nums; (p)++) -enum ub_share_port_notify_type { +enum ub_port_reset_notify_type { RESET_PREPARE, - RESET_DONE, - NOTIFY_TYPE_MAX + RESET_DONE }; struct ub_port; @@ -24,7 +23,7 @@ void ub_ports_del(struct ub_entity *uent); int ub_ports_setup(struct ub_entity *uent); void ub_ports_unset(struct ub_entity *uent); void ub_notify_share_port(struct ub_port *port, - 
enum ub_share_port_notify_type type); + enum ub_port_event type); int ub_port_write_dword(struct ub_port *port, u32 pos, u32 val); bool ub_port_check_link_up(struct ub_port *port); diff --git a/drivers/ub/ubus/reset.c b/drivers/ub/ubus/reset.c index 596d848c2c11..51d984d25b8f 100644 --- a/drivers/ub/ubus/reset.c +++ b/drivers/ub/ubus/reset.c @@ -293,8 +293,7 @@ int ub_port_reset(struct ub_entity *dev, int port_id) return -EINVAL; } - if (port->shareable) - ub_notify_share_port(port, RESET_PREPARE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); /* enable port reset */ ret = ub_port_write_dword(port, UB_PORT_RST, 0x01); @@ -310,8 +309,7 @@ int ub_port_reset(struct ub_entity *dev, int port_id) device_unlock(&dev->dev); if (ub_wait_port_complete(port)) { - if (port->shareable) - ub_notify_share_port(port, RESET_DONE); + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); port->link_state = LINK_STATE_NORMAL; ub_info(dev, "port(%d) reset success!\n", port_id); return ret; diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h index 92e97c257302..44e5fc8165ba 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-ubus.h @@ -44,7 +44,7 @@ void hi_mem_decoder_remove(struct ub_bus_controller *ubc); void hi_register_ubmem_irq(struct ub_bus_controller *ubc); void hi_unregister_ubmem_irq(struct ub_bus_controller *ubc); int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable); - +int hi_send_port_reset_msg(struct ub_entity *uent, u16 port_idx); int ub_bus_controller_probe(struct ub_bus_controller *ubc); void ub_bus_controller_remove(struct ub_bus_controller *ubc); diff --git a/drivers/ub/ubus/vendor/hisilicon/local-ras.c b/drivers/ub/ubus/vendor/hisilicon/local-ras.c index 9dc016555732..cf65f06da93f 100644 --- a/drivers/ub/ubus/vendor/hisilicon/local-ras.c +++ b/drivers/ub/ubus/vendor/hisilicon/local-ras.c @@ -9,7 +9,9 @@ #include "../../ubus.h" #include 
"../../ubus_driver.h" #include "../../reset.h" +#include "../../port.h" #include "local-ras.h" +#include "hisi-ubus.h" struct sub_module_info { u32 sub_module_id; @@ -197,7 +199,31 @@ static int ubus_port_recover(struct ub_entity *uent, u16 port_id) return 0; } -static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id) +static int ubus_port_recover_cluster(struct ub_entity *uent, u16 port_id) +{ + struct ub_port *port; + int ret; + + if (port_id >= uent->port_nums || uent->ports[port_id].type != PHYSICAL) { + pr_err("port id is over port nums or port type is not physical\n"); + return -EINVAL; + } + + port = uent->ports + port_id; + ub_notify_share_port(port, UB_PORT_EVENT_RESET_PREPARE); + + ret = hi_send_port_reset_msg(uent, port_id); + if (ret) { + pr_err("ub vdm port reset failed, ret:%d\n", ret); + return ret; + } + + ub_notify_share_port(port, UB_PORT_EVENT_RESET_DONE); + + return 0; +} + +static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id, bool cluster) { #define NL_PORTS 2 /* @@ -210,7 +236,10 @@ static int nl_ssu_link_credi_overtime_recover(struct ub_entity *uent, u8 nl_id) for (i = 0; i < NL_PORTS; i++) { port_id += i; - ret = ubus_port_recover(uent, port_id); + if (!cluster) + ret = ubus_port_recover(uent, port_id); + else + ret = ubus_port_recover_cluster(uent, port_id); if (ret) { ub_err(uent, "port[%u] recover failed, ret=%d.\n", port_id, ret); return ret; @@ -230,11 +259,14 @@ static int ubus_recover(struct ub_entity *uent, if (is_nl_local_ras(edata->sub_module_id) && is_nl_ssu_link_credi_overtime_err(edata)) { nl_id = edata->core_id; - return nl_ssu_link_credi_overtime_recover(uent, nl_id); + return nl_ssu_link_credi_overtime_recover(uent, nl_id, uent->ubc->cluster); } port_id = (int)edata->port_id; - return ubus_port_recover(uent, port_id); + if (uent->ubc->cluster) + return ubus_port_recover_cluster(uent, port_id); + else + return ubus_port_recover(uent, port_id); } static void 
hisi_ubus_handle_error(struct ub_entity *uent, diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index 2d0444a585c9..f95da0843e26 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -537,3 +537,48 @@ int hi_send_entity_enable_msg(struct ub_entity *uent, u8 enable) return 0; } + +int hi_send_port_reset_msg(struct ub_entity *uent, u16 port_idx) +{ + struct port_reset_pld *rst_pld; + struct vdm_msg_pkt pkt = {}; + struct msg_info info = {}; + struct msg_pkt_dw0 *pld_dw0; + u8 status; + int ret; + + if (!uent->ubc->cluster) + return 0; + + ub_msg_pkt_header_init(&pkt.header, uent, VDM_PORT_RESET_PLD_SIZE, + code_gen(UB_MSG_CODE_VDM, UB_VENDOR_MSG, + MSG_REQ), true); + + pkt.guid_high = *(u64 *)(&uent->ubc->uent->guid.dw[SZ_2]); + pld_dw0 = &pkt.pld_dw0; + pld_dw0->opcode = VDM_OPCODE_UB2FM_COMM_MSG; + pld_dw0->sub_opcode = VDM_SUB_OPCODE_PORT_RESET; + rst_pld = &pkt.reset_pld; + rst_pld->port_idx = port_idx; + + message_info_init(&info, uent->ubc->uent, &pkt, &pkt, + (VDM_PORT_RESET_SIZE << MSG_REQ_SIZE_OFFSET) | + VDM_PORT_RESET_SIZE); + + ub_info(uent, "Sync request port reset msg\n"); + + ret = hi_message_sync_request(uent->message->mdev, &info, + pkt.header.msgetah.code); + if (ret) { + ub_err(uent, "msg sync request ret=%d\n", ret); + return ret; + } + + status = pkt.header.msgetah.rsp_status; + if (status != UB_MSG_RSP_SUCCESS) { + ub_err(uent, "msg rsp status=%#02x\n", status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.h b/drivers/ub/ubus/vendor/hisilicon/vdm.h index 183449aa082e..725288d6abac 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.h +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.h @@ -24,6 +24,7 @@ enum vdm_fm2ub_sub_opcode { enum vdm_ub2fm_sub_opcode { VDM_SUB_OPCODE_ENTITY_ENABLE = 0x1, + VDM_SUB_OPCODE_PORT_RESET = 0x2, }; struct msg_pkt_dw0 { @@ -103,6 +104,13 @@ struct idev_ue_rls_pld { }; #define 
IDEV_UE_RLS_PLD_TOTAL_SIZE 36 +struct port_reset_pld { + /* DW1 */ + u16 rsvd; + u16 port_idx; +}; +#define VDM_PORT_RESET_PLD_SIZE 16 + #define MSG_IDEV_MUE_REG_SIZE \ (MSG_PKT_HEADER_SIZE + IDEV_MUE_REG_PLD_TOTAL_SIZE) #define MSG_IDEV_MUE_RLS_SIZE \ @@ -111,6 +119,8 @@ struct idev_ue_rls_pld { (MSG_PKT_HEADER_SIZE + IDEV_UE_REG_PLD_TOTAL_SIZE) #define MSG_IDEV_UE_RLS_SIZE \ (MSG_PKT_HEADER_SIZE + IDEV_UE_RLS_PLD_TOTAL_SIZE) +#define VDM_PORT_RESET_SIZE \ + (MSG_PKT_HEADER_SIZE + VDM_PORT_RESET_PLD_SIZE) #define VENDOR_GUID_PLD_SIZE 8 @@ -119,6 +129,7 @@ struct vdm_msg_pkt { u64 guid_high; struct msg_pkt_dw0 pld_dw0; union { + struct port_reset_pld reset_pld; struct entity_enable_pld enable_pld; struct idev_pue_reg_pld pd_reg_pld; struct idev_pue_rls_pld pd_rls_pld; -- Gitee From 6b188c5f99bfe1aacb05fdc4ac313ec2b9eedda3 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Thu, 4 Dec 2025 19:23:42 +0800 Subject: [PATCH 080/126] ub:unic Adaptation of the port reset interface commit 3c85899aa7a42bf9b40a4432bd0eb57e5b92ebab openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID7LHB CVE: NA ---------------------------------------------------------------- The ubus port reset interface is changed. Therefore, the port reset interface is adapted. 
Signed-off-by: Yixi Shen Signed-off-by: Xiongchuan Zhou Signed-off-by: Shi Yang --- drivers/ub/ubase/ubase_ubus.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/ub/ubase/ubase_ubus.c b/drivers/ub/ubase/ubase_ubus.c index a589915cd686..a0351b283890 100644 --- a/drivers/ub/ubase/ubase_ubus.c +++ b/drivers/ub/ubase/ubase_ubus.c @@ -114,26 +114,22 @@ static void ubase_ubus_uninit(struct ub_entity *ue) ub_entity_enable(ue, 0); } -static void ubase_port_reset_prepare(struct ub_entity *ue, u16 port_id) +static void ubase_port_event_notify(struct ub_entity *ue, u16 port_id, int event) { struct ubase_dev *udev = dev_get_drvdata(&ue->dev); - ubase_info(udev, "port %u reset prepare.\n", port_id); - ubase_port_down(udev); -} - -static void ubase_port_reset_done(struct ub_entity *ue, u16 port_id) -{ - struct ubase_dev *udev = dev_get_drvdata(&ue->dev); - - ubase_port_up(udev); - ubase_info(udev, "port %u reset done.\n", port_id); - udev->reset_stat.port_reset_cnt++; + if (event == UB_PORT_EVENT_RESET_PREPARE) { + ubase_info(udev, "port %u reset prepare.\n", port_id); + ubase_port_down(udev); + } else if (event == UB_PORT_EVENT_RESET_DONE) { + ubase_port_up(udev); + ubase_info(udev, "port %u reset done.\n", port_id); + udev->reset_stat.port_reset_cnt++; + } } static struct ub_share_port_ops ubase_share_port_ops = { - .reset_prepare = ubase_port_reset_prepare, - .reset_done = ubase_port_reset_done + .event_notify = ubase_port_event_notify }; static int ubase_ubus_reg_share_port(struct ubase_dev *udev) -- Gitee From 7e3a81fc52ea874b699e2aaf30d20c5f39173548 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 25 Nov 2025 19:27:05 +0800 Subject: [PATCH 081/126] ub:ubus: add active_mutex in ub_entity commit f21f7d173065acedec14639e7522420e62baefde openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Add mutex 
to the entity structure. Signed-off-by: Yiyu Liu Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/ubus_entity.c | 25 ++++++++++++++++--------- include/ub/ubus/ubus.h | 12 +++++++----- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 6031469a27b9..4fa1a8533e71 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -53,6 +53,8 @@ struct ub_entity *ub_alloc_ent(void) INIT_LIST_HEAD(&uent->slot_list); INIT_LIST_HEAD(&uent->instance_node); + mutex_init(&uent->active_mutex); + uent->dev.type = &ub_dev_type; uent->cna = 0; uent->tid = 0; /* default tid according to ummu */ @@ -971,12 +973,17 @@ void ub_entity_enable(struct ub_entity *uent, u8 enable) ub_cfg_write_byte(uent, UB_BUS_ACCESS_EN, enable); ub_cfg_write_byte(uent, UB_ENTITY_RS_ACCESS_EN, enable); - if (!enable && !ub_entity_test_priv_flag(uent, UB_ENTITY_ACTIVE)) + mutex_lock(&uent->active_mutex); + + if (!enable && !ub_entity_test_priv_flag(uent, UB_ENTITY_ACTIVE)) { + mutex_unlock(&uent->active_mutex); return; + } if (uent->ubc && uent->ubc->ops && uent->ubc->ops->entity_enable) { ret = uent->ubc->ops->entity_enable(uent, enable); if (ret) { + mutex_unlock(&uent->active_mutex); ub_err(uent, "entity enable, ret=%d, enable=%u\n", ret, enable); return; @@ -989,6 +996,8 @@ void ub_entity_enable(struct ub_entity *uent, u8 enable) ub_entity_assign_priv_flag(uent, UB_ENTITY_ACTIVE, true); else ub_entity_assign_priv_flag(uent, UB_ENTITY_ACTIVE, false); + + mutex_unlock(&uent->active_mutex); } EXPORT_SYMBOL_GPL(ub_entity_enable); @@ -1074,12 +1083,11 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx) return -EINVAL; } - if (!device_trylock(&target_dev->dev)) - return -EBUSY; + mutex_lock(&target_dev->active_mutex); if (ub_entity_test_priv_flag(target_dev, UB_ENTITY_ACTIVE)) { ub_warn(uent, "entity_idx[%u] is already in normal state\n", entity_idx); - 
device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return 0; } @@ -1091,7 +1099,7 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx) ub_info(uent, "udrv activate entity_idx[%u] success\n", entity_idx); } - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return ret; } EXPORT_SYMBOL_GPL(ub_activate_entity); @@ -1115,12 +1123,11 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx) return -EINVAL; } - if (!device_trylock(&target_dev->dev)) - return -EBUSY; + mutex_lock(&target_dev->active_mutex); if (!ub_entity_test_priv_flag(target_dev, UB_ENTITY_ACTIVE)) { ub_warn(uent, "entity_idx[%u] is already in disable state\n", entity_idx); - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return 0; } @@ -1132,7 +1139,7 @@ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx) ub_info(uent, "udrv deactivate entity_idx[%u] success\n", entity_idx); } - device_unlock(&target_dev->dev); + mutex_unlock(&target_dev->active_mutex); return ret; } EXPORT_SYMBOL_GPL(ub_deactivate_entity); diff --git a/include/ub/ubus/ubus.h b/include/ub/ubus/ubus.h index ca3ba63c226a..6752ead41e64 100644 --- a/include/ub/ubus/ubus.h +++ b/include/ub/ubus/ubus.h @@ -263,6 +263,8 @@ struct ub_entity { u32 user_eid; struct ub_eu_table *eu_table; + struct mutex active_mutex; + u32 support_feature; u16 upi; @@ -568,7 +570,7 @@ struct ub_entity *ub_get_entity(unsigned int vendor, unsigned int entity, * * Enable or disable the communication channel between entity and user host. * - * Context: Any context. + * Context: Any context, it will take mutex_lock()/mutex_unlock(). */ void ub_entity_enable(struct ub_entity *uent, u8 enable); @@ -883,9 +885,9 @@ const struct cpumask *ub_irq_get_affinity(struct ub_entity *uent, int nr); * @uent: UB entity. * @entity_idx: Number of the entity to be activated. 
* - * Context: Any context, it will take device_trylock()/device_unlock() + * Context: Any context, it will take mutex_lock()/mutex_unlock() * Return: 0 if success, or %-EINVAL if the device doesn't match the driver, - * or %-EBUSY if can't get device_trylock(), or other failed negative values. + * or other failed negative values. */ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); @@ -894,9 +896,9 @@ int ub_activate_entity(struct ub_entity *uent, u32 entity_idx); * @uent: UB entity. * @entity_idx: Number of the entity to be deactivated. * - * Context: Any context, it will take device_trylock()/device_unlock() + * Context: Any context, it will take mutex_lock()/mutex_unlock() * Return: 0 if success, or %-EINVAL if the entity doesn't match the driver, - * or %-EBUSY if can't get device_trylock(), or other failed negative values. + * or other failed negative values. */ int ub_deactivate_entity(struct ub_entity *uent, u32 entity_idx); -- Gitee From 6b06d8246d3a42a9b5ce3859ee2a34b0f160bae7 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 9 Dec 2025 09:56:40 +0800 Subject: [PATCH 082/126] ub:hisi-ubus: Fix ue reg/unreg without lock bug commit d6926f5c7ee4c3f9dda78762a3f59a5afa867963 openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ------------------------------------------------------------ Currently, UE registration and deregistration are not performed under the protection of the MUE device lock. When the MUE performs a reset, there is a risk of concurrent resource conflicts. 
Fixes: 86fec00cb73a ("ub:hisi-ubus: Support UBUS vdm entity enable message") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/vendor/hisilicon/vdm.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index f95da0843e26..4a19e0fb8d57 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -14,8 +14,6 @@ #include "hisi-ubus.h" #include "vdm.h" -static DEFINE_SPINLOCK(ub_vdm_lock); - struct opcode_func_map { u16 sub_opcode; u16 idev_pkt_size; @@ -274,6 +272,7 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms struct ub_entity *pue, *alloc_dev = NULL; u16 ue_entity_idx = pld->ue_entity_idx; int start_idx, end_idx, ret; + int lock = 0; u8 status; /* check whether pue is registered. */ @@ -306,10 +305,13 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms goto ue_reg_rsp; } - spin_lock(&ub_vdm_lock); - ret = ub_idevice_enable_handle(pue, ue_entity_idx, 0, NULL, &alloc_dev); - spin_unlock(&ub_vdm_lock); + lock = device_trylock(&pue->dev); + if (!lock) { + status = UB_MSG_RSP_EXEC_EBUSY; + goto ue_reg_rsp; + } + ret = ub_idevice_enable_handle(pue, ue_entity_idx, 0, NULL, &alloc_dev); if (ret == 0) { alloc_dev->user_eid = pld->user_eid[0]; ub_info(pue, "enable idev ue succeeded, user_eid=0x%x\n", @@ -331,6 +333,9 @@ static u8 ub_idevice_ue_add_handler(struct ub_bus_controller *ubc, struct vdm_ms pue->num_ues += 1; } + if (lock) + device_unlock(&pue->dev); + return status; } @@ -346,6 +351,7 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms struct ub_entity *pue, *vd_dev, *tmp; u16 ue_entity_idx = pld->ue_entity_idx; u16 start_idx, end_idx; + int lock = 0; u8 status; /* search for pue with guid. 
Return an error if pue does not exist */ @@ -372,6 +378,12 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms "The pue of this vdm ue to be disabled is normal\n"); } + lock = device_trylock(&pue->dev); + if (!lock) { + status = UB_MSG_RSP_EXEC_EBUSY; + goto ue_rls_rsp; + } + status = UB_MSG_RSP_EXEC_ENODEV; /* otherwise, delete this ue with ue idx in message payload */ list_for_each_entry_safe(vd_dev, tmp, &pue->ue_list, node) @@ -389,6 +401,9 @@ static u8 ub_idevice_ue_rls_handler(struct ub_bus_controller *ubc, struct vdm_ms pue->num_ues -= 1; } + if (lock) + device_unlock(&pue->dev); + return status; } -- Gitee From 732d6d27fe3cf68924d4ed2d320295ebec695f01 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 9 Dec 2025 10:29:25 +0800 Subject: [PATCH 083/126] ub:ubus: Delete ubc cfg0 config during cluster mode commit 3ee6b2906e902998d3bcb476bd812e9d4db8c738 openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- In cluster mode, the management authority of UBC is primarily held by UBFM. Ubus cannot configure the configuration space of cfg0 except for the vport configuration space. 
Fixes: 280895301d3b ("ub:ubus: Support Ubus read/write configuration functions") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/enum.c | 4 +++ drivers/ub/ubus/eu.c | 5 +++- drivers/ub/ubus/instance.c | 12 +++++---- drivers/ub/ubus/pool.c | 7 +++++ drivers/ub/ubus/port.c | 4 +++ drivers/ub/ubus/services/gucd.c | 29 ++++----------------- drivers/ub/ubus/ubus_entity.c | 22 +++++++++------- drivers/ub/ubus/vendor/hisilicon/eu-table.c | 6 +++++ include/uapi/ub/ubus/ubus_regs.h | 5 ---- 9 files changed, 49 insertions(+), 45 deletions(-) diff --git a/drivers/ub/ubus/enum.c b/drivers/ub/ubus/enum.c index 48c37e30ca31..67a87f9f23aa 100644 --- a/drivers/ub/ubus/enum.c +++ b/drivers/ub/ubus/enum.c @@ -1430,6 +1430,10 @@ int ub_enum_entities_active(struct list_head *dev_list) list_del(&uent->node); ub_entity_add(uent, uent->ubc); + + if (is_ibus_controller(uent) && uent->ubc->cluster) + continue; + ub_start_ent(uent); } diff --git a/drivers/ub/ubus/eu.c b/drivers/ub/ubus/eu.c index 97e040eadd5e..5c99d44918e5 100644 --- a/drivers/ub/ubus/eu.c +++ b/drivers/ub/ubus/eu.c @@ -93,7 +93,7 @@ void ub_eu_table_init(struct ub_entity *uent) struct ub_bus_controller *ubc = uent->ubc; int ret; - if (!is_ibus_controller(uent)) + if (!is_ibus_controller(uent) || ubc->cluster) return; ret = ub_eu_table_init_common(uent); @@ -137,6 +137,9 @@ int ub_cfg_eu_table(struct ub_bus_controller *ubc, bool flag, u32 eid, u16 upi) struct ub_bus_controller_ops *ops = ubc->ops; int ret; + if (ubc->cluster) + return 0; + if (!ops || !ops->eu_cfg) return -ENODEV; diff --git a/drivers/ub/ubus/instance.c b/drivers/ub/ubus/instance.c index 8c49c04388e3..342fc9960ef2 100644 --- a/drivers/ub/ubus/instance.c +++ b/drivers/ub/ubus/instance.c @@ -915,15 +915,17 @@ int ub_ioctl_bus_instance_unbind(void __user *uptr) int ub_default_bus_instance_init(struct ub_entity *uent) { - bool m_idev = is_p_idevice(uent); - bool fad = is_p_device(uent); struct ub_bus_instance 
*bi; + bool use_cluster; int ret; if (is_switch(uent)) return 0; - if (fad || m_idev) { + use_cluster = is_p_device(uent) || is_p_idevice(uent) || + (is_ibus_controller(uent) && uent->ubc->cluster); + + if (use_cluster) { mutex_lock(&dynamic_mutex); bi = ub_find_bus_instance(eid_match, &uent->user_eid); } else { @@ -931,7 +933,7 @@ int ub_default_bus_instance_init(struct ub_entity *uent) } if (!bi) { - if (fad || m_idev) + if (use_cluster) mutex_unlock(&dynamic_mutex); ub_err(uent, "get default bi NULL\n"); return -EINVAL; @@ -941,7 +943,7 @@ int ub_default_bus_instance_init(struct ub_entity *uent) ret = ub_bind_bus_instance(uent, bi); mutex_unlock(&uent->instance_lock); - if (fad || m_idev) { + if (use_cluster) { ub_bus_instance_put(bi); mutex_unlock(&dynamic_mutex); } diff --git a/drivers/ub/ubus/pool.c b/drivers/ub/ubus/pool.c index 414e9dba0c20..4fdd9aca922d 100644 --- a/drivers/ub/ubus/pool.c +++ b/drivers/ub/ubus/pool.c @@ -533,7 +533,14 @@ static void ub_cfg_cpl_notify_handler(struct ub_bus_controller *ubc, void *msg, if (ret) { dev_err(&ubc->dev, "handle notify bi failed, ret=%d\n", ret); rsp_status = err_to_msg_rsp(ret); + goto rsp; + } + + if (!ub_entity_test_priv_flag(ubc->uent, UB_ENTITY_START)) { + ubc->uent->user_eid = notify->eid[0]; + ub_start_ent(ubc->uent); } + rsp: header->msgetah.rsp_status = rsp_status; ub_cfg_cpl_notify_msg_rsp(ubc, header); diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index a8a238df8cc4..f2ec6e8b9f47 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ -353,10 +353,14 @@ static umode_t ub_port_qdlws_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct ub_port *port = to_ub_port(kobj); + struct ub_entity *uent = port->uent; if (port->type == VIRTUAL) return 0; + if (is_ibus_controller(uent) && uent->ubc->cluster) + return 0; + if (test_bit(UB_PORT_CAP15_QDLWS, port->cap_map)) return a->mode; diff --git a/drivers/ub/ubus/services/gucd.c b/drivers/ub/ubus/services/gucd.c index 
ca5f0a3578e8..a7796bcce6aa 100644 --- a/drivers/ub/ubus/services/gucd.c +++ b/drivers/ub/ubus/services/gucd.c @@ -71,6 +71,9 @@ static int ub_component_service_register(struct ub_entity *uent) int capabilities; int i; + if (is_ibus_controller(uent) && uent->ubc->cluster) + return 0; + /* Get and check component services */ capabilities = get_component_service_capability(uent); if (!capabilities) @@ -91,36 +94,15 @@ static int ub_component_service_register(struct ub_entity *uent) return 0; } -static void ub_enable_err_msq_ctrl(struct ub_entity *uent) -{ - int ret; - - ret = ub_cfg_write_dword(uent, EMQ_CAP_START + UB_CAP_ERR_MSG_QUE_CTL, - UB_CAP_INTERRUPT_GEN_ENA); - if (ret) - ub_err(uent, "enable error msq controller failed\n"); -} - -static void ub_disable_err_msq_ctrl(struct ub_entity *uent) -{ - int ret; - - ret = ub_cfg_write_dword(uent, EMQ_CAP_START + UB_CAP_ERR_MSG_QUE_CTL, - 0x0); - if (ret) - ub_err(uent, "disable error msq controller failed\n"); -} - static void ub_setup_bus_controller(struct ub_entity *uent) { u32 vec_num_max; int usi_count; - if (ub_cc_supported(uent)) + if (ub_cc_supported(uent) && !uent->ubc->cluster) ub_cc_enable(uent); ub_set_user_info(uent); - ub_enable_err_msq_ctrl(uent); vec_num_max = ub_int_type1_vec_count(uent); usi_count = ub_alloc_irq_vectors(uent, vec_num_max, vec_num_max); if (usi_count < 0) { @@ -143,10 +125,9 @@ static void ub_unset_bus_controller(struct ub_entity *uent) ub_mem_uninit_usi(uent); ub_uninit_decoder_usi(uent); ub_disable_intr(uent); - ub_disable_err_msq_ctrl(uent); ub_unset_user_info(uent); - if (ub_cc_supported(uent)) + if (ub_cc_supported(uent) && !uent->ubc->cluster) ub_cc_disable(uent); } diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 4fa1a8533e71..b43d682ba3d8 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -421,6 +421,11 @@ void ub_entity_add(struct ub_entity *uent, void *ctx) ret = ub_ports_add(uent); WARN_ON(ret); } + + if 
(is_ibus_controller(uent)) { + ret = ub_static_bus_instance_init(uent->ubc); + WARN_ON(ret); + } } EXPORT_SYMBOL_GPL(ub_entity_add); @@ -432,11 +437,6 @@ void ub_start_ent(struct ub_entity *uent) if (!uent) return; - if (is_ibus_controller(uent)) { - ret = ub_static_bus_instance_init(uent->ubc); - WARN_ON(ret); - } - ret = ub_default_bus_instance_init(uent); WARN_ON(ret); @@ -507,9 +507,6 @@ void ub_stop_ent(struct ub_entity *uent) ub_remove_sysfs_ent_files(uent); ub_default_bus_instance_uninit(uent); - - if (is_ibus_controller(uent)) - ub_static_bus_instance_uninit(uent->ubc); } EXPORT_SYMBOL_GPL(ub_stop_ent); @@ -527,6 +524,9 @@ void ub_remove_ent(struct ub_entity *uent) list_for_each_entry_safe_reverse(ent, tmp, &uent->mue_list, node) ub_remove_ent(ent); + if (is_ibus_controller(uent)) + ub_static_bus_instance_uninit(uent->ubc); + if (is_primary(uent)) ub_ports_del(uent); @@ -1008,7 +1008,8 @@ int ub_set_user_info(struct ub_entity *uent) u32 eid = uent->ubc->uent->eid; - if (is_p_device(uent)) + if (is_p_device(uent) || + (uent->ubc->cluster && is_ibus_controller(uent))) goto cfg1; /* set dsteid to device */ @@ -1033,7 +1034,8 @@ void ub_unset_user_info(struct ub_entity *uent) if (!uent) return; - if (is_p_device(uent)) + if (is_p_device(uent) || + (uent->ubc->cluster && is_ibus_controller(uent))) goto cfg1; ub_cfg_write_dword(uent, UB_UCNA, 0); diff --git a/drivers/ub/ubus/vendor/hisilicon/eu-table.c b/drivers/ub/ubus/vendor/hisilicon/eu-table.c index 6bbdfb0e0bf7..3004093b52d8 100644 --- a/drivers/ub/ubus/vendor/hisilicon/eu-table.c +++ b/drivers/ub/ubus/vendor/hisilicon/eu-table.c @@ -165,12 +165,18 @@ static const struct file_operations hi_eu_table_info_ops = { static void hi_eu_table_debugfs_init(struct ub_bus_controller *ubc) { + if (ubc->cluster) + return; + debugfs_create_file("eu_table", 0600, ubc->debug_root, ubc, &hi_eu_table_info_ops); } static void hi_eu_table_debugfs_uninit(struct ub_bus_controller *ubc) { + if (ubc->cluster) + return; + 
debugfs_lookup_and_remove("eu_table", ubc->debug_root); } diff --git a/include/uapi/ub/ubus/ubus_regs.h b/include/uapi/ub/ubus/ubus_regs.h index 9eed901fd205..a4fe600f5459 100644 --- a/include/uapi/ub/ubus/ubus_regs.h +++ b/include/uapi/ub/ubus/ubus_regs.h @@ -271,9 +271,4 @@ enum ub_port_cap_id { #define QDLWS_EXEC_STATUS_MASK GENMASK(2, 0) #define QDLWS_EXEC_STATUS_MAX 4 -/* Error Message Queue Capability */ -#define EMQ_CAP_START 0x00001400 -#define UB_CAP_ERR_MSG_QUE_CTL 0x8 -#define UB_CAP_INTERRUPT_GEN_ENA 0x100 - #endif /* _UAPI_UB_UBUS_UBUS_REGS_H_ */ -- Gitee From 6315c8a4f818b3b259e236ee706740eef3922cec Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 14:31:20 +0800 Subject: [PATCH 084/126] ub:ubus: Matt and MMIO judgments are not performed in cluster commit 68ce3e1d69c339382cf0b8aa95a7e0957c9ce22d openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- Rollback after setting the decoder register fails. 
Matt and MMIO judgments are not performed in cluster true Fixes: abc591c50df5 ("ub:ubus: Supports decoder event processing") Signed-off-by: Yuhao Xiang Signed-off-by: Shi Yang --- drivers/ub/ubus/decoder.c | 152 +++++++++++++++++++++++++++----------- drivers/ub/ubus/decoder.h | 1 + 2 files changed, 109 insertions(+), 44 deletions(-) diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index 288e33a96038..56d20dbbf0aa 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -75,28 +75,68 @@ static int ub_decoder_init_queue(struct ub_bus_controller *ubc, static u32 set_mmio_base_reg(struct ub_decoder *decoder) { - u32 ret; + u32 mmio_high = upper_32_bits(decoder->mmio_base_addr); + u32 mmio_low = lower_32_bits(decoder->mmio_base_addr); + struct ub_entity *ent = decoder->uent; + u32 low_bit, high_bit, ret; + + if (!ent->ubc->cluster) { + ret = (u32)ub_cfg_write_dword(ent, DECODER_MMIO_BA0, + 0xffffffff); + ret |= (u32)ub_cfg_write_dword(ent, DECODER_MMIO_BA1, + 0xffffffff); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MMIO_BA0, &low_bit); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MMIO_BA1, &high_bit); + if (ret) { + ub_err(ent, "Failed to access decoder MMIO BA\n"); + return ret; + } + + if ((low_bit | mmio_low) != low_bit || + (high_bit | mmio_high) != high_bit) { + ub_err(ent, "decoder MMIO address does not match HW reg\n"); + return -EINVAL; + } + } ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_MMIO_BA0, lower_32_bits(decoder->mmio_base_addr)); ret |= (u32)ub_cfg_write_dword(decoder->uent, DECODER_MMIO_BA1, upper_32_bits(decoder->mmio_base_addr)); - if (ret) - ub_err(decoder->uent, "set decoder mmio base failed\n"); return ret; } static u32 set_page_table_reg(struct ub_decoder *decoder) { - u32 ret; + u32 matt_high = upper_32_bits(decoder->pgtlb.pgtlb_dma); + u32 matt_low = lower_32_bits(decoder->pgtlb.pgtlb_dma); + struct ub_entity *ent = decoder->uent; + u32 low_bit, high_bit, ret; + + if (!ent->ubc->cluster) { + ret = 
(u32)ub_cfg_write_dword(ent, DECODER_MATT_BA0, + 0xffffffff); + ret |= (u32)ub_cfg_write_dword(ent, DECODER_MATT_BA1, + 0xffffffff); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MATT_BA0, &low_bit); + ret |= (u32)ub_cfg_read_dword(ent, DECODER_MATT_BA1, &high_bit); + if (ret) { + ub_err(ent, "Failed to access decoder MATT BA\n"); + return ret; + } + + if ((low_bit | matt_low) != low_bit || + (high_bit | matt_high) != high_bit) { + ub_err(ent, "decoder MATT address does not match HW reg\n"); + return -EINVAL; + } + } ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_MATT_BA0, lower_32_bits(decoder->pgtlb.pgtlb_dma)); ret |= (u32)ub_cfg_write_dword(decoder->uent, DECODER_MATT_BA1, upper_32_bits(decoder->pgtlb.pgtlb_dma)); - if (ret) - ub_err(decoder->uent, "set decoder page table reg failed\n"); return ret; } @@ -145,6 +185,25 @@ static u32 set_queue_reg(struct ub_decoder *decoder) return ret; } +static void unset_queue_reg(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + u32 ret; + + ret = (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_CFG, + decoder->vals.cmdq_cfg_val); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_CFG, + decoder->vals.evtq_cfg_val); + + ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR0, 0); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR1, 0); + + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR0, 0); + ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR1, 0); + if (ret) + ub_err(uent, "unset queue reg fail\n"); +} + static u32 set_decoder_enable(struct ub_decoder *decoder) { u32 ret = (u32)ub_cfg_write_dword(decoder->uent, DECODER_CTRL, 1); @@ -155,6 +214,14 @@ static u32 set_decoder_enable(struct ub_decoder *decoder) return ret; } +static void unset_decoder_enable(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + + if (ub_cfg_write_dword(uent, DECODER_CTRL, 0)) + ub_err(uent, "unset decoder enable fail\n"); +} + static u32 
ub_decoder_device_set(struct ub_decoder *decoder) { u32 ret; @@ -164,6 +231,11 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) ret |= set_queue_reg(decoder); ret |= set_decoder_enable(decoder); + if (ret) { + unset_decoder_enable(decoder); + unset_queue_reg(decoder); + } + return ret; } @@ -187,21 +259,26 @@ static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, "ub bus controller can't free decoder table\n"); } -static void ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, +static int ub_get_decoder_mmio_base(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { struct resource_entry *entry; - decoder->mmio_base_addr = -1; resource_list_for_each_entry(entry, &ubc->resources) { if (entry->res->flags == IORESOURCE_MEM && - strstr(entry->res->name, "UB_BUS_CTL") && - entry->res->start < decoder->mmio_base_addr) + strstr(entry->res->name, "UB_BUS_CTL")) { decoder->mmio_base_addr = entry->res->start; + decoder->mmio_end_addr = entry->res->end; + break; + } + } + + if (decoder->mmio_base_addr == 0) { + ub_err(decoder->uent, "get decoder mmio base failed\n"); + return -EINVAL; } - ub_info(decoder->uent, "decoder mmio base is %#llx\n", - decoder->mmio_base_addr); + return 0; } static const char * const mmio_size_desc[] = { @@ -209,15 +286,21 @@ static const char * const mmio_size_desc[] = { "2Tbyte", "4Tbyte", "8Tbyte", "16Tbyte" }; +static const u64 mmio_size[] = { + 128ULL * SZ_1G, 256ULL * SZ_1G, 512ULL * SZ_1G, SZ_1T, + 2 * SZ_1T, 4 * SZ_1T, 8 * SZ_1T, 16 * SZ_1T +}; + static int ub_get_decoder_cap(struct ub_decoder *decoder) { struct ub_entity *uent = decoder->uent; + u64 size; u32 val; int ret; ret = ub_cfg_read_dword(uent, DECODER_CAP, &val); if (ret) { - ub_err(uent, "read decoder cap failed\n"); + ub_err(uent, "read decoder cap fail\n"); return ret; } @@ -225,9 +308,15 @@ static int ub_get_decoder_cap(struct ub_decoder *decoder) decoder->cmdq.qs = (val & CMDQ_SIZE_MASK) >> CMDQ_SIZE_OFFSET; decoder->evtq.qs = (val & 
EVTQ_SIZE_MASK) >> EVTQ_SIZE_OFFSET; - ub_dbg(uent, "cmdq_queue_size=%u, evtq_queue_size=%u, mmio_size=%s\n", - decoder->cmdq.qs, decoder->evtq.qs, - mmio_size_desc[decoder->mmio_size_sup]); + size = decoder->mmio_end_addr - decoder->mmio_base_addr + 1; + if (size > mmio_size[decoder->mmio_size_sup]) + decoder->mmio_end_addr = decoder->mmio_base_addr + + mmio_size[decoder->mmio_size_sup] - 1; + + ub_info(uent, "decoder mmio_addr[%#llx-%#llx], cmdq_queue_size=%u, evtq_queue_size=%u, mmio_size_sup=%s\n", + decoder->mmio_base_addr, decoder->mmio_end_addr, + decoder->cmdq.qs, decoder->evtq.qs, + mmio_size_desc[decoder->mmio_size_sup]); return 0; } @@ -245,7 +334,9 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) decoder->uent = uent; mutex_init(&decoder->table_lock); - ub_get_decoder_mmio_base(ubc, decoder); + ret = ub_get_decoder_mmio_base(ubc, decoder); + if (ret) + goto release_decoder; ret = ub_get_decoder_cap(decoder); if (ret) @@ -293,25 +384,6 @@ static void unset_mmio_base_reg(struct ub_decoder *decoder) ub_err(uent, "unset mmio base reg failed\n"); } -static void unset_queue_reg(struct ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - u32 ret; - - ret = (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_CFG, - decoder->vals.cmdq_cfg_val); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_CFG, - decoder->vals.evtq_cfg_val); - - ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR0, 0); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_CMDQ_BASE_ADDR1, 0); - - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR0, 0); - ret |= (u32)ub_cfg_write_dword(uent, DECODER_EVENTQ_BASE_ADDR1, 0); - if (ret) - ub_err(uent, "unset queue reg failed\n"); -} - static void unset_page_table_reg(struct ub_decoder *decoder) { struct ub_entity *uent = decoder->uent; @@ -323,14 +395,6 @@ static void unset_page_table_reg(struct ub_decoder *decoder) ub_err(uent, "unset page table reg failed\n"); } -static void unset_decoder_enable(struct 
ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - - if (ub_cfg_write_dword(uent, DECODER_CTRL, 0)) - ub_err(uent, "unset decoder enable failed\n"); -} - static void ub_decoder_device_unset(struct ub_decoder *decoder) { unset_decoder_enable(decoder); diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 37d628dc45e2..6667d07e9219 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -72,6 +72,7 @@ struct ub_decoder { struct device *dev; struct ub_entity *uent; phys_addr_t mmio_base_addr; + phys_addr_t mmio_end_addr; u32 mmio_size_sup; u64 rg_size; struct ub_decoder_queue cmdq; -- Gitee From 1734c22040117ba7d48fb68ddd0cad6651f0ae0c Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 20:44:11 +0800 Subject: [PATCH 085/126] ub:ubfi: Fix UBFI memory leak issue commit b41f79594e18b9628b1d8d4e50ac1687c9679ecf openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- UBFI in abnormal branch memory leakage issues and redundant branches Fixes: 312a6b7fabe9 ("ub:ubfi: ubfi driver parse ubc information from ubrt") Signed-off-by: Yuhao Xiang Signed-off-by: Shi Yang --- drivers/ub/ubfi/ub_fi.c | 42 ++++++++++++------------ drivers/ub/ubfi/ub_fi.h | 7 ++-- drivers/ub/ubfi/ubc.c | 35 ++++++++++---------- drivers/ub/ubfi/ubrt.c | 72 +++++++++++++++++++++++++++-------------- drivers/ub/ubfi/ubrt.h | 4 --- 5 files changed, 89 insertions(+), 71 deletions(-) diff --git a/drivers/ub/ubfi/ub_fi.c b/drivers/ub/ubfi/ub_fi.c index 50b359e52b7b..aa8b69f39dd8 100644 --- a/drivers/ub/ubfi/ub_fi.c +++ b/drivers/ub/ubfi/ub_fi.c @@ -15,16 +15,16 @@ #define ACPI_SIG_UBRT "UBRT" /* UB Root Table */ #define UBIOS_INFO_TABLE "linux,ubios-information-table" -enum bios_report_mode bios_mode = UNKNOWN; +enum firmware_report_mode firmware_mode = UNKNOWN; -static void ub_bios_mode_init(void) +static void 
ub_firmware_mode_init(void) { if (acpi_disabled) - bios_mode = DTS; + firmware_mode = DTS; else - bios_mode = ACPI; + firmware_mode = ACPI; - pr_info("Starting with mode: %d\n", bios_mode); + pr_info("Starting with mode: %d\n", firmware_mode); } static int ubfi_get_acpi_ubrt(void) @@ -34,14 +34,13 @@ static int ubfi_get_acpi_ubrt(void) status = acpi_get_table(ACPI_SIG_UBRT, 0, &header); if (ACPI_FAILURE(status)) { - pr_err("ACPI failed to get UBRT.\n"); if (status != AE_NOT_FOUND) pr_err("ACPI failed msg: %s\n", acpi_format_exception(status)); return -ENODEV; } acpi_table = (struct acpi_table_ubrt *)header; - pr_info("get ubrt by acpi success\n"); + pr_debug("get ubrt by acpi success\n"); return 0; } @@ -65,35 +64,32 @@ static int ubfi_get_dts_ubrt(void) if (!ubios_table) return -ENOMEM; - pr_info("ubfi get ubrt by device tree success\n"); + pr_debug("ubfi get ubrt by device tree success\n"); return 0; } static int ubfi_get_ubrt(void) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) return ubfi_get_acpi_ubrt(); - else if (bios_mode == DTS) + else return ubfi_get_dts_ubrt(); - return -EINVAL; } static int handle_ubrt(void) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) return handle_acpi_ubrt(); - else if (bios_mode == DTS) + else return handle_dts_ubrt(); - - return -EINVAL; } static void ubfi_put_ubrt(void) { - if (bios_mode == ACPI) { + if (firmware_mode == ACPI) { acpi_put_table((struct acpi_table_header *)acpi_table); acpi_table = NULL; - } else if (bios_mode == DTS) { + } else { ub_table_put(ubios_table); ubios_table = NULL; } @@ -103,15 +99,21 @@ static int __init ubfi_init(void) { int ret; - ub_bios_mode_init(); + ub_firmware_mode_init(); ret = ubfi_get_ubrt(); if (ret) { - pr_warn("can't get ub information from bios, ret=%d\n", ret); + pr_warn("can't get ub information from firmware, ret=%d\n", ret); return 0; } - return handle_ubrt(); + ret = handle_ubrt(); + if (ret) { + pr_err("failed to handle ubrt, ret=%d\n", ret); + ubfi_put_ubrt(); + 
} + + return ret; } static void __exit ubfi_exit(void) diff --git a/drivers/ub/ubfi/ub_fi.h b/drivers/ub/ubfi/ub_fi.h index 3edf8dd6de4e..9d36c308cede 100644 --- a/drivers/ub/ubfi/ub_fi.h +++ b/drivers/ub/ubfi/ub_fi.h @@ -6,12 +6,11 @@ #ifndef __UB_FI_H__ #define __UB_FI_H__ -enum bios_report_mode { +enum firmware_report_mode { ACPI = 0, DTS = 1, - UBIOS = 3, - UNKNOWN = 4, + UNKNOWN = 2 }; -extern enum bios_report_mode bios_mode; +extern enum firmware_report_mode firmware_mode; #endif /* __UB_FI_H__ */ diff --git a/drivers/ub/ubfi/ubc.c b/drivers/ub/ubfi/ubc.c index a3f7bab8863f..3a160643f075 100644 --- a/drivers/ub/ubfi/ubc.c +++ b/drivers/ub/ubfi/ubc.c @@ -212,7 +212,7 @@ static int ubc_dev_new_resource_entry(struct resource *res, return 0; } -static int dts_register_irq(u32 ctl_no, int irq_type, const char *name, +static int dts_register_irq(u32 ctl_no, int irq_idx, const char *name, struct resource *res) { struct device_node *np; @@ -228,16 +228,16 @@ static int dts_register_irq(u32 ctl_no, int irq_type, const char *name, if (ctl_no != index) continue; - irq = irq_of_parse_and_map(np, irq_type); + irq = irq_of_parse_and_map(np, irq_idx); if (!irq) continue; } if (!irq) { - pr_err("irq_type %d parse and map fail\n", irq_type); + pr_err("irq_idx %d parse and map failed\n", irq_idx); return -EINVAL; } - pr_info("irq_type[%d] register success, irq=%u\n", irq_type, irq); + pr_info("irq_idx %d register successfully, irq=%u\n", irq_idx, irq); res->name = name; res->start = irq; @@ -258,9 +258,9 @@ static void remove_ubc_resource(struct ub_bus_controller *ubc) if ((res->flags & IORESOURCE_IRQ) && !strcmp(res->name, "UBUS") && !ubc->ctl_no) { - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ubrt_unregister_gsi(ubc->attr.msg_int); - else if (bios_mode == DTS) + else irq_dispose_mapping(ubc->queue_virq); } } @@ -296,12 +296,10 @@ static int add_ubc_irq_resource(struct ubc_node *node, trigger = !!(ubc->attr.msg_int_attr & UB_MSGQ_INT_TRIGGER_MASK); polarity = 
!!(ubc->attr.msg_int_attr & UB_MSGQ_INT_POLARITY_MASK); - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ret = ubrt_register_gsi(hwirq, trigger, polarity, "UBUS", &res); - else if (bios_mode == DTS) - ret = dts_register_irq(ubc->ctl_no, 0, "UBUS", &res); else - ret = -EINVAL; + ret = dts_register_irq(ubc->ctl_no, 0, "UBUS", &res); if (ret) { pr_err("register irq fail, ret=%d\n", ret); @@ -318,9 +316,9 @@ static int add_ubc_irq_resource(struct ubc_node *node, return 0; out: - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ubrt_unregister_gsi(hwirq); - else if (bios_mode == DTS) + else irq_dispose_mapping(res.start); return ret; @@ -333,7 +331,7 @@ static void ub_release_ubc_dev(struct device *dev) pr_info("%s release ub bus controller device.\n", ubc->name); - if (bios_mode == DTS) { + if (firmware_mode == DTS) { usi_np = irq_domain_get_of_node(dev->msi.domain); if (usi_np) of_node_put(usi_np); @@ -509,7 +507,7 @@ static int create_ubc(struct ubc_node *node, u32 ctl_no) if (ret) goto free_resource; - /* after init_ubc, ubc resources will be released in the dev->release */ + /* after init_ubc, if failed, ubc resources will be released in the dev->release */ ret = init_ubc(ubc); if (ret) return ret; @@ -551,9 +549,10 @@ static int parse_ubc_table(void *info_node) pr_info("cna_start=%u, cna_end=%u\n", ubc_cna_start, ubc_cna_end); pr_info("eid_start=%u, eid_end=%u\n", ubc_eid_start, ubc_eid_end); - pr_info("ubc_count=%u, bios_cluster_mode=%u, feature=%u\n", count, + pr_info("ubc_count=%u, firmware_cluster_mode=%u, feature=%u\n", count, cluster_mode, ubc_feature); - if (ubc_cna_start > ubc_cna_end || ubc_eid_start > ubc_eid_end) { + if (ubc_cna_start > ubc_cna_end || ubc_eid_start > ubc_eid_end || + ubc_cna_start == 0 || ubc_eid_start == 0) { pr_err("eid or cna range is incorrect\n"); return -EINVAL; } @@ -597,9 +596,9 @@ int handle_ubc_table(u64 pointer) if (ret) goto err_handle; - pr_info("Update msi domain for ub bus controller\n"); + pr_debug("Update msi 
domain for ub bus controller\n"); /* Update msi domain for ub bus controller */ - if (bios_mode == ACPI) + if (firmware_mode == ACPI) ret = acpi_update_ubc_msi_domain(); else ret = dts_update_ubc_msi_domain(); diff --git a/drivers/ub/ubfi/ubrt.c b/drivers/ub/ubfi/ubrt.c index 8908b2ae8edd..ecf975526e72 100644 --- a/drivers/ub/ubfi/ubrt.c +++ b/drivers/ub/ubfi/ubrt.c @@ -21,10 +21,13 @@ struct acpi_table_ubrt *acpi_table; struct ubios_root_table *ubios_table; /* - * ummu max count is 32, max size is 40 + 32 * 128 = 4640 - * ubc max count is 32, max size is 40 + 88 + 32 * 256 + 32 * 4 = 8448 + * ubios max sub table count is 256, max size is 40 + 8 * 256 = 2088 + * ummu max count is 32, max size is 32 + 8 + 32 * 160 = 5160 + * ubc max count is 32, max size is 32 + 24 + 32 * 384 = 12344 + * Choose the largest one as the maximum value for the ubios table. */ -#define UBIOS_TABLE_TOTLE_SIZE_MAX 8448 +#define UBIOS_TABLE_TOTAL_SIZE_MAX (sizeof(struct ubrt_ubc_table) + \ + 32 * sizeof(struct ubc_node)) /* remember to use ub_table_put to release memory alloced by ub_table_get */ void *ub_table_get(u64 pa) @@ -44,8 +47,9 @@ void *ub_table_get(u64 pa) total_size = readl(va + UB_TABLE_HEADER_NAME_LEN); pr_debug("ub table size is[0x%x]\n", total_size); - if (total_size == 0 || total_size > UBIOS_TABLE_TOTLE_SIZE_MAX) { - pr_err("ubios table size is invalid\n"); + if (total_size == 0 || total_size > UBIOS_TABLE_TOTAL_SIZE_MAX) { + pr_err("ubios table size is invalid, total_size=0x%x\n", + total_size); iounmap(va); return NULL; } @@ -81,6 +85,7 @@ void uninit_ub_nodes(void) int handle_acpi_ubrt(void) { + bool ubc_done = false, ummu_done = false; struct ubrt_sub_table *sub_table; int ret = 0; u32 i; @@ -89,16 +94,14 @@ int handle_acpi_ubrt(void) for (i = 0; i < acpi_table->count; i++) { sub_table = &acpi_table->sub_table[i]; - switch (sub_table->type) { - case UB_BUS_CONTROLLER_TABLE: + if (sub_table->type == UB_BUS_CONTROLLER_TABLE && !ubc_done) { ret = 
handle_ubc_table(sub_table->pointer); - break; - case UMMU_TABLE: + ubc_done = true; + } else if (sub_table->type == UMMU_TABLE && !ummu_done) { ret = handle_ummu_table(sub_table->pointer); - break; - default: + ummu_done = true; + } else { pr_warn("Ignore sub table: type %u\n", sub_table->type); - break; } if (ret) { pr_err("parse ubrt sub table type %u failed\n", @@ -112,10 +115,25 @@ int handle_acpi_ubrt(void) return ret; } +static int get_ubrt_table_name(char *name, u64 sub_table) +{ + void __iomem *va; + + va = ioremap(sub_table, sizeof(struct ub_table_header)); + if (!va) { + pr_err("ioremap ub table header failed\n"); + return -ENOMEM; + } + + memcpy_fromio(name, va, UB_TABLE_HEADER_NAME_LEN - 1); + iounmap(va); + return 0; +} + int handle_dts_ubrt(void) { - char name[UB_TABLE_HEADER_NAME_LEN] = {}; - struct ub_table_header *header; + bool ubc_done = false, ummu_done = false; + char name[UB_TABLE_HEADER_NAME_LEN]; int ret = 0, i; if (ubios_table->count == 0) { @@ -125,24 +143,28 @@ int handle_dts_ubrt(void) pr_info("ubios sub table count is %u\n", ubios_table->count); for (i = 0; i < ubios_table->count; i++) { - header = (struct ub_table_header *)ub_table_get( - ubios_table->sub_tables[i]); - if (!header) + memset(name, 0, UB_TABLE_HEADER_NAME_LEN); + ret = get_ubrt_table_name(name, ubios_table->sub_tables[i]); + if (ret) + goto out; + if (name[0] == '\0') continue; - - memcpy(name, header->name, UB_TABLE_HEADER_NAME_LEN - 1); pr_info("ubrt sub table name is %s\n", name); - ub_table_put(header); - if (!strncmp(name, UBIOS_SIG_UBC, strlen(UBIOS_SIG_UBC))) - ret = handle_ubc_table(ubios_table->sub_tables[i]); - else if (!strncmp(name, UBIOS_SIG_UMMU, strlen(UBIOS_SIG_UMMU))) + if (!strncmp(name, UBIOS_SIG_UMMU, strlen(UBIOS_SIG_UMMU)) && + !ummu_done) { ret = handle_ummu_table(ubios_table->sub_tables[i]); - else + ummu_done = true; + } else if (!strncmp(name, UBIOS_SIG_UBC, strlen(UBIOS_SIG_UBC)) && + !ubc_done) { + ret = 
handle_ubc_table(ubios_table->sub_tables[i]); + ubc_done = true; + } else { pr_warn("Ignore sub table: %s\n", name); + } if (ret) { - pr_err("Create %s device ret=%d\n", name, ret); + pr_err("Create %s failed, ret=%d\n", name, ret); goto out; } } diff --git a/drivers/ub/ubfi/ubrt.h b/drivers/ub/ubfi/ubrt.h index 0cbc5fe82368..1f15f0ce76c9 100644 --- a/drivers/ub/ubfi/ubrt.h +++ b/drivers/ub/ubfi/ubrt.h @@ -43,10 +43,6 @@ enum ubrt_sub_table_type { UB_BUS_CONTROLLER_TABLE = 0, UMMU_TABLE = 1, UB_RESERVED_MEMORY_TABLE = 2, - VIRTUAL_BUS_TABLE = 3, - CALL_ID_SERVICE_TABLE = 4, - UB_ENTITY_TABLE = 5, - UB_TOPOLOGY_TABLE = 6, }; extern struct acpi_table_ubrt *acpi_table; -- Gitee From 3f469e92043781abb64d9a9390950631ab4a7529 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 10 Dec 2025 20:01:02 +0800 Subject: [PATCH 086/126] ub:ubus: bugfix calltrace of killing qemu when rmmod hisi_ubus commit 0e8de1af5c2d288c7e6eb2fc702049bf2c3792ab openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- bugfix calltrace of killing qemu when rmmod hisi_ubus Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/msg.c | 23 ++--------------------- drivers/ub/ubus/msg.h | 4 ---- drivers/ub/ubus/vendor/hisilicon/msg.c | 11 ----------- 3 files changed, 2 insertions(+), 36 deletions(-) diff --git a/drivers/ub/ubus/msg.c b/drivers/ub/ubus/msg.c index 54f77128ad2f..34b2831bd8bf 100644 --- a/drivers/ub/ubus/msg.c +++ b/drivers/ub/ubus/msg.c @@ -83,39 +83,20 @@ static void dev_message_put(struct ub_entity *uent) int message_probe_device(struct ub_entity *uent) { - const struct message_ops *ops = uent->ubc->mdev->ops; - int ret; - if (!dev_message_get(uent)) return -ENOMEM; - if (uent->message->mdev) - return 0; - - if (ops->probe_dev) { - ret = ops->probe_dev(uent); - if (ret) - goto err_probe; - } - - uent->message->mdev = uent->ubc->mdev; + if 
(!uent->message->mdev) + uent->message->mdev = uent->ubc->mdev; return 0; - -err_probe: - dev_message_put(uent); - return ret; } void message_remove_device(struct ub_entity *uent) { - const struct message_ops *ops = uent->ubc->mdev->ops; - if (!uent->message) return; - if (ops->remove_dev) - ops->remove_dev(uent); dev_message_put(uent); } diff --git a/drivers/ub/ubus/msg.h b/drivers/ub/ubus/msg.h index 92a126bdf471..f714295fa0e9 100644 --- a/drivers/ub/ubus/msg.h +++ b/drivers/ub/ubus/msg.h @@ -190,8 +190,6 @@ typedef void (*rx_msg_handler_t)(struct ub_bus_controller *ubc, void *pkt, u16 l /** * struct message_ops - message ops and capabilities - * @probe_dev: probe ub_entity to init message - * @remove_dev: remove ub_entity to uninit message * @sync_request: send message to target ub_entity and wait response * @send: send message to target ub_entity but not wait response * @response: send response message to target @@ -200,8 +198,6 @@ typedef void (*rx_msg_handler_t)(struct ub_bus_controller *ubc, void *pkt, u16 l * @owner: Driver module providing these ops */ struct message_ops { - int (*probe_dev)(struct ub_entity *uent); - void (*remove_dev)(struct ub_entity *uent); int (*sync_request)(struct message_device *mdev, struct msg_info *info, u8 code); int (*send)(struct message_device *mdev, struct msg_info *info, diff --git a/drivers/ub/ubus/vendor/hisilicon/msg.c b/drivers/ub/ubus/vendor/hisilicon/msg.c index 178682aa5cd6..5c4e672aa55e 100644 --- a/drivers/ub/ubus/vendor/hisilicon/msg.c +++ b/drivers/ub/ubus/vendor/hisilicon/msg.c @@ -463,15 +463,6 @@ static void hi_msg_queue_uninit(struct hi_message_device *hmd) hi_msg_core_uninit(&hmd->hmc); } -static int hi_message_probe_dev(struct ub_entity *uent) -{ - return 0; -} - -static void hi_message_remove_dev(struct ub_entity *uent) -{ -} - static bool pkt_plen_valid(void *pkt, u16 pkt_size, int task_type) { struct msg_pkt_header *header = (struct msg_pkt_header *)pkt; @@ -638,8 +629,6 @@ int hi_message_private(struct 
message_device *mdev, struct msg_info *info, } static struct message_ops hi_message_ops = { - .probe_dev = hi_message_probe_dev, - .remove_dev = hi_message_remove_dev, .sync_request = hi_message_sync_request, .response = hi_message_response, .sync_enum = hi_message_sync_enum, -- Gitee From cd2550c467e789762d7b03e2caef44970f2f0a3e Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 10 Dec 2025 16:27:55 +0800 Subject: [PATCH 087/126] ub:ubus: add hotplug capability check commit b0aa44c0a884645c7a84a1ad65d01a7a3ab976a8 openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- HotPlug capability check added to determine whether power-on and power-off functions are supported. Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/services/hotplug/hotplug.h | 1 + .../ub/ubus/services/hotplug/hotplug_core.c | 121 +++++++++++++++++- .../ub/ubus/services/hotplug/hotplug_ctrl.c | 71 +++++----- include/uapi/ub/ubus/ubus_regs.h | 1 + 4 files changed, 154 insertions(+), 40 deletions(-) diff --git a/drivers/ub/ubus/services/hotplug/hotplug.h b/drivers/ub/ubus/services/hotplug/hotplug.h index 93c8e3c798b9..81f731eb29be 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug.h +++ b/drivers/ub/ubus/services/hotplug/hotplug.h @@ -53,6 +53,7 @@ struct ub_slot { #define WORK_LED(slot) ((slot)->slot_cap & UB_SLOT_WLPS) #define PWR_LED(slot) ((slot)->slot_cap & UB_SLOT_PLPS) #define PRESENT(slot) ((slot)->slot_cap & UB_SLOT_PDSS) +#define PWR(slot) ((slot)->slot_cap & UB_SLOT_PWCS) struct ubhp_msg_payload { u16 slot_id; diff --git a/drivers/ub/ubus/services/hotplug/hotplug_core.c b/drivers/ub/ubus/services/hotplug/hotplug_core.c index 0646a8d388d7..a419dbae0ea2 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug_core.c +++ b/drivers/ub/ubus/services/hotplug/hotplug_core.c @@ -397,6 +397,102 @@ static void ubhp_disconnect_slot(struct ub_slot *slot) 
slot->r_uent = NULL; } +static void ubhp_clear_port(struct ub_slot *slot) +{ + struct ub_port *port; + + for_each_slot_port(port, slot) { + port->r_index = 0; + guid_copy(&port->r_guid, &guid_null); + } +} + +/** + * ubhp_enum_at_slot() - enum at slot to find new devices + * @slot: the slot that has new device plugged in + * @dev_list: a list to store the new found devices + * + * this func use bfs to enum devices and put them into dev_list, + * which means the previous device in dev_list is enumerated previous + */ +static int ubhp_enum_at_slot(struct ub_slot *slot, struct list_head *dev_list) +{ + void *buf; + int ret; + +#define UB_TOPO_BUF_SZ SZ_4K + buf = kzalloc(UB_TOPO_BUF_SZ, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ub_enum_topo_scan_ports(slot->uent, slot->port_start, slot->port_num, + dev_list, buf); + if (ret) + ubhp_clear_port(slot); + + kfree(buf); + return ret; +} + +/** + * a simple example for link up + * for a given topo like + * +-------------+ +---------+ +---------+ +--------+ + * | controller0 |p0:---:p0| switch0 |p1:---slot0---:p0| switch1 |p1:---:p0| device0| + * +-------------+ +---------+ +---------+ +--------+ + * when slot0 is calling handle link up + * 1. enum at slot0 to create switch1 and device0, put them in dev_list + * 2. route dev_list to set up route between these two devices + * 3. handle route link up at slot0, add route of left(controller0 & switch0) + * into right(switch1 & device0) and route of right into left + * 4. 
start switch1 and device0 + */ +static int ubhp_handle_link_up(struct ub_slot *slot) +{ + struct list_head dev_list; + int ret; + + INIT_LIST_HEAD(&dev_list); + + ret = ubhp_enum_at_slot(slot, &dev_list); + if (ret) { + ub_err(slot->uent, "enum at slot%u failed, ret=%d\n", slot->slot_id, ret); + return ret; + } + + if (list_empty(&dev_list)) { + ub_warn(slot->uent, "link up without remote dev\n"); + return -ENXIO; + } + + ret = ub_route_entities(&dev_list); + if (ret) { + ub_err(slot->uent, "hotplug cal route failed, ret=%d\n", ret); + goto err_route; + } + + slot->r_uent = slot->ports->r_uent; + ret = ubhp_update_route_link_up(slot); + if (ret) { + ub_err(slot->uent, "hotplug update route failed, ret=%d\n", ret); + goto err_link_up; + } + + ret = ub_enum_entities_active(&dev_list); + if (ret) { + ub_err(slot->uent, "hotplug start devices failed, ret=%d\n", ret); + goto err_link_up; + } + + return 0; +err_link_up: + ubhp_update_route_link_down(slot); + slot->r_uent = NULL; +err_route: + ub_enum_clear_ent_list(&dev_list); + return ret; +} + /** * a simple example for link down * for a given topo like @@ -471,7 +567,7 @@ static void ubhp_button_handler(struct work_struct *work) void ubhp_handle_power(struct ub_slot *slot, bool power_on) { - if (!slot) + if (!slot || !PWR(slot)) return; mutex_lock(&slot->state_lock); @@ -531,11 +627,22 @@ static void ubhp_handle_present(struct ub_slot *slot) ubhp_set_slot_power(slot, POWER_ON); - mutex_unlock(&slot->state_lock); - ubhp_get_slot(slot); - queue_delayed_work(get_rx_msg_wq(UB_MSG_CODE_LINK), - &slot->power_work, HP_LINK_WAIT_DELAY * HZ); - return; + /* If support power ctrl, wait link up process */ + if (PWR(slot)) { + mutex_unlock(&slot->state_lock); + ubhp_get_slot(slot); + queue_delayed_work(get_rx_msg_wq(UB_MSG_CODE_LINK), + &slot->power_work, HP_LINK_WAIT_DELAY * HZ); + return; + } + + if (ubhp_handle_link_up(slot)) + goto poweroff; + + ubhp_set_indicators(slot, INDICATOR_ON, INDICATOR_NOOP); + slot->state = SLOT_ON; 
+ ub_info(slot->uent, "slot%u on\n", slot->slot_id); + out: /** * why cancel button work here: @@ -559,6 +666,8 @@ static void ubhp_handle_present(struct ub_slot *slot) ub_info(slot->uent, "slot%u handle hotplug succeeded\n", slot->slot_id); return; +poweroff: + ubhp_set_slot_power(slot, POWER_OFF); clear_state: slot->state = SLOT_OFF; ubhp_set_indicators(slot, INDICATOR_OFF, INDICATOR_NOOP); diff --git a/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c b/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c index 28753cc2501c..73d32079bdd8 100644 --- a/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c +++ b/drivers/ub/ubus/services/hotplug/hotplug_ctrl.c @@ -95,13 +95,18 @@ void ubhp_set_indicators(struct ub_slot *slot, u8 power, u8 work) void ubhp_set_slot_power(struct ub_slot *slot, enum power_state power) { - ub_slot_write_byte(slot, UB_SLOT_PW_CTRL, power); + if (PWR(slot)) + ub_slot_write_byte(slot, UB_SLOT_PW_CTRL, power); } bool ubhp_card_present(struct ub_slot *slot) { u8 val; + /* always present if no present ctrl */ + if (!PRESENT(slot)) + return true; + ub_slot_read_byte(slot, UB_SLOT_PD_STA, &val); return !!(val & UB_SLOT_PD_STA_MASK); @@ -153,54 +158,52 @@ bool ubhp_confirm_event(struct ub_slot *slot, enum hotplug_event event) return true; } -static void ubhp_start_slot(struct ub_slot *slot) +static void ubhp_enable(struct ub_slot *slot, u32 pos, u32 mask, bool flag) { u8 val; - /* enable PP */ - ub_slot_read_byte(slot, UB_SLOT_PP_CTRL, &val); - val |= UB_SLOT_PP_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PP_CTRL, val); + if (!flag) + return; - /* enable PD */ - ub_slot_read_byte(slot, UB_SLOT_PD_CTRL, &val); - val |= UB_SLOT_PD_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PD_CTRL, val); + ub_slot_read_byte(slot, pos, &val); + val |= mask; + ub_slot_write_byte(slot, pos, val); +} - /* enable PDS */ - ub_slot_read_byte(slot, UB_SLOT_PDS_CTRL, &val); - val |= UB_SLOT_PDS_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PDS_CTRL, val); +static void 
ubhp_disable(struct ub_slot *slot, u32 pos, u32 mask, bool flag) +{ + u8 val; + + if (!flag) + return; + + ub_slot_read_byte(slot, pos, &val); + val &= ~mask; + ub_slot_write_byte(slot, pos, val); +} +static void ubhp_start_slot(struct ub_slot *slot) +{ + /* enable PP */ + ubhp_enable(slot, UB_SLOT_PP_CTRL, UB_SLOT_PP_CTRL_MASK, BUTTON(slot)); + /* enable PD */ + ubhp_enable(slot, UB_SLOT_PD_CTRL, UB_SLOT_PD_CTRL_MASK, PRESENT(slot)); + /* enable PDS */ + ubhp_enable(slot, UB_SLOT_PDS_CTRL, UB_SLOT_PDS_CTRL_MASK, PRESENT(slot)); /* enable MS */ - ub_slot_read_byte(slot, UB_SLOT_MS_CTRL, &val); - val |= UB_SLOT_MS_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_MS_CTRL, val); + ubhp_enable(slot, UB_SLOT_MS_CTRL, UB_SLOT_MS_CTRL_MASK, true); } static void ubhp_stop_slot(struct ub_slot *slot) { - u8 val; - /* disable MS */ - ub_slot_read_byte(slot, UB_SLOT_MS_CTRL, &val); - val &= ~UB_SLOT_MS_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_MS_CTRL, val); - + ubhp_disable(slot, UB_SLOT_MS_CTRL, UB_SLOT_MS_CTRL_MASK, true); /* disable PDS */ - ub_slot_read_byte(slot, UB_SLOT_PDS_CTRL, &val); - val &= ~UB_SLOT_PDS_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PDS_CTRL, val); - + ubhp_disable(slot, UB_SLOT_PDS_CTRL, UB_SLOT_PDS_CTRL_MASK, PRESENT(slot)); /* disable PD */ - ub_slot_read_byte(slot, UB_SLOT_PD_CTRL, &val); - val &= ~UB_SLOT_PD_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PD_CTRL, val); - + ubhp_disable(slot, UB_SLOT_PD_CTRL, UB_SLOT_PD_CTRL_MASK, PRESENT(slot)); /* disable PP */ - ub_slot_read_byte(slot, UB_SLOT_PP_CTRL, &val); - val &= ~UB_SLOT_PP_CTRL_MASK; - ub_slot_write_byte(slot, UB_SLOT_PP_CTRL, val); + ubhp_disable(slot, UB_SLOT_PP_CTRL, UB_SLOT_PP_CTRL_MASK, BUTTON(slot)); } void ubhp_start_slots(struct ub_entity *uent) diff --git a/include/uapi/ub/ubus/ubus_regs.h b/include/uapi/ub/ubus/ubus_regs.h index a4fe600f5459..47847be68e91 100644 --- a/include/uapi/ub/ubus/ubus_regs.h +++ b/include/uapi/ub/ubus/ubus_regs.h @@ -81,6 +81,7 @@ enum ub_port_cap_id 
{ #define UB_SLOT_WLPS 0x2 #define UB_SLOT_PLPS 0x4 #define UB_SLOT_PDSS 0x8 +#define UB_SLOT_PWCS 0x10 #define UB_SLOT_PORT UB_ADDR_TO_POS(0x3) #define UB_SLOT_START_PORT 0x0000ffff #define UB_SLOT_PP_CTRL UB_ADDR_TO_POS(0x4) -- Gitee From 6b319b654f21ba8a633dc0cac76a41b377c30a97 Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Wed, 10 Dec 2025 20:36:11 +0800 Subject: [PATCH 088/126] ub:ubus: bugfix port reset in cluster mode commit 246a059f3238542f9f9ada5fda550e7f8fb4f78a openEuler drivers inclusion category: Bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- bugfix port reset in cluster mode. Signed-off-by: Yahui Liu Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- Documentation/ub/ubus/hisi_ubus.rst | 2 -- drivers/ub/ubus/port.c | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Documentation/ub/ubus/hisi_ubus.rst b/Documentation/ub/ubus/hisi_ubus.rst index b384b058129f..90e76d2e587f 100644 --- a/Documentation/ub/ubus/hisi_ubus.rst +++ b/Documentation/ub/ubus/hisi_ubus.rst @@ -61,8 +61,6 @@ UB Message Core Driver Hisi UBUS implements a message device that provides a set of operations:: static struct message_ops hi_message_ops = { - .probe_dev = hi_message_probe_dev, - .remove_dev = hi_message_remove_dev, .sync_request = hi_message_sync_request, .response = hi_message_response, .sync_enum = hi_message_sync_enum, diff --git a/drivers/ub/ubus/port.c b/drivers/ub/ubus/port.c index f2ec6e8b9f47..f4d91d0e9d99 100644 --- a/drivers/ub/ubus/port.c +++ b/drivers/ub/ubus/port.c @@ -162,6 +162,11 @@ static ssize_t port_reset_store(struct ub_port *port, const char *buf, return -EINVAL; } + if (port->uent->ubc->cluster) { + ub_err(port->uent, "Port reset is not supported by sysfs in cluster mode\n"); + return -EINVAL; + } + ret = ub_port_reset_function(port); if (ret < 0) return ret; -- Gitee From d83d8a99744c9dd9f6313ceb2ef4a8fca0de9af0 Mon Sep 17 00:00:00 2001 From: 
Junlong Zheng Date: Wed, 10 Dec 2025 20:24:13 +0800 Subject: [PATCH 089/126] ub:hisi-ubus: fix MUE unreg msg rsp time commit f0942c09a5cf0eb52de419aeaf1067d7798fd60e openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- 1. During MUE deregistration processing, first send a response to the control plane, then proceed with the device destruction process. 2. Add entity number to the uent release log. Fixes: 86fec00cb73a ("ub:hisi-ubus: Support UBUS vdm entity enable message") Signed-off-by: Junlong Zheng Signed-off-by: Shi Yang --- drivers/ub/ubus/ubus_entity.c | 4 +++- drivers/ub/ubus/vendor/hisilicon/vdm.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index b43d682ba3d8..105c6f396b44 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -463,6 +463,7 @@ EXPORT_SYMBOL_GPL(ub_start_ent); static void ub_release_ent(struct device *dev) { struct ub_entity *uent; + u32 uent_num; uent = to_ub_entity(dev); if (is_primary(uent) && !is_p_device(uent)) { @@ -480,8 +481,9 @@ static void ub_release_ent(struct device *dev) kfree(uent->driver_override); uent->token_value = 0; + uent_num = uent->uent_num; kfree(uent); - pr_info("uent release\n"); + pr_info("uent[%#x] release\n", uent_num); } void ub_stop_ent(struct ub_entity *uent) diff --git a/drivers/ub/ubus/vendor/hisilicon/vdm.c b/drivers/ub/ubus/vendor/hisilicon/vdm.c index 4a19e0fb8d57..329bc51b5a0e 100644 --- a/drivers/ub/ubus/vendor/hisilicon/vdm.c +++ b/drivers/ub/ubus/vendor/hisilicon/vdm.c @@ -256,10 +256,11 @@ static u8 ub_idevice_pue_rls_handler(struct ub_bus_controller *ubc, struct vdm_m status = UB_MSG_RSP_SUCCESS; } + ub_vdm_msg_rsp(ubc, pkt, status); + if (status == UB_MSG_RSP_SUCCESS) ub_disable_ent(uent); - ub_vdm_msg_rsp(ubc, pkt, status); return status; } -- Gitee From 
cd42e19b6112345c928f5df61d9b2e310409d157 Mon Sep 17 00:00:00 2001 From: Jianquan-Lin Date: Tue, 16 Dec 2025 09:58:16 +0800 Subject: [PATCH 090/126] ub:ubus: Change create device irq domain debug info commit 9147e429cddb371664aeb08f8ed5af680cb6054a openEuler drivers inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID700V CVE: NA ----------------------------------------------------------- Change create device irq domain debug info. Fixes: 81962d0ecc6e ("ub:ubus: Support UBUS Interrupt framework") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubus/msi/irqdomain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ub/ubus/msi/irqdomain.c b/drivers/ub/ubus/msi/irqdomain.c index 5a01c40368bc..83da28e03fc7 100644 --- a/drivers/ub/ubus/msi/irqdomain.c +++ b/drivers/ub/ubus/msi/irqdomain.c @@ -95,7 +95,7 @@ static bool ub_create_device_domain(struct ub_entity *uent, return true; if (WARN_ON_ONCE(1)) - pr_info("TODO: create device irq domain.\n"); + pr_err("Create device irq domain failed.\n"); return false; } -- Gitee From a8adabddc0faeff3948f52037c226af4748977e2 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 15 Dec 2025 11:22:17 +0800 Subject: [PATCH 091/126] ub: cdma: add eid upi update response commit f7c5a9a48b65c9b80282e4776fcdfc46bb3e02e7 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ----------------------------------------------------------- add eid upi update response Fixes: ca1562136e14 ("ub: cdma: support querying sl information and updating eu") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma_dev.c | 62 ++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index 2b69a44b346e..ae3d76cff5b2 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ 
-298,30 +298,66 @@ static int cdma_ctrlq_eu_del(struct cdma_dev *cdev, struct eu_info *eu) return ret; } +static int cdma_ctrlq_eu_update_response(struct cdma_dev *cdev, u16 seq, int ret_val) +{ + struct ubase_ctrlq_msg msg = { 0 }; + int inbuf = 0; + int ret; + + msg.service_ver = UBASE_CTRLQ_SER_VER_01; + msg.service_type = UBASE_CTRLQ_SER_TYPE_DEV_REGISTER; + msg.opcode = CDMA_CTRLQ_EU_UPDATE; + msg.need_resp = 0; + msg.is_resp = 1; + msg.resp_seq = seq; + msg.resp_ret = (uint8_t)(-ret_val); + msg.in = (void *)&inbuf; + msg.in_size = sizeof(inbuf); + + ret = ubase_ctrlq_send_msg(cdev->adev, &msg); + if (ret) + dev_err(cdev->dev, "send eu update response failed, ret = %d, ret_val = %d.\n", + ret, ret_val); + return ret; +} + static int cdma_ctrlq_eu_update(struct auxiliary_device *adev, u8 service_ver, - void *data, u16 len, u16 seq) + void *data, u16 len, u16 seq) { struct cdma_dev *cdev = dev_get_drvdata(&adev->dev); - struct cdma_ctrlq_eu_info *ctrlq_eu; + struct cdma_ctrlq_eu_info eu = { 0 }; int ret = -EINVAL; - if (len < sizeof(*ctrlq_eu)) { - dev_err(cdev->dev, "ctrlq data len is invalid.\n"); - return -EINVAL; + if (cdev->status != CDMA_NORMAL) { + dev_err(cdev->dev, "status is abnormal and don't update eu.\n"); + return cdma_ctrlq_eu_update_response(cdev, seq, 0); + } + + if (len < sizeof(eu)) { + dev_err(cdev->dev, "update eu msg len = %u is invalid.\n", len); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); } - ctrlq_eu = (struct cdma_ctrlq_eu_info *)data; + memcpy(&eu, data, sizeof(eu)); + if (eu.op != CDMA_CTRLQ_EU_ADD && eu.op != CDMA_CTRLQ_EU_DEL) { + dev_err(cdev->dev, "update eu op = %u is invalid.\n", eu.op); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); + } + + if (eu.eu.eid_idx >= CDMA_MAX_EU_NUM) { + dev_err(cdev->dev, "update eu invalid eid_idx = %u.\n", + eu.eu.eid_idx); + return cdma_ctrlq_eu_update_response(cdev, seq, -EINVAL); + } mutex_lock(&cdev->eu_mutex); - if (ctrlq_eu->op == CDMA_CTRLQ_EU_ADD) - ret = 
cdma_ctrlq_eu_add(cdev, &ctrlq_eu->eu); - else if (ctrlq_eu->op == CDMA_CTRLQ_EU_DEL) - ret = cdma_ctrlq_eu_del(cdev, &ctrlq_eu->eu); - else - dev_err(cdev->dev, "ctrlq eu op is invalid.\n"); + if (eu.op == CDMA_CTRLQ_EU_ADD) + ret = cdma_ctrlq_eu_add(cdev, &eu.eu); + else if (eu.op == CDMA_CTRLQ_EU_DEL) + ret = cdma_ctrlq_eu_del(cdev, &eu.eu); mutex_unlock(&cdev->eu_mutex); - return ret; + return cdma_ctrlq_eu_update_response(cdev, seq, ret); } int cdma_create_arm_db_page(struct cdma_dev *cdev) -- Gitee From d280a526559edfd14759182367c3890b41f33ae6 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 11:20:51 +0800 Subject: [PATCH 092/126] ub: cdma: modify log level about en information commit 3a0deab724ae6b28be4b3ea36b4805aa02cd7c50 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- modify log level about en information Fixes: ca1562136e14 ("ub: cdma: support querying sl information and updating eu") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma.h | 2 +- drivers/ub/cdma/cdma_api.c | 2 +- drivers/ub/cdma/cdma_chardev.h | 2 +- drivers/ub/cdma/cdma_cmd.c | 12 +++++---- drivers/ub/cdma/cdma_cmd.h | 3 ++- drivers/ub/cdma/cdma_common.h | 2 +- drivers/ub/cdma/cdma_context.h | 2 +- drivers/ub/cdma/cdma_db.h | 2 +- drivers/ub/cdma/cdma_debugfs.h | 2 +- drivers/ub/cdma/cdma_dev.c | 11 ++++----- drivers/ub/cdma/cdma_dev.h | 2 +- drivers/ub/cdma/cdma_eq.h | 2 +- drivers/ub/cdma/cdma_event.h | 3 ++- drivers/ub/cdma/cdma_handle.h | 2 +- drivers/ub/cdma/cdma_ioctl.c | 6 ++--- drivers/ub/cdma/cdma_ioctl.h | 2 +- drivers/ub/cdma/cdma_jfc.h | 2 +- drivers/ub/cdma/cdma_jfs.h | 2 +- drivers/ub/cdma/cdma_main.c | 1 - drivers/ub/cdma/cdma_mbox.h | 2 +- drivers/ub/cdma/cdma_mmap.h | 2 +- drivers/ub/cdma/cdma_queue.c | 3 +-- drivers/ub/cdma/cdma_queue.h | 3 ++- drivers/ub/cdma/cdma_segment.h | 2 +- 
drivers/ub/cdma/cdma_tid.h | 2 +- drivers/ub/cdma/cdma_tp.c | 44 ++++++++++++++++++--------------- drivers/ub/cdma/cdma_tp.h | 3 ++- drivers/ub/cdma/cdma_types.h | 6 ++--- drivers/ub/cdma/cdma_uobj.h | 2 +- include/uapi/ub/cdma/cdma_abi.h | 2 +- include/ub/cdma/cdma_api.h | 2 +- 31 files changed, 70 insertions(+), 65 deletions(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b7d00bcf39ac..b77cf1350545 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -230,4 +230,4 @@ static inline struct cdma_dev *get_cdma_dev(struct auxiliary_device *adev) return (struct cdma_dev *)dev_get_drvdata(&adev->dev); } -#endif /* _CDMA_H_ */ +#endif /* __CDMA_H__ */ diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index ae84210c1f97..36f037b97d0b 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -367,7 +367,7 @@ void dma_free_queue(struct dma_device *dma_dev, int queue_id) } ctx_res = (struct cdma_ctx_res *)dma_dev->private_data; - queue = (struct cdma_queue *)xa_load(&ctx_res->queue_xa, queue_id); + queue = xa_load(&ctx_res->queue_xa, queue_id); if (!queue) { dev_err(cdev->dev, "no queue found in this device, id = %d\n", queue_id); diff --git a/drivers/ub/cdma/cdma_chardev.h b/drivers/ub/cdma/cdma_chardev.h index 0bd4fcc654ff..684c7a766d16 100644 --- a/drivers/ub/cdma/cdma_chardev.h +++ b/drivers/ub/cdma/cdma_chardev.h @@ -18,4 +18,4 @@ void cdma_destroy_chardev(struct cdma_dev *cdev); int cdma_create_chardev(struct cdma_dev *cdev); void cdma_release_file(struct kref *ref); -#endif /* _CDMA_CHARDEV_H_ */ +#endif /* __CDMA_CHARDEV_H__ */ diff --git a/drivers/ub/cdma/cdma_cmd.c b/drivers/ub/cdma/cdma_cmd.c index c8bf01d930ad..239369363a25 100644 --- a/drivers/ub/cdma/cdma_cmd.c +++ b/drivers/ub/cdma/cdma_cmd.c @@ -32,8 +32,10 @@ static int cdma_query_caps_from_firmware(struct cdma_dev *cdev) int ret; ret = cdma_cmd_query_fw_resource(cdev, &cmd); - if (ret) - return dev_err_probe(cdev->dev, ret, "query fw 
resource failed\n"); + if (ret) { + dev_err(cdev->dev, "query fw resource failed, ret = %d\n", ret); + return ret; + } caps->jfs_sge = cmd.jfs_sge; caps->trans_mode = cmd.trans_mode; @@ -42,9 +44,9 @@ static int cdma_query_caps_from_firmware(struct cdma_dev *cdev) caps->ue_cnt = cmd.ue_cnt; caps->ue_id = cmd.ue_id; - dev_dbg(cdev->dev, "jfs_sge = 0x%x, trans_mode = 0x%x, seid.max_cnt = 0x%x\n", + dev_info(cdev->dev, "jfs_sge = 0x%x, trans_mode = 0x%x, seid.max_cnt = 0x%x\n", caps->jfs_sge, caps->trans_mode, caps->seid.max_cnt); - dev_dbg(cdev->dev, "feature = 0x%x, ue_cnt = 0x%x, ue_id = 0x%x\n", + dev_info(cdev->dev, "feature = 0x%x, ue_cnt = 0x%x, ue_id = 0x%x\n", caps->feature, caps->ue_cnt, caps->ue_id); return 0; @@ -207,7 +209,7 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev) attr->eu_num = out_query.seid_num; for (i = 0; i < attr->eu_num; i++) - dev_dbg(cdev->dev, + dev_info(cdev->dev, "cdma init eus[%u], upi = 0x%x, eid = 0x%x, eid_idx = 0x%x.\n", i, eus[i].upi, eus[i].eid.dw0, eus[i].eid_idx); mutex_unlock(&cdev->eu_mutex); diff --git a/drivers/ub/cdma/cdma_cmd.h b/drivers/ub/cdma/cdma_cmd.h index f85331c8c51b..0f676791b121 100644 --- a/drivers/ub/cdma/cdma_cmd.h +++ b/drivers/ub/cdma/cdma_cmd.h @@ -79,4 +79,5 @@ int cdma_ctrlq_query_eu(struct cdma_dev *cdev); void cdma_cmd_inc(struct cdma_dev *cdev); void cdma_cmd_dec(struct cdma_dev *cdev); void cdma_cmd_flush(struct cdma_dev *cdev); -#endif + +#endif /* __CDMA_CMD_H__ */ diff --git a/drivers/ub/cdma/cdma_common.h b/drivers/ub/cdma/cdma_common.h index 58855991647d..d6da50f5d0aa 100644 --- a/drivers/ub/cdma/cdma_common.h +++ b/drivers/ub/cdma/cdma_common.h @@ -108,4 +108,4 @@ int cdma_pin_queue_addr(struct cdma_dev *cdev, u64 addr, u32 len, struct cdma_buf *buf); void cdma_unpin_queue_addr(struct cdma_umem *umem); -#endif +#endif /* __CDMA_COMMON_H__ */ diff --git a/drivers/ub/cdma/cdma_context.h b/drivers/ub/cdma/cdma_context.h index 0eb40763c29d..715b59b64a41 100644 --- 
a/drivers/ub/cdma/cdma_context.h +++ b/drivers/ub/cdma/cdma_context.h @@ -38,4 +38,4 @@ struct cdma_context *cdma_alloc_context(struct cdma_dev *cdev, bool is_kernel); void cdma_free_context(struct cdma_dev *cdev, struct cdma_context *ctx); void cdma_cleanup_context_res(struct cdma_context *ctx); -#endif /* CDMA_CONTEXT_H */ +#endif /* __CDMA_CONTEXT_H__ */ diff --git a/drivers/ub/cdma/cdma_db.h b/drivers/ub/cdma/cdma_db.h index fa3ef8c0f570..f11780bf3e29 100644 --- a/drivers/ub/cdma/cdma_db.h +++ b/drivers/ub/cdma/cdma_db.h @@ -39,4 +39,4 @@ int cdma_alloc_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); void cdma_free_sw_db(struct cdma_dev *dev, struct cdma_sw_db *db); -#endif /* CDMA_DB_H */ +#endif /* __CDMA_DB_H__ */ diff --git a/drivers/ub/cdma/cdma_debugfs.h b/drivers/ub/cdma/cdma_debugfs.h index 1cd0f2ada9dc..fa8af1f1c1ba 100644 --- a/drivers/ub/cdma/cdma_debugfs.h +++ b/drivers/ub/cdma/cdma_debugfs.h @@ -55,4 +55,4 @@ struct cdma_dbgfs { int cdma_dbg_init(struct auxiliary_device *adev); void cdma_dbg_uninit(struct auxiliary_device *adev); -#endif /* CDMA_DEBUGFS_H */ +#endif /* __CDMA_DEBUGFS_H__ */ diff --git a/drivers/ub/cdma/cdma_dev.c b/drivers/ub/cdma/cdma_dev.c index ae3d76cff5b2..54e2e4c778f7 100644 --- a/drivers/ub/cdma/cdma_dev.c +++ b/drivers/ub/cdma/cdma_dev.c @@ -232,10 +232,10 @@ static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) if (eu->eid_idx != eus[i].eid_idx) continue; - dev_dbg(cdev->dev, - "cdma.%u: eid_idx[0x%x] eid[0x%x->0x%x] upi[0x%x->0x%x] update success.\n", - cdev->adev->id, eu->eid_idx, eus[i].eid.dw0, - eu->eid.dw0, eus[i].upi, eu->upi & CDMA_UPI_MASK); + dev_info(cdev->dev, + "cdma.%u: eid_idx[0x%x] eid[0x%x->0x%x] upi[0x%x->0x%x] update success.\n", + cdev->adev->id, eu->eid_idx, eus[i].eid.dw0, + eu->eid.dw0, eus[i].upi, eu->upi & CDMA_UPI_MASK); eus[i].eid = eu->eid; eus[i].upi = eu->upi & CDMA_UPI_MASK; @@ -254,7 +254,7 @@ static int cdma_ctrlq_eu_add(struct cdma_dev *cdev, struct eu_info *eu) } 
eus[attr->eu_num++] = *eu; - dev_dbg(cdev->dev, + dev_info(cdev->dev, "cdma.%u: eid_idx[0x%x] eid[0x%x] upi[0x%x] add success.\n", cdev->adev->id, eu->eid_idx, eu->eid.dw0, eu->upi & CDMA_UPI_MASK); @@ -479,7 +479,6 @@ void cdma_destroy_dev(struct cdma_dev *cdev, bool is_remove) if (is_remove) { cdma_free_dev_tid(cdev); - cdma_del_device_from_list(cdev); cdma_uninit_dev_param(cdev); kfree(cdev); diff --git a/drivers/ub/cdma/cdma_dev.h b/drivers/ub/cdma/cdma_dev.h index d433218934f1..2737cd3a5c58 100644 --- a/drivers/ub/cdma/cdma_dev.h +++ b/drivers/ub/cdma/cdma_dev.h @@ -34,4 +34,4 @@ void cdma_unregister_crq_event(struct auxiliary_device *adev); int cdma_create_arm_db_page(struct cdma_dev *cdev); void cdma_destroy_arm_db_page(struct cdma_dev *cdev); -#endif /* _CDMA_DEV_H_ */ +#endif /* __CDMA_DEV_H__ */ diff --git a/drivers/ub/cdma/cdma_eq.h b/drivers/ub/cdma/cdma_eq.h index 70e9edcccad4..cac34b5a2ae0 100644 --- a/drivers/ub/cdma/cdma_eq.h +++ b/drivers/ub/cdma/cdma_eq.h @@ -15,4 +15,4 @@ void cdma_unreg_ae_event(struct auxiliary_device *adev); int cdma_reg_ce_event(struct auxiliary_device *adev); void cdma_unreg_ce_event(struct auxiliary_device *adev); -#endif +#endif /* __CDMA_EQ_H__ */ diff --git a/drivers/ub/cdma/cdma_event.h b/drivers/ub/cdma/cdma_event.h index 4ca14c3c5fcb..d11a2f681dc7 100644 --- a/drivers/ub/cdma/cdma_event.h +++ b/drivers/ub/cdma/cdma_event.h @@ -76,4 +76,5 @@ void cdma_release_comp_event(struct cdma_jfce *jfce, struct list_head *event_lis void cdma_release_async_event(struct cdma_context *ctx, struct list_head *event_list); void cdma_put_jfae(struct cdma_context *ctx); -#endif /* CDMA_EVENT_H */ + +#endif /* __CDMA_EVENT_H__ */ diff --git a/drivers/ub/cdma/cdma_handle.h b/drivers/ub/cdma/cdma_handle.h index 00cb8049778e..4d36f72c0d97 100644 --- a/drivers/ub/cdma/cdma_handle.h +++ b/drivers/ub/cdma/cdma_handle.h @@ -19,4 +19,4 @@ int cdma_cas(struct cdma_dev *cdev, struct cdma_queue *queue, int cdma_faa(struct cdma_dev *cdev, struct 
cdma_queue *queue, struct dma_seg *local_seg, struct dma_seg *rmt_seg, u64 add); -#endif /* CDMA_HANDLE_H */ +#endif /* __CDMA_HANDLE_H__ */ diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 0a62e306d6f7..abcf39f0d021 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -71,8 +71,7 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, int ret; if (cfile->uctx) { - dev_err(cdev->dev, "create jfae failed, ctx handle = %d.\n", - ctx->handle); + dev_err(cdev->dev, "cdma context has been created.\n"); return -EEXIST; } @@ -92,7 +91,8 @@ static int cdma_create_ucontext(struct cdma_ioctl_hdr *hdr, ctx->jfae = cdma_alloc_jfae(cfile); if (!ctx->jfae) { - dev_err(cdev->dev, "create jfae failed.\n"); + dev_err(cdev->dev, "create jfae failed, ctx handle = %d.\n", + ctx->handle); ret = -EFAULT; goto free_context; } diff --git a/drivers/ub/cdma/cdma_ioctl.h b/drivers/ub/cdma/cdma_ioctl.h index a5b20c99117e..160c8e0f4300 100644 --- a/drivers/ub/cdma/cdma_ioctl.h +++ b/drivers/ub/cdma/cdma_ioctl.h @@ -9,4 +9,4 @@ struct cdma_ioctl_hdr; int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr); -#endif /* _CDMA_IOCTL_H_ */ +#endif /* __CDMA_IOCTL_H__ */ diff --git a/drivers/ub/cdma/cdma_jfc.h b/drivers/ub/cdma/cdma_jfc.h index 7f512150e50c..39535bf513f3 100644 --- a/drivers/ub/cdma/cdma_jfc.h +++ b/drivers/ub/cdma/cdma_jfc.h @@ -188,4 +188,4 @@ int cdma_jfc_completion(struct notifier_block *nb, unsigned long jfcn, int cdma_poll_jfc(struct cdma_base_jfc *base_jfc, int cr_cnt, struct dma_cr *cr); -#endif /* CDMA_JFC_H */ +#endif /* __CDMA_JFC_H__ */ diff --git a/drivers/ub/cdma/cdma_jfs.h b/drivers/ub/cdma/cdma_jfs.h index 3d0391b03d97..187d1f660db6 100644 --- a/drivers/ub/cdma/cdma_jfs.h +++ b/drivers/ub/cdma/cdma_jfs.h @@ -326,4 +326,4 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id); int cdma_post_jfs_wr(struct cdma_jfs *jfs, struct cdma_jfs_wr *wr, struct cdma_jfs_wr **bad_wr); -#endif +#endif 
/* __CDMA_JFS_H__ */ diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index 8ec5849ade39..b7748e791c5e 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -383,5 +383,4 @@ static void __exit cdma_exit(void) module_init(cdma_init); module_exit(cdma_exit); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0"); MODULE_DESCRIPTION("Hisilicon UBus Crystal DMA Driver"); diff --git a/drivers/ub/cdma/cdma_mbox.h b/drivers/ub/cdma/cdma_mbox.h index e8a00f5c9b97..0cddd4fb8df8 100644 --- a/drivers/ub/cdma/cdma_mbox.h +++ b/drivers/ub/cdma/cdma_mbox.h @@ -43,4 +43,4 @@ int cdma_post_mailbox_ctx(struct cdma_dev *cdev, void *ctx, u32 size, struct ubase_cmd_mailbox *cdma_mailbox_query_ctx(struct cdma_dev *cdev, struct ubase_mbx_attr *attr); -#endif /* CDMA_MBOX_H */ +#endif /* __CDMA_MBOX_H__ */ diff --git a/drivers/ub/cdma/cdma_mmap.h b/drivers/ub/cdma/cdma_mmap.h index 0dd6c609a85e..65abdb5e284e 100644 --- a/drivers/ub/cdma/cdma_mmap.h +++ b/drivers/ub/cdma/cdma_mmap.h @@ -11,4 +11,4 @@ void cdma_unmap_vma_pages(struct cdma_file *cfile); const struct vm_operations_struct *cdma_get_umap_ops(void); void cdma_umap_priv_init(struct cdma_umap_priv *priv, struct vm_area_struct *vma); -#endif /* CDMA_MMAP_H */ +#endif /* __CDMA_MMAP_H__ */ diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index 9b03baef162c..ab7252a649f0 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -16,8 +16,7 @@ struct cdma_queue *cdma_find_queue(struct cdma_dev *cdev, u32 queue_id) struct cdma_queue *queue; spin_lock(&cdev->queue_table.lock); - queue = (struct cdma_queue *)idr_find(&cdev->queue_table.idr_tbl.idr, - queue_id); + queue = idr_find(&cdev->queue_table.idr_tbl.idr, queue_id); spin_unlock(&cdev->queue_table.lock); return queue; diff --git a/drivers/ub/cdma/cdma_queue.h b/drivers/ub/cdma/cdma_queue.h index 08b24cb0b3fc..072e51c3a300 100644 --- a/drivers/ub/cdma/cdma_queue.h +++ b/drivers/ub/cdma/cdma_queue.h @@ 
-36,4 +36,5 @@ struct cdma_queue *cdma_create_queue(struct cdma_dev *cdev, int cdma_delete_queue(struct cdma_dev *cdev, u32 queue_id); void cdma_set_queue_res(struct cdma_dev *cdev, struct cdma_queue *queue, enum cdma_queue_res_type type, void *res); -#endif + +#endif /* __CDMA_QUEUE_H__ */ diff --git a/drivers/ub/cdma/cdma_segment.h b/drivers/ub/cdma/cdma_segment.h index 113e357fcedd..ef1610205fae 100644 --- a/drivers/ub/cdma/cdma_segment.h +++ b/drivers/ub/cdma/cdma_segment.h @@ -32,4 +32,4 @@ void cdma_seg_ungrant(struct cdma_segment *seg); struct dma_seg *cdma_import_seg(struct dma_seg_cfg *cfg); void cdma_unimport_seg(struct dma_seg *seg); -#endif /* CDMA_SEGMENT_H */ +#endif /* __CDMA_SEGMENT_H__ */ diff --git a/drivers/ub/cdma/cdma_tid.h b/drivers/ub/cdma/cdma_tid.h index 8bbd8c0c979a..9b82d47281e1 100644 --- a/drivers/ub/cdma/cdma_tid.h +++ b/drivers/ub/cdma/cdma_tid.h @@ -13,4 +13,4 @@ struct cdma_dev; int cdma_alloc_dev_tid(struct cdma_dev *cdev); void cdma_free_dev_tid(struct cdma_dev *cdev); -#endif +#endif /* __CDMA_TID_H__ */ diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c index a77f1164b416..c5a6b9c7d395 100644 --- a/drivers/ub/cdma/cdma_tp.c +++ b/drivers/ub/cdma/cdma_tp.c @@ -23,7 +23,7 @@ static inline int cdma_ctrlq_msg_send(struct cdma_dev *cdev, static int cdma_ctrlq_create_ctp(struct cdma_dev *cdev, struct cdma_tp_cfg *cfg, u32 *tpn) { - struct cdma_ctrlq_tp_create_cfg ctrlq_tp; + struct cdma_ctrlq_tp_create_cfg ctrlq_tp = { 0 }; struct cdma_ctrlq_tp_ret tp_out = { 0 }; struct ubase_ctrlq_msg msg = { 0 }; int ret; @@ -75,25 +75,29 @@ static void cdma_ctrlq_delete_ctp(struct cdma_dev *cdev, u32 tpn, struct ubase_ctrlq_msg msg = { 0 }; int ret; - ctrlq_tp.seid_flag = CDMA_CTRLQ_FLAG_ON; - ctrlq_tp.deid_flag = CDMA_CTRLQ_FLAG_ON; - ctrlq_tp.scna = cfg->scna; - ctrlq_tp.dcna = cfg->dcna; - ctrlq_tp.seid[0] = cfg->seid; - ctrlq_tp.deid[0] = cfg->deid; - ctrlq_tp.tpn = tpn; - ctrlq_tp.route_type = CDMA_ROUTE_TYPE_CNA; - 
ctrlq_tp.trans_type = CDMA_TRANS_TYPE_CDMA_CTP; - - msg.service_ver = UBASE_CTRLQ_SER_VER_01; - msg.service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL; - msg.opcode = CDMA_CTRLQ_DELETE_CTP; - msg.need_resp = CDMA_CTRLQ_FLAG_ON; - msg.is_resp = CDMA_CTRLQ_FLAG_OFF; - msg.in_size = sizeof(ctrlq_tp); - msg.in = &ctrlq_tp; - msg.out_size = sizeof(tp_out); - msg.out = &tp_out; + ctrlq_tp = (struct cdma_ctrlq_tp_delete_cfg) { + .seid_flag = CDMA_CTRLQ_FLAG_ON, + .deid_flag = CDMA_CTRLQ_FLAG_ON, + .scna = cfg->scna, + .dcna = cfg->dcna, + .seid[0] = cfg->seid, + .deid[0] = cfg->deid, + .tpn = tpn, + .route_type = CDMA_ROUTE_TYPE_CNA, + .trans_type = CDMA_TRANS_TYPE_CDMA_CTP + }; + + msg = (struct ubase_ctrlq_msg) { + .service_ver = UBASE_CTRLQ_SER_VER_01, + .service_type = UBASE_CTRLQ_SER_TYPE_TP_ACL, + .opcode = CDMA_CTRLQ_DELETE_CTP, + .need_resp = CDMA_CTRLQ_FLAG_ON, + .is_resp = CDMA_CTRLQ_FLAG_OFF, + .in_size = sizeof(ctrlq_tp), + .in = &ctrlq_tp, + .out_size = sizeof(tp_out), + .out = &tp_out + }; ret = cdma_ctrlq_msg_send(cdev, &msg); if (ret) diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h index 72019df35d74..dc18002785ed 100644 --- a/drivers/ub/cdma/cdma_tp.h +++ b/drivers/ub/cdma/cdma_tp.h @@ -73,4 +73,5 @@ struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); -#endif /* CDMA_TP_H */ + +#endif /* __CDMA_TP_H__ */ diff --git a/drivers/ub/cdma/cdma_types.h b/drivers/ub/cdma/cdma_types.h index 947c360ba2ef..1a9aef127bc2 100644 --- a/drivers/ub/cdma/cdma_types.h +++ b/drivers/ub/cdma/cdma_types.h @@ -12,7 +12,7 @@ enum cdma_event_type { CDMA_EVENT_JFC_ERR, CDMA_EVENT_JFS_ERR, - CDMA_EVENT_DEV_INVALID, + CDMA_EVENT_DEV_INVALID }; enum cdma_remove_reason { @@ -73,7 +73,6 @@ struct cdma_tp_cfg { struct cdma_base_tp { struct cdma_ucontext *uctx; struct cdma_tp_cfg cfg; - u64 usr_tp; u32 tpn; u32 tp_id; }; @@ -101,7 +100,6 @@ 
struct cdma_base_jfs { struct cdma_context *ctx; struct cdma_jfs_cfg cfg; cdma_event_callback_t jfae_handler; - u64 usr_jfs; u32 id; atomic_t use_cnt; struct cdma_jfs_event jfs_event; @@ -162,4 +160,4 @@ struct cdma_umap_priv { struct list_head node; }; -#endif +#endif /* __CDMA_TYPES_H__ */ diff --git a/drivers/ub/cdma/cdma_uobj.h b/drivers/ub/cdma/cdma_uobj.h index f343559a33ce..480db379901a 100644 --- a/drivers/ub/cdma/cdma_uobj.h +++ b/drivers/ub/cdma/cdma_uobj.h @@ -31,4 +31,4 @@ struct cdma_uobj *cdma_uobj_get(struct cdma_file *cfile, int id, void cdma_cleanup_context_uobj(struct cdma_file *cfile, enum cdma_remove_reason why); void cdma_close_uobj_fd(struct cdma_file *cfile); -#endif +#endif /* __CDMA_UOBJ_H__ */ diff --git a/include/uapi/ub/cdma/cdma_abi.h b/include/uapi/ub/cdma/cdma_abi.h index 681854ed9765..d9c89e57019b 100644 --- a/include/uapi/ub/cdma/cdma_abi.h +++ b/include/uapi/ub/cdma/cdma_abi.h @@ -417,4 +417,4 @@ enum jfc_poll_state { JFC_POLL_ERR, }; -#endif +#endif /* _UAPI_UB_CDMA_CDMA_ABI_H_ */ diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 51acd722a74d..4f80012ef3e2 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -236,4 +236,4 @@ int dma_register_client(struct dma_client *client); void dma_unregister_client(struct dma_client *client); -#endif +#endif /* _UB_CDMA_CDMA_API_H_ */ -- Gitee From 3549dc6f3a47028652c120eebecb1cf665ab981b Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 14:07:08 +0800 Subject: [PATCH 093/126] ub: cdma: fix kasan cdma jfae uaf commit 53a1a76fe01939f53d15d0e360d25d26de9a9514 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- fix kasan cdma jfae uaf Fixes: 35203448b9d1 ("ub: cdma: support reporting asynchronous events") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma_event.c | 5 ++++- 
drivers/ub/cdma/cdma_ioctl.c | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index 057bf2daefc3..bf0554c200a5 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -611,7 +611,10 @@ static int cdma_delete_jfae(struct inode *inode, struct file *filp) if (!mutex_trylock(&cfile->ctx_mutex)) return -ENOLCK; - jfae->ctx->jfae = NULL; + + if (jfae->ctx) + jfae->ctx->jfae = NULL; + cdma_uninit_jfe(&jfae->jfe); kfree(jfae); filp->private_data = NULL; diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index abcf39f0d021..dbd4bc78429e 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -127,6 +127,7 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, struct cdma_file *cfile) { struct cdma_dev *cdev = cfile->cdev; + struct cdma_jfae *jfae; if (!cfile->uctx) { dev_err(cdev->dev, "cdma context has not been created.\n"); @@ -140,6 +141,10 @@ static int cdma_delete_ucontext(struct cdma_ioctl_hdr *hdr, return -EBUSY; } + jfae = cfile->uctx->jfae; + if (jfae) + jfae->ctx = NULL; + cdma_free_context(cdev, cfile->uctx); cfile->uctx = NULL; -- Gitee From 8ca7319b26d13cf23aefc5c71f292c1e14e498a0 Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Mon, 8 Dec 2025 14:59:59 +0800 Subject: [PATCH 094/126] ub: cdma: fix the timing issue during flow-based deregistration. commit c3c1e92567d995d8fb362964ab696c5345746669 openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- fix the timing issue during flow-based deregistration. 
Fixes: 710a287ef643 ("ub: cdma: support reset function") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- drivers/ub/cdma/cdma.h | 1 + drivers/ub/cdma/cdma_api.c | 14 +++++++------- drivers/ub/cdma/cdma_chardev.c | 8 ++++---- drivers/ub/cdma/cdma_context.c | 2 +- drivers/ub/cdma/cdma_event.c | 28 +++++++++++++++++----------- drivers/ub/cdma/cdma_ioctl.c | 4 ++-- drivers/ub/cdma/cdma_jfc.c | 18 ++++++++++++------ drivers/ub/cdma/cdma_jfs.c | 20 +++++++++++++------- drivers/ub/cdma/cdma_main.c | 21 ++++++++------------- drivers/ub/cdma/cdma_queue.c | 4 ++-- drivers/ub/cdma/cdma_tp.c | 4 ++-- drivers/ub/cdma/cdma_tp.h | 2 +- 12 files changed, 70 insertions(+), 56 deletions(-) diff --git a/drivers/ub/cdma/cdma.h b/drivers/ub/cdma/cdma.h index b77cf1350545..5fea4526e505 100644 --- a/drivers/ub/cdma/cdma.h +++ b/drivers/ub/cdma/cdma.h @@ -38,6 +38,7 @@ enum cdma_cqe_size { enum cdma_status { CDMA_NORMAL, CDMA_SUSPEND, + CDMA_INVALID }; enum cdma_client_ops { diff --git a/drivers/ub/cdma/cdma_api.c b/drivers/ub/cdma/cdma_api.c index 36f037b97d0b..ce7461411d62 100644 --- a/drivers/ub/cdma/cdma_api.c +++ b/drivers/ub/cdma/cdma_api.c @@ -57,7 +57,7 @@ struct dma_device *dma_get_device_list(u32 *num_devices) xa_for_each(cdma_devs_tbl, index, cdev) { attr = &cdev->base.attr; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", attr->eid.dw0); continue; @@ -150,7 +150,7 @@ struct dma_device *dma_get_device_by_eid(struct dev_eid *eid) xa_for_each(cdma_devs_tbl, index, cdev) { attr = &cdev->base.attr; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", attr->eid.dw0); continue; @@ -203,7 +203,7 @@ int dma_create_context(struct dma_device *dma_dev) return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return 
-EINVAL; @@ -302,7 +302,7 @@ int dma_alloc_queue(struct dma_device *dma_dev, int ctx_id, struct queue_cfg *cf return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return -EINVAL; @@ -414,7 +414,7 @@ struct dma_seg *dma_register_seg(struct dma_device *dma_dev, int ctx_id, return NULL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", dma_dev->attr.eid.dw0); return NULL; @@ -558,7 +558,7 @@ static int cdma_param_transfer(struct dma_device *dma_dev, int queue_id, return -EINVAL; } - if (tmp_dev->status == CDMA_SUSPEND) { + if (tmp_dev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); return -EINVAL; } @@ -818,7 +818,7 @@ int dma_poll_queue(struct dma_device *dma_dev, int queue_id, u32 cr_cnt, return -EINVAL; } - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { pr_warn("cdma device is not prepared, eid = 0x%x.\n", eid); return -EINVAL; } diff --git a/drivers/ub/cdma/cdma_chardev.c b/drivers/ub/cdma/cdma_chardev.c index 3614609d683e..51b19d614743 100644 --- a/drivers/ub/cdma/cdma_chardev.c +++ b/drivers/ub/cdma/cdma_chardev.c @@ -67,7 +67,7 @@ static long cdma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct cdma_ioctl_hdr hdr = { 0 }; int ret; - if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + if (!cfile->cdev || cfile->cdev->status >= CDMA_SUSPEND) { pr_info("ioctl cdev is invalid.\n"); return -ENODEV; } @@ -126,7 +126,7 @@ static int cdma_remap_pfn_range(struct cdma_file *cfile, struct vm_area_struct * u32 jfs_id; u32 cmd; - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { dev_warn(cdev->dev, "cdev is resetting.\n"); return -EBUSY; } @@ -177,7 +177,7 @@ static int cdma_mmap(struct file *file, struct vm_area_struct *vma) struct cdma_umap_priv *priv; int 
ret; - if (!cfile->cdev || cfile->cdev->status == CDMA_SUSPEND) { + if (!cfile->cdev || cfile->cdev->status >= CDMA_SUSPEND) { pr_info("mmap cdev is invalid.\n"); return -ENODEV; } @@ -267,7 +267,7 @@ static int cdma_open(struct inode *inode, struct file *file) chardev = container_of(inode->i_cdev, struct cdma_chardev, cdev); cdev = container_of(chardev, struct cdma_dev, chardev); - if (cdev->status == CDMA_SUSPEND) { + if (cdev->status >= CDMA_SUSPEND) { dev_warn(cdev->dev, "cdev is resetting.\n"); return -EBUSY; } diff --git a/drivers/ub/cdma/cdma_context.c b/drivers/ub/cdma/cdma_context.c index c95ccb0c28b4..ec55b03b49fc 100644 --- a/drivers/ub/cdma/cdma_context.c +++ b/drivers/ub/cdma/cdma_context.c @@ -151,7 +151,7 @@ static void cdma_cleanup_queue_res(struct cdma_dev *cdev, struct cdma_context *c cdma_delete_jfs(cdev, queue->jfs->id); if (queue->tp) - cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, ctx->invalid); if (queue->jfc) cdma_delete_jfc(cdev, queue->jfc->id, NULL); diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index bf0554c200a5..e8ecb7f8c4f6 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -512,6 +512,8 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, struct cdma_cmd_async_event async_event = { 0 }; struct cdma_jfe_event *event; struct list_head event_list; + struct cdma_context *ctx; + struct cdma_dev *cdev; u32 event_cnt; int ret; @@ -520,7 +522,10 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, struct file *filp, return -EINVAL; } - if (!jfae->cfile->cdev || jfae->cfile->cdev->status == CDMA_SUSPEND) { + ctx = jfae->ctx; + cdev = jfae->cfile->cdev; + + if (!cdev || cdev->status == CDMA_INVALID || !ctx || ctx->invalid) { pr_info("wait dev invalid event success.\n"); async_event.event_data = 0; async_event.event_type = CDMA_EVENT_DEV_INVALID; @@ -562,11 +567,16 @@ static int cdma_get_async_event(struct cdma_jfae *jfae, 
struct file *filp, static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait) { struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; + struct cdma_context *ctx; + struct cdma_dev *cdev; - if (!jfae || !jfae->cfile || !jfae->cfile->cdev) + if (!jfae || !jfae->cfile) return POLLERR; - if (jfae->cfile->cdev->status == CDMA_SUSPEND) + ctx = jfae->ctx; + cdev = jfae->cfile->cdev; + + if (!cdev || cdev->status == CDMA_INVALID || !ctx || ctx->invalid) return POLLIN | POLLRDNORM; return cdma_jfe_poll(&jfae->jfe, filp, wait); @@ -575,25 +585,21 @@ static __poll_t cdma_jfae_poll(struct file *filp, struct poll_table_struct *wait static long cdma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cdma_jfae *jfae = (struct cdma_jfae *)filp->private_data; - unsigned int nr; - int ret; + unsigned int nr = (unsigned int)_IOC_NR(cmd); + long ret = -ENOIOCTLCMD; if (!jfae) return -EINVAL; - nr = (unsigned int)_IOC_NR(cmd); - switch (nr) { case JFAE_CMD_GET_ASYNC_EVENT: ret = cdma_get_async_event(jfae, filp, arg); break; default: - dev_err(jfae->cfile->cdev->dev, "nr = %u.\n", nr); - ret = -ENOIOCTLCMD; - break; + pr_err("jfae ioctl wrong nr = %u.\n", nr); } - return (long)ret; + return ret; } static int cdma_delete_jfae(struct inode *inode, struct file *filp) diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index dbd4bc78429e..4a30cbbd383f 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -215,7 +215,7 @@ static int cdma_cmd_create_ctp(struct cdma_ioctl_hdr *hdr, return 0; delete_ctp: - cdma_delete_ctp(cdev, ctp->tp_id); + cdma_delete_ctp(cdev, ctp->tp_id, false); delete_obj: cdma_uobj_delete(uobj); @@ -260,7 +260,7 @@ static int cdma_cmd_delete_ctp(struct cdma_ioctl_hdr *hdr, } ctp = uobj->object; - cdma_delete_ctp(cdev, ctp->tp_id); + cdma_delete_ctp(cdev, ctp->tp_id, cfile->uctx->invalid); cdma_uobj_delete(uobj); cdma_set_queue_res(cdev, queue, QUEUE_RES_TP, NULL); 
diff --git a/drivers/ub/cdma/cdma_jfc.c b/drivers/ub/cdma/cdma_jfc.c index 0b3611c3d27d..9c6c82eaee93 100644 --- a/drivers/ub/cdma/cdma_jfc.c +++ b/drivers/ub/cdma/cdma_jfc.c @@ -249,12 +249,20 @@ static int cdma_query_jfc_destroy_done(struct cdma_dev *cdev, uint32_t jfcn) return ret; } -static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, u32 jfcn) +static int cdma_destroy_and_flush_jfc(struct cdma_dev *cdev, struct cdma_jfc *jfc) { #define QUERY_MAX_TIMES 5 + struct cdma_context *ctx = jfc->base.ctx; + u32 jfcn = jfc->jfcn; u32 wait_times = 0; int ret; + if (cdev->status == CDMA_INVALID || (ctx && ctx->invalid)) { + dev_info(cdev->dev, + "resetting Ignore jfc ctx, jfcn = %u\n", jfcn); + return 0; + } + ret = cdma_post_destroy_jfc_mbox(cdev, jfcn, CDMA_JFC_STATE_INVALID); if (ret) { dev_err(cdev->dev, "post mbox to destroy jfc failed, id: %u.\n", jfcn); @@ -555,11 +563,9 @@ int cdma_delete_jfc(struct cdma_dev *cdev, u32 jfcn, return -EINVAL; } - if (!(jfc->base.ctx && jfc->base.ctx->invalid)) { - ret = cdma_destroy_and_flush_jfc(cdev, jfc->jfcn); - if (ret) - dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); - } + ret = cdma_destroy_and_flush_jfc(cdev, jfc); + if (ret) + dev_err(cdev->dev, "jfc delete failed, jfcn = %u.\n", jfcn); if (refcount_dec_and_test(&jfc->event_refcount)) complete(&jfc->event_comp); diff --git a/drivers/ub/cdma/cdma_jfs.c b/drivers/ub/cdma/cdma_jfs.c index 8a62e2a2fd6b..437e48d1e9a6 100644 --- a/drivers/ub/cdma/cdma_jfs.c +++ b/drivers/ub/cdma/cdma_jfs.c @@ -498,10 +498,18 @@ static bool cdma_destroy_jfs_precondition(struct cdma_dev *cdev, } static int cdma_modify_and_destroy_jfs(struct cdma_dev *cdev, - struct cdma_jetty_queue *sq) + struct cdma_jfs *jfs) { + struct cdma_context *ctx = jfs->base_jfs.ctx; + struct cdma_jetty_queue *sq = &jfs->sq; int ret = 0; + if (cdev->status == CDMA_INVALID || (ctx && ctx->invalid)) { + dev_info(cdev->dev, + "resetting Ignore jfs ctx, id = %u.\n", sq->id); + return 0; + } + if 
(!cdma_destroy_jfs_precondition(cdev, sq)) return -EINVAL; @@ -538,11 +546,9 @@ int cdma_delete_jfs(struct cdma_dev *cdev, u32 jfs_id) return -EINVAL; } - if (!(jfs->base_jfs.ctx && jfs->base_jfs.ctx->invalid)) { - ret = cdma_modify_and_destroy_jfs(cdev, &jfs->sq); - if (ret) - dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); - } + ret = cdma_modify_and_destroy_jfs(cdev, jfs); + if (ret) + dev_err(cdev->dev, "jfs delete failed, id = %u.\n", jfs->id); if (refcount_dec_and_test(&jfs->ae_ref_cnt)) complete(&jfs->ae_comp); @@ -1018,7 +1024,7 @@ static int cdma_post_sq_wr(struct cdma_dev *cdev, struct cdma_jetty_queue *sq, post_wr: if (wr_cnt) { - if (cdev->status != CDMA_SUSPEND) { + if (cdev->status == CDMA_NORMAL) { /* Ensure the order of write memory operations */ wmb(); if (wr_cnt == 1 && dwqe_enable && (sq->pi - sq->ci == 1)) diff --git a/drivers/ub/cdma/cdma_main.c b/drivers/ub/cdma/cdma_main.c index b7748e791c5e..83c6671dcf66 100644 --- a/drivers/ub/cdma/cdma_main.c +++ b/drivers/ub/cdma/cdma_main.c @@ -107,13 +107,13 @@ static void cdma_reset_down(struct auxiliary_device *adev) mutex_lock(&g_cdma_reset_mutex); cdev = get_cdma_dev(adev); - if (!cdev || cdev->status == CDMA_SUSPEND) { + if (!cdev || cdev->status >= CDMA_SUSPEND) { dev_warn(&adev->dev, "cdma device is not ready.\n"); mutex_unlock(&g_cdma_reset_mutex); return; } - cdev->status = CDMA_SUSPEND; + cdev->status = CDMA_INVALID; cdma_cmd_flush(cdev); cdma_reset_unmap_vma_pages(cdev, true); cdma_client_handler(cdev, CDMA_CLIENT_STOP); @@ -136,7 +136,7 @@ static void cdma_reset_uninit(struct auxiliary_device *adev) } stage = ubase_get_reset_stage(adev); - if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == CDMA_SUSPEND) { + if (stage == UBASE_RESET_STAGE_UNINIT && cdev->status == CDMA_INVALID) { cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); cdma_destroy_dev(cdev, is_rmmod); } @@ -225,12 +225,12 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) dev_dbg(&auxdev->dev, "%s 
called, matched aux dev(%s.%u).\n", __func__, auxdev->name, auxdev->id); + ubase_reset_unregister(auxdev); mutex_lock(&g_cdma_reset_mutex); cdev = dev_get_drvdata(&auxdev->dev); if (!cdev) { - dev_err(&auxdev->dev, "get drvdata from ubase failed.\n"); - ubase_reset_unregister(auxdev); mutex_unlock(&g_cdma_reset_mutex); + dev_err(&auxdev->dev, "cdma device is not exist.\n"); return; } @@ -239,19 +239,15 @@ static void cdma_uninit_dev(struct auxiliary_device *auxdev) cdma_client_handler(cdev, CDMA_CLIENT_STOP); cdma_client_handler(cdev, CDMA_CLIENT_REMOVE); cdma_reset_unmap_vma_pages(cdev, false); - - if (!is_rmmod) { - ret = ubase_deactivate_dev(auxdev); - dev_info(&auxdev->dev, "ubase deactivate dev ret = %d.\n", ret); - } - - ubase_reset_unregister(auxdev); + ret = is_rmmod ? 0 : ubase_deactivate_dev(auxdev); cdma_dbg_uninit(auxdev); cdma_unregister_event(auxdev); cdma_destroy_chardev(cdev); cdma_free_cfile_uobj(cdev); cdma_destroy_dev(cdev, true); mutex_unlock(&g_cdma_reset_mutex); + + dev_info(&auxdev->dev, "cdma device remove success, ret = %d.\n", ret); } static void cdma_reset_init(struct auxiliary_device *adev) @@ -331,7 +327,6 @@ static int cdma_probe(struct auxiliary_device *auxdev, static void cdma_remove(struct auxiliary_device *auxdev) { cdma_uninit_dev(auxdev); - pr_info("cdma device remove success.\n"); } static const struct auxiliary_device_id cdma_id_table[] = { diff --git a/drivers/ub/cdma/cdma_queue.c b/drivers/ub/cdma/cdma_queue.c index ab7252a649f0..2d6a04d0bff9 100644 --- a/drivers/ub/cdma/cdma_queue.c +++ b/drivers/ub/cdma/cdma_queue.c @@ -100,7 +100,7 @@ static int cdma_create_queue_res(struct cdma_dev *cdev, struct queue_cfg *cfg, return 0; delete_tp: - cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, false); delete_jfc: cdma_delete_jfc(cdev, queue->jfc->id, NULL); @@ -112,7 +112,7 @@ static void cdma_delete_queue_res(struct cdma_dev *cdev, { cdma_delete_jfs(cdev, queue->jfs->id); queue->jfs = NULL; - 
cdma_delete_ctp(cdev, queue->tp->tp_id); + cdma_delete_ctp(cdev, queue->tp->tp_id, false); queue->tp = NULL; cdma_delete_jfc(cdev, queue->jfc->id, NULL); queue->jfc = NULL; diff --git a/drivers/ub/cdma/cdma_tp.c b/drivers/ub/cdma/cdma_tp.c index c5a6b9c7d395..681f0be3a74c 100644 --- a/drivers/ub/cdma/cdma_tp.c +++ b/drivers/ub/cdma/cdma_tp.c @@ -202,7 +202,7 @@ struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, return NULL; } -void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id) +void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id, bool invalid) { struct cdma_tp_cfg cfg = { 0 }; struct cdma_tp *tp; @@ -219,7 +219,7 @@ void cdma_delete_ctp(struct cdma_dev *cdev, u32 tp_id) spin_lock(&cdev->ctp_table.lock); refcount_dec(&tp->refcount); if (refcount_dec_if_one(&tp->refcount)) { - if (cdev->status != CDMA_SUSPEND) { + if (cdev->status == CDMA_NORMAL && !invalid) { flag = true; tpn = tp->base.tpn; cfg = tp->base.cfg; diff --git a/drivers/ub/cdma/cdma_tp.h b/drivers/ub/cdma/cdma_tp.h index dc18002785ed..d291bbae68d3 100644 --- a/drivers/ub/cdma/cdma_tp.h +++ b/drivers/ub/cdma/cdma_tp.h @@ -70,7 +70,7 @@ struct cdma_ctrlq_tp_delete_cfg { struct cdma_base_tp *cdma_create_ctp(struct cdma_dev *cdev, struct cdma_tp_cfg *cfg); -void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id); +void cdma_delete_ctp(struct cdma_dev *cdev, uint32_t tp_id, bool invalid); void cdma_destroy_ctp_imm(struct cdma_dev *cdev, uint32_t tp_id); -- Gitee From a0aba17476eb1ea593dac9917adbfe1a1dd4259d Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Tue, 9 Dec 2025 21:10:52 +0800 Subject: [PATCH 095/126] ub: cdma: modify the compatibility field according to the Linux KABI specification commit 874105a62f638a0d580524a6ac46bd2369edaa3e openEuler driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDBCLI CVE: NA ---------------------------------------------------------------------- modify the compatibility field according to the Linux KABI specification 
Fixes: 34c67ed84c10 ("ub: cdma: support for cdma kernelspace north-south compatibility requirements") Signed-off-by: Zhipeng Lu Signed-off-by: Shi Yang --- include/ub/cdma/cdma_api.h | 49 +++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/include/ub/cdma/cdma_api.h b/include/ub/cdma/cdma_api.h index 4f80012ef3e2..f3b90848bee1 100644 --- a/include/ub/cdma/cdma_api.h +++ b/include/ub/cdma/cdma_api.h @@ -5,6 +5,7 @@ #define _UB_CDMA_CDMA_API_H_ #include +#include #include /** @@ -19,8 +20,10 @@ struct dma_device { struct cdma_device_attr attr; atomic_t ref_cnt; void *private_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; enum dma_cr_opcode { @@ -68,8 +71,10 @@ struct dma_cr { u32 local_id; u32 remote_id; u32 tpn; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -89,8 +94,10 @@ struct queue_cfg { u32 dcna; struct dev_eid rmt_eid; u32 trans_mode; - u32 rsv_bitmap; - u32 rsvd[6]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -111,8 +118,10 @@ struct dma_seg { u32 tid; /* data valid only in bit 0-19 */ u32 token_value; bool token_value_valid; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; struct dma_seg_cfg { @@ -120,8 +129,10 @@ struct dma_seg_cfg { u64 len; u32 token_value; bool token_value_valid; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -149,8 +160,10 @@ enum dma_status { struct dma_cas_data { u64 compare_data; u64 swap_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -163,8 +176,10 @@ struct dma_cas_data { struct dma_notify_data { struct dma_seg *notify_seg; u64 notify_data; - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + 
KABI_RESERVE(3) + KABI_RESERVE(4) }; /** @@ -183,8 +198,10 @@ struct dma_client { int (*add)(u32 eid); void (*remove)(u32 eid); void (*stop)(u32 eid); - u32 rsv_bitmap; - u32 rsvd[4]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) }; struct dma_device *dma_get_device_list(u32 *num_devices); -- Gitee From 91c1491b42f2a8664008d8d63a5522a065616719 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Wed, 19 Nov 2025 13:38:29 +0800 Subject: [PATCH 096/126] ub:ubus: Fix X86 build error commit d742069465ca1953abb2591d392686fbf1d69714 openEuler driver inclusion category: feature bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID7DDH CVE: NA ----------------------------------------------------------- Fix X86 build error.When CONFIG_GENERIC_MSI_IRQ is close and CONFIG_UB is on, msi.domain cannot be found. Add CONFIG_GENERIC_MSI_IRQ in function ub_update_msi_domain Signed-off-by: Jianquan Lin Signed-off-by: Shi Yang --- drivers/ub/ubfi/irq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index 846af8d6c5f1..5835bc8421b3 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -12,6 +12,7 @@ int ub_update_msi_domain(struct device *dev, enum irq_domain_bus_token bus_token) { +#ifdef CONFIG_GENERIC_MSI_IRQ struct fwnode_handle *fwnode; struct irq_domain *domain; @@ -35,7 +36,7 @@ int ub_update_msi_domain(struct device *dev, /* Update msi domain with new bus_token */ dev_set_msi_domain(dev, domain); - +#endif return 0; } EXPORT_SYMBOL_GPL(ub_update_msi_domain); -- Gitee From a13fae78dffceed27e3b08e54643432089b7864e Mon Sep 17 00:00:00 2001 From: Yahui Liu Date: Wed, 10 Dec 2025 10:09:59 +0800 Subject: [PATCH 097/126] ub:ubus: hi_msg_sync_wait first pull cq commit eea5fe7853d982ab3bef4665e731f58f3747a116 openEuler In cluster mode, ub_get_ue_by_entity_idx should not check entity idx. 
Currently hi_msg_sync_wait() first check msg is not timeout, then pull the cq, which can be misjudged because of cpu not getting dispatch for long time but actually the msg cq already arrived. Change the order using do {} while() so that driver will pull cq at least once. Fixes: 0f9e2dbe888d ("ub:ubus: Support for enabling and disabling ue") Signed-off-by: Yahui Liu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubus/ubus_entity.c | 3 --- drivers/ub/ubus/vendor/hisilicon/msg.c | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/ub/ubus/ubus_entity.c b/drivers/ub/ubus/ubus_entity.c index 105c6f396b44..3dbc196d4da4 100644 --- a/drivers/ub/ubus/ubus_entity.c +++ b/drivers/ub/ubus/ubus_entity.c @@ -1057,9 +1057,6 @@ static struct ub_entity *ub_get_ue_by_entity_idx(struct ub_entity *pue, u32 enti { struct ub_entity *ue; - if (ub_check_ue_para(pue, entity_idx)) - return NULL; - list_for_each_entry(ue, &pue->ue_list, node) { if (ue->entity_idx == entity_idx) return ue; diff --git a/drivers/ub/ubus/vendor/hisilicon/msg.c b/drivers/ub/ubus/vendor/hisilicon/msg.c index 5c4e672aa55e..6d4e9d422c49 100644 --- a/drivers/ub/ubus/vendor/hisilicon/msg.c +++ b/drivers/ub/ubus/vendor/hisilicon/msg.c @@ -249,14 +249,14 @@ static int hi_msg_sync_wait(struct hi_message_device *hmd, int task_type, unsigned long flags; int idx; - while (!time_after64(get_jiffies_64(), end_time)) { + do { idx = hi_msg_cq_poll(hmc, task_type, msn); if (idx >= 0) return idx; if (flag) usleep_range(SLEEP_MIN_US, SLEEP_MAX_US); - } + } while (!time_after64(get_jiffies_64(), end_time)); timeout_msg = kzalloc(TIMEOUT_MSG_INFO_SZ, GFP_ATOMIC); if (!timeout_msg) -- Gitee From 99495d06dddb76ae71d1c1dae472259b2845c006 Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Tue, 9 Dec 2025 14:30:33 +0800 Subject: [PATCH 098/126] ub:ubus: Move the decoder's queue operations to
hisi-ubus Fixes: abc591c50df5 ("ub:ubus: Supports decoder event processing") Signed-off-by: Yuhao Xiang Signed-off-by: zhao-lichang <943677312@qq.com> --- drivers/ub/ubus/decoder.c | 421 ++---------------- drivers/ub/ubus/decoder.h | 2 - drivers/ub/ubus/ubus_controller.h | 5 +- drivers/ub/ubus/vendor/hisilicon/controller.c | 4 +- .../ub/ubus/vendor/hisilicon/hisi-decoder.c | 394 +++++++++++++++- .../ub/ubus/vendor/hisilicon/hisi-decoder.h | 8 +- 6 files changed, 428 insertions(+), 406 deletions(-) diff --git a/drivers/ub/ubus/decoder.c b/drivers/ub/ubus/decoder.c index 56d20dbbf0aa..c32df61a1d1d 100644 --- a/drivers/ub/ubus/decoder.c +++ b/drivers/ub/ubus/decoder.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "ubus.h" @@ -25,52 +24,28 @@ #define CMDQ_SIZE_USE_MASK GENMASK(11, 8) #define CMDQ_SIZE_USE_OFFSET 8 #define CMDQ_ENABLE 0x1 -#define CMD_ENTRY_SIZE 16 #define EVTQ_SIZE_USE_MASK GENMASK(11, 8) #define EVTQ_SIZE_USE_OFFSET 8 #define EVTQ_ENABLE 0x1 -#define EVT_ENTRY_SIZE 16 -#define DECODER_QUEUE_TIMEOUT_US 1000000 /* 1s */ - -static void ub_decoder_uninit_queue(struct ub_decoder *decoder) +static void ub_decoder_uninit_queue(struct ub_bus_controller *ubc, + struct ub_decoder *decoder) { - iounmap(decoder->cmdq.qbase); - iounmap(decoder->evtq.qbase); + if (ubc->ops->uninit_decoder_queue) + ubc->ops->uninit_decoder_queue(decoder); + else + ub_err(ubc->uent, "ub bus controller can't uninit decoder queue\n"); } static int ub_decoder_init_queue(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { - struct ub_entity *uent = ubc->uent; + if (ubc->ops->init_decoder_queue && ubc->ops->uninit_decoder_queue) + return ubc->ops->init_decoder_queue(decoder); - if (ubc->ops->register_decoder_base_addr) { - ubc->ops->register_decoder_base_addr(ubc, &decoder->cmdq.base, - &decoder->evtq.base); - } else { - ub_err(uent, - "ub_bus_controller_ops does not provide register_decoder_base_addr func, exit\n"); - return -EINVAL; - } - - if 
(decoder->cmdq.qs == 0 || decoder->evtq.qs == 0) { - ub_err(uent, "decoder cmdq or evtq qs is 0\n"); - return -EINVAL; - } - - decoder->cmdq.qbase = ioremap(decoder->cmdq.base, - (1 << decoder->cmdq.qs) * CMD_ENTRY_SIZE); - if (!decoder->cmdq.qbase) - return -ENOMEM; - - decoder->evtq.qbase = ioremap(decoder->evtq.base, - (1 << decoder->evtq.qs) * EVT_ENTRY_SIZE); - if (!decoder->evtq.qbase) { - iounmap(decoder->cmdq.qbase); - return -ENOMEM; - } - return 0; + ub_err(ubc->uent, "ub bus controller can't init decoder queue\n"); + return -EINVAL; } static u32 set_mmio_base_reg(struct ub_decoder *decoder) @@ -242,11 +217,11 @@ static u32 ub_decoder_device_set(struct ub_decoder *decoder) static int ub_decoder_create_page_table(struct ub_bus_controller *ubc, struct ub_decoder *decoder) { - if (ubc->ops->create_decoder_table) + if (ubc->ops->create_decoder_table && ubc->ops->free_decoder_table) return ubc->ops->create_decoder_table(decoder); ub_err(decoder->uent, "ub bus controller can't create decoder table\n"); - return -EPERM; + return -EINVAL; } static void ub_decoder_free_page_table(struct ub_bus_controller *ubc, @@ -308,6 +283,11 @@ static int ub_get_decoder_cap(struct ub_decoder *decoder) decoder->cmdq.qs = (val & CMDQ_SIZE_MASK) >> CMDQ_SIZE_OFFSET; decoder->evtq.qs = (val & EVTQ_SIZE_MASK) >> EVTQ_SIZE_OFFSET; + if (decoder->cmdq.qs == 0 || decoder->evtq.qs == 0) { + ub_err(uent, "decoder cmdq or evtq qs is 0\n"); + return -EINVAL; + } + size = decoder->mmio_end_addr - decoder->mmio_base_addr + 1; if (size > mmio_size[decoder->mmio_size_sup]) decoder->mmio_end_addr = decoder->mmio_base_addr + @@ -367,7 +347,7 @@ static int ub_create_decoder(struct ub_bus_controller *ubc) release_page_table: ub_decoder_free_page_table(ubc, decoder); release_queue: - ub_decoder_uninit_queue(decoder); + ub_decoder_uninit_queue(ubc, decoder); release_decoder: kfree(decoder); return ret; @@ -416,374 +396,29 @@ static void ub_remove_decoder(struct ub_bus_controller *ubc) 
ub_decoder_free_page_table(ubc, decoder); - ub_decoder_uninit_queue(decoder); + ub_decoder_uninit_queue(ubc, decoder); kfree(decoder); ubc->decoder = NULL; } -struct sync_entry { - u64 op : 8; - u64 reserve0 : 4; - u64 cm : 2; - u64 ntf_sh : 2; - u64 ntf_attr : 4; - u64 reserve1 : 12; - u64 notify_data : 32; - u64 reserve2 : 2; - u64 notify_addr : 50; - u64 reserve3 : 12; -}; - -struct tlbi_all_entry { - u32 op : 8; - u32 reserve0 : 24; - u32 reserve1; - u32 reserve2; - u32 reserve3; -}; - -struct tlbi_partial_entry { - u32 op : 8; - u32 reserve0 : 24; - u32 tlbi_addr_base : 28; - u32 reserve1 : 4; - u32 tlbi_addr_limt : 28; - u32 reserve2 : 4; - u32 reserve3; -}; - -#define TLBI_ADDR_MASK GENMASK_ULL(43, 20) -#define TLBI_ADDR_OFFSET 20 -#define CMDQ_ENT_DWORDS 2 - -#define NTF_SH_NSH 0b00 -#define NTF_SH_OSH 0b10 -#define NTF_SH_ISH 0b11 -#define NTF_ATTR_IR_NC 0b00 -#define NTF_ATTR_IR_WBRA 0b01 -#define NTF_ATTR_IR_WT 0b10 -#define NTF_ATTR_IR_WB 0b11 -#define NTF_ATTR_OR_NC 0b0000 -#define NTF_ATTR_OR_WBRA 0b0100 -#define NTF_ATTR_OR_WT 0b1000 -#define NTF_ATTR_OR_WB 0b1100 - -#define Q_IDX(qs, p) ((p) & ((1 << (qs)) - 1)) -#define Q_WRP(qs, p) ((p) & (1 << (qs))) -#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) - -enum NOTIFY_TYPE { - DISABLE_NOTIFY = 0, - ENABLE_NOTIFY = 1, -}; - -static bool queue_has_space(struct ub_decoder_queue *q, u32 n) -{ - u32 space, prod, cons; - - prod = Q_IDX(q->qs, q->prod.cmdq_wr_idx); - cons = Q_IDX(q->qs, q->cons.cmdq_rd_idx); - - if (Q_WRP(q->qs, q->prod.cmdq_wr_idx) == - Q_WRP(q->qs, q->cons.cmdq_rd_idx)) - space = (1 << q->qs) - (prod - cons); - else - space = cons - prod; - - return space >= n; -} - -static u32 queue_inc_prod_n(struct ub_decoder_queue *q, u32 n) -{ - u32 prod = (Q_WRP(q->qs, q->prod.cmdq_wr_idx) | - Q_IDX(q->qs, q->prod.cmdq_wr_idx)) + n; - return Q_WRP(q->qs, prod) | Q_IDX(q->qs, prod); -} - -#define CMD_0_OP GENMASK_ULL(7, 0) -#define CMD_0_ADDR_BASE GENMASK_ULL(59, 32) -#define CMD_1_ADDR_LIMT GENMASK_ULL(27, 
0) - -static void decoder_cmdq_issue_cmd(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct tlbi_partial_entry entry = {}; - u64 cmd[CMDQ_ENT_DWORDS] = {}; - void *pi; - int i; - - entry.op = op; - entry.tlbi_addr_base = (addr & TLBI_ADDR_MASK) >> TLBI_ADDR_OFFSET; - entry.tlbi_addr_limt = ((addr + size - 1U) & TLBI_ADDR_MASK) >> - TLBI_ADDR_OFFSET; - - cmd[0] |= FIELD_PREP(CMD_0_OP, entry.op); - cmd[0] |= FIELD_PREP(CMD_0_ADDR_BASE, entry.tlbi_addr_base); - cmd[1] |= FIELD_PREP(CMD_1_ADDR_LIMT, entry.tlbi_addr_limt); - - pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct tlbi_partial_entry); - - for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); -} - -#define NTF_DMA_ADDR_OFFSERT 2 -#define SYNC_0_OP GENMASK_ULL(7, 0) -#define SYNC_0_CM GENMASK_ULL(13, 12) -#define SYNC_0_NTF_ISH GENMASK_ULL(15, 14) -#define SYNC_0_NTF_ATTR GENMASK_ULL(19, 16) -#define SYNC_0_NTF_DATA GENMASK_ULL(63, 32) -#define SYNC_1_NTF_ADDR GENMASK_ULL(51, 2) -#define SYNC_NTF_DATA 0xffffffff - -static void decoder_cmdq_issue_sync(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - u64 cmd[CMDQ_ENT_DWORDS] = {}; - struct sync_entry entry = {}; - phys_addr_t sync_dma; - void __iomem *pi; - int i; - - entry.op = SYNC; - entry.cm = ENABLE_NOTIFY; - sync_dma = cmdq->base + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct sync_entry); - entry.ntf_sh = NTF_SH_NSH; - entry.ntf_attr = NTF_ATTR_IR_NC | NTF_ATTR_OR_NC; - entry.notify_data = SYNC_NTF_DATA; - entry.notify_addr = sync_dma >> NTF_DMA_ADDR_OFFSERT; - - cmd[0] |= FIELD_PREP(SYNC_0_OP, entry.op); - cmd[0] |= FIELD_PREP(SYNC_0_CM, entry.cm); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_ISH, entry.ntf_sh); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_ATTR, entry.ntf_attr); - cmd[0] |= FIELD_PREP(SYNC_0_NTF_DATA, 
entry.notify_data); - cmd[1] |= FIELD_PREP(SYNC_1_NTF_ADDR, entry.notify_addr); - - pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * - sizeof(struct sync_entry); - for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - decoder->notify = pi; - cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); -} - -static void decoder_cmdq_update_prod(struct ub_decoder *decoder) -{ - struct ub_entity *uent = decoder->uent; - struct queue_idx q; - int ret; - - ret = ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &q.val); - if (ret) - ub_err(uent, "update pi, read decoder cmdq prod failed\n"); - - decoder->cmdq.prod.cmdq_err_resp = q.cmdq_err_resp; - ret = ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, - decoder->cmdq.prod.val); - if (ret) - ub_err(uent, "update pi, write decoder cmdq prod failed\n"); -} - -static int wait_for_cmdq_free(struct ub_decoder *decoder, u32 n) -{ - ktime_t timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct ub_entity *uent = decoder->uent; - int ret; - - while (true) { - ret = ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, - &(cmdq->cons.val)); - if (ret) - return ret; - - if (queue_has_space(cmdq, n + 1)) - return 0; - - if (ktime_compare(ktime_get(), timeout) > 0) { - ub_err(uent, "decoder cmdq wait free entry timeout\n"); - return -ETIMEDOUT; - } - cpu_relax(); - } -} - -static int wait_for_cmdq_notify(struct ub_decoder *decoder) -{ - ktime_t timeout; - u32 val; - - timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); - while (true) { - val = readl(decoder->notify); - if (val == SYNC_NTF_DATA) - return 0; - - if (ktime_compare(ktime_get(), timeout) > 0) { - ub_err(decoder->uent, "decoder cmdq wait notify timeout\n"); - return -ETIMEDOUT; - } - cpu_relax(); - } -} - -int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op) -{ - int ret; - - ret = wait_for_cmdq_free(decoder, 1); - if (ret) - 
return ret; - - decoder_cmdq_issue_cmd(decoder, addr, size, op); - decoder_cmdq_issue_sync(decoder); - decoder_cmdq_update_prod(decoder); - - ret = wait_for_cmdq_notify(decoder); - return ret; -} -EXPORT_SYMBOL_GPL(ub_decoder_cmd_request); - -static bool queue_empty(struct ub_decoder_queue *q) -{ - return (Q_IDX(q->qs, q->prod.eventq_wr_idx) == - Q_IDX(q->qs, q->cons.eventq_rd_idx)) && - (Q_WRP(q->qs, q->prod.eventq_wr_idx) == - Q_WRP(q->qs, q->cons.eventq_rd_idx)); -} - -static void queue_inc_cons(struct ub_decoder_queue *q) -{ - u32 cons = (Q_WRP(q->qs, q->cons.eventq_rd_idx) | - Q_IDX(q->qs, q->cons.eventq_rd_idx)) + 1; - q->cons.eventq_rd_idx = Q_WRP(q->qs, cons) | Q_IDX(q->qs, cons); -} - -enum event_op_type { - RESERVED = 0x00, - EVENT_ADDR_OUT_OF_RANGE = 0x01, - EVENT_ILLEGAL_CMD = 0x02, -}; - -#define EVTQ_0_ID GENMASK_ULL(7, 0) -#define EVTQ_0_ADDR GENMASK_ULL(59, 32) -#define EVTQ_0_CMD_OPCODE GENMASK_ULL(39, 32) -#define EVTQ_ENT_DWORDS 2 -#define MAX_REASON_NUM 3 - -static const char * const cmd_err_reason[MAX_REASON_NUM] = { - "no error", - "illegal command", - "abort error(read command with 2bit ecc)" -}; - -static void fix_err_cmd(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *cmdq = &(decoder->cmdq); - struct ub_entity *uent = decoder->uent; - u64 cmd[CMDQ_ENT_DWORDS] = {}; - struct queue_idx prod, cons; - void *pi; - int i; - - if (ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, &cons.val)) { - ub_err(uent, "decoder fix error cmd, read ci failed\n"); - return; - } - if (ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &prod.val)) { - ub_err(uent, "decoder fix error cmd, read pi failed\n"); - return; - } - - cmd[0] |= FIELD_PREP(CMD_0_OP, TLBI_ALL); - pi = cmdq->qbase + Q_IDX(cmdq->qs, cons.cmdq_rd_idx) * - sizeof(struct tlbi_partial_entry); - - for (i = 0; i < CMDQ_ENT_DWORDS; i++) - writeq(cmd[i], pi + i * sizeof(u64)); - - if (cons.cmdq_err_reason >= MAX_REASON_NUM) - ub_err(uent, "cmdq err reason is invalid, reason=%u\n", - 
cons.cmdq_err_reason); - else - ub_err(uent, "cmdq err reason is %s\n", cmd_err_reason[cons.cmdq_err_reason]); - - prod.cmdq_err_resp = cons.cmdq_err; - - if (ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, prod.val)) - ub_err(uent, "decoder fix error cmd, write pi err resp failed\n"); -} - -static void handle_evt(struct ub_decoder *decoder, u64 *evt) -{ - struct ub_entity *uent = decoder->uent; - - switch (FIELD_GET(EVTQ_0_ID, evt[0])) { - case EVENT_ADDR_OUT_OF_RANGE: - ub_err(uent, "decoder event, input addr out of range, addr=%#.7x00000\n", - (u32)FIELD_GET(EVTQ_0_ADDR, evt[0])); - break; - case EVENT_ILLEGAL_CMD: - ub_err(uent, "decoder event, illegal cmd, cmd_opcode=%#x\n", - (u32)FIELD_GET(EVTQ_0_CMD_OPCODE, evt[0])); - fix_err_cmd(decoder); - break; - default: - ub_err(uent, "invalid event opcode, opcode=%#x\n", - (u32)FIELD_GET(EVTQ_0_ID, evt[0])); - } -} - -static void decoder_event_deal(struct ub_decoder *decoder) -{ - struct ub_decoder_queue *evtq = &decoder->evtq; - struct ub_entity *uent = decoder->uent; - u64 evt[EVTQ_ENT_DWORDS]; - void *ci; - int i; - - if (ub_cfg_read_dword(uent, DECODER_EVENTQ_PROD, &(evtq->prod.val))) { - ub_err(uent, "decoder handle event, read eventq pi failed\n"); - return; - } - - while (!queue_empty(evtq)) { - ci = evtq->qbase + Q_IDX(evtq->qs, evtq->cons.eventq_rd_idx) * - EVT_ENTRY_SIZE; - - for (i = 0; i < EVTQ_ENT_DWORDS; i++) - evt[i] = readq(ci + i * sizeof(u64)); - - handle_evt(decoder, evt); - queue_inc_cons(evtq); - - if (ub_cfg_write_dword(uent, DECODER_EVENTQ_CONS, - evtq->cons.val)) - ub_err(uent, "decoder handle event, write eventq ci failed\n"); - } -} static irqreturn_t decoder_event_deal_handle(int irq, void *data) { struct ub_entity *uent = (struct ub_entity *)data; struct ub_decoder *decoder = uent->ubc->decoder; - if (!decoder) { ub_err(uent, "decoder does not exist\n"); - return IRQ_HANDLED; + return IRQ_NONE; + } + + if (!uent->ubc->ops->decoder_event_deal) { + ub_err(uent, "decoder event deal does not 
exist\n"); + return IRQ_NONE; } - decoder_event_deal(decoder); + uent->ubc->ops->decoder_event_deal(decoder); return IRQ_HANDLED; } @@ -885,8 +520,8 @@ int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) } ubc = decoder->uent->ubc; - if (!ubc->ops->decoder_map) { - pr_err("decoder_map ops not exist\n"); + if (!ubc->ops->decoder_map && !ubc->ops->decoder_unmap) { + pr_err("decoder_map or decoder_unmap ops not exist\n"); return -EINVAL; } diff --git a/drivers/ub/ubus/decoder.h b/drivers/ub/ubus/decoder.h index 6667d07e9219..48ffe9102a46 100644 --- a/drivers/ub/ubus/decoder.h +++ b/drivers/ub/ubus/decoder.h @@ -107,8 +107,6 @@ void ub_decoder_init(struct ub_entity *uent); void ub_decoder_uninit(struct ub_entity *uent); void ub_init_decoder_usi(struct ub_entity *uent); void ub_uninit_decoder_usi(struct ub_entity *uent); -int ub_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, - u64 size, enum ub_cmd_op_type op); int ub_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); int ub_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); #endif /* __DECODER_H__ */ diff --git a/drivers/ub/ubus/ubus_controller.h b/drivers/ub/ubus/ubus_controller.h index 04eb4a3d7648..7ef19e3eaa73 100644 --- a/drivers/ub/ubus/ubus_controller.h +++ b/drivers/ub/ubus/ubus_controller.h @@ -18,8 +18,8 @@ struct ub_bus_controller_ops { void (*mem_decoder_remove)(struct ub_bus_controller *ubc); void (*register_ubmem_irq)(struct ub_bus_controller *ubc); void (*unregister_ubmem_irq)(struct ub_bus_controller *ubc); - void (*register_decoder_base_addr)(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue); + int (*init_decoder_queue)(struct ub_decoder *decoder); + void (*uninit_decoder_queue)(struct ub_decoder *decoder); int (*entity_enable)(struct ub_entity *uent, u8 enable); int (*create_decoder_table)(struct ub_decoder *decoder); void (*free_decoder_table)(struct ub_decoder *decoder); @@ -27,6 +27,7 @@ struct 
ub_bus_controller_ops { struct decoder_map_info *info); int (*decoder_unmap)(struct ub_decoder *decoder, phys_addr_t addr, u64 size); + void (*decoder_event_deal)(struct ub_decoder *decoder); KABI_RESERVE(1) KABI_RESERVE(2) diff --git a/drivers/ub/ubus/vendor/hisilicon/controller.c b/drivers/ub/ubus/vendor/hisilicon/controller.c index 6c9c8e320479..b9a4e6dc02d0 100644 --- a/drivers/ub/ubus/vendor/hisilicon/controller.c +++ b/drivers/ub/ubus/vendor/hisilicon/controller.c @@ -22,12 +22,14 @@ static struct ub_bus_controller_ops hi_ubc_ops = { .mem_decoder_remove = hi_mem_decoder_remove, .register_ubmem_irq = hi_register_ubmem_irq, .unregister_ubmem_irq = hi_unregister_ubmem_irq, - .register_decoder_base_addr = hi_register_decoder_base_addr, + .init_decoder_queue = hi_init_decoder_queue, + .uninit_decoder_queue = hi_uninit_decoder_queue, .entity_enable = hi_send_entity_enable_msg, .create_decoder_table = hi_create_decoder_table, .free_decoder_table = hi_free_decoder_table, .decoder_map = hi_decoder_map, .decoder_unmap = hi_decoder_unmap, + .decoder_event_deal = hi_decoder_event_deal, }; static void ub_bus_controller_debugfs_init(struct ub_bus_controller *ubc) diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c index ac1fa0498ffc..00f958696b66 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) "ubus hisi decoder: " fmt #include +#include #include #include "../../ubus.h" #include "hisi-ubus.h" @@ -136,6 +137,10 @@ struct range_table_entry { DECODER_SUB_PAGE_TABLE_MASK) >> \ DECODER_SUB_PAGE_TABLE_LOC) +#define DECODER_QUEUE_TIMEOUT_US 1000000 /* 1s */ +#define CMD_ENTRY_SIZE 16 +#define EVT_ENTRY_SIZE 16 + static void fill_page_entry(struct page_entry *page, struct decoder_map_info *info, u64 offset) { @@ -616,13 +621,37 @@ static void ub_decoder_init_page_table(struct ub_decoder *decoder, void *pgtlb_b } } -void 
hi_register_decoder_base_addr(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue) +int hi_init_decoder_queue(struct ub_decoder *decoder) { - struct hi_ubc_private_data *data = (struct hi_ubc_private_data *)ubc->data; + struct hi_ubc_private_data *data; + struct ub_bus_controller *ubc; + + if (!decoder) + return -EINVAL; + + ubc = decoder->uent->ubc; + data = (struct hi_ubc_private_data *)ubc->data; + decoder->cmdq.base = data->io_decoder_cmdq; + decoder->evtq.base = data->io_decoder_evtq; + + decoder->cmdq.qbase = ioremap(decoder->cmdq.base, + (1 << decoder->cmdq.qs) * CMD_ENTRY_SIZE); + if (!decoder->cmdq.qbase) + return -ENOMEM; + + decoder->evtq.qbase = ioremap(decoder->evtq.base, + (1 << decoder->evtq.qs) * EVT_ENTRY_SIZE); + if (!decoder->evtq.qbase) { + iounmap(decoder->cmdq.qbase); + return -ENOMEM; + } + return 0; +} - *cmd_queue = data->io_decoder_cmdq; - *event_queue = data->io_decoder_evtq; +void hi_uninit_decoder_queue(struct ub_decoder *decoder) +{ + iounmap(decoder->cmdq.qbase); + iounmap(decoder->evtq.qbase); } int hi_create_decoder_table(struct ub_decoder *decoder) @@ -697,7 +726,7 @@ int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size) ret = handle_table(decoder, &info, false); if (ret) return ret; - return ub_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); + return hi_decoder_cmd_request(decoder, addr, size, TLBI_PARTIAL); } int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) @@ -712,3 +741,356 @@ int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info) return handle_table(decoder, info, true); } + +struct sync_entry { + u64 op : 8; + u64 reserve0 : 4; + u64 cm : 2; + u64 ntf_sh : 2; + u64 ntf_attr : 4; + u64 reserve1 : 12; + u64 notify_data : 32; + u64 reserve2 : 2; + u64 notify_addr : 50; + u64 reserve3 : 12; +}; + +struct tlbi_all_entry { + u32 op : 8; + u32 reserve0 : 24; + u32 reserve1; + u32 reserve2; + u32 reserve3; +}; + +struct tlbi_partial_entry 
{ + u32 op : 8; + u32 reserve0 : 24; + u32 tlbi_addr_base : 28; + u32 reserve1 : 4; + u32 tlbi_addr_limt : 28; + u32 reserve2 : 4; + u32 reserve3; +}; + +#define TLBI_ADDR_MASK GENMASK_ULL(43, 20) +#define TLBI_ADDR_OFFSET 20 +#define CMDQ_ENT_DWORDS 2 + +#define NTF_SH_NSH 0b00 +#define NTF_SH_OSH 0b10 +#define NTF_SH_ISH 0b11 + +#define NTF_ATTR_IR_NC 0b00 +#define NTF_ATTR_IR_WBRA 0b01 +#define NTF_ATTR_IR_WT 0b10 +#define NTF_ATTR_IR_WB 0b11 +#define NTF_ATTR_OR_NC 0b0000 +#define NTF_ATTR_OR_WBRA 0b0100 +#define NTF_ATTR_OR_WT 0b1000 +#define NTF_ATTR_OR_WB 0b1100 + +#define Q_IDX(qs, p) ((p) & ((1 << (qs)) - 1)) +#define Q_WRP(qs, p) ((p) & (1 << (qs))) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) + +enum NOTIFY_TYPE { + DISABLE_NOTIFY = 0, + ENABLE_NOTIFY = 1, +}; + +static bool queue_has_space(struct ub_decoder_queue *q, u32 n) +{ + u32 space, prod, cons; + + prod = Q_IDX(q->qs, q->prod.cmdq_wr_idx); + cons = Q_IDX(q->qs, q->cons.cmdq_rd_idx); + + if (Q_WRP(q->qs, q->prod.cmdq_wr_idx) == + Q_WRP(q->qs, q->cons.cmdq_rd_idx)) + space = (1 << q->qs) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + +static u32 queue_inc_prod_n(struct ub_decoder_queue *q, u32 n) +{ + u32 prod = (Q_WRP(q->qs, q->prod.cmdq_wr_idx) | + Q_IDX(q->qs, q->prod.cmdq_wr_idx)) + n; + return Q_WRP(q->qs, prod) | Q_IDX(q->qs, prod); +} + +#define CMD_0_OP GENMASK_ULL(7, 0) +#define CMD_0_ADDR_BASE GENMASK_ULL(59, 32) +#define CMD_1_ADDR_LIMT GENMASK_ULL(27, 0) + +static void decoder_cmdq_issue_cmd(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct tlbi_partial_entry entry = {}; + u64 cmd[CMDQ_ENT_DWORDS] = {}; + void *pi; + int i; + + entry.op = op; + entry.tlbi_addr_base = (addr & TLBI_ADDR_MASK) >> TLBI_ADDR_OFFSET; + entry.tlbi_addr_limt = ((addr + size - 1U) & TLBI_ADDR_MASK) >> + TLBI_ADDR_OFFSET; + + cmd[0] |= FIELD_PREP(CMD_0_OP, entry.op); + cmd[0] |= 
FIELD_PREP(CMD_0_ADDR_BASE, entry.tlbi_addr_base); + cmd[1] |= FIELD_PREP(CMD_1_ADDR_LIMT, entry.tlbi_addr_limt); + + pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct tlbi_partial_entry); + + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); +} + +#define NTF_DMA_ADDR_OFFSERT 2 +#define SYNC_0_OP GENMASK_ULL(7, 0) +#define SYNC_0_CM GENMASK_ULL(13, 12) +#define SYNC_0_NTF_ISH GENMASK_ULL(15, 14) +#define SYNC_0_NTF_ATTR GENMASK_ULL(19, 16) +#define SYNC_0_NTF_DATA GENMASK_ULL(63, 32) +#define SYNC_1_NTF_ADDR GENMASK_ULL(51, 2) +#define SYNC_NTF_DATA 0xffffffff + +static void decoder_cmdq_issue_sync(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + u64 cmd[CMDQ_ENT_DWORDS] = {}; + struct sync_entry entry = {}; + phys_addr_t sync_dma; + void __iomem *pi; + int i; + + entry.op = SYNC; + entry.cm = ENABLE_NOTIFY; + sync_dma = cmdq->base + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct sync_entry); + entry.ntf_sh = NTF_SH_NSH; + entry.ntf_attr = NTF_ATTR_IR_NC | NTF_ATTR_OR_NC; + entry.notify_data = SYNC_NTF_DATA; + entry.notify_addr = sync_dma >> NTF_DMA_ADDR_OFFSERT; + + cmd[0] |= FIELD_PREP(SYNC_0_OP, entry.op); + cmd[0] |= FIELD_PREP(SYNC_0_CM, entry.cm); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_ISH, entry.ntf_sh); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_ATTR, entry.ntf_attr); + cmd[0] |= FIELD_PREP(SYNC_0_NTF_DATA, entry.notify_data); + cmd[1] |= FIELD_PREP(SYNC_1_NTF_ADDR, entry.notify_addr); + + pi = cmdq->qbase + Q_IDX(cmdq->qs, cmdq->prod.cmdq_wr_idx) * + sizeof(struct sync_entry); + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + decoder->notify = pi; + cmdq->prod.cmdq_wr_idx = queue_inc_prod_n(cmdq, 1); +} + +static void decoder_cmdq_update_prod(struct ub_decoder *decoder) +{ + struct ub_entity *uent = decoder->uent; + struct queue_idx q; + int ret; + + ret = ub_cfg_read_dword(uent, 
DECODER_CMDQ_PROD, &q.val); + if (ret) + ub_err(uent, "update pi, read decoder cmdq prod fail\n"); + + decoder->cmdq.prod.cmdq_err_resp = q.cmdq_err_resp; + ret = ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, + decoder->cmdq.prod.val); + if (ret) + ub_err(uent, "update pi, write decoder cmdq prod fail\n"); +} + +static int wait_for_cmdq_free(struct ub_decoder *decoder, u32 n) +{ + ktime_t timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct ub_entity *uent = decoder->uent; + int ret; + + while (true) { + ret = ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, + &(cmdq->cons.val)); + if (ret) + return ret; + + if (queue_has_space(cmdq, n + 1)) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ub_err(uent, "decoder cmdq wait free entry timeout\n"); + return -ETIMEDOUT; + } + cpu_relax(); + } +} + +static int wait_for_cmdq_notify(struct ub_decoder *decoder) +{ + ktime_t timeout; + u32 val; + + timeout = ktime_add_us(ktime_get(), DECODER_QUEUE_TIMEOUT_US); + while (true) { + val = readl(decoder->notify); + if (val == SYNC_NTF_DATA) + return 0; + + if (ktime_compare(ktime_get(), timeout) > 0) { + ub_err(decoder->uent, "decoder cmdq wait notify timeout\n"); + return -ETIMEDOUT; + } + cpu_relax(); + } +} + +int hi_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op) +{ + int ret; + + ret = wait_for_cmdq_free(decoder, 1); + if (ret) + return ret; + + decoder_cmdq_issue_cmd(decoder, addr, size, op); + decoder_cmdq_issue_sync(decoder); + decoder_cmdq_update_prod(decoder); + + ret = wait_for_cmdq_notify(decoder); + return ret; +} +#ifdef UBUS_KP_TOOL_STUB +EXPORT_SYMBOL_GPL(hi_decoder_cmd_request); +#endif + +static bool queue_empty(struct ub_decoder_queue *q) +{ + return (Q_IDX(q->qs, q->prod.eventq_wr_idx) == + Q_IDX(q->qs, q->cons.eventq_rd_idx)) && + (Q_WRP(q->qs, q->prod.eventq_wr_idx) == + Q_WRP(q->qs, q->cons.eventq_rd_idx)); +} + +static void 
queue_inc_cons(struct ub_decoder_queue *q) +{ + u32 cons = (Q_WRP(q->qs, q->cons.eventq_rd_idx) | + Q_IDX(q->qs, q->cons.eventq_rd_idx)) + 1; + q->cons.eventq_rd_idx = Q_WRP(q->qs, cons) | Q_IDX(q->qs, cons); +} + +enum event_op_type { + RESERVED = 0x0, + EVENT_ADDR_OUT_OF_RANGE = 0x01, + EVENT_ILLEGAL_CMD = 0x02, +}; + +#define EVTQ_0_ID GENMASK_ULL(7, 0) +#define EVTQ_0_ADDR GENMASK_ULL(59, 32) +#define EVTQ_0_CMD_OPCODE GENMASK_ULL(39, 32) +#define EVTQ_ENT_DWORDS 2 +#define MAX_REASON_NUM 3 + +static const char *cmd_err_reason[MAX_REASON_NUM] = { + "no error", + "illegal command", + "abort error(read command with 2bit ecc)" +}; + +static void fix_err_cmd(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *cmdq = &(decoder->cmdq); + struct ub_entity *uent = decoder->uent; + u64 cmd[CMDQ_ENT_DWORDS] = {}; + struct queue_idx prod, cons; + void *pi; + int i; + + if (ub_cfg_read_dword(uent, DECODER_CMDQ_CONS, &cons.val)) { + ub_err(uent, "decoder fix error cmd, read ci failed\n"); + return; + } + if (ub_cfg_read_dword(uent, DECODER_CMDQ_PROD, &prod.val)) { + ub_err(uent, "decoder fix error cmd, read pi failed\n"); + return; + } + + cmd[0] |= FIELD_PREP(CMD_0_OP, TLBI_ALL); + pi = cmdq->qbase + Q_IDX(cmdq->qs, cons.cmdq_rd_idx) * + sizeof(struct tlbi_partial_entry); + + for (i = 0; i < CMDQ_ENT_DWORDS; i++) + writeq(cmd[i], pi + i * sizeof(u64)); + + if (cons.cmdq_err_reason >= MAX_REASON_NUM) + ub_err(uent, "cmdq err reason is invalid, reason=%u\n", + cons.cmdq_err_reason); + else + ub_err(uent, "cmdq err reason is %s\n", cmd_err_reason[cons.cmdq_err_reason]); + + prod.cmdq_err_resp = cons.cmdq_err; + + if (ub_cfg_write_dword(uent, DECODER_CMDQ_PROD, prod.val)) + ub_err(uent, "decoder fix error cmd, write pi err resp failed\n"); +} + +static void handle_evt(struct ub_decoder *decoder, u64 *evt) +{ + struct ub_entity *uent = decoder->uent; + + switch (FIELD_GET(EVTQ_0_ID, evt[0])) { + case EVENT_ADDR_OUT_OF_RANGE: + ub_err(uent, "decoder event, input addr out 
of range, addr=%#.7x00000\n", + (u32)FIELD_GET(EVTQ_0_ADDR, evt[0])); + break; + case EVENT_ILLEGAL_CMD: + ub_err(uent, "decoder event, illegal cmd, cmd_opcode=%#x\n", + (u32)FIELD_GET(EVTQ_0_CMD_OPCODE, evt[0])); + fix_err_cmd(decoder); + break; + default: + ub_err(uent, "invalid event opcode, opcode=%#x\n", + (u32)FIELD_GET(EVTQ_0_ID, evt[0])); + } +} + +void hi_decoder_event_deal(struct ub_decoder *decoder) +{ + struct ub_decoder_queue *evtq = &decoder->evtq; + struct ub_entity *uent = decoder->uent; + u64 evt[EVTQ_ENT_DWORDS]; + void *ci; + int i; + + if (ub_cfg_read_dword(uent, DECODER_EVENTQ_PROD, &(evtq->prod.val))) { + ub_err(uent, "decoder handle event, read eventq pi fail\n"); + return; + } + + while (!queue_empty(evtq)) { + ci = evtq->qbase + Q_IDX(evtq->qs, evtq->cons.eventq_rd_idx) * + EVT_ENTRY_SIZE; + + for (i = 0; i < EVTQ_ENT_DWORDS; i++) + evt[i] = readq(ci + i * sizeof(u64)); + + handle_evt(decoder, evt); + queue_inc_cons(evtq); + + if (ub_cfg_write_dword(uent, DECODER_EVENTQ_CONS, + evtq->cons.val)) + ub_err(uent, "decoder handle event, write eventq ci fail\n"); + } +} diff --git a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h index 50658ef7b9cb..fc49a25b80d6 100644 --- a/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h +++ b/drivers/ub/ubus/vendor/hisilicon/hisi-decoder.h @@ -38,8 +38,8 @@ DECODER_PAGE_SIZE * \ RGTLB_TO_PGTLB) -void hi_register_decoder_base_addr(struct ub_bus_controller *ubc, - u64 *cmd_queue, u64 *event_queue); +int hi_init_decoder_queue(struct ub_decoder *decoder); +void hi_uninit_decoder_queue(struct ub_decoder *decoder); int hi_create_decoder_table(struct ub_decoder *decoder); void hi_free_decoder_table(struct ub_decoder *decoder); @@ -47,4 +47,8 @@ void hi_free_decoder_table(struct ub_decoder *decoder); int hi_decoder_map(struct ub_decoder *decoder, struct decoder_map_info *info); int hi_decoder_unmap(struct ub_decoder *decoder, phys_addr_t addr, u64 size); +int 
hi_decoder_cmd_request(struct ub_decoder *decoder, phys_addr_t addr, + u64 size, enum ub_cmd_op_type op); +void hi_decoder_event_deal(struct ub_decoder *decoder); + #endif /* __HISI_DECODER_H__ */ -- Gitee From 903b1dc415f2cbaa36410ee3e760a2cdf88f56ec Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 23 Dec 2025 13:56:35 +0800 Subject: [PATCH 099/126] ub:ubus: fix ub_get_bus_controller getting null commit e7ad8b587f8fd0ef0cb74b894c7e53363c1a9c68 openEuler fix ub_get_bus_controller getting null Fixes: 26a640c9d1b1 ("ub:ubus: add ubus controller framework") Signed-off-by: Jianquan Lin Signed-off-by: zhao-lichang <943677312@qq.com> --- drivers/ub/ubus/ubus_driver.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/ub/ubus/ubus_driver.c b/drivers/ub/ubus/ubus_driver.c index 974020bf3c38..6d5594156c3d 100644 --- a/drivers/ub/ubus/ubus_driver.c +++ b/drivers/ub/ubus/ubus_driver.c @@ -61,6 +61,12 @@ int ub_get_bus_controller(struct ub_entity *ubc_dev[], unsigned int max_num, { struct ub_bus_controller *ubc; unsigned int ubc_num = 0; + int ret; + + if (!manage_subsystem_ops) { + pr_err("manage subsystem ops is null\n"); + return -EINVAL; + } if (!real_num || !ubc_dev) { pr_err("%s: input parameters invalid\n", __func__); @@ -70,16 +76,25 @@ int ub_get_bus_controller(struct ub_entity *ubc_dev[], unsigned int max_num, list_for_each_entry(ubc, &ubc_list, node) { if (ubc_num >= max_num) { pr_err("ubc list num over max num %u\n", max_num); - ub_put_bus_controller(ubc_dev, max_num); - return -ENOMEM; + ret = -ENOMEM; + goto ubc_put; } - ubc_dev[ubc_num] = ub_entity_get(ubc->uent); + if (!ub_entity_get(ubc->uent)) { + pr_err("The ub_entity of ubc is null\n"); + ret = -EINVAL; + goto ubc_put; + } + ubc_dev[ubc_num] = ubc->uent; ubc_num++; } *real_num = ubc_num; return 0; + +ubc_put: + ub_put_bus_controller(ubc_dev, max_num); + return ret; } EXPORT_SYMBOL_GPL(ub_get_bus_controller); -- Gitee From 
b9b1424101d482cd283fa3c30fa5bf7086fb6679 Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Fri, 5 Dec 2025 15:22:43 +0800 Subject: [PATCH 100/126] ub: ub_fwctl: Modify the problem of incorrect data when querying entry information commit 7eb5e8a2cf2f97fa71075bbd55d292f58e10dd9d openEuler Modify the problem of incorrect data when querying entry information. Fixes: 7ed154d74ca3 ("ub: ub_fwctl: query the MSG queue information and entry details within UB.") Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang --- drivers/fwctl/ub/ub_cmd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/fwctl/ub/ub_cmd.c b/drivers/fwctl/ub/ub_cmd.c index 5b3895107b31..4447c5da391b 100644 --- a/drivers/fwctl/ub/ub_cmd.c +++ b/drivers/fwctl/ub/ub_cmd.c @@ -608,13 +608,14 @@ static int ubctl_msgq_entry_move_data(struct ubctl_query_cmd_param *query_cmd_pa { u32 msgq_entry_data_size = block_size + offset * sizeof(u32); u32 *data_offset = query_cmd_param->out->data + offset; + u32 block_num = block_size / sizeof(u32); u32 i; if (msgq_entry_data_size > query_cmd_param->out_len) return -EINVAL; - for (i = 0; i < block_size / sizeof(u32); i++) - data_offset[i] = readl(entry_addr + i); + for (i = 0; i < block_num; i++) + data_offset[i] = readl((void __iomem *)((u32 *)entry_addr + i)); return 0; } -- Gitee From ca9433616b4ca61528a16d4b973f765474e5eefe Mon Sep 17 00:00:00 2001 From: Jiaqi Cheng Date: Tue, 9 Dec 2025 19:11:54 +0800 Subject: [PATCH 101/126] ub: ub_fwctl: Modify TP/TA/SCC register query process.
Fixes: 12da5b6ce2af ("ub: ub_fwctl: supports querying TP, BA related register information") Signed-off-by: Jiaqi Cheng Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/fwctl/ub/ub_cmd_reg.c | 45 +++++++++++++++++++++++++++++++++++ drivers/fwctl/ub/ub_cmdq.h | 12 +++++++++- drivers/fwctl/ub/ub_common.c | 6 +++++ drivers/fwctl/ub/ub_common.h | 4 ++++ include/uapi/fwctl/ub_fwctl.h | 12 ++++++++++ 5 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/fwctl/ub/ub_cmd_reg.c b/drivers/fwctl/ub/ub_cmd_reg.c index 026ac3f2fe90..c75de6543622 100644 --- a/drivers/fwctl/ub/ub_cmd_reg.c +++ b/drivers/fwctl/ub/ub_cmd_reg.c @@ -203,6 +203,7 @@ static int ubctl_query_tp_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, @@ -251,6 +252,7 @@ static int ubctl_query_tp_pkt_stats_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, }; return ubctl_query_data(ucdev, query_cmd_param, query_func, @@ -277,6 +279,7 @@ static int ubctl_query_ta_data(struct ubctl_dev *ucdev, { struct ubctl_query_dp query_dp[] = { { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_ABN_STATS_DFX, 
UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, }; @@ -290,6 +293,7 @@ static int ubctl_query_ta_pkt_stats(struct ubctl_dev *ucdev, { struct ubctl_query_dp query_dp[] = { { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, }; return ubctl_query_data(ucdev, query_cmd_param, query_func, @@ -572,6 +576,42 @@ static int ubctl_config_prbs(struct ubctl_dev *ucdev, return ret; } +static int ubctl_query_nl_ssu_sw(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_SW_DFX, UBCTL_NL_SSU_SW_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_nl_ssu_oq(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_OQ_DFX, UBCTL_NL_SSU_OQ_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + +static int ubctl_query_nl_ssu_p2p(struct ubctl_dev *ucdev, + struct ubctl_query_cmd_param *query_cmd_param, + struct ubctl_func_dispatch *query_func) +{ + struct ubctl_query_dp query_dp[] = { + { UBCTL_QUERY_NL_SSU_P2P_DFX, UBCTL_NL_SSU_P2P_LEN, UBCTL_READ, NULL, 0 }, + }; + + return ubctl_query_data(ucdev, query_cmd_param, query_func, + query_dp, ARRAY_SIZE(query_dp)); +} + static int ubctl_query_dump_data(struct ubctl_dev *ucdev, struct ubctl_query_cmd_param *query_cmd_param, struct ubctl_func_dispatch *query_func) @@ -585,6 +625,7 @@ static int ubctl_query_dump_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_RX_DFX, UBCTL_TP_RX_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RQM_DFX, UBCTL_TP_RQM_LEN, UBCTL_READ, NULL, 0 }, { 
UBCTL_QUERY_TP_STATE_DFX, UBCTL_TP_STATE_DFX_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TP_RX_EX_DFX, UBCTL_TP_RX_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_TX_ROUTE_DFX, UBCTL_TP_TX_ROUTE_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TP_RX_BANK_DFX, UBCTL_TP_RX_BANK_LEN, UBCTL_READ, NULL, 0 }, @@ -594,6 +635,7 @@ static int ubctl_query_dump_data(struct ubctl_dev *ucdev, { UBCTL_QUERY_TP_ABN_STATS_DFX, UBCTL_TP_REG_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_PKT_STATS_DFX, UBCTL_TA_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, + { UBCTL_QUERY_TA_PKT_STATS_EX_DFX, UBCTL_TA_PKT_STATS_EX_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_TA_ABN_STATS_DFX, UBCTL_TA_ABN_STATS_LEN, UBCTL_READ, NULL, 0 }, { UBCTL_QUERY_DL_PKT_STATS_DFX, UBCTL_DL_PKT_STATS_LEN, UBCTL_READ, NULL, 0 }, @@ -623,6 +665,9 @@ static struct ubctl_func_dispatch g_ubctl_query_reg[] = { { UTOOL_CMD_QUERY_NL_SSU_STATS, ubctl_query_nl_ssu_stats_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_NL_ABN, ubctl_query_nl_abn_data, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_SW, ubctl_query_nl_ssu_sw, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_OQ, ubctl_query_nl_ssu_oq, ubctl_query_data_deal }, + { UTOOL_CMD_QUERY_NL_SSU_P2P, ubctl_query_nl_ssu_p2p, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_DL, ubctl_query_dl_data, ubctl_query_data_deal }, { UTOOL_CMD_QUERY_DL_PKT_STATS, ubctl_query_dl_pkt_stats_data, diff --git a/drivers/fwctl/ub/ub_cmdq.h b/drivers/fwctl/ub/ub_cmdq.h index a8a4e63c42e3..691869977e65 100644 --- a/drivers/fwctl/ub/ub_cmdq.h +++ b/drivers/fwctl/ub/ub_cmdq.h @@ -9,9 +9,13 @@ #define UBCTL_QUERY_NL_PKT_STATS_DFX 0xA001 #define UBCTL_QUERY_NL_SSU_STATS_DFX 0xA002 #define UBCTL_QUERY_NL_ABN_DFX 0xA003 +#define UBCTL_QUERY_NL_SSU_SW_DFX 0xA028 +#define UBCTL_QUERY_NL_SSU_OQ_DFX 0xA029 +#define UBCTL_QUERY_NL_SSU_P2P_DFX 0xA02A #define UBCTL_QUERY_TP_TX_DFX 0xA004 #define UBCTL_QUERY_TP_RX_DFX 0xA005 +#define 
UBCTL_QUERY_TP_RX_EX_DFX 0xA02B #define UBCTL_QUERY_TP_TX_ROUTE_DFX 0xA01A #define UBCTL_QUERY_TP_RX_BANK_DFX 0xA01C #define UBCTL_QUERY_TP_ABN_STATS_DFX 0xA01D @@ -19,6 +23,7 @@ #define UBCTL_QUERY_TP_STATE_DFX 0xA024 #define UBCTL_QUERY_TA_PKT_STATS_DFX 0xA006 +#define UBCTL_QUERY_TA_PKT_STATS_EX_DFX 0xA02C #define UBCTL_QUERY_TA_ABN_STATS_DFX 0xA023 #define UBCTL_QUERY_DL_PKT_STATS_DFX 0xA007 @@ -58,9 +63,13 @@ #define UBCTL_NL_PKT_STATS_LEN 632 #define UBCTL_NL_SSU_STATS_LEN 408 #define UBCTL_NL_ABN_LEN 56 +#define UBCTL_NL_SSU_SW_LEN 24 +#define UBCTL_NL_SSU_OQ_LEN 24 +#define UBCTL_NL_SSU_P2P_LEN 24 #define UBCTL_TP_TX_STATS_LEN 904 #define UBCTL_TP_RX_STATS_LEN 704 +#define UBCTL_TP_RX_STATS_EX_LEN 120 #define UBCTL_TP_TX_ABN_LEN 948 #define UBCTL_TP_RX_ABN_LEN 760 #define UBCTL_TP_REG_LEN 24 @@ -70,7 +79,8 @@ #define UBCTL_TP_STATE_DFX_LEN 376 #define UBCTL_TA_PKT_STATS_LEN 920 -#define UBCTL_TA_ABN_STATS_LEN 168 +#define UBCTL_TA_PKT_STATS_EX_LEN 60 +#define UBCTL_TA_ABN_STATS_LEN 180 #define UBCTL_DL_PKT_STATS_LEN 984 #define UBCTL_DL_REPL_LEN 120 diff --git a/drivers/fwctl/ub/ub_common.c b/drivers/fwctl/ub/ub_common.c index 23d67829c8de..3827689c7565 100644 --- a/drivers/fwctl/ub/ub_common.c +++ b/drivers/fwctl/ub/ub_common.c @@ -96,6 +96,7 @@ static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev, struct ubctl_query_dp *query_dp, struct ubctl_query_cmd_dp *cmd_data, u32 offset) { +#define UTOOL_EOPNOTSUPP (-95) int *retval = &query_cmd_param->out->retval; struct ubctl_cmd cmd = {}; int ret = 0; @@ -109,6 +110,11 @@ static int ubctl_cmd_send_deal(struct ubctl_dev *ucdev, } *retval = ubctl_ubase_cmd_send(ucdev->adev, &cmd); + if (*retval == UTOOL_EOPNOTSUPP) { + ubctl_warn(ucdev, "opcode is not support.\n"); + *retval = 0; + } + if (*retval) { ubctl_err(ucdev, "ubctl ubase cmd send failed, retval = %d.\n", *retval); diff --git a/drivers/fwctl/ub/ub_common.h b/drivers/fwctl/ub/ub_common.h index ab10576e3914..225218431254 100644 --- 
a/drivers/fwctl/ub/ub_common.h +++ b/drivers/fwctl/ub/ub_common.h @@ -26,6 +26,10 @@ dev_info(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ ##__VA_ARGS__) +#define ubctl_warn(ucdev, format, ...) \ + dev_warn(&ucdev->fwctl.dev, "PID %u: " format, current->pid, \ + ##__VA_ARGS__) + #define UBCTL_GET_PHY_ADDR(high, low) ((((u64)(high)) << 32) | (low)) #define UBCTL_EXTRACT_BITS(value, start, end) \ (((value) >> (start)) & ((1UL << ((end) - (start) + 1)) - 1)) diff --git a/include/uapi/fwctl/ub_fwctl.h b/include/uapi/fwctl/ub_fwctl.h index 38787e5cc8ca..5457c04585fb 100644 --- a/include/uapi/fwctl/ub_fwctl.h +++ b/include/uapi/fwctl/ub_fwctl.h @@ -58,6 +58,18 @@ enum ub_fwctl_cmdrpc_type { * @UTOOL_CMD_QUERY_NL_ABN: Query NL layer NL_ABN related registers */ UTOOL_CMD_QUERY_NL_ABN = 0x0004, + /** + * @UTOOL_CMD_QUERY_NL_SSU_SW: Query ssu_sw non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_SW = 0x0005, + /** + * @UTOOL_CMD_QUERY_NL_SSU_OQ: Query ssu_oq non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_OQ = 0x0006, + /** + * @UTOOL_CMD_QUERY_NL_SSU_P2P: Query ssu_p2p queue non-empty dfx statistics + */ + UTOOL_CMD_QUERY_NL_SSU_P2P = 0x0007, /** * @UTOOL_CMD_QUERY_TP: Query all registers at the TP layer -- Gitee From 8da583b7be56ac63a7f328f1d551259f00031648 Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 18 Nov 2025 16:39:47 +0800 Subject: [PATCH 102/126] iommu/ummu: Fix ubmem unmap return value error commit 5272139386db86969e427b04578a8671f4221ef6 openEuler Fix ubmem_mmu_unmap_pages return value error. In the original code, mdom->iova_len is directly returned. 
mmu_domain_cfg_clear cleared mdom->iova_len Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c b/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c index 4fe5d4635ea4..b8d22b7bfc64 100644 --- a/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c +++ b/drivers/iommu/hisilicon/ubmem-mmu/ubmem_mmu.c @@ -516,6 +516,7 @@ static size_t ubmem_mmu_unmap_pages(struct iommu_domain *domain, { struct ubmem_mmu_domain *mdom = to_ubmem_mmu_domain(domain); struct maple_tree *mt = (struct maple_tree *)mdom->cached_pa_list; + unsigned long unmapped = 0; struct pa_info *info; MA_STATE(mas, mt, 0, 0); @@ -539,10 +540,11 @@ static size_t ubmem_mmu_unmap_pages(struct iommu_domain *domain, mdom->pte_valid = false; } + unmapped = mdom->iova_len; clear_cached_pa_list(mt); mmu_domain_cfg_clear(mdom); - return mdom->iova_len; + return unmapped; } static int ubmem_mmu_iotlb_sync_map(struct iommu_domain *domain, -- Gitee From 48d8021256ea889b8d4cfbc7f8651b389d0ce424 Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 18 Nov 2025 18:39:53 +0800 Subject: [PATCH 103/126] iommu/ummu: Fix 2P virtualization error commit 32a6cbe9d7b61fa24701f930c914f6f17d948ab4 openEuler In the 2-socket virtualization scenario, the invalidation command needs to be issued on both UMMUs. In the original code, the invalidation command is issued twice on one UMMU. 
Signed-off-by: Jingbin Wu Signed-off-by: Liming An --- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 1 + drivers/iommu/hisilicon/nested.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index 448ad6e5514b..cd859bee39ec 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -1003,6 +1003,7 @@ logic_ummu_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, if (!domain->pgsize_bitmap) domain->pgsize_bitmap = drv_ops->pgsize_bitmap; nested_base_domain = to_ummu_base_domain(domain); + nested_base_domain->core_dev = &ummu->core_dev; nested_base_domain->parent = logic_vummu->parent; if (!domain->ops) { ret = -EOPNOTSUPP; diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 366d47f7821e..0eed1d9f8d25 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -210,7 +210,7 @@ int ummu_viommu_cache_invalidate_user(struct iommu_domain *domain, nested_domain = to_nested_domain(domain); tecte_tag = nested_domain->s2_parent->cfgs.tecte_tag; - ummu = core_to_ummu_device(nested_domain->s2_parent->base_domain.core_dev); + ummu = core_to_ummu_device(nested_domain->base_domain.core_dev); cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); if (!cmds) -- Gitee From 18eea4c9d7c7a8c73291e0996320c01378f6c166 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 19 Nov 2025 14:49:53 +0800 Subject: [PATCH 104/126] iommu/ummu: Fix builds under different kconfigs commit 00f81c7cc17c2e969149b4b51a6b9ae1f1f5b64a openEuler 1.Fix builds under disable UB_UMMU_SVA 2.Fix builds under disable UB_UMMU_BYPASSDEV 3.Fix builds under disable UB_UMMU_CORE_DRIVER Signed-off-by: WangJie Signed-off-by: Liming An --- drivers/iommu/hisilicon/Kconfig | 4 ++-- drivers/iommu/hisilicon/iommu.c | 3 ++- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 2 ++ 
drivers/iommu/hisilicon/sva.h | 5 +++++ include/linux/ummu_core.h | 2 -- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/hisilicon/Kconfig b/drivers/iommu/hisilicon/Kconfig index 9b1a2acddcbc..e41f492a7ca6 100644 --- a/drivers/iommu/hisilicon/Kconfig +++ b/drivers/iommu/hisilicon/Kconfig @@ -24,7 +24,8 @@ config UB_UMMU select IOMMU_IO_PGTABLE_LPAE select GENERIC_MSI_IRQ select IOMMUFD_DRIVER if IOMMUFD - select UMMU_CORE + select UB_UMMU_CORE + select UB_UMMU_CORE_DRIVER select UB_UMMU_BASE help Support for implementations of the hisilicon UMMU architecture. @@ -39,7 +40,6 @@ config UB_UMMU_SVA select IOMMU_SVA select IOMMU_KSVA select IOMMU_IOPF - select MMU_NOTIFIER default n help Support for sharing process address spaces with devices using diff --git a/drivers/iommu/hisilicon/iommu.c b/drivers/iommu/hisilicon/iommu.c index 5581b8a6ae98..2a50d7bed835 100644 --- a/drivers/iommu/hisilicon/iommu.c +++ b/drivers/iommu/hisilicon/iommu.c @@ -506,12 +506,13 @@ static int ummu_dev_disable_feat(struct device *dev, static int ummu_def_domain_type(struct device *dev) { +#ifdef CONFIG_UB_UMMU_BYPASSDEV int ret; ret = ummu_bypass_dev_domain_type(dev); if (ret) return ret; - +#endif if (iommu_default_passthrough()) return IOMMU_DOMAIN_IDENTITY; return 0; diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index cd859bee39ec..fd66ca5fa05f 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -1287,6 +1287,7 @@ static int logic_ummu_def_domain_type(struct device *dev) return ops->def_domain_type(dev); } +#ifdef CONFIG_UB_UMMU_SVA static void logic_ummu_remove_dev_pasid(struct device *dev, ioasid_t pasid, struct iommu_domain *domain) { @@ -1322,6 +1323,7 @@ static void logic_ummu_remove_dev_pasid(struct device *dev, ioasid_t pasid, /* release the tid */ ummu_core_free_tid(&logic_ummu.core_dev, tid); } +#endif /* depend on MPAM 
static int logic_ummu_set_group_qos_params(struct iommu_group *group, diff --git a/drivers/iommu/hisilicon/sva.h b/drivers/iommu/hisilicon/sva.h index 2f6a9a383444..e91fa1e11920 100644 --- a/drivers/iommu/hisilicon/sva.h +++ b/drivers/iommu/hisilicon/sva.h @@ -44,6 +44,11 @@ static inline int ummu_master_enable_sva(struct ummu_master *master, return -ENODEV; } +static inline bool ummu_master_sva_enabled(struct ummu_master *master) +{ + return false; +} + static inline int ummu_master_disable_sva(struct ummu_master *master, enum iommu_dev_features feat) { diff --git a/include/linux/ummu_core.h b/include/linux/ummu_core.h index 29d0952e35e7..843a6ba336d0 100644 --- a/include/linux/ummu_core.h +++ b/include/linux/ummu_core.h @@ -423,8 +423,6 @@ enum ummu_device_config_type { #if IS_ENABLED(CONFIG_UB_UMMU_CORE_DRIVER) extern const struct tid_ops *ummu_core_tid_ops[TID_OPS_MAX]; -#else -static const struct tid_ops *ummu_core_tid_ops[TID_OPS_MAX]; #endif /* CONFIG_UB_UMMU_CORE_DRIVER */ static inline struct ummu_core_device *to_ummu_core(struct iommu_device *iommu) -- Gitee From 9dc27aff6c074938b321f01b3c01cd17508fdfdb Mon Sep 17 00:00:00 2001 From: Liming-An Date: Thu, 27 Nov 2025 21:50:11 +0800 Subject: [PATCH 105/126] iommu/ummu: Fix UMMU documentation error commit d6b2477ee785bf4f05cd9e6c0076aa21dde2bd1e openEuler This patch fix ummu documentation index error Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- Documentation/ub/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ub/index.rst b/Documentation/ub/index.rst index 22276b791363..ee12df0aa405 100644 --- a/Documentation/ub/index.rst +++ b/Documentation/ub/index.rst @@ -14,6 +14,6 @@ UnifiedBus Subsystem ubase/index ubfi/index ubus/index - ummu-core + ummu/index cdma/index urma/udma/index -- Gitee From 0e128f64bf4d54397162a5ed6dc6add306243ff2 Mon Sep 17 00:00:00 2001 From: Jianquan Lin Date: Tue, 23 Dec 2025 21:47:29 +0800 Subject: [PATCH 106/126] 
ub:ubus delete undefined class code commit 387bd97726ce3e73bad28ad8539d06d75b976efc openEuler Delete undefined class code macro. Fixes: e42bc0097589 ("ub:ubus: Support for ub bus driver framework") Signed-off-by: Junlong Zheng Signed-off-by: Jianquan Lin Signed-off-by: zhao-lichang <943677312@qq.com> --- include/ub/ubus/ubus_ids.h | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/include/ub/ubus/ubus_ids.h b/include/ub/ubus/ubus_ids.h index 5e5158ba5527..62ead65e5061 100644 --- a/include/ub/ubus/ubus_ids.h +++ b/include/ub/ubus/ubus_ids.h @@ -19,29 +19,16 @@ #define UB_BASE_CODE_STORAGE 0x01 #define UB_CLASS_STORAGE_LPC 0x0001 #define UB_CLASS_STORAGE_LBC 0x0101 -#define UB_CLASS_STORAGE_RAID 0x0201 #define UB_BASE_CODE_NETWORK 0x02 #define UB_CLASS_NETWORK_UB 0x0002 #define UB_CLASS_NETWORK_ETH 0x0102 -#define UB_BASE_CODE_DISPLAY 0x03 +#define UB_BASE_CODE_SWITCH 0x03 +#define UB_CLASS_SWITCH_UB 0x0003 -#define UB_BASE_CODE_SWITCH 0x04 -#define UB_CLASS_SWITCH_UB 0x0004 - -#define UB_BASE_CODE_VIRTIO 0x05 -#define UB_CLASS_LEGACY_VIRTIO_NETWORK 0x0005 -#define UB_CLASS_LEGACY_VIRTIO_BLOCK 0x0105 -#define UB_CLASS_LEGACY_VIRTIO_SCSI 0x0205 -#define UB_CLASS_LEGACY_VIRTIO_GRAPHIC 0x0305 -#define UB_CLASS_LEGACY_VIRTIO_SOCKET 0x0405 -#define UB_CLASS_LEGACY_VIRTIO_FS 0x0505 - -#define UB_BASE_CODE_VIRTUAL 0x06 - -#define UB_BASE_CODE_NPU 0x07 -#define UB_CLASS_NPU_UB 0x0007 +#define UB_BASE_CODE_NPU 0x04 +#define UB_CLASS_NPU_UB 0x0004 #define UB_BASE_CODE_UNKNOWN 0xFF #define UB_CLASS_UNKNOWN 0x00FF -- Gitee From a117193bef8f23b8fad91eeddf5bd2a11585fd6d Mon Sep 17 00:00:00 2001 From: Liming An Date: Tue, 9 Dec 2025 19:18:04 +0800 Subject: [PATCH 107/126] iommu/ummu: Delete unnecessary commands commit 8bc95ffa3088636af7537724f7f1b9e910e8b096 openEuler 1. The chip does not support the CMD_CFGI_TECTS_PIDM command on N6 and N7. 2. QEMU does not use the CMD_CFGI_TECTS_PIDM command. 
Fixes: 7de87cb06076 ("iommu/ummu: Support UMMU nested mode") Signed-off-by: Yanlong Zhu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/nested.c | 1 - drivers/iommu/hisilicon/queue.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 0eed1d9f8d25..23a233cbfa66 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -184,7 +184,6 @@ static int ummu_fix_user_cmd(struct ummu_device *ummu, case CMD_CFGI_TECT_RANGE: case CMD_CFGI_TCT: case CMD_CFGI_TCT_ALL: - case CMD_CFGI_TECTS_PIDM: cmd[2] &= ~CMD_CFGI_2_TECTE_TAG; cmd[2] |= FIELD_PREP(CMD_CFGI_2_TECTE_TAG, tecte_tag); break; diff --git a/drivers/iommu/hisilicon/queue.h b/drivers/iommu/hisilicon/queue.h index a1e3f28928ac..df82d58fac0f 100644 --- a/drivers/iommu/hisilicon/queue.h +++ b/drivers/iommu/hisilicon/queue.h @@ -204,7 +204,6 @@ struct ummu_mcmdq_ent { #define CMD_CFGI_TECT_RANGE 0x09 #define CMD_CFGI_TCT 0x0A #define CMD_CFGI_TCT_ALL 0x0B -#define CMD_CFGI_TECTS_PIDM 0x0C struct { bool leaf; u32 tid; -- Gitee From b28e9dcf12d4040c78f2682df0254518683d9573 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 08:26:21 +0800 Subject: [PATCH 108/126] iommu/ummu: UMMU doesn't send tect sync after delete eid commit b451b1e0768d121cb16cd0ceae754cfffd4e0ace openEuler Fixes: 322b574b8b2a ("iommu/ummu: Add tct/tect ops for configuration table") Signed-off-by: Sihui Jiang Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/cfg_table.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/hisilicon/cfg_table.c b/drivers/iommu/hisilicon/cfg_table.c index 7c7a486a7bdd..31f9a81317c3 100644 --- a/drivers/iommu/hisilicon/cfg_table.c +++ b/drivers/iommu/hisilicon/cfg_table.c @@ -1174,7 +1174,12 @@ void ummu_build_s2_domain_tecte(struct ummu_domain 
*u_domain, static bool check_tecte_can_set(const struct ummu_tecte_data *tecte, const struct ummu_tecte_data *src) { - u32 st_mode = FIELD_GET(TECT_ENT0_ST_MODE, le64_to_cpu(tecte->data[0])); + u32 st_mode; + + if (!src->data[0]) + return true; + + st_mode = FIELD_GET(TECT_ENT0_ST_MODE, le64_to_cpu(tecte->data[0])); switch (st_mode) { case TECT_ENT0_ST_MODE_ABORT: @@ -1298,8 +1303,11 @@ void ummu_del_eid(struct ummu_core_device *core_dev, guid_t *guid, eid_t eid, en } ummu_device_delete_kvtbl(ummu, meta->tecte_tag, eid, kv_index); - if (kref_read(&meta->ref) == 1) + /* 2 indicates that only the last EID remains. */ + if (kref_read(&meta->ref) == 2) { ummu_device_write_tecte(ummu, meta->tecte_tag, &ummu_clear_tecte); + meta->valid = false; + } os_meta_del_eid(meta, eid); } -- Gitee From 721ccdd51a0187ad89a676a08b8a0d3985494ff7 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 08:45:50 +0800 Subject: [PATCH 109/126] iommu/ummu: Fix compilation option configuration about CONFIG_ACPI commit 3a6d8249afbb244a4ce5308f9d2ea21f2ef2c1c9 openEuler Fixes: 0db2fc397b9d ("iommu/ummu: Support UMMU device") Signed-off-by: Jie Wang Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/perm_table.c | 16 +--------------- drivers/iommu/hisilicon/sva.h | 2 +- drivers/iommu/hisilicon/ummu_main.c | 10 ++++++++-- drivers/perf/hisilicon/ummu_pmu.c | 10 ++++++++-- drivers/ub/ubfi/ummu.c | 2 +- 5 files changed, 19 insertions(+), 21 deletions(-) diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index 3827fa7b017b..cc90d1e32364 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -75,20 +75,6 @@ static const u32 g_mapt_range_bits[MAPT_MAX_LVL_INDEX + 1][2] = { { 47, 39 }, (GET_BITS_MASK(g_mapt_range_bits[level][0] - \ g_mapt_range_bits[level][1] + 1))) -#define GET_LEVEL_INDEX_RANGE(base, limit, lvl, base_index, limit_index, \ - cross_level) \ - do { \ - 
(base_index) = GET_LEVEL_BLOCK_INDEX(base, lvl); \ - if ((limit) >> (g_mapt_range_bits[lvl][0] + 1) == \ - (base) >> (g_mapt_range_bits[lvl][0] + 1)) { \ - (limit_index) = GET_LEVEL_BLOCK_INDEX(limit, lvl); \ - cross_level = false; \ - } else { \ - (limit_index) = MAPT_MAX_ENTRY_INDEX - 1; \ - cross_level = true; \ - } \ - } while (0) - #define ENTRY_ADDR_LOW(addr) FIELD_GET(GENMASK(31, 0), (addr)) #define ENTRY_ADDR_HIGH(addr) FIELD_GET(GENMASK(47, 32), (addr)) @@ -945,7 +931,7 @@ static int ummu_table_clear_node_by_level(struct ummu_data_info *data_info, static int ummu_table_clear_head_node(struct ummu_data_info *data_info, u32 level, struct ummu_mapt_table_node *pre_node, struct ummu_mapt_table_node *cur_node, u64 node_base, - u64 node_limit) + u64 node_limit) { u16 loop_cnt, max_loop = MAPT_MAX_ENTRY_INDEX << MAPT_MAX_LVL_INDEX; u64 rest_node_base, cur_base, cur_limit; diff --git a/drivers/iommu/hisilicon/sva.h b/drivers/iommu/hisilicon/sva.h index e91fa1e11920..4b3189fcbcff 100644 --- a/drivers/iommu/hisilicon/sva.h +++ b/drivers/iommu/hisilicon/sva.h @@ -55,7 +55,7 @@ static inline int ummu_master_disable_sva(struct ummu_master *master, return -ENODEV; } -static void ummu_iopf_queue_free(struct ummu_device *ummu) +static inline void ummu_iopf_queue_free(struct ummu_device *ummu) { } diff --git a/drivers/iommu/hisilicon/ummu_main.c b/drivers/iommu/hisilicon/ummu_main.c index 53adf3d2fdcf..61e8f52a6a19 100644 --- a/drivers/iommu/hisilicon/ummu_main.c +++ b/drivers/iommu/hisilicon/ummu_main.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include "logic_ummu/logic_ummu.h" #include "ummu_impl.h" @@ -766,24 +768,28 @@ static void ummu_device_shutdown(struct platform_device *pdev) ummu_device_disable(ummu); } +#ifdef CONFIG_OF static const struct of_device_id hisi_ummu_of_match[] = { { .compatible = "ub,ummu", }, { } }; MODULE_DEVICE_TABLE(of, hisi_ummu_of_match); +#endif +#ifdef CONFIG_ACPI static const struct acpi_device_id 
hisi_ummu_acpi_match[] = { { "HISI0551", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_ummu_acpi_match); +#endif struct platform_driver ummu_driver = { .driver = { .name = UMMU_DRV_NAME, .suppress_bind_attrs = true, - .of_match_table = hisi_ummu_of_match, - .acpi_match_table = hisi_ummu_acpi_match, + .of_match_table = of_match_ptr(hisi_ummu_of_match), + .acpi_match_table = ACPI_PTR(hisi_ummu_acpi_match), }, .probe = ummu_device_probe, .remove = ummu_device_remove, diff --git a/drivers/perf/hisilicon/ummu_pmu.c b/drivers/perf/hisilicon/ummu_pmu.c index d145bcee10fa..69b79bf1efce 100644 --- a/drivers/perf/hisilicon/ummu_pmu.c +++ b/drivers/perf/hisilicon/ummu_pmu.c @@ -5,6 +5,8 @@ * Monitor Counter Groups (PMCG) associated with an UMMU node to monitor that node. */ +#include +#include #include #include #include @@ -1029,17 +1031,21 @@ static void ummu_pmu_shutdown(struct platform_device *pdev) ummu_pmu_disable(&ummu_pmu->pmu); } +#ifdef CONFIG_OF static const struct of_device_id hisi_ummu_pmu_of_match[] = { { .compatible = "ub,ummu_pmu", }, { } }; MODULE_DEVICE_TABLE(of, hisi_ummu_pmu_of_match); +#endif +#ifdef CONFIG_ACPI static const struct acpi_device_id hisi_ummu_pmu_acpi_match[] = { {"HISI0571", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_ummu_pmu_acpi_match); +#endif static ssize_t partid_store(struct device *kobj, struct device_attribute *attr, const char *buf, size_t count) @@ -1143,8 +1149,8 @@ static struct platform_driver ummu_pmu_driver = { .driver = { .name = UMMU_PMU_DRV_NAME, .suppress_bind_attrs = true, - .of_match_table = hisi_ummu_pmu_of_match, - .acpi_match_table = hisi_ummu_pmu_acpi_match, + .of_match_table = of_match_ptr(hisi_ummu_pmu_of_match), + .acpi_match_table = ACPI_PTR(hisi_ummu_pmu_acpi_match), .dev_groups = ummu_pmu_groups }, .probe = ummu_pmu_probe, diff --git a/drivers/ub/ubfi/ummu.c b/drivers/ub/ubfi/ummu.c index 93f6dcbf8aa6..a1f6dd61c51c 100644 --- a/drivers/ub/ubfi/ummu.c +++ b/drivers/ub/ubfi/ummu.c @@ -173,6 +173,7 @@ static int 
ummu_config_update(struct platform_device *pdev, return 0; } +#ifdef CONFIG_ACPI static acpi_status acpi_processor_ummu(acpi_handle handle, u32 lvl, void *context, void **rv) { @@ -239,7 +240,6 @@ static acpi_status acpi_processor_ummu(acpi_handle handle, u32 lvl, return status; } -#ifdef CONFIG_ACPI static int acpi_update_ummu_config(struct ummu_node *ummu_node, u32 index) { acpi_status status; -- Gitee From 163144e73b6d3d56da1232e7cbc3982d48745907 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 10:33:13 +0800 Subject: [PATCH 110/126] iommu/ummu: NO plbi for grant, config permq need dma_wmb commit dc81fdc0cc1c6e5ee0c7f7eb53f66bcafafac115 openEuler Fixes: f8bb769aa5fd ("iommu/ummu: Add UMMU permission queue") Signed-off-by: Lizhi He Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/perm_queue.c | 1 + drivers/iommu/hisilicon/perm_table.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/drivers/iommu/hisilicon/perm_queue.c b/drivers/iommu/hisilicon/perm_queue.c index ae85563bce78..a9eed282ebfc 100644 --- a/drivers/iommu/hisilicon/perm_queue.c +++ b/drivers/iommu/hisilicon/perm_queue.c @@ -335,6 +335,7 @@ int ummu_domain_config_permq(struct ummu_domain *domain) domain->qid = qid; ummu_init_permq_ctxtbl_ent(domain, permq); + dma_wmb(); ummu_init_permq_ctrltbl_ent(ummu->ucmdq_ctrl_page, qid); return 0; diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index cc90d1e32364..f984ccd5a9e8 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -1239,6 +1239,11 @@ int ummu_perm_grant(struct iommu_domain *domain, void *va, size_t size, ret = ummu_update_info(data_info.op, mapt_info, &data_info); plb_gather->va = (void *)data_info.data_base; + if (data_info.op == UMMU_GRANT) + plb_gather->size = 0; + else + plb_gather->size = data_info.data_size; + plb_gather->size = data_info.data_size; data_info.tokenval = 0; return ret; -- 
Gitee From b71af145028fccf3044f03b8ddb778c8205e82af Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 10:49:24 +0800 Subject: [PATCH 111/126] iommu/ummu: Fix VM multi-instance problem commit 005bd47130be6f04dc315eb4322531714d1c64bf openEuler In the scenario of multiple virtual machine instances, there is an issue with traversing UMMU instances. Fixes: 7876e979bbdb ("iommu/ummu: Implement domain and core ops in logic UMMU framework") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/iommu.c | 11 +++-------- drivers/iommu/hisilicon/logic_ummu/logic_ummu.c | 8 +++----- drivers/iommu/hisilicon/ummu.h | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/hisilicon/iommu.c b/drivers/iommu/hisilicon/iommu.c index 2a50d7bed835..b1f694ad10c3 100644 --- a/drivers/iommu/hisilicon/iommu.c +++ b/drivers/iommu/hisilicon/iommu.c @@ -676,7 +676,7 @@ static void ummu_cfg_sync(struct ummu_base_domain *base_domain) else u_domain = to_ummu_domain(&base_domain->domain); - ummu = core_to_ummu_device(u_domain->base_domain.core_dev); + ummu = core_to_ummu_device(base_domain->core_dev); tag = u_domain->cfgs.tecte_tag; tid = u_domain->base_domain.tid; @@ -713,18 +713,13 @@ static int ummu_sync_dom_cfg(struct ummu_base_domain *src, dst_domain->cfgs.tecte_tag = src_domain->cfgs.tecte_tag; dst_domain->cfgs.stage = src_domain->cfgs.stage; break; - case SYNC_NESTED_DOM_MUTI_CFG: - src_domain = to_nested_domain(&src->domain)->s2_parent; - dst_domain = to_nested_domain(&dst->domain)->s2_parent; - dst_domain->base_domain.tid = src_domain->base_domain.tid; - dst_domain->cfgs.tecte_tag = src_domain->cfgs.tecte_tag; - dst_domain->cfgs.stage = src_domain->cfgs.stage; - break; case SYNC_CLEAR_DOM_ALL_CFG: dst_domain = to_ummu_domain(&dst->domain); memset(&dst_domain->cfgs, 0, sizeof(dst_domain->cfgs)); dst_domain->base_domain.tid = UMMU_INVALID_TID; break; + case SYNC_TYPE_NONE: + 
break; default: return -EINVAL; } diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index fd66ca5fa05f..b5cd7999e9ab 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -192,11 +192,11 @@ static int logic_ummu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct logic_ummu_domain *logic_domain = iommu_to_logic_domain(domain); + const struct ummu_device_helper *helper = get_agent_helper(); const struct ummu_core_ops *core_ops = get_agent_core_ops(); struct ummu_base_domain *ummu_base_domain, *agent_domain; - const struct ummu_device_helper *helper = get_agent_helper(); + enum ummu_dom_cfg_sync_type sync_type = SYNC_TYPE_NONE; const struct iommu_domain_ops *ops; - enum ummu_dom_cfg_sync_type sync_type; int ret; agent_domain = logic_domain->agent_domain; @@ -217,9 +217,7 @@ static int logic_ummu_attach_dev(struct iommu_domain *domain, } /* the domain attributes might be changed, sync to logic domain */ logic_domain_update_attr(logic_domain); - if (domain->type == IOMMU_DOMAIN_NESTED) - sync_type = SYNC_NESTED_DOM_MUTI_CFG; - else + if (domain->type != IOMMU_DOMAIN_NESTED) sync_type = SYNC_DOM_MUTI_CFG; list_for_each_entry(ummu_base_domain, &logic_domain->base_domain.list, diff --git a/drivers/iommu/hisilicon/ummu.h b/drivers/iommu/hisilicon/ummu.h index 95ff2c927742..a378e39ce93b 100644 --- a/drivers/iommu/hisilicon/ummu.h +++ b/drivers/iommu/hisilicon/ummu.h @@ -240,9 +240,9 @@ struct ummu_hash_table_cfg { /* ummu device inner helper functions */ enum ummu_dom_cfg_sync_type { + SYNC_TYPE_NONE, SYNC_DOM_ALL_CFG, SYNC_DOM_MUTI_CFG, - SYNC_NESTED_DOM_MUTI_CFG, SYNC_CLEAR_DOM_ALL_CFG, }; -- Gitee From 1d8648ae045baa0d16311460aaa8214b3b3de7f5 Mon Sep 17 00:00:00 2001 From: Jiashun Wang Date: Mon, 1 Dec 2025 14:38:50 +0800 Subject: [PATCH 112/126] iommu/ummu: Optimize chip generational compatibility feature commit 
d0e9af046f4d46c4941d2a3a02b0addbd22d02ef openEuler To accommodate the functionalities of different generations of chips, some redundant fields have been designed in the struct ummu_device. These additional features help reduce the bloated data members in the struct ummu_device. Fixes: 0db2fc397b9d ("iommu/ummu: Support UMMU device") Signed-off-by: Jiashun Wang Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/cfg_table.c | 2 +- drivers/iommu/hisilicon/flush.c | 20 +++++++------ drivers/iommu/hisilicon/nested.c | 4 +-- drivers/iommu/hisilicon/perm_queue.c | 9 +++--- drivers/iommu/hisilicon/perm_queue.h | 3 ++ drivers/iommu/hisilicon/perm_table.c | 2 +- drivers/iommu/hisilicon/queue.c | 3 +- drivers/iommu/hisilicon/sva.c | 2 +- drivers/iommu/hisilicon/ummu.h | 11 ++----- drivers/iommu/hisilicon/ummu_main.c | 45 +++++++++++++++++----------- 10 files changed, 55 insertions(+), 46 deletions(-) diff --git a/drivers/iommu/hisilicon/cfg_table.c b/drivers/iommu/hisilicon/cfg_table.c index 31f9a81317c3..fe208646a8f3 100644 --- a/drivers/iommu/hisilicon/cfg_table.c +++ b/drivers/iommu/hisilicon/cfg_table.c @@ -1125,7 +1125,7 @@ static void ummu_device_make_default_tecte(struct ummu_device *ummu, TECT_ENT0_TCR_EL2 : TECT_ENT0_TCR_NSEL1; target->data[0] = cpu_to_le64( TECT_ENT0_V | FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | - (ummu->cap.support_mapt ? TECT_ENT0_MAPT_EN : 0) | + ((ummu->cap.features & UMMU_FEAT_MAPT) ? 
TECT_ENT0_MAPT_EN : 0) | FIELD_PREP(TECT_ENT0_ST_MODE, TECT_ENT0_ST_MODE_S1) | FIELD_PREP(TECT_ENT0_PRIV_SEL, TECT_ENT0_PRIV_SEL_PRIV)); diff --git a/drivers/iommu/hisilicon/flush.c b/drivers/iommu/hisilicon/flush.c index ea2aa23100d3..064f964b51be 100644 --- a/drivers/iommu/hisilicon/flush.c +++ b/drivers/iommu/hisilicon/flush.c @@ -328,17 +328,19 @@ void ummu_sync_tct(struct ummu_device *ummu, u32 tecte_tag, u32 tid, .deid_0 = tecte_tag, }, }; - struct ummu_mcmdq_ent cmd_plbi_all = { - .opcode = CMD_PLBI_OS_EIDTID, - .plbi = { - .tid = tid, - .tecte_tag = tecte_tag, - }, - }; - trace_ummu_sync_tct(dev_name(ummu->dev), tecte_tag, tid, leaf); - if (!ummu->cap.prod_ver) + if (ummu->cap.options & UMMU_OPT_SYNC_WITH_PLBI) { + struct ummu_mcmdq_ent cmd_plbi_all = { + .opcode = CMD_PLBI_OS_EIDTID, + .plbi = { + .tid = tid, + .tecte_tag = tecte_tag, + }, + }; ummu_mcmdq_issue_cmd(ummu, &cmd_plbi_all); + } + + trace_ummu_sync_tct(dev_name(ummu->dev), tecte_tag, tid, leaf); ummu_mcmdq_issue_cmd_with_sync(ummu, &cmd_cfgi_tct); } diff --git a/drivers/iommu/hisilicon/nested.c b/drivers/iommu/hisilicon/nested.c index 23a233cbfa66..f7804811a5ed 100644 --- a/drivers/iommu/hisilicon/nested.c +++ b/drivers/iommu/hisilicon/nested.c @@ -31,8 +31,8 @@ static void ummu_build_nested_domain_tct(struct ummu_domain *u_domain, tcr_sel = (ummu->cap.features & UMMU_FEAT_E2H) ? TECT_ENT0_TCR_EL2 : TECT_ENT0_TCR_NSEL1; target->data[0] |= cpu_to_le64( - FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | - (ummu->cap.support_mapt ? TECT_ENT0_MAPT_EN : 0)); + FIELD_PREP(TECT_ENT0_TCRC_SEL, tcr_sel) | + ((ummu->cap.features & UMMU_FEAT_MAPT) ? 
TECT_ENT0_MAPT_EN : 0)); } static void ummu_build_nested_domain_tecte( diff --git a/drivers/iommu/hisilicon/perm_queue.c b/drivers/iommu/hisilicon/perm_queue.c index a9eed282ebfc..2e80dec5eaf9 100644 --- a/drivers/iommu/hisilicon/perm_queue.c +++ b/drivers/iommu/hisilicon/perm_queue.c @@ -12,10 +12,7 @@ #include "regs.h" #include "perm_queue.h" -#define PCMDQ_ENT_BYTES 16U -#define PCPLQ_ENT_BYTES 4U #define PERMQ_CTXTBL_BYTES 64U - #define PERMQ_CTXTBL_STATUS GENMASK(1, 0) #define PERMQ_CTXTBL_RESET 0x0 #define PERMQ_CTXTBL_READY 0x1 @@ -44,8 +41,10 @@ void ummu_device_uninit_permqs(struct ummu_device *ummu) { - if (ummu->cap.support_mapt) - xa_destroy(&ummu->permq_ctx_cfg.permq_xa); + if (!(ummu->cap.features & UMMU_FEAT_MAPT)) + return; + + xa_destroy(&ummu->permq_ctx_cfg.permq_xa); mutex_destroy(&ummu->permq_ctx_cfg.permq_rel_mutex); } diff --git a/drivers/iommu/hisilicon/perm_queue.h b/drivers/iommu/hisilicon/perm_queue.h index d3e8c580e5b6..6f9ac919af29 100644 --- a/drivers/iommu/hisilicon/perm_queue.h +++ b/drivers/iommu/hisilicon/perm_queue.h @@ -10,6 +10,9 @@ #define UMMU_INVALID_QID ((u32)-1) +#define PCMDQ_ENT_BYTES 16U +#define PCPLQ_ENT_BYTES 4U + #define PQ_WRAP(idx, size) ((idx) & (size)) #define PQ_IDX(idx, size) ((idx) & ((size) - 1)) diff --git a/drivers/iommu/hisilicon/perm_table.c b/drivers/iommu/hisilicon/perm_table.c index f984ccd5a9e8..ec14575b3cba 100644 --- a/drivers/iommu/hisilicon/perm_table.c +++ b/drivers/iommu/hisilicon/perm_table.c @@ -203,7 +203,7 @@ static int ummu_alloc_mapt_mem_for_table(struct ummu_domain *ummu_domain, goto err_out; } - if (ummu->cap.prod_ver == NO_PROD_ID) { + if (ummu->cap.options & UMMU_OPT_CHK_MAPT_CONTINUITY) { ret = ummu_device_check_pa_continuity(ummu, virt_to_phys(alloc_ptr), PAGE_ORDER_TO_MAPT_ORDER(blk_para->block_size_order), diff --git a/drivers/iommu/hisilicon/queue.c b/drivers/iommu/hisilicon/queue.c index 1f23c54734c6..7d3640e6e9f3 100644 --- a/drivers/iommu/hisilicon/queue.c +++ 
b/drivers/iommu/hisilicon/queue.c @@ -251,7 +251,8 @@ static int ummu_mcmdq_init(struct ummu_device *ummu) int cpu, ret; ummu->nr_mcmdq = 1UL << ummu->cap.mcmdq_log2num; - ummu->nr_mcmdq -= 1; + if (ummu->cap.options & UMMU_OPT_MCMDQ_DECREASE) + ummu->nr_mcmdq -= 1; shift = order_base_2(num_possible_cpus() / ummu->nr_mcmdq); ummu->mcmdq = devm_alloc_percpu(ummu->dev, struct ummu_mcmdq *); diff --git a/drivers/iommu/hisilicon/sva.c b/drivers/iommu/hisilicon/sva.c index f9a63544ace0..192771e2304d 100644 --- a/drivers/iommu/hisilicon/sva.c +++ b/drivers/iommu/hisilicon/sva.c @@ -313,7 +313,7 @@ static int ummu_sva_collect_domain_cfg(struct ummu_domain *domain, ioasid_t id) domain->cfgs.sva_mode = UMMU_MODE_SVA; } - if (ummu->cap.support_mapt && + if ((ummu->cap.features & UMMU_FEAT_MAPT) && domain->cfgs.sva_mode != UMMU_MODE_SVA_DISABLE_PTB) { domain->cfgs.s1_cfg.io_pt_cfg.mode = mode; if (!ksva) { diff --git a/drivers/iommu/hisilicon/ummu.h b/drivers/iommu/hisilicon/ummu.h index a378e39ce93b..ea391498c97b 100644 --- a/drivers/iommu/hisilicon/ummu.h +++ b/drivers/iommu/hisilicon/ummu.h @@ -31,11 +31,6 @@ struct ummu_l1_tct_desc { phys_addr_t l2ptr_phys; }; -enum ummu_ver { - NO_PROD_ID = 0, - MAX_VER, -}; - enum ummu_device_msi_index { EVTQ_MSI_INDEX, GERROR_MSI_INDEX, @@ -172,7 +167,6 @@ struct ummu_capability { #define UMMU_FEAT_TOKEN_CHK BIT(26) #define UMMU_FEAT_PERMQ BIT(27) #define UMMU_FEAT_NESTING BIT(28) - u32 features; u32 deid_bits; u32 tid_bits; @@ -183,6 +177,9 @@ struct ummu_capability { #define UMMU_OPT_MSIPOLL (1UL << 0) #define UMMU_OPT_DOUBLE_PLBI (1UL << 1) #define UMMU_OPT_KCMD_PLBI (1UL << 2) +#define UMMU_OPT_CHK_MAPT_CONTINUITY (1UL << 3) +#define UMMU_OPT_MCMDQ_DECREASE (1UL << 4) +#define UMMU_OPT_SYNC_WITH_PLBI (1UL << 5) u32 options; #define UMMU_MAX_ASIDS (1UL << 16) @@ -190,7 +187,6 @@ struct ummu_capability { #define UMMU_MAX_VMIDS (1UL << 16) unsigned int vmid_bits; - bool support_mapt; u32 mcmdq_log2num; u32 mcmdq_log2size; u32 
evtq_log2num; @@ -202,7 +198,6 @@ struct ummu_capability { } permq_ent_num; u32 mtm_gp_max; u32 mtm_id_max; - u16 prod_ver; }; struct ummu_permq_addr { diff --git a/drivers/iommu/hisilicon/ummu_main.c b/drivers/iommu/hisilicon/ummu_main.c index 61e8f52a6a19..14d6e08b2c2c 100644 --- a/drivers/iommu/hisilicon/ummu_main.c +++ b/drivers/iommu/hisilicon/ummu_main.c @@ -30,7 +30,7 @@ #define UMMU_DRV_NAME "ummu" #define HISI_VENDOR_ID 0xCC08 -static bool ummu_special_identify; +static u16 ummu_chip_identifier; int ummu_write_reg_sync(struct ummu_device *ummu, u32 val, u32 reg_off, u32 ack_off) @@ -125,7 +125,7 @@ static int ummu_init_structures(struct ummu_device *ummu) if (ret) goto resource_release; - if (ummu->cap.support_mapt) { + if (ummu->cap.features & UMMU_FEAT_MAPT) { /* ctrl page is private for every ummu hardware */ ummu_device_init_permq_ctrl_page(ummu); /* ctx table is common for every ummu hardware */ @@ -141,21 +141,26 @@ static int ummu_init_structures(struct ummu_device *ummu) return ret; } -static void ummu_device_hw_probe_ver(struct ummu_device *ummu) +static void ummu_device_hw_probe_iidr(struct ummu_device *ummu) { u32 reg = readl_relaxed(ummu->base + UMMU_IIDR); - ummu->cap.prod_ver = (u16)FIELD_GET(IIDR_PROD_ID, reg); /* - * On the hisi chip with IIDR_PROD_ID set to 0, - * ummu enables special_identify to perform some - * specialized operations. + * On the 1st generation hisi chip, IIDR_PROD_ID is set to 0, + * ummu enables chip_identifier to perform some specialized operations.
*/ - if (ummu_special_identify && !ummu->cap.prod_ver) { + if ((ummu_chip_identifier == HISI_VENDOR_ID) && + !FIELD_GET(IIDR_PROD_ID, reg)) { ummu->cap.options |= UMMU_OPT_DOUBLE_PLBI; ummu->cap.options |= UMMU_OPT_KCMD_PLBI; + ummu->cap.options |= UMMU_OPT_CHK_MAPT_CONTINUITY; + ummu->cap.options |= UMMU_OPT_MCMDQ_DECREASE; + ummu->cap.options |= UMMU_OPT_SYNC_WITH_PLBI; ummu->cap.features &= ~UMMU_FEAT_STALLS; } + + dev_notice(ummu->dev, "features 0x%08x, options 0x%08x.\n", + ummu->cap.features, ummu->cap.options); } static void ummu_device_hw_probe_cap0(struct ummu_device *ummu) @@ -178,7 +183,8 @@ static void ummu_device_hw_probe_cap0(struct ummu_device *ummu) ubrt_pasids = ummu->core_dev.iommu.max_pasids; cap_pasids = 1 << ummu->cap.tid_bits; if (ubrt_pasids > cap_pasids) - pr_warn("ubrt max_pasids[%u] beyond capacity.\n", ubrt_pasids); + dev_warn(ummu->dev, "ubrt max_pasids[%u] beyond capacity.\n", + ubrt_pasids); pasids = min(cap_pasids, (1UL << UB_MAX_TID_BITS)); ummu->core_dev.iommu.max_pasids = min(ubrt_pasids, pasids); /* TECTE_TAG size */ @@ -434,10 +440,14 @@ static int ummu_device_hw_probe_cap4(struct ummu_device *ummu) int hw_permq_ent; hw_permq_ent = 1 << FIELD_GET(CAP4_UCMDQ_LOG2SIZE, reg); - ummu->cap.permq_ent_num.cmdq_num = hw_permq_ent; + ummu->cap.permq_ent_num.cmdq_num = + min_t(int, round_up(PAGE_SIZE / PCMDQ_ENT_BYTES, PCMDQ_ENT_BYTES), + hw_permq_ent); hw_permq_ent = 1 << FIELD_GET(CAP4_UCPLQ_LOG2SIZE, reg); - ummu->cap.permq_ent_num.cplq_num = hw_permq_ent; + ummu->cap.permq_ent_num.cplq_num = + min_t(int, round_up(PAGE_SIZE / PCPLQ_ENT_BYTES, PCPLQ_ENT_BYTES), + hw_permq_ent); if (ummu->impl_ops && ummu->impl_ops->hw_probe) return ummu->impl_ops->hw_probe(ummu); @@ -452,7 +462,7 @@ static void ummu_device_hw_probe_cap5(struct ummu_device *ummu) ummu->cap.features |= UMMU_FEAT_RANGE_PLBI; if (reg & CAP5_MAPT_SUPPORT) - ummu->cap.support_mapt = true; + ummu->cap.features |= UMMU_FEAT_MAPT; if (reg & CAP5_PT_GRAN4K_BIT) 
ummu->cap.ptsize_bitmap |= SZ_4K; @@ -472,8 +482,8 @@ static void ummu_device_hw_probe_cap5(struct ummu_device *ummu) if (ummu_sva_supported(ummu)) ummu->cap.features |= UMMU_FEAT_SVA; - dev_info(ummu->dev, "ias = %u-bit, oas = %u-bit, features = 0x%08x.\n", - ummu->cap.ias, ummu->cap.oas, ummu->cap.features); + dev_info(ummu->dev, "ias %u-bit, oas %u-bit.\n", + ummu->cap.ias, ummu->cap.oas); } static void ummu_device_hw_probe_cap6(struct ummu_device *ummu) @@ -510,7 +520,7 @@ static int ummu_device_hw_init(struct ummu_device *ummu) ummu_device_hw_probe_cap5(ummu); ummu_device_hw_probe_cap6(ummu); - ummu_device_hw_probe_ver(ummu); + ummu_device_hw_probe_iidr(ummu); return 0; } @@ -603,7 +613,7 @@ static int ummu_device_reset(struct ummu_device *ummu) if (ret) return ret; - if (ummu->cap.support_mapt) { + if (ummu->cap.features & UMMU_FEAT_MAPT) { ummu_device_set_permq_ctxtbl(ummu); ret = ummu_device_mapt_enable(ummu); if (ret) @@ -646,8 +656,7 @@ static int ummu_device_ubrt_probe(struct ummu_device *ummu) } node = (struct ummu_node *)fw->ubrt_node; - if (node->vendor_id == HISI_VENDOR_ID) - ummu_special_identify = true; + ummu_chip_identifier = node->vendor_id; ummu->core_dev.iommu.min_pasids = node->min_tid; ummu->core_dev.iommu.max_pasids = node->max_tid; -- Gitee From 5cc0cb2cf76fd943b62982db3961c37027aae5f6 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:24:15 +0800 Subject: [PATCH 113/126] iommu/ummu: Fixing the issue of uninitialized resources in logic_ummu_viommu commit 30d93a98226eee1c07a8b40a78f45c75d8bd6c8e openEuler Fixes: 7876e979bbdb ("iommu/ummu: Implement domain and core ops in logic UMMU framework") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- .../iommu/hisilicon/logic_ummu/logic_ummu.c | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index 
b5cd7999e9ab..7dad1e2f4ca8 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -21,11 +21,6 @@ #include "../ummu_cfg_v1.h" #include "logic_ummu.h" -struct logic_ummu_domain { - struct ummu_base_domain base_domain; - struct ummu_base_domain *agent_domain; -}; - struct logic_ummu_device { struct ummu_core_device core_dev; struct ummu_device *agent_device; @@ -41,6 +36,12 @@ struct logic_ummu_viommu { struct iommu_domain *nested; }; +struct logic_ummu_domain { + struct ummu_base_domain base_domain; + struct ummu_base_domain *agent_domain; + struct logic_ummu_viommu *logic_viommu; +}; + struct eid_info { enum eid_type type; eid_t eid; @@ -514,10 +515,12 @@ static void logic_ummu_free(struct iommu_domain *domain) return; } - if (domain->type != IOMMU_DOMAIN_NESTED) + if (domain->type != IOMMU_DOMAIN_NESTED) { logic_domain_free(logic_domain, ops); - else + } else { logic_nested_domain_free(logic_domain, ops); + logic_domain->logic_viommu->nested = NULL; + } kfree(logic_domain); } @@ -1018,6 +1021,7 @@ logic_ummu_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, } } logic_vummu->nested = &logic_domain->base_domain.domain; + logic_domain->logic_viommu = logic_vummu; return &logic_domain->base_domain.domain; error_handle: list_for_each_entry_safe(nested_base_domain, iter, &logic_domain->base_domain.list, list) { @@ -1040,8 +1044,10 @@ logic_ummu_viommu_cache_invalidate(struct iommufd_viommu *viommu, u32 cmd_num, succ_cnt; int err, ret = 0; - if (!logic_vummu->nested || !array) + if (!logic_vummu->nested || !array) { + pr_debug("invalid viommu.\n"); return -EINVAL; + } if (!helper || !helper->cache_invalidate_user) return -EOPNOTSUPP; -- Gitee From d6b9a40de85b58673bb63d77e1526ee25b1f46e8 Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:45:06 +0800 Subject: [PATCH 114/126] iommu/ummu: Move tid_type attr to logic ummu commit 11e53fb0223606e325a54134bc3bdfc491d61bf3 openEuler 1. 
Move tid_type attr to logic ummu. 2. Modify the device IDs of the UMMU and PMU. Fixes: 1dc959aea0fd ("iommu/ummu: Support UMMU attribute show and store operations") Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- .../ABI/testing/sysfs-class-iommu-ummu-iommu | 2 +- drivers/iommu/hisilicon/attribute.c | 39 ------------- .../iommu/hisilicon/logic_ummu/logic_ummu.c | 57 ++++++++++++++++++- drivers/ub/ubfi/ummu.c | 9 ++- 4 files changed, 61 insertions(+), 46 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu index 48ba4d6d4c60..e5b576672af8 100644 --- a/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu +++ b/Documentation/ABI/testing/sysfs-class-iommu-ummu-iommu @@ -104,7 +104,7 @@ Contact: Jingbin Wu Description: Maximum TokenID bit width supported in non-secure state. -What: /sys/class/iommu/ummu./ummu-iommu/tid_type +What: /sys/class/iommu/logic_ummu/tid_type Date: Oct 2025 KernelVersion: 6.6 Contact: Jingbin Wu diff --git a/drivers/iommu/hisilicon/attribute.c b/drivers/iommu/hisilicon/attribute.c index adb360ea541c..3ee3c523bb41 100644 --- a/drivers/iommu/hisilicon/attribute.c +++ b/drivers/iommu/hisilicon/attribute.c @@ -187,44 +187,6 @@ static ssize_t eid_list_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(eid_list); -static const char *get_domain_type_str(u32 domain_type) -{ - switch (domain_type) { - case IOMMU_DOMAIN_DMA: - return "IOMMU_DOMAIN_DMA"; - case IOMMU_DOMAIN_SVA: - return "IOMMU_DOMAIN_SVA"; - default: - return "UNKNOWN DOMAIN TYPE"; - } -} - -static ssize_t tid_type_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ummu_core_device *ummu_core; - u32 tid = 0, tid_type; - int ret; - - ret = kstrtouint(buf, 0, &tid); - if (ret < 0 || tid >= UMMU_INVALID_TID) - return -EINVAL; - - ummu_core = to_ummu_core(dev_to_iommu_device(dev)); - ret = 
ummu_core_get_tid_type(ummu_core, tid, &tid_type); - if (ret) { - pr_err("Invalid tid = 0x%x, ret = %d.\n", tid, ret); - return ret; - } - - pr_info("tid = 0x%x, domain_type = %s.\n", tid, - get_domain_type_str(tid_type)); - - return (ssize_t)count; -} -static DEVICE_ATTR_WO(tid_type); - static struct attribute *ummu_iommu_attrs[] = { &dev_attr_features.attr, &dev_attr_tid_bits.attr, @@ -240,7 +202,6 @@ static struct attribute *ummu_iommu_attrs[] = { &dev_attr_permq_num.attr, &dev_attr_permq_ent_num.attr, &dev_attr_eid_list.attr, - &dev_attr_tid_type.attr, NULL, }; diff --git a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c index 7dad1e2f4ca8..ef33014e48ce 100644 --- a/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c +++ b/drivers/iommu/hisilicon/logic_ummu/logic_ummu.c @@ -2064,6 +2064,61 @@ static inline struct fwnode_handle *logic_ummu_alloc_fwnode_static(void) return handle; } +static const char *get_domain_type_str(u32 domain_type) +{ + switch (domain_type) { + case IOMMU_DOMAIN_DMA: + return "IOMMU_DOMAIN_DMA"; + case IOMMU_DOMAIN_IDENTITY: + return "IOMMU_DOMAIN_IDENTITY"; + case IOMMU_DOMAIN_SVA: + return "IOMMU_DOMAIN_SVA"; + default: + return "UNKNOWN DOMAIN TYPE"; + } +} + +static ssize_t tid_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ummu_core_device *ummu_core; + u32 tid = 0, tid_type; + int ret; + + ret = kstrtouint(buf, 0, &tid); + if (ret < 0 || tid >= UMMU_INVALID_TID) + return -EINVAL; + + ummu_core = to_ummu_core(dev_to_iommu_device(dev)); + ret = ummu_core_get_tid_type(ummu_core, tid, &tid_type); + if (ret) { + pr_err("Invalid tid = 0x%x, ret = %d.\n", tid, ret); + return ret; + } + + pr_info("tid = 0x%x, domain_type = %s.\n", tid, + get_domain_type_str(tid_type)); + + return (ssize_t)count; +} +static DEVICE_ATTR_WO(tid_type); + +static struct attribute *logic_ummu_attrs[] = { + &dev_attr_tid_type.attr, + NULL, +}; + +static 
struct attribute_group logic_ummu_group = { + .name = NULL, + .attrs = logic_ummu_attrs, +}; + +const struct attribute_group *logic_ummu_groups[] = { + &logic_ummu_group, + NULL, +}; + int logic_ummu_device_init(void) { int ret; @@ -2088,7 +2143,7 @@ int logic_ummu_device_init(void) pr_err("add logic ummu device failed\n"); goto out_free_fwnode; } - ret = iommu_device_sysfs_add(&logic_ummu.core_dev.iommu, NULL, NULL, + ret = iommu_device_sysfs_add(&logic_ummu.core_dev.iommu, NULL, logic_ummu_groups, "%s", "logic_ummu"); if (ret) { pr_err("register logic ummu to sysfs failed.\n"); diff --git a/drivers/ub/ubfi/ummu.c b/drivers/ub/ubfi/ummu.c index a1f6dd61c51c..b1e3618b8318 100644 --- a/drivers/ub/ubfi/ummu.c +++ b/drivers/ub/ubfi/ummu.c @@ -109,14 +109,15 @@ static int __init ummu_add_resources(struct platform_device *pdev, static int ummu_rename_device(struct platform_device *pdev, enum ubrt_node_type type) { - static int device_count; + static int device_ummu_count; + static int device_pmu_count; char new_name[32]; int ret; if (type == UBRT_UMMU) - ret = snprintf(new_name, sizeof(new_name), "ummu.%d", device_count); + ret = snprintf(new_name, sizeof(new_name), "ummu.%d", device_ummu_count++); else - ret = snprintf(new_name, sizeof(new_name), "ummu_pmu.%d", device_count); + ret = snprintf(new_name, sizeof(new_name), "ummu_pmu.%d", device_pmu_count++); if (ret < 0 || ret >= sizeof(new_name)) { dev_err(&pdev->dev, "failed to generate new device name\n"); @@ -130,8 +131,6 @@ static int ummu_rename_device(struct platform_device *pdev, enum ubrt_node_type } pdev->name = pdev->dev.kobj.name; - device_count++; - return 0; } -- Gitee From 6bc4859c62988f9b0bf27571f040570ec98ec15a Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 13:55:58 +0800 Subject: [PATCH 115/126] iommu/ummu-core: Duplicate EID are not allowed commit 304f42ff1e9f79bcc1bc06598a646ea1140ee014 openEuler GUID represents a globally unique identifier, while EID is a dynamically assigned unique 
identifier that may be reused. In certain usage scenarios, if the HOST's GUID is all zeros, it will bind to two different eid values, both of type EID_NONE. For a Virtual Machine, the GUID will only bind to one unique EID, with the type being EID_BYPASS. Fixes: 2778c6bb9286 ("iommu/ummu-core: add UMMU EID operation interfaces") Signed-off-by: Yanlong Zhu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/iommu/hisilicon/ummu-core/core_eid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/hisilicon/ummu-core/core_eid.c b/drivers/iommu/hisilicon/ummu-core/core_eid.c index fd3360d400d0..9877bb892904 100644 --- a/drivers/iommu/hisilicon/ummu-core/core_eid.c +++ b/drivers/iommu/hisilicon/ummu-core/core_eid.c @@ -55,7 +55,7 @@ int ummu_core_add_eid(guid_t *guid, eid_t eid, enum eid_type type) /* cached the eid */ spin_lock(&eid_func_lock); list_for_each_entry(info, &eid_pre_insmode, list) - if (guid_equal(guid, &info->guid) && info->eid == eid) { + if (info->eid == eid) { ret = -EEXIST; goto out_unlock_spin; } @@ -95,7 +95,7 @@ void ummu_core_del_eid(guid_t *guid, eid_t eid, enum eid_type type) /* uncache the eid */ spin_lock(&eid_func_lock); list_for_each_entry_safe(info, next, &eid_pre_insmode, list) - if (guid_equal(guid, &info->guid) && info->eid == eid) { + if (info->eid == eid) { list_del(&info->list); kfree(info); } -- Gitee From ce62212ba833fbc5981ca17b519c7863c20ba0cf Mon Sep 17 00:00:00 2001 From: Liming An Date: Wed, 10 Dec 2025 17:52:16 +0800 Subject: [PATCH 116/126] iommu/ummu: Remove redundant CONFIG_UB_UBRT_PLAT_DEV and code commit fa49aa1b3104a1abe0a76d4f3c82994a33447382 openEuler In the ACPI method, during the process of UMMU requesting interrupts, the interrupt ID can be directly obtained from the IORT table, eliminating the need to obtain it from the UBR table. 
Therefore, the CONFIG_UB_UBRT_PLAT_DEV and ubrt_pmsi_get_interrupt_id() functions, which were used to obtain the interrupt ID from the UBR table, have become redundant code. Fixes: 010c6364261c ("ub: ubfi: Parsing ummu node in the ubrt table") Signed-off-by: Jingbin Wu Signed-off-by: Liming An Signed-off-by: zhaolichang <943677312@qq.com> --- arch/arm64/configs/tencent.config | 1 - drivers/iommu/hisilicon/Kconfig | 2 +- drivers/irqchip/irq-gic-v3-its-platform-msi.c | 7 ++--- drivers/ub/ubfi/Kconfig | 12 -------- drivers/ub/ubfi/irq.c | 29 ------------------- include/ub/ubfi/ubfi.h | 7 ----- 6 files changed, 3 insertions(+), 55 deletions(-) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 7ca1fa88f877..3fe817902154 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1819,7 +1819,6 @@ CONFIG_UB_UBUS=y # UB Bus Core Driver CONFIG_UB_UBUS_BUS=m CONFIG_UB_UBFI=m -CONFIG_UB_UBRT_PLAT_DEV=y CONFIG_UB_UBUS_USI=y CONFIG_ARM_GIC_V3_ITS_UBUS=y CONFIG_VFIO_UB=m diff --git a/drivers/iommu/hisilicon/Kconfig b/drivers/iommu/hisilicon/Kconfig index e41f492a7ca6..60e612b57ca6 100644 --- a/drivers/iommu/hisilicon/Kconfig +++ b/drivers/iommu/hisilicon/Kconfig @@ -18,7 +18,7 @@ config UB_UMMU_BASE config UB_UMMU tristate "Hisilicon UB MMU Support" depends on ARM64 && ARCH_HISI - depends on UB_UBUS && UB_UBFI && UB_UBRT_PLAT_DEV + depends on UB_UBUS && UB_UBFI default n select IOMMU_API select IOMMU_IO_PGTABLE_LPAE diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 1ca7ef6186a2..884b088f4873 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -80,13 +80,10 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, } #endif - if (dev->of_node) { + if (dev->of_node) ret = of_pmsi_get_dev_id(domain, dev, &dev_id); - } else { + else ret = iort_pmsi_get_dev_id(dev, 
&dev_id); - if (ret) - ret = ubrt_pmsi_get_interrupt_id(dev, &dev_id); - } if (ret) return ret; diff --git a/drivers/ub/ubfi/Kconfig b/drivers/ub/ubfi/Kconfig index d3889afb2452..4cb8a264fe9a 100644 --- a/drivers/ub/ubfi/Kconfig +++ b/drivers/ub/ubfi/Kconfig @@ -15,16 +15,4 @@ config UB_UBFI within Linux. To compile this driver as a module, choose M here. Say 'M' here unless you know what you are doing -config UB_UBRT_PLAT_DEV - bool "Enable UBRT platform device support" - depends on UB_UBUS - default n - help - This option enables the configuration of platform devices related to - the ub ubrt table. - If enabled, the UBRT-related platform device will obtain the - interrupt ID from the ubrt table instead of the IORT table. - The obtained interrupt ID will be used for the MSI interrupt of the - UBRT-related platform device. - endif diff --git a/drivers/ub/ubfi/irq.c b/drivers/ub/ubfi/irq.c index 5835bc8421b3..3f449ff5aa0b 100644 --- a/drivers/ub/ubfi/irq.c +++ b/drivers/ub/ubfi/irq.c @@ -77,32 +77,3 @@ void ubrt_unregister_gsi(u32 hwirq) } EXPORT_SYMBOL_GPL(ubrt_unregister_gsi); -#if IS_ENABLED(CONFIG_UB_UBRT_PLAT_DEV) -int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id) -{ - struct ubrt_fwnode *fw; - struct ummu_node *node; - - if (!dev->fwnode) - return -EINVAL; - - fw = ubrt_fwnode_get(dev->fwnode); - if (!fw) - return -ENODEV; - - switch (fw->type) { - case UBRT_UMMU: - node = (struct ummu_node *)fw->ubrt_node; - *interrupt_id = node->intr_id; - break; - case UBRT_UMMU_PMU: - node = (struct ummu_node *)fw->ubrt_node; - *interrupt_id = node->pmu_intr_id; - break; - default: - return -ENODEV; - } - dev_info(dev, "ubct pmsi successfully obtained interrupt id[0x%x].\n", *interrupt_id); - return 0; -} -#endif diff --git a/include/ub/ubfi/ubfi.h b/include/ub/ubfi/ubfi.h index 5e9a3c2a287c..35d7b195ca16 100644 --- a/include/ub/ubfi/ubfi.h +++ b/include/ub/ubfi/ubfi.h @@ -219,11 +219,4 @@ extern u8 ubc_feature; void ubrt_iommu_get_resv_regions(struct 
device *dev, struct list_head *list); #endif /* CONFIG_UB_UBFI */ -#if IS_ENABLED(CONFIG_UB_UBRT_PLAT_DEV) -int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id); -#else -static inline int ubrt_pmsi_get_interrupt_id(struct device *dev, u32 *interrupt_id) -{ return -ENODEV; } -#endif /* CONFIG_UB_UBRT_PLAT_DEV */ - #endif /* _UB_UBFI_UBFI_H_ */ -- Gitee From 3693090e5bdb60b506b38dfbe6d892f923e50aee Mon Sep 17 00:00:00 2001 From: Zhipeng Lu Date: Wed, 24 Dec 2025 13:14:28 +0800 Subject: [PATCH 117/126] ub: cdma: add ioctl logs and error codes commit 9aa302d3264cab6ff26c2df119f1b7f30bdc7edb openEuler add ioctl logs and error codes Fixes: 34c67ed8f4c1 ("ub: cdma: support for cdma kernelspace north-south compatibility requirements") Signed-off-by: Zhipeng Lu Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/cdma/cdma_event.c | 26 ++++++++++++++++++-------- drivers/ub/cdma/cdma_ioctl.c | 2 +- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/ub/cdma/cdma_event.c b/drivers/ub/cdma/cdma_event.c index e8ecb7f8c4f6..240934a733ed 100644 --- a/drivers/ub/cdma/cdma_event.c +++ b/drivers/ub/cdma/cdma_event.c @@ -181,22 +181,25 @@ static long cdma_jfce_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cdma_jfce *jfce = (struct cdma_jfce *)filp->private_data; - unsigned int nr; - int ret; + unsigned int nr = (unsigned int)_IOC_NR(cmd); + long ret = -ENOIOCTLCMD; - if (!arg || !jfce || _IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { - pr_err("invalid parameter, cmd = %u.\n", cmd); + if (!arg || !jfce) { + pr_err("jfce ioctl invalid parameter.\n"); return -EINVAL; } - nr = (unsigned int)_IOC_NR(cmd); + if (_IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { + pr_err("jfce ioctl invalid cmd type, cmd = %u.\n", cmd); + return ret; + } + switch (nr) { case JFCE_CMD_WAIT_EVENT: ret = cdma_jfce_wait(jfce, filp, arg); break; default: - ret = -ENOIOCTLCMD; - break; + pr_err("jfce ioctl wrong nr = %u.\n", nr); } return ret; @@ -588,8 
+591,15 @@ static long cdma_jfae_ioctl(struct file *filp, unsigned int cmd, unsigned long a unsigned int nr = (unsigned int)_IOC_NR(cmd); long ret = -ENOIOCTLCMD; - if (!jfae) + if (!jfae) { + pr_err("jfae ioctl invalid parameter.\n"); return -EINVAL; + } + + if (_IOC_TYPE(cmd) != CDMA_EVENT_CMD_MAGIC) { + pr_err("jfae ioctl invalid cmd type, cmd = %u.\n", cmd); + return ret; + } switch (nr) { case JFAE_CMD_GET_ASYNC_EVENT: diff --git a/drivers/ub/cdma/cdma_ioctl.c b/drivers/ub/cdma/cdma_ioctl.c index 4a30cbbd383f..70c3e0d3b4f2 100644 --- a/drivers/ub/cdma/cdma_ioctl.c +++ b/drivers/ub/cdma/cdma_ioctl.c @@ -811,7 +811,7 @@ int cdma_cmd_parse(struct cdma_file *cfile, struct cdma_ioctl_hdr *hdr) dev_err(cdev->dev, "invalid cdma user command or no handler, command = %u\n", hdr->command); - return -EINVAL; + return -ENOIOCTLCMD; } mutex_lock(&cfile->ctx_mutex); -- Gitee From d9bae9c5f2babb495aac515e2c9b797b214194d2 Mon Sep 17 00:00:00 2001 From: Yuhao Xiang Date: Thu, 25 Dec 2025 11:07:01 +0800 Subject: [PATCH 118/126] ub:ubfi:skipped address of subtable 0 in ubrt commit 89c7290fa351a4f062cb6508570e4a388d19a6b9 openEuler In the ubios method, the address of subtable 0 is an interval address, which is skipped during processing Fixes: 312a6b7fabe9 ("ub:ubfi: ubfi driver parse ubc information from ubrt") Signed-off-by: Yuhao Xiang Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/ubfi/ubrt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/ub/ubfi/ubrt.c b/drivers/ub/ubfi/ubrt.c index ecf975526e72..463707f4bcc2 100644 --- a/drivers/ub/ubfi/ubrt.c +++ b/drivers/ub/ubfi/ubrt.c @@ -143,6 +143,8 @@ int handle_dts_ubrt(void) pr_info("ubios sub table count is %u\n", ubios_table->count); for (i = 0; i < ubios_table->count; i++) { + if (ubios_table->sub_tables[i] == 0) + continue; memset(name, 0, UB_TABLE_HEADER_NAME_LEN); ret = get_ubrt_table_name(name, ubios_table->sub_tables[i]); if (ret) -- Gitee From 24543d411c071be2df164a0278c0bdb1f3b70652 Mon Sep 17 
00:00:00 2001 From: Wei Qin Date: Thu, 27 Nov 2025 17:01:31 +0800 Subject: [PATCH 119/126] ub: udma: Support retrieving the migr field. commit 1685c9263f82b70a4f53a3719baf0de8847f9cec openEuler This patch support retrieving the migr field. Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_ctrlq_tp.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h index bdd4617cb4c4..9baf7d1c8495 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.h @@ -51,7 +51,9 @@ struct udma_ctrlq_tpid { uint32_t tpid : 24; uint32_t tpn_cnt : 8; uint32_t tpn_start : 24; - uint32_t rsv : 8; + uint32_t rsv0 : 4; + uint32_t migr : 1; + uint32_t rsv1 : 3; }; struct udma_ctrlq_tpid_list_rsp { -- Gitee From d6b8fe592fe1c955de941852bba02912d36c8386 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Wed, 10 Dec 2025 21:01:41 +0800 Subject: [PATCH 120/126] ub: udma: bugfix related to print location. commit 4650a8abb5f2656fbe01b9bc9c0898f3f369fb53 openEuler This patch fix a bug about print location. 
Fixes: d0c38b53548d ("ub: udma: Support query ub memory info.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 12 +++++++----- drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 1 - drivers/ub/urma/hw/udma/udma_ctx.c | 8 +++++--- drivers/ub/urma/hw/udma/udma_eq.c | 10 +++++----- drivers/ub/urma/hw/udma/udma_jfr.c | 2 +- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 07d57a5ce96b..830c22f732a1 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -249,8 +249,8 @@ int udma_id_alloc_auto_grow(struct udma_dev *udma_dev, struct udma_ida *ida_tabl id = ida_alloc_range(&ida_table->ida, ida_table->min, ida_table->max, GFP_ATOMIC); if (id < 0) { - dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); spin_unlock(&ida_table->lock); + dev_err(udma_dev->dev, "failed to alloc id, ret = %d.\n", id); return id; } } @@ -291,9 +291,9 @@ int udma_specify_adv_id(struct udma_dev *udma_dev, struct udma_group_bitmap *bit spin_lock(&bitmap_table->lock); if ((bit[block] & (1U << bit_idx)) == 0) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "user specify id %u been used.\n", user_id); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } @@ -347,10 +347,10 @@ int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitma ; if (i == bitmap_cnt) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "all bitmaps have been used, bitmap_cnt = %u.\n", bitmap_cnt); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } @@ -370,9 +370,9 @@ int udma_adv_id_alloc(struct udma_dev *udma_dev, struct udma_group_bitmap *bitma ; if (i == bitmap_cnt || (i + 1) * NUM_JETTY_PER_GROUP > bitmap_table->n_bits) { + spin_unlock(&bitmap_table->lock); dev_err(udma_dev->dev, "no completely bitmap for Jetty group.\n"); - spin_unlock(&bitmap_table->lock); return -ENOMEM; } 
@@ -858,9 +858,11 @@ void udma_init_hugepage(struct udma_dev *dev) void udma_destroy_hugepage(struct udma_dev *dev) { struct udma_hugepage_priv *priv; + struct udma_hugepage_priv *tmp; mutex_lock(&dev->hugepage_lock); - list_for_each_entry(priv, &dev->hugepage_list, list) { + list_for_each_entry_safe(priv, tmp, &dev->hugepage_list, list) { + list_del(&priv->list); dev_info(dev->dev, "unmap_hugepage, 2m_page_num=%u.\n", priv->va_len >> UDMA_HUGEPAGE_SHIFT); udma_unpin_k_addr(priv->umem); diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 86d68ace7000..2782fbe92907 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -73,7 +73,6 @@ static struct udma_ue_idx_table *udma_find_ue_idx_by_tpn(struct udma_dev *udev, xa_lock(&udev->tpn_ue_idx_table); tp_ue_idx_info = xa_load(&udev->tpn_ue_idx_table, tpn); if (!tp_ue_idx_info) { - dev_warn(udev->dev, "ue idx info not exist, tpn %u.\n", tpn); xa_unlock(&udev->tpn_ue_idx_table); return NULL; diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index ccc3b4905af9..8caa0765b420 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -96,6 +96,7 @@ int udma_free_ucontext(struct ubcore_ucontext *ucontext) { struct udma_dev *udma_dev = to_udma_dev(ucontext->ub_dev); struct udma_hugepage_priv *priv; + struct udma_hugepage_priv *tmp; struct vm_area_struct *vma; struct udma_context *ctx; int ret; @@ -111,7 +112,8 @@ int udma_free_ucontext(struct ubcore_ucontext *ucontext) ummu_sva_unbind_device(ctx->sva); mutex_lock(&ctx->hugepage_lock); - list_for_each_entry(priv, &ctx->hugepage_list, list) { + list_for_each_entry_safe(priv, tmp, &ctx->hugepage_list, list) { + list_del(&priv->list); if (current->mm) { mmap_write_lock(current->mm); vma = find_vma(current->mm, (unsigned long)priv->va_base); @@ -148,9 +150,9 @@ static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct 
ubcore_ucontext *uc xa_lock(&dev->jetty_table.xa); sq = xa_load(&dev->jetty_table.xa, j_id); if (!sq) { + xa_unlock(&dev->jetty_table.xa); dev_err(dev->dev, "mmap failed, j_id: %llu not exist\n", j_id); - xa_unlock(&dev->jetty_table.xa); return -EINVAL; } @@ -160,9 +162,9 @@ static int udma_mmap_jetty_dsqe(struct udma_dev *dev, struct ubcore_ucontext *uc jetty_uctx = to_udma_jfs_from_queue(sq)->ubcore_jfs.uctx; if (jetty_uctx != uctx) { + xa_unlock(&dev->jetty_table.xa); dev_err(dev->dev, "mmap failed, j_id: %llu, uctx invalid\n", j_id); - xa_unlock(&dev->jetty_table.xa); return -EINVAL; } xa_unlock(&dev->jetty_table.xa); diff --git a/drivers/ub/urma/hw/udma/udma_eq.c b/drivers/ub/urma/hw/udma/udma_eq.c index d3b6813b1d55..dab9130df761 100644 --- a/drivers/ub/urma/hw/udma/udma_eq.c +++ b/drivers/ub/urma/hw/udma/udma_eq.c @@ -84,9 +84,9 @@ static int udma_ae_jfs_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock(&udma_dev->jetty_table.xa); udma_sq = (struct udma_jetty_queue *)xa_load(&udma_dev->jetty_table.xa, queue_num); if (!udma_sq) { + xa_unlock(&udma_dev->jetty_table.xa); dev_warn(udma_dev->dev, "async event for bogus queue number = %u.\n", queue_num); - xa_unlock(&udma_dev->jetty_table.xa); return -EINVAL; } @@ -138,9 +138,9 @@ static int udma_ae_jfr_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock(&udma_dev->jfr_table.xa); udma_jfr = (struct udma_jfr *)xa_load(&udma_dev->jfr_table.xa, queue_num); if (!udma_jfr) { + xa_unlock(&udma_dev->jfr_table.xa); dev_warn(udma_dev->dev, "async event for bogus jfr number = %u.\n", queue_num); - xa_unlock(&udma_dev->jfr_table.xa); return -EINVAL; } @@ -172,9 +172,9 @@ static int udma_ae_jfc_check_err(struct auxiliary_device *adev, uint32_t queue_n xa_lock_irqsave(&udma_dev->jfc_table.xa, flags); udma_jfc = (struct udma_jfc *)xa_load(&udma_dev->jfc_table.xa, queue_num); if (!udma_jfc) { + xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); dev_warn(udma_dev->dev, "async event for bogus jfc 
number = %u.\n", queue_num); - xa_unlock_irqrestore(&udma_dev->jfc_table.xa, flags); return -EINVAL; } @@ -206,9 +206,9 @@ static int udma_ae_jetty_group_check_err(struct auxiliary_device *adev, uint32_t xa_lock(&udma_dev->jetty_grp_table.xa); udma_jetty_grp = (struct udma_jetty_grp *)xa_load(&udma_dev->jetty_grp_table.xa, queue_num); if (!udma_jetty_grp) { + xa_unlock(&udma_dev->jetty_grp_table.xa); dev_warn(udma_dev->dev, "async event for bogus jetty group number = %u.\n", queue_num); - xa_unlock(&udma_dev->jetty_grp_table.xa); return -EINVAL; } @@ -373,9 +373,9 @@ static int udma_save_tpn_ue_idx_info(struct udma_dev *udma_dev, uint8_t ue_idx, tp_ue_idx_info = xa_load(&udma_dev->tpn_ue_idx_table, tpn); if (tp_ue_idx_info) { if (tp_ue_idx_info->num >= UDMA_UE_NUM) { + xa_unlock(&udma_dev->tpn_ue_idx_table); dev_err(udma_dev->dev, "num exceeds the maximum value.\n"); - xa_unlock(&udma_dev->tpn_ue_idx_table); return -EINVAL; } diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index 8e98319715e0..b4ba66f934ec 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -393,10 +393,10 @@ static int udma_alloc_jfr_id(struct udma_dev *udma_dev, uint32_t cfg_id, uint32_ id = ida_alloc_range(&ida_table->ida, min = ida_table->min, max, GFP_ATOMIC); if (id < 0) { + spin_unlock(&ida_table->lock); dev_err(udma_dev->dev, "alloc jfr id range (%u - %u) failed, ret = %d.\n", min, max, id); - spin_unlock(&ida_table->lock); return id; } -- Gitee From 2c7f00c2da0fb66a47543cc8b39c67342b313d71 Mon Sep 17 00:00:00 2001 From: qinwei0930 Date: Mon, 22 Dec 2025 18:51:35 +0800 Subject: [PATCH 121/126] ub: udma: Bugfix related to 2M hugepage. commit 5be85f78760f3a4c849e63126199f8cf09b35ca6 openEuler This patch fixes a bug related to the 2M hugepage function. 
Fixes: 1ae22d037be8 ("ub: udma: Support 2M hugepage function") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- include/uapi/ub/urma/udma/udma_abi.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/uapi/ub/urma/udma/udma_abi.h b/include/uapi/ub/urma/udma/udma_abi.h index 5859f5254b5e..257962765d5a 100644 --- a/include/uapi/ub/urma/udma/udma_abi.h +++ b/include/uapi/ub/urma/udma/udma_abi.h @@ -6,9 +6,9 @@ #include -#define MAP_COMMAND_MASK 0xff -#define MAP_INDEX_MASK 0xffffff -#define MAP_INDEX_SHIFT 8 +#define MAP_COMMAND_MASK 0xf +#define MAP_INDEX_MASK 0xfffffff +#define MAP_INDEX_SHIFT 4 #define UDMA_SEGMENT_ACCESS_GUARD (1UL << 5) @@ -112,9 +112,9 @@ struct udma_create_jfr_resp { }; enum db_mmap_type { + UDMA_MMAP_HUGEPAGE, UDMA_MMAP_JFC_PAGE, UDMA_MMAP_JETTY_DSQE, - UDMA_MMAP_HUGEPAGE, }; enum { -- Gitee From 185eb83e3dcdb502a7d1fde48d6328b782a6ea6d Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Mon, 29 Dec 2025 16:51:06 +0800 Subject: [PATCH 122/126] ub: udma: bugfix related to spin lock. commit 858a66525c60e5396298e1a8122359f70f4f5578 openEuler This patch fixes a bug related to the spin lock when cleaning the jfc. 
Fixes: d72435589dce ("ub: udma: Support poll jfc.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_jfc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_jfc.c b/drivers/ub/urma/hw/udma/udma_jfc.c index 50ef624629df..961dbfa28107 100644 --- a/drivers/ub/urma/hw/udma/udma_jfc.c +++ b/drivers/ub/urma/hw/udma/udma_jfc.c @@ -1068,6 +1068,7 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * struct udma_jfc *udma_jfc = to_udma_jfc(jfc); struct udma_jfc_cqe *dest; struct udma_jfc_cqe *cqe; + unsigned long flags; struct ubcore_cr cr; uint32_t nfreed = 0; uint32_t local_id; @@ -1078,7 +1079,7 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * return; if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_lock(&udma_jfc->lock); + spin_lock_irqsave(&udma_jfc->lock, flags); for (pi = udma_jfc->ci; get_next_cqe(udma_jfc, pi) != NULL; ++pi) { if (pi > udma_jfc->ci + udma_jfc->buf.entry_cnt) @@ -1113,5 +1114,5 @@ void udma_clean_jfc(struct ubcore_jfc *jfc, uint32_t jetty_id, struct udma_dev * } if (!jfc->jfc_cfg.flag.bs.lock_free) - spin_unlock(&udma_jfc->lock); + spin_unlock_irqrestore(&udma_jfc->lock, flags); } -- Gitee From 93eb6479a412f64529f02853916e08930997ee29 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 5 Dec 2025 15:06:48 +0800 Subject: [PATCH 123/126] ub: udma: bugfix for rx close. commit 82ba2bc6121dd1485447223291fd68cfbfaa1544 openEuler This patch fixes a bug related to rx close. 
Fixes: 534649e2be8e ("ub: udma: Support get tp list.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 44a93fd000b0..99c9fa2fe219 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1123,6 +1123,10 @@ int udma_probe(struct auxiliary_device *adev, void udma_remove(struct auxiliary_device *adev) { +#define MIN_SLEEP_TIME 100 +#define MAX_SLEEP_TIME 800 +#define TIME_SLEEP_RATE 2 + uint32_t wait_time = MIN_SLEEP_TIME; struct udma_dev *udma_dev; ubase_reset_unregister(adev); @@ -1135,12 +1139,14 @@ void udma_remove(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - if (udma_close_ue_rx(udma_dev, false, false, false, 0)) { - mutex_unlock(&udma_reset_mutex); - dev_err(&adev->dev, "udma close ue rx failed in remove process.\n"); - return; + while (true) { + if (!udma_close_ue_rx(udma_dev, false, false, false, 0)) + break; + msleep(wait_time); + if (wait_time < MAX_SLEEP_TIME) + wait_time *= TIME_SLEEP_RATE; + dev_err_ratelimited(&adev->dev, "udma close ue rx failed in remove process.\n"); } - udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); @@ -1150,7 +1156,8 @@ void udma_remove(struct auxiliary_device *adev) udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); - (void)ubase_activate_dev(adev); + if (is_rmmod) + (void)ubase_activate_dev(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); dev_info(&adev->dev, "udma device remove success.\n"); -- Gitee From 879624aa6385f5c7d3e9f5f0fefcf13447ab9739 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Fri, 5 Dec 2025 16:04:10 +0800 Subject: [PATCH 124/126] ub: udma: bugfix related to init xa flags. 
commit 430e4e841b46b8d9e07514cdbfe5ee3544e2ac02 openEuler This patch fix a bug about init xa flags. Fixes: d0c38b53548d ("ub: udma: Support query ub memory info.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_common.c | 15 +++++++++++---- drivers/ub/urma/hw/udma/udma_common.h | 4 ++-- drivers/ub/urma/hw/udma/udma_ctl.c | 2 ++ drivers/ub/urma/hw/udma/udma_ctrlq_tp.c | 12 +++++------- drivers/ub/urma/hw/udma/udma_ctx.c | 3 ++- drivers/ub/urma/hw/udma/udma_db.c | 1 + drivers/ub/urma/hw/udma/udma_jfr.c | 2 +- drivers/ub/urma/hw/udma/udma_jfs.c | 1 + drivers/ub/urma/hw/udma/udma_main.c | 12 ++++++------ 9 files changed, 31 insertions(+), 21 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_common.c b/drivers/ub/urma/hw/udma/udma_common.c index 830c22f732a1..8d5336622a2c 100644 --- a/drivers/ub/urma/hw/udma/udma_common.c +++ b/drivers/ub/urma/hw/udma/udma_common.c @@ -418,15 +418,21 @@ static void udma_init_ida_table(struct udma_ida *ida_table, uint32_t max, uint32 ida_table->next = min; } -void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min) +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min, bool irq_lock) { udma_init_ida_table(&table->ida_table, max, min); - xa_init(&table->xa); + if (irq_lock) + xa_init_flags(&table->xa, XA_FLAGS_LOCK_IRQ); + else + xa_init(&table->xa); } -void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex) +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex, bool irq_lock) { - xa_init(table); + if (irq_lock) + xa_init_flags(table, XA_FLAGS_LOCK_IRQ); + else + xa_init(table); mutex_init(udma_mutex); } @@ -582,6 +588,7 @@ int udma_alloc_normal_buf(struct udma_dev *udma_dev, size_t memory_size, if (IS_ERR(buf->umem)) { ret = PTR_ERR(buf->umem); vfree(buf->aligned_va); + buf->aligned_va = NULL; dev_err(udma_dev->dev, "pin kernel buf failed, ret = %d.\n", ret); return ret; } diff 
--git a/drivers/ub/urma/hw/udma/udma_common.h b/drivers/ub/urma/hw/udma/udma_common.h index dee92a4186d3..22992e94e2e1 100644 --- a/drivers/ub/urma/hw/udma/udma_common.h +++ b/drivers/ub/urma/hw/udma/udma_common.h @@ -316,8 +316,8 @@ struct udma_tp_ctx { struct ubcore_umem *udma_umem_get(struct udma_umem_param *param); void udma_umem_release(struct ubcore_umem *umem, bool is_kernel); -void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min); -void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex); +void udma_init_udma_table(struct udma_table *table, uint32_t max, uint32_t min, bool irq_lock); +void udma_init_udma_table_mutex(struct xarray *table, struct mutex *udma_mutex, bool irq_lock); void udma_destroy_npu_cb_table(struct udma_dev *dev); void udma_destroy_udma_table(struct udma_dev *dev, struct udma_table *table, const char *table_name); diff --git a/drivers/ub/urma/hw/udma/udma_ctl.c b/drivers/ub/urma/hw/udma/udma_ctl.c index af0568f3ce74..f2451c25fb02 100644 --- a/drivers/ub/urma/hw/udma/udma_ctl.c +++ b/drivers/ub/urma/hw/udma/udma_ctl.c @@ -1008,6 +1008,7 @@ static int copy_out_cqe_data_from_user(struct udma_dev *udma_dev, sizeof(uint32_t), GFP_KERNEL); if (!aux_info_out->aux_info_value) { kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; return -ENOMEM; } } @@ -1206,6 +1207,7 @@ static int copy_out_ae_data_from_user(struct udma_dev *udma_dev, sizeof(uint32_t), GFP_KERNEL); if (!aux_info_out->aux_info_value) { kfree(aux_info_out->aux_info_type); + aux_info_out->aux_info_type = NULL; return -ENOMEM; } } diff --git a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c index 2782fbe92907..ae9549de1e22 100644 --- a/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c +++ b/drivers/ub/urma/hw/udma/udma_ctrlq_tp.c @@ -331,7 +331,8 @@ static int udma_ctrlq_store_one_tpid(struct udma_dev *udev, struct xarray *ctrlq int ret; if (debug_switch) - dev_info(udev->dev, "udma ctrlq 
store one tpid start. tpid %u\n", tpid->tpid); + dev_info_ratelimited(udev->dev, "udma ctrlq store one tpid start. tpid %u\n", + tpid->tpid); if (xa_load(ctrlq_tpid_table, tpid->tpid)) { dev_warn(udev->dev, @@ -418,7 +419,7 @@ static int udma_ctrlq_store_tpid_list(struct udma_dev *udev, int i; if (debug_switch) - dev_info(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", + dev_info_ratelimited(udev->dev, "udma ctrlq store tpid list tp_list_cnt = %u.\n", tpid_list_resp->tp_list_cnt); for (i = 0; i < (int)tpid_list_resp->tp_list_cnt; i++) { @@ -775,10 +776,6 @@ int udma_active_tp(struct ubcore_device *dev, struct ubcore_active_tp_cfg *activ struct udma_dev *udma_dev = to_udma_dev(dev); int ret; - if (debug_switch) - udma_dfx_ctx_print(udma_dev, "udma active tp ex", active_cfg->tp_handle.bs.tpid, - sizeof(struct ubcore_active_tp_cfg) / sizeof(uint32_t), - (uint32_t *)active_cfg); ret = udma_ctrlq_set_active_tp_ex(udma_dev, active_cfg); if (ret) dev_err(udma_dev->dev, "Failed to set active tp msg, ret %d.\n", ret); @@ -792,7 +789,8 @@ int udma_deactive_tp(struct ubcore_device *dev, union ubcore_tp_handle tp_handle struct udma_dev *udma_dev = to_udma_dev(dev); if (debug_switch) - dev_info(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", tp_handle.bs.tpid); + dev_info_ratelimited(udma_dev->dev, "udma deactivate tp ex tp_id = %u\n", + tp_handle.bs.tpid); return udma_k_ctrlq_deactive_tp(udma_dev, tp_handle, udata); } diff --git a/drivers/ub/urma/hw/udma/udma_ctx.c b/drivers/ub/urma/hw/udma/udma_ctx.c index 8caa0765b420..e842b523ab33 100644 --- a/drivers/ub/urma/hw/udma/udma_ctx.c +++ b/drivers/ub/urma/hw/udma/udma_ctx.c @@ -200,7 +200,8 @@ static int udma_mmap_hugepage(struct udma_dev *dev, struct ubcore_ucontext *uctx return -EINVAL; } - vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY); + vm_flags_set(vma, VM_IO | VM_LOCKED | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | + VM_WIPEONFORK); vma->vm_page_prot = 
__pgprot(((~PTE_ATTRINDX_MASK) & vma->vm_page_prot.pgprot) | PTE_ATTRINDX(MT_NORMAL)); if (udma_alloc_u_hugepage(to_udma_context(uctx), vma)) { diff --git a/drivers/ub/urma/hw/udma/udma_db.c b/drivers/ub/urma/hw/udma/udma_db.c index c66d6b23b2e8..ba8c3ffd265d 100644 --- a/drivers/ub/urma/hw/udma/udma_db.c +++ b/drivers/ub/urma/hw/udma/udma_db.c @@ -69,6 +69,7 @@ void udma_unpin_sw_db(struct udma_context *ctx, struct udma_sw_db *db) list_del(&db->page->list); udma_umem_release(db->page->umem, false); kfree(db->page); + db->page = NULL; } mutex_unlock(&ctx->pgdir_mutex); diff --git a/drivers/ub/urma/hw/udma/udma_jfr.c b/drivers/ub/urma/hw/udma/udma_jfr.c index b4ba66f934ec..a80f2cc0f1aa 100644 --- a/drivers/ub/urma/hw/udma/udma_jfr.c +++ b/drivers/ub/urma/hw/udma/udma_jfr.c @@ -81,7 +81,7 @@ static int udma_get_k_jfr_buf(struct udma_dev *dev, struct udma_jfr *jfr) goto err_alloc_db; } - udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0); + udma_init_udma_table(&jfr->idx_que.jfr_idx_table, jfr->idx_que.buf.entry_cnt - 1, 0, false); jfr->rq.tid = dev->tid; diff --git a/drivers/ub/urma/hw/udma/udma_jfs.c b/drivers/ub/urma/hw/udma/udma_jfs.c index 5d520a0cea00..5875e7e0ff80 100644 --- a/drivers/ub/urma/hw/udma/udma_jfs.c +++ b/drivers/ub/urma/hw/udma/udma_jfs.c @@ -102,6 +102,7 @@ void udma_free_sq_buf(struct udma_dev *dev, struct udma_jetty_queue *sq) if (sq->buf.kva) { udma_k_free_buf(dev, &sq->buf); kfree(sq->wrid); + sq->wrid = NULL; return; } diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 99c9fa2fe219..686e4a02026c 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -365,15 +365,15 @@ int udma_init_tables(struct udma_dev *udma_dev) } udma_init_udma_table(&udma_dev->jfr_table, udma_dev->caps.jfr.max_cnt + - udma_dev->caps.jfr.start_idx - 1, udma_dev->caps.jfr.start_idx); + udma_dev->caps.jfr.start_idx - 1, udma_dev->caps.jfr.start_idx, false); 
udma_init_udma_table(&udma_dev->jfc_table, udma_dev->caps.jfc.max_cnt + - udma_dev->caps.jfc.start_idx - 1, udma_dev->caps.jfc.start_idx); + udma_dev->caps.jfc.start_idx - 1, udma_dev->caps.jfc.start_idx, true); udma_init_udma_table(&udma_dev->jetty_grp_table, udma_dev->caps.jetty_grp.max_cnt + udma_dev->caps.jetty_grp.start_idx - 1, - udma_dev->caps.jetty_grp.start_idx); - udma_init_udma_table_mutex(&udma_dev->ksva_table, &udma_dev->ksva_mutex); - udma_init_udma_table_mutex(&udma_dev->npu_nb_table, &udma_dev->npu_nb_mutex); - xa_init(&udma_dev->tpn_ue_idx_table); + udma_dev->caps.jetty_grp.start_idx, true); + udma_init_udma_table_mutex(&udma_dev->ksva_table, &udma_dev->ksva_mutex, false); + udma_init_udma_table_mutex(&udma_dev->npu_nb_table, &udma_dev->npu_nb_mutex, true); + xa_init_flags(&udma_dev->tpn_ue_idx_table, XA_FLAGS_LOCK_IRQ); xa_init(&udma_dev->crq_nb_table); ida_init(&udma_dev->rsvd_jetty_ida_table.ida); mutex_init(&udma_dev->disable_ue_rx_mutex); -- Gitee From d38071fe0faab5935d122b6e2453858e710ad178 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Mon, 15 Dec 2025 21:21:43 +0800 Subject: [PATCH 125/126] ub: udma: Bugfix related to crq event unregister. commit e584b00d3d0f88496dca0256097c9a62f062354e openEuler This patch fixes a bug related to crq event unregistration. 
Fixes: 1ae22d037be8 ("ub: udma: Support 2M hugepage function") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 686e4a02026c..6b4059cadc96 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -891,11 +891,10 @@ static int udma_register_event(struct auxiliary_device *adev) return ret; } -static void udma_unregister_event(struct auxiliary_device *adev) +static void udma_unregister_none_crq_event(struct auxiliary_device *adev) { ubase_port_unregister(adev); udma_unregister_ctrlq_event(adev); - udma_unregister_crq_event(adev); udma_unregister_ce_event(adev); udma_unregister_ae_event(adev); } @@ -1021,7 +1020,8 @@ static int udma_init_dev(struct auxiliary_device *adev) err_set_ubcore_dev: udma_unregister_activate_workqueue(udma_dev); err_register_act_init: - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); + udma_unregister_crq_event(adev); err_event_register: udma_destroy_dev(udma_dev); err_create: @@ -1096,12 +1096,12 @@ void udma_reset_uninit(struct auxiliary_device *adev) return; } - /* Event should unregister before unset ubcore dev. */ - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); udma_open_ue_rx(udma_dev, false, false, true, 0); + udma_unregister_crq_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); } @@ -1149,15 +1149,14 @@ void udma_remove(struct auxiliary_device *adev) } udma_dev->status = UDMA_SUSPEND; udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); - - /* Event should unregister before unset ubcore dev. 
*/ - udma_unregister_event(adev); + udma_unregister_none_crq_event(adev); udma_unset_ubcore_dev(udma_dev); udma_unregister_debugfs(udma_dev); udma_unregister_activate_workqueue(udma_dev); check_and_wait_flush_done(udma_dev); if (is_rmmod) (void)ubase_activate_dev(adev); + udma_unregister_crq_event(adev); udma_destroy_dev(udma_dev); mutex_unlock(&udma_reset_mutex); dev_info(&adev->dev, "udma device remove success.\n"); -- Gitee From 23b3e528858dff9b6814ac15e009c0dba492adb8 Mon Sep 17 00:00:00 2001 From: Wei Qin Date: Thu, 25 Dec 2025 10:38:21 +0800 Subject: [PATCH 126/126] ub: udma: bugfix related to rx close. commit 69f805cd610bc634e8439a9bd3b939b7b35d8266 openEuler This patch fix a bug about rx close. Fixes: 534649e2be8e ("ub: udma: Support get tp list.") Signed-off-by: Wei Qin Signed-off-by: zhaolichang <943677312@qq.com> --- drivers/ub/urma/hw/udma/udma_main.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/ub/urma/hw/udma/udma_main.c b/drivers/ub/urma/hw/udma/udma_main.c index 6b4059cadc96..70999af9962d 100644 --- a/drivers/ub/urma/hw/udma/udma_main.c +++ b/drivers/ub/urma/hw/udma/udma_main.c @@ -1123,10 +1123,6 @@ int udma_probe(struct auxiliary_device *adev, void udma_remove(struct auxiliary_device *adev) { -#define MIN_SLEEP_TIME 100 -#define MAX_SLEEP_TIME 800 -#define TIME_SLEEP_RATE 2 - uint32_t wait_time = MIN_SLEEP_TIME; struct udma_dev *udma_dev; ubase_reset_unregister(adev); @@ -1139,14 +1135,9 @@ void udma_remove(struct auxiliary_device *adev) } ubcore_stop_requests(&udma_dev->ub_dev); - while (true) { - if (!udma_close_ue_rx(udma_dev, false, false, false, 0)) - break; - msleep(wait_time); - if (wait_time < MAX_SLEEP_TIME) - wait_time *= TIME_SLEEP_RATE; - dev_err_ratelimited(&adev->dev, "udma close ue rx failed in remove process.\n"); - } + if (udma_close_ue_rx(udma_dev, false, false, false, 0)) + dev_err(&adev->dev, "udma close ue rx failed in remove process.\n"); + udma_dev->status = UDMA_SUSPEND; 
udma_report_reset_event(UBCORE_EVENT_ELR_ERR, udma_dev); udma_unregister_none_crq_event(adev); -- Gitee