From d725a623405bbac0141b7dc2740f5b00d54306e0 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 01:36:54 +0000 Subject: [PATCH 01/35] style(nix): sort outputs in dpdk build Signed-off-by: Daniel Noland --- nix/pkgs/dpdk/default.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index 44d4474bd..fd5abdab3 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -257,10 +257,10 @@ stdenv.mkDerivation { ]; outputs = [ - "out" - "static" "dev" + "out" "share" + "static" ]; postInstall = '' From 071f0dbcfcea2d48e1fe97a0b8cd4ee5c7effbd7 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:24:31 +0000 Subject: [PATCH 02/35] refactor(nix): move profiles.nix to nix folder I put the profiles at the top level to start, but in retrospect there is little reason to put them there. It just makes the top level directory listing more noisy and spreads the nix code around. Signed-off-by: Daniel Noland --- default.nix | 2 +- profiles.nix => nix/profiles.nix | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename profiles.nix => nix/profiles.nix (100%) diff --git a/default.nix b/default.nix index 776d58bcf..e0b3f7ab9 100644 --- a/default.nix +++ b/default.nix @@ -1,6 +1,6 @@ let sources = import ./npins; - profiles = import ./profiles.nix; + profiles = import ./nix/profiles.nix; overlays.debug = import ./nix/overlays { inherit sources; env = profiles.debug; diff --git a/profiles.nix b/nix/profiles.nix similarity index 100% rename from profiles.nix rename to nix/profiles.nix From 9811294ed4e6e716be4d472d4a82959311fc478b Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:31:24 +0000 Subject: [PATCH 03/35] build(nix): introduce x86_64 march flags These flags will be used in a follow up commit to build the x86_64 rust wrapper to dpdk. 
Signed-off-by: Daniel Noland --- nix/profiles.nix | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index b3d52d626..491184e7d 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -52,6 +52,15 @@ let secure.NIX_CXXFLAGS_COMPILE = secure.NIX_CFLAGS_COMPILE; # handing the CFLAGS back to clang/lld is basically required for -fsanitize secure.NIX_CFLAGS_LINK = secure.NIX_CFLAGS_COMPILE; + march.x86_64.NIX_CFLAGS_COMPILE = [ + # DPDK functionally requires some -m flags on x86_64. + # These features have been available for a long time and can be found on any reasonably recent machine, so just + # enable them here for x86_64 builds. + "-mrtm" + "-mcrc32" + "-mssse3" + ]; + march.x86_64.NIX_CXXFLAGS_COMPILE = march.x86_64.NIX_CFLAGS_COMPILE; combine-profiles = features: builtins.foldl' ( From 7fc1a8214bec05e6ee4e63b967ec46f351ab72eb Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:32:20 +0000 Subject: [PATCH 04/35] build(nix): introduce aarch64 march flags These flags are empty for the moment, but may be needed in the future. They are included mostly for symmetry with the x86_64 build. Signed-off-by: Daniel Noland --- nix/profiles.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index 491184e7d..c85c2d8c8 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -61,6 +61,9 @@ let "-mssse3" ]; march.x86_64.NIX_CXXFLAGS_COMPILE = march.x86_64.NIX_CFLAGS_COMPILE; + march.aarch64.NIX_CFLAGS_COMPILE = [ ]; + march.aarch64.NIX_CXXFLAGS_COMPILE = march.aarch64.NIX_CFLAGS_COMPILE; + march.aarch64.NIX_CFLAGS_LINK = [ ]; combine-profiles = features: builtins.foldl' ( From 6c4d52a295bb2e2925d525a694d301aa2b50c78a Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:34:02 +0000 Subject: [PATCH 05/35] build(nix): introduce build profiles These are collections of commonly used C/CXX/LDFLAGS which can be recycled across other profiles. 
They will be connected to the build in a future commit. Signed-off-by: Daniel Noland --- nix/profiles.nix | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index c85c2d8c8..46aec3ec6 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -69,6 +69,17 @@ let builtins.foldl' ( acc: elem: builtins.mapAttrs (var: val: (acc.${var} or [ ]) ++ val) elem ) { } features; + profile = { + debug = combine-profiles [ + common + debug + ]; + release = combine-profiles [ + common + optimize + secure + ]; + }; in { debug = combine-profiles [ From 425447bb6c6ed072cffcc5d5c2f1fae3a8133981 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:38:01 +0000 Subject: [PATCH 06/35] build(nix): introduce cross compile / profile infrastructure This commit builds on prior work to introduce a functioning cross compile infrastructure for the dataplane, complete C/CXX/LDFLAGS calculated based on the user supplied target and profile. As of this commit, you should be able to compile dpdk with any combination of debug / release profiles, x86_64 / aarch64 processors, and gnu / musl libc. All of the following commands should work and should continue to work unless stated otherwise. 
``` nix-build -f default.nix pkgs.dpdk nix-build -f default.nix --argstr prof 'debug' pkgs.dpdk nix-build -f default.nix --argstr prof 'release' pkgs.dpdk nix-build -f default.nix --argstr target 'x86_64-unknown-linux-gnu' pkgs.dpdk nix-build -f default.nix --argstr target 'x86_64-unknown-linux-musl' pkgs.dpdk nix-build -f default.nix --argstr target 'aarch64-unknown-linux-gnu' pkgs.dpdk nix-build -f default.nix --argstr target 'aarch64-unknown-linux-musl' pkgs.dpdk nix-build -f default.nix --argstr prof 'debug' --argstr target x86_64-unknown-linux-gnu pkgs.dpdk nix-build -f default.nix --argstr prof 'debug' --argstr target x86_64-unknown-linux-musl pkgs.dpdk nix-build -f default.nix --argstr prof 'release' --argstr target x86_64-unknown-linux-gnu pkgs.dpdk nix-build -f default.nix --argstr prof 'release' --argstr target x86_64-unknown-linux-musl pkgs.dpdk nix-build -f default.nix --argstr prof 'debug' --argstr target aarch64-unknown-linux-gnu pkgs.dpdk nix-build -f default.nix --argstr prof 'debug' --argstr target aarch64-unknown-linux-musl pkgs.dpdk nix-build -f default.nix --argstr prof 'release' --argstr target aarch64-unknown-linux-gnu pkgs.dpdk nix-build -f default.nix --argstr prof 'release' --argstr target aarch64-unknown-linux-musl pkgs.dpdk ``` Signed-off-by: Daniel Noland --- default.nix | 58 ++++++++++++++++++++++++++++++++++-------------- nix/profiles.nix | 19 +++++++--------- 2 files changed, 49 insertions(+), 28 deletions(-) diff --git a/default.nix b/default.nix index e0b3f7ab9..b9761e3e5 100644 --- a/default.nix +++ b/default.nix @@ -1,28 +1,52 @@ +{ + target ? "x86_64-unknown-linux-gnu", + prof ? 
"debug", +}: let + arch = + { + "x86_64-unknown-linux-gnu" = { + target = "x86_64-unknown-linux-gnu"; + machine = "x86_64"; + nixarch = "gnu64"; + libc = "gnu"; + }; + "x86_64-unknown-linux-musl" = { + target = "x86_64-unknown-linux-musl"; + machine = "x86_64"; + nixarch = "musl64"; + libc = "musl"; + }; + "aarch64-unknown-linux-gnu" = { + target = "aarch64-unknown-linux-gnu"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform"; + libc = "glibc"; + }; + "aarch64-unknown-linux-musl" = { + target = "aarch64-unknown-linux-musl"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform-musl"; + libc = "musl"; + }; + } + .${target}; sources = import ./npins; - profiles = import ./nix/profiles.nix; - overlays.debug = import ./nix/overlays { - inherit sources; - env = profiles.debug; + profile = import ./nix/profiles.nix { + inherit prof; + arch = arch.machine; }; - overlays.release = import ./nix/overlays { + overlays = import ./nix/overlays { inherit sources; - env = profiles.release; - }; - pkgs.debug = import sources.nixpkgs { - overlays = [ - overlays.debug.dataplane - ]; + env = profile; }; - pkgs.release = import sources.nixpkgs { + pkgs = import sources.nixpkgs { overlays = [ - overlays.release.dataplane + overlays.dataplane ]; }; in { - -}: -{ - inherit sources pkgs; + inherit sources; + pkgs = pkgs.pkgsCross.${arch.nixarch}; } diff --git a/nix/profiles.nix b/nix/profiles.nix index 46aec3ec6..385983a96 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -1,3 +1,7 @@ +{ + arch, + prof, +}: let common.NIX_CFLAGS_COMPILE = [ "-glldb" @@ -81,14 +85,7 @@ let ]; }; in -{ - debug = combine-profiles [ - common - debug - ]; - release = combine-profiles [ - common - optimize - secure - ]; -} +(combine-profiles [ + profile."${prof}" + march."${arch}" +]) From 87beade558c8b8d1e492805a9856b9525a5d2871 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:51:54 +0000 Subject: [PATCH 07/35] build(nix): compile dpdk_wrapper.a with nix Signed-off-by: 
Daniel Noland --- nix/overlays/dataplane.nix | 7 + nix/pkgs/dpdk-wrapper/default.nix | 35 + nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c | 13 + nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h | 1808 ++++++++++++++++++++++ 4 files changed, 1863 insertions(+) create mode 100644 nix/pkgs/dpdk-wrapper/default.nix create mode 100644 nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c create mode 100644 nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 3487cba14..ee22a0fbd 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -176,4 +176,11 @@ in # Also, while this library has a respectable security track record, this is also a super strong candidate for # cfi, safe-stack, and cf-protection. dpdk = dataplane-dep (final.callPackage ../pkgs/dpdk { src = sources.dpdk; }); + + # DPDK is largely composed of static-inline functions. + # We need to wrap those functions with "_w" variants so that we can actually call them from rust. + # + # This wrapping process does not really cause any performance issue due to lto; the compiler is going to "unwrap" + # these methods anyway. + dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper { }); } diff --git a/nix/pkgs/dpdk-wrapper/default.nix b/nix/pkgs/dpdk-wrapper/default.nix new file mode 100644 index 000000000..46bf9a52d --- /dev/null +++ b/nix/pkgs/dpdk-wrapper/default.nix @@ -0,0 +1,35 @@ +{ + stdenv, + dpdk, + libbsd, +}: +stdenv.mkDerivation { + pname = "dpdk-wrapper"; + version = dpdk.version; + + src = ./src; + + nativeBuildInptus = [ + dpdk + libbsd + ]; + + outputs = [ + "dev" + "out" + ]; + + # DPDK marks all experimental apis as deprecated, but we wish to wrap such apis as well. Thus, turn off deprecation + # warnings. 
+ CFLAGS = [ "-Wno-deprecated-declarations" ]; + + buildPhase = '' + set euxo pipefail + mkdir -p $dev/include $out/lib + $CC $CFLAGS -I${dpdk}/include -I${libbsd.dev}/include -c $src/dpdk_wrapper.c -o wrapper.o; + $AR rcs $out/lib/libdpdk_wrapper.a wrapper.o; + $RANLIB $out/lib/libdpdk_wrapper.a; + cp $src/*.h $dev/include + ''; + +} diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c new file mode 100644 index 000000000..1e3f64324 --- /dev/null +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c @@ -0,0 +1,13 @@ +#include "dpdk_wrapper.h" + +int wrte_errno() { + return rte_errno; +} + +uint16_t wrte_eth_rx_burst(uint16_t const port_id, uint16_t const queue_id, struct rte_mbuf **rx_pkts, uint16_t const nb_pkts) { + return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +} + +uint16_t wrte_eth_tx_burst(uint16_t const port_id, uint16_t const queue_id, struct rte_mbuf **tx_pkts, uint16_t const nb_pkts) { + return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +} diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h new file mode 100644 index 000000000..96ec4cd0e --- /dev/null +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h @@ -0,0 +1,1808 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Things which are either duplicated, totally inapplicable or not needed +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // this is an internal header +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +/** + * Thin wrapper to expose `rte_errno`. + * + * @return + * The last rte_errno value (thread local value). 
+ */ +int rte_errno_get() { return rte_errno; } + +/** + * TX offloads to be set in [`rte_eth_tx_mode.offloads`]. + * + * This is a bitfield. Union these to enable multiple offloads. + * + * I wrapped these because the enum must be explicitly typed as 64 bit, but + * DPDK is not yet using the C23 standard (which would allow the inheritance + * notation with `uint64_t` seen here.). + */ +enum rte_eth_tx_offload : uint64_t { + TX_OFFLOAD_VLAN_INSERT = RTE_ETH_TX_OFFLOAD_VLAN_INSERT, + TX_OFFLOAD_IPV4_CKSUM = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, + TX_OFFLOAD_UDP_CKSUM = RTE_ETH_TX_OFFLOAD_UDP_CKSUM, + TX_OFFLOAD_TCP_CKSUM = RTE_ETH_TX_OFFLOAD_TCP_CKSUM, + TX_OFFLOAD_SCTP_CKSUM = RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, + TX_OFFLOAD_TCP_TSO = RTE_ETH_TX_OFFLOAD_TCP_TSO, + TX_OFFLOAD_UDP_TSO = RTE_ETH_TX_OFFLOAD_UDP_TSO, + TX_OFFLOAD_OUTER_IPV4_CKSUM = RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, + TX_OFFLOAD_QINQ_INSERT = RTE_ETH_TX_OFFLOAD_QINQ_INSERT, + TX_OFFLOAD_VXLAN_TNL_TSO = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, + TX_OFFLOAD_GRE_TNL_TSO = RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, + TX_OFFLOAD_IPIP_TNL_TSO = RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, + TX_OFFLOAD_GENEVE_TNL_TSO = RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, + TX_OFFLOAD_MACSEC_INSERT = RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, + TX_OFFLOAD_MT_LOCKFREE = RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, + TX_OFFLOAD_MULTI_SEGS = RTE_ETH_TX_OFFLOAD_MULTI_SEGS, + TX_OFFLOAD_MBUF_FAST_FREE = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, + TX_OFFLOAD_SECURITY = RTE_ETH_TX_OFFLOAD_SECURITY, + TX_OFFLOAD_UDP_TNL_TSO = RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, + TX_OFFLOAD_IP_TNL_TSO = RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, + TX_OFFLOAD_OUTER_UDP_CKSUM = RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, + TX_OFFLOAD_SEND_ON_TIMESTAMP = RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP +}; + +/** + * RX offloads to be set in [`rte_eth_rx_mode.offloads`]. + * + * This is a bitfield. Union these to enable multiple offloads. 
+ * + * I wrapped these because the enum must be explicitly typed as 64 bit, but + * DPDK is not yet using the C23 standard (which would allow the inheritance + * notation with `uint64_t` seen here.). + */ +enum wrte_eth_rx_offload : uint64_t { + RX_OFFLOAD_VLAN_STRIP = RTE_ETH_RX_OFFLOAD_VLAN_STRIP, + RX_OFFLOAD_IPV4_CKSUM = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, + RX_OFFLOAD_UDP_CKSUM = RTE_ETH_RX_OFFLOAD_UDP_CKSUM, + RX_OFFLOAD_TCP_CKSUM = RTE_ETH_RX_OFFLOAD_TCP_CKSUM, + RX_OFFLOAD_TCP_LRO = RTE_ETH_RX_OFFLOAD_TCP_LRO, + RX_OFFLOAD_QINQ_STRIP = RTE_ETH_RX_OFFLOAD_QINQ_STRIP, + RX_OFFLOAD_OUTER_IPV4_CKSUM = RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, + RX_OFFLOAD_MACSEC_STRIP = RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, + RX_OFFLOAD_VLAN_FILTER = RTE_ETH_RX_OFFLOAD_VLAN_FILTER, + RX_OFFLOAD_VLAN_EXTEND = RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, + RX_OFFLOAD_SCATTER = RTE_ETH_RX_OFFLOAD_SCATTER, + RX_OFFLOAD_TIMESTAMP = RTE_ETH_RX_OFFLOAD_TIMESTAMP, + RX_OFFLOAD_SECURITY = RTE_ETH_RX_OFFLOAD_SECURITY, + RX_OFFLOAD_KEEP_CRC = RTE_ETH_RX_OFFLOAD_KEEP_CRC, + RX_OFFLOAD_SCTP_CKSUM = RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, + RX_OFFLOAD_OUTER_UDP_CKSUM = RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, + RX_OFFLOAD_RSS_HASH = RTE_ETH_RX_OFFLOAD_RSS_HASH, + RX_OFFLOAD_BUFFER_SPLIT = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, +}; + +// Static wrappers + +int rte_is_aligned_w(const const void *const ptr, const unsigned int align) { + return rte_is_aligned(ptr, align); +} +void rte_atomic_thread_fence_w(rte_memory_order memorder) { + rte_atomic_thread_fence(memorder); +} +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { + return rte_atomic16_cmpset(dst, exp, src); +} +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { + return rte_atomic16_exchange(dst, val); +} +void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } +int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { + return rte_atomic16_read(v); +} +void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { + 
rte_atomic16_set(v, new_value); +} +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { + rte_atomic16_add(v, inc); +} +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { + rte_atomic16_sub(v, dec); +} +void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } +void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { + return rte_atomic16_add_return(v, inc); +} +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { + return rte_atomic16_sub_return(v, dec); +} +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_inc_and_test(v); +} +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_dec_and_test(v); +} +int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { + return rte_atomic16_test_and_set(v); +} +void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { + return rte_atomic32_cmpset(dst, exp, src); +} +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { + return rte_atomic32_exchange(dst, val); +} +void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } +int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { + return rte_atomic32_read(v); +} +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { + rte_atomic32_set(v, new_value); +} +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { + rte_atomic32_add(v, inc); +} +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { + rte_atomic32_sub(v, dec); +} +void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } +void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } +int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { + return rte_atomic32_add_return(v, inc); +} +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { + return rte_atomic32_sub_return(v, dec); +} +int 
rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_inc_and_test(v); +} +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_dec_and_test(v); +} +int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { + return rte_atomic32_test_and_set(v); +} +void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { + return rte_atomic64_cmpset(dst, exp, src); +} +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { + return rte_atomic64_exchange(dst, val); +} +void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } +int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { + rte_atomic64_set(v, new_value); +} +void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { + rte_atomic64_add(v, inc); +} +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { + rte_atomic64_sub(v, dec); +} +void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } +void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { + return rte_atomic64_add_return(v, inc); +} +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { + return rte_atomic64_sub_return(v, dec); +} +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_inc_and_test(v); +} +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_dec_and_test(v); +} +int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { + return rte_atomic64_test_and_set(v); +} +void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } +void rte_smp_mb_w(void) { rte_smp_mb(); } +uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } +uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } +uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } 
+void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } +uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } +uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } +size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { + return rte_strlcpy(dst, src, size); +} +size_t rte_strlcat_w(char *dst, const char *src, size_t size) { + return rte_strlcat(dst, src, size); +} +const char *rte_str_skip_leading_spaces_w(const char *src) { + return rte_str_skip_leading_spaces(src); +} +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { + rte_uuid_copy(dst, src); +} +int rte_gettid_w(void) { return rte_gettid(); } +unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } +void rte_pause_w(void) { rte_pause(); } +void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_16(addr, expected, memorder); +} +void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_32(addr, expected, memorder); +} +void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_64(addr, expected, memorder); +} +void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } +void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } +void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } +int rte_spinlock_trylock_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock(sl); +} +int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { + return rte_spinlock_is_locked(sl); +} +int rte_tm_supported_w(void) { return rte_tm_supported(); } +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } +void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { + rte_spinlock_unlock_tm(sl); +} +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock_tm(sl); +} +void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { + 
rte_spinlock_recursive_init(slr); +} +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock(slr); +} +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock(slr); +} +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock(slr); +} +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock_tm(slr); +} +void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock_tm(slr); +} +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock_tm(slr); +} +unsigned int rte_xbegin_w(void) { return rte_xbegin(); } +void rte_xend_w(void) { rte_xend(); } +int rte_xtest_w(void) { return rte_xtest(); } +int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_get32(nr, addr); +} +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_set32(nr, addr); +} +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_clear32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_set32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_clear32(nr, addr); +} +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_get64(nr, addr); +} +void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_set64(nr, addr); +} +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_clear64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_set64(nr, addr); +} +uint64_t 
rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_clear64(nr, addr); +} +unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } +unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } +unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } +unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } +unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } +unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } +uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } +uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } +uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { + return rte_bsf32_safe(v, pos); +} +uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { + return rte_bsf64_safe(v, pos); +} +uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } +uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } +int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } +uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } +uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } +uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } +uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } +uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } +uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } +void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } +int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_read_trylock(rwl); +} +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock(rwl); +} +int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { + return 
rte_rwlock_write_trylock(rwl); +} +void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock(rwl); +} +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_is_locked(rwl); +} +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_lock_tm(rwl); +} +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock_tm(rwl); +} +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_lock_tm(rwl); +} +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock_tm(rwl); +} +unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + 
unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, + 
void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); +} +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_prod_htd_max(r); +} +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_prod_htd_max(r, v); +} +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_cons_htd_max(r); +} +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_cons_htd_max(r, v); +} +unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_mp_enqueue_elem(r, obj, esize); +} +int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_sp_enqueue_elem(r, obj, esize); +} +int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { + return rte_ring_enqueue_elem(r, obj, esize); +} +unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + 
return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_mc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_sc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_dequeue_elem(r, obj_p, esize); +} +unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + 
unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_start(r, n, free_space); +} +void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n) { + rte_ring_enqueue_elem_finish(r, obj_table, esize, n); +} +void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, + unsigned int n) { + rte_ring_enqueue_finish(r, obj_table, n); +} +unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_start(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return 
rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_start(r, obj_table, n, available); +} +void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_elem_finish(r, n); +} +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_finish(r, n); +} +unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); +} +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_elem_finish(r, n); +} +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_finish(r, n); +} +unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, 
+ struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *available) { + return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); +} +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_elem_finish(r, n); +} +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_finish(r, n); +} +unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, + unsigned int n, unsigned int *free_space) { + return rte_ring_enqueue_bulk(r, obj_table, n, free_space); +} +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_mp_enqueue(r, obj); +} +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_sp_enqueue(r, obj); +} +int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_enqueue(r, obj); +} +unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int 
n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_bulk(r, obj_table, n, available); +} +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_mc_dequeue(r, obj_p); +} +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_sc_dequeue(r, obj_p); +} +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_dequeue(r, obj_p); +} +unsigned int rte_ring_count_w(const struct rte_ring *r) { + return rte_ring_count(r); +} +unsigned int rte_ring_free_count_w(const struct rte_ring *r) { + return rte_ring_free_count(r); +} +int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } +int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } +unsigned int rte_ring_get_size_w(const struct rte_ring *r) { + return rte_ring_get_size(r); +} +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { + return rte_ring_get_capacity(r); +} +enum rte_ring_sync_type +rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_prod_sync_type(r); +} +int rte_ring_is_prod_single_w(const struct rte_ring *r) { + return rte_ring_is_prod_single(r); +} +enum rte_ring_sync_type +rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_cons_sync_type(r); +} +int rte_ring_is_cons_single_w(const struct rte_ring *r) { + return rte_ring_is_cons_single(r); +} +unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); +} 
+unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_burst(r, obj_table, n, available); +} +void *rte_memcpy_w(void *dst, const void *src, size_t n) { + return rte_memcpy(dst, src, n); +} +void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { + return rte_mov15_or_less(dst, src, n); +} +void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } +void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } +void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } +void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } +void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { + return rte_memcpy_generic(dst, src, n); +} +void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { + return rte_memcpy_aligned(dst, src, n); +} +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { + return rte_mempool_get_header(obj); +} +struct rte_mempool *rte_mempool_from_obj_w(void *obj) { + return rte_mempool_from_obj(obj); +} +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { + return rte_mempool_get_trailer(obj); +} +struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { + return rte_mempool_get_ops(ops_index); +} +int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void 
**obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); +} +int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n) { + return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); +} +struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, + unsigned int lcore_id) { + return rte_mempool_default_cache(mp, lcore_id); +} +void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, + struct rte_mempool *mp) { + rte_mempool_cache_flush(cache, mp); +} +void rte_mempool_do_generic_put_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_do_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n) { + rte_mempool_put_bulk(mp, obj_table, n); +} +void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { + rte_mempool_put(mp, obj); +} +int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + return rte_mempool_do_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { + return rte_mempool_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_get_bulk(mp, obj_table, n); +} +int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { + return rte_mempool_get(mp, obj_p); +} +int 
rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) { + return rte_mempool_get_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_full_w(const struct rte_mempool *mp) { + return rte_mempool_full(mp); +} +int rte_mempool_empty_w(const struct rte_mempool *mp) { + return rte_mempool_empty(mp); +} +rte_iova_t rte_mempool_virt2iova_w(const void *elt) { + return rte_mempool_virt2iova(elt); +} +void *rte_mempool_get_priv_w(struct rte_mempool *mp) { + return rte_mempool_get_priv(mp); +} +void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } +void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } +void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } +void rte_prefetch_non_temporal_w(const void *p) { + rte_prefetch_non_temporal(p); +} +void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } +void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } +void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } +void rte_cldemote_w(const void *p) { rte_cldemote(p); } +uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } +uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } +uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } +uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } +uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } +uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part1(m); +} +void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part2(m); +} +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_size(mp); +} +rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { + return rte_mbuf_iova_get(m); +} +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { + rte_mbuf_iova_set(m, 
iova); +} +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova(mb); +} +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova_default(mb); +} +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { + return rte_mbuf_from_indirect(mi); +} +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { + return rte_mbuf_buf_addr(mb, mp); +} +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { + return rte_mbuf_data_addr_default(mb); +} +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } +void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } +uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_flags(mp); +} +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { + return rte_mbuf_refcnt_read(m); +} +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { + rte_mbuf_refcnt_set(m, new_value); +} +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { + return rte_mbuf_refcnt_update(m, value); +} +uint16_t +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { + return rte_mbuf_ext_refcnt_read(shinfo); +} +void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, + uint16_t new_value) { + rte_mbuf_ext_refcnt_set(shinfo, new_value); +} +uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, + int16_t value) { + return rte_mbuf_ext_refcnt_update(shinfo, value); +} +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { + return rte_mbuf_raw_alloc(mp); +} +void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_data_room_size(mp); +} +void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { + rte_pktmbuf_reset_headroom(m); +} +void rte_pktmbuf_reset_w(struct rte_mbuf *m) { 
rte_pktmbuf_reset(m); } +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { + return rte_pktmbuf_alloc(mp); +} +int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned int count) { + return rte_pktmbuf_alloc_bulk(pool, mbufs, count); +} +struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, + void *fcb_opaque) { + return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, + fcb_opaque); +} +void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo) { + rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); +} +void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, + const struct rte_mbuf *msrc) { + rte_mbuf_dynfield_copy(mdst, msrc); +} +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { + rte_pktmbuf_attach(mi, m); +} +void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { + return rte_pktmbuf_prefree_seg(m); +} +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } +void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { + rte_pktmbuf_refcnt_update(m, v); +} +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_headroom(m); +} +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_tailroom(m); +} +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m) { + return rte_pktmbuf_lastseg(m); +} +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_prepend(m, len); +} +char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_append(m, len); +} +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t 
len) { + return rte_pktmbuf_adj(m, len); +} +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_trim(m, len); +} +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { + return rte_pktmbuf_is_contiguous(m); +} +const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf) { + return rte_pktmbuf_read(m, off, len, buf); +} +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { + return rte_pktmbuf_chain(head, tail); +} +uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, + uint64_t tso, uint64_t ol3, uint64_t ol2, + uint64_t unused) { + return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); +} +int rte_validate_tx_offload_w(const struct rte_mbuf *m) { + return rte_validate_tx_offload(m); +} +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { + return rte_pktmbuf_linearize(mbuf); +} +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_queue_get(m); +} +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_traffic_class_get(m); +} +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_color_get(m); +} +void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, uint8_t *color) { + rte_mbuf_sched_get(m, queue_id, traffic_class, color); +} +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { + rte_mbuf_sched_queue_set(m, queue_id); +} +void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, + uint8_t traffic_class) { + rte_mbuf_sched_traffic_class_set(m, traffic_class); +} +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { + rte_mbuf_sched_color_set(m, color); +} +void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, + uint8_t traffic_class, uint8_t color) { + rte_mbuf_sched_set(m, queue_id, traffic_class, color); +} +int 
rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, + const struct rte_ether_addr *ea2) { + return rte_is_same_ether_addr(ea1, ea2); +} +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_zero_ether_addr(ea); +} +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_unicast_ether_addr(ea); +} +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_multicast_ether_addr(ea); +} +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_broadcast_ether_addr(ea); +} +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_universal_ether_addr(ea); +} +int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_local_admin_ether_addr(ea); +} +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_valid_assigned_ether_addr(ea); +} +void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, + struct rte_ether_addr *ea_to) { + rte_ether_addr_copy(ea_from, ea_to); +} +int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } +int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { + return rte_bitmap_get_memory_footprint(n_bits); +} +struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init(n_bits, mem, mem_size); +} +struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); +} +void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } +void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } +void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_prefetch0(bmp, pos); +} +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { + 
return rte_bitmap_get(bmp, pos); +} +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_set(bmp, pos); +} +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, + uint64_t slab) { + rte_bitmap_set_slab(bmp, pos, slab); +} +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_clear(bmp, pos); +} +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { + return rte_bitmap_scan(bmp, pos, slab); +} +uint16_t rte_raw_cksum_w(const void *buf, size_t len) { + return rte_raw_cksum(buf, len); +} +int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, + uint16_t *cksum) { + return rte_raw_cksum_mbuf(m, off, len, cksum); +} +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_hdr_len(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum_simple(ipv4_hdr); +} +uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + uint64_t ol_flags) { + return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +} +uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); +} +uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); +} +int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); +} +int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); +} +bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b) { + return 
rte_ipv6_addr_eq(a, b); +} +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { + rte_ipv6_addr_mask(ip, depth); +} +bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b, uint8_t depth) { + return rte_ipv6_addr_eq_prefix(a, b, depth); +} +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { + return rte_ipv6_mask_depth(mask); +} +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_unspec(ip); +} +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_loopback(ip); +} +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_linklocal(ip); +} +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_sitelocal(ip); +} +bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4compat(ip); +} +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4mapped(ip); +} +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_mcast(ip); +} +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_mc_scope(ip); +} +void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, + const struct rte_ether_addr *mac) { + rte_ipv6_llocal_from_ethernet(ip, mac); +} +void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, + const struct rte_ipv6_addr *ip) { + rte_ipv6_solnode_from_addr(sol, ip); +} +void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, + const struct rte_ipv6_addr *ip) { + rte_ether_mcast_from_ipv6(mac, ip); +} +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { + return rte_ipv6_check_version(ip); +} +uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + uint64_t ol_flags) { + return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +} +uint16_t rte_ipv6_udptcp_cksum_w(const 
struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); +} +uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); +} +int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); +} +int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); +} +int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { + return rte_ipv6_get_next_ext(p, proto, ext_len); +} +enum rte_color +rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, + struct rte_meter_srtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_srtcm_color_aware_check_w( + struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color +rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_color_aware_check_w( + struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, + uint32_t pkt_len) { + return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); +} 
+enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, + enum rte_color pkt_color) { + return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, + pkt_color); +} +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { + return rte_eth_rss_hf_refine(rss_hf); +} + +uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { + return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +} +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_rx_queue_count(port_id, queue_id); +} +int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_rx_descriptor_status(port_id, queue_id, offset); +} +int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_tx_descriptor_status(port_id, queue_id, offset); +} +uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer) { + return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); +} +uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer, + struct rte_mbuf *tx_pkt) { + return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); +} +uint16_t +rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) { + return 
rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, + recycle_rxq_info); +} +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_tx_queue_count(port_id, queue_id); +} +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { + return rte_flow_dynf_metadata_get(m); +} +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { + rte_flow_dynf_metadata_set(m, v); +} +int rte_flow_dynf_metadata_avail_w(void) { + return rte_flow_dynf_metadata_avail(); +} +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { + return rte_hash_crc_1byte(data, init_val); +} +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { + return rte_hash_crc_2byte(data, init_val); +} +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { + return rte_hash_crc_4byte(data, init_val); +} +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { + return rte_hash_crc_8byte(data, init_val); +} +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, + uint32_t init_val) { + return rte_hash_crc(data, data_len, init_val); +} +void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_2hashes(key, length, pc, pb); +} +void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_32b_2hashes(k, length, pc, pb); +} +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { + return rte_jhash(key, length, initval); +} +uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { + return rte_jhash_32b(k, length, initval); +} +uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, + uint32_t initval) { + return rte_jhash_3words(a, b, c, initval); +} +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { + return rte_jhash_2words(a, b, initval); +} +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { + return rte_jhash_1word(a, initval); 
+} +uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key) { + return rte_fbk_hash_get_bucket(ht, key); +} +int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value, + uint32_t bucket) { + return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); +} +int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, + uint16_t value) { + return rte_fbk_hash_add_key(ht, key, value); +} +int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_delete_key(ht, key); +} +int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_lookup(ht, key); +} +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { + rte_fbk_hash_clear_all(ht); +} +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { + return rte_fbk_hash_get_load_factor(ht); +} +void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_online(v, thread_id); +} +void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_offline(v, thread_id); +} +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_lock(v, thread_id); +} +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_unlock(v, thread_id); +} +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { + return rte_rcu_qsbr_start(v); +} +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_quiescent(v, 
thread_id); +} +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { + return rte_rcu_qsbr_check(v, t, wait); +} +uint8_t rte_read8_relaxed_w(const void *addr) { + return rte_read8_relaxed(addr); +} +uint16_t rte_read16_relaxed_w(const void *addr) { + return rte_read16_relaxed(addr); +} +uint32_t rte_read32_relaxed_w(const void *addr) { + return rte_read32_relaxed(addr); +} +uint64_t rte_read64_relaxed_w(const void *addr) { + return rte_read64_relaxed(addr); +} +void rte_write8_relaxed_w(uint8_t value, void *addr) { + rte_write8_relaxed(value, addr); +} +void rte_write16_relaxed_w(uint16_t value, void *addr) { + rte_write16_relaxed(value, addr); +} +void rte_write32_relaxed_w(uint32_t value, void *addr) { + rte_write32_relaxed(value, addr); +} +void rte_write64_relaxed_w(uint64_t value, void *addr) { + rte_write64_relaxed(value, addr); +} +uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } +uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } +uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } +uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } +void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } +void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } +void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } +void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } +void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { + rte_write32_wc_relaxed(value, addr); +} +void rte_write32_wc_w(uint32_t value, void *addr) { + rte_write32_wc(value, addr); +} +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_lock(msl, me); +} +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_unlock(msl, me); +} +int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + return rte_mcslock_trylock(msl, me); +} +int 
rte_mcslock_is_locked_w(rte_mcslock_t *msl) { + return rte_mcslock_is_locked(msl); +} +void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } +void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } +void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } +void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } +void rte_pflock_write_unlock_w(rte_pflock_t *pf) { + rte_pflock_write_unlock(pf); +} +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { + return rte_reciprocal_divide(a, R); +} +uint64_t rte_reciprocal_divide_u64_w(uint64_t a, + const struct rte_reciprocal_u64 *R) { + return rte_reciprocal_divide_u64(a, R); +} +void rte_seqcount_init_w(rte_seqcount_t *seqcount) { + rte_seqcount_init(seqcount); +} +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { + return rte_seqcount_read_begin(seqcount); +} +bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, + uint32_t begin_sn) { + return rte_seqcount_read_retry(seqcount, begin_sn); +} +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_begin(seqcount); +} +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_end(seqcount); +} +void rte_seqlock_init_w(rte_seqlock_t *seqlock) { rte_seqlock_init(seqlock); } +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { + return rte_seqlock_read_begin(seqlock); +} +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { + return rte_seqlock_read_retry(seqlock, begin_sn); +} +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_lock(seqlock); +} +void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_unlock(seqlock); +} +unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, + unsigned int n) { + return rte_stack_push(s, obj_table, n); +} +unsigned int rte_stack_pop_w(struct rte_stack *s, void 
**obj_table, + unsigned int n) { + return rte_stack_pop(s, obj_table, n); +} +unsigned int rte_stack_count_w(struct rte_stack *s) { + return rte_stack_count(s); +} +unsigned int rte_stack_free_count_w(struct rte_stack *s) { + return rte_stack_free_count(s); +} +uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss(input_tuple, input_len, rss_key); +} +uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss_be(input_tuple, input_len, rss_key); +} +void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } +void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } +void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { + rte_ticketlock_unlock(tl); +} +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { + return rte_ticketlock_trylock(tl); +} +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { + return rte_ticketlock_is_locked(tl); +} +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_init(tlr); +} +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_lock(tlr); +} +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_unlock(tlr); +} +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { + return rte_ticketlock_recursive_trylock(tlr); +} +uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, + uint64_t cycles) { + return rte_cyclecounter_cycles_to_ns(tc, cycles); +} +uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, + uint64_t cycle_now) { + return rte_timecounter_update(tc, cycle_now); +} +uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { + return rte_timespec_to_ns(ts); +} +struct timespec rte_ns_to_timespec_w(uint64_t nsec) { + return rte_ns_to_timespec(nsec); +} +bool rte_trace_feature_is_enabled_w(void) { 
+ return rte_trace_feature_is_enabled(); +} From 73193d8bf4bdb3d1c5158fedae9270dccafb1666 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 04:39:24 +0000 Subject: [PATCH 08/35] build(nix): make overlay user selectable Signed-off-by: Daniel Noland --- default.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/default.nix b/default.nix index b9761e3e5..f00c69de9 100644 --- a/default.nix +++ b/default.nix @@ -1,4 +1,5 @@ { + overlay ? "dataplane", target ? "x86_64-unknown-linux-gnu", prof ? "debug", }: @@ -42,7 +43,7 @@ let }; pkgs = import sources.nixpkgs { overlays = [ - overlays.dataplane + overlays.${overlay} ]; }; in From 4796a4edf5d06f9dbd97679b2a7a9e8bc3f2b36c Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 16:11:36 +0000 Subject: [PATCH 09/35] build(nix): add address sanitize profile Signed-off-by: Daniel Noland --- nix/profiles.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index 385983a96..8b8a5187a 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -68,6 +68,13 @@ let march.aarch64.NIX_CFLAGS_COMPILE = [ ]; march.aarch64.NIX_CXXFLAGS_COMPILE = march.aarch64.NIX_CFLAGS_COMPILE; march.aarch64.NIX_CFLAGS_LINK = [ ]; + sanitize.address.NIX_CFLAGS_COMPILE = [ + "-fsanitize=address,local-bounds" + ]; + sanitize.address.NIX_CXXFLAGS_COMPILE = sanitize.address.NIX_CFLAGS_COMPILE; + sanitize.address.NIX_CFLAGS_LINK = sanitize.address.NIX_CFLAGS_COMPILE ++ [ + "-static-libasan" + ]; combine-profiles = features: builtins.foldl' ( From 927ccfcbb715a3548c730ef8541765f88b20a326 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 16:12:03 +0000 Subject: [PATCH 10/35] build(nix): add leak sanitize profile Signed-off-by: Daniel Noland --- nix/profiles.nix | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index 8b8a5187a..1d315876f 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -75,6 +75,11 @@ let 
sanitize.address.NIX_CFLAGS_LINK = sanitize.address.NIX_CFLAGS_COMPILE ++ [ "-static-libasan" ]; + sanitize.leak.NIX_CFLAGS_COMPILE = [ + "-fsanitize=leak" + ]; + sanitize.leak.NIX_CXXFLAGS_COMPILE = sanitize.leak.NIX_CFLAGS_COMPILE; + sanitize.leak.NIX_CFLAGS_LINK = sanitize.leak.NIX_CFLAGS_COMPILE; combine-profiles = features: builtins.foldl' ( From f96d5e7a1c64f79902f52ed6096d0587bb8f6e1e Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 20:41:18 +0000 Subject: [PATCH 11/35] build(nix): add cfi sanitize profile Signed-off-by: Daniel Noland --- nix/profiles.nix | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/nix/profiles.nix b/nix/profiles.nix index 1d315876f..81ceef29e 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -47,11 +47,6 @@ let "-fstack-clash-protection" # "-fcf-protection=full" # requires extra testing before we enable # "-fsanitize=safe-stack" # requires extra testing before we enable (not compatible with musl) - # "-fsanitize=cfi" # requires extra testing before we enable - # enable if you turn on cfi to properly link with rust - # "-fsanitize-cfi-icall-experimental-normalize-integers" - # consider enabling if you turn on cfi (not compatible with cross DSO cfi) - # "-fsanitize-cfi-icall-generalize-pointers" ]; secure.NIX_CXXFLAGS_COMPILE = secure.NIX_CFLAGS_COMPILE; # handing the CFLAGS back to clang/lld is basically required for -fsanitize @@ -80,6 +75,26 @@ let ]; sanitize.leak.NIX_CXXFLAGS_COMPILE = sanitize.leak.NIX_CFLAGS_COMPILE; sanitize.leak.NIX_CFLAGS_LINK = sanitize.leak.NIX_CFLAGS_COMPILE; + # note: cfi _requires_ LTO and is fundamentally ill suited to debug builds + sanitize.cfi.NIX_CFLAGS_COMPILE = [ + "-fsanitize=cfi" + # visibility=default is functionally required if you use basically any cfi higher than icall. + # In theory we could set -fvisibility=hidden, but in practice that doesn't work because too many dependencies + # fail to build with that setting enabled. 
+ # NOTE: you also want to enable -Wl,--lto-whole-program-visibility in the linker flags if visibility=default so that + symbols can be refined to hidden visibility at link time. + # This "whole-program-visibility" flag is already enabled by the optimize profile, and + given that the optimize profile is required for cfi to even build, we don't explicitly enable it again here. + "-fvisibility=default" + # required to properly link with rust + "-fsanitize-cfi-icall-experimental-normalize-integers" + # required in cases where perfect type strictness is not maintained but you still want to use CFI. + # Type fudging is common in C code, especially in cases where function pointers are used with lax const correctness. + # Ideally we wouldn't enable this, but we can't really re-write all of the C code in the world. + "-fsanitize-cfi-icall-generalize-pointers" + ]; + sanitize.cfi.NIX_CXXFLAGS_COMPILE = sanitize.cfi.NIX_CFLAGS_COMPILE; + sanitize.cfi.NIX_CFLAGS_LINK = sanitize.cfi.NIX_CFLAGS_COMPILE; combine-profiles = features: builtins.foldl' ( From 0f296935b08a8f3eeaf12aa4325cf92c1a48c502 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 20:41:35 +0000 Subject: [PATCH 12/35] build(nix): add safe-stack sanitize profile Signed-off-by: Daniel Noland --- nix/profiles.nix | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nix/profiles.nix b/nix/profiles.nix index 81ceef29e..e31461678 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -46,7 +46,6 @@ let "-fstack-protector-strong" "-fstack-clash-protection" # "-fcf-protection=full" # requires extra testing before we enable - # "-fsanitize=safe-stack" # requires extra testing before we enable (not compatible with musl) ]; secure.NIX_CXXFLAGS_COMPILE = secure.NIX_CFLAGS_COMPILE; # handing the CFLAGS back to clang/lld is basically required for -fsanitize @@ -95,6 +94,13 @@ let ]; sanitize.cfi.NIX_CXXFLAGS_COMPILE = sanitize.cfi.NIX_CFLAGS_COMPILE; sanitize.cfi.NIX_CFLAGS_LINK =
sanitize.cfi.NIX_CFLAGS_COMPILE; + sanitize.safe-stack.NIX_CFLAGS_COMPILE = [ + "-fsanitize=safe-stack" + ]; + sanitize.safe-stack.NIX_CXXFLAGS_COMPILE = sanitize.safe-stack.NIX_CFLAGS_COMPILE; + sanitize.safe-stack.NIX_CFLAGS_LINK = sanitize.safe-stack.NIX_CFLAGS_COMPILE ++ [ + "-Wl,--allow-shlib-undefined" + ]; combine-profiles = features: builtins.foldl' ( From 7d708cb90538f19e502cadd065ce8faf970cf1d7 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 20:42:17 +0000 Subject: [PATCH 13/35] build(nix): add instrumentation profiles Signed-off-by: Daniel Noland --- nix/profiles.nix | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index e31461678..aa96e9b3d 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -101,6 +101,16 @@ let sanitize.safe-stack.NIX_CFLAGS_LINK = sanitize.safe-stack.NIX_CFLAGS_COMPILE ++ [ "-Wl,--allow-shlib-undefined" ]; + instrument.none.NIX_CFLAGS_COMPILE = [ ]; + instrument.none.NIX_CXXFLAGS_COMPILE = instrument.none.NIX_CFLAGS_COMPILE; + instrument.none.NIX_CFLAGS_LINK = instrument.none.NIX_CFLAGS_COMPILE; + instrument.produce.NIX_CFLAGS_COMPILE = [ + "-fprofile-instr-generate" + "-fcoverage-mapping" + "-fno-omit-frame-pointer" + ]; + instrument.produce.NIX_CXXFLAGS_COMPILE = instrument.produce.NIX_CFLAGS_COMPILE; + instrument.produce.NIX_CFLAGS_LINK = instrument.produce.NIX_CFLAGS_COMPILE; combine-profiles = features: builtins.foldl' ( From d3162008cffd295baf7505b489fe112e049d95c6 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 20:43:10 +0000 Subject: [PATCH 14/35] build(nix): pipe sanitizer and instrumentation into profile Signed-off-by: Daniel Noland --- default.nix | 9 +++++++-- nix/profiles.nix | 14 ++++++++++---- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/default.nix b/default.nix index f00c69de9..3b76eedb3 100644 --- a/default.nix +++ b/default.nix @@ -2,6 +2,8 @@ overlay ? "dataplane", target ? 
"x86_64-unknown-linux-gnu", prof ? "debug", + instrumentation ? "none", + sanitize ? "", }: let arch = @@ -32,9 +34,12 @@ let }; } .${target}; + # helper method to work around nix's contrived builtin string split function. + split-str = split: str: builtins.filter (elm: builtins.isString elm) (builtins.split split str); + sanitizers = if sanitize == null || sanitize == "" then [ ] else split-str ",+" sanitize; sources = import ./npins; profile = import ./nix/profiles.nix { - inherit prof; + inherit prof sanitizers instrumentation; arch = arch.machine; }; overlays = import ./nix/overlays { @@ -48,6 +53,6 @@ let }; in { - inherit sources; + inherit sources profile; pkgs = pkgs.pkgsCross.${arch.nixarch}; } diff --git a/nix/profiles.nix b/nix/profiles.nix index aa96e9b3d..3e2fad97e 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -1,6 +1,8 @@ { arch, prof, + sanitizers, + instrumentation, }: let common.NIX_CFLAGS_COMPILE = [ @@ -128,7 +130,11 @@ let ]; }; in -(combine-profiles [ - profile."${prof}" - march."${arch}" -]) +combine-profiles ( + [ + profile."${prof}" + march."${arch}" + instrument."${instrumentation}" + ] + ++ (builtins.map (s: sanitize.${s}) sanitizers) +) From b384843708d9c7f8afd0ca38247ec6c8532d8918 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 23:29:40 +0000 Subject: [PATCH 15/35] build(nix): add thread sanitize profile --- nix/profiles.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nix/profiles.nix b/nix/profiles.nix index 3e2fad97e..9a2cb62e1 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -76,6 +76,13 @@ let ]; sanitize.leak.NIX_CXXFLAGS_COMPILE = sanitize.leak.NIX_CFLAGS_COMPILE; sanitize.leak.NIX_CFLAGS_LINK = sanitize.leak.NIX_CFLAGS_COMPILE; + sanitize.thread.NIX_CFLAGS_COMPILE = [ + "-fsanitize=thread" + ]; + sanitize.thread.NIX_CXXFLAGS_COMPILE = sanitize.thread.NIX_CFLAGS_COMPILE; + sanitize.thread.NIX_CFLAGS_LINK = sanitize.thread.NIX_CFLAGS_COMPILE ++ [ + "-Wl,--allow-shlib-undefined" + ]; 
# note: cfi _requires_ LTO and is fundamentally ill suited to debug builds sanitize.cfi.NIX_CFLAGS_COMPILE = [ "-fsanitize=cfi" From f78b613d143a663cc3244620431f79b3f32f5b35 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 23:31:11 +0000 Subject: [PATCH 16/35] build(nix): forward cflags back to linker in optimize --- nix/profiles.nix | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nix/profiles.nix b/nix/profiles.nix index 9a2cb62e1..3780dbb52 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -37,8 +37,7 @@ let optimize.NIX_CXXFLAGS_COMPILE = optimize.NIX_CFLAGS_COMPILE ++ [ "-fwhole-program-vtables" ]; - optimize.NIX_CFLAGS_LINK = [ - "-flto=full" + optimize.NIX_CFLAGS_LINK = optimize.NIX_CXXFLAGS_COMPILE ++ [ "-Wl,--lto-whole-program-visibility" # just to keep the artifacts small, we don't currently use any linked artifact anyway "-Wl,--gc-sections" From 08214b24ff4b21218adbee5839229c3d11cdd103 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 23:31:46 +0000 Subject: [PATCH 17/35] fix(nix): missing acc // for one offs --- nix/profiles.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/profiles.nix b/nix/profiles.nix index 3780dbb52..3a9f7533b 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -122,7 +122,7 @@ let combine-profiles = features: builtins.foldl' ( - acc: elem: builtins.mapAttrs (var: val: (acc.${var} or [ ]) ++ val) elem + acc: elem: acc // (builtins.mapAttrs (var: val: (acc.${var} or [ ]) ++ val) elem) ) { } features; profile = { debug = combine-profiles [ From c0c2dfc34c99f4f57cd8a744bae6e970e705a41a Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Fri, 19 Dec 2025 23:32:52 +0000 Subject: [PATCH 18/35] build(nix): make most sanitizers work --- nix/overlays/dataplane.nix | 4 ++++ nix/pkgs/dpdk/default.nix | 2 +- nix/profiles.nix | 1 - 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 
ee22a0fbd..a6fd559c9 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -160,6 +160,10 @@ in # doesn't even have symbol versioning / compatibility in the first place). Turning this off just reduces the # build's internal complexity and makes lto easier. "-DNO_COMPAT_SYMS=1" + # this allows thread sanitizer to build (thread/ub sanitizers do not like -Wl,-z,defs or -Wl,--no-undefined) + # Normally I would say that disabling -Wl,--no-undefined is a bad idea, but we throw away all the shared + libs and executables from this build anyway, so it should be harmless. + "-DSUPPORTS_NO_UNDEFINED=0" # todo: find a way to enable this for only {thread,ub}san builds. ]; postInstall = (orig.postInstall or "") + '' mkdir -p $static/lib; diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index fd5abdab3..362711760 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -239,7 +239,7 @@ stdenv.mkDerivation { "-Dauto_features=disabled" "-Db_colorout=never" "-Db_lto=${lto}" - "-Db_lundef=true" + "-Db_lundef=false" "-Db_pgo=off" "-Db_pie=true" "-Dbackend=ninja" diff --git a/nix/profiles.nix b/nix/profiles.nix index 3a9f7533b..caad8e6d5 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -31,7 +31,6 @@ let optimize.NIX_CFLAGS_COMPILE = [ "-O3" "-flto=full" - "-ffat-lto-objects" "-fsplit-lto-unit" # important for compatibility with rust's LTO ]; optimize.NIX_CXXFLAGS_COMPILE = optimize.NIX_CFLAGS_COMPILE ++ [ From 2fdf972b9e733b2c3cc456b9e6de09c32ccf624c Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sat, 20 Dec 2025 00:00:09 +0000 Subject: [PATCH 19/35] build(nix): add copyright headers --- default.nix | 2 ++ nix/overlays/dataplane.nix | 2 ++ nix/overlays/default.nix | 2 ++ nix/pkgs/dpdk-wrapper/default.nix | 2 ++ nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c | 3 +++ nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h | 3 +++ nix/pkgs/dpdk/default.nix | 2 ++ nix/profiles.nix | 2 ++ shell.nix | 2 ++ 9 files changed, 20
insertions(+) diff --git a/default.nix b/default.nix index 3b76eedb3..7cfde5625 100644 --- a/default.nix +++ b/default.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { overlay ? "dataplane", target ? "x86_64-unknown-linux-gnu", diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index a6fd559c9..8634c4dae 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { sources, env ? { }, diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 6139dfb3c..d6d69a8f7 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { sources, env ? { }, diff --git a/nix/pkgs/dpdk-wrapper/default.nix b/nix/pkgs/dpdk-wrapper/default.nix index 46bf9a52d..60ffe4623 100644 --- a/nix/pkgs/dpdk-wrapper/default.nix +++ b/nix/pkgs/dpdk-wrapper/default.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { stdenv, dpdk, diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c index 1e3f64324..58e2f4df7 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c @@ -1,3 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Open Network Fabric Authors + #include "dpdk_wrapper.h" int wrte_errno() { diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h index 96ec4cd0e..2d4e77c47 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h @@ -1,3 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Open Network Fabric Authors + #include #include diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index 362711760..6028b8c2a 100644 --- 
a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { src, stdenv, diff --git a/nix/profiles.nix b/nix/profiles.nix index caad8e6d5..7e47d8b8f 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { arch, prof, diff --git a/shell.nix b/shell.nix index 918e6cad1..afa7cc93c 100644 --- a/shell.nix +++ b/shell.nix @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors { pkgs ? import { }, }: From 25b8d4d66fd25df4dee06442e1b5e0b45115c217 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sat, 20 Dec 2025 03:24:40 +0000 Subject: [PATCH 20/35] wip --- .sysroot-man | 1 + .sysroot-static | 1 + default.nix | 37 ++++++++++++++++++++++------- nix/overlays/dataplane.nix | 24 +++++++++++++++---- nix/pkgs/dpdk/cross/bluefield3 | 17 +++++++++++++ nix/pkgs/dpdk/cross/bluefield3.gnu | 19 +++++++++++++++ nix/pkgs/dpdk/cross/bluefield3.musl | 19 +++++++++++++++ nix/pkgs/dpdk/default.nix | 17 ++++++++++--- nix/pkgs/dpdk/src | 1 + result | 1 + result-bin | 1 + shell.nix | 6 +++++ sysroot | 1 + 13 files changed, 128 insertions(+), 17 deletions(-) create mode 120000 .sysroot-man create mode 120000 .sysroot-static create mode 100644 nix/pkgs/dpdk/cross/bluefield3 create mode 100644 nix/pkgs/dpdk/cross/bluefield3.gnu create mode 100644 nix/pkgs/dpdk/cross/bluefield3.musl create mode 160000 nix/pkgs/dpdk/src create mode 120000 result create mode 120000 result-bin create mode 120000 sysroot diff --git a/.sysroot-man b/.sysroot-man new file mode 120000 index 000000000..35825c4a6 --- /dev/null +++ b/.sysroot-man @@ -0,0 +1 @@ +/nix/store/fvf13rk7bimfzqwqyx73zxy132aqcqwj-aarch64-unknown-linux-gnu-pkg-config-wrapper-0.29.2-man \ No newline at end of file diff --git a/.sysroot-static b/.sysroot-static new file mode 120000 index 000000000..2cc68f664 --- /dev/null 
+++ b/.sysroot-static @@ -0,0 +1 @@ +/nix/store/4155d64l46f8qdl49dh3mpf81f9h4ivl-dpdk-aarch64-unknown-linux-gnu-25.11-static \ No newline at end of file diff --git a/default.nix b/default.nix index 7cfde5625..a4451f3e6 100644 --- a/default.nix +++ b/default.nix @@ -48,13 +48,32 @@ let inherit sources; env = profile; }; - pkgs = import sources.nixpkgs { - overlays = [ - overlays.${overlay} - ]; - }; + pkgs = + (import sources.nixpkgs { + overlays = [ + overlays.${overlay} + ]; + }).pkgsCross.${arch.nixarch}; in -{ - inherit sources profile; - pkgs = pkgs.pkgsCross.${arch.nixarch}; -} +pkgs.lib.fix (final: { + inherit pkgs sources profile; + sysroot-list = with final.pkgs; [ + libc.static + libc.out + libmd.static + libbsd.static + libnl.out + numactl.dev + numactl.static + rdma-core.static + dpdk.dev + dpdk.out + dpdk.static + dpdk-wrapper.dev + dpdk-wrapper.out + ]; + sysroot = pkgs.symlinkJoin { + name = "sysroot"; + paths = final.sysroot-list; + }; +}) diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 8634c4dae..516c66e60 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -21,13 +21,15 @@ let bintools lld ]; - }) (adapt.makeStaticLibraries final.buildPackages.llvmPackages.stdenv); + }) final.llvmPackages.stdenv; + # }) (adapt.makeStaticLibraries final.buildPackages.llvmPackages.stdenv); stdenv-llvm-with-flags = adapt.addAttrsToDerivation (orig: { env = helpers.addToEnv env (orig.env or { }); }) stdenv-llvm; dataplane-dep = pkg: pkg.override { stdenv = stdenv-llvm-with-flags; }; in { + stdenv' = stdenv-llvm-with-flags; # Don't bother adapting ethtool or iproute2's build to our custom flags / env. Failure to null this can trigger # _massive_ builds because ethtool depends on libnl (et al), and we _do_ overlay libnl. Thus, the ethtool / iproute2 # get rebuilt and you end up rebuilding the whole world. @@ -69,7 +71,7 @@ in # file. 
Ideally we would just _not_ build those .so files, but that would require doing brain surgery on dpdk's # meson build, and maintaining such a change set is not worth it to avoid building some .so files. configureFlags = (orig.configureFlags or [ ]) ++ [ - "--enable-shared" + "--enable-static" ]; postInstall = (orig.postInstall or "") + '' mkdir -p "$static/lib"; @@ -87,7 +89,7 @@ in # we need to enable shared (in addition to static) to build dpdk. # See the note on libmd for reasoning. configureFlags = orig.configureFlags ++ [ - "--enable-shared" + "--enable-static" ]; postInstall = (orig.postInstall or "") + '' mkdir -p "$static/lib"; @@ -107,7 +109,17 @@ in # More, this is a very low level library designed to send messages between a privileged process and the kernel. # The simple fact that this appears in our toolchain justifies sanitizers like safe-stack and cfi and/or flags like # -fcf-protection=full. - libnl = dataplane-dep prev.libnl; + libnl = (dataplane-dep prev.libnl).overrideAttrs (orig: { + outputs = (orig.outputs or [ "out" ]) ++ [ "static" ]; + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--enable-static" + ]; + postInstall = (orig.postInstall or "") + '' + mkdir -p $static/lib + find $out/lib -name '*.la' -exec rm {} \; + mv $out/lib/*.a $static/lib/ + ''; + }); # This is needed by DPDK in order to determine which pinned core runs on which numa node and which NIC is most # efficiently connected to which NUMA node. You can disable the need for this library entirely by editing dpdk's @@ -127,7 +139,7 @@ in # we need to enable shared (in addition to static) to build dpdk. # See the note on libmd for reasoning. 
configureFlags = (orig.configureFlags or [ ]) ++ [ - "--enable-shared" # dpdk does not like to build its .so files if we don't build numa.so as well + "--enable-static" # dpdk does not like to build its .so files if we don't build numa.so as well ]; postInstall = (orig.postInstall or "") + '' mkdir -p "$static/lib"; @@ -189,4 +201,6 @@ in # This wrapping process does not really cause any performance issue due to lto; the compiler is going to "unwrap" # these methods anyway. dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper { }); + + inherit env; } diff --git a/nix/pkgs/dpdk/cross/bluefield3 b/nix/pkgs/dpdk/cross/bluefield3 new file mode 100644 index 000000000..783a14210 --- /dev/null +++ b/nix/pkgs/dpdk/cross/bluefield3 @@ -0,0 +1,17 @@ +[binaries] +c = 'aarch64-unknown-linux-gnu-cc' +cpp = 'aarch64-unknown-linux-gnu-c++' +ar = 'aarch64-unknown-linux-gnu-ar' +strip = 'aarch64-unknown-linux-gnu-strip' +pkgconfig = 'aarch64-unknown-linux-gnu-pkg-config' +pkg-config = 'aarch64-unknown-linux-gnu-pkg-config' +pcap-config = '' + +[host_machine] +system = 'linux' +cpu_family = 'aarch64' +cpu = 'armv8.4-a' +endian = 'little' + +[properties] +platform = 'bluefield3' diff --git a/nix/pkgs/dpdk/cross/bluefield3.gnu b/nix/pkgs/dpdk/cross/bluefield3.gnu new file mode 100644 index 000000000..56fd5ae8b --- /dev/null +++ b/nix/pkgs/dpdk/cross/bluefield3.gnu @@ -0,0 +1,19 @@ +[binaries] +c = 'aarch64-unknown-linux-gnu-cc' +cpp = 'aarch64-unknown-linux-gnu-c++' +ar = 'aarch64-unknown-linux-gnu-ar' +strip = 'aarch64-unknown-linux-gnu-strip' +pkgconfig = 'aarch64-unknown-linux-gnu-pkg-config' +pkg-config = 'aarch64-unknown-linux-gnu-pkg-config' +pcap-config = '' + +[host_machine] +system = 'linux' +cpu_family = 'aarch64' +cpu = 'armv8.4-a' +endian = 'little' + +[properties] +platform = 'bluefield3' +libc = 'gnu' +ar = 'aarch64-unknown-linux-gnu-ar' diff --git a/nix/pkgs/dpdk/cross/bluefield3.musl b/nix/pkgs/dpdk/cross/bluefield3.musl new file mode 100644 index 
000000000..7247ed091 --- /dev/null +++ b/nix/pkgs/dpdk/cross/bluefield3.musl @@ -0,0 +1,19 @@ +[binaries] +c = 'aarch64-unknown-linux-musl-cc' +cpp = 'aarch64-unknown-linux-musl-c++' +ar = 'aarch64-unknown-linux-musl-ar' +strip = 'aarch64-unknown-linux-musl-strip' +pkgconfig = 'aarch64-unknown-linux-musl-pkg-config' +pkg-config = 'aarch64-unknown-linux-musl-pkg-config' +pcap-config = '' + +[host_machine] +system = 'linux' +cpu_family = 'aarch64' +cpu = 'armv8.4-a' +endian = 'little' + +[properties] +platform = 'bluefield3' +libc = 'musl' +ar = 'aarch64-unknown-linux-musl-ar' diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index 6028b8c2a..165ee8506 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -13,15 +13,16 @@ libnl, python3, build-params ? { - lto = "true"; - build-type = "release"; # "debug" | "release" + lto = "false"; + build-type = "debug"; # "debug" | "release" }, }: stdenv.mkDerivation { pname = "dpdk"; version = src.branch; - src = src.outPath; + # src = src.outPath; + src = ./src; nativeBuildInputs = [ meson ninja @@ -256,6 +257,7 @@ stdenv.mkDerivation { ''-Denable_drivers=${lib.concatStringsSep "," enabledDrivers}'' ''-Denable_libs=${lib.concatStringsSep "," enabledLibs}'' ''-Ddisable_libs=${lib.concatStringsSep "," disabledLibs}'' + ''--cross-file=${./cross/bluefield3.gnu}'' ]; outputs = [ @@ -265,6 +267,15 @@ stdenv.mkDerivation { "static" ]; + # AR = "aarch64-unknown-linux-gnu-ar"; + + # configurePhase = '' + # meson setup arm-build --cross-file ${./cross/bluefield3} + # cd build + # ''; + # + CFLAGS = "-ffat-lto-objects -O1 -Wno-#warnings"; + postInstall = '' # Remove docs. 
We don't build these anyway rm -rf $out/share/doc diff --git a/nix/pkgs/dpdk/src b/nix/pkgs/dpdk/src new file mode 160000 index 000000000..cd60dcd50 --- /dev/null +++ b/nix/pkgs/dpdk/src @@ -0,0 +1 @@ +Subproject commit cd60dcd503b91956f966a1f6d595b35d256ac00f diff --git a/result b/result new file mode 120000 index 000000000..a40bbe7e4 --- /dev/null +++ b/result @@ -0,0 +1 @@ +/nix/store/xx7cm72qy2c0643cm1ipngd87aqwkcdp-glibc-2.40-66 \ No newline at end of file diff --git a/result-bin b/result-bin new file mode 120000 index 000000000..24bd090c0 --- /dev/null +++ b/result-bin @@ -0,0 +1 @@ +/nix/store/q6wgv06q39bfhx2xl8ysc05wi6m2zdss-glibc-2.40-66-bin \ No newline at end of file diff --git a/shell.nix b/shell.nix index afa7cc93c..07970296c 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,12 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Open Network Fabric Authors { + overlay ? "dataplane", + target ? "x86_64-unknown-linux-gnu", + prof ? "debug", + instrumentation ? "none", + sanitize ? "", + sources ? import ./npins, pkgs ? 
import { }, }: (pkgs.buildFHSEnv { diff --git a/sysroot b/sysroot new file mode 120000 index 000000000..761f3e8ec --- /dev/null +++ b/sysroot @@ -0,0 +1 @@ +/nix/store/m1gsdy5xjrc396yk2m1h7nrdly5d6jw9-sysroot \ No newline at end of file From eaddb02bb49635f98d39d1542163f04d8eab0b6a Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sat, 20 Dec 2025 16:05:10 +0000 Subject: [PATCH 21/35] wip --- .sysroot-man | 1 - .sysroot-static | 1 - nix/pkgs/dpdk/cross/bluefield3.gnu | 19 -------- .../cross/{bluefield3 => bluefield3.gnu.ini} | 3 +- .../{bluefield3.musl => bluefield3.musl.ini} | 3 +- nix/pkgs/dpdk/default.nix | 45 ++++++++++++++----- nix/profiles.nix | 6 ++- result | 1 - result-bin | 1 - 9 files changed, 41 insertions(+), 39 deletions(-) delete mode 120000 .sysroot-man delete mode 120000 .sysroot-static delete mode 100644 nix/pkgs/dpdk/cross/bluefield3.gnu rename nix/pkgs/dpdk/cross/{bluefield3 => bluefield3.gnu.ini} (92%) rename nix/pkgs/dpdk/cross/{bluefield3.musl => bluefield3.musl.ini} (88%) delete mode 120000 result delete mode 120000 result-bin diff --git a/.sysroot-man b/.sysroot-man deleted file mode 120000 index 35825c4a6..000000000 --- a/.sysroot-man +++ /dev/null @@ -1 +0,0 @@ -/nix/store/fvf13rk7bimfzqwqyx73zxy132aqcqwj-aarch64-unknown-linux-gnu-pkg-config-wrapper-0.29.2-man \ No newline at end of file diff --git a/.sysroot-static b/.sysroot-static deleted file mode 120000 index 2cc68f664..000000000 --- a/.sysroot-static +++ /dev/null @@ -1 +0,0 @@ -/nix/store/4155d64l46f8qdl49dh3mpf81f9h4ivl-dpdk-aarch64-unknown-linux-gnu-25.11-static \ No newline at end of file diff --git a/nix/pkgs/dpdk/cross/bluefield3.gnu b/nix/pkgs/dpdk/cross/bluefield3.gnu deleted file mode 100644 index 56fd5ae8b..000000000 --- a/nix/pkgs/dpdk/cross/bluefield3.gnu +++ /dev/null @@ -1,19 +0,0 @@ -[binaries] -c = 'aarch64-unknown-linux-gnu-cc' -cpp = 'aarch64-unknown-linux-gnu-c++' -ar = 'aarch64-unknown-linux-gnu-ar' -strip = 'aarch64-unknown-linux-gnu-strip' -pkgconfig = 
'aarch64-unknown-linux-gnu-pkg-config' -pkg-config = 'aarch64-unknown-linux-gnu-pkg-config' -pcap-config = '' - -[host_machine] -system = 'linux' -cpu_family = 'aarch64' -cpu = 'armv8.4-a' -endian = 'little' - -[properties] -platform = 'bluefield3' -libc = 'gnu' -ar = 'aarch64-unknown-linux-gnu-ar' diff --git a/nix/pkgs/dpdk/cross/bluefield3 b/nix/pkgs/dpdk/cross/bluefield3.gnu.ini similarity index 92% rename from nix/pkgs/dpdk/cross/bluefield3 rename to nix/pkgs/dpdk/cross/bluefield3.gnu.ini index 783a14210..fec17cf95 100644 --- a/nix/pkgs/dpdk/cross/bluefield3 +++ b/nix/pkgs/dpdk/cross/bluefield3.gnu.ini @@ -10,8 +10,9 @@ pcap-config = '' [host_machine] system = 'linux' cpu_family = 'aarch64' -cpu = 'armv8.4-a' +cpu = 'armv8.6-a' endian = 'little' [properties] platform = 'bluefield3' +libc = 'gnu' diff --git a/nix/pkgs/dpdk/cross/bluefield3.musl b/nix/pkgs/dpdk/cross/bluefield3.musl.ini similarity index 88% rename from nix/pkgs/dpdk/cross/bluefield3.musl rename to nix/pkgs/dpdk/cross/bluefield3.musl.ini index 7247ed091..eb433ad26 100644 --- a/nix/pkgs/dpdk/cross/bluefield3.musl +++ b/nix/pkgs/dpdk/cross/bluefield3.musl.ini @@ -10,10 +10,9 @@ pcap-config = '' [host_machine] system = 'linux' cpu_family = 'aarch64' -cpu = 'armv8.4-a' +cpu = 'armv8.6-a' endian = 'little' [properties] platform = 'bluefield3' libc = 'musl' -ar = 'aarch64-unknown-linux-musl-ar' diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index 165ee8506..a942502ff 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -13,9 +13,11 @@ libnl, python3, build-params ? 
{ - lto = "false"; - build-type = "debug"; # "debug" | "release" + lto = "true"; + build-type = "release"; # "debug" | "release" + platform = "bluefield3"; }, + writeText, }: stdenv.mkDerivation { @@ -235,6 +237,32 @@ stdenv.mkDerivation { "net/virtio" "vdpa/mlx5" ]; + arch = stdenv.hostPlatform.parsed.cpu.name; + cpu = stdenv.hostPlatform.parsed.cpu.arch; + kernel = stdenv.hostPlatform.parsed.kernel.name; + endian = + if stdenv.hostPlatform.parsed.cpu.significantByte.name == "littleEndian" then "little" else "big"; + libc = if stdenv.hostPlatform.libc == "glibc" then "gnu" else stdenv.hostPlatform.libc; + isCrossCompile = stdenv.buildPlatform.parsed != stdenv.hostPlatform.parsed; + cross-file = writeText "cross-file.ini" '' + [binaries] + c = '${arch}-unknown-${kernel}-${libc}-cc' + cpp = '${arch}-unknown-${kernel}-${libc}-c++' + ar = '${arch}-unknown-${kernel}-${libc}-ar' + strip = '${arch}-unknown-${kernel}-${libc}-strip' + pkgconfig = '${arch}-unknown-${kernel}-${libc}-pkg-config' + pkg-config = '${arch}-unknown-${kernel}-${libc}-pkg-config' + + [host_machine] + system = '${kernel}' + cpu_family = '${arch}' + cpu = '${cpu}' + endian = '${endian}' + + [properties] + platform = '${build-params.platform}' + libc = '${libc}' + ''; in with build-params; [ @@ -257,8 +285,8 @@ stdenv.mkDerivation { ''-Denable_drivers=${lib.concatStringsSep "," enabledDrivers}'' ''-Denable_libs=${lib.concatStringsSep "," enabledLibs}'' ''-Ddisable_libs=${lib.concatStringsSep "," disabledLibs}'' - ''--cross-file=${./cross/bluefield3.gnu}'' - ]; + ] + ++ (if isCrossCompile then [ ''--cross-file=${cross-file}'' ] else [ ]); outputs = [ "dev" @@ -267,14 +295,7 @@ stdenv.mkDerivation { "static" ]; - # AR = "aarch64-unknown-linux-gnu-ar"; - - # configurePhase = '' - # meson setup arm-build --cross-file ${./cross/bluefield3} - # cd build - # ''; - # - CFLAGS = "-ffat-lto-objects -O1 -Wno-#warnings"; + CFLAGS = if stdenv.targetPlatform.parsed.cpu.name == "aarch64" then "-ffat-lto-objects" else 
""; postInstall = '' # Remove docs. We don't build these anyway diff --git a/nix/profiles.nix b/nix/profiles.nix index 7e47d8b8f..e978e7b32 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -61,7 +61,11 @@ let "-mssse3" ]; march.x86_64.NIX_CXXFLAGS_COMPILE = march.x86_64.NIX_CFLAGS_COMPILE; - march.aarch64.NIX_CFLAGS_COMPILE = [ ]; + march.aarch64.NIX_CFLAGS_COMPILE = [ + "-march=armv8.6-a" + "-mcpu=cortex-a78" + "-mtune=cortex-a78" + ]; march.aarch64.NIX_CXXFLAGS_COMPILE = march.aarch64.NIX_CFLAGS_COMPILE; march.aarch64.NIX_CFLAGS_LINK = [ ]; sanitize.address.NIX_CFLAGS_COMPILE = [ diff --git a/result b/result deleted file mode 120000 index a40bbe7e4..000000000 --- a/result +++ /dev/null @@ -1 +0,0 @@ -/nix/store/xx7cm72qy2c0643cm1ipngd87aqwkcdp-glibc-2.40-66 \ No newline at end of file diff --git a/result-bin b/result-bin deleted file mode 120000 index 24bd090c0..000000000 --- a/result-bin +++ /dev/null @@ -1 +0,0 @@ -/nix/store/q6wgv06q39bfhx2xl8ysc05wi6m2zdss-glibc-2.40-66-bin \ No newline at end of file From f01ebe116f024b24090a40f30d3010da3799be83 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sat, 20 Dec 2025 20:38:08 +0000 Subject: [PATCH 22/35] wip --- default.nix | 19 +++++++-- nix/machine.nix | 80 ++++++++++++++++++++++++++++++++++++++ nix/overlays/dataplane.nix | 80 +++++++++++++++++++++----------------- nix/overlays/default.nix | 5 ++- nix/pkgs/dpdk/default.nix | 28 ++++++++----- nix/profiles.nix | 6 +-- 6 files changed, 163 insertions(+), 55 deletions(-) create mode 100644 nix/machine.nix diff --git a/default.nix b/default.nix index a4451f3e6..d9920afb6 100644 --- a/default.nix +++ b/default.nix @@ -2,12 +2,16 @@ # Copyright Open Network Fabric Authors { overlay ? "dataplane", - target ? "x86_64-unknown-linux-gnu", + platform ? "x86-64-v3", + libc ? "gnu", prof ? "debug", instrumentation ? "none", sanitize ? 
"", }: let + lib = (import sources.nixpkgs { }).lib; + platform' = (import ./nix/machine.nix lib.recursiveUpdate).${platform}; + target = "${platform'.arch}-unknown-linux-${libc}"; arch = { "x86_64-unknown-linux-gnu" = { @@ -38,14 +42,14 @@ let .${target}; # helper method to work around nix's contrived builtin string split function. split-str = split: str: builtins.filter (elm: builtins.isString elm) (builtins.split split str); - sanitizers = if sanitize == null || sanitize == "" then [ ] else split-str ",+" sanitize; + sanitizers = split-str ",+" sanitize; sources = import ./npins; profile = import ./nix/profiles.nix { inherit prof sanitizers instrumentation; arch = arch.machine; }; overlays = import ./nix/overlays { - inherit sources; + inherit sources sanitizers; env = profile; }; pkgs = @@ -56,7 +60,14 @@ let }).pkgsCross.${arch.nixarch}; in pkgs.lib.fix (final: { - inherit pkgs sources profile; + inherit + pkgs + sources + profile + target + arch + ; + platform = platform'; sysroot-list = with final.pkgs; [ libc.static libc.out diff --git a/nix/machine.nix b/nix/machine.nix new file mode 100644 index 000000000..98c122343 --- /dev/null +++ b/nix/machine.nix @@ -0,0 +1,80 @@ +recursiveUpdate: rec { + x86-64-v3 = rec { + arch = "x86_64"; + march = "x86-64-v3"; + numa = { + max-nodes = 8; + }; + override = { + stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + dpdk = { + buildInputs = { + rdma-core = true; + libbsd = true; + libnl = true; + numactl = true; + }; + }; + }; + }; + x86-64-v4 = recursiveUpdate x86-64-v3 rec { + march = "x86-64-v4"; + override.stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + zen4 = recursiveUpdate x86-64-v4 rec { + march = "zen4"; + override.stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = 
NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + zen5 = recursiveUpdate zen4 rec { + march = "zen5"; + override.stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + bluefield2 = rec { + arch = "aarch64"; + march = "armv8.2-a"; + mcpu = "cortex-a72"; + numa = { + max-nodes = 1; + }; + override = { + stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + dpdk = { + buildInputs = { + rdma-core = true; + libbsd = true; + libnl = true; + numactl = false; + }; + }; + }; + }; + bluefield3 = recursiveUpdate bluefield2 rec { + march = "armv8.6-a"; + mcpu = "cortex-a78ae"; + override.stdenv'.env = rec { + NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; +} diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 516c66e60..e4244c5e0 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -2,7 +2,8 @@ # Copyright Open Network Fabric Authors { sources, - env ? 
{ }, + sanitizers, + env, }: final: prev: let @@ -15,21 +16,18 @@ let adapt = final.stdenvAdapters; bintools = final.buildPackages.llvmPackages.bintools; lld = final.buildPackages.llvmPackages.lld; - stdenv-llvm = adapt.addAttrsToDerivation (orig: { + stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; + env = helpers.addToEnv env (orig.env or { }); nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ bintools lld ]; }) final.llvmPackages.stdenv; - # }) (adapt.makeStaticLibraries final.buildPackages.llvmPackages.stdenv); - stdenv-llvm-with-flags = adapt.addAttrsToDerivation (orig: { - env = helpers.addToEnv env (orig.env or { }); - }) stdenv-llvm; - dataplane-dep = pkg: pkg.override { stdenv = stdenv-llvm-with-flags; }; + dataplane-dep = pkg: pkg.override { stdenv = stdenv'; }; in { - stdenv' = stdenv-llvm-with-flags; + inherit env stdenv'; # Don't bother adapting ethtool or iproute2's build to our custom flags / env. Failure to null this can trigger # _massive_ builds because ethtool depends on libnl (et al), and we _do_ overlay libnl. Thus, the ethtool / iproute2 # get rebuilt and you end up rebuilding the whole world. @@ -48,8 +46,8 @@ in mscgen = null; pandoc = null; - # We should avoid accepting anything in our dpdk + friends pkgs which depends on udev / systemd; our deploy won't - # support any such mechanisms. + # We should avoid accepting anything in our dpdk + friends pkgs which depends on udev / systemd; our deploy simply + # won't support any such mechanisms. # # Usually this type of dependency takes the form of udev rules / systemd service files being generated (which is no # problem). That said, builds which hard and fast depend on systemd or udev are very suspicious in this context, so @@ -130,16 +128,14 @@ in # function. 
In "the glorious future" we should bump all of this logic up to the dataplane's init process, compute # what we need to, pre-mmap _all_ of our heap memory, configure our cgroups and CPU affinities, and then pin our cores # and use memory pools local to the numa node of the pinned core. That would be a fair amount of work, but it would - # liminate a fairly large dependency and likely increase the performance and security of the dataplane. + # eliminate a dependency and likely increase the performance and security of the dataplane. # # For now, we leave this on so DPDK can do some of that for us. That said, this logic is quite cold and would ideally # be size optimized and punted far from all hot paths. BOLT should be helpful here. numactl = (dataplane-dep prev.numactl).overrideAttrs (orig: { outputs = (prev.lib.lists.remove "man" orig.outputs) ++ [ "static" ]; - # we need to enable shared (in addition to static) to build dpdk. - # See the note on libmd for reasoning. configureFlags = (orig.configureFlags or [ ]) ++ [ - "--enable-static" # dpdk does not like to build its .so files if we don't build numa.so as well + "--enable-static" ]; postInstall = (orig.postInstall or "") + '' mkdir -p "$static/lib"; @@ -159,29 +155,45 @@ in rdma-core = (dataplane-dep prev.rdma-core).overrideAttrs (orig: { version = sources.rdma-core.branch; src = sources.rdma-core.outPath; - outputs = [ - "dev" - "out" + outputs = (orig.outputs or [ ]) ++ [ "static" ]; - cmakeFlags = orig.cmakeFlags ++ [ - "-DENABLE_STATIC=1" - # we don't need pyverbs, and turning it off reduces build time / complexity. - "-DNO_PYVERBS=1" - # no need for docs in container images. - "-DNO_MAN_PAGES=1" - # we don't care about this lib's exported symbols / compat situation _at all_ because we static link (which - # doesn't even have symbol versioning / compatibility in the first place). Turning this off just reduces the - # build's internal complexity and makes lto easier. 
- "-DNO_COMPAT_SYMS=1" - # this allows thread sanitizer to build (thread/ub sanitizers do not like -Wl,-z,defs or -Wl,--no-undefined) - # Normally I would say that disabling -Wl,--no-undefined is a bad idea, but we throw away all the shared - # libs and executables from this build anwyway, so it it should be harmless. - "-DSUPPORTS_NO_UNDEFINED=0" # todo: find a way to enable this for only {thread,ub}san builds. - ]; + cmakeFlags = + orig.cmakeFlags + ++ [ + "-DENABLE_STATIC=1" + # we don't need pyverbs, and turning it off reduces build time / complexity. + "-DNO_PYVERBS=1" + # no need for docs in container images. + "-DNO_MAN_PAGES=1" + # we don't care about this lib's exported symbols / compat situation _at all_ because we static link (which + # doesn't even have symbol versioning / compatibility in the first place). Turning this off just reduces the + # build's internal complexity and makes lto easier. + "-DNO_COMPAT_SYMS=1" + # Very old versions of rdma-core used what they call the "legacy write path" to support rdma-operations. + # These have (long) since been superseded by the ioctl mode, but the library generates both code paths by + # default due to rdma-core's fairly aggressive backwards compatibility stance. + # We have absolutely no need or desire to support the legacy mode, and we can potentially save ourselves some + # instruction cache pressure by disabling that old code at compile time. + "-DIOCTL_MODE=ioctl" + ] + ++ + final.lib.optionals + ( + (builtins.elem "thread" sanitizers) + || (builtins.elem "address" sanitizers) + || (builtins.elem "safe-stack" sanitizers) + ) + [ + # This allows address / thread sanitizer to build (thread/ub sanitizers do not like -Wl,-z,defs or + # -Wl,--no-undefined). + # This isn't a hack: undefined symbols from sanitizers is a known issue and is not unique to us. 
+ "-DSUPPORTS_NO_UNDEFINED=0" + ]; postInstall = (orig.postInstall or "") + '' - mkdir -p $static/lib; + mkdir -p $static/lib $man; mv $out/lib/*.a $static/lib/ + mv $out/share $man/ ''; }); @@ -201,6 +213,4 @@ in # This wrapping process does not really cause any performance issue due to lto; the compiler is going to "unwrap" # these methods anyway. dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper { }); - - inherit env; } diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index d6d69a8f7..8bc0891c6 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -2,10 +2,11 @@ # Copyright Open Network Fabric Authors { sources, - env ? { }, + sanitizers, + env, }: { dataplane = import ./dataplane.nix { - inherit sources env; + inherit sources sanitizers env; }; } diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index a942502ff..bc7b394de 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -241,17 +241,27 @@ stdenv.mkDerivation { cpu = stdenv.hostPlatform.parsed.cpu.arch; kernel = stdenv.hostPlatform.parsed.kernel.name; endian = - if stdenv.hostPlatform.parsed.cpu.significantByte.name == "littleEndian" then "little" else "big"; - libc = if stdenv.hostPlatform.libc == "glibc" then "gnu" else stdenv.hostPlatform.libc; + { + littleEndian = "little"; + bigEndian = "big"; + } + .${stdenv.hostPlatform.parsed.cpu.significantByte.name}; + libc-vendor = + { + glibc = "gnu"; + musl = "musl"; + } + .${stdenv.hostPlatform.libc}; isCrossCompile = stdenv.buildPlatform.parsed != stdenv.hostPlatform.parsed; + cross-prefix = "${arch}-unknown-${kernel}-${libc-vendor}"; cross-file = writeText "cross-file.ini" '' [binaries] - c = '${arch}-unknown-${kernel}-${libc}-cc' - cpp = '${arch}-unknown-${kernel}-${libc}-c++' - ar = '${arch}-unknown-${kernel}-${libc}-ar' - strip = '${arch}-unknown-${kernel}-${libc}-strip' - pkgconfig = '${arch}-unknown-${kernel}-${libc}-pkg-config' - pkg-config = 
'${arch}-unknown-${kernel}-${libc}-pkg-config' + c = '${cross-prefix}-cc' + cpp = '${cross-prefix}-c++' + ar = '${cross-prefix}-ar' + strip = '${cross-prefix}-strip' + pkgconfig = '${cross-prefix}-pkg-config' + pkg-config = '${cross-prefix}-pkg-config' [host_machine] system = '${kernel}' @@ -261,7 +271,7 @@ stdenv.mkDerivation { [properties] platform = '${build-params.platform}' - libc = '${libc}' + libc = '${libc-vendor}' ''; in with build-params; diff --git a/nix/profiles.nix b/nix/profiles.nix index e978e7b32..7e47d8b8f 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -61,11 +61,7 @@ let "-mssse3" ]; march.x86_64.NIX_CXXFLAGS_COMPILE = march.x86_64.NIX_CFLAGS_COMPILE; - march.aarch64.NIX_CFLAGS_COMPILE = [ - "-march=armv8.6-a" - "-mcpu=cortex-a78" - "-mtune=cortex-a78" - ]; + march.aarch64.NIX_CFLAGS_COMPILE = [ ]; march.aarch64.NIX_CXXFLAGS_COMPILE = march.aarch64.NIX_CFLAGS_COMPILE; march.aarch64.NIX_CFLAGS_LINK = [ ]; sanitize.address.NIX_CFLAGS_COMPILE = [ From a7b3775874a040de001fc91c32b43a27102d7b43 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sat, 20 Dec 2025 22:47:08 +0000 Subject: [PATCH 23/35] wip --- default.nix | 44 ++--------- nix/overlays/dataplane.nix | 8 +- nix/overlays/default.nix | 9 ++- nix/{machine.nix => platform.nix} | 15 ++-- nix/profiles.nix | 2 +- nix/target.nix | 127 ++++++++++++++++++++++++++++++ 6 files changed, 155 insertions(+), 50 deletions(-) rename nix/{machine.nix => platform.nix} (87%) create mode 100644 nix/target.nix diff --git a/default.nix b/default.nix index d9920afb6..f69dda8ed 100644 --- a/default.nix +++ b/default.nix @@ -10,54 +10,26 @@ }: let lib = (import sources.nixpkgs { }).lib; - platform' = (import ./nix/machine.nix lib.recursiveUpdate).${platform}; - target = "${platform'.arch}-unknown-linux-${libc}"; - arch = - { - "x86_64-unknown-linux-gnu" = { - target = "x86_64-unknown-linux-gnu"; - machine = "x86_64"; - nixarch = "gnu64"; - libc = "gnu"; - }; - "x86_64-unknown-linux-musl" = { - target = 
"x86_64-unknown-linux-musl"; - machine = "x86_64"; - nixarch = "musl64"; - libc = "musl"; - }; - "aarch64-unknown-linux-gnu" = { - target = "aarch64-unknown-linux-gnu"; - machine = "aarch64"; - nixarch = "aarch64-multiplatform"; - libc = "glibc"; - }; - "aarch64-unknown-linux-musl" = { - target = "aarch64-unknown-linux-musl"; - machine = "aarch64"; - nixarch = "aarch64-multiplatform-musl"; - libc = "musl"; - }; - } - .${target}; # helper method to work around nix's contrived builtin string split function. split-str = split: str: builtins.filter (elm: builtins.isString elm) (builtins.split split str); - sanitizers = split-str ",+" sanitize; + sanitizers = if sanitize == "" then [] else split-str ",+" sanitize; sources = import ./npins; + target = import ./nix/target.nix { + inherit lib platform libc; + }; profile = import ./nix/profiles.nix { inherit prof sanitizers instrumentation; - arch = arch.machine; + arch = target.platform.arch; }; overlays = import ./nix/overlays { - inherit sources sanitizers; - env = profile; + inherit sources sanitizers target profile; }; pkgs = (import sources.nixpkgs { overlays = [ overlays.${overlay} ]; - }).pkgsCross.${arch.nixarch}; + }).pkgsCross.${target.info.nixarch}; in pkgs.lib.fix (final: { inherit @@ -65,9 +37,7 @@ pkgs.lib.fix (final: { sources profile target - arch ; - platform = platform'; sysroot-list = with final.pkgs; [ libc.static libc.out diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index e4244c5e0..a8e1bddeb 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -3,7 +3,8 @@ { sources, sanitizers, - env, + target, + profile, }: final: prev: let @@ -16,9 +17,12 @@ let adapt = final.stdenvAdapters; bintools = final.buildPackages.llvmPackages.bintools; lld = final.buildPackages.llvmPackages.lld; + env = helpers.addToEnv target.platform.override.stdenv.env profile; stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; - env = helpers.addToEnv env (orig.env or { }); + 
env = helpers.addToEnv target.platform.override.stdenv.env ( + helpers.addToEnv env (orig.env or { }) + ); nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ bintools lld diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 8bc0891c6..488472a3c 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -1,12 +1,13 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Open Network Fabric Authors { - sources, - sanitizers, - env, + sources, + sanitizers, + target, + profile, }: { dataplane = import ./dataplane.nix { - inherit sources sanitizers env; + inherit sources sanitizers target profile; }; } diff --git a/nix/machine.nix b/nix/platform.nix similarity index 87% rename from nix/machine.nix rename to nix/platform.nix index 98c122343..94b1122c0 100644 --- a/nix/machine.nix +++ b/nix/platform.nix @@ -1,4 +1,7 @@ -recursiveUpdate: rec { +{ + lib ? (import { }).lib, +}: +rec { x86-64-v3 = rec { arch = "x86_64"; march = "x86-64-v3"; @@ -21,7 +24,7 @@ recursiveUpdate: rec { }; }; }; - x86-64-v4 = recursiveUpdate x86-64-v3 rec { + x86-64-v4 = lib.recursiveUpdate x86-64-v3 rec { march = "x86-64-v4"; override.stdenv'.env = rec { NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; @@ -29,7 +32,7 @@ recursiveUpdate: rec { NIX_CFLAGS_LINK = [ ]; }; }; - zen4 = recursiveUpdate x86-64-v4 rec { + zen4 = lib.recursiveUpdate x86-64-v4 rec { march = "zen4"; override.stdenv'.env = rec { NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; @@ -37,7 +40,7 @@ recursiveUpdate: rec { NIX_CFLAGS_LINK = [ ]; }; }; - zen5 = recursiveUpdate zen4 rec { + zen5 = lib.recursiveUpdate zen4 rec { march = "zen5"; override.stdenv'.env = rec { NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; @@ -68,8 +71,8 @@ recursiveUpdate: rec { }; }; }; - bluefield3 = recursiveUpdate bluefield2 rec { - march = "armv8.6-a"; + bluefield3 = lib.recursiveUpdate bluefield2 rec { + march = "armv8.4-a"; mcpu = "cortex-a78ae"; override.stdenv'.env = rec { NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; diff --git 
a/nix/profiles.nix b/nix/profiles.nix index 7e47d8b8f..eaa14c67f 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -55,7 +55,7 @@ let march.x86_64.NIX_CFLAGS_COMPILE = [ # DPDK functionally requires some -m flags on x86_64. # These features have been available for a long time and can be found on any reasonably recent machine, so just - # enable them here for x86_64 builds. + # enable them here for all x86_64 builds. "-mrtm" "-mcrc32" "-mssse3" diff --git a/nix/target.nix b/nix/target.nix new file mode 100644 index 000000000..7bd0ebb0a --- /dev/null +++ b/nix/target.nix @@ -0,0 +1,127 @@ +{ + lib ? (import { }).lib, + platform, + kernel ? "linux", + libc, +}: +let + platforms = rec { + x86-64-v3 = rec { + arch = "x86_64"; + march = "x86-64-v3"; + numa = { + max-nodes = 8; + }; + override = { + stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + dpdk = { + buildInputs = { + rdma-core = true; + libbsd = true; + libnl = true; + numactl = true; + }; + }; + }; + }; + x86-64-v4 = lib.recursiveUpdate x86-64-v3 rec { + march = "x86-64-v4"; + override.stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + zen4 = lib.recursiveUpdate x86-64-v4 rec { + march = "znver4"; + override.stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + zen5 = lib.recursiveUpdate zen4 rec { + march = "znver5"; + override.stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + bluefield2 = rec { + arch = "aarch64"; + march = "armv8.2-a"; + mcpu = "cortex-a72"; + numa = { + max-nodes = 1; + }; + override = { + stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK 
= [ ]; + }; + dpdk = { + buildInputs = { + rdma-core = true; + libbsd = true; + libnl = true; + numactl = false; + }; + }; + }; + }; + bluefield3 = lib.recursiveUpdate bluefield2 rec { + march = "armv8.4-a"; + mcpu = "cortex-a78ae"; + override.stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + }; +in +lib.fix (final: { + platform = platforms.${platform}; + info = + { + x86_64 = { + linux = { + gnu = { + target = "x86_64-unknown-linux-gnu"; + machine = "x86_64"; + nixarch = "gnu64"; + libc = "gnu"; + }; + musl = { + target = "x86_64-unknown-linux-musl"; + machine = "x86_64"; + nixarch = "musl64"; + libc = "musl"; + }; + }; + }; + aarch64 = { + linux = { + gnu = { + target = "aarch64-unknown-linux-gnu"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform"; + libc = "gnu"; + }; + musl = { + target = "aarch64-unknown-linux-musl"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform-musl"; + libc = "musl"; + }; + }; + }; + } + .${final.platform.arch}.${kernel}.${libc}; +}) From 09fb701052911b6f7a38be0ae35d531e39fff81b Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 04:12:21 +0000 Subject: [PATCH 24/35] wip --- .clangd | 3 + default.nix | 65 +- nix/overlays/dataplane.nix | 23 +- nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c | 1516 +++++++++++++++++++++- nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h | 1516 +--------------------- nix/pkgs/dpdk/default.nix | 1 + nix/target.nix | 12 +- shell.nix | 65 +- sysroot | 2 +- 9 files changed, 1628 insertions(+), 1575 deletions(-) create mode 100644 .clangd diff --git a/.clangd b/.clangd new file mode 100644 index 000000000..bb03d2f22 --- /dev/null +++ b/.clangd @@ -0,0 +1,3 @@ +CompileFlags: + Add: + - "-I/home/dnoland/code/githedgehog/dataplane/sysroot/include" diff --git a/default.nix b/default.nix index f69dda8ed..e5ade058e 100644 --- a/default.nix +++ b/default.nix @@ -11,8 +11,10 @@ let lib = (import 
sources.nixpkgs { }).lib; # helper method to work around nix's contrived builtin string split function. - split-str = split: str: builtins.filter (elm: builtins.isString elm) (builtins.split split str); - sanitizers = if sanitize == "" then [] else split-str ",+" sanitize; + split-str = + split: str: + if str == "" then [ ] else builtins.filter (elm: builtins.isString elm) (builtins.split split str); + sanitizers = split-str ",+" sanitize; sources = import ./npins; target = import ./nix/target.nix { inherit lib platform libc; @@ -22,7 +24,12 @@ let arch = target.platform.arch; }; overlays = import ./nix/overlays { - inherit sources sanitizers target profile; + inherit + sources + sanitizers + target + profile + ; }; pkgs = (import sources.nixpkgs { @@ -30,31 +37,51 @@ let overlays.${overlay} ]; }).pkgsCross.${target.info.nixarch}; -in -pkgs.lib.fix (final: { - inherit - pkgs - sources - profile - target - ; - sysroot-list = with final.pkgs; [ - libc.static - libc.out + + sysroot-list = with pkgs; [ + stdenv'.cc.libc.dev + stdenv'.cc.libc.out + libmd.dev libmd.static + libbsd.dev libbsd.static - libnl.out numactl.dev numactl.static + rdma-core.dev rdma-core.static dpdk.dev - dpdk.out dpdk.static dpdk-wrapper.dev dpdk-wrapper.out ]; - sysroot = pkgs.symlinkJoin { - name = "sysroot"; - paths = final.sysroot-list; + build-tools-list = with pkgs.buildPackages.llvmPackages; [ + bintools + clang + libclang.lib + lld + ]; +in +pkgs.lib.fix (final: { + inherit + pkgs + sources + profile + target + ; + sysroot = + with final.pkgs; + symlinkJoin { + name = "sysroot"; + paths = sysroot-list; + }; + build-tools = + with final.pkgs.buildPackages; + symlinkJoin { + name = "build-tools"; + paths = build-tools-list; + }; + dev-shell = final.pkgs.symlinkJoin { + name = "dataplane-dev-shell"; + paths = sysroot-list ++ build-tools-list; }; }) diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index a8e1bddeb..3415552c2 100644 --- a/nix/overlays/dataplane.nix +++ 
b/nix/overlays/dataplane.nix @@ -17,12 +17,11 @@ let adapt = final.stdenvAdapters; bintools = final.buildPackages.llvmPackages.bintools; lld = final.buildPackages.llvmPackages.lld; - env = helpers.addToEnv target.platform.override.stdenv.env profile; + added-to-env = helpers.addToEnv target.platform.override.stdenv.env profile; stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; - env = helpers.addToEnv target.platform.override.stdenv.env ( - helpers.addToEnv env (orig.env or { }) - ); + separateDebugInfo = true; + env = helpers.addToEnv added-to-env (orig.env or { }); nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ bintools lld @@ -31,7 +30,7 @@ let dataplane-dep = pkg: pkg.override { stdenv = stdenv'; }; in { - inherit env stdenv'; + inherit stdenv' added-to-env; # Don't bother adapting ethtool or iproute2's build to our custom flags / env. Failure to null this can trigger # _massive_ builds because ethtool depends on libnl (et al), and we _do_ overlay libnl. Thus, the ethtool / iproute2 # get rebuilt and you end up rebuilding the whole world. @@ -67,7 +66,7 @@ in # At minimum, the provided functions are generally quite small and likely to benefit from inlining, so static linking # is a solid plan. libmd = (dataplane-dep prev.libmd).overrideAttrs (orig: { - outputs = (orig.outputs or [ "out" ]) ++ [ "static" ]; + outputs = (orig.outputs or [ "out" ]) ++ [ "man" "dev" "static" ]; # we need to enable shared libs (in addition to static) to make dpdk's build happy. Basically, DPDK's build has no # means of disabling shared libraries, and it doesn't really make any sense to static link this into each .so # file. Ideally we would just _not_ build those .so files, but that would require doing brain surgery on dpdk's @@ -162,6 +161,12 @@ in outputs = (orig.outputs or [ ]) ++ [ "static" ]; + # CMake depends on -Werror to function, but the test program it uses to confirm that -Werror works "always produces + # warnings." 
The reason for this is that we have injected our own CFLAGS and they have nothing to do with the + # trivial program. This causes the unused-command-line-argument warning to trigger. + # We disable that warning here to make sure rdma-core can build (more specifically, to make sure that it can build + # with debug symbols). + CFLAGS = "-Wno-unused-command-line-argument"; cmakeFlags = orig.cmakeFlags ++ [ @@ -209,7 +214,11 @@ in # # Also, while this library has a respectable security track record, this is also a super strong candidate for # cfi, safe-stack, and cf-protection. - dpdk = dataplane-dep (final.callPackage ../pkgs/dpdk { src = sources.dpdk; }); + dpdk = dataplane-dep ( + final.callPackage ../pkgs/dpdk ( + target.platform.override.dpdk.buildInputs // { src = sources.dpdk; } + ) + ); # DPDK is largely composed of static-inline functions. # We need to wrap those functions with "_w" variants so that we can actually call them from rust. diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c index 58e2f4df7..118e3fc13 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c @@ -3,14 +3,1516 @@ #include "dpdk_wrapper.h" -int wrte_errno() { - return rte_errno; -} +int rte_errno_get() { return rte_errno; } + +// Static wrappers -uint16_t wrte_eth_rx_burst(uint16_t const port_id, uint16_t const queue_id, struct rte_mbuf **rx_pkts, uint16_t const nb_pkts) { - return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +int rte_is_aligned_w(const void *const ptr, const unsigned int align) { + return rte_is_aligned(ptr, align); +} +void rte_atomic_thread_fence_w(rte_memory_order memorder) { + rte_atomic_thread_fence(memorder); +} +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { + return rte_atomic16_cmpset(dst, exp, src); +} +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { + return rte_atomic16_exchange(dst, val); +} +void 
rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } +int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { + return rte_atomic16_read(v); +} +void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { + rte_atomic16_set(v, new_value); +} +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { + rte_atomic16_add(v, inc); +} +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { + rte_atomic16_sub(v, dec); +} +void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } +void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { + return rte_atomic16_add_return(v, inc); +} +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { + return rte_atomic16_sub_return(v, dec); +} +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_inc_and_test(v); +} +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_dec_and_test(v); +} +int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { + return rte_atomic16_test_and_set(v); +} +void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { + return rte_atomic32_cmpset(dst, exp, src); +} +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { + return rte_atomic32_exchange(dst, val); +} +void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } +int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { + return rte_atomic32_read(v); +} +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { + rte_atomic32_set(v, new_value); +} +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { + rte_atomic32_add(v, inc); +} +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { + rte_atomic32_sub(v, dec); +} +void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } +void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } +int32_t 
rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { + return rte_atomic32_add_return(v, inc); +} +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { + return rte_atomic32_sub_return(v, dec); +} +int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_inc_and_test(v); +} +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_dec_and_test(v); +} +int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { + return rte_atomic32_test_and_set(v); +} +void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { + return rte_atomic64_cmpset(dst, exp, src); +} +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { + return rte_atomic64_exchange(dst, val); +} +void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } +int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { + rte_atomic64_set(v, new_value); +} +void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { + rte_atomic64_add(v, inc); +} +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { + rte_atomic64_sub(v, dec); +} +void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } +void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { + return rte_atomic64_add_return(v, inc); +} +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { + return rte_atomic64_sub_return(v, dec); +} +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_inc_and_test(v); +} +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_dec_and_test(v); +} +int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { + return rte_atomic64_test_and_set(v); +} +void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } +void rte_smp_mb_w(void) { 
rte_smp_mb(); } +uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } +uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } +uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } +void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } +uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } +uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } +size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { + return rte_strlcpy(dst, src, size); +} +size_t rte_strlcat_w(char *dst, const char *src, size_t size) { + return rte_strlcat(dst, src, size); +} +const char *rte_str_skip_leading_spaces_w(const char *src) { + return rte_str_skip_leading_spaces(src); +} +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { + rte_uuid_copy(dst, src); +} +int rte_gettid_w(void) { return rte_gettid(); } +unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } +void rte_pause_w(void) { rte_pause(); } +void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_16(addr, expected, memorder); +} +void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_32(addr, expected, memorder); +} +void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_64(addr, expected, memorder); +} +void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } +void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } +void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } +int rte_spinlock_trylock_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock(sl); +} +int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { + return rte_spinlock_is_locked(sl); +} +int rte_tm_supported_w(void) { return rte_tm_supported(); } +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } +void 
rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { + rte_spinlock_unlock_tm(sl); +} +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock_tm(sl); +} +void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_init(slr); +} +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock(slr); +} +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock(slr); +} +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock(slr); +} +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock_tm(slr); +} +void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock_tm(slr); +} +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock_tm(slr); +} +// unsigned int rte_xbegin_w(void) { return rte_xbegin(); } +// void rte_xend_w(void) { rte_xend(); } +// int rte_xtest_w(void) { return rte_xtest(); } +// int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_get32(nr, addr); +} +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_set32(nr, addr); +} +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_clear32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_set32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_clear32(nr, addr); +} +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_get64(nr, addr); +} +void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_set64(nr, addr); 
+} +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_clear64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_set64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_clear64(nr, addr); +} +unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } +unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } +unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } +unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } +unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } +unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } +uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } +uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } +uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { + return rte_bsf32_safe(v, pos); +} +uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { + return rte_bsf64_safe(v, pos); +} +uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } +uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } +int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } +uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } +uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } +uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } +uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } +uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } +uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } +void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } +int 
rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_read_trylock(rwl); +} +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock(rwl); +} +int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_trylock(rwl); +} +void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock(rwl); +} +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_is_locked(rwl); +} +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_lock_tm(rwl); +} +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock_tm(rwl); +} +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_lock_tm(rwl); +} +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock_tm(rwl); +} +unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned 
int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, + 
void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); +} +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_prod_htd_max(r); +} +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_prod_htd_max(r, v); +} +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_cons_htd_max(r); +} +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_cons_htd_max(r, v); +} +unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_mp_enqueue_elem(r, obj, esize); +} +int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_sp_enqueue_elem(r, obj, esize); +} +int rte_ring_enqueue_elem_w(struct rte_ring *r, void 
*obj, unsigned int esize) { + return rte_ring_enqueue_elem(r, obj, esize); +} +unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_mc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_sc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_dequeue_elem(r, obj_p, esize); +} +unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + 
unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_start(r, n, free_space); +} +void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n) { + rte_ring_enqueue_elem_finish(r, obj_table, esize, n); +} +void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, + unsigned int n) { + rte_ring_enqueue_finish(r, obj_table, n); +} +unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + 
return rte_ring_dequeue_bulk_start(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_start(r, obj_table, n, available); +} +void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_elem_finish(r, n); +} +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_finish(r, n); +} +unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); +} +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_elem_finish(r, n); +} +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_finish(r, n); +} +unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + 
unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *available) { + return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); +} +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_elem_finish(r, n); +} +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_finish(r, n); +} +unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, + unsigned int n, unsigned int *free_space) { + return rte_ring_enqueue_bulk(r, obj_table, n, free_space); +} +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_mp_enqueue(r, obj); +} +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_sp_enqueue(r, obj); +} +int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_enqueue(r, obj); +} +unsigned int 
rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_bulk(r, obj_table, n, available); +} +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_mc_dequeue(r, obj_p); +} +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_sc_dequeue(r, obj_p); +} +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_dequeue(r, obj_p); +} +unsigned int rte_ring_count_w(const struct rte_ring *r) { + return rte_ring_count(r); +} +unsigned int rte_ring_free_count_w(const struct rte_ring *r) { + return rte_ring_free_count(r); +} +int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } +int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } +unsigned int rte_ring_get_size_w(const struct rte_ring *r) { + return rte_ring_get_size(r); +} +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { + return rte_ring_get_capacity(r); +} +enum rte_ring_sync_type +rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_prod_sync_type(r); +} +int rte_ring_is_prod_single_w(const struct rte_ring *r) { + return rte_ring_is_prod_single(r); +} +enum rte_ring_sync_type +rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_cons_sync_type(r); +} +int rte_ring_is_cons_single_w(const struct rte_ring *r) { + return rte_ring_is_cons_single(r); +} +unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + 
return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_burst(r, obj_table, n, available); +} +void *rte_memcpy_w(void *dst, const void *src, size_t n) { + return rte_memcpy(dst, src, n); +} +// void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { +// return rte_mov15_or_less(dst, src, n); +// } +void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } +void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } +void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } +void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } +// void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_generic(dst, src, n); +// } +// void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_aligned(dst, src, n); +// } +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { + return rte_mempool_get_header(obj); +} +struct rte_mempool *rte_mempool_from_obj_w(void *obj) { + return 
rte_mempool_from_obj(obj); +} +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { + return rte_mempool_get_trailer(obj); +} +struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { + return rte_mempool_get_ops(ops_index); +} +int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); +} +int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n) { + return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); +} +struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, + unsigned int lcore_id) { + return rte_mempool_default_cache(mp, lcore_id); +} +void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, + struct rte_mempool *mp) { + rte_mempool_cache_flush(cache, mp); +} +void rte_mempool_do_generic_put_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_do_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n) { + rte_mempool_put_bulk(mp, obj_table, n); +} +void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { + rte_mempool_put(mp, obj); +} +int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + return rte_mempool_do_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { + 
return rte_mempool_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_get_bulk(mp, obj_table, n); +} +int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { + return rte_mempool_get(mp, obj_p); +} +int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) { + return rte_mempool_get_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_full_w(const struct rte_mempool *mp) { + return rte_mempool_full(mp); +} +int rte_mempool_empty_w(const struct rte_mempool *mp) { + return rte_mempool_empty(mp); +} +rte_iova_t rte_mempool_virt2iova_w(const void *elt) { + return rte_mempool_virt2iova(elt); +} +void *rte_mempool_get_priv_w(struct rte_mempool *mp) { + return rte_mempool_get_priv(mp); +} +void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } +void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } +void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } +void rte_prefetch_non_temporal_w(const void *p) { + rte_prefetch_non_temporal(p); +} +void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } +void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } +void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } +void rte_cldemote_w(const void *p) { rte_cldemote(p); } +uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } +uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } +uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } +// uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } +// uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } +// uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part1(m); +} +void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { 
+ rte_mbuf_prefetch_part2(m); +} +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_size(mp); +} +rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { + return rte_mbuf_iova_get(m); +} +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { + rte_mbuf_iova_set(m, iova); +} +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova(mb); +} +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova_default(mb); +} +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { + return rte_mbuf_from_indirect(mi); +} +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { + return rte_mbuf_buf_addr(mb, mp); +} +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { + return rte_mbuf_data_addr_default(mb); +} +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } +void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } +uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_flags(mp); +} +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { + return rte_mbuf_refcnt_read(m); +} +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { + rte_mbuf_refcnt_set(m, new_value); +} +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { + return rte_mbuf_refcnt_update(m, value); +} +uint16_t +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { + return rte_mbuf_ext_refcnt_read(shinfo); +} +void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, + uint16_t new_value) { + rte_mbuf_ext_refcnt_set(shinfo, new_value); +} +uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, + int16_t value) { + return rte_mbuf_ext_refcnt_update(shinfo, value); +} +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { + return rte_mbuf_raw_alloc(mp); +} +void 
rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_data_room_size(mp); +} +void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { + rte_pktmbuf_reset_headroom(m); +} +void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { + return rte_pktmbuf_alloc(mp); +} +int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned int count) { + return rte_pktmbuf_alloc_bulk(pool, mbufs, count); +} +struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, + void *fcb_opaque) { + return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, + fcb_opaque); +} +void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo) { + rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); +} +void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, + const struct rte_mbuf *msrc) { + rte_mbuf_dynfield_copy(mdst, msrc); +} +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { + rte_pktmbuf_attach(mi, m); +} +void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { + return rte_pktmbuf_prefree_seg(m); +} +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } +void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { + rte_pktmbuf_refcnt_update(m, v); +} +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_headroom(m); +} +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_tailroom(m); +} +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct 
rte_mbuf *m) { + return rte_pktmbuf_lastseg(m); +} +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_prepend(m, len); +} +char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_append(m, len); +} +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_adj(m, len); +} +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_trim(m, len); +} +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { + return rte_pktmbuf_is_contiguous(m); +} +const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf) { + return rte_pktmbuf_read(m, off, len, buf); +} +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { + return rte_pktmbuf_chain(head, tail); +} +uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, + uint64_t tso, uint64_t ol3, uint64_t ol2, + uint64_t unused) { + return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); +} +int rte_validate_tx_offload_w(const struct rte_mbuf *m) { + return rte_validate_tx_offload(m); +} +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { + return rte_pktmbuf_linearize(mbuf); +} +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_queue_get(m); +} +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_traffic_class_get(m); +} +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_color_get(m); +} +void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, uint8_t *color) { + rte_mbuf_sched_get(m, queue_id, traffic_class, color); +} +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { + rte_mbuf_sched_queue_set(m, queue_id); +} +void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, + uint8_t traffic_class) { + rte_mbuf_sched_traffic_class_set(m, traffic_class); 
+} +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { + rte_mbuf_sched_color_set(m, color); +} +void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, + uint8_t traffic_class, uint8_t color) { + rte_mbuf_sched_set(m, queue_id, traffic_class, color); +} +int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, + const struct rte_ether_addr *ea2) { + return rte_is_same_ether_addr(ea1, ea2); +} +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_zero_ether_addr(ea); +} +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_unicast_ether_addr(ea); +} +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_multicast_ether_addr(ea); +} +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_broadcast_ether_addr(ea); +} +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_universal_ether_addr(ea); +} +int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_local_admin_ether_addr(ea); +} +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_valid_assigned_ether_addr(ea); +} +void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, + struct rte_ether_addr *ea_to) { + rte_ether_addr_copy(ea_from, ea_to); +} +int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } +int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { + return rte_bitmap_get_memory_footprint(n_bits); +} +struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init(n_bits, mem, mem_size); +} +struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); +} +void rte_bitmap_free_w(struct rte_bitmap 
*bmp) { return rte_bitmap_free(bmp); } +void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } +void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_prefetch0(bmp, pos); +} +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { + return rte_bitmap_get(bmp, pos); +} +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_set(bmp, pos); +} +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, + uint64_t slab) { + rte_bitmap_set_slab(bmp, pos, slab); +} +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_clear(bmp, pos); +} +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { + return rte_bitmap_scan(bmp, pos, slab); +} +uint16_t rte_raw_cksum_w(const void *buf, size_t len) { + return rte_raw_cksum(buf, len); +} +int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, + uint16_t *cksum) { + return rte_raw_cksum_mbuf(m, off, len, cksum); +} +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_hdr_len(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum_simple(ipv4_hdr); +} +uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + uint64_t ol_flags) { + return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +} +uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); +} +uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); +} +int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); +} +int 
rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); +} +bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b) { + return rte_ipv6_addr_eq(a, b); +} +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { + rte_ipv6_addr_mask(ip, depth); +} +bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b, uint8_t depth) { + return rte_ipv6_addr_eq_prefix(a, b, depth); +} +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { + return rte_ipv6_mask_depth(mask); +} +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_unspec(ip); +} +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_loopback(ip); +} +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_linklocal(ip); +} +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_sitelocal(ip); +} +bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4compat(ip); +} +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4mapped(ip); +} +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_mcast(ip); +} +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_mc_scope(ip); +} +void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, + const struct rte_ether_addr *mac) { + rte_ipv6_llocal_from_ethernet(ip, mac); +} +void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, + const struct rte_ipv6_addr *ip) { + rte_ipv6_solnode_from_addr(sol, ip); +} +void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, + const struct rte_ipv6_addr *ip) { + rte_ether_mcast_from_ipv6(mac, ip); 
+} +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { + return rte_ipv6_check_version(ip); +} +uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + uint64_t ol_flags) { + return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +} +uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); +} +uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); +} +int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); +} +int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); +} +int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { + return rte_ipv6_get_next_ext(p, proto, ext_len); +} +enum rte_color +rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, + struct rte_meter_srtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_srtcm_color_aware_check_w( + struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color +rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_color_aware_check_w( + struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_trtcm_color_aware_check(m, p, 
time, pkt_len, pkt_color); +} +enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, + uint32_t pkt_len) { + return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, + enum rte_color pkt_color) { + return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, + pkt_color); +} +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { + return rte_eth_rss_hf_refine(rss_hf); } -uint16_t wrte_eth_tx_burst(uint16_t const port_id, uint16_t const queue_id, struct rte_mbuf **tx_pkts, uint16_t const nb_pkts) { - return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { + return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +} +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_rx_queue_count(port_id, queue_id); +} +int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_rx_descriptor_status(port_id, queue_id, offset); +} +int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_tx_descriptor_status(port_id, queue_id, offset); +} +uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer) { + return 
rte_eth_tx_buffer_flush(port_id, queue_id, buffer); +} +uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer, + struct rte_mbuf *tx_pkt) { + return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); +} +uint16_t +rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) { + return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, + recycle_rxq_info); +} +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_tx_queue_count(port_id, queue_id); +} +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { + return rte_flow_dynf_metadata_get(m); +} +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { + rte_flow_dynf_metadata_set(m, v); +} +int rte_flow_dynf_metadata_avail_w(void) { + return rte_flow_dynf_metadata_avail(); +} +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { + return rte_hash_crc_1byte(data, init_val); +} +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { + return rte_hash_crc_2byte(data, init_val); +} +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { + return rte_hash_crc_4byte(data, init_val); +} +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { + return rte_hash_crc_8byte(data, init_val); +} +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, + uint32_t init_val) { + return rte_hash_crc(data, data_len, init_val); +} +void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_2hashes(key, length, pc, pb); +} +void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_32b_2hashes(k, length, pc, pb); +} +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { + return rte_jhash(key, length, initval); +} +uint32_t rte_jhash_32b_w(const 
uint32_t *k, uint32_t length, uint32_t initval) { + return rte_jhash_32b(k, length, initval); +} +uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, + uint32_t initval) { + return rte_jhash_3words(a, b, c, initval); +} +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { + return rte_jhash_2words(a, b, initval); +} +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { + return rte_jhash_1word(a, initval); +} +uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key) { + return rte_fbk_hash_get_bucket(ht, key); +} +int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value, + uint32_t bucket) { + return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); +} +int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, + uint16_t value) { + return rte_fbk_hash_add_key(ht, key, value); +} +int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_delete_key(ht, key); +} +int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_lookup(ht, key); +} +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { + rte_fbk_hash_clear_all(ht); +} +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { + return rte_fbk_hash_get_load_factor(ht); +} +void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_online(v, thread_id); +} +void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_offline(v, 
thread_id); +} +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_lock(v, thread_id); +} +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_unlock(v, thread_id); +} +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { + return rte_rcu_qsbr_start(v); +} +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_quiescent(v, thread_id); +} +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { + return rte_rcu_qsbr_check(v, t, wait); +} +uint8_t rte_read8_relaxed_w(const void *addr) { + return rte_read8_relaxed(addr); +} +uint16_t rte_read16_relaxed_w(const void *addr) { + return rte_read16_relaxed(addr); +} +uint32_t rte_read32_relaxed_w(const void *addr) { + return rte_read32_relaxed(addr); +} +uint64_t rte_read64_relaxed_w(const void *addr) { + return rte_read64_relaxed(addr); +} +void rte_write8_relaxed_w(uint8_t value, void *addr) { + rte_write8_relaxed(value, addr); +} +void rte_write16_relaxed_w(uint16_t value, void *addr) { + rte_write16_relaxed(value, addr); +} +void rte_write32_relaxed_w(uint32_t value, void *addr) { + rte_write32_relaxed(value, addr); +} +void rte_write64_relaxed_w(uint64_t value, void *addr) { + rte_write64_relaxed(value, addr); +} +uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } +uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } +uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } +uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } +void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } +void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } +void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } +void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } +void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { + rte_write32_wc_relaxed(value, 
addr); +} +void rte_write32_wc_w(uint32_t value, void *addr) { + rte_write32_wc(value, addr); +} +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_lock(msl, me); +} +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_unlock(msl, me); +} +int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + return rte_mcslock_trylock(msl, me); +} +int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { + return rte_mcslock_is_locked(msl); +} +void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } +void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } +void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } +void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } +void rte_pflock_write_unlock_w(rte_pflock_t *pf) { + rte_pflock_write_unlock(pf); +} +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { + return rte_reciprocal_divide(a, R); +} +uint64_t rte_reciprocal_divide_u64_w(uint64_t a, + const struct rte_reciprocal_u64 *R) { + return rte_reciprocal_divide_u64(a, R); +} +void rte_seqcount_init_w(rte_seqcount_t *seqcount) { + rte_seqcount_init(seqcount); +} +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { + return rte_seqcount_read_begin(seqcount); +} +bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, + uint32_t begin_sn) { + return rte_seqcount_read_retry(seqcount, begin_sn); +} +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_begin(seqcount); +} +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_end(seqcount); +} +void rte_seqlock_init_w(rte_seqlock_t *seqlock) { rte_seqlock_init(seqlock); } +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { + return rte_seqlock_read_begin(seqlock); +} +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { + return 
rte_seqlock_read_retry(seqlock, begin_sn); +} +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_lock(seqlock); +} +void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_unlock(seqlock); +} +unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, + unsigned int n) { + return rte_stack_push(s, obj_table, n); +} +unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, + unsigned int n) { + return rte_stack_pop(s, obj_table, n); +} +unsigned int rte_stack_count_w(struct rte_stack *s) { + return rte_stack_count(s); +} +unsigned int rte_stack_free_count_w(struct rte_stack *s) { + return rte_stack_free_count(s); +} +uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss(input_tuple, input_len, rss_key); +} +uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss_be(input_tuple, input_len, rss_key); +} +void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } +void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } +void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { + rte_ticketlock_unlock(tl); +} +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { + return rte_ticketlock_trylock(tl); +} +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { + return rte_ticketlock_is_locked(tl); +} +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_init(tlr); +} +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_lock(tlr); +} +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_unlock(tlr); +} +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { + return rte_ticketlock_recursive_trylock(tlr); +} +uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, + uint64_t 
cycles) { + return rte_cyclecounter_cycles_to_ns(tc, cycles); +} +uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, + uint64_t cycle_now) { + return rte_timecounter_update(tc, cycle_now); +} +uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { + return rte_timespec_to_ns(ts); +} +struct timespec rte_ns_to_timespec_w(uint64_t nsec) { + return rte_ns_to_timespec(nsec); +} +bool rte_trace_feature_is_enabled_w(void) { + return rte_trace_feature_is_enabled(); } diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h index 2d4e77c47..53b05b961 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h @@ -116,7 +116,7 @@ #include #include #include -#include +// #include #include #include #include @@ -232,7 +232,7 @@ * @return * The last rte_errno value (thread local value). */ -int rte_errno_get() { return rte_errno; } +int rte_errno_get(); /** * TX offloads to be set in [`rte_eth_tx_mode.offloads`]. 
@@ -297,1515 +297,3 @@ enum wrte_eth_rx_offload : uint64_t { RX_OFFLOAD_RSS_HASH = RTE_ETH_RX_OFFLOAD_RSS_HASH, RX_OFFLOAD_BUFFER_SPLIT = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, }; - -// Static wrappers - -int rte_is_aligned_w(const const void *const ptr, const unsigned int align) { - return rte_is_aligned(ptr, align); -} -void rte_atomic_thread_fence_w(rte_memory_order memorder) { - rte_atomic_thread_fence(memorder); -} -int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { - return rte_atomic16_cmpset(dst, exp, src); -} -uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { - return rte_atomic16_exchange(dst, val); -} -void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } -int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { - return rte_atomic16_read(v); -} -void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { - rte_atomic16_set(v, new_value); -} -void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { - rte_atomic16_add(v, inc); -} -void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { - rte_atomic16_sub(v, dec); -} -void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } -void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } -int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { - return rte_atomic16_add_return(v, inc); -} -int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { - return rte_atomic16_sub_return(v, dec); -} -int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { - return rte_atomic16_inc_and_test(v); -} -int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { - return rte_atomic16_dec_and_test(v); -} -int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { - return rte_atomic16_test_and_set(v); -} -void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } -int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { - return rte_atomic32_cmpset(dst, exp, src); -} -uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t 
val) { - return rte_atomic32_exchange(dst, val); -} -void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } -int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { - return rte_atomic32_read(v); -} -void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { - rte_atomic32_set(v, new_value); -} -void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { - rte_atomic32_add(v, inc); -} -void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { - rte_atomic32_sub(v, dec); -} -void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } -void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } -int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { - return rte_atomic32_add_return(v, inc); -} -int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { - return rte_atomic32_sub_return(v, dec); -} -int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_inc_and_test(v); -} -int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_dec_and_test(v); -} -int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { - return rte_atomic32_test_and_set(v); -} -void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } -int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { - return rte_atomic64_cmpset(dst, exp, src); -} -uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { - return rte_atomic64_exchange(dst, val); -} -void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } -int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } -void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { - rte_atomic64_set(v, new_value); -} -void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { - rte_atomic64_add(v, inc); -} -void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { - rte_atomic64_sub(v, dec); -} -void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } -void rte_atomic64_dec_w(rte_atomic64_t *v) 
{ rte_atomic64_dec(v); } -int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { - return rte_atomic64_add_return(v, inc); -} -int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { - return rte_atomic64_sub_return(v, dec); -} -int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_inc_and_test(v); -} -int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_dec_and_test(v); -} -int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { - return rte_atomic64_test_and_set(v); -} -void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } -void rte_smp_mb_w(void) { rte_smp_mb(); } -uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } -uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } -uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } -void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } -uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } -uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } -size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { - return rte_strlcpy(dst, src, size); -} -size_t rte_strlcat_w(char *dst, const char *src, size_t size) { - return rte_strlcat(dst, src, size); -} -const char *rte_str_skip_leading_spaces_w(const char *src) { - return rte_str_skip_leading_spaces(src); -} -void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { - rte_uuid_copy(dst, src); -} -int rte_gettid_w(void) { return rte_gettid(); } -unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } -void rte_pause_w(void) { rte_pause(); } -void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_16(addr, expected, memorder); -} -void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_32(addr, expected, memorder); -} -void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, - 
rte_memory_order memorder) { - rte_wait_until_equal_64(addr, expected, memorder); -} -void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } -void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } -void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } -int rte_spinlock_trylock_w(rte_spinlock_t *sl) { - return rte_spinlock_trylock(sl); -} -int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { - return rte_spinlock_is_locked(sl); -} -int rte_tm_supported_w(void) { return rte_tm_supported(); } -void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } -void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { - rte_spinlock_unlock_tm(sl); -} -int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { - return rte_spinlock_trylock_tm(sl); -} -void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_init(slr); -} -void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock(slr); -} -void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock(slr); -} -int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock(slr); -} -void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock_tm(slr); -} -void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock_tm(slr); -} -int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock_tm(slr); -} -unsigned int rte_xbegin_w(void) { return rte_xbegin(); } -void rte_xend_w(void) { rte_xend(); } -int rte_xtest_w(void) { return rte_xtest(); } -int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } -uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_get32(nr, addr); -} -void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { - 
rte_bit_relaxed_set32(nr, addr); -} -void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { - rte_bit_relaxed_clear32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_test_and_set32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_test_and_clear32(nr, addr); -} -uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_get64(nr, addr); -} -void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_set64(nr, addr); -} -void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_clear64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_set64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_clear64(nr, addr); -} -unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } -unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } -unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } -unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } -unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } -unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } -uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } -uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } -uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } -int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { - return rte_bsf32_safe(v, pos); -} -uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } -int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { - return rte_bsf64_safe(v, pos); -} -uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } -uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } -int 
rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } -uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } -uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } -uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } -uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } -uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } -uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } -void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } -void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } -int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_read_trylock(rwl); -} -void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock(rwl); -} -int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_trylock(rwl); -} -void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } -void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock(rwl); -} -int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_is_locked(rwl); -} -void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_lock_tm(rwl); -} -void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock_tm(rwl); -} -void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_lock_tm(rwl); -} -void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock_tm(rwl); -} -unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk_elem(r, 
obj_table, esize, n, free_space); -} -unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int 
*free_space) { - return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); -} -uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_prod_htd_max(r); -} -int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_prod_htd_max(r, v); -} -uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_cons_htd_max(r); -} -int 
rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_cons_htd_max(r, v); -} -unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_mp_enqueue_elem(r, obj, esize); -} -int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_sp_enqueue_elem(r, obj, esize); -} -int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { - return rte_ring_enqueue_elem(r, obj, esize); -} -unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_mc_dequeue_elem(r, obj_p, esize); -} -int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_sc_dequeue_elem(r, obj_p, esize); -} -int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_dequeue_elem(r, obj_p, esize); -} -unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - 
unsigned int *free_space) { - return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, - unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, - unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_start(r, n, free_space); -} -void 
rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, - unsigned int esize, unsigned int n) { - rte_ring_enqueue_elem_finish(r, obj_table, esize, n); -} -void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, - unsigned int n) { - rte_ring_enqueue_finish(r, obj_table, n); -} -unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_start(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_start(r, obj_table, n, available); -} -void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_elem_finish(r, n); -} -void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_finish(r, n); -} -unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, - unsigned int esize, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_burst_elem_start_w( - struct rte_ring *r, unsigned int esize, unsigned int n, - 
struct rte_ring_zc_data *zcd, unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); -} -void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_elem_finish(r, n); -} -void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_finish(r, n); -} -unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, - unsigned int esize, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_burst_elem_start_w( - struct rte_ring *r, unsigned int esize, unsigned int n, - struct rte_ring_zc_data *zcd, unsigned int *available) { - return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); -} -void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_elem_finish(r, n); -} -void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_finish(r, n); -} -unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_sp_enqueue_bulk_w(struct 
rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, - unsigned int n, unsigned int *free_space) { - return rte_ring_enqueue_bulk(r, obj_table, n, free_space); -} -int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_mp_enqueue(r, obj); -} -int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_sp_enqueue(r, obj); -} -int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_enqueue(r, obj); -} -unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_bulk(r, obj_table, n, available); -} -int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_mc_dequeue(r, obj_p); -} -int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_sc_dequeue(r, obj_p); -} -int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_dequeue(r, obj_p); -} -unsigned int rte_ring_count_w(const struct rte_ring *r) { - return rte_ring_count(r); -} -unsigned int rte_ring_free_count_w(const struct rte_ring *r) { - return rte_ring_free_count(r); -} -int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } -int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } -unsigned int rte_ring_get_size_w(const struct rte_ring *r) { - return rte_ring_get_size(r); -} -unsigned int 
rte_ring_get_capacity_w(const struct rte_ring *r) { - return rte_ring_get_capacity(r); -} -enum rte_ring_sync_type -rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_prod_sync_type(r); -} -int rte_ring_is_prod_single_w(const struct rte_ring *r) { - return rte_ring_is_prod_single(r); -} -enum rte_ring_sync_type -rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_cons_sync_type(r); -} -int rte_ring_is_cons_single_w(const struct rte_ring *r) { - return rte_ring_is_cons_single(r); -} -unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_burst(r, obj_table, n, available); -} -void *rte_memcpy_w(void *dst, const void *src, size_t n) { - return rte_memcpy(dst, src, n); -} -void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { - return rte_mov15_or_less(dst, src, n); -} -void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, 
src); } -void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } -void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } -void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } -void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { - return rte_memcpy_generic(dst, src, n); -} -void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { - return rte_memcpy_aligned(dst, src, n); -} -struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { - return rte_mempool_get_header(obj); -} -struct rte_mempool *rte_mempool_from_obj_w(void *obj) { - return rte_mempool_from_obj(obj); -} -struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { - return rte_mempool_get_trailer(obj); -} -struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { - return rte_mempool_get_ops(ops_index); -} -int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); -} -int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, - void **first_obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); -} -int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, - void *const *obj_table, unsigned int n) { - return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); -} -struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, - unsigned int lcore_id) { - return rte_mempool_default_cache(mp, lcore_id); -} -void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, - struct rte_mempool *mp) { - rte_mempool_cache_flush(cache, mp); -} -void rte_mempool_do_generic_put_w(struct rte_mempool *mp, - void *const *obj_table, unsigned int n, - struct rte_mempool_cache *cache) { - rte_mempool_do_generic_put(mp, obj_table, n, cache); -} -void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n, - struct 
rte_mempool_cache *cache) { - rte_mempool_generic_put(mp, obj_table, n, cache); -} -void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n) { - rte_mempool_put_bulk(mp, obj_table, n); -} -void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { - rte_mempool_put(mp, obj); -} -int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, - unsigned int n, - struct rte_mempool_cache *cache) { - return rte_mempool_do_generic_get(mp, obj_table, n, cache); -} -int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, - unsigned int n, struct rte_mempool_cache *cache) { - return rte_mempool_generic_get(mp, obj_table, n, cache); -} -int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_get_bulk(mp, obj_table, n); -} -int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { - return rte_mempool_get(mp, obj_p); -} -int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, - void **first_obj_table, unsigned int n) { - return rte_mempool_get_contig_blocks(mp, first_obj_table, n); -} -int rte_mempool_full_w(const struct rte_mempool *mp) { - return rte_mempool_full(mp); -} -int rte_mempool_empty_w(const struct rte_mempool *mp) { - return rte_mempool_empty(mp); -} -rte_iova_t rte_mempool_virt2iova_w(const void *elt) { - return rte_mempool_virt2iova(elt); -} -void *rte_mempool_get_priv_w(struct rte_mempool *mp) { - return rte_mempool_get_priv(mp); -} -void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } -void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } -void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } -void rte_prefetch_non_temporal_w(const void *p) { - rte_prefetch_non_temporal(p); -} -void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } -void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } -void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } -void rte_cldemote_w(const void 
*p) { rte_cldemote(p); } -uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } -uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } -uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } -uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } -uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } -uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } -void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part1(m); -} -void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part2(m); -} -uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_size(mp); -} -rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { - return rte_mbuf_iova_get(m); -} -void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { - rte_mbuf_iova_set(m, iova); -} -rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova(mb); -} -rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova_default(mb); -} -struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { - return rte_mbuf_from_indirect(mi); -} -char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { - return rte_mbuf_buf_addr(mb, mp); -} -char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { - return rte_mbuf_data_addr_default(mb); -} -char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } -void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } -uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_flags(mp); -} -uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { - return rte_mbuf_refcnt_read(m); -} -void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { - rte_mbuf_refcnt_set(m, new_value); -} -uint16_t 
rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { - return rte_mbuf_refcnt_update(m, value); -} -uint16_t -rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { - return rte_mbuf_ext_refcnt_read(shinfo); -} -void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, - uint16_t new_value) { - rte_mbuf_ext_refcnt_set(shinfo, new_value); -} -uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, - int16_t value) { - return rte_mbuf_ext_refcnt_update(shinfo, value); -} -struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { - return rte_mbuf_raw_alloc(mp); -} -void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } -uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_data_room_size(mp); -} -void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { - rte_pktmbuf_reset_headroom(m); -} -void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } -struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { - return rte_pktmbuf_alloc(mp); -} -int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, - unsigned int count) { - return rte_pktmbuf_alloc_bulk(pool, mbufs, count); -} -struct rte_mbuf_ext_shared_info * -rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, - rte_mbuf_extbuf_free_callback_t free_cb, - void *fcb_opaque) { - return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, - fcb_opaque); -} -void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, - rte_iova_t buf_iova, uint16_t buf_len, - struct rte_mbuf_ext_shared_info *shinfo) { - rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); -} -void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, - const struct rte_mbuf *msrc) { - rte_mbuf_dynfield_copy(mdst, msrc); -} -void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { - rte_pktmbuf_attach(mi, m); -} -void 
rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } -struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { - return rte_pktmbuf_prefree_seg(m); -} -void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } -void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } -void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { - rte_pktmbuf_refcnt_update(m, v); -} -uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_headroom(m); -} -uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_tailroom(m); -} -struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m) { - return rte_pktmbuf_lastseg(m); -} -char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_prepend(m, len); -} -char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_append(m, len); -} -char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_adj(m, len); -} -int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_trim(m, len); -} -int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { - return rte_pktmbuf_is_contiguous(m); -} -const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, - uint32_t len, void *buf) { - return rte_pktmbuf_read(m, off, len, buf); -} -int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { - return rte_pktmbuf_chain(head, tail); -} -uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, - uint64_t tso, uint64_t ol3, uint64_t ol2, - uint64_t unused) { - return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); -} -int rte_validate_tx_offload_w(const struct rte_mbuf *m) { - return rte_validate_tx_offload(m); -} -int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { - return rte_pktmbuf_linearize(mbuf); -} -uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { - return 
rte_mbuf_sched_queue_get(m); -} -uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_traffic_class_get(m); -} -uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_color_get(m); -} -void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, - uint8_t *traffic_class, uint8_t *color) { - rte_mbuf_sched_get(m, queue_id, traffic_class, color); -} -void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { - rte_mbuf_sched_queue_set(m, queue_id); -} -void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, - uint8_t traffic_class) { - rte_mbuf_sched_traffic_class_set(m, traffic_class); -} -void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { - rte_mbuf_sched_color_set(m, color); -} -void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, - uint8_t traffic_class, uint8_t color) { - rte_mbuf_sched_set(m, queue_id, traffic_class, color); -} -int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, - const struct rte_ether_addr *ea2) { - return rte_is_same_ether_addr(ea1, ea2); -} -int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_zero_ether_addr(ea); -} -int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_unicast_ether_addr(ea); -} -int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_multicast_ether_addr(ea); -} -int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_broadcast_ether_addr(ea); -} -int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_universal_ether_addr(ea); -} -int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_local_admin_ether_addr(ea); -} -int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_valid_assigned_ether_addr(ea); -} -void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, - 
struct rte_ether_addr *ea_to) { - rte_ether_addr_copy(ea_from, ea_to); -} -int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } -int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } -uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { - return rte_bitmap_get_memory_footprint(n_bits); -} -struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - return rte_bitmap_init(n_bits, mem, mem_size); -} -struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); -} -void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } -void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } -void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_prefetch0(bmp, pos); -} -uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { - return rte_bitmap_get(bmp, pos); -} -void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_set(bmp, pos); -} -void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, - uint64_t slab) { - rte_bitmap_set_slab(bmp, pos, slab); -} -void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_clear(bmp, pos); -} -int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { - return rte_bitmap_scan(bmp, pos, slab); -} -uint16_t rte_raw_cksum_w(const void *buf, size_t len) { - return rte_raw_cksum(buf, len); -} -int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, - uint16_t *cksum) { - return rte_raw_cksum_mbuf(m, off, len, cksum); -} -uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_hdr_len(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_cksum(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { - 
return rte_ipv4_cksum_simple(ipv4_hdr); -} -uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, - uint64_t ol_flags) { - return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); -} -uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); -} -uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, - const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); -} -int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); -} -int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, - const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); -} -bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b) { - return rte_ipv6_addr_eq(a, b); -} -void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { - rte_ipv6_addr_mask(ip, depth); -} -bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b, uint8_t depth) { - return rte_ipv6_addr_eq_prefix(a, b, depth); -} -uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { - return rte_ipv6_mask_depth(mask); -} -bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_unspec(ip); -} -bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_loopback(ip); -} -bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_linklocal(ip); -} -bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_sitelocal(ip); -} -bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_v4compat(ip); -} -bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { 
- return rte_ipv6_addr_is_v4mapped(ip); -} -bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_mcast(ip); -} -enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_mc_scope(ip); -} -void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, - const struct rte_ether_addr *mac) { - rte_ipv6_llocal_from_ethernet(ip, mac); -} -void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, - const struct rte_ipv6_addr *ip) { - rte_ipv6_solnode_from_addr(sol, ip); -} -void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, - const struct rte_ipv6_addr *ip) { - rte_ether_mcast_from_ipv6(mac, ip); -} -int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { - return rte_ipv6_check_version(ip); -} -uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - uint64_t ol_flags) { - return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); -} -uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); -} -uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, - const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); -} -int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); -} -int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, - const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); -} -int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { - return rte_ipv6_get_next_ext(p, proto, ext_len); -} -enum rte_color -rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, - struct rte_meter_srtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); -} -enum rte_color 
rte_meter_srtcm_color_aware_check_w( - struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} -enum rte_color -rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, - struct rte_meter_trtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); -} -enum rte_color rte_meter_trtcm_color_aware_check_w( - struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} -enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( - struct rte_meter_trtcm_rfc4115 *m, - struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, - uint32_t pkt_len) { - return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); -} -enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( - struct rte_meter_trtcm_rfc4115 *m, - struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, - enum rte_color pkt_color) { - return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, - pkt_color); -} -uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { - return rte_eth_rss_hf_refine(rss_hf); -} - -uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { - return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); -} -int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_rx_queue_count(port_id, queue_id); -} -int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_rx_descriptor_status(port_id, queue_id, offset); -} -int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_tx_descriptor_status(port_id, queue_id, 
offset); -} -uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); -} -uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); -} -uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, - struct rte_eth_dev_tx_buffer *buffer) { - return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); -} -uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, - struct rte_eth_dev_tx_buffer *buffer, - struct rte_mbuf *tx_pkt) { - return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); -} -uint16_t -rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, - uint16_t tx_port_id, uint16_t tx_queue_id, - struct rte_eth_recycle_rxq_info *recycle_rxq_info) { - return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, - recycle_rxq_info); -} -int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_tx_queue_count(port_id, queue_id); -} -uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { - return rte_flow_dynf_metadata_get(m); -} -void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { - rte_flow_dynf_metadata_set(m, v); -} -int rte_flow_dynf_metadata_avail_w(void) { - return rte_flow_dynf_metadata_avail(); -} -uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { - return rte_hash_crc_1byte(data, init_val); -} -uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { - return rte_hash_crc_2byte(data, init_val); -} -uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { - return rte_hash_crc_4byte(data, init_val); -} -uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { - return rte_hash_crc_8byte(data, init_val); -} -uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, - uint32_t 
init_val) { - return rte_hash_crc(data, data_len, init_val); -} -void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_2hashes(key, length, pc, pb); -} -void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_32b_2hashes(k, length, pc, pb); -} -uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { - return rte_jhash(key, length, initval); -} -uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { - return rte_jhash_32b(k, length, initval); -} -uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, - uint32_t initval) { - return rte_jhash_3words(a, b, c, initval); -} -uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { - return rte_jhash_2words(a, b, initval); -} -uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { - return rte_jhash_1word(a, initval); -} -uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key) { - return rte_fbk_hash_get_bucket(ht, key); -} -int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, - uint32_t key, uint16_t value, - uint32_t bucket) { - return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); -} -int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, - uint16_t value) { - return rte_fbk_hash_add_key(ht, key, value); -} -int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { - return rte_fbk_hash_delete_key(ht, key); -} -int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { - return 
rte_fbk_hash_lookup(ht, key); -} -void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { - rte_fbk_hash_clear_all(ht); -} -double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { - return rte_fbk_hash_get_load_factor(ht); -} -void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_online(v, thread_id); -} -void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_offline(v, thread_id); -} -void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_lock(v, thread_id); -} -void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_unlock(v, thread_id); -} -uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { - return rte_rcu_qsbr_start(v); -} -void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_quiescent(v, thread_id); -} -int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { - return rte_rcu_qsbr_check(v, t, wait); -} -uint8_t rte_read8_relaxed_w(const void *addr) { - return rte_read8_relaxed(addr); -} -uint16_t rte_read16_relaxed_w(const void *addr) { - return rte_read16_relaxed(addr); -} -uint32_t rte_read32_relaxed_w(const void *addr) { - return rte_read32_relaxed(addr); -} -uint64_t rte_read64_relaxed_w(const void *addr) { - return rte_read64_relaxed(addr); -} -void rte_write8_relaxed_w(uint8_t value, void *addr) { - rte_write8_relaxed(value, addr); -} -void rte_write16_relaxed_w(uint16_t value, void *addr) { - rte_write16_relaxed(value, addr); -} -void rte_write32_relaxed_w(uint32_t value, void *addr) { - rte_write32_relaxed(value, addr); -} -void rte_write64_relaxed_w(uint64_t value, void *addr) { - rte_write64_relaxed(value, addr); -} -uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } -uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } -uint32_t rte_read32_w(const void 
*addr) { return rte_read32(addr); } -uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } -void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } -void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } -void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } -void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } -void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { - rte_write32_wc_relaxed(value, addr); -} -void rte_write32_wc_w(uint32_t value, void *addr) { - rte_write32_wc(value, addr); -} -void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_lock(msl, me); -} -void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_unlock(msl, me); -} -int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - return rte_mcslock_trylock(msl, me); -} -int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { - return rte_mcslock_is_locked(msl); -} -void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } -void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } -void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } -void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } -void rte_pflock_write_unlock_w(rte_pflock_t *pf) { - rte_pflock_write_unlock(pf); -} -uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { - return rte_reciprocal_divide(a, R); -} -uint64_t rte_reciprocal_divide_u64_w(uint64_t a, - const struct rte_reciprocal_u64 *R) { - return rte_reciprocal_divide_u64(a, R); -} -void rte_seqcount_init_w(rte_seqcount_t *seqcount) { - rte_seqcount_init(seqcount); -} -uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { - return rte_seqcount_read_begin(seqcount); -} -bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, - uint32_t begin_sn) { - return rte_seqcount_read_retry(seqcount, begin_sn); 
-} -void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_begin(seqcount); -} -void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_end(seqcount); -} -void rte_seqlock_init_w(rte_seqlock_t *seqlock) { rte_seqlock_init(seqlock); } -uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { - return rte_seqlock_read_begin(seqlock); -} -bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { - return rte_seqlock_read_retry(seqlock, begin_sn); -} -void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_lock(seqlock); -} -void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_unlock(seqlock); -} -unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, - unsigned int n) { - return rte_stack_push(s, obj_table, n); -} -unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, - unsigned int n) { - return rte_stack_pop(s, obj_table, n); -} -unsigned int rte_stack_count_w(struct rte_stack *s) { - return rte_stack_count(s); -} -unsigned int rte_stack_free_count_w(struct rte_stack *s) { - return rte_stack_free_count(s); -} -uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss(input_tuple, input_len, rss_key); -} -uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss_be(input_tuple, input_len, rss_key); -} -void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } -void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } -void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { - rte_ticketlock_unlock(tl); -} -int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { - return rte_ticketlock_trylock(tl); -} -int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { - return rte_ticketlock_is_locked(tl); -} -void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t 
*tlr) { - rte_ticketlock_recursive_init(tlr); -} -void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_lock(tlr); -} -void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_unlock(tlr); -} -int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { - return rte_ticketlock_recursive_trylock(tlr); -} -uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, - uint64_t cycles) { - return rte_cyclecounter_cycles_to_ns(tc, cycles); -} -uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, - uint64_t cycle_now) { - return rte_timecounter_update(tc, cycle_now); -} -uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { - return rte_timespec_to_ns(ts); -} -struct timespec rte_ns_to_timespec_w(uint64_t nsec) { - return rte_ns_to_timespec(nsec); -} -bool rte_trace_feature_is_enabled_w(void) { - return rte_trace_feature_is_enabled(); -} diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index bc7b394de..ab1e0010d 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -294,6 +294,7 @@ stdenv.mkDerivation { ''-Ddisable_drivers=${lib.concatStringsSep "," disabledDrivers}'' ''-Denable_drivers=${lib.concatStringsSep "," enabledDrivers}'' ''-Denable_libs=${lib.concatStringsSep "," enabledLibs}'' + ''-Ddisable_apps=*'' ''-Ddisable_libs=${lib.concatStringsSep "," disabledLibs}'' ] ++ (if isCrossCompile then [ ''--cross-file=${cross-file}'' ] else [ ]); diff --git a/nix/target.nix b/nix/target.nix index 7bd0ebb0a..36cbe22ee 100644 --- a/nix/target.nix +++ b/nix/target.nix @@ -19,12 +19,7 @@ let NIX_CFLAGS_LINK = [ ]; }; dpdk = { - buildInputs = { - rdma-core = true; - libbsd = true; - libnl = true; - numactl = true; - }; + buildInputs = { }; }; }; }; @@ -67,10 +62,7 @@ let }; dpdk = { buildInputs = { - rdma-core = true; - libbsd = true; - libnl = true; - numactl = false; + numactl = null; }; }; }; diff --git a/shell.nix 
b/shell.nix index 07970296c..ef124bdb4 100644 --- a/shell.nix +++ b/shell.nix @@ -2,25 +2,56 @@ # Copyright Open Network Fabric Authors { overlay ? "dataplane", - target ? "x86_64-unknown-linux-gnu", + platform ? "x86-64-v3", + libc ? "gnu", prof ? "debug", instrumentation ? "none", - sanitize ? "", - sources ? import ./npins, - pkgs ? import { }, + sanitize ? "address", }: -(pkgs.buildFHSEnv { - name = "dataplane-shell"; +let + d = import ./default.nix { + inherit + overlay + platform + libc + prof + instrumentation + sanitize + ; + }; + pkgs = import {}; +in +(d.pkgs-super.buildPackages.buildFHSEnv { + name = "dataplane-dev"; targetPkgs = - pkgs: - (with pkgs; [ - # dev tools - bash - direnv - just - nil - nixd - npins - wget - ]); + pkgs: with pkgs; [ + stdenv.cc.libc.dev + stdenv.cc.libc.out + # libmd.dev + # libmd.static + libbsd.dev + # libbsd.static + numactl.dev + # numactl.static + rdma-core.dev + # rdma-core.static + # dpdk.dev + # dpdk.static + # dpdk-wrapper.dev + # dpdk-wrapper.out + ]; + # (with pkgs.buildPackages; [ + # # dev tools + # bash + # direnv + # just + # nil + # nixd + # npins + # wget + # llvmPackages.bintools + # llvmPackages.clang + # llvmPackages.libclang.lib + # llvmPackages.lld + # ]); }).env diff --git a/sysroot b/sysroot index 761f3e8ec..22ffe82ea 120000 --- a/sysroot +++ b/sysroot @@ -1 +1 @@ -/nix/store/m1gsdy5xjrc396yk2m1h7nrdly5d6jw9-sysroot \ No newline at end of file +/nix/store/40gcqs5h4gbs9ixw16wbg9161mrxb0k2-sysroot \ No newline at end of file From ab5bab5a7a04df98e19938f6ed05af2e892757ef Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 17:29:01 +0000 Subject: [PATCH 25/35] wip --- .cargo/config.toml | 2 +- .clangd | 4 +- cli/build.rs | 2 +- dataplane/build.rs | 2 +- default.nix | 16 +- dpdk-sys/build.rs | 3 +- dpdk/build.rs | 2 +- dpdk/src/lcore.rs | 2 +- hardware/build.rs | 2 +- init/build.rs | 2 +- nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c | 1513 --------------------- 
nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h | 1517 +++++++++++++++++++++- nix/target.nix | 10 +- sysfs/build.rs | 2 +- sysroot | 1 - 15 files changed, 1551 insertions(+), 1529 deletions(-) mode change 100644 => 120000 .clangd delete mode 120000 sysroot diff --git a/.cargo/config.toml b/.cargo/config.toml index 5cafed729..32fb6467d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,5 @@ [env] -COMPILE_ENV = { value = "compile-env", relative = true, force = false } +COMPILE_ENV = { value = "sysroot", relative = true, force = false } PATH = { value = "compile-env/bin", relative = true, force = true } LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = true } PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = true } diff --git a/.clangd b/.clangd deleted file mode 100644 index bb03d2f22..000000000 --- a/.clangd +++ /dev/null @@ -1,3 +0,0 @@ -CompileFlags: - Add: - - "-I/home/dnoland/code/githedgehog/dataplane/sysroot/include" diff --git a/.clangd b/.clangd new file mode 120000 index 000000000..841886296 --- /dev/null +++ b/.clangd @@ -0,0 +1 @@ +./build-tools/.clangd \ No newline at end of file diff --git a/cli/build.rs b/cli/build.rs index 52f5b0197..9d7c9069b 100644 --- a/cli/build.rs +++ b/cli/build.rs @@ -4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/dataplane/build.rs b/dataplane/build.rs index 52f5b0197..9d7c9069b 100644 --- a/dataplane/build.rs +++ b/dataplane/build.rs @@ -4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/default.nix 
b/default.nix index e5ade058e..1fde49210 100644 --- a/default.nix +++ b/default.nix @@ -45,6 +45,8 @@ let libmd.static libbsd.dev libbsd.static + libnl.dev + libnl.static numactl.dev numactl.static rdma-core.dev @@ -74,11 +76,23 @@ pkgs.lib.fix (final: { name = "sysroot"; paths = sysroot-list; }; + clangd = pkgs.writeTextFile { + name = ".clangd"; + text = '' + CompileFlags: + Add: + - "-I${final.sysroot}/include" + - "-I${final.pkgs.dpdk.dev}/include" + - "-Wno-deprecated-declarations" + ''; + executable = false; + destination = "/.clangd"; + }; build-tools = with final.pkgs.buildPackages; symlinkJoin { name = "build-tools"; - paths = build-tools-list; + paths = build-tools-list ++ [ final.clangd ]; }; dev-shell = final.pkgs.symlinkJoin { name = "dataplane-dev-shell"; diff --git a/dpdk-sys/build.rs b/dpdk-sys/build.rs index 556af520a..1eac21991 100644 --- a/dpdk-sys/build.rs +++ b/dpdk-sys/build.rs @@ -71,7 +71,7 @@ fn main() { let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); println!("cargo:rustc-link-search=all={sysroot}/lib"); // NOTE: DPDK absolutely requires whole-archive in the linking command. 
@@ -100,6 +100,7 @@ fn main() { "rte_rcu", "rte_ring", "rte_eal", + "rte_argparse", "rte_kvargs", "rte_telemetry", "rte_log", diff --git a/dpdk/build.rs b/dpdk/build.rs index 52f5b0197..9d7c9069b 100644 --- a/dpdk/build.rs +++ b/dpdk/build.rs @@ -4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/dpdk/src/lcore.rs b/dpdk/src/lcore.rs index 7c35c7b97..12a4dbc48 100644 --- a/dpdk/src/lcore.rs +++ b/dpdk/src/lcore.rs @@ -237,7 +237,7 @@ impl LCoreId { #[tracing::instrument(level = "trace")] pub fn current() -> LCoreId { - LCoreId(unsafe { dpdk_sys::rte_lcore_id_w() }) + LCoreId(unsafe { dpdk_sys::rte_lcore_id() }) } #[tracing::instrument(level = "trace")] diff --git a/hardware/build.rs b/hardware/build.rs index 52f5b0197..9d7c9069b 100644 --- a/hardware/build.rs +++ b/hardware/build.rs @@ -4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/init/build.rs b/init/build.rs index 52f5b0197..9d7c9069b 100644 --- a/init/build.rs +++ b/init/build.rs @@ -4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c index 118e3fc13..c9a6f09b7 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c @@ -3,1516 +3,3 @@ #include "dpdk_wrapper.h" -int rte_errno_get() { return rte_errno; } - -// Static wrappers - -int rte_is_aligned_w(const const void 
*const ptr, const unsigned int align) { - return rte_is_aligned(ptr, align); -} -void rte_atomic_thread_fence_w(rte_memory_order memorder) { - rte_atomic_thread_fence(memorder); -} -int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { - return rte_atomic16_cmpset(dst, exp, src); -} -uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { - return rte_atomic16_exchange(dst, val); -} -void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } -int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { - return rte_atomic16_read(v); -} -void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { - rte_atomic16_set(v, new_value); -} -void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { - rte_atomic16_add(v, inc); -} -void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { - rte_atomic16_sub(v, dec); -} -void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } -void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } -int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { - return rte_atomic16_add_return(v, inc); -} -int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { - return rte_atomic16_sub_return(v, dec); -} -int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { - return rte_atomic16_inc_and_test(v); -} -int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { - return rte_atomic16_dec_and_test(v); -} -int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { - return rte_atomic16_test_and_set(v); -} -void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } -int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { - return rte_atomic32_cmpset(dst, exp, src); -} -uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { - return rte_atomic32_exchange(dst, val); -} -void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } -int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { - return rte_atomic32_read(v); -} -void 
rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { - rte_atomic32_set(v, new_value); -} -void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { - rte_atomic32_add(v, inc); -} -void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { - rte_atomic32_sub(v, dec); -} -void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } -void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } -int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { - return rte_atomic32_add_return(v, inc); -} -int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { - return rte_atomic32_sub_return(v, dec); -} -int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_inc_and_test(v); -} -int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_dec_and_test(v); -} -int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { - return rte_atomic32_test_and_set(v); -} -void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } -int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { - return rte_atomic64_cmpset(dst, exp, src); -} -uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { - return rte_atomic64_exchange(dst, val); -} -void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } -int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } -void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { - rte_atomic64_set(v, new_value); -} -void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { - rte_atomic64_add(v, inc); -} -void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { - rte_atomic64_sub(v, dec); -} -void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } -void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } -int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { - return rte_atomic64_add_return(v, inc); -} -int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { - return 
rte_atomic64_sub_return(v, dec); -} -int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_inc_and_test(v); -} -int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_dec_and_test(v); -} -int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { - return rte_atomic64_test_and_set(v); -} -void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } -void rte_smp_mb_w(void) { rte_smp_mb(); } -uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } -uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } -uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } -void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } -uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } -uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } -size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { - return rte_strlcpy(dst, src, size); -} -size_t rte_strlcat_w(char *dst, const char *src, size_t size) { - return rte_strlcat(dst, src, size); -} -const char *rte_str_skip_leading_spaces_w(const char *src) { - return rte_str_skip_leading_spaces(src); -} -void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { - rte_uuid_copy(dst, src); -} -int rte_gettid_w(void) { return rte_gettid(); } -unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } -void rte_pause_w(void) { rte_pause(); } -void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_16(addr, expected, memorder); -} -void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_32(addr, expected, memorder); -} -void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_64(addr, expected, memorder); -} -void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } -void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); 
} -void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } -int rte_spinlock_trylock_w(rte_spinlock_t *sl) { - return rte_spinlock_trylock(sl); -} -int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { - return rte_spinlock_is_locked(sl); -} -int rte_tm_supported_w(void) { return rte_tm_supported(); } -void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } -void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { - rte_spinlock_unlock_tm(sl); -} -int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { - return rte_spinlock_trylock_tm(sl); -} -void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_init(slr); -} -void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock(slr); -} -void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock(slr); -} -int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock(slr); -} -void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock_tm(slr); -} -void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock_tm(slr); -} -int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock_tm(slr); -} -// unsigned int rte_xbegin_w(void) { return rte_xbegin(); } -// void rte_xend_w(void) { rte_xend(); } -// int rte_xtest_w(void) { return rte_xtest(); } -// int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } -uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_get32(nr, addr); -} -void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { - rte_bit_relaxed_set32(nr, addr); -} -void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { - rte_bit_relaxed_clear32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) 
{ - return rte_bit_relaxed_test_and_set32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_test_and_clear32(nr, addr); -} -uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_get64(nr, addr); -} -void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_set64(nr, addr); -} -void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_clear64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_set64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_clear64(nr, addr); -} -unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } -unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } -unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } -unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } -unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } -unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } -uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } -uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } -uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } -int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { - return rte_bsf32_safe(v, pos); -} -uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } -int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { - return rte_bsf64_safe(v, pos); -} -uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } -uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } -int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } -uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } -uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } -uint64_t 
rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } -uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } -uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } -uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } -void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } -void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } -int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_read_trylock(rwl); -} -void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock(rwl); -} -int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_trylock(rwl); -} -void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } -void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock(rwl); -} -int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_is_locked(rwl); -} -void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_lock_tm(rwl); -} -void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock_tm(rwl); -} -void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_lock_tm(rwl); -} -void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock_tm(rwl); -} -unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return 
rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned 
int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); -} -uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_prod_htd_max(r); -} -int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_prod_htd_max(r, v); -} -uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_cons_htd_max(r); -} -int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_cons_htd_max(r, v); -} -unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, 
- unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} -int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_mp_enqueue_elem(r, obj, esize); -} -int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_sp_enqueue_elem(r, obj, esize); -} -int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { - return rte_ring_enqueue_elem(r, obj, esize); -} -unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); -} -int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_mc_dequeue_elem(r, obj_p, esize); -} -int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_sc_dequeue_elem(r, obj_p, esize); -} -int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_dequeue_elem(r, obj_p, esize); -} -unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, - unsigned int n, 
- unsigned int *free_space) { - return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, - const void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} -unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, - unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); -} -unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, - unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, - unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem_start(r, n, free_space); -} -unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_start(r, n, free_space); -} -void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, - unsigned int esize, unsigned int n) { - rte_ring_enqueue_elem_finish(r, obj_table, esize, n); -} -void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, - 
unsigned int n) { - rte_ring_enqueue_finish(r, obj_table, n); -} -unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_start(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, - void *obj_table, - unsigned int esize, - unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); -} -unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, - void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_start(r, obj_table, n, available); -} -void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_elem_finish(r, n); -} -void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_finish(r, n); -} -unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, - unsigned int esize, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_burst_elem_start_w( - struct rte_ring *r, unsigned int esize, unsigned int n, - struct rte_ring_zc_data *zcd, unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); -} -unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, 
- unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); -} -void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_elem_finish(r, n); -} -void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_finish(r, n); -} -unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, - unsigned int esize, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_burst_elem_start_w( - struct rte_ring *r, unsigned int esize, unsigned int n, - struct rte_ring_zc_data *zcd, unsigned int *available) { - return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); -} -unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, - unsigned int n, - struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); -} -void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_elem_finish(r, n); -} -void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_finish(r, n); -} -unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); -} -unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, - unsigned int n, 
unsigned int *free_space) { - return rte_ring_enqueue_bulk(r, obj_table, n, free_space); -} -int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_mp_enqueue(r, obj); -} -int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_sp_enqueue(r, obj); -} -int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_enqueue(r, obj); -} -unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_bulk(r, obj_table, n, available); -} -int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_mc_dequeue(r, obj_p); -} -int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_sc_dequeue(r, obj_p); -} -int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_dequeue(r, obj_p); -} -unsigned int rte_ring_count_w(const struct rte_ring *r) { - return rte_ring_count(r); -} -unsigned int rte_ring_free_count_w(const struct rte_ring *r) { - return rte_ring_free_count(r); -} -int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } -int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } -unsigned int rte_ring_get_size_w(const struct rte_ring *r) { - return rte_ring_get_size(r); -} -unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { - return rte_ring_get_capacity(r); -} -enum rte_ring_sync_type -rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_prod_sync_type(r); -} -int rte_ring_is_prod_single_w(const struct rte_ring *r) { 
- return rte_ring_is_prod_single(r); -} -enum rte_ring_sync_type -rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_cons_sync_type(r); -} -int rte_ring_is_cons_single_w(const struct rte_ring *r) { - return rte_ring_is_cons_single(r); -} -unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, - void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst(r, obj_table, n, free_space); -} -unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst(r, obj_table, n, available); -} -unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_burst(r, obj_table, n, available); -} -void *rte_memcpy_w(void *dst, const void *src, size_t n) { - return rte_memcpy(dst, src, n); -} -// void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { -// return rte_mov15_or_less(dst, src, n); -// } -void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } -void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } -void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } -void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } -// void 
*rte_memcpy_generic_w(void *dst, const void *src, size_t n) { -// return rte_memcpy_generic(dst, src, n); -// } -// void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { -// return rte_memcpy_aligned(dst, src, n); -// } -struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { - return rte_mempool_get_header(obj); -} -struct rte_mempool *rte_mempool_from_obj_w(void *obj) { - return rte_mempool_from_obj(obj); -} -struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { - return rte_mempool_get_trailer(obj); -} -struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { - return rte_mempool_get_ops(ops_index); -} -int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); -} -int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, - void **first_obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); -} -int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, - void *const *obj_table, unsigned int n) { - return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); -} -struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, - unsigned int lcore_id) { - return rte_mempool_default_cache(mp, lcore_id); -} -void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, - struct rte_mempool *mp) { - rte_mempool_cache_flush(cache, mp); -} -void rte_mempool_do_generic_put_w(struct rte_mempool *mp, - void *const *obj_table, unsigned int n, - struct rte_mempool_cache *cache) { - rte_mempool_do_generic_put(mp, obj_table, n, cache); -} -void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n, - struct rte_mempool_cache *cache) { - rte_mempool_generic_put(mp, obj_table, n, cache); -} -void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n) { - rte_mempool_put_bulk(mp, obj_table, n); -} -void 
rte_mempool_put_w(struct rte_mempool *mp, void *obj) { - rte_mempool_put(mp, obj); -} -int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, - unsigned int n, - struct rte_mempool_cache *cache) { - return rte_mempool_do_generic_get(mp, obj_table, n, cache); -} -int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, - unsigned int n, struct rte_mempool_cache *cache) { - return rte_mempool_generic_get(mp, obj_table, n, cache); -} -int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_get_bulk(mp, obj_table, n); -} -int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { - return rte_mempool_get(mp, obj_p); -} -int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, - void **first_obj_table, unsigned int n) { - return rte_mempool_get_contig_blocks(mp, first_obj_table, n); -} -int rte_mempool_full_w(const struct rte_mempool *mp) { - return rte_mempool_full(mp); -} -int rte_mempool_empty_w(const struct rte_mempool *mp) { - return rte_mempool_empty(mp); -} -rte_iova_t rte_mempool_virt2iova_w(const void *elt) { - return rte_mempool_virt2iova(elt); -} -void *rte_mempool_get_priv_w(struct rte_mempool *mp) { - return rte_mempool_get_priv(mp); -} -void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } -void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } -void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } -void rte_prefetch_non_temporal_w(const void *p) { - rte_prefetch_non_temporal(p); -} -void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } -void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } -void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } -void rte_cldemote_w(const void *p) { rte_cldemote(p); } -uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } -uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } -uint64_t rte_constant_bswap64_w(uint64_t x) { 
return rte_constant_bswap64(x); } -// uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } -// uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } -// uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } -void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part1(m); -} -void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part2(m); -} -uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_size(mp); -} -rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { - return rte_mbuf_iova_get(m); -} -void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { - rte_mbuf_iova_set(m, iova); -} -rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova(mb); -} -rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova_default(mb); -} -struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { - return rte_mbuf_from_indirect(mi); -} -char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { - return rte_mbuf_buf_addr(mb, mp); -} -char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { - return rte_mbuf_data_addr_default(mb); -} -char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } -void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } -uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_flags(mp); -} -uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { - return rte_mbuf_refcnt_read(m); -} -void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { - rte_mbuf_refcnt_set(m, new_value); -} -uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { - return rte_mbuf_refcnt_update(m, value); -} -uint16_t -rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { - return rte_mbuf_ext_refcnt_read(shinfo); -} -void 
rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, - uint16_t new_value) { - rte_mbuf_ext_refcnt_set(shinfo, new_value); -} -uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, - int16_t value) { - return rte_mbuf_ext_refcnt_update(shinfo, value); -} -struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { - return rte_mbuf_raw_alloc(mp); -} -void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } -uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_data_room_size(mp); -} -void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { - rte_pktmbuf_reset_headroom(m); -} -void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } -struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { - return rte_pktmbuf_alloc(mp); -} -int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, - unsigned int count) { - return rte_pktmbuf_alloc_bulk(pool, mbufs, count); -} -struct rte_mbuf_ext_shared_info * -rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, - rte_mbuf_extbuf_free_callback_t free_cb, - void *fcb_opaque) { - return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, - fcb_opaque); -} -void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, - rte_iova_t buf_iova, uint16_t buf_len, - struct rte_mbuf_ext_shared_info *shinfo) { - rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); -} -void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, - const struct rte_mbuf *msrc) { - rte_mbuf_dynfield_copy(mdst, msrc); -} -void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { - rte_pktmbuf_attach(mi, m); -} -void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } -struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { - return rte_pktmbuf_prefree_seg(m); -} -void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } -void 
rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } -void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { - rte_pktmbuf_refcnt_update(m, v); -} -uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_headroom(m); -} -uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_tailroom(m); -} -struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m) { - return rte_pktmbuf_lastseg(m); -} -char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_prepend(m, len); -} -char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_append(m, len); -} -char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_adj(m, len); -} -int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_trim(m, len); -} -int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { - return rte_pktmbuf_is_contiguous(m); -} -const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, - uint32_t len, void *buf) { - return rte_pktmbuf_read(m, off, len, buf); -} -int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { - return rte_pktmbuf_chain(head, tail); -} -uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, - uint64_t tso, uint64_t ol3, uint64_t ol2, - uint64_t unused) { - return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); -} -int rte_validate_tx_offload_w(const struct rte_mbuf *m) { - return rte_validate_tx_offload(m); -} -int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { - return rte_pktmbuf_linearize(mbuf); -} -uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_queue_get(m); -} -uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_traffic_class_get(m); -} -uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_color_get(m); -} -void 
rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, - uint8_t *traffic_class, uint8_t *color) { - rte_mbuf_sched_get(m, queue_id, traffic_class, color); -} -void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { - rte_mbuf_sched_queue_set(m, queue_id); -} -void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, - uint8_t traffic_class) { - rte_mbuf_sched_traffic_class_set(m, traffic_class); -} -void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { - rte_mbuf_sched_color_set(m, color); -} -void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, - uint8_t traffic_class, uint8_t color) { - rte_mbuf_sched_set(m, queue_id, traffic_class, color); -} -int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, - const struct rte_ether_addr *ea2) { - return rte_is_same_ether_addr(ea1, ea2); -} -int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_zero_ether_addr(ea); -} -int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_unicast_ether_addr(ea); -} -int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_multicast_ether_addr(ea); -} -int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_broadcast_ether_addr(ea); -} -int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_universal_ether_addr(ea); -} -int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_local_admin_ether_addr(ea); -} -int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_valid_assigned_ether_addr(ea); -} -void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, - struct rte_ether_addr *ea_to) { - rte_ether_addr_copy(ea_from, ea_to); -} -int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } -int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } -uint32_t 
rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { - return rte_bitmap_get_memory_footprint(n_bits); -} -struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - return rte_bitmap_init(n_bits, mem, mem_size); -} -struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); -} -void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } -void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } -void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_prefetch0(bmp, pos); -} -uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { - return rte_bitmap_get(bmp, pos); -} -void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_set(bmp, pos); -} -void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, - uint64_t slab) { - rte_bitmap_set_slab(bmp, pos, slab); -} -void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_clear(bmp, pos); -} -int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { - return rte_bitmap_scan(bmp, pos, slab); -} -uint16_t rte_raw_cksum_w(const void *buf, size_t len) { - return rte_raw_cksum(buf, len); -} -int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, - uint16_t *cksum) { - return rte_raw_cksum_mbuf(m, off, len, cksum); -} -uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_hdr_len(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_cksum(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_cksum_simple(ipv4_hdr); -} -uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, - uint64_t ol_flags) { - return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); -} -uint16_t rte_ipv4_udptcp_cksum_w(const struct 
rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); -} -uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, - const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); -} -int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); -} -int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, - const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); -} -bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b) { - return rte_ipv6_addr_eq(a, b); -} -void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { - rte_ipv6_addr_mask(ip, depth); -} -bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b, uint8_t depth) { - return rte_ipv6_addr_eq_prefix(a, b, depth); -} -uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { - return rte_ipv6_mask_depth(mask); -} -bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_unspec(ip); -} -bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_loopback(ip); -} -bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_linklocal(ip); -} -bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_sitelocal(ip); -} -bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_v4compat(ip); -} -bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_v4mapped(ip); -} -bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_mcast(ip); -} -enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { - return 
rte_ipv6_mc_scope(ip); -} -void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, - const struct rte_ether_addr *mac) { - rte_ipv6_llocal_from_ethernet(ip, mac); -} -void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, - const struct rte_ipv6_addr *ip) { - rte_ipv6_solnode_from_addr(sol, ip); -} -void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, - const struct rte_ipv6_addr *ip) { - rte_ether_mcast_from_ipv6(mac, ip); -} -int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { - return rte_ipv6_check_version(ip); -} -uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - uint64_t ol_flags) { - return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); -} -uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); -} -uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, - const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); -} -int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); -} -int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, - const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); -} -int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { - return rte_ipv6_get_next_ext(p, proto, ext_len); -} -enum rte_color -rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, - struct rte_meter_srtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); -} -enum rte_color rte_meter_srtcm_color_aware_check_w( - struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} 
-enum rte_color -rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, - struct rte_meter_trtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); -} -enum rte_color rte_meter_trtcm_color_aware_check_w( - struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} -enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( - struct rte_meter_trtcm_rfc4115 *m, - struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, - uint32_t pkt_len) { - return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); -} -enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( - struct rte_meter_trtcm_rfc4115 *m, - struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, - enum rte_color pkt_color) { - return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, - pkt_color); -} -uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { - return rte_eth_rss_hf_refine(rss_hf); -} - -uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { - return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); -} -int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_rx_queue_count(port_id, queue_id); -} -int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_rx_descriptor_status(port_id, queue_id, offset); -} -int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_tx_descriptor_status(port_id, queue_id, offset); -} -uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); -} -uint16_t rte_eth_tx_prepare_w(uint16_t port_id, 
uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); -} -uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, - struct rte_eth_dev_tx_buffer *buffer) { - return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); -} -uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, - struct rte_eth_dev_tx_buffer *buffer, - struct rte_mbuf *tx_pkt) { - return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); -} -uint16_t -rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, - uint16_t tx_port_id, uint16_t tx_queue_id, - struct rte_eth_recycle_rxq_info *recycle_rxq_info) { - return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, - recycle_rxq_info); -} -int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_tx_queue_count(port_id, queue_id); -} -uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { - return rte_flow_dynf_metadata_get(m); -} -void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { - rte_flow_dynf_metadata_set(m, v); -} -int rte_flow_dynf_metadata_avail_w(void) { - return rte_flow_dynf_metadata_avail(); -} -uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { - return rte_hash_crc_1byte(data, init_val); -} -uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { - return rte_hash_crc_2byte(data, init_val); -} -uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { - return rte_hash_crc_4byte(data, init_val); -} -uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { - return rte_hash_crc_8byte(data, init_val); -} -uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, - uint32_t init_val) { - return rte_hash_crc(data, data_len, init_val); -} -void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_2hashes(key, length, pc, pb); -} -void rte_jhash_32b_2hashes_w(const 
uint32_t *k, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_32b_2hashes(k, length, pc, pb); -} -uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { - return rte_jhash(key, length, initval); -} -uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { - return rte_jhash_32b(k, length, initval); -} -uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, - uint32_t initval) { - return rte_jhash_3words(a, b, c, initval); -} -uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { - return rte_jhash_2words(a, b, initval); -} -uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { - return rte_jhash_1word(a, initval); -} -uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key) { - return rte_fbk_hash_get_bucket(ht, key); -} -int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, - uint32_t key, uint16_t value, - uint32_t bucket) { - return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); -} -int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, - uint16_t value) { - return rte_fbk_hash_add_key(ht, key, value); -} -int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { - return rte_fbk_hash_delete_key(ht, key); -} -int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { - return rte_fbk_hash_lookup(ht, key); -} -void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { - rte_fbk_hash_clear_all(ht); -} -double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { - return rte_fbk_hash_get_load_factor(ht); -} 
-void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_online(v, thread_id); -} -void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_offline(v, thread_id); -} -void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_lock(v, thread_id); -} -void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_unlock(v, thread_id); -} -uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { - return rte_rcu_qsbr_start(v); -} -void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_quiescent(v, thread_id); -} -int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { - return rte_rcu_qsbr_check(v, t, wait); -} -uint8_t rte_read8_relaxed_w(const void *addr) { - return rte_read8_relaxed(addr); -} -uint16_t rte_read16_relaxed_w(const void *addr) { - return rte_read16_relaxed(addr); -} -uint32_t rte_read32_relaxed_w(const void *addr) { - return rte_read32_relaxed(addr); -} -uint64_t rte_read64_relaxed_w(const void *addr) { - return rte_read64_relaxed(addr); -} -void rte_write8_relaxed_w(uint8_t value, void *addr) { - rte_write8_relaxed(value, addr); -} -void rte_write16_relaxed_w(uint16_t value, void *addr) { - rte_write16_relaxed(value, addr); -} -void rte_write32_relaxed_w(uint32_t value, void *addr) { - rte_write32_relaxed(value, addr); -} -void rte_write64_relaxed_w(uint64_t value, void *addr) { - rte_write64_relaxed(value, addr); -} -uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } -uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } -uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } -uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } -void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } -void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, 
addr); } -void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } -void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } -void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { - rte_write32_wc_relaxed(value, addr); -} -void rte_write32_wc_w(uint32_t value, void *addr) { - rte_write32_wc(value, addr); -} -void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_lock(msl, me); -} -void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_unlock(msl, me); -} -int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - return rte_mcslock_trylock(msl, me); -} -int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { - return rte_mcslock_is_locked(msl); -} -void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } -void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } -void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } -void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } -void rte_pflock_write_unlock_w(rte_pflock_t *pf) { - rte_pflock_write_unlock(pf); -} -uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { - return rte_reciprocal_divide(a, R); -} -uint64_t rte_reciprocal_divide_u64_w(uint64_t a, - const struct rte_reciprocal_u64 *R) { - return rte_reciprocal_divide_u64(a, R); -} -void rte_seqcount_init_w(rte_seqcount_t *seqcount) { - rte_seqcount_init(seqcount); -} -uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { - return rte_seqcount_read_begin(seqcount); -} -bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, - uint32_t begin_sn) { - return rte_seqcount_read_retry(seqcount, begin_sn); -} -void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_begin(seqcount); -} -void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_end(seqcount); -} -void rte_seqlock_init_w(rte_seqlock_t 
*seqlock) { rte_seqlock_init(seqlock); } -uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { - return rte_seqlock_read_begin(seqlock); -} -bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { - return rte_seqlock_read_retry(seqlock, begin_sn); -} -void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_lock(seqlock); -} -void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_unlock(seqlock); -} -unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, - unsigned int n) { - return rte_stack_push(s, obj_table, n); -} -unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, - unsigned int n) { - return rte_stack_pop(s, obj_table, n); -} -unsigned int rte_stack_count_w(struct rte_stack *s) { - return rte_stack_count(s); -} -unsigned int rte_stack_free_count_w(struct rte_stack *s) { - return rte_stack_free_count(s); -} -uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss(input_tuple, input_len, rss_key); -} -uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss_be(input_tuple, input_len, rss_key); -} -void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } -void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } -void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { - rte_ticketlock_unlock(tl); -} -int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { - return rte_ticketlock_trylock(tl); -} -int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { - return rte_ticketlock_is_locked(tl); -} -void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_init(tlr); -} -void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_lock(tlr); -} -void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { - 
rte_ticketlock_recursive_unlock(tlr); -} -int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { - return rte_ticketlock_recursive_trylock(tlr); -} -uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, - uint64_t cycles) { - return rte_cyclecounter_cycles_to_ns(tc, cycles); -} -uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, - uint64_t cycle_now) { - return rte_timecounter_update(tc, cycle_now); -} -uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { - return rte_timespec_to_ns(ts); -} -struct timespec rte_ns_to_timespec_w(uint64_t nsec) { - return rte_ns_to_timespec(nsec); -} -bool rte_trace_feature_is_enabled_w(void) { - return rte_trace_feature_is_enabled(); -} diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h index 53b05b961..5a57ffc6d 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h @@ -277,7 +277,7 @@ enum rte_eth_tx_offload : uint64_t { * DPDK is not yet using the C23 standard (which would allow the inheritance * notation with `uint64_t` seen here.). 
*/ -enum wrte_eth_rx_offload : uint64_t { +enum rte_eth_rx_offload : uint64_t { RX_OFFLOAD_VLAN_STRIP = RTE_ETH_RX_OFFLOAD_VLAN_STRIP, RX_OFFLOAD_IPV4_CKSUM = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, RX_OFFLOAD_UDP_CKSUM = RTE_ETH_RX_OFFLOAD_UDP_CKSUM, @@ -297,3 +297,1518 @@ enum wrte_eth_rx_offload : uint64_t { RX_OFFLOAD_RSS_HASH = RTE_ETH_RX_OFFLOAD_RSS_HASH, RX_OFFLOAD_BUFFER_SPLIT = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, }; + + +int rte_errno_get() { return rte_errno; } + +// Static wrappers + +int rte_is_aligned_w(const void *const ptr, const unsigned int align) { + return rte_is_aligned(ptr, align); +} +void rte_atomic_thread_fence_w(rte_memory_order memorder) { + rte_atomic_thread_fence(memorder); +} +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { + return rte_atomic16_cmpset(dst, exp, src); +} +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { + return rte_atomic16_exchange(dst, val); +} +void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } +int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { + return rte_atomic16_read(v); +} +void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { + rte_atomic16_set(v, new_value); +} +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { + rte_atomic16_add(v, inc); +} +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { + rte_atomic16_sub(v, dec); +} +void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } +void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { + return rte_atomic16_add_return(v, inc); +} +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { + return rte_atomic16_sub_return(v, dec); +} +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_inc_and_test(v); +} +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_dec_and_test(v); +} +int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { + return 
rte_atomic16_test_and_set(v); +} +void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { + return rte_atomic32_cmpset(dst, exp, src); +} +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { + return rte_atomic32_exchange(dst, val); +} +void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } +int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { + return rte_atomic32_read(v); +} +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { + rte_atomic32_set(v, new_value); +} +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { + rte_atomic32_add(v, inc); +} +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { + rte_atomic32_sub(v, dec); +} +void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } +void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } +int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { + return rte_atomic32_add_return(v, inc); +} +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { + return rte_atomic32_sub_return(v, dec); +} +int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_inc_and_test(v); +} +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_dec_and_test(v); +} +int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { + return rte_atomic32_test_and_set(v); +} +void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { + return rte_atomic64_cmpset(dst, exp, src); +} +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { + return rte_atomic64_exchange(dst, val); +} +void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } +int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { + rte_atomic64_set(v, new_value); +} +void 
rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { + rte_atomic64_add(v, inc); +} +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { + rte_atomic64_sub(v, dec); +} +void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } +void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { + return rte_atomic64_add_return(v, inc); +} +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { + return rte_atomic64_sub_return(v, dec); +} +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_inc_and_test(v); +} +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_dec_and_test(v); +} +int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { + return rte_atomic64_test_and_set(v); +} +void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } +void rte_smp_mb_w(void) { rte_smp_mb(); } +uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } +uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } +uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } +void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } +uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } +uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } +size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { + return rte_strlcpy(dst, src, size); +} +size_t rte_strlcat_w(char *dst, const char *src, size_t size) { + return rte_strlcat(dst, src, size); +} +const char *rte_str_skip_leading_spaces_w(const char *src) { + return rte_str_skip_leading_spaces(src); +} +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { + rte_uuid_copy(dst, src); +} +int rte_gettid_w(void) { return rte_gettid(); } +unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } +void rte_pause_w(void) { rte_pause(); } +void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, + rte_memory_order memorder) { + 
rte_wait_until_equal_16(addr, expected, memorder); +} +void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_32(addr, expected, memorder); +} +void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_64(addr, expected, memorder); +} +void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } +void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } +void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } +int rte_spinlock_trylock_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock(sl); +} +int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { + return rte_spinlock_is_locked(sl); +} +int rte_tm_supported_w(void) { return rte_tm_supported(); } +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } +void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { + rte_spinlock_unlock_tm(sl); +} +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock_tm(sl); +} +void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_init(slr); +} +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock(slr); +} +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock(slr); +} +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock(slr); +} +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock_tm(slr); +} +void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock_tm(slr); +} +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock_tm(slr); +} +// unsigned int rte_xbegin_w(void) { return rte_xbegin(); } +// void rte_xend_w(void) { rte_xend(); } +// 
int rte_xtest_w(void) { return rte_xtest(); } +// int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_get32(nr, addr); +} +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_set32(nr, addr); +} +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_clear32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_set32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_clear32(nr, addr); +} +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_get64(nr, addr); +} +void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_set64(nr, addr); +} +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_clear64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_set64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_clear64(nr, addr); +} +unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } +unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } +unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } +unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } +unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } +unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } +uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } +uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } +uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { + return rte_bsf32_safe(v, pos); +} +uint32_t 
rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { + return rte_bsf64_safe(v, pos); +} +uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } +uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } +int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } +uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } +uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } +uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } +uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } +uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } +uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } +void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } +int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_read_trylock(rwl); +} +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock(rwl); +} +int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_trylock(rwl); +} +void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock(rwl); +} +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_is_locked(rwl); +} +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_lock_tm(rwl); +} +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock_tm(rwl); +} +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_lock_tm(rwl); +} +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock_tm(rwl); +} +unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return 
rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, 
unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); +} +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { + return 
rte_ring_get_prod_htd_max(r); +} +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_prod_htd_max(r, v); +} +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_cons_htd_max(r); +} +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_cons_htd_max(r, v); +} +unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_mp_enqueue_elem(r, obj, esize); +} +int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_sp_enqueue_elem(r, obj, esize); +} +int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { + return rte_ring_enqueue_elem(r, obj, esize); +} +unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_mc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_sc_dequeue_elem(r, obj_p, esize); +} +int 
rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_dequeue_elem(r, obj_p, esize); +} +unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int 
*free_space) { + return rte_ring_enqueue_burst_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_start(r, n, free_space); +} +void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n) { + rte_ring_enqueue_elem_finish(r, obj_table, esize, n); +} +void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, + unsigned int n) { + rte_ring_enqueue_finish(r, obj_table, n); +} +unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_start(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_start(r, obj_table, n, available); +} +void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_elem_finish(r, n); +} +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_finish(r, n); +} +unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, + 
unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); +} +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_elem_finish(r, n); +} +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_finish(r, n); +} +unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *available) { + return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); +} +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_elem_finish(r, n); +} +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { + 
rte_ring_dequeue_zc_finish(r, n); +} +unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, + unsigned int n, unsigned int *free_space) { + return rte_ring_enqueue_bulk(r, obj_table, n, free_space); +} +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_mp_enqueue(r, obj); +} +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_sp_enqueue(r, obj); +} +int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_enqueue(r, obj); +} +unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_bulk(r, obj_table, n, available); +} +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_mc_dequeue(r, obj_p); +} +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_sc_dequeue(r, obj_p); +} +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_dequeue(r, obj_p); +} +unsigned int rte_ring_count_w(const struct rte_ring *r) { + return rte_ring_count(r); +} +unsigned int rte_ring_free_count_w(const struct rte_ring *r) { + return rte_ring_free_count(r); +} 
+int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } +int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } +unsigned int rte_ring_get_size_w(const struct rte_ring *r) { + return rte_ring_get_size(r); +} +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { + return rte_ring_get_capacity(r); +} +enum rte_ring_sync_type +rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_prod_sync_type(r); +} +int rte_ring_is_prod_single_w(const struct rte_ring *r) { + return rte_ring_is_prod_single(r); +} +enum rte_ring_sync_type +rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_cons_sync_type(r); +} +int rte_ring_is_cons_single_w(const struct rte_ring *r) { + return rte_ring_is_cons_single(r); +} +unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_burst(r, obj_table, n, available); +} +void 
*rte_memcpy_w(void *dst, const void *src, size_t n) { + return rte_memcpy(dst, src, n); +} +// void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { +// return rte_mov15_or_less(dst, src, n); +// } +void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } +void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } +void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } +void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } +// void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_generic(dst, src, n); +// } +// void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_aligned(dst, src, n); +// } +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { + return rte_mempool_get_header(obj); +} +struct rte_mempool *rte_mempool_from_obj_w(void *obj) { + return rte_mempool_from_obj(obj); +} +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { + return rte_mempool_get_trailer(obj); +} +struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { + return rte_mempool_get_ops(ops_index); +} +int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); +} +int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n) { + return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); +} +struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, + unsigned int lcore_id) { + return rte_mempool_default_cache(mp, lcore_id); +} +void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, + struct rte_mempool *mp) { + rte_mempool_cache_flush(cache, mp); +} +void 
rte_mempool_do_generic_put_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_do_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n) { + rte_mempool_put_bulk(mp, obj_table, n); +} +void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { + rte_mempool_put(mp, obj); +} +int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + return rte_mempool_do_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { + return rte_mempool_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_get_bulk(mp, obj_table, n); +} +int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { + return rte_mempool_get(mp, obj_p); +} +int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) { + return rte_mempool_get_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_full_w(const struct rte_mempool *mp) { + return rte_mempool_full(mp); +} +int rte_mempool_empty_w(const struct rte_mempool *mp) { + return rte_mempool_empty(mp); +} +rte_iova_t rte_mempool_virt2iova_w(const void *elt) { + return rte_mempool_virt2iova(elt); +} +void *rte_mempool_get_priv_w(struct rte_mempool *mp) { + return rte_mempool_get_priv(mp); +} +void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } +void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } +void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } +void 
rte_prefetch_non_temporal_w(const void *p) { + rte_prefetch_non_temporal(p); +} +void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } +void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } +void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } +void rte_cldemote_w(const void *p) { rte_cldemote(p); } +uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } +uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } +uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } +// uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } +// uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } +// uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part1(m); +} +void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part2(m); +} +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_size(mp); +} +rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { + return rte_mbuf_iova_get(m); +} +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { + rte_mbuf_iova_set(m, iova); +} +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova(mb); +} +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova_default(mb); +} +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { + return rte_mbuf_from_indirect(mi); +} +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { + return rte_mbuf_buf_addr(mb, mp); +} +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { + return rte_mbuf_data_addr_default(mb); +} +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } +void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } +uint32_t 
rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_flags(mp); +} +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { + return rte_mbuf_refcnt_read(m); +} +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { + rte_mbuf_refcnt_set(m, new_value); +} +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { + return rte_mbuf_refcnt_update(m, value); +} +uint16_t +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { + return rte_mbuf_ext_refcnt_read(shinfo); +} +void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, + uint16_t new_value) { + rte_mbuf_ext_refcnt_set(shinfo, new_value); +} +uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, + int16_t value) { + return rte_mbuf_ext_refcnt_update(shinfo, value); +} +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { + return rte_mbuf_raw_alloc(mp); +} +void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_data_room_size(mp); +} +void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { + rte_pktmbuf_reset_headroom(m); +} +void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { + return rte_pktmbuf_alloc(mp); +} +int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned int count) { + return rte_pktmbuf_alloc_bulk(pool, mbufs, count); +} +struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, + void *fcb_opaque) { + return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, + fcb_opaque); +} +void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo) { + 
rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); +} +void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, + const struct rte_mbuf *msrc) { + rte_mbuf_dynfield_copy(mdst, msrc); +} +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { + rte_pktmbuf_attach(mi, m); +} +void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { + return rte_pktmbuf_prefree_seg(m); +} +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } +void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { + rte_pktmbuf_refcnt_update(m, v); +} +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_headroom(m); +} +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_tailroom(m); +} +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m) { + return rte_pktmbuf_lastseg(m); +} +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_prepend(m, len); +} +char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_append(m, len); +} +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_adj(m, len); +} +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_trim(m, len); +} +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { + return rte_pktmbuf_is_contiguous(m); +} +const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf) { + return rte_pktmbuf_read(m, off, len, buf); +} +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { + return rte_pktmbuf_chain(head, tail); +} +uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, + uint64_t tso, uint64_t ol3, uint64_t ol2, + uint64_t unused) { + return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, 
unused); +} +int rte_validate_tx_offload_w(const struct rte_mbuf *m) { + return rte_validate_tx_offload(m); +} +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { + return rte_pktmbuf_linearize(mbuf); +} +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_queue_get(m); +} +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_traffic_class_get(m); +} +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_color_get(m); +} +void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, uint8_t *color) { + rte_mbuf_sched_get(m, queue_id, traffic_class, color); +} +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { + rte_mbuf_sched_queue_set(m, queue_id); +} +void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, + uint8_t traffic_class) { + rte_mbuf_sched_traffic_class_set(m, traffic_class); +} +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { + rte_mbuf_sched_color_set(m, color); +} +void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, + uint8_t traffic_class, uint8_t color) { + rte_mbuf_sched_set(m, queue_id, traffic_class, color); +} +int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, + const struct rte_ether_addr *ea2) { + return rte_is_same_ether_addr(ea1, ea2); +} +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_zero_ether_addr(ea); +} +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_unicast_ether_addr(ea); +} +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_multicast_ether_addr(ea); +} +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_broadcast_ether_addr(ea); +} +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_universal_ether_addr(ea); +} +int 
rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_local_admin_ether_addr(ea); +} +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_valid_assigned_ether_addr(ea); +} +void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, + struct rte_ether_addr *ea_to) { + rte_ether_addr_copy(ea_from, ea_to); +} +int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } +int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { + return rte_bitmap_get_memory_footprint(n_bits); +} +struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init(n_bits, mem, mem_size); +} +struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); +} +void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } +void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } +void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_prefetch0(bmp, pos); +} +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { + return rte_bitmap_get(bmp, pos); +} +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_set(bmp, pos); +} +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, + uint64_t slab) { + rte_bitmap_set_slab(bmp, pos, slab); +} +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_clear(bmp, pos); +} +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { + return rte_bitmap_scan(bmp, pos, slab); +} +uint16_t rte_raw_cksum_w(const void *buf, size_t len) { + return rte_raw_cksum(buf, len); +} +int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, + uint16_t *cksum) { + return rte_raw_cksum_mbuf(m, 
off, len, cksum); +} +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_hdr_len(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum_simple(ipv4_hdr); +} +uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + uint64_t ol_flags) { + return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +} +uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); +} +uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); +} +int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); +} +int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); +} +bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b) { + return rte_ipv6_addr_eq(a, b); +} +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { + rte_ipv6_addr_mask(ip, depth); +} +bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b, uint8_t depth) { + return rte_ipv6_addr_eq_prefix(a, b, depth); +} +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { + return rte_ipv6_mask_depth(mask); +} +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_unspec(ip); +} +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_loopback(ip); +} +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { + return 
rte_ipv6_addr_is_linklocal(ip); +} +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_sitelocal(ip); +} +bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4compat(ip); +} +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4mapped(ip); +} +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_mcast(ip); +} +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_mc_scope(ip); +} +void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, + const struct rte_ether_addr *mac) { + rte_ipv6_llocal_from_ethernet(ip, mac); +} +void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, + const struct rte_ipv6_addr *ip) { + rte_ipv6_solnode_from_addr(sol, ip); +} +void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, + const struct rte_ipv6_addr *ip) { + rte_ether_mcast_from_ipv6(mac, ip); +} +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { + return rte_ipv6_check_version(ip); +} +uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + uint64_t ol_flags) { + return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +} +uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); +} +uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); +} +int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); +} +int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); +} +int rte_ipv6_get_next_ext_w(const uint8_t *p, int 
proto, size_t *ext_len) { + return rte_ipv6_get_next_ext(p, proto, ext_len); +} +enum rte_color +rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, + struct rte_meter_srtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_srtcm_color_aware_check_w( + struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color +rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_color_aware_check_w( + struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, + uint32_t pkt_len) { + return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, + enum rte_color pkt_color) { + return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, + pkt_color); +} +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { + return rte_eth_rss_hf_refine(rss_hf); +} + +uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { + return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +} +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_rx_queue_count(port_id, queue_id); 
+} +int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_rx_descriptor_status(port_id, queue_id, offset); +} +int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_tx_descriptor_status(port_id, queue_id, offset); +} +uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer) { + return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); +} +uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer, + struct rte_mbuf *tx_pkt) { + return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); +} +uint16_t +rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) { + return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, + recycle_rxq_info); +} +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_tx_queue_count(port_id, queue_id); +} +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { + return rte_flow_dynf_metadata_get(m); +} +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { + rte_flow_dynf_metadata_set(m, v); +} +int rte_flow_dynf_metadata_avail_w(void) { + return rte_flow_dynf_metadata_avail(); +} +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { + return rte_hash_crc_1byte(data, init_val); +} +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { + return rte_hash_crc_2byte(data, 
init_val); +} +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { + return rte_hash_crc_4byte(data, init_val); +} +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { + return rte_hash_crc_8byte(data, init_val); +} +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, + uint32_t init_val) { + return rte_hash_crc(data, data_len, init_val); +} +void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_2hashes(key, length, pc, pb); +} +void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_32b_2hashes(k, length, pc, pb); +} +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { + return rte_jhash(key, length, initval); +} +uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { + return rte_jhash_32b(k, length, initval); +} +uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, + uint32_t initval) { + return rte_jhash_3words(a, b, c, initval); +} +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { + return rte_jhash_2words(a, b, initval); +} +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { + return rte_jhash_1word(a, initval); +} +uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key) { + return rte_fbk_hash_get_bucket(ht, key); +} +int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value, + uint32_t bucket) { + return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); +} +int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, + uint16_t value) { + return rte_fbk_hash_add_key(ht, key, value); +} +int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { + 
return rte_fbk_hash_delete_key(ht, key); +} +int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_lookup(ht, key); +} +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { + rte_fbk_hash_clear_all(ht); +} +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { + return rte_fbk_hash_get_load_factor(ht); +} +void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_online(v, thread_id); +} +void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_offline(v, thread_id); +} +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_lock(v, thread_id); +} +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_unlock(v, thread_id); +} +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { + return rte_rcu_qsbr_start(v); +} +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_quiescent(v, thread_id); +} +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { + return rte_rcu_qsbr_check(v, t, wait); +} +uint8_t rte_read8_relaxed_w(const void *addr) { + return rte_read8_relaxed(addr); +} +uint16_t rte_read16_relaxed_w(const void *addr) { + return rte_read16_relaxed(addr); +} +uint32_t rte_read32_relaxed_w(const void *addr) { + return rte_read32_relaxed(addr); +} +uint64_t rte_read64_relaxed_w(const void *addr) { + return rte_read64_relaxed(addr); +} +void rte_write8_relaxed_w(uint8_t value, void *addr) { + rte_write8_relaxed(value, addr); +} +void rte_write16_relaxed_w(uint16_t value, void *addr) { + rte_write16_relaxed(value, addr); +} +void rte_write32_relaxed_w(uint32_t value, void *addr) { + 
rte_write32_relaxed(value, addr); +} +void rte_write64_relaxed_w(uint64_t value, void *addr) { + rte_write64_relaxed(value, addr); +} +uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } +uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } +uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } +uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } +void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } +void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } +void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } +void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } +void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { + rte_write32_wc_relaxed(value, addr); +} +void rte_write32_wc_w(uint32_t value, void *addr) { + rte_write32_wc(value, addr); +} +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_lock(msl, me); +} +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_unlock(msl, me); +} +int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + return rte_mcslock_trylock(msl, me); +} +int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { + return rte_mcslock_is_locked(msl); +} +void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } +void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } +void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } +void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } +void rte_pflock_write_unlock_w(rte_pflock_t *pf) { + rte_pflock_write_unlock(pf); +} +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { + return rte_reciprocal_divide(a, R); +} +uint64_t rte_reciprocal_divide_u64_w(uint64_t a, + const struct rte_reciprocal_u64 *R) { + return rte_reciprocal_divide_u64(a, R); +} +void rte_seqcount_init_w(rte_seqcount_t 
*seqcount) { + rte_seqcount_init(seqcount); +} +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { + return rte_seqcount_read_begin(seqcount); +} +bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, + uint32_t begin_sn) { + return rte_seqcount_read_retry(seqcount, begin_sn); +} +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_begin(seqcount); +} +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_end(seqcount); +} +void rte_seqlock_init_w(rte_seqlock_t *seqlock) { rte_seqlock_init(seqlock); } +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { + return rte_seqlock_read_begin(seqlock); +} +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { + return rte_seqlock_read_retry(seqlock, begin_sn); +} +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_lock(seqlock); +} +void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_unlock(seqlock); +} +unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, + unsigned int n) { + return rte_stack_push(s, obj_table, n); +} +unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, + unsigned int n) { + return rte_stack_pop(s, obj_table, n); +} +unsigned int rte_stack_count_w(struct rte_stack *s) { + return rte_stack_count(s); +} +unsigned int rte_stack_free_count_w(struct rte_stack *s) { + return rte_stack_free_count(s); +} +uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss(input_tuple, input_len, rss_key); +} +uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss_be(input_tuple, input_len, rss_key); +} +void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } +void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } +void 
rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { + rte_ticketlock_unlock(tl); +} +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { + return rte_ticketlock_trylock(tl); +} +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { + return rte_ticketlock_is_locked(tl); +} +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_init(tlr); +} +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_lock(tlr); +} +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_unlock(tlr); +} +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { + return rte_ticketlock_recursive_trylock(tlr); +} +uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, + uint64_t cycles) { + return rte_cyclecounter_cycles_to_ns(tc, cycles); +} +uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, + uint64_t cycle_now) { + return rte_timecounter_update(tc, cycle_now); +} +uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { + return rte_timespec_to_ns(ts); +} +struct timespec rte_ns_to_timespec_w(uint64_t nsec) { + return rte_ns_to_timespec(nsec); +} +bool rte_trace_feature_is_enabled_w(void) { + return rte_trace_feature_is_enabled(); +} diff --git a/nix/target.nix b/nix/target.nix index 36cbe22ee..14c0f88e0 100644 --- a/nix/target.nix +++ b/nix/target.nix @@ -31,7 +31,15 @@ let NIX_CFLAGS_LINK = [ ]; }; }; - zen4 = lib.recursiveUpdate x86-64-v4 rec { + zen3 = lib.recursiveUpdate x86-64-v4 rec { + march = "znver3"; + override.stdenv.env = rec { + NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; + NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; + NIX_CFLAGS_LINK = [ ]; + }; + }; + zen4 = lib.recursiveUpdate zen3 rec { march = "znver4"; override.stdenv.env = rec { NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; diff --git a/sysfs/build.rs b/sysfs/build.rs index 52f5b0197..9d7c9069b 100644 --- a/sysfs/build.rs +++ b/sysfs/build.rs @@ 
-4,5 +4,5 @@ fn main() { let sysroot = dpdk_sysroot_helper::get_sysroot(); println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/sysroot b/sysroot deleted file mode 120000 index 22ffe82ea..000000000 --- a/sysroot +++ /dev/null @@ -1 +0,0 @@ -/nix/store/40gcqs5h4gbs9ixw16wbg9161mrxb0k2-sysroot \ No newline at end of file From df4fc7ab7a926436f5b8f4225269939dc2d63938 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 18:23:13 +0000 Subject: [PATCH 26/35] wip --- default.nix | 17 +- nix/overlays/dataplane.nix | 6 +- nix/overlays/default.nix | 15 +- nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c | 1513 +++++++++++++++++++ nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h | 1716 ++++++---------------- nix/platform.nix | 83 -- npins/sources.json | 4 +- 7 files changed, 2009 insertions(+), 1345 deletions(-) delete mode 100644 nix/platform.nix diff --git a/default.nix b/default.nix index 1fde49210..04d3d9edc 100644 --- a/default.nix +++ b/default.nix @@ -56,12 +56,17 @@ let dpdk-wrapper.dev dpdk-wrapper.out ]; - build-tools-list = with pkgs.buildPackages.llvmPackages; [ - bintools - clang - libclang.lib - lld - ]; + build-tools-list = + with pkgs.buildPackages; + [ + llvmPackages.bintools + llvmPackages.clang + llvmPackages.libclang.lib + llvmPackages.lld + ] + ++ [ + npins + ]; in pkgs.lib.fix (final: { inherit diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 3415552c2..44aef8cfb 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -66,7 +66,11 @@ in # At minimum, the provided functions are generally quite small and likely to benefit from inlining, so static linking # is a solid plan. 
libmd = (dataplane-dep prev.libmd).overrideAttrs (orig: { - outputs = (orig.outputs or [ "out" ]) ++ [ "man" "dev" "static" ]; + outputs = (orig.outputs or [ "out" ]) ++ [ + "man" + "dev" + "static" + ]; # we need to enable shared libs (in addition to static) to make dpdk's build happy. Basically, DPDK's build has no # means of disabling shared libraries, and it doesn't really make any sense to static link this into each .so # file. Ideally we would just _not_ build those .so files, but that would require doing brain surgery on dpdk's diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 488472a3c..ecb927248 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -1,13 +1,18 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Open Network Fabric Authors { - sources, - sanitizers, - target, - profile, + sources, + sanitizers, + target, + profile, }: { dataplane = import ./dataplane.nix { - inherit sources sanitizers target profile; + inherit + sources + sanitizers + target + profile + ; }; } diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c index c9a6f09b7..2309a4ec9 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.c @@ -3,3 +3,1516 @@ #include "dpdk_wrapper.h" +int rte_errno_get() { return rte_errno; } + +// Static wrappers + +int rte_is_aligned_w(const void *const ptr, const unsigned int align) { + return rte_is_aligned(ptr, align); +} +void rte_atomic_thread_fence_w(rte_memory_order memorder) { + rte_atomic_thread_fence(memorder); +} +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { + return rte_atomic16_cmpset(dst, exp, src); +} +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { + return rte_atomic16_exchange(dst, val); +} +void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } +int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { + return rte_atomic16_read(v); +} +void 
rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { + rte_atomic16_set(v, new_value); +} +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { + rte_atomic16_add(v, inc); +} +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { + rte_atomic16_sub(v, dec); +} +void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } +void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { + return rte_atomic16_add_return(v, inc); +} +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { + return rte_atomic16_sub_return(v, dec); +} +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_inc_and_test(v); +} +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v) { + return rte_atomic16_dec_and_test(v); +} +int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { + return rte_atomic16_test_and_set(v); +} +void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { + return rte_atomic32_cmpset(dst, exp, src); +} +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { + return rte_atomic32_exchange(dst, val); +} +void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } +int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { + return rte_atomic32_read(v); +} +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { + rte_atomic32_set(v, new_value); +} +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { + rte_atomic32_add(v, inc); +} +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { + rte_atomic32_sub(v, dec); +} +void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } +void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } +int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { + return rte_atomic32_add_return(v, inc); +} +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { + 
return rte_atomic32_sub_return(v, dec); +} +int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_inc_and_test(v); +} +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { + return rte_atomic32_dec_and_test(v); +} +int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { + return rte_atomic32_test_and_set(v); +} +void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { + return rte_atomic64_cmpset(dst, exp, src); +} +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { + return rte_atomic64_exchange(dst, val); +} +void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } +int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { + rte_atomic64_set(v, new_value); +} +void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { + rte_atomic64_add(v, inc); +} +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { + rte_atomic64_sub(v, dec); +} +void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } +void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { + return rte_atomic64_add_return(v, inc); +} +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { + return rte_atomic64_sub_return(v, dec); +} +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_inc_and_test(v); +} +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { + return rte_atomic64_dec_and_test(v); +} +int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { + return rte_atomic64_test_and_set(v); +} +void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } +void rte_smp_mb_w(void) { rte_smp_mb(); } +uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } +uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } +uint64_t 
rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } +void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } +uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } +uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } +size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { + return rte_strlcpy(dst, src, size); +} +size_t rte_strlcat_w(char *dst, const char *src, size_t size) { + return rte_strlcat(dst, src, size); +} +const char *rte_str_skip_leading_spaces_w(const char *src) { + return rte_str_skip_leading_spaces(src); +} +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { + rte_uuid_copy(dst, src); +} +int rte_gettid_w(void) { return rte_gettid(); } +unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } +void rte_pause_w(void) { rte_pause(); } +void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_16(addr, expected, memorder); +} +void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_32(addr, expected, memorder); +} +void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, + rte_memory_order memorder) { + rte_wait_until_equal_64(addr, expected, memorder); +} +void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } +void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } +void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } +int rte_spinlock_trylock_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock(sl); +} +int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { + return rte_spinlock_is_locked(sl); +} +int rte_tm_supported_w(void) { return rte_tm_supported(); } +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } +void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { + rte_spinlock_unlock_tm(sl); +} +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { + return rte_spinlock_trylock_tm(sl); +} +void 
rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_init(slr); +} +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock(slr); +} +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock(slr); +} +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock(slr); +} +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_lock_tm(slr); +} +void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { + rte_spinlock_recursive_unlock_tm(slr); +} +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { + return rte_spinlock_recursive_trylock_tm(slr); +} +// unsigned int rte_xbegin_w(void) { return rte_xbegin(); } +// void rte_xend_w(void) { rte_xend(); } +// int rte_xtest_w(void) { return rte_xtest(); } +// int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_get32(nr, addr); +} +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_set32(nr, addr); +} +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { + rte_bit_relaxed_clear32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_set32(nr, addr); +} +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { + return rte_bit_relaxed_test_and_clear32(nr, addr); +} +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_get64(nr, addr); +} +void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_set64(nr, addr); +} +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { + rte_bit_relaxed_clear64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t 
*addr) { + return rte_bit_relaxed_test_and_set64(nr, addr); +} +uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { + return rte_bit_relaxed_test_and_clear64(nr, addr); +} +unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } +unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } +unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } +unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } +unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } +unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } +uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } +uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } +uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { + return rte_bsf32_safe(v, pos); +} +uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { + return rte_bsf64_safe(v, pos); +} +uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } +uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } +int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } +uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } +uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } +uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } +uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } +uint32_t rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } +uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } +void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } +int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_read_trylock(rwl); +} +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock(rwl); +} +int 
rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_trylock(rwl); +} +void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock(rwl); +} +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { + return rte_rwlock_write_is_locked(rwl); +} +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_lock_tm(rwl); +} +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_read_unlock_tm(rwl); +} +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_lock_tm(rwl); +} +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { + rte_rwlock_write_unlock_tm(rwl); +} +unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct 
rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned 
int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); +} +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_prod_htd_max(r); +} +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_prod_htd_max(r, v); +} +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { + return rte_ring_get_cons_htd_max(r); +} +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { + return rte_ring_set_cons_htd_max(r, v); +} +unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); +} +int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_mp_enqueue_elem(r, obj, esize); +} +int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize) { + return rte_ring_sp_enqueue_elem(r, obj, esize); +} +int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { + return rte_ring_enqueue_elem(r, obj, esize); +} +unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int 
esize, unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); +} +int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_mc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_sc_dequeue_elem(r, obj_p, esize); +} +int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize) { + return rte_ring_dequeue_elem(r, obj_p, esize); +} +unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int 
rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); +} +unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_bulk_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_elem_start(r, n, free_space); +} +unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst_start(r, n, free_space); +} +void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n) { + rte_ring_enqueue_elem_finish(r, obj_table, esize, n); +} +void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, + unsigned int n) { + rte_ring_enqueue_finish(r, obj_table, n); +} +unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_bulk_start(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, + void *obj_table, + 
unsigned int esize, + unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); +} +unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available) { + return rte_ring_dequeue_burst_start(r, obj_table, n, available); +} +void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_elem_finish(r, n); +} +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_finish(r, n); +} +unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); +} +unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space) { + return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); +} +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_elem_finish(r, n); +} +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_enqueue_zc_finish(r, n); +} +unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); +} 
+unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *available) { + return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); +} +unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available) { + return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); +} +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_elem_finish(r, n); +} +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { + rte_ring_dequeue_zc_finish(r, n); +} +unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, + unsigned int n, unsigned int *free_space) { + return rte_ring_enqueue_bulk(r, obj_table, n, free_space); +} +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_mp_enqueue(r, obj); +} +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_sp_enqueue(r, obj); +} +int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { + return rte_ring_enqueue(r, obj); +} +unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); +} 
+unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_bulk(r, obj_table, n, available); +} +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_mc_dequeue(r, obj_p); +} +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_sc_dequeue(r, obj_p); +} +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { + return rte_ring_dequeue(r, obj_p); +} +unsigned int rte_ring_count_w(const struct rte_ring *r) { + return rte_ring_count(r); +} +unsigned int rte_ring_free_count_w(const struct rte_ring *r) { + return rte_ring_free_count(r); +} +int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } +int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } +unsigned int rte_ring_get_size_w(const struct rte_ring *r) { + return rte_ring_get_size(r); +} +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { + return rte_ring_get_capacity(r); +} +enum rte_ring_sync_type +rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_prod_sync_type(r); +} +int rte_ring_is_prod_single_w(const struct rte_ring *r) { + return rte_ring_is_prod_single(r); +} +enum rte_ring_sync_type +rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { + return rte_ring_get_cons_sync_type(r); +} +int rte_ring_is_cons_single_w(const struct rte_ring *r) { + return rte_ring_is_cons_single(r); +} +unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + 
unsigned int *free_space) { + return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space) { + return rte_ring_enqueue_burst(r, obj_table, n, free_space); +} +unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_mc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available) { + return rte_ring_sc_dequeue_burst(r, obj_table, n, available); +} +unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { + return rte_ring_dequeue_burst(r, obj_table, n, available); +} +void *rte_memcpy_w(void *dst, const void *src, size_t n) { + return rte_memcpy(dst, src, n); +} +// void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { +// return rte_mov15_or_less(dst, src, n); +// } +void rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } +void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } +void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } +void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } +// void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_generic(dst, src, n); +// } +// void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { +// return rte_memcpy_aligned(dst, src, n); +// } +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { + return rte_mempool_get_header(obj); +} +struct rte_mempool *rte_mempool_from_obj_w(void *obj) { + return rte_mempool_from_obj(obj); +} +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { + return rte_mempool_get_trailer(obj); +} +struct rte_mempool_ops *rte_mempool_get_ops_w(int 
ops_index) { + return rte_mempool_get_ops(ops_index); +} +int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); +} +int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, + unsigned int n) { + return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n) { + return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); +} +struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, + unsigned int lcore_id) { + return rte_mempool_default_cache(mp, lcore_id); +} +void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, + struct rte_mempool *mp) { + rte_mempool_cache_flush(cache, mp); +} +void rte_mempool_do_generic_put_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_do_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + rte_mempool_generic_put(mp, obj_table, n, cache); +} +void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n) { + rte_mempool_put_bulk(mp, obj_table, n); +} +void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { + rte_mempool_put(mp, obj); +} +int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, + struct rte_mempool_cache *cache) { + return rte_mempool_do_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache) { + return rte_mempool_generic_get(mp, obj_table, n, cache); +} +int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n) { + return rte_mempool_get_bulk(mp, 
obj_table, n); +} +int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { + return rte_mempool_get(mp, obj_p); +} +int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, unsigned int n) { + return rte_mempool_get_contig_blocks(mp, first_obj_table, n); +} +int rte_mempool_full_w(const struct rte_mempool *mp) { + return rte_mempool_full(mp); +} +int rte_mempool_empty_w(const struct rte_mempool *mp) { + return rte_mempool_empty(mp); +} +rte_iova_t rte_mempool_virt2iova_w(const void *elt) { + return rte_mempool_virt2iova(elt); +} +void *rte_mempool_get_priv_w(struct rte_mempool *mp) { + return rte_mempool_get_priv(mp); +} +void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } +void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } +void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } +void rte_prefetch_non_temporal_w(const void *p) { + rte_prefetch_non_temporal(p); +} +void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } +void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } +void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } +void rte_cldemote_w(const void *p) { rte_cldemote(p); } +uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } +uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } +uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } +// uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } +// uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } +// uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part1(m); +} +void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { + rte_mbuf_prefetch_part2(m); +} +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_size(mp); +} +rte_iova_t rte_mbuf_iova_get_w(const struct 
rte_mbuf *m) { + return rte_mbuf_iova_get(m); +} +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { + rte_mbuf_iova_set(m, iova); +} +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova(mb); +} +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { + return rte_mbuf_data_iova_default(mb); +} +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { + return rte_mbuf_from_indirect(mi); +} +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { + return rte_mbuf_buf_addr(mb, mp); +} +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { + return rte_mbuf_data_addr_default(mb); +} +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } +void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } +uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { + return rte_pktmbuf_priv_flags(mp); +} +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { + return rte_mbuf_refcnt_read(m); +} +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { + rte_mbuf_refcnt_set(m, new_value); +} +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { + return rte_mbuf_refcnt_update(m, value); +} +uint16_t +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { + return rte_mbuf_ext_refcnt_read(shinfo); +} +void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, + uint16_t new_value) { + rte_mbuf_ext_refcnt_set(shinfo, new_value); +} +uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, + int16_t value) { + return rte_mbuf_ext_refcnt_update(shinfo, value); +} +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { + return rte_mbuf_raw_alloc(mp); +} +void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { + return rte_pktmbuf_data_room_size(mp); +} +void 
rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { + rte_pktmbuf_reset_headroom(m); +} +void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { + return rte_pktmbuf_alloc(mp); +} +int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned int count) { + return rte_pktmbuf_alloc_bulk(pool, mbufs, count); +} +struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, + void *fcb_opaque) { + return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, + fcb_opaque); +} +void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo) { + rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); +} +void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, + const struct rte_mbuf *msrc) { + rte_mbuf_dynfield_copy(mdst, msrc); +} +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { + rte_pktmbuf_attach(mi, m); +} +void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { + return rte_pktmbuf_prefree_seg(m); +} +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } +void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { + rte_pktmbuf_refcnt_update(m, v); +} +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_headroom(m); +} +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { + return rte_pktmbuf_tailroom(m); +} +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m) { + return rte_pktmbuf_lastseg(m); +} +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_prepend(m, len); +} +char 
*rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_append(m, len); +} +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_adj(m, len); +} +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { + return rte_pktmbuf_trim(m, len); +} +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { + return rte_pktmbuf_is_contiguous(m); +} +const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf) { + return rte_pktmbuf_read(m, off, len, buf); +} +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { + return rte_pktmbuf_chain(head, tail); +} +uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, + uint64_t tso, uint64_t ol3, uint64_t ol2, + uint64_t unused) { + return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); +} +int rte_validate_tx_offload_w(const struct rte_mbuf *m) { + return rte_validate_tx_offload(m); +} +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { + return rte_pktmbuf_linearize(mbuf); +} +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_queue_get(m); +} +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_traffic_class_get(m); +} +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { + return rte_mbuf_sched_color_get(m); +} +void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, uint8_t *color) { + rte_mbuf_sched_get(m, queue_id, traffic_class, color); +} +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { + rte_mbuf_sched_queue_set(m, queue_id); +} +void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, + uint8_t traffic_class) { + rte_mbuf_sched_traffic_class_set(m, traffic_class); +} +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { + rte_mbuf_sched_color_set(m, color); +} +void rte_mbuf_sched_set_w(struct rte_mbuf *m, 
uint32_t queue_id, + uint8_t traffic_class, uint8_t color) { + rte_mbuf_sched_set(m, queue_id, traffic_class, color); +} +int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, + const struct rte_ether_addr *ea2) { + return rte_is_same_ether_addr(ea1, ea2); +} +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_zero_ether_addr(ea); +} +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_unicast_ether_addr(ea); +} +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_multicast_ether_addr(ea); +} +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_broadcast_ether_addr(ea); +} +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_universal_ether_addr(ea); +} +int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_local_admin_ether_addr(ea); +} +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { + return rte_is_valid_assigned_ether_addr(ea); +} +void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, + struct rte_ether_addr *ea_to) { + rte_ether_addr_copy(ea_from, ea_to); +} +int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } +int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { + return rte_bitmap_get_memory_footprint(n_bits); +} +struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init(n_bits, mem, mem_size); +} +struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size) { + return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); +} +void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } +void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } +void rte_bitmap_prefetch0_w(struct rte_bitmap 
*bmp, uint32_t pos) { + rte_bitmap_prefetch0(bmp, pos); +} +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { + return rte_bitmap_get(bmp, pos); +} +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_set(bmp, pos); +} +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, + uint64_t slab) { + rte_bitmap_set_slab(bmp, pos, slab); +} +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { + rte_bitmap_clear(bmp, pos); +} +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { + return rte_bitmap_scan(bmp, pos, slab); +} +uint16_t rte_raw_cksum_w(const void *buf, size_t len) { + return rte_raw_cksum(buf, len); +} +int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, + uint16_t *cksum) { + return rte_raw_cksum_mbuf(m, off, len, cksum); +} +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_hdr_len(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum(ipv4_hdr); +} +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { + return rte_ipv4_cksum_simple(ipv4_hdr); +} +uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + uint64_t ol_flags) { + return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +} +uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); +} +uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); +} +int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr) { + return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); +} +int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off) { + return rte_ipv4_udptcp_cksum_mbuf_verify(m, 
ipv4_hdr, l4_off); +} +bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b) { + return rte_ipv6_addr_eq(a, b); +} +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { + rte_ipv6_addr_mask(ip, depth); +} +bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b, uint8_t depth) { + return rte_ipv6_addr_eq_prefix(a, b, depth); +} +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { + return rte_ipv6_mask_depth(mask); +} +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_unspec(ip); +} +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_loopback(ip); +} +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_linklocal(ip); +} +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_sitelocal(ip); +} +bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4compat(ip); +} +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_v4mapped(ip); +} +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_addr_is_mcast(ip); +} +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { + return rte_ipv6_mc_scope(ip); +} +void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, + const struct rte_ether_addr *mac) { + rte_ipv6_llocal_from_ethernet(ip, mac); +} +void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, + const struct rte_ipv6_addr *ip) { + rte_ipv6_solnode_from_addr(sol, ip); +} +void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, + const struct rte_ipv6_addr *ip) { + rte_ether_mcast_from_ipv6(mac, ip); +} +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { + return rte_ipv6_check_version(ip); +} +uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr 
*ipv6_hdr, + uint64_t ol_flags) { + return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +} +uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); +} +uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); +} +int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr) { + return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); +} +int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off) { + return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); +} +int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { + return rte_ipv6_get_next_ext(p, proto, ext_len); +} +enum rte_color +rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, + struct rte_meter_srtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_srtcm_color_aware_check_w( + struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color +rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p, + uint64_t time, uint32_t pkt_len) { + return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_color_aware_check_w( + struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color) { + return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); +} +enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct 
rte_meter_trtcm_rfc4115_profile *p, uint64_t time, + uint32_t pkt_len) { + return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); +} +enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, + enum rte_color pkt_color) { + return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, + pkt_color); +} +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { + return rte_eth_rss_hf_refine(rss_hf); +} + +uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { + return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); +} +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_rx_queue_count(port_id, queue_id); +} +int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_rx_descriptor_status(port_id, queue_id, offset); +} +int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset) { + return rte_eth_tx_descriptor_status(port_id, queue_id, offset); +} +uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); +} +uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer) { + return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); +} +uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer, + struct rte_mbuf *tx_pkt) { + return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); +} +uint16_t +rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t 
rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info) { + return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, + recycle_rxq_info); +} +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { + return rte_eth_tx_queue_count(port_id, queue_id); +} +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { + return rte_flow_dynf_metadata_get(m); +} +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { + rte_flow_dynf_metadata_set(m, v); +} +int rte_flow_dynf_metadata_avail_w(void) { + return rte_flow_dynf_metadata_avail(); +} +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { + return rte_hash_crc_1byte(data, init_val); +} +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { + return rte_hash_crc_2byte(data, init_val); +} +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { + return rte_hash_crc_4byte(data, init_val); +} +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { + return rte_hash_crc_8byte(data, init_val); +} +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, + uint32_t init_val) { + return rte_hash_crc(data, data_len, init_val); +} +void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_2hashes(key, length, pc, pb); +} +void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, + uint32_t *pb) { + rte_jhash_32b_2hashes(k, length, pc, pb); +} +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { + return rte_jhash(key, length, initval); +} +uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { + return rte_jhash_32b(k, length, initval); +} +uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, + uint32_t initval) { + return rte_jhash_3words(a, b, c, initval); +} +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { + return 
rte_jhash_2words(a, b, initval); +} +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { + return rte_jhash_1word(a, initval); +} +uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key) { + return rte_fbk_hash_get_bucket(ht, key); +} +int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value, + uint32_t bucket) { + return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); +} +int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, + uint16_t value) { + return rte_fbk_hash_add_key(ht, key, value); +} +int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_delete_key(ht, key); +} +int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket) { + return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); +} +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { + return rte_fbk_hash_lookup(ht, key); +} +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { + rte_fbk_hash_clear_all(ht); +} +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { + return rte_fbk_hash_get_load_factor(ht); +} +void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_online(v, thread_id); +} +void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, + unsigned int thread_id) { + rte_rcu_qsbr_thread_offline(v, thread_id); +} +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_lock(v, thread_id); +} +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_unlock(v, thread_id); +} +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { + return 
rte_rcu_qsbr_start(v); +} +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { + rte_rcu_qsbr_quiescent(v, thread_id); +} +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { + return rte_rcu_qsbr_check(v, t, wait); +} +uint8_t rte_read8_relaxed_w(const void *addr) { + return rte_read8_relaxed(addr); +} +uint16_t rte_read16_relaxed_w(const void *addr) { + return rte_read16_relaxed(addr); +} +uint32_t rte_read32_relaxed_w(const void *addr) { + return rte_read32_relaxed(addr); +} +uint64_t rte_read64_relaxed_w(const void *addr) { + return rte_read64_relaxed(addr); +} +void rte_write8_relaxed_w(uint8_t value, void *addr) { + rte_write8_relaxed(value, addr); +} +void rte_write16_relaxed_w(uint16_t value, void *addr) { + rte_write16_relaxed(value, addr); +} +void rte_write32_relaxed_w(uint32_t value, void *addr) { + rte_write32_relaxed(value, addr); +} +void rte_write64_relaxed_w(uint64_t value, void *addr) { + rte_write64_relaxed(value, addr); +} +uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } +uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } +uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } +uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } +void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } +void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } +void rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } +void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } +void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { + rte_write32_wc_relaxed(value, addr); +} +void rte_write32_wc_w(uint32_t value, void *addr) { + rte_write32_wc(value, addr); +} +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_lock(msl, me); +} +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + rte_mcslock_unlock(msl, me); +} +int 
rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { + return rte_mcslock_trylock(msl, me); +} +int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { + return rte_mcslock_is_locked(msl); +} +void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } +void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } +void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } +void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } +void rte_pflock_write_unlock_w(rte_pflock_t *pf) { + rte_pflock_write_unlock(pf); +} +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { + return rte_reciprocal_divide(a, R); +} +uint64_t rte_reciprocal_divide_u64_w(uint64_t a, + const struct rte_reciprocal_u64 *R) { + return rte_reciprocal_divide_u64(a, R); +} +void rte_seqcount_init_w(rte_seqcount_t *seqcount) { + rte_seqcount_init(seqcount); +} +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { + return rte_seqcount_read_begin(seqcount); +} +bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, + uint32_t begin_sn) { + return rte_seqcount_read_retry(seqcount, begin_sn); +} +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_begin(seqcount); +} +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { + rte_seqcount_write_end(seqcount); +} +void rte_seqlock_init_w(rte_seqlock_t *seqlock) { rte_seqlock_init(seqlock); } +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { + return rte_seqlock_read_begin(seqlock); +} +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { + return rte_seqlock_read_retry(seqlock, begin_sn); +} +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_lock(seqlock); +} +void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { + rte_seqlock_write_unlock(seqlock); +} +unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, + unsigned 
int n) { + return rte_stack_push(s, obj_table, n); +} +unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, + unsigned int n) { + return rte_stack_pop(s, obj_table, n); +} +unsigned int rte_stack_count_w(struct rte_stack *s) { + return rte_stack_count(s); +} +unsigned int rte_stack_free_count_w(struct rte_stack *s) { + return rte_stack_free_count(s); +} +uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss(input_tuple, input_len, rss_key); +} +uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key) { + return rte_softrss_be(input_tuple, input_len, rss_key); +} +void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } +void rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } +void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { + rte_ticketlock_unlock(tl); +} +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { + return rte_ticketlock_trylock(tl); +} +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { + return rte_ticketlock_is_locked(tl); +} +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_init(tlr); +} +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_lock(tlr); +} +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { + rte_ticketlock_recursive_unlock(tlr); +} +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { + return rte_ticketlock_recursive_trylock(tlr); +} +uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, + uint64_t cycles) { + return rte_cyclecounter_cycles_to_ns(tc, cycles); +} +uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, + uint64_t cycle_now) { + return rte_timecounter_update(tc, cycle_now); +} +uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { + return rte_timespec_to_ns(ts); +} +struct timespec 
rte_ns_to_timespec_w(uint64_t nsec) { + return rte_ns_to_timespec(nsec); +} +bool rte_trace_feature_is_enabled_w(void) { + return rte_trace_feature_is_enabled(); +} diff --git a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h index 5a57ffc6d..3ebfe21e7 100644 --- a/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h +++ b/nix/pkgs/dpdk-wrapper/src/dpdk_wrapper.h @@ -298,1517 +298,737 @@ enum rte_eth_rx_offload : uint64_t { RX_OFFLOAD_BUFFER_SPLIT = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, }; - -int rte_errno_get() { return rte_errno; } +int rte_errno_get(); // Static wrappers -int rte_is_aligned_w(const void *const ptr, const unsigned int align) { - return rte_is_aligned(ptr, align); -} -void rte_atomic_thread_fence_w(rte_memory_order memorder) { - rte_atomic_thread_fence(memorder); -} -int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src) { - return rte_atomic16_cmpset(dst, exp, src); -} -uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val) { - return rte_atomic16_exchange(dst, val); -} -void rte_atomic16_init_w(rte_atomic16_t *v) { rte_atomic16_init(v); } -int16_t rte_atomic16_read_w(const rte_atomic16_t *v) { - return rte_atomic16_read(v); -} -void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value) { - rte_atomic16_set(v, new_value); -} -void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc) { - rte_atomic16_add(v, inc); -} -void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec) { - rte_atomic16_sub(v, dec); -} -void rte_atomic16_inc_w(rte_atomic16_t *v) { rte_atomic16_inc(v); } -void rte_atomic16_dec_w(rte_atomic16_t *v) { rte_atomic16_dec(v); } -int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc) { - return rte_atomic16_add_return(v, inc); -} -int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec) { - return rte_atomic16_sub_return(v, dec); -} -int rte_atomic16_inc_and_test_w(rte_atomic16_t *v) { - return rte_atomic16_inc_and_test(v); -} -int rte_atomic16_dec_and_test_w(rte_atomic16_t 
*v) { - return rte_atomic16_dec_and_test(v); -} -int rte_atomic16_test_and_set_w(rte_atomic16_t *v) { - return rte_atomic16_test_and_set(v); -} -void rte_atomic16_clear_w(rte_atomic16_t *v) { rte_atomic16_clear(v); } -int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src) { - return rte_atomic32_cmpset(dst, exp, src); -} -uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val) { - return rte_atomic32_exchange(dst, val); -} -void rte_atomic32_init_w(rte_atomic32_t *v) { rte_atomic32_init(v); } -int32_t rte_atomic32_read_w(const rte_atomic32_t *v) { - return rte_atomic32_read(v); -} -void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value) { - rte_atomic32_set(v, new_value); -} -void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc) { - rte_atomic32_add(v, inc); -} -void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec) { - rte_atomic32_sub(v, dec); -} -void rte_atomic32_inc_w(rte_atomic32_t *v) { rte_atomic32_inc(v); } -void rte_atomic32_dec_w(rte_atomic32_t *v) { rte_atomic32_dec(v); } -int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc) { - return rte_atomic32_add_return(v, inc); -} -int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec) { - return rte_atomic32_sub_return(v, dec); -} -int rte_atomic32_inc_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_inc_and_test(v); -} -int rte_atomic32_dec_and_test_w(rte_atomic32_t *v) { - return rte_atomic32_dec_and_test(v); -} -int rte_atomic32_test_and_set_w(rte_atomic32_t *v) { - return rte_atomic32_test_and_set(v); -} -void rte_atomic32_clear_w(rte_atomic32_t *v) { rte_atomic32_clear(v); } -int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src) { - return rte_atomic64_cmpset(dst, exp, src); -} -uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val) { - return rte_atomic64_exchange(dst, val); -} -void rte_atomic64_init_w(rte_atomic64_t *v) { rte_atomic64_init(v); } -int64_t rte_atomic64_read_w(rte_atomic64_t *v) { return rte_atomic64_read(v); } 
-void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value) { - rte_atomic64_set(v, new_value); -} -void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc) { - rte_atomic64_add(v, inc); -} -void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec) { - rte_atomic64_sub(v, dec); -} -void rte_atomic64_inc_w(rte_atomic64_t *v) { rte_atomic64_inc(v); } -void rte_atomic64_dec_w(rte_atomic64_t *v) { rte_atomic64_dec(v); } -int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc) { - return rte_atomic64_add_return(v, inc); -} -int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec) { - return rte_atomic64_sub_return(v, dec); -} -int rte_atomic64_inc_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_inc_and_test(v); -} -int rte_atomic64_dec_and_test_w(rte_atomic64_t *v) { - return rte_atomic64_dec_and_test(v); -} -int rte_atomic64_test_and_set_w(rte_atomic64_t *v) { - return rte_atomic64_test_and_set(v); -} -void rte_atomic64_clear_w(rte_atomic64_t *v) { rte_atomic64_clear(v); } -void rte_smp_mb_w(void) { rte_smp_mb(); } -uint64_t rte_get_tsc_cycles_w(void) { return rte_get_tsc_cycles(); } -uint64_t rte_get_timer_cycles_w(void) { return rte_get_timer_cycles(); } -uint64_t rte_get_timer_hz_w(void) { return rte_get_timer_hz(); } -void rte_delay_ms_w(unsigned int ms) { rte_delay_ms(ms); } -uint64_t rte_rdtsc_w(void) { return rte_rdtsc(); } -uint64_t rte_rdtsc_precise_w(void) { return rte_rdtsc_precise(); } -size_t rte_strlcpy_w(char *dst, const char *src, size_t size) { - return rte_strlcpy(dst, src, size); -} -size_t rte_strlcat_w(char *dst, const char *src, size_t size) { - return rte_strlcat(dst, src, size); -} -const char *rte_str_skip_leading_spaces_w(const char *src) { - return rte_str_skip_leading_spaces(src); -} -void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src) { - rte_uuid_copy(dst, src); -} -int rte_gettid_w(void) { return rte_gettid(); } -unsigned int rte_lcore_id_w(void) { return rte_lcore_id(); } -void rte_pause_w(void) { 
rte_pause(); } +void rte_atomic_thread_fence_w(rte_memory_order memorder); +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src); +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val); +void rte_atomic16_init_w(rte_atomic16_t *v); +int16_t rte_atomic16_read_w(const rte_atomic16_t *v); +void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value); +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc); +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec); +void rte_atomic16_inc_w(rte_atomic16_t *v); +void rte_atomic16_dec_w(rte_atomic16_t *v); +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc); +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec); +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v); +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v); +int rte_atomic16_test_and_set_w(rte_atomic16_t *v); +void rte_atomic16_clear_w(rte_atomic16_t *v); +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src); +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val); +void rte_atomic32_init_w(rte_atomic32_t *v); +int32_t rte_atomic32_read_w(const rte_atomic32_t *v); +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value); +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc); +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec); +void rte_atomic32_inc_w(rte_atomic32_t *v); +void rte_atomic32_dec_w(rte_atomic32_t *v); +int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc); +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec); +int rte_atomic32_inc_and_test_w(rte_atomic32_t *v); +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v); +int rte_atomic32_test_and_set_w(rte_atomic32_t *v); +void rte_atomic32_clear_w(rte_atomic32_t *v); +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src); +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val); +void rte_atomic64_init_w(rte_atomic64_t *v); +int64_t rte_atomic64_read_w(rte_atomic64_t 
*v); +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value); +void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc); +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec); +void rte_atomic64_inc_w(rte_atomic64_t *v); +void rte_atomic64_dec_w(rte_atomic64_t *v); +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc); +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec); +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v); +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v); +int rte_atomic64_test_and_set_w(rte_atomic64_t *v); +void rte_atomic64_clear_w(rte_atomic64_t *v); +void rte_smp_mb_w(void); +uint64_t rte_get_tsc_cycles_w(void); +uint64_t rte_get_timer_cycles_w(void); +uint64_t rte_get_timer_hz_w(void); +void rte_delay_ms_w(unsigned int ms); +uint64_t rte_rdtsc_w(void); +uint64_t rte_rdtsc_precise_w(void); +size_t rte_strlcpy_w(char *dst, const char *src, size_t size); +size_t rte_strlcat_w(char *dst, const char *src, size_t size); +const char *rte_str_skip_leading_spaces_w(const char *src); +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src); +int rte_gettid_w(void); +unsigned int rte_lcore_id_w(void); +void rte_pause_w(void); void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_16(addr, expected, memorder); -} + rte_memory_order memorder); void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_32(addr, expected, memorder); -} + rte_memory_order memorder); void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, - rte_memory_order memorder) { - rte_wait_until_equal_64(addr, expected, memorder); -} -void rte_spinlock_init_w(rte_spinlock_t *sl) { rte_spinlock_init(sl); } -void rte_spinlock_lock_w(rte_spinlock_t *sl) { rte_spinlock_lock(sl); } -void rte_spinlock_unlock_w(rte_spinlock_t *sl) { rte_spinlock_unlock(sl); } -int rte_spinlock_trylock_w(rte_spinlock_t *sl) { - return 
rte_spinlock_trylock(sl); -} -int rte_spinlock_is_locked_w(rte_spinlock_t *sl) { - return rte_spinlock_is_locked(sl); -} -int rte_tm_supported_w(void) { return rte_tm_supported(); } -void rte_spinlock_lock_tm_w(rte_spinlock_t *sl) { rte_spinlock_lock_tm(sl); } -void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl) { - rte_spinlock_unlock_tm(sl); -} -int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl) { - return rte_spinlock_trylock_tm(sl); -} -void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_init(slr); -} -void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock(slr); -} -void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock(slr); -} -int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock(slr); -} -void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_lock_tm(slr); -} -void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr) { - rte_spinlock_recursive_unlock_tm(slr); -} -int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr) { - return rte_spinlock_recursive_trylock_tm(slr); -} -// unsigned int rte_xbegin_w(void) { return rte_xbegin(); } -// void rte_xend_w(void) { rte_xend(); } -// int rte_xtest_w(void) { return rte_xtest(); } -// int rte_try_tm_w(int *lock) { return rte_try_tm(lock); } -uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_get32(nr, addr); -} -void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr) { - rte_bit_relaxed_set32(nr, addr); -} -void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr) { - rte_bit_relaxed_clear32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr) { - return rte_bit_relaxed_test_and_set32(nr, addr); -} -uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr) { - 
return rte_bit_relaxed_test_and_clear32(nr, addr); -} -uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_get64(nr, addr); -} -void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_set64(nr, addr); -} -void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr) { - rte_bit_relaxed_clear64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_set64(nr, addr); -} -uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr) { - return rte_bit_relaxed_test_and_clear64(nr, addr); -} -unsigned int rte_clz32_w(uint32_t v) { return rte_clz32(v); } -unsigned int rte_clz64_w(uint64_t v) { return rte_clz64(v); } -unsigned int rte_ctz32_w(uint32_t v) { return rte_ctz32(v); } -unsigned int rte_ctz64_w(uint64_t v) { return rte_ctz64(v); } -unsigned int rte_popcount32_w(uint32_t v) { return rte_popcount32(v); } -unsigned int rte_popcount64_w(uint64_t v) { return rte_popcount64(v); } -uint32_t rte_combine32ms1b_w(uint32_t x) { return rte_combine32ms1b(x); } -uint64_t rte_combine64ms1b_w(uint64_t v) { return rte_combine64ms1b(v); } -uint32_t rte_bsf32_w(uint32_t v) { return rte_bsf32(v); } -int rte_bsf32_safe_w(uint32_t v, uint32_t *pos) { - return rte_bsf32_safe(v, pos); -} -uint32_t rte_bsf64_w(uint64_t v) { return rte_bsf64(v); } -int rte_bsf64_safe_w(uint64_t v, uint32_t *pos) { - return rte_bsf64_safe(v, pos); -} -uint32_t rte_fls_u32_w(uint32_t x) { return rte_fls_u32(x); } -uint32_t rte_fls_u64_w(uint64_t x) { return rte_fls_u64(x); } -int rte_is_power_of_2_w(uint32_t n) { return rte_is_power_of_2(n); } -uint32_t rte_align32pow2_w(uint32_t x) { return rte_align32pow2(x); } -uint32_t rte_align32prevpow2_w(uint32_t x) { return rte_align32prevpow2(x); } -uint64_t rte_align64pow2_w(uint64_t v) { return rte_align64pow2(v); } -uint64_t rte_align64prevpow2_w(uint64_t v) { return rte_align64prevpow2(v); } -uint32_t 
rte_log2_u32_w(uint32_t v) { return rte_log2_u32(v); } -uint32_t rte_log2_u64_w(uint64_t v) { return rte_log2_u64(v); } -void rte_rwlock_init_w(rte_rwlock_t *rwl) { rte_rwlock_init(rwl); } -void rte_rwlock_read_lock_w(rte_rwlock_t *rwl) { rte_rwlock_read_lock(rwl); } -int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_read_trylock(rwl); -} -void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock(rwl); -} -int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_trylock(rwl); -} -void rte_rwlock_write_lock_w(rte_rwlock_t *rwl) { rte_rwlock_write_lock(rwl); } -void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock(rwl); -} -int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl) { - return rte_rwlock_write_is_locked(rwl); -} -void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_lock_tm(rwl); -} -void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_read_unlock_tm(rwl); -} -void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_lock_tm(rwl); -} -void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl) { - rte_rwlock_write_unlock_tm(rwl); -} + rte_memory_order memorder); +void rte_spinlock_init_w(rte_spinlock_t *sl); +void rte_spinlock_lock_w(rte_spinlock_t *sl); +void rte_spinlock_unlock_w(rte_spinlock_t *sl); +int rte_spinlock_trylock_w(rte_spinlock_t *sl); +int rte_spinlock_is_locked_w(rte_spinlock_t *sl); +int rte_tm_supported_w(void); +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl); +void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl); +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl); +void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr); +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr); +void 
rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr); +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr); +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr); +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr); +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr); +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr); +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr); +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr); +void rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr); +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr); +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr); +uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr); +unsigned int rte_clz32_w(uint32_t v); +unsigned int rte_clz64_w(uint64_t v); +unsigned int rte_ctz32_w(uint32_t v); +unsigned int rte_ctz64_w(uint64_t v); +unsigned int rte_popcount32_w(uint32_t v); +unsigned int rte_popcount64_w(uint64_t v); +uint32_t rte_combine32ms1b_w(uint32_t x); +uint64_t rte_combine64ms1b_w(uint64_t v); +uint32_t rte_bsf32_w(uint32_t v); +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos); +uint32_t rte_bsf64_w(uint64_t v); +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos); +uint32_t rte_fls_u32_w(uint32_t x); +uint32_t rte_fls_u64_w(uint64_t x); +int rte_is_power_of_2_w(uint32_t n); +uint32_t rte_align32pow2_w(uint32_t x); +uint32_t rte_align32prevpow2_w(uint32_t x); +uint64_t rte_align64pow2_w(uint64_t v); +uint64_t rte_align64prevpow2_w(uint64_t v); +uint32_t rte_log2_u32_w(uint32_t v); +uint32_t rte_log2_u64_w(uint64_t v); +void rte_rwlock_init_w(rte_rwlock_t *rwl); +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl); +int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl); +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl); +int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl); +void rte_rwlock_write_lock_w(rte_rwlock_t 
*rwl); +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl); +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl); +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl); unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, void *const 
*obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_bulk(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_bulk(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_hts_enqueue_burst(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_hts_dequeue_burst(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, 
unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_bulk(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_bulk(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_rts_enqueue_burst(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_rts_dequeue_burst(r, obj_table, n, available); -} -uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_prod_htd_max(r); -} -int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_prod_htd_max(r, v); -} -uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r) { - return rte_ring_get_cons_htd_max(r); -} -int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v) { - return rte_ring_set_cons_htd_max(r, v); -} + unsigned int *available); +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r); +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v); +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r); +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v); unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_mp_enqueue_elem(r, obj, esize); -} + unsigned int esize); int rte_ring_sp_enqueue_elem_w(struct 
rte_ring *r, void *obj, - unsigned int esize) { - return rte_ring_sp_enqueue_elem(r, obj, esize); -} -int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize) { - return rte_ring_enqueue_elem(r, obj, esize); -} + unsigned int esize); +int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize); unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem(r, obj_table, esize, n, available); -} + unsigned int *available); int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_mc_dequeue_elem(r, obj_p, esize); -} + unsigned int esize); int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_sc_dequeue_elem(r, obj_p, esize); -} + unsigned int esize); int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, - unsigned int esize) { - return rte_ring_dequeue_elem(r, obj_p, esize); -} + unsigned int esize); unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) 
{ - return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem(r, obj_table, esize, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_elem_start(r, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_bulk_start(r, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_elem_start(r, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst_start(r, n, free_space); -} + unsigned int *free_space); void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, 
const void *obj_table, - unsigned int esize, unsigned int n) { - rte_ring_enqueue_elem_finish(r, obj_table, esize, n); -} + unsigned int esize, unsigned int n); void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, - unsigned int n) { - rte_ring_enqueue_finish(r, obj_table, n); -} + unsigned int n); unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_elem_start(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_bulk_start(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_elem_start(r, obj_table, esize, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_dequeue_burst_start(r, obj_table, n, available); -} -void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_elem_finish(r, n); -} -void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_finish(r, n); -} + unsigned int *available); +void rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n); unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, zcd, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, 
unsigned int n, struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_bulk_start(r, n, zcd, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_zc_burst_elem_start_w( struct rte_ring *r, unsigned int esize, unsigned int n, - struct rte_ring_zc_data *zcd, unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_elem_start(r, esize, n, zcd, free_space); -} + struct rte_ring_zc_data *zcd, unsigned int *free_space); unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, - unsigned int *free_space) { - return rte_ring_enqueue_zc_burst_start(r, n, zcd, free_space); -} -void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_elem_finish(r, n); -} -void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_enqueue_zc_finish(r, n); -} + unsigned int *free_space); +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n); unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, zcd, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_bulk_start(r, n, zcd, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_zc_burst_elem_start_w( struct rte_ring *r, unsigned int esize, unsigned int n, - struct rte_ring_zc_data *zcd, unsigned int *available) { - return rte_ring_dequeue_zc_burst_elem_start(r, esize, n, zcd, available); -} + struct rte_ring_zc_data *zcd, unsigned int *available); unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, unsigned int n, 
struct rte_ring_zc_data *zcd, - unsigned int *available) { - return rte_ring_dequeue_zc_burst_start(r, n, zcd, available); -} -void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_elem_finish(r, n); -} -void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n) { - rte_ring_dequeue_zc_finish(r, n); -} + unsigned int *available); +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n); unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_bulk(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_bulk(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, - unsigned int n, unsigned int *free_space) { - return rte_ring_enqueue_bulk(r, obj_table, n, free_space); -} -int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_mp_enqueue(r, obj); -} -int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_sp_enqueue(r, obj); -} -int rte_ring_enqueue_w(struct rte_ring *r, void *obj) { - return rte_ring_enqueue(r, obj); -} + unsigned int n, unsigned int *free_space); +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj); +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj); +int rte_ring_enqueue_w(struct rte_ring *r, void *obj); unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_bulk(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, unsigned 
int n, - unsigned int *available) { - return rte_ring_sc_dequeue_bulk(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_bulk(r, obj_table, n, available); -} -int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_mc_dequeue(r, obj_p); -} -int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_sc_dequeue(r, obj_p); -} -int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p) { - return rte_ring_dequeue(r, obj_p); -} -unsigned int rte_ring_count_w(const struct rte_ring *r) { - return rte_ring_count(r); -} -unsigned int rte_ring_free_count_w(const struct rte_ring *r) { - return rte_ring_free_count(r); -} -int rte_ring_full_w(const struct rte_ring *r) { return rte_ring_full(r); } -int rte_ring_empty_w(const struct rte_ring *r) { return rte_ring_empty(r); } -unsigned int rte_ring_get_size_w(const struct rte_ring *r) { - return rte_ring_get_size(r); -} -unsigned int rte_ring_get_capacity_w(const struct rte_ring *r) { - return rte_ring_get_capacity(r); -} -enum rte_ring_sync_type -rte_ring_get_prod_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_prod_sync_type(r); -} -int rte_ring_is_prod_single_w(const struct rte_ring *r) { - return rte_ring_is_prod_single(r); -} -enum rte_ring_sync_type -rte_ring_get_cons_sync_type_w(const struct rte_ring *r) { - return rte_ring_get_cons_sync_type(r); -} -int rte_ring_is_cons_single_w(const struct rte_ring *r) { - return rte_ring_is_cons_single(r); -} + unsigned int n, unsigned int *available); +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p); +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p); +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p); +unsigned int rte_ring_count_w(const struct rte_ring *r); +unsigned int rte_ring_free_count_w(const struct rte_ring *r); +int rte_ring_full_w(const 
struct rte_ring *r); +int rte_ring_empty_w(const struct rte_ring *r); +unsigned int rte_ring_get_size_w(const struct rte_ring *r); +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r); +enum rte_ring_sync_type rte_ring_get_prod_sync_type_w(const struct rte_ring *r); +int rte_ring_is_prod_single_w(const struct rte_ring *r); +enum rte_ring_sync_type rte_ring_get_cons_sync_type_w(const struct rte_ring *r); +int rte_ring_is_cons_single_w(const struct rte_ring *r); unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_mp_enqueue_burst(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_sp_enqueue_burst(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, void *const *obj_table, unsigned int n, - unsigned int *free_space) { - return rte_ring_enqueue_burst(r, obj_table, n, free_space); -} + unsigned int *free_space); unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_mc_dequeue_burst(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, unsigned int n, - unsigned int *available) { - return rte_ring_sc_dequeue_burst(r, obj_table, n, available); -} + unsigned int *available); unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, - unsigned int n, unsigned int *available) { - return rte_ring_dequeue_burst(r, obj_table, n, available); -} -void *rte_memcpy_w(void *dst, const void *src, size_t n) { - return rte_memcpy(dst, src, n); -} -// void *rte_mov15_or_less_w(void *dst, const void *src, size_t n) { -// return rte_mov15_or_less(dst, src, n); -// } -void 
rte_mov16_w(uint8_t *dst, const uint8_t *src) { rte_mov16(dst, src); } -void rte_mov32_w(uint8_t *dst, const uint8_t *src) { rte_mov32(dst, src); } -void rte_mov64_w(uint8_t *dst, const uint8_t *src) { rte_mov64(dst, src); } -void rte_mov256_w(uint8_t *dst, const uint8_t *src) { rte_mov256(dst, src); } -// void *rte_memcpy_generic_w(void *dst, const void *src, size_t n) { -// return rte_memcpy_generic(dst, src, n); -// } -// void *rte_memcpy_aligned_w(void *dst, const void *src, size_t n) { -// return rte_memcpy_aligned(dst, src, n); -// } -struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj) { - return rte_mempool_get_header(obj); -} -struct rte_mempool *rte_mempool_from_obj_w(void *obj) { - return rte_mempool_from_obj(obj); -} -struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj) { - return rte_mempool_get_trailer(obj); -} -struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index) { - return rte_mempool_get_ops(ops_index); -} + unsigned int n, unsigned int *available); +void *rte_memcpy_w(void *dst, const void *src, size_t n); +void rte_mov16_w(uint8_t *dst, const uint8_t *src); +void rte_mov32_w(uint8_t *dst, const uint8_t *src); +void rte_mov64_w(uint8_t *dst, const uint8_t *src); +void rte_mov256_w(uint8_t *dst, const uint8_t *src); +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj); +struct rte_mempool *rte_mempool_from_obj_w(void *obj); +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj); +struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index); int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_bulk(mp, obj_table, n); -} + unsigned int n); int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, void **first_obj_table, - unsigned int n) { - return rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n); -} + unsigned int n); int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, - void *const *obj_table, 
unsigned int n) { - return rte_mempool_ops_enqueue_bulk(mp, obj_table, n); -} + void *const *obj_table, unsigned int n); struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, - unsigned int lcore_id) { - return rte_mempool_default_cache(mp, lcore_id); -} + unsigned int lcore_id); void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, - struct rte_mempool *mp) { - rte_mempool_cache_flush(cache, mp); -} + struct rte_mempool *mp); void rte_mempool_do_generic_put_w(struct rte_mempool *mp, void *const *obj_table, unsigned int n, - struct rte_mempool_cache *cache) { - rte_mempool_do_generic_put(mp, obj_table, n, cache); -} + struct rte_mempool_cache *cache); void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n, - struct rte_mempool_cache *cache) { - rte_mempool_generic_put(mp, obj_table, n, cache); -} + unsigned int n, struct rte_mempool_cache *cache); void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, - unsigned int n) { - rte_mempool_put_bulk(mp, obj_table, n); -} -void rte_mempool_put_w(struct rte_mempool *mp, void *obj) { - rte_mempool_put(mp, obj); -} + unsigned int n); +void rte_mempool_put_w(struct rte_mempool *mp, void *obj); int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, unsigned int n, - struct rte_mempool_cache *cache) { - return rte_mempool_do_generic_get(mp, obj_table, n, cache); -} + struct rte_mempool_cache *cache); int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, - unsigned int n, struct rte_mempool_cache *cache) { - return rte_mempool_generic_get(mp, obj_table, n, cache); -} + unsigned int n, struct rte_mempool_cache *cache); int rte_mempool_get_bulk_w(struct rte_mempool *mp, void **obj_table, - unsigned int n) { - return rte_mempool_get_bulk(mp, obj_table, n); -} -int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p) { - return rte_mempool_get(mp, obj_p); -} + unsigned int n); +int 
rte_mempool_get_w(struct rte_mempool *mp, void **obj_p); int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, - void **first_obj_table, unsigned int n) { - return rte_mempool_get_contig_blocks(mp, first_obj_table, n); -} -int rte_mempool_full_w(const struct rte_mempool *mp) { - return rte_mempool_full(mp); -} -int rte_mempool_empty_w(const struct rte_mempool *mp) { - return rte_mempool_empty(mp); -} -rte_iova_t rte_mempool_virt2iova_w(const void *elt) { - return rte_mempool_virt2iova(elt); -} -void *rte_mempool_get_priv_w(struct rte_mempool *mp) { - return rte_mempool_get_priv(mp); -} -void rte_prefetch0_w(const void *p) { rte_prefetch0(p); } -void rte_prefetch1_w(const void *p) { rte_prefetch1(p); } -void rte_prefetch2_w(const void *p) { rte_prefetch2(p); } -void rte_prefetch_non_temporal_w(const void *p) { - rte_prefetch_non_temporal(p); -} -void rte_prefetch0_write_w(const void *p) { rte_prefetch0_write(p); } -void rte_prefetch1_write_w(const void *p) { rte_prefetch1_write(p); } -void rte_prefetch2_write_w(const void *p) { rte_prefetch2_write(p); } -void rte_cldemote_w(const void *p) { rte_cldemote(p); } -uint16_t rte_constant_bswap16_w(uint16_t x) { return rte_constant_bswap16(x); } -uint32_t rte_constant_bswap32_w(uint32_t x) { return rte_constant_bswap32(x); } -uint64_t rte_constant_bswap64_w(uint64_t x) { return rte_constant_bswap64(x); } -// uint16_t rte_arch_bswap16_w(uint16_t _x) { return rte_arch_bswap16(_x); } -// uint32_t rte_arch_bswap32_w(uint32_t _x) { return rte_arch_bswap32(_x); } -// uint64_t rte_arch_bswap64_w(uint64_t _x) { return rte_arch_bswap64(_x); } -void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part1(m); -} -void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m) { - rte_mbuf_prefetch_part2(m); -} -uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_size(mp); -} -rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m) { - return rte_mbuf_iova_get(m); -} -void 
rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova) { - rte_mbuf_iova_set(m, iova); -} -rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova(mb); -} -rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb) { - return rte_mbuf_data_iova_default(mb); -} -struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi) { - return rte_mbuf_from_indirect(mi); -} -char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp) { - return rte_mbuf_buf_addr(mb, mp); -} -char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb) { - return rte_mbuf_data_addr_default(mb); -} -char *rte_mbuf_to_baddr_w(struct rte_mbuf *md) { return rte_mbuf_to_baddr(md); } -void *rte_mbuf_to_priv_w(struct rte_mbuf *m) { return rte_mbuf_to_priv(m); } -uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp) { - return rte_pktmbuf_priv_flags(mp); -} -uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m) { - return rte_mbuf_refcnt_read(m); -} -void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value) { - rte_mbuf_refcnt_set(m, new_value); -} -uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value) { - return rte_mbuf_refcnt_update(m, value); -} + void **first_obj_table, unsigned int n); +int rte_mempool_full_w(const struct rte_mempool *mp); +int rte_mempool_empty_w(const struct rte_mempool *mp); +rte_iova_t rte_mempool_virt2iova_w(const void *elt); +void *rte_mempool_get_priv_w(struct rte_mempool *mp); +void rte_prefetch0_w(const void *p); +void rte_prefetch1_w(const void *p); +void rte_prefetch2_w(const void *p); +void rte_prefetch_non_temporal_w(const void *p); +void rte_prefetch0_write_w(const void *p); +void rte_prefetch1_write_w(const void *p); +void rte_prefetch2_write_w(const void *p); +void rte_cldemote_w(const void *p); +uint16_t rte_constant_bswap16_w(uint16_t x); +uint32_t rte_constant_bswap32_w(uint32_t x); +uint64_t rte_constant_bswap64_w(uint64_t x); +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m); 
+void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m); +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp); +rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m); +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova); +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb); +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb); +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi); +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp); +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb); +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md); +void *rte_mbuf_to_priv_w(struct rte_mbuf *m); +uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp); +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m); +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value); +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value); uint16_t -rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo) { - return rte_mbuf_ext_refcnt_read(shinfo); -} +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo); void rte_mbuf_ext_refcnt_set_w(struct rte_mbuf_ext_shared_info *shinfo, - uint16_t new_value) { - rte_mbuf_ext_refcnt_set(shinfo, new_value); -} + uint16_t new_value); uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, - int16_t value) { - return rte_mbuf_ext_refcnt_update(shinfo, value); -} -struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp) { - return rte_mbuf_raw_alloc(mp); -} -void rte_mbuf_raw_free_w(struct rte_mbuf *m) { rte_mbuf_raw_free(m); } -uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp) { - return rte_pktmbuf_data_room_size(mp); -} -void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m) { - rte_pktmbuf_reset_headroom(m); -} -void rte_pktmbuf_reset_w(struct rte_mbuf *m) { rte_pktmbuf_reset(m); } -struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp) { - return rte_pktmbuf_alloc(mp); -} + 
int16_t value); +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp); +void rte_mbuf_raw_free_w(struct rte_mbuf *m); +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp); +void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m); +void rte_pktmbuf_reset_w(struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp); int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, - unsigned int count) { - return rte_pktmbuf_alloc_bulk(pool, mbufs, count); -} + unsigned int count); struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, - void *fcb_opaque) { - return rte_pktmbuf_ext_shinfo_init_helper(buf_addr, buf_len, free_cb, - fcb_opaque); -} + void *fcb_opaque); void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, - struct rte_mbuf_ext_shared_info *shinfo) { - rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo); -} + struct rte_mbuf_ext_shared_info *shinfo); void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, - const struct rte_mbuf *msrc) { - rte_mbuf_dynfield_copy(mdst, msrc); -} -void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m) { - rte_pktmbuf_attach(mi, m); -} -void rte_pktmbuf_detach_w(struct rte_mbuf *m) { rte_pktmbuf_detach(m); } -struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m) { - return rte_pktmbuf_prefree_seg(m); -} -void rte_pktmbuf_free_seg_w(struct rte_mbuf *m) { rte_pktmbuf_free_seg(m); } -void rte_pktmbuf_free_w(struct rte_mbuf *m) { rte_pktmbuf_free(m); } -void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v) { - rte_pktmbuf_refcnt_update(m, v); -} -uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_headroom(m); -} -uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m) { - return rte_pktmbuf_tailroom(m); -} -struct rte_mbuf *rte_pktmbuf_lastseg_w(struct 
rte_mbuf *m) { - return rte_pktmbuf_lastseg(m); -} -char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_prepend(m, len); -} -char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_append(m, len); -} -char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_adj(m, len); -} -int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len) { - return rte_pktmbuf_trim(m, len); -} -int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m) { - return rte_pktmbuf_is_contiguous(m); -} + const struct rte_mbuf *msrc); +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m); +void rte_pktmbuf_detach_w(struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m); +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m); +void rte_pktmbuf_free_w(struct rte_mbuf *m); +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v); +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m); +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m); +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len); +char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len); +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len); +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len); +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m); const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, - uint32_t len, void *buf) { - return rte_pktmbuf_read(m, off, len, buf); -} -int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail) { - return rte_pktmbuf_chain(head, tail); -} + uint32_t len, void *buf); +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct rte_mbuf *tail); uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso, uint64_t ol3, uint64_t ol2, - uint64_t unused) { - return rte_mbuf_tx_offload(il2, il3, il4, tso, ol3, ol2, unused); -} -int 
rte_validate_tx_offload_w(const struct rte_mbuf *m) { - return rte_validate_tx_offload(m); -} -int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf) { - return rte_pktmbuf_linearize(mbuf); -} -uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_queue_get(m); -} -uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_traffic_class_get(m); -} -uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m) { - return rte_mbuf_sched_color_get(m); -} + uint64_t unused); +int rte_validate_tx_offload_w(const struct rte_mbuf *m); +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf); +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m); +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m); +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m); void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, - uint8_t *traffic_class, uint8_t *color) { - rte_mbuf_sched_get(m, queue_id, traffic_class, color); -} -void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id) { - rte_mbuf_sched_queue_set(m, queue_id); -} + uint8_t *traffic_class, uint8_t *color); +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id); void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, - uint8_t traffic_class) { - rte_mbuf_sched_traffic_class_set(m, traffic_class); -} -void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color) { - rte_mbuf_sched_color_set(m, color); -} + uint8_t traffic_class); +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color); void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, - uint8_t traffic_class, uint8_t color) { - rte_mbuf_sched_set(m, queue_id, traffic_class, color); -} + uint8_t traffic_class, uint8_t color); int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, - const struct rte_ether_addr *ea2) { - return rte_is_same_ether_addr(ea1, ea2); -} -int 
rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_zero_ether_addr(ea); -} -int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_unicast_ether_addr(ea); -} -int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_multicast_ether_addr(ea); -} -int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_broadcast_ether_addr(ea); -} -int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_universal_ether_addr(ea); -} -int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_local_admin_ether_addr(ea); -} -int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea) { - return rte_is_valid_assigned_ether_addr(ea); -} + const struct rte_ether_addr *ea2); +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea); void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, - struct rte_ether_addr *ea_to) { - rte_ether_addr_copy(ea_from, ea_to); -} -int rte_vlan_strip_w(struct rte_mbuf *m) { return rte_vlan_strip(m); } -int rte_vlan_insert_w(struct rte_mbuf **m) { return rte_vlan_insert(m); } -uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits) { - return rte_bitmap_get_memory_footprint(n_bits); -} + struct rte_ether_addr *ea_to); +int rte_vlan_strip_w(struct rte_mbuf *m); +int rte_vlan_insert_w(struct rte_mbuf **m); +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits); struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - 
return rte_bitmap_init(n_bits, mem, mem_size); -} + uint32_t mem_size); struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, - uint32_t mem_size) { - return rte_bitmap_init_with_all_set(n_bits, mem, mem_size); -} -void rte_bitmap_free_w(struct rte_bitmap *bmp) { return rte_bitmap_free(bmp); } -void rte_bitmap_reset_w(struct rte_bitmap *bmp) { rte_bitmap_reset(bmp); } -void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_prefetch0(bmp, pos); -} -uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos) { - return rte_bitmap_get(bmp, pos); -} -void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_set(bmp, pos); -} -void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, - uint64_t slab) { - rte_bitmap_set_slab(bmp, pos, slab); -} -void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos) { - rte_bitmap_clear(bmp, pos); -} -int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) { - return rte_bitmap_scan(bmp, pos, slab); -} -uint16_t rte_raw_cksum_w(const void *buf, size_t len) { - return rte_raw_cksum(buf, len); -} + uint32_t mem_size); +void rte_bitmap_free_w(struct rte_bitmap *bmp); +void rte_bitmap_reset_w(struct rte_bitmap *bmp); +void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos); +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos); +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos); +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab); +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos); +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab); +uint16_t rte_raw_cksum_w(const void *buf, size_t len); int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, - uint16_t *cksum) { - return rte_raw_cksum_mbuf(m, off, len, cksum); -} -uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return 
rte_ipv4_hdr_len(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_cksum(ipv4_hdr); -} -uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr) { - return rte_ipv4_cksum_simple(ipv4_hdr); -} + uint16_t *cksum); +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr); +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr); +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr); uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, - uint64_t ol_flags) { - return rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); -} + uint64_t ol_flags); uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum(ipv4_hdr, l4_hdr); -} + const void *l4_hdr); uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf(m, ipv4_hdr, l4_off); -} + uint16_t l4_off); int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, - const void *l4_hdr) { - return rte_ipv4_udptcp_cksum_verify(ipv4_hdr, l4_hdr); -} + const void *l4_hdr); int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, const struct rte_ipv4_hdr *ipv4_hdr, - uint16_t l4_off) { - return rte_ipv4_udptcp_cksum_mbuf_verify(m, ipv4_hdr, l4_off); -} + uint16_t l4_off); bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b) { - return rte_ipv6_addr_eq(a, b); -} -void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth) { - rte_ipv6_addr_mask(ip, depth); -} + const struct rte_ipv6_addr *b); +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth); bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, - const struct rte_ipv6_addr *b, uint8_t depth) { - return rte_ipv6_addr_eq_prefix(a, b, depth); -} -uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask) { - return 
rte_ipv6_mask_depth(mask); -} -bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_unspec(ip); -} -bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_loopback(ip); -} -bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_linklocal(ip); -} -bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_sitelocal(ip); -} -bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_v4compat(ip); -} -bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_v4mapped(ip); -} -bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_addr_is_mcast(ip); -} -enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip) { - return rte_ipv6_mc_scope(ip); -} + const struct rte_ipv6_addr *b, uint8_t depth); +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask); +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_v4compat_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip); +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip); void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, - const struct rte_ether_addr *mac) { - rte_ipv6_llocal_from_ethernet(ip, mac); -} + const struct rte_ether_addr *mac); void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, - const struct rte_ipv6_addr *ip) { - rte_ipv6_solnode_from_addr(sol, ip); -} + const struct rte_ipv6_addr *ip); void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, - const 
struct rte_ipv6_addr *ip) { - rte_ether_mcast_from_ipv6(mac, ip); -} -int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip) { - return rte_ipv6_check_version(ip); -} + const struct rte_ipv6_addr *ip); +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip); uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - uint64_t ol_flags) { - return rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); -} + uint64_t ol_flags); uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum(ipv6_hdr, l4_hdr); -} + const void *l4_hdr); uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf(m, ipv6_hdr, l4_off); -} + uint16_t l4_off); int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, - const void *l4_hdr) { - return rte_ipv6_udptcp_cksum_verify(ipv6_hdr, l4_hdr); -} + const void *l4_hdr); int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, const struct rte_ipv6_hdr *ipv6_hdr, - uint16_t l4_off) { - return rte_ipv6_udptcp_cksum_mbuf_verify(m, ipv6_hdr, l4_off); -} -int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len) { - return rte_ipv6_get_next_ext(p, proto, ext_len); -} + uint16_t l4_off); +int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len); enum rte_color rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_srtcm_color_blind_check(m, p, time, pkt_len); -} + uint64_t time, uint32_t pkt_len); enum rte_color rte_meter_srtcm_color_aware_check_w( struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_srtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} + uint32_t pkt_len, enum rte_color pkt_color); enum rte_color 
rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, - uint64_t time, uint32_t pkt_len) { - return rte_meter_trtcm_color_blind_check(m, p, time, pkt_len); -} + uint64_t time, uint32_t pkt_len); enum rte_color rte_meter_trtcm_color_aware_check_w( struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, - uint32_t pkt_len, enum rte_color pkt_color) { - return rte_meter_trtcm_color_aware_check(m, p, time, pkt_len, pkt_color); -} + uint32_t pkt_len, enum rte_color pkt_color); enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( struct rte_meter_trtcm_rfc4115 *m, - struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, - uint32_t pkt_len) { - return rte_meter_trtcm_rfc4115_color_blind_check(m, p, time, pkt_len); -} + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len); enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( struct rte_meter_trtcm_rfc4115 *m, struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, - enum rte_color pkt_color) { - return rte_meter_trtcm_rfc4115_color_aware_check(m, p, time, pkt_len, - pkt_color); -} -uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf) { - return rte_eth_rss_hf_refine(rss_hf); -} - + enum rte_color pkt_color); +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf); uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { - return rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts); -} -int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_rx_queue_count(port_id, queue_id); -} + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts); +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id); int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_rx_descriptor_status(port_id, queue_id, offset); -} + uint16_t offset); int 
rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, - uint16_t offset) { - return rte_eth_tx_descriptor_status(port_id, queue_id, offset); -} + uint16_t offset); uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts); -} + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - return rte_eth_tx_prepare(port_id, queue_id, tx_pkts, nb_pkts); -} + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, - struct rte_eth_dev_tx_buffer *buffer) { - return rte_eth_tx_buffer_flush(port_id, queue_id, buffer); -} + struct rte_eth_dev_tx_buffer *buffer); uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, - struct rte_mbuf *tx_pkt) { - return rte_eth_tx_buffer(port_id, queue_id, buffer, tx_pkt); -} + struct rte_mbuf *tx_pkt); uint16_t rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, - struct rte_eth_recycle_rxq_info *recycle_rxq_info) { - return rte_eth_recycle_mbufs(rx_port_id, rx_queue_id, tx_port_id, tx_queue_id, - recycle_rxq_info); -} -int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id) { - return rte_eth_tx_queue_count(port_id, queue_id); -} -uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m) { - return rte_flow_dynf_metadata_get(m); -} -void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v) { - rte_flow_dynf_metadata_set(m, v); -} -int rte_flow_dynf_metadata_avail_w(void) { - return rte_flow_dynf_metadata_avail(); -} -uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val) { - return rte_hash_crc_1byte(data, init_val); -} -uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val) { - return 
rte_hash_crc_2byte(data, init_val); -} -uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val) { - return rte_hash_crc_4byte(data, init_val); -} -uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val) { - return rte_hash_crc_8byte(data, init_val); -} -uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, - uint32_t init_val) { - return rte_hash_crc(data, data_len, init_val); -} + struct rte_eth_recycle_rxq_info *recycle_rxq_info); +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id); +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m); +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v); +int rte_flow_dynf_metadata_avail_w(void); +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val); +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val); +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val); +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val); +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, uint32_t init_val); void rte_jhash_2hashes_w(const void *key, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_2hashes(key, length, pc, pb); -} + uint32_t *pb); void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, - uint32_t *pb) { - rte_jhash_32b_2hashes(k, length, pc, pb); -} -uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval) { - return rte_jhash(key, length, initval); -} -uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval) { - return rte_jhash_32b(k, length, initval); -} + uint32_t *pb); +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval); +uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval); uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, - uint32_t initval) { - return rte_jhash_3words(a, b, c, initval); -} -uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval) { - return rte_jhash_2words(a, 
b, initval); -} -uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval) { - return rte_jhash_1word(a, initval); -} + uint32_t initval); +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval); +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval); uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key) { - return rte_fbk_hash_get_bucket(ht, key); -} + uint32_t key); int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, uint32_t key, uint16_t value, - uint32_t bucket) { - return rte_fbk_hash_add_key_with_bucket(ht, key, value, bucket); -} + uint32_t bucket); int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, - uint16_t value) { - return rte_fbk_hash_add_key(ht, key, value); -} + uint16_t value); int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_delete_key_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key) { - return rte_fbk_hash_delete_key(ht, key); -} + uint32_t key, uint32_t bucket); +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key); int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, - uint32_t key, uint32_t bucket) { - return rte_fbk_hash_lookup_with_bucket(ht, key, bucket); -} -int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key) { - return rte_fbk_hash_lookup(ht, key); -} -void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht) { - rte_fbk_hash_clear_all(ht); -} -double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht) { - return rte_fbk_hash_get_load_factor(ht); -} + uint32_t key, uint32_t bucket); +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key); +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht); +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht); void rte_rcu_qsbr_thread_online_w(struct 
rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_online(v, thread_id); -} + unsigned int thread_id); void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, - unsigned int thread_id) { - rte_rcu_qsbr_thread_offline(v, thread_id); -} -void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_lock(v, thread_id); -} -void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_unlock(v, thread_id); -} -uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v) { - return rte_rcu_qsbr_start(v); -} -void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id) { - rte_rcu_qsbr_quiescent(v, thread_id); -} -int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait) { - return rte_rcu_qsbr_check(v, t, wait); -} -uint8_t rte_read8_relaxed_w(const void *addr) { - return rte_read8_relaxed(addr); -} -uint16_t rte_read16_relaxed_w(const void *addr) { - return rte_read16_relaxed(addr); -} -uint32_t rte_read32_relaxed_w(const void *addr) { - return rte_read32_relaxed(addr); -} -uint64_t rte_read64_relaxed_w(const void *addr) { - return rte_read64_relaxed(addr); -} -void rte_write8_relaxed_w(uint8_t value, void *addr) { - rte_write8_relaxed(value, addr); -} -void rte_write16_relaxed_w(uint16_t value, void *addr) { - rte_write16_relaxed(value, addr); -} -void rte_write32_relaxed_w(uint32_t value, void *addr) { - rte_write32_relaxed(value, addr); -} -void rte_write64_relaxed_w(uint64_t value, void *addr) { - rte_write64_relaxed(value, addr); -} -uint8_t rte_read8_w(const void *addr) { return rte_read8(addr); } -uint16_t rte_read16_w(const void *addr) { return rte_read16(addr); } -uint32_t rte_read32_w(const void *addr) { return rte_read32(addr); } -uint64_t rte_read64_w(const void *addr) { return rte_read64(addr); } -void rte_write8_w(uint8_t value, void *addr) { rte_write8(value, addr); } -void rte_write16_w(uint16_t value, void *addr) { rte_write16(value, addr); } -void 
rte_write32_w(uint32_t value, void *addr) { rte_write32(value, addr); } -void rte_write64_w(uint64_t value, void *addr) { rte_write64(value, addr); } -void rte_write32_wc_relaxed_w(uint32_t value, void *addr) { - rte_write32_wc_relaxed(value, addr); -} -void rte_write32_wc_w(uint32_t value, void *addr) { - rte_write32_wc(value, addr); -} -void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_lock(msl, me); -} -void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - rte_mcslock_unlock(msl, me); -} -int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me) { - return rte_mcslock_trylock(msl, me); -} -int rte_mcslock_is_locked_w(rte_mcslock_t *msl) { - return rte_mcslock_is_locked(msl); -} -void rte_pflock_init_w(struct rte_pflock *pf) { rte_pflock_init(pf); } -void rte_pflock_read_lock_w(rte_pflock_t *pf) { rte_pflock_read_lock(pf); } -void rte_pflock_read_unlock_w(rte_pflock_t *pf) { rte_pflock_read_unlock(pf); } -void rte_pflock_write_lock_w(rte_pflock_t *pf) { rte_pflock_write_lock(pf); } -void rte_pflock_write_unlock_w(rte_pflock_t *pf) { - rte_pflock_write_unlock(pf); -} -uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R) { - return rte_reciprocal_divide(a, R); -} + unsigned int thread_id); +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v); +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait); +uint8_t rte_read8_relaxed_w(const void *addr); +uint16_t rte_read16_relaxed_w(const void *addr); +uint32_t rte_read32_relaxed_w(const void *addr); +uint64_t rte_read64_relaxed_w(const void *addr); +void rte_write8_relaxed_w(uint8_t value, void *addr); +void rte_write16_relaxed_w(uint16_t value, void *addr); +void rte_write32_relaxed_w(uint32_t value, 
void *addr); +void rte_write64_relaxed_w(uint64_t value, void *addr); +uint8_t rte_read8_w(const void *addr); +uint16_t rte_read16_w(const void *addr); +uint32_t rte_read32_w(const void *addr); +uint64_t rte_read64_w(const void *addr); +void rte_write8_w(uint8_t value, void *addr); +void rte_write16_w(uint16_t value, void *addr); +void rte_write32_w(uint32_t value, void *addr); +void rte_write64_w(uint64_t value, void *addr); +void rte_write32_wc_relaxed_w(uint32_t value, void *addr); +void rte_write32_wc_w(uint32_t value, void *addr); +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +int rte_mcslock_is_locked_w(rte_mcslock_t *msl); +void rte_pflock_init_w(struct rte_pflock *pf); +void rte_pflock_read_lock_w(rte_pflock_t *pf); +void rte_pflock_read_unlock_w(rte_pflock_t *pf); +void rte_pflock_write_lock_w(rte_pflock_t *pf); +void rte_pflock_write_unlock_w(rte_pflock_t *pf); +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R); uint64_t rte_reciprocal_divide_u64_w(uint64_t a, - const struct rte_reciprocal_u64 *R) { - return rte_reciprocal_divide_u64(a, R); -} -void rte_seqcount_init_w(rte_seqcount_t *seqcount) { - rte_seqcount_init(seqcount); -} -uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount) { - return rte_seqcount_read_begin(seqcount); -} + const struct rte_reciprocal_u64 *R); +void rte_seqcount_init_w(rte_seqcount_t *seqcount); +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount); bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, - uint32_t begin_sn) { - return rte_seqcount_read_retry(seqcount, begin_sn); -} -void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_begin(seqcount); -} -void rte_seqcount_write_end_w(rte_seqcount_t *seqcount) { - rte_seqcount_write_end(seqcount); -} -void rte_seqlock_init_w(rte_seqlock_t 
*seqlock) { rte_seqlock_init(seqlock); } -uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock) { - return rte_seqlock_read_begin(seqlock); -} -bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn) { - return rte_seqlock_read_retry(seqlock, begin_sn); -} -void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_lock(seqlock); -} -void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock) { - rte_seqlock_write_unlock(seqlock); -} + uint32_t begin_sn); +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount); +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount); +void rte_seqlock_init_w(rte_seqlock_t *seqlock); +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock); +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn); +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock); +void rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock); unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, - unsigned int n) { - return rte_stack_push(s, obj_table, n); -} + unsigned int n); unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, - unsigned int n) { - return rte_stack_pop(s, obj_table, n); -} -unsigned int rte_stack_count_w(struct rte_stack *s) { - return rte_stack_count(s); -} -unsigned int rte_stack_free_count_w(struct rte_stack *s) { - return rte_stack_free_count(s); -} + unsigned int n); +unsigned int rte_stack_count_w(struct rte_stack *s); +unsigned int rte_stack_free_count_w(struct rte_stack *s); uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss(input_tuple, input_len, rss_key); -} + const uint8_t *rss_key); uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, - const uint8_t *rss_key) { - return rte_softrss_be(input_tuple, input_len, rss_key); -} -void rte_ticketlock_init_w(rte_ticketlock_t *tl) { rte_ticketlock_init(tl); } -void 
rte_ticketlock_lock_w(rte_ticketlock_t *tl) { rte_ticketlock_lock(tl); } -void rte_ticketlock_unlock_w(rte_ticketlock_t *tl) { - rte_ticketlock_unlock(tl); -} -int rte_ticketlock_trylock_w(rte_ticketlock_t *tl) { - return rte_ticketlock_trylock(tl); -} -int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl) { - return rte_ticketlock_is_locked(tl); -} -void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_init(tlr); -} -void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_lock(tlr); -} -void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr) { - rte_ticketlock_recursive_unlock(tlr); -} -int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr) { - return rte_ticketlock_recursive_trylock(tlr); -} + const uint8_t *rss_key); +void rte_ticketlock_init_w(rte_ticketlock_t *tl); +void rte_ticketlock_lock_w(rte_ticketlock_t *tl); +void rte_ticketlock_unlock_w(rte_ticketlock_t *tl); +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl); +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl); +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr); +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr); +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr); +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr); uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, - uint64_t cycles) { - return rte_cyclecounter_cycles_to_ns(tc, cycles); -} + uint64_t cycles); uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, - uint64_t cycle_now) { - return rte_timecounter_update(tc, cycle_now); -} -uint64_t rte_timespec_to_ns_w(const struct timespec *ts) { - return rte_timespec_to_ns(ts); -} -struct timespec rte_ns_to_timespec_w(uint64_t nsec) { - return rte_ns_to_timespec(nsec); -} -bool rte_trace_feature_is_enabled_w(void) { - return rte_trace_feature_is_enabled(); -} + uint64_t 
cycle_now); +uint64_t rte_timespec_to_ns_w(const struct timespec *ts); +struct timespec rte_ns_to_timespec_w(uint64_t nsec); +bool rte_trace_feature_is_enabled_w(void); diff --git a/nix/platform.nix b/nix/platform.nix deleted file mode 100644 index 94b1122c0..000000000 --- a/nix/platform.nix +++ /dev/null @@ -1,83 +0,0 @@ -{ - lib ? (import { }).lib, -}: -rec { - x86-64-v3 = rec { - arch = "x86_64"; - march = "x86-64-v3"; - numa = { - max-nodes = 8; - }; - override = { - stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; - NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - dpdk = { - buildInputs = { - rdma-core = true; - libbsd = true; - libnl = true; - numactl = true; - }; - }; - }; - }; - x86-64-v4 = lib.recursiveUpdate x86-64-v3 rec { - march = "x86-64-v4"; - override.stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; - NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - }; - zen4 = lib.recursiveUpdate x86-64-v4 rec { - march = "zen4"; - override.stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; - NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - }; - zen5 = lib.recursiveUpdate zen4 rec { - march = "zen5"; - override.stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-march=${march}" ]; - NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - }; - bluefield2 = rec { - arch = "aarch64"; - march = "armv8.2-a"; - mcpu = "cortex-a72"; - numa = { - max-nodes = 1; - }; - override = { - stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; - NIX_CXXFLAGS_COMPILE = NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - dpdk = { - buildInputs = { - rdma-core = true; - libbsd = true; - libnl = true; - numactl = false; - }; - }; - }; - }; - bluefield3 = lib.recursiveUpdate bluefield2 rec { - march = "armv8.4-a"; - mcpu = "cortex-a78ae"; - override.stdenv'.env = rec { - NIX_CFLAGS_COMPILE = [ "-mcpu=${mcpu}" ]; - NIX_CXXFLAGS_COMPILE = 
NIX_CFLAGS_COMPILE; - NIX_CFLAGS_LINK = [ ]; - }; - }; -} diff --git a/npins/sources.json b/npins/sources.json index bc55da807..75329024f 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -16,8 +16,8 @@ "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", - "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre911335.23735a82a828/nixexprs.tar.xz", - "hash": "03cv7yy3ldb3i50in6qkm98y68nlid874l52wayzgx0z7pbpq1rk" + "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre913981.7d853e518814/nixexprs.tar.xz", + "hash": "1cpg513zly625rw05kbz1hvfiqcrwbd71c1bqhp61sh6ng8ifg4c" }, "rdma-core": { "type": "Git", From 5b1363b01c1ca6bb2e19945a936203a49b0e47fe Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 20:15:24 +0000 Subject: [PATCH 27/35] pkgsBuildHost --- nix/overlays/dataplane.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 44aef8cfb..41e09208b 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -15,8 +15,8 @@ let with builtins; (mapAttrs (var: val: (toString (orig.${var} or "")) + " " + (toString val)) add) ); adapt = final.stdenvAdapters; - bintools = final.buildPackages.llvmPackages.bintools; - lld = final.buildPackages.llvmPackages.lld; + bintools = final.pkgsBuildHost.llvmPackages.bintools; + lld = final.pkgsBuildHost.llvmPackages.lld; added-to-env = helpers.addToEnv target.platform.override.stdenv.env profile; stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; From 9824d79e1c2ea21d9c0e46bdb3c06267e56176c7 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 20:15:38 +0000 Subject: [PATCH 28/35] wip --- default.nix | 96 +++++++++++++++++++++++------------------------------ 1 file changed, 42 insertions(+), 54 deletions(-) diff --git a/default.nix b/default.nix index 04d3d9edc..f6fc3835f 100644 --- a/default.nix +++ b/default.nix @@ -37,70 +37,58 @@ let overlays.${overlay} ]; 
}).pkgsCross.${target.info.nixarch}; - - sysroot-list = with pkgs; [ - stdenv'.cc.libc.dev - stdenv'.cc.libc.out - libmd.dev - libmd.static - libbsd.dev - libbsd.static - libnl.dev - libnl.static - numactl.dev - numactl.static - rdma-core.dev - rdma-core.static - dpdk.dev - dpdk.static - dpdk-wrapper.dev - dpdk-wrapper.out - ]; - build-tools-list = - with pkgs.buildPackages; - [ + sysroot = pkgs.symlinkJoin { + name = "sysroot"; + paths = with pkgs; [ + stdenv'.cc.libc.dev + stdenv'.cc.libc.out + libmd.dev + libmd.static + libbsd.dev + libbsd.static + libnl.dev + libnl.static + numactl.dev + numactl.static + rdma-core.dev + rdma-core.static + dpdk.dev + dpdk.static + dpdk-wrapper.dev + dpdk-wrapper.out + ]; + }; + clangd-config = pkgs.writeTextFile { + name = ".clangd"; + text = '' + CompileFlags: + Add: + - "-I${sysroot}/include" + - "-I${pkgs.dpdk.dev}/include" + - "-Wno-deprecated-declarations" + ''; + executable = false; + destination = "/.clangd"; + }; + dev-tools = pkgs.symlinkJoin { + name = "dataplane-dev-shell"; + paths = with pkgs.buildPackages; [ + clangd-config llvmPackages.bintools llvmPackages.clang llvmPackages.libclang.lib llvmPackages.lld - ] - ++ [ npins ]; + }; in -pkgs.lib.fix (final: { +{ inherit pkgs sources profile target + sysroot + dev-tools ; - sysroot = - with final.pkgs; - symlinkJoin { - name = "sysroot"; - paths = sysroot-list; - }; - clangd = pkgs.writeTextFile { - name = ".clangd"; - text = '' - CompileFlags: - Add: - - "-I${final.sysroot}/include" - - "-I${final.pkgs.dpdk.dev}/include" - - "-Wno-deprecated-declarations" - ''; - executable = false; - destination = "/.clangd"; - }; - build-tools = - with final.pkgs.buildPackages; - symlinkJoin { - name = "build-tools"; - paths = build-tools-list ++ [ final.clangd ]; - }; - dev-shell = final.pkgs.symlinkJoin { - name = "dataplane-dev-shell"; - paths = sysroot-list ++ build-tools-list; - }; -}) +} From f50e3eb53afee4eaa4947b04e6e369d1162c0197 Mon Sep 17 00:00:00 2001 From: Daniel Noland 
Date: Sun, 21 Dec 2025 20:46:48 +0000 Subject: [PATCH 29/35] pkgsBuildHost --- default.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/default.nix b/default.nix index f6fc3835f..f4c0a6631 100644 --- a/default.nix +++ b/default.nix @@ -72,7 +72,7 @@ let }; dev-tools = pkgs.symlinkJoin { name = "dataplane-dev-shell"; - paths = with pkgs.buildPackages; [ + paths = with pkgs.pkgsBuildHost; [ clangd-config llvmPackages.bintools llvmPackages.clang From 1ff532464953f05c927b8bcb09c25c6c648718f3 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Sun, 21 Dec 2025 23:33:05 +0000 Subject: [PATCH 30/35] wip (moderate rework) --- default.nix | 14 ++--- nix/.rust-toolchain.manifest-lock.json | 10 ++++ nix/overlays/dataplane.nix | 19 +++++-- nix/overlays/default.nix | 4 +- nix/{target.nix => platforms.nix} | 73 ++++++++++++++------------ nix/rust-toolchain.manifest-lock.nix | 18 +++++++ npins/sources.json | 5 ++ rust-toolchain.toml | 27 ++++++++++ 8 files changed, 121 insertions(+), 49 deletions(-) create mode 100644 nix/.rust-toolchain.manifest-lock.json rename nix/{target.nix => platforms.nix} (70%) create mode 100644 nix/rust-toolchain.manifest-lock.nix create mode 100644 rust-toolchain.toml diff --git a/default.nix b/default.nix index f4c0a6631..e9e245bdc 100644 --- a/default.nix +++ b/default.nix @@ -16,27 +16,27 @@ let if str == "" then [ ] else builtins.filter (elm: builtins.isString elm) (builtins.split split str); sanitizers = split-str ",+" sanitize; sources = import ./npins; - target = import ./nix/target.nix { + platform' = import ./nix/platforms.nix { inherit lib platform libc; }; profile = import ./nix/profiles.nix { inherit prof sanitizers instrumentation; - arch = target.platform.arch; + arch = platform'.arch; }; overlays = import ./nix/overlays { inherit sources sanitizers - target profile ; + platform = platform'; }; pkgs = (import sources.nixpkgs { overlays = [ overlays.${overlay} ]; - }).pkgsCross.${target.info.nixarch}; + 
}).pkgsCross.${platform'.info.nixarch}; sysroot = pkgs.symlinkJoin { name = "sysroot"; paths = with pkgs; [ @@ -84,11 +84,11 @@ let in { inherit + dev-tools pkgs - sources profile - target + sources sysroot - dev-tools ; + platform = platform'; } diff --git a/nix/.rust-toolchain.manifest-lock.json b/nix/.rust-toolchain.manifest-lock.json new file mode 100644 index 000000000..18160d91b --- /dev/null +++ b/nix/.rust-toolchain.manifest-lock.json @@ -0,0 +1,10 @@ +{ + "channel": "1.92.0", + "hash": { + "md5": "5925de5268fdcb9c70be7fa06de5583d", + "sha1": "aace35bc46aa55685f1e19c34d644f3d49b057c6", + "sha256": "b2a49624353173ecdacf59c158c00929300606e1963f6e4609fb483a508402d0", + "sha512": "0a8fc59360a5e6f3236d9a35ce2c8e0ba184e8be9b5a918f9bc7aae3e5cc1f8133eaa01fd044fdc6ba645805a98aa645e343fbb7b15a850ea0e83ff20d42ebaa" + }, + "url": "https://static.rust-lang.org/dist/channel-rust-1.92.0.toml" +} diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 41e09208b..b5644da3e 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -3,7 +3,7 @@ { sources, sanitizers, - target, + platform, profile, }: final: prev: @@ -17,7 +17,7 @@ let adapt = final.stdenvAdapters; bintools = final.pkgsBuildHost.llvmPackages.bintools; lld = final.pkgsBuildHost.llvmPackages.lld; - added-to-env = helpers.addToEnv target.platform.override.stdenv.env profile; + added-to-env = helpers.addToEnv platform.override.stdenv.env profile; stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; separateDebugInfo = true; @@ -28,6 +28,11 @@ let ]; }) final.llvmPackages.stdenv; dataplane-dep = pkg: pkg.override { stdenv = stdenv'; }; + fenix = import sources.fenix { }; + rust-toolchain = fenix.fromToolchainFile { + file = ../../rust-toolchain.toml; + sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; + }; in { inherit stdenv' added-to-env; @@ -219,9 +224,7 @@ in # Also, while this library has a respectable security 
track record, this is also a super strong candidate for # cfi, safe-stack, and cf-protection. dpdk = dataplane-dep ( - final.callPackage ../pkgs/dpdk ( - target.platform.override.dpdk.buildInputs // { src = sources.dpdk; } - ) + final.callPackage ../pkgs/dpdk (platform.override.dpdk.buildInputs // { src = sources.dpdk; }) ); # DPDK is largely composed of static-inline functions. @@ -230,4 +233,10 @@ in # This wrapping process does not really cause any performance issue due to lto; the compiler is going to "unwrap" # these methods anyway. dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper { }); + + # TODO: doc this + rustPlatform = final.makeRustPlatform { + cargo = rust-toolchain; + rustc = rust-toolchain; + }; } diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index ecb927248..02212edb8 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -3,7 +3,7 @@ { sources, sanitizers, - target, + platform, profile, }: { @@ -11,7 +11,7 @@ inherit sources sanitizers - target + platform profile ; }; diff --git a/nix/target.nix b/nix/platforms.nix similarity index 70% rename from nix/target.nix rename to nix/platforms.nix index 14c0f88e0..3c5b7825c 100644 --- a/nix/target.nix +++ b/nix/platforms.nix @@ -86,42 +86,45 @@ let }; }; in -lib.fix (final: { - platform = platforms.${platform}; - info = - { - x86_64 = { - linux = { - gnu = { - target = "x86_64-unknown-linux-gnu"; - machine = "x86_64"; - nixarch = "gnu64"; - libc = "gnu"; - }; - musl = { - target = "x86_64-unknown-linux-musl"; - machine = "x86_64"; - nixarch = "musl64"; - libc = "musl"; +lib.fix ( + final: + platforms.${platform} + // { + info = + { + x86_64 = { + linux = { + gnu = { + target = "x86_64-unknown-linux-gnu"; + machine = "x86_64"; + nixarch = "gnu64"; + libc = "gnu"; + }; + musl = { + target = "x86_64-unknown-linux-musl"; + machine = "x86_64"; + nixarch = "musl64"; + libc = "musl"; + }; }; }; - }; - aarch64 = { - linux = { - gnu = { - target = 
"aarch64-unknown-linux-gnu"; - machine = "aarch64"; - nixarch = "aarch64-multiplatform"; - libc = "gnu"; - }; - musl = { - target = "aarch64-unknown-linux-musl"; - machine = "aarch64"; - nixarch = "aarch64-multiplatform-musl"; - libc = "musl"; + aarch64 = { + linux = { + gnu = { + target = "aarch64-unknown-linux-gnu"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform"; + libc = "gnu"; + }; + musl = { + target = "aarch64-unknown-linux-musl"; + machine = "aarch64"; + nixarch = "aarch64-multiplatform-musl"; + libc = "musl"; + }; }; }; - }; - } - .${final.platform.arch}.${kernel}.${libc}; -}) + } + .${final.arch}.${kernel}.${libc}; + } +) diff --git a/nix/rust-toolchain.manifest-lock.nix b/nix/rust-toolchain.manifest-lock.nix new file mode 100644 index 000000000..2c333d943 --- /dev/null +++ b/nix/rust-toolchain.manifest-lock.nix @@ -0,0 +1,18 @@ +let + rust-toolchain = (builtins.fromTOML (builtins.readFile ../rust-toolchain.toml)).toolchain; + channel = rust-toolchain.channel; + url = "https://static.rust-lang.org/dist/channel-rust-${channel}.toml"; + manifest-path = builtins.fetchurl { + inherit url; + name = "manifest.toml"; + }; + hash = { + md5 = builtins.hashFile "md5" manifest-path; + sha1 = builtins.hashFile "sha1" manifest-path; + sha256 = builtins.hashFile "sha256" manifest-path; + sha512 = builtins.hashFile "sha512" manifest-path; + }; +in +{ + inherit channel url hash; +} diff --git a/npins/sources.json b/npins/sources.json index 75329024f..2e98b0c2a 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -13,6 +13,11 @@ "url": "https://github.com/DPDK/dpdk/archive/ed957165eadbe60a47d5ec223578cdd1c13d0bd9.tar.gz", "hash": "09h7wnmq4c9xm1nsyv5mz1yf91c1l6vy9sdcamb09qjjx4wgs0q9" }, + "fenix": { + "type": "Tarball", + "url": "https://github.com/nix-community/fenix/archive/main.tar.gz", + "hash": "1gkd9ppvsxl4jjg1jyw61wm99xhy4hdqx5dxqj06gfxi2zkamvzf" + }, "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", diff --git a/rust-toolchain.toml 
b/rust-toolchain.toml new file mode 100644 index 000000000..610ccfcad --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,27 @@ +[toolchain] +# NOTE: you can and should manually update this on new rust releases + +channel = "1.92.0" +components = [ + "rustc", + "cargo", + "rust-std", + "rust-docs", + "rustfmt-preview", + "clippy-preview", + "rust-analyzer-preview", + "rust-src", + + # disabled components + # "rust-mingw", + # "llvm-tools-preview", ## we already have a full llvm in the npins + # "rust-analysis", ## obsolete + # "miri-preview", ## not yet functional for us + # "rustc-codegen-cranelift-preview" ## not relevant to us +] +targets = [ + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", + "aarch64-unknown-linux-musl" +] From af2674f692d14fbc29f4d633815ab2b94e3c6319 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Mon, 22 Dec 2025 03:54:01 +0000 Subject: [PATCH 31/35] wip --- default.nix | 47 +++++++++++++++++-------- nix/overlays/dataplane.nix | 20 +++++++++++ nix/overlays/default.nix | 4 +++ nix/pkgs/dpdk/cross/bluefield3.gnu.ini | 18 ---------- nix/pkgs/dpdk/cross/bluefield3.musl.ini | 18 ---------- nix/pkgs/dpdk/default.nix | 2 ++ npins/sources.json | 32 +++++++++++++++++ 7 files changed, 90 insertions(+), 51 deletions(-) delete mode 100644 nix/pkgs/dpdk/cross/bluefield3.gnu.ini delete mode 100644 nix/pkgs/dpdk/cross/bluefield3.musl.ini diff --git a/default.nix b/default.nix index e9e245bdc..53c1a6b10 100644 --- a/default.nix +++ b/default.nix @@ -1,7 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Open Network Fabric Authors { - overlay ? "dataplane", platform ? "x86-64-v3", libc ? "gnu", prof ? 
"debug", @@ -31,15 +30,20 @@ let ; platform = platform'; }; - pkgs = + dataplane-dev-pkgs = import sources.nixpkgs { + overlays = [ + overlays.dataplane-dev + ]; + }; + dataplane-pkgs = (import sources.nixpkgs { overlays = [ - overlays.${overlay} + overlays.dataplane ]; }).pkgsCross.${platform'.info.nixarch}; - sysroot = pkgs.symlinkJoin { + sysroot = dataplane-pkgs.symlinkJoin { name = "sysroot"; - paths = with pkgs; [ + paths = with dataplane-pkgs; [ stdenv'.cc.libc.dev stdenv'.cc.libc.out libmd.dev @@ -58,34 +62,47 @@ let dpdk-wrapper.out ]; }; - clangd-config = pkgs.writeTextFile { + clangd-config = dataplane-pkgs.writeTextFile { name = ".clangd"; text = '' CompileFlags: Add: - "-I${sysroot}/include" - - "-I${pkgs.dpdk.dev}/include" + - "-I${dataplane-pkgs.dpdk.dev}/include" - "-Wno-deprecated-declarations" ''; executable = false; destination = "/.clangd"; }; - dev-tools = pkgs.symlinkJoin { + dev-tools = dataplane-pkgs.symlinkJoin { name = "dataplane-dev-shell"; - paths = with pkgs.pkgsBuildHost; [ + paths = [ clangd-config - llvmPackages.bintools - llvmPackages.clang - llvmPackages.libclang.lib - llvmPackages.lld + ] + ++ (with dataplane-pkgs.pkgsBuildHost.llvmPackages; [ + bintools + clang + libclang.lib + lld + ]) + ++ (with dataplane-dev-pkgs; [ + bash + cargo-bolero + cargo-deny + cargo-depgraph + cargo-llvm-cov + cargo-nextest + direnv + just npins - ]; + ]); }; in { inherit + dataplane-dev-pkgs + dataplane-pkgs dev-tools - pkgs profile sources sysroot diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index b5644da3e..424f244a6 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -65,6 +65,8 @@ in udev = null; udevCheckHook = null; + llvmPackages = final.llvmPackages_21; + # libmd is used by libbsd (et al) which is an optional dependency of dpdk. # # We _might_ actually care about perf here, so we lto this package. 
@@ -167,6 +169,17 @@ in rdma-core = (dataplane-dep prev.rdma-core).overrideAttrs (orig: { version = sources.rdma-core.branch; src = sources.rdma-core.outPath; + + # Patching the shebang lines in the perl scripts causes nixgraph to (incorrectly) think we depend on perl at + # runtime. We absolutely do not (we don't even ship a perl interpreter), so don't patch these shebang lines. + # In fact, we don't use any of the scripts from this package. + dontPatchShebangs = true; + + # The upstream postFixup is broken by dontPatchShebangs = true + # It's whole function was to further mutate the shebang lines in perl scripts, so we don't care. + # Just null it. + postFixup = null; + outputs = (orig.outputs or [ ]) ++ [ "static" ]; @@ -179,6 +192,8 @@ in cmakeFlags = orig.cmakeFlags ++ [ + "-DRDMA_DYNAMIC_PROVIDERS=none" + "-DRDMA_STATIC_PROVIDERS=all" "-DENABLE_STATIC=1" # we don't need pyverbs, and turning it off reduces build time / complexity. "-DNO_PYVERBS=1" @@ -234,8 +249,13 @@ in # these methods anyway. 
dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper { }); + pciutils = dataplane-dep (prev.pciutils.override { static = true; }); + # This isn't directly required by dataplane, + perftest = dataplane-dep (final.callPackage ../pkgs/perftest { src = sources.perftest; }); + # TODO: doc this rustPlatform = final.makeRustPlatform { + stdenv = stdenv'; cargo = rust-toolchain; rustc = rust-toolchain; }; diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 02212edb8..8ce3c6604 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -15,4 +15,8 @@ profile ; }; + + dataplane-dev = import ./dataplane-dev.nix { + inherit sources; + }; } diff --git a/nix/pkgs/dpdk/cross/bluefield3.gnu.ini b/nix/pkgs/dpdk/cross/bluefield3.gnu.ini deleted file mode 100644 index fec17cf95..000000000 --- a/nix/pkgs/dpdk/cross/bluefield3.gnu.ini +++ /dev/null @@ -1,18 +0,0 @@ -[binaries] -c = 'aarch64-unknown-linux-gnu-cc' -cpp = 'aarch64-unknown-linux-gnu-c++' -ar = 'aarch64-unknown-linux-gnu-ar' -strip = 'aarch64-unknown-linux-gnu-strip' -pkgconfig = 'aarch64-unknown-linux-gnu-pkg-config' -pkg-config = 'aarch64-unknown-linux-gnu-pkg-config' -pcap-config = '' - -[host_machine] -system = 'linux' -cpu_family = 'aarch64' -cpu = 'armv8.6-a' -endian = 'little' - -[properties] -platform = 'bluefield3' -libc = 'gnu' diff --git a/nix/pkgs/dpdk/cross/bluefield3.musl.ini b/nix/pkgs/dpdk/cross/bluefield3.musl.ini deleted file mode 100644 index eb433ad26..000000000 --- a/nix/pkgs/dpdk/cross/bluefield3.musl.ini +++ /dev/null @@ -1,18 +0,0 @@ -[binaries] -c = 'aarch64-unknown-linux-musl-cc' -cpp = 'aarch64-unknown-linux-musl-c++' -ar = 'aarch64-unknown-linux-musl-ar' -strip = 'aarch64-unknown-linux-musl-strip' -pkgconfig = 'aarch64-unknown-linux-musl-pkg-config' -pkg-config = 'aarch64-unknown-linux-musl-pkg-config' -pcap-config = '' - -[host_machine] -system = 'linux' -cpu_family = 'aarch64' -cpu = 'armv8.6-a' -endian = 'little' - -[properties] -platform = 
'bluefield3' -libc = 'musl' diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index ab1e0010d..b8f8d300b 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -311,6 +311,8 @@ stdenv.mkDerivation { postInstall = '' # Remove docs. We don't build these anyway rm -rf $out/share/doc + # Remove python files from bin output (we never use them and they confuse dependency reports) + rm $out/bin/*.py mkdir -p $static/lib $share; mv $out/lib/*.a $static/lib mv $out/share $share diff --git a/npins/sources.json b/npins/sources.json index 2e98b0c2a..0fa1f6258 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -18,12 +18,44 @@ "url": "https://github.com/nix-community/fenix/archive/main.tar.gz", "hash": "1gkd9ppvsxl4jjg1jyw61wm99xhy4hdqx5dxqj06gfxi2zkamvzf" }, + "kopium": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "kube-rs", + "repo": "kopium" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "submodules": false, + "version": "0.22.5", + "revision": "f346e2044500c8d71523c35d474bc2f6235a0060", + "url": "https://api.github.com/repos/kube-rs/kopium/tarball/0.22.5", + "hash": "14lr2qgfh50rlpw5wgy6cw1qvkz44bwwx06srlks243hgkw9p2fd" + }, "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre913981.7d853e518814/nixexprs.tar.xz", "hash": "1cpg513zly625rw05kbz1hvfiqcrwbd71c1bqhp61sh6ng8ifg4c" }, + "perftest": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "linux-rdma", + "repo": "perftest" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "submodules": false, + "version": "25.10.0-0.128", + "revision": "8a1d3d7234add23fe006b2ff51d650ff022077a8", + "url": "https://api.github.com/repos/linux-rdma/perftest/tarball/25.10.0-0.128", + "hash": "192m2xlds308y0p2h6f6zciwspgq3k0q93q9lp1j4ijmwrpbcrl0" + }, "rdma-core": { "type": "Git", "repository": { From 
b77c8f397f5e31bbb01bf2a6dc6e28f393dcac93 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Mon, 22 Dec 2025 19:25:12 +0000 Subject: [PATCH 32/35] hack --- .cargo/config.toml | 8 +- .envrc => .envrc.old | 0 Cargo.lock | 203 ++----- Cargo.toml | 2 +- cli/build.rs | 4 +- dataplane/build.rs | 4 +- default.nix | 158 +++++ dpdk-sys/build.rs | 20 +- dpdk-sys/dpdk_wrapper.h | 1034 ++++++++++++++++++++++++++++++++ dpdk/build.rs | 4 +- hardware/Cargo.toml | 4 +- hardware/build.rs | 4 +- hardware/src/scan.rs | 152 ++--- init/build.rs | 4 +- nix/overlays/dataplane-dev.nix | 27 + nix/overlays/dataplane.nix | 3 + nix/pkgs/kopium/default.nix | 11 + nix/pkgs/perftest/default.nix | 19 + nix/profiles.nix | 2 +- npins/sources.json | 29 + sysfs/Cargo.toml | 2 +- sysfs/build.rs | 4 +- 22 files changed, 1445 insertions(+), 253 deletions(-) rename .envrc => .envrc.old (100%) create mode 100644 dpdk-sys/dpdk_wrapper.h create mode 100644 nix/overlays/dataplane-dev.nix create mode 100644 nix/pkgs/kopium/default.nix create mode 100644 nix/pkgs/perftest/default.nix diff --git a/.cargo/config.toml b/.cargo/config.toml index 32fb6467d..02340991f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,12 +1,12 @@ [env] COMPILE_ENV = { value = "sysroot", relative = true, force = false } -PATH = { value = "compile-env/bin", relative = true, force = true } -LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = true } -PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = true } +#PATH = { value = "compile-env/bin", relative = true, force = true } +#LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = true } +# PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = true } [build] target = "x86_64-unknown-linux-gnu" -rustc = "compile-env/bin/rustc" +# rustc = "compile-env/bin/rustc" rustflags = ["--cfg", "tokio_unstable"] 
[target.x86_64-unknown-linux-gnu] diff --git a/.envrc b/.envrc.old similarity index 100% rename from .envrc rename to .envrc.old diff --git a/Cargo.lock b/Cargo.lock index 9645aa407..be2386be5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,9 +229,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.1" +version = "1.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b5ce75405893cd713f9ab8e297d8e438f624dde7d706108285f7e17a25a180f" +checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" dependencies = [ "aws-lc-sys", "zeroize", @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "179c3777a8b5e70e90ea426114ffc565b2c1a9f82f6c4a0c5a34aa6ef5e781b6" +checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" dependencies = [ "cc", "cmake", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ "axum-core", "bytes", @@ -335,7 +335,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytecheck" @@ -677,9 +677,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.49" +version = "1.2.50" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" dependencies = [ "find-msvc-tools", "jobserver", @@ -719,7 +719,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -805,9 +805,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b042e5d8a74ae91bb0961acd039822472ec99f8ab0948cbf6d1369588f8be586" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -1332,9 +1332,7 @@ dependencies = [ "dataplane-id", "dataplane-sysfs", "dataplane-test-utils", - "fixin", "hwlocality", - "n-vm", "num-derive", "num-traits", "pci-ids", @@ -1653,7 +1651,6 @@ version = "0.7.0" dependencies = [ "dataplane-dpdk-sysroot-helper", "dataplane-id", - "n-vm", "nix 0.30.1", "procfs", "strum", @@ -2142,9 +2139,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" +checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8" dependencies = [ "autocfg", "tokio", @@ -2302,21 +2299,22 @@ dependencies = [ "libc 0.2.178", "log", "rustversion", - "windows 0.48.0", + "windows", ] [[package]] name = "generator" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc 0.2.178", "log", "rustversion", - "windows 0.61.3", + "windows-link", + "windows-result", ] [[package]] @@ -2462,7 
+2460,7 @@ checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" dependencies = [ "cfg-if", "libc 0.2.178", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -2669,7 +2667,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.2", + "windows-core", ] [[package]] @@ -2887,9 +2885,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" [[package]] name = "jobserver" @@ -3133,14 +3131,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link 0.2.1", + "windows-link", ] [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" dependencies = [ "bitflags 2.10.0", "libc 0.2.178", @@ -3222,7 +3220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", - "generator 0.8.7", + "generator 0.8.8", "scoped-tls", "tracing", "tracing-subscriber", @@ -3782,7 +3780,7 @@ dependencies = [ "libc 0.2.178", "redox_syscall", "smallvec", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -3947,9 +3945,9 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" [[package]] name = "potential_utf" @@ -4321,9 +4319,9 @@ dependencies = [ [[package]] name = "rapidhash" -version = "4.1.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e65c75143ce5d47c55b510297eeb1182f3c739b6043c537670e9fc18612dae" +checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" dependencies = [ "rustversion", ] @@ -4437,9 +4435,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.25" +version = "0.12.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a" +checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" dependencies = [ "base64 0.22.1", "bytes", @@ -4521,9 +4519,9 @@ checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" [[package]] name = "roaring" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08d6a905edb32d74a5d5737a0c9d7e950c312f3c46cb0ca0a2ca09ea11878a0" +checksum = "8ba9ce64a8f45d7fc86358410bb1a82e8c987504c0d4900e9141d69a9f26c885" [[package]] name = "rtnetlink" @@ -4615,9 +4613,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] @@ -4663,9 +4661,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = 
"62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" [[package]] name = "safemem" @@ -4999,7 +4997,7 @@ dependencies = [ "assoc", "bitvec", "cfg-if", - "generator 0.8.7", + "generator 0.8.8", "hex", "owo-colors 3.5.0", "rand 0.8.5", @@ -5164,9 +5162,9 @@ dependencies = [ [[package]] name = "supports-hyperlinks" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" +checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" [[package]] name = "supports-unicode" @@ -5661,9 +5659,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -5684,9 +5682,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -6085,41 +6083,6 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "windows" -version = "0.61.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" -dependencies = [ - "windows-collections", - "windows-core 0.61.2", - "windows-future", - "windows-link 0.1.3", - "windows-numerics", -] - -[[package]] -name = "windows-collections" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" -dependencies = [ - "windows-core 0.61.2", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link 0.1.3", - "windows-result 0.3.4", - "windows-strings 0.4.2", -] - [[package]] name = "windows-core" version = "0.62.2" @@ -6128,20 +6091,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", -] - -[[package]] -name = "windows-future" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", - "windows-threading", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -6166,53 +6118,19 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-numerics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", + "windows-link", ] [[package]] @@ -6221,7 +6139,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6257,7 +6175,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6297,7 +6215,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.1", + "windows-link", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -6308,15 +6226,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] -[[package]] -name = "windows-threading" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" diff --git a/Cargo.toml b/Cargo.toml index df7f24b85..387171704 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,7 +133,7 @@ 
metrics-exporter-prometheus = { version = "0.18.1", default-features = false, fe miette = { version = "7.6.0", default-features = false, features = [] } mio = { version = "1.1.1", default-features = false, features = [] } multi_index_map = { version = "0.15.0", default-features = false, features = [] } -n-vm = { git = "https://github.com/githedgehog/testn.git", tag = "v0.0.9", default-features = false, features = [], package = "n-vm" } +n-vm = { git = "https://github.com/githedgehog/testn.git", tag = "v0.0.9", default-features = false, features = [] } netdev = { version = "0.39.0", default-features = false, features = [] } nix = { version = "0.30.1", default-features = false, features = [] } num-derive = { version = "0.4.2", default-features = false, features = [] } diff --git a/cli/build.rs b/cli/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/cli/build.rs +++ b/cli/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/dataplane/build.rs b/dataplane/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/dataplane/build.rs +++ b/dataplane/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/default.nix b/default.nix index 53c1a6b10..e4823a42a 100644 --- a/default.nix +++ b/default.nix @@ -60,6 +60,7 @@ let dpdk.static dpdk-wrapper.dev dpdk-wrapper.out + hwloc ]; }; clangd-config = dataplane-pkgs.writeTextFile { @@ -97,6 +98,150 @@ let npins 
]); }; + # crane = import sources.crane { pkgs = dataplane-dev-pkgs; }; + crane = import sources.crane { }; + dataplane-src = crane.cleanCargoSource ./.; + + # Common arguments can be set here to avoid repeating them later + commonArgs = { + src = dataplane-src; + strictDeps = true; + CARGO_PROFILE = "dev"; + + nativeBuildInputs = [ + dataplane-pkgs.pkg-config + # dataplane-pkgs.libclang.lib + ]; + buildInputs = [ + dataplane-pkgs.hwloc + ]; + + # Additional environment variables can be set directly + # MY_CUSTOM_VAR = "some value"; + }; + # Build *just* the cargo dependencies (of the entire workspace), + # so we can reuse all of that work (e.g. via cachix) when running in CI + # It is *highly* recommended to use something like cargo-hakari to avoid + # cache misses when building individual top-level-crates + cargoArtifacts = crane.buildDepsOnly commonArgs; + individualCrateArgs = commonArgs // { + inherit cargoArtifacts; + inherit (crane.crateNameFromCargoToml { src = dataplane-src; }) version; + # NB: we disable tests since we'll run them all via cargo-nextest + doCheck = false; + }; + fileSetForCrate = + crate: + lib.fileset.toSource { + root = ./.; + fileset = lib.fileset.unions [ + ./. 
+ ./Cargo.toml + ./Cargo.lock + # (crane.fileset.commonCargoSources ./crates/my-common) + # (crane.fileset.commonCargoSources ./crates/my-workspace-hack) + (crane.fileset.commonCargoSources crate) + ]; + }; + rekon = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-rekon"; + cargoExtraArgs = "--package dataplane-rekon"; + src = fileSetForCrate ./rekon; + } + ); + net = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-net"; + cargoExtraArgs = "--package dataplane-net"; + src = fileSetForCrate ./net; + } + ); + cli = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-cli"; + cargoExtraArgs = "--package dataplane-cli"; + src = fileSetForCrate ./cli; + } + ); + dataplane-dpdk-sysroot-helper = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-dpdk-sysroot-helper"; + cargoExtraArgs = "--package dataplane-dpdk-sysroot-helper"; + src = fileSetForCrate ./dpdk-sysroot-helper; + } + ); + dpdk-sys = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-dpdk-sys"; + cargoExtraArgs = "--package dataplane-dpdk-sys"; + src = fileSetForCrate ./dpdk-sys; + env = { + LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; + C_INCLUDE_PATH = "${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; + LIBRARY_PATH = "${sysroot}/lib"; + }; + nativeBuildInputs = [ + dataplane-pkgs.pkg-config + dataplane-pkgs.llvmPackages.libclang.lib + dataplane-pkgs.llvmPackages.clang + dataplane-pkgs.llvmPackages.lld + ]; + buildInputs = [ + sysroot + ]; + } + ); + pdpdk = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane-dpdk"; + cargoExtraArgs = "--package dataplane-dpdk"; + src = fileSetForCrate ./dpdk; + env = { + LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; + C_INCLUDE_PATH = 
"${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; + LIBRARY_PATH = "${sysroot}/lib"; + }; + nativeBuildInputs = [ + dataplane-pkgs.pkg-config + dataplane-pkgs.llvmPackages.libclang.lib + dataplane-pkgs.llvmPackages.clang + dataplane-pkgs.llvmPackages.lld + ]; + buildInputs = [ + sysroot + ]; + } + ); + dataplane = crane.buildPackage ( + individualCrateArgs + // { + pname = "dataplane"; + cargoExtraArgs = "--package dataplane"; + src = fileSetForCrate ./dataplane; + env = { + LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; + C_INCLUDE_PATH = "${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; + LIBRARY_PATH = "${sysroot}/lib"; + }; + nativeBuildInputs = [ + dataplane-pkgs.pkg-config + dataplane-pkgs.llvmPackages.libclang.lib + dataplane-pkgs.llvmPackages.clang + dataplane-pkgs.llvmPackages.lld + ]; + buildInputs = [ + sysroot + ]; + } + ); + in { inherit @@ -106,6 +251,19 @@ in profile sources sysroot + crane + commonArgs + cargoArtifacts + rekon + net + cli + dataplane-dpdk-sysroot-helper + dpdk-sys + pdpdk + dataplane ; platform = platform'; + x = builtins.attrNames crane; + # y = crane.buildPackage + } diff --git a/dpdk-sys/build.rs b/dpdk-sys/build.rs index 1eac21991..8afec9363 100644 --- a/dpdk-sys/build.rs +++ b/dpdk-sys/build.rs @@ -20,11 +20,11 @@ impl ParseCallbacks for Cb { } } -fn bind(path: &Path, sysroot: &str) { +fn bind(path: &Path) { let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let static_fn_path = out_path.join("generated.h"); bindgen::Builder::default() - .header(format!("{sysroot}/include/dpdk_wrapper.h")) + .header(format!("./dpdk_wrapper.h")) .anon_fields_prefix("annon") .use_core() .generate_comments(true) @@ -56,7 +56,7 @@ fn bind(path: &Path, sysroot: &str) { .opaque_type("rte_arp_ipv4") .opaque_type("rte_gtp_psc_generic_hdr") .opaque_type("rte_l2tpv2_combined_msg_hdr") - 
.clang_arg(format!("-I{sysroot}/include")) + // .clang_arg(format!("-I{sysroot}/include")) .clang_arg("-fretain-comments-from-system-headers") .clang_arg("-fparse-all-comments") .rust_edition(bindgen::RustEdition::Edition2024) @@ -69,14 +69,13 @@ fn bind(path: &Path, sysroot: &str) { fn main() { let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); - let sysroot = dpdk_sysroot_helper::get_sysroot(); + let library_path = env::var("LIBRARY_PATH").unwrap().to_string(); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); + println!("cargo:rustc-link-search=all={library_path}"); - // NOTE: DPDK absolutely requires whole-archive in the linking command. - // While I find this very questionable, it is what it is. - // It is just more work for the LTO later on I suppose ¯\_(ツ)_/¯ let depends = [ "dpdk_wrapper", "rte_net_virtio", @@ -127,6 +126,9 @@ fn main() { "numa", ]; + // NOTE: DPDK absolutely requires whole-archive in the linking command. + // While I find this very questionable, it is what it is. 
+ // It is just more work for the LTO later on I suppose ¯\_(ツ)_/¯ for dep in &depends { println!("cargo:rustc-link-lib=static:+whole-archive,+bundle={dep}"); } @@ -134,5 +136,5 @@ fn main() { for file in &rerun_if_changed { println!("cargo:rerun-if-changed={file}"); } - bind(&out_path, sysroot.as_str()); + bind(&out_path); } diff --git a/dpdk-sys/dpdk_wrapper.h b/dpdk-sys/dpdk_wrapper.h new file mode 100644 index 000000000..3ebfe21e7 --- /dev/null +++ b/dpdk-sys/dpdk_wrapper.h @@ -0,0 +1,1034 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Open Network Fabric Authors + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +// Things which are either duplicated, totally inapplicable or not needed +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // this is an internal header +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +/** + * Thin wrapper to expose `rte_errno`. + * + * @return + * The last rte_errno value (thread local value). + */ +int rte_errno_get(); + +/** + * TX offloads to be set in [`rte_eth_tx_mode.offloads`]. + * + * This is a bitfield. Union these to enable multiple offloads. + * + * I wrapped these because the enum must be explicitly typed as 64 bit, but + * DPDK is not yet using the C23 standard (which would allow the inheritance + * notation with `uint64_t` seen here.). 
+ */ +enum rte_eth_tx_offload : uint64_t { + TX_OFFLOAD_VLAN_INSERT = RTE_ETH_TX_OFFLOAD_VLAN_INSERT, + TX_OFFLOAD_IPV4_CKSUM = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, + TX_OFFLOAD_UDP_CKSUM = RTE_ETH_TX_OFFLOAD_UDP_CKSUM, + TX_OFFLOAD_TCP_CKSUM = RTE_ETH_TX_OFFLOAD_TCP_CKSUM, + TX_OFFLOAD_SCTP_CKSUM = RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, + TX_OFFLOAD_TCP_TSO = RTE_ETH_TX_OFFLOAD_TCP_TSO, + TX_OFFLOAD_UDP_TSO = RTE_ETH_TX_OFFLOAD_UDP_TSO, + TX_OFFLOAD_OUTER_IPV4_CKSUM = RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, + TX_OFFLOAD_QINQ_INSERT = RTE_ETH_TX_OFFLOAD_QINQ_INSERT, + TX_OFFLOAD_VXLAN_TNL_TSO = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, + TX_OFFLOAD_GRE_TNL_TSO = RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, + TX_OFFLOAD_IPIP_TNL_TSO = RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, + TX_OFFLOAD_GENEVE_TNL_TSO = RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, + TX_OFFLOAD_MACSEC_INSERT = RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, + TX_OFFLOAD_MT_LOCKFREE = RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, + TX_OFFLOAD_MULTI_SEGS = RTE_ETH_TX_OFFLOAD_MULTI_SEGS, + TX_OFFLOAD_MBUF_FAST_FREE = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, + TX_OFFLOAD_SECURITY = RTE_ETH_TX_OFFLOAD_SECURITY, + TX_OFFLOAD_UDP_TNL_TSO = RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, + TX_OFFLOAD_IP_TNL_TSO = RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, + TX_OFFLOAD_OUTER_UDP_CKSUM = RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, + TX_OFFLOAD_SEND_ON_TIMESTAMP = RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP +}; + +/** + * RX offloads to be set in [`rte_eth_rx_mode.offloads`]. + * + * This is a bitfield. Union these to enable multiple offloads. + * + * I wrapped these because the enum must be explicitly typed as 64 bit, but + * DPDK is not yet using the C23 standard (which would allow the inheritance + * notation with `uint64_t` seen here.). 
+ */ +enum rte_eth_rx_offload : uint64_t { + RX_OFFLOAD_VLAN_STRIP = RTE_ETH_RX_OFFLOAD_VLAN_STRIP, + RX_OFFLOAD_IPV4_CKSUM = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, + RX_OFFLOAD_UDP_CKSUM = RTE_ETH_RX_OFFLOAD_UDP_CKSUM, + RX_OFFLOAD_TCP_CKSUM = RTE_ETH_RX_OFFLOAD_TCP_CKSUM, + RX_OFFLOAD_TCP_LRO = RTE_ETH_RX_OFFLOAD_TCP_LRO, + RX_OFFLOAD_QINQ_STRIP = RTE_ETH_RX_OFFLOAD_QINQ_STRIP, + RX_OFFLOAD_OUTER_IPV4_CKSUM = RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, + RX_OFFLOAD_MACSEC_STRIP = RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, + RX_OFFLOAD_VLAN_FILTER = RTE_ETH_RX_OFFLOAD_VLAN_FILTER, + RX_OFFLOAD_VLAN_EXTEND = RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, + RX_OFFLOAD_SCATTER = RTE_ETH_RX_OFFLOAD_SCATTER, + RX_OFFLOAD_TIMESTAMP = RTE_ETH_RX_OFFLOAD_TIMESTAMP, + RX_OFFLOAD_SECURITY = RTE_ETH_RX_OFFLOAD_SECURITY, + RX_OFFLOAD_KEEP_CRC = RTE_ETH_RX_OFFLOAD_KEEP_CRC, + RX_OFFLOAD_SCTP_CKSUM = RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, + RX_OFFLOAD_OUTER_UDP_CKSUM = RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, + RX_OFFLOAD_RSS_HASH = RTE_ETH_RX_OFFLOAD_RSS_HASH, + RX_OFFLOAD_BUFFER_SPLIT = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT, +}; + +int rte_errno_get(); + +// Static wrappers + +void rte_atomic_thread_fence_w(rte_memory_order memorder); +int rte_atomic16_cmpset_w(uint16_t *dst, uint16_t exp, uint16_t src); +uint16_t rte_atomic16_exchange_w(uint16_t *dst, uint16_t val); +void rte_atomic16_init_w(rte_atomic16_t *v); +int16_t rte_atomic16_read_w(const rte_atomic16_t *v); +void rte_atomic16_set_w(rte_atomic16_t *v, int16_t new_value); +void rte_atomic16_add_w(rte_atomic16_t *v, int16_t inc); +void rte_atomic16_sub_w(rte_atomic16_t *v, int16_t dec); +void rte_atomic16_inc_w(rte_atomic16_t *v); +void rte_atomic16_dec_w(rte_atomic16_t *v); +int16_t rte_atomic16_add_return_w(rte_atomic16_t *v, int16_t inc); +int16_t rte_atomic16_sub_return_w(rte_atomic16_t *v, int16_t dec); +int rte_atomic16_inc_and_test_w(rte_atomic16_t *v); +int rte_atomic16_dec_and_test_w(rte_atomic16_t *v); +int rte_atomic16_test_and_set_w(rte_atomic16_t *v); +void 
rte_atomic16_clear_w(rte_atomic16_t *v); +int rte_atomic32_cmpset_w(uint32_t *dst, uint32_t exp, uint32_t src); +uint32_t rte_atomic32_exchange_w(uint32_t *dst, uint32_t val); +void rte_atomic32_init_w(rte_atomic32_t *v); +int32_t rte_atomic32_read_w(const rte_atomic32_t *v); +void rte_atomic32_set_w(rte_atomic32_t *v, int32_t new_value); +void rte_atomic32_add_w(rte_atomic32_t *v, int32_t inc); +void rte_atomic32_sub_w(rte_atomic32_t *v, int32_t dec); +void rte_atomic32_inc_w(rte_atomic32_t *v); +void rte_atomic32_dec_w(rte_atomic32_t *v); +int32_t rte_atomic32_add_return_w(rte_atomic32_t *v, int32_t inc); +int32_t rte_atomic32_sub_return_w(rte_atomic32_t *v, int32_t dec); +int rte_atomic32_inc_and_test_w(rte_atomic32_t *v); +int rte_atomic32_dec_and_test_w(rte_atomic32_t *v); +int rte_atomic32_test_and_set_w(rte_atomic32_t *v); +void rte_atomic32_clear_w(rte_atomic32_t *v); +int rte_atomic64_cmpset_w(uint64_t *dst, uint64_t exp, uint64_t src); +uint64_t rte_atomic64_exchange_w(uint64_t *dst, uint64_t val); +void rte_atomic64_init_w(rte_atomic64_t *v); +int64_t rte_atomic64_read_w(rte_atomic64_t *v); +void rte_atomic64_set_w(rte_atomic64_t *v, int64_t new_value); +void rte_atomic64_add_w(rte_atomic64_t *v, int64_t inc); +void rte_atomic64_sub_w(rte_atomic64_t *v, int64_t dec); +void rte_atomic64_inc_w(rte_atomic64_t *v); +void rte_atomic64_dec_w(rte_atomic64_t *v); +int64_t rte_atomic64_add_return_w(rte_atomic64_t *v, int64_t inc); +int64_t rte_atomic64_sub_return_w(rte_atomic64_t *v, int64_t dec); +int rte_atomic64_inc_and_test_w(rte_atomic64_t *v); +int rte_atomic64_dec_and_test_w(rte_atomic64_t *v); +int rte_atomic64_test_and_set_w(rte_atomic64_t *v); +void rte_atomic64_clear_w(rte_atomic64_t *v); +void rte_smp_mb_w(void); +uint64_t rte_get_tsc_cycles_w(void); +uint64_t rte_get_timer_cycles_w(void); +uint64_t rte_get_timer_hz_w(void); +void rte_delay_ms_w(unsigned int ms); +uint64_t rte_rdtsc_w(void); +uint64_t rte_rdtsc_precise_w(void); +size_t 
rte_strlcpy_w(char *dst, const char *src, size_t size); +size_t rte_strlcat_w(char *dst, const char *src, size_t size); +const char *rte_str_skip_leading_spaces_w(const char *src); +void rte_uuid_copy_w(rte_uuid_t dst, const rte_uuid_t src); +int rte_gettid_w(void); +unsigned int rte_lcore_id_w(void); +void rte_pause_w(void); +void rte_wait_until_equal_16_w(uint16_t *addr, uint16_t expected, + rte_memory_order memorder); +void rte_wait_until_equal_32_w(uint32_t *addr, uint32_t expected, + rte_memory_order memorder); +void rte_wait_until_equal_64_w(uint64_t *addr, uint64_t expected, + rte_memory_order memorder); +void rte_spinlock_init_w(rte_spinlock_t *sl); +void rte_spinlock_lock_w(rte_spinlock_t *sl); +void rte_spinlock_unlock_w(rte_spinlock_t *sl); +int rte_spinlock_trylock_w(rte_spinlock_t *sl); +int rte_spinlock_is_locked_w(rte_spinlock_t *sl); +int rte_tm_supported_w(void); +void rte_spinlock_lock_tm_w(rte_spinlock_t *sl); +void rte_spinlock_unlock_tm_w(rte_spinlock_t *sl); +int rte_spinlock_trylock_tm_w(rte_spinlock_t *sl); +void rte_spinlock_recursive_init_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_lock_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_unlock_w(rte_spinlock_recursive_t *slr); +int rte_spinlock_recursive_trylock_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_lock_tm_w(rte_spinlock_recursive_t *slr); +void rte_spinlock_recursive_unlock_tm_w(rte_spinlock_recursive_t *slr); +int rte_spinlock_recursive_trylock_tm_w(rte_spinlock_recursive_t *slr); +uint32_t rte_bit_relaxed_get32_w(unsigned int nr, uint32_t *addr); +void rte_bit_relaxed_set32_w(unsigned int nr, uint32_t *addr); +void rte_bit_relaxed_clear32_w(unsigned int nr, uint32_t *addr); +uint32_t rte_bit_relaxed_test_and_set32_w(unsigned int nr, uint32_t *addr); +uint32_t rte_bit_relaxed_test_and_clear32_w(unsigned int nr, uint32_t *addr); +uint64_t rte_bit_relaxed_get64_w(unsigned int nr, uint64_t *addr); +void 
rte_bit_relaxed_set64_w(unsigned int nr, uint64_t *addr); +void rte_bit_relaxed_clear64_w(unsigned int nr, uint64_t *addr); +uint64_t rte_bit_relaxed_test_and_set64_w(unsigned int nr, uint64_t *addr); +uint64_t rte_bit_relaxed_test_and_clear64_w(unsigned int nr, uint64_t *addr); +unsigned int rte_clz32_w(uint32_t v); +unsigned int rte_clz64_w(uint64_t v); +unsigned int rte_ctz32_w(uint32_t v); +unsigned int rte_ctz64_w(uint64_t v); +unsigned int rte_popcount32_w(uint32_t v); +unsigned int rte_popcount64_w(uint64_t v); +uint32_t rte_combine32ms1b_w(uint32_t x); +uint64_t rte_combine64ms1b_w(uint64_t v); +uint32_t rte_bsf32_w(uint32_t v); +int rte_bsf32_safe_w(uint32_t v, uint32_t *pos); +uint32_t rte_bsf64_w(uint64_t v); +int rte_bsf64_safe_w(uint64_t v, uint32_t *pos); +uint32_t rte_fls_u32_w(uint32_t x); +uint32_t rte_fls_u64_w(uint64_t x); +int rte_is_power_of_2_w(uint32_t n); +uint32_t rte_align32pow2_w(uint32_t x); +uint32_t rte_align32prevpow2_w(uint32_t x); +uint64_t rte_align64pow2_w(uint64_t v); +uint64_t rte_align64prevpow2_w(uint64_t v); +uint32_t rte_log2_u32_w(uint32_t v); +uint32_t rte_log2_u64_w(uint64_t v); +void rte_rwlock_init_w(rte_rwlock_t *rwl); +void rte_rwlock_read_lock_w(rte_rwlock_t *rwl); +int rte_rwlock_read_trylock_w(rte_rwlock_t *rwl); +void rte_rwlock_read_unlock_w(rte_rwlock_t *rwl); +int rte_rwlock_write_trylock_w(rte_rwlock_t *rwl); +void rte_rwlock_write_lock_w(rte_rwlock_t *rwl); +void rte_rwlock_write_unlock_w(rte_rwlock_t *rwl); +int rte_rwlock_write_is_locked_w(rte_rwlock_t *rwl); +void rte_rwlock_read_lock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_read_unlock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_write_lock_tm_w(rte_rwlock_t *rwl); +void rte_rwlock_write_unlock_tm_w(rte_rwlock_t *rwl); +unsigned int rte_ring_mp_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_sp_enqueue_bulk_elem_w(struct rte_ring *r, + const void 
*obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mp_hts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_hts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_hts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_hts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_hts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_hts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_hts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_hts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_rts_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_rts_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_rts_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_rts_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_rts_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, + 
unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_rts_dequeue_bulk_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); +unsigned int rte_ring_mp_rts_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_rts_dequeue_burst_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); +uint32_t rte_ring_get_prod_htd_max_w(const struct rte_ring *r); +int rte_ring_set_prod_htd_max_w(struct rte_ring *r, uint32_t v); +uint32_t rte_ring_get_cons_htd_max_w(const struct rte_ring *r); +int rte_ring_set_cons_htd_max_w(struct rte_ring *r, uint32_t v); +unsigned int rte_ring_enqueue_bulk_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space); +int rte_ring_mp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize); +int rte_ring_sp_enqueue_elem_w(struct rte_ring *r, void *obj, + unsigned int esize); +int rte_ring_enqueue_elem_w(struct rte_ring *r, void *obj, unsigned int esize); +unsigned int rte_ring_mc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available); +unsigned int rte_ring_sc_dequeue_bulk_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_bulk_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available); +int rte_ring_mc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize); +int rte_ring_sc_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize); +int rte_ring_dequeue_elem_w(struct rte_ring *r, void *obj_p, + unsigned int esize); +unsigned int rte_ring_mp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int 
rte_ring_sp_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_enqueue_burst_elem_w(struct rte_ring *r, + const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_sc_dequeue_burst_elem_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_burst_elem_w(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available); +unsigned int rte_ring_enqueue_bulk_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_enqueue_bulk_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_enqueue_burst_elem_start_w(struct rte_ring *r, + unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_enqueue_burst_start_w(struct rte_ring *r, unsigned int n, + unsigned int *free_space); +void rte_ring_enqueue_elem_finish_w(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n); +void rte_ring_enqueue_finish_w(struct rte_ring *r, void *const *obj_table, + unsigned int n); +unsigned int rte_ring_dequeue_bulk_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_bulk_start_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_burst_elem_start_w(struct rte_ring *r, + void *obj_table, + unsigned int esize, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_burst_start_w(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); +void 
rte_ring_dequeue_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_dequeue_finish_w(struct rte_ring *r, unsigned int n); +unsigned int rte_ring_enqueue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space); +unsigned int rte_ring_enqueue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space); +unsigned int rte_ring_enqueue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *free_space); +unsigned int rte_ring_enqueue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *free_space); +void rte_ring_enqueue_zc_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_enqueue_zc_finish_w(struct rte_ring *r, unsigned int n); +unsigned int rte_ring_dequeue_zc_bulk_elem_start_w(struct rte_ring *r, + unsigned int esize, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available); +unsigned int rte_ring_dequeue_zc_bulk_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available); +unsigned int rte_ring_dequeue_zc_burst_elem_start_w( + struct rte_ring *r, unsigned int esize, unsigned int n, + struct rte_ring_zc_data *zcd, unsigned int *available); +unsigned int rte_ring_dequeue_zc_burst_start_w(struct rte_ring *r, + unsigned int n, + struct rte_ring_zc_data *zcd, + unsigned int *available); +void rte_ring_dequeue_zc_elem_finish_w(struct rte_ring *r, unsigned int n); +void rte_ring_dequeue_zc_finish_w(struct rte_ring *r, unsigned int n); +unsigned int rte_ring_mp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_sp_enqueue_bulk_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space); +unsigned int 
rte_ring_enqueue_bulk_w(struct rte_ring *r, void *const *obj_table, + unsigned int n, unsigned int *free_space); +int rte_ring_mp_enqueue_w(struct rte_ring *r, void *obj); +int rte_ring_sp_enqueue_w(struct rte_ring *r, void *obj); +int rte_ring_enqueue_w(struct rte_ring *r, void *obj); +unsigned int rte_ring_mc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_sc_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_bulk_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available); +int rte_ring_mc_dequeue_w(struct rte_ring *r, void **obj_p); +int rte_ring_sc_dequeue_w(struct rte_ring *r, void **obj_p); +int rte_ring_dequeue_w(struct rte_ring *r, void **obj_p); +unsigned int rte_ring_count_w(const struct rte_ring *r); +unsigned int rte_ring_free_count_w(const struct rte_ring *r); +int rte_ring_full_w(const struct rte_ring *r); +int rte_ring_empty_w(const struct rte_ring *r); +unsigned int rte_ring_get_size_w(const struct rte_ring *r); +unsigned int rte_ring_get_capacity_w(const struct rte_ring *r); +enum rte_ring_sync_type rte_ring_get_prod_sync_type_w(const struct rte_ring *r); +int rte_ring_is_prod_single_w(const struct rte_ring *r); +enum rte_ring_sync_type rte_ring_get_cons_sync_type_w(const struct rte_ring *r); +int rte_ring_is_cons_single_w(const struct rte_ring *r); +unsigned int rte_ring_mp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_sp_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_enqueue_burst_w(struct rte_ring *r, + void *const *obj_table, unsigned int n, + unsigned int *free_space); +unsigned int rte_ring_mc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available); +unsigned int 
rte_ring_sc_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, + unsigned int *available); +unsigned int rte_ring_dequeue_burst_w(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available); +void *rte_memcpy_w(void *dst, const void *src, size_t n); +void rte_mov16_w(uint8_t *dst, const uint8_t *src); +void rte_mov32_w(uint8_t *dst, const uint8_t *src); +void rte_mov64_w(uint8_t *dst, const uint8_t *src); +void rte_mov256_w(uint8_t *dst, const uint8_t *src); +struct rte_mempool_objhdr *rte_mempool_get_header_w(void *obj); +struct rte_mempool *rte_mempool_from_obj_w(void *obj); +struct rte_mempool_objtlr *rte_mempool_get_trailer_w(void *obj); +struct rte_mempool_ops *rte_mempool_get_ops_w(int ops_index); +int rte_mempool_ops_dequeue_bulk_w(struct rte_mempool *mp, void **obj_table, + unsigned int n); +int rte_mempool_ops_dequeue_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, + unsigned int n); +int rte_mempool_ops_enqueue_bulk_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n); +struct rte_mempool_cache *rte_mempool_default_cache_w(struct rte_mempool *mp, + unsigned int lcore_id); +void rte_mempool_cache_flush_w(struct rte_mempool_cache *cache, + struct rte_mempool *mp); +void rte_mempool_do_generic_put_w(struct rte_mempool *mp, + void *const *obj_table, unsigned int n, + struct rte_mempool_cache *cache); +void rte_mempool_generic_put_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n, struct rte_mempool_cache *cache); +void rte_mempool_put_bulk_w(struct rte_mempool *mp, void *const *obj_table, + unsigned int n); +void rte_mempool_put_w(struct rte_mempool *mp, void *obj); +int rte_mempool_do_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, + struct rte_mempool_cache *cache); +int rte_mempool_generic_get_w(struct rte_mempool *mp, void **obj_table, + unsigned int n, struct rte_mempool_cache *cache); +int rte_mempool_get_bulk_w(struct rte_mempool *mp, 
void **obj_table, + unsigned int n); +int rte_mempool_get_w(struct rte_mempool *mp, void **obj_p); +int rte_mempool_get_contig_blocks_w(struct rte_mempool *mp, + void **first_obj_table, unsigned int n); +int rte_mempool_full_w(const struct rte_mempool *mp); +int rte_mempool_empty_w(const struct rte_mempool *mp); +rte_iova_t rte_mempool_virt2iova_w(const void *elt); +void *rte_mempool_get_priv_w(struct rte_mempool *mp); +void rte_prefetch0_w(const void *p); +void rte_prefetch1_w(const void *p); +void rte_prefetch2_w(const void *p); +void rte_prefetch_non_temporal_w(const void *p); +void rte_prefetch0_write_w(const void *p); +void rte_prefetch1_write_w(const void *p); +void rte_prefetch2_write_w(const void *p); +void rte_cldemote_w(const void *p); +uint16_t rte_constant_bswap16_w(uint16_t x); +uint32_t rte_constant_bswap32_w(uint32_t x); +uint64_t rte_constant_bswap64_w(uint64_t x); +void rte_mbuf_prefetch_part1_w(struct rte_mbuf *m); +void rte_mbuf_prefetch_part2_w(struct rte_mbuf *m); +uint16_t rte_pktmbuf_priv_size_w(struct rte_mempool *mp); +rte_iova_t rte_mbuf_iova_get_w(const struct rte_mbuf *m); +void rte_mbuf_iova_set_w(struct rte_mbuf *m, rte_iova_t iova); +rte_iova_t rte_mbuf_data_iova_w(const struct rte_mbuf *mb); +rte_iova_t rte_mbuf_data_iova_default_w(const struct rte_mbuf *mb); +struct rte_mbuf *rte_mbuf_from_indirect_w(struct rte_mbuf *mi); +char *rte_mbuf_buf_addr_w(struct rte_mbuf *mb, struct rte_mempool *mp); +char *rte_mbuf_data_addr_default_w(struct rte_mbuf *mb); +char *rte_mbuf_to_baddr_w(struct rte_mbuf *md); +void *rte_mbuf_to_priv_w(struct rte_mbuf *m); +uint32_t rte_pktmbuf_priv_flags_w(struct rte_mempool *mp); +uint16_t rte_mbuf_refcnt_read_w(const struct rte_mbuf *m); +void rte_mbuf_refcnt_set_w(struct rte_mbuf *m, uint16_t new_value); +uint16_t rte_mbuf_refcnt_update_w(struct rte_mbuf *m, int16_t value); +uint16_t +rte_mbuf_ext_refcnt_read_w(const struct rte_mbuf_ext_shared_info *shinfo); +void rte_mbuf_ext_refcnt_set_w(struct 
rte_mbuf_ext_shared_info *shinfo, + uint16_t new_value); +uint16_t rte_mbuf_ext_refcnt_update_w(struct rte_mbuf_ext_shared_info *shinfo, + int16_t value); +struct rte_mbuf *rte_mbuf_raw_alloc_w(struct rte_mempool *mp); +void rte_mbuf_raw_free_w(struct rte_mbuf *m); +uint16_t rte_pktmbuf_data_room_size_w(struct rte_mempool *mp); +void rte_pktmbuf_reset_headroom_w(struct rte_mbuf *m); +void rte_pktmbuf_reset_w(struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_alloc_w(struct rte_mempool *mp); +int rte_pktmbuf_alloc_bulk_w(struct rte_mempool *pool, struct rte_mbuf **mbufs, + unsigned int count); +struct rte_mbuf_ext_shared_info * +rte_pktmbuf_ext_shinfo_init_helper_w(void *buf_addr, uint16_t *buf_len, + rte_mbuf_extbuf_free_callback_t free_cb, + void *fcb_opaque); +void rte_pktmbuf_attach_extbuf_w(struct rte_mbuf *m, void *buf_addr, + rte_iova_t buf_iova, uint16_t buf_len, + struct rte_mbuf_ext_shared_info *shinfo); +void rte_mbuf_dynfield_copy_w(struct rte_mbuf *mdst, + const struct rte_mbuf *msrc); +void rte_pktmbuf_attach_w(struct rte_mbuf *mi, struct rte_mbuf *m); +void rte_pktmbuf_detach_w(struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_prefree_seg_w(struct rte_mbuf *m); +void rte_pktmbuf_free_seg_w(struct rte_mbuf *m); +void rte_pktmbuf_free_w(struct rte_mbuf *m); +void rte_pktmbuf_refcnt_update_w(struct rte_mbuf *m, int16_t v); +uint16_t rte_pktmbuf_headroom_w(const struct rte_mbuf *m); +uint16_t rte_pktmbuf_tailroom_w(const struct rte_mbuf *m); +struct rte_mbuf *rte_pktmbuf_lastseg_w(struct rte_mbuf *m); +char *rte_pktmbuf_prepend_w(struct rte_mbuf *m, uint16_t len); +char *rte_pktmbuf_append_w(struct rte_mbuf *m, uint16_t len); +char *rte_pktmbuf_adj_w(struct rte_mbuf *m, uint16_t len); +int rte_pktmbuf_trim_w(struct rte_mbuf *m, uint16_t len); +int rte_pktmbuf_is_contiguous_w(const struct rte_mbuf *m); +const void *rte_pktmbuf_read_w(const struct rte_mbuf *m, uint32_t off, + uint32_t len, void *buf); +int rte_pktmbuf_chain_w(struct rte_mbuf *head, struct 
rte_mbuf *tail); +uint64_t rte_mbuf_tx_offload_w(uint64_t il2, uint64_t il3, uint64_t il4, + uint64_t tso, uint64_t ol3, uint64_t ol2, + uint64_t unused); +int rte_validate_tx_offload_w(const struct rte_mbuf *m); +int rte_pktmbuf_linearize_w(struct rte_mbuf *mbuf); +uint32_t rte_mbuf_sched_queue_get_w(const struct rte_mbuf *m); +uint8_t rte_mbuf_sched_traffic_class_get_w(const struct rte_mbuf *m); +uint8_t rte_mbuf_sched_color_get_w(const struct rte_mbuf *m); +void rte_mbuf_sched_get_w(const struct rte_mbuf *m, uint32_t *queue_id, + uint8_t *traffic_class, uint8_t *color); +void rte_mbuf_sched_queue_set_w(struct rte_mbuf *m, uint32_t queue_id); +void rte_mbuf_sched_traffic_class_set_w(struct rte_mbuf *m, + uint8_t traffic_class); +void rte_mbuf_sched_color_set_w(struct rte_mbuf *m, uint8_t color); +void rte_mbuf_sched_set_w(struct rte_mbuf *m, uint32_t queue_id, + uint8_t traffic_class, uint8_t color); +int rte_is_same_ether_addr_w(const struct rte_ether_addr *ea1, + const struct rte_ether_addr *ea2); +int rte_is_zero_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_unicast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_multicast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_broadcast_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_universal_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_local_admin_ether_addr_w(const struct rte_ether_addr *ea); +int rte_is_valid_assigned_ether_addr_w(const struct rte_ether_addr *ea); +void rte_ether_addr_copy_w(const struct rte_ether_addr *ea_from, + struct rte_ether_addr *ea_to); +int rte_vlan_strip_w(struct rte_mbuf *m); +int rte_vlan_insert_w(struct rte_mbuf **m); +uint32_t rte_bitmap_get_memory_footprint_w(uint32_t n_bits); +struct rte_bitmap *rte_bitmap_init_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size); +struct rte_bitmap *rte_bitmap_init_with_all_set_w(uint32_t n_bits, uint8_t *mem, + uint32_t mem_size); +void rte_bitmap_free_w(struct rte_bitmap *bmp); +void 
rte_bitmap_reset_w(struct rte_bitmap *bmp); +void rte_bitmap_prefetch0_w(struct rte_bitmap *bmp, uint32_t pos); +uint64_t rte_bitmap_get_w(struct rte_bitmap *bmp, uint32_t pos); +void rte_bitmap_set_w(struct rte_bitmap *bmp, uint32_t pos); +void rte_bitmap_set_slab_w(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab); +void rte_bitmap_clear_w(struct rte_bitmap *bmp, uint32_t pos); +int rte_bitmap_scan_w(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab); +uint16_t rte_raw_cksum_w(const void *buf, size_t len); +int rte_raw_cksum_mbuf_w(const struct rte_mbuf *m, uint32_t off, uint32_t len, + uint16_t *cksum); +uint8_t rte_ipv4_hdr_len_w(const struct rte_ipv4_hdr *ipv4_hdr); +uint16_t rte_ipv4_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr); +uint16_t rte_ipv4_cksum_simple_w(const struct rte_ipv4_hdr *ipv4_hdr); +uint16_t rte_ipv4_phdr_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + uint64_t ol_flags); +uint16_t rte_ipv4_udptcp_cksum_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr); +uint16_t rte_ipv4_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off); +int rte_ipv4_udptcp_cksum_verify_w(const struct rte_ipv4_hdr *ipv4_hdr, + const void *l4_hdr); +int rte_ipv4_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv4_hdr *ipv4_hdr, + uint16_t l4_off); +bool rte_ipv6_addr_eq_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b); +void rte_ipv6_addr_mask_w(struct rte_ipv6_addr *ip, uint8_t depth); +bool rte_ipv6_addr_eq_prefix_w(const struct rte_ipv6_addr *a, + const struct rte_ipv6_addr *b, uint8_t depth); +uint8_t rte_ipv6_mask_depth_w(const struct rte_ipv6_addr *mask); +bool rte_ipv6_addr_is_unspec_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_loopback_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_linklocal_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_sitelocal_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_v4compat_w(const 
struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_v4mapped_w(const struct rte_ipv6_addr *ip); +bool rte_ipv6_addr_is_mcast_w(const struct rte_ipv6_addr *ip); +enum rte_ipv6_mc_scope rte_ipv6_mc_scope_w(const struct rte_ipv6_addr *ip); +void rte_ipv6_llocal_from_ethernet_w(struct rte_ipv6_addr *ip, + const struct rte_ether_addr *mac); +void rte_ipv6_solnode_from_addr_w(struct rte_ipv6_addr *sol, + const struct rte_ipv6_addr *ip); +void rte_ether_mcast_from_ipv6_w(struct rte_ether_addr *mac, + const struct rte_ipv6_addr *ip); +int rte_ipv6_check_version_w(const struct rte_ipv6_hdr *ip); +uint16_t rte_ipv6_phdr_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + uint64_t ol_flags); +uint16_t rte_ipv6_udptcp_cksum_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr); +uint16_t rte_ipv6_udptcp_cksum_mbuf_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off); +int rte_ipv6_udptcp_cksum_verify_w(const struct rte_ipv6_hdr *ipv6_hdr, + const void *l4_hdr); +int rte_ipv6_udptcp_cksum_mbuf_verify_w(const struct rte_mbuf *m, + const struct rte_ipv6_hdr *ipv6_hdr, + uint16_t l4_off); +int rte_ipv6_get_next_ext_w(const uint8_t *p, int proto, size_t *ext_len); +enum rte_color +rte_meter_srtcm_color_blind_check_w(struct rte_meter_srtcm *m, + struct rte_meter_srtcm_profile *p, + uint64_t time, uint32_t pkt_len); +enum rte_color rte_meter_srtcm_color_aware_check_w( + struct rte_meter_srtcm *m, struct rte_meter_srtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color); +enum rte_color +rte_meter_trtcm_color_blind_check_w(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p, + uint64_t time, uint32_t pkt_len); +enum rte_color rte_meter_trtcm_color_aware_check_w( + struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, + uint32_t pkt_len, enum rte_color pkt_color); +enum rte_color rte_meter_trtcm_rfc4115_color_blind_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct 
rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len); +enum rte_color rte_meter_trtcm_rfc4115_color_aware_check_w( + struct rte_meter_trtcm_rfc4115 *m, + struct rte_meter_trtcm_rfc4115_profile *p, uint64_t time, uint32_t pkt_len, + enum rte_color pkt_color); +uint64_t rte_eth_rss_hf_refine_w(uint64_t rss_hf); +uint16_t rte_eth_rx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, const uint16_t nb_pkts); +int rte_eth_rx_queue_count_w(uint16_t port_id, uint16_t queue_id); +int rte_eth_rx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset); +int rte_eth_tx_descriptor_status_w(uint16_t port_id, uint16_t queue_id, + uint16_t offset); +uint16_t rte_eth_tx_burst_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t rte_eth_tx_prepare_w(uint16_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t rte_eth_tx_buffer_flush_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer); +uint16_t rte_eth_tx_buffer_w(uint16_t port_id, uint16_t queue_id, + struct rte_eth_dev_tx_buffer *buffer, + struct rte_mbuf *tx_pkt); +uint16_t +rte_eth_recycle_mbufs_w(uint16_t rx_port_id, uint16_t rx_queue_id, + uint16_t tx_port_id, uint16_t tx_queue_id, + struct rte_eth_recycle_rxq_info *recycle_rxq_info); +int rte_eth_tx_queue_count_w(uint16_t port_id, uint16_t queue_id); +uint32_t rte_flow_dynf_metadata_get_w(struct rte_mbuf *m); +void rte_flow_dynf_metadata_set_w(struct rte_mbuf *m, uint32_t v); +int rte_flow_dynf_metadata_avail_w(void); +uint32_t rte_hash_crc_1byte_w(uint8_t data, uint32_t init_val); +uint32_t rte_hash_crc_2byte_w(uint16_t data, uint32_t init_val); +uint32_t rte_hash_crc_4byte_w(uint32_t data, uint32_t init_val); +uint32_t rte_hash_crc_8byte_w(uint64_t data, uint32_t init_val); +uint32_t rte_hash_crc_w(const void *data, uint32_t data_len, uint32_t init_val); +void rte_jhash_2hashes_w(const void *key, uint32_t 
length, uint32_t *pc, + uint32_t *pb); +void rte_jhash_32b_2hashes_w(const uint32_t *k, uint32_t length, uint32_t *pc, + uint32_t *pb); +uint32_t rte_jhash_w(const void *key, uint32_t length, uint32_t initval); +uint32_t rte_jhash_32b_w(const uint32_t *k, uint32_t length, uint32_t initval); +uint32_t rte_jhash_3words_w(uint32_t a, uint32_t b, uint32_t c, + uint32_t initval); +uint32_t rte_jhash_2words_w(uint32_t a, uint32_t b, uint32_t initval); +uint32_t rte_jhash_1word_w(uint32_t a, uint32_t initval); +uint32_t rte_fbk_hash_get_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key); +int rte_fbk_hash_add_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value, + uint32_t bucket); +int rte_fbk_hash_add_key_w(struct rte_fbk_hash_table *ht, uint32_t key, + uint16_t value); +int rte_fbk_hash_delete_key_with_bucket_w(struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket); +int rte_fbk_hash_delete_key_w(struct rte_fbk_hash_table *ht, uint32_t key); +int rte_fbk_hash_lookup_with_bucket_w(const struct rte_fbk_hash_table *ht, + uint32_t key, uint32_t bucket); +int rte_fbk_hash_lookup_w(const struct rte_fbk_hash_table *ht, uint32_t key); +void rte_fbk_hash_clear_all_w(struct rte_fbk_hash_table *ht); +double rte_fbk_hash_get_load_factor_w(struct rte_fbk_hash_table *ht); +void rte_rcu_qsbr_thread_online_w(struct rte_rcu_qsbr *v, + unsigned int thread_id); +void rte_rcu_qsbr_thread_offline_w(struct rte_rcu_qsbr *v, + unsigned int thread_id); +void rte_rcu_qsbr_lock_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +void rte_rcu_qsbr_unlock_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +uint64_t rte_rcu_qsbr_start_w(struct rte_rcu_qsbr *v); +void rte_rcu_qsbr_quiescent_w(struct rte_rcu_qsbr *v, unsigned int thread_id); +int rte_rcu_qsbr_check_w(struct rte_rcu_qsbr *v, uint64_t t, bool wait); +uint8_t rte_read8_relaxed_w(const void *addr); +uint16_t rte_read16_relaxed_w(const void *addr); +uint32_t rte_read32_relaxed_w(const void 
*addr); +uint64_t rte_read64_relaxed_w(const void *addr); +void rte_write8_relaxed_w(uint8_t value, void *addr); +void rte_write16_relaxed_w(uint16_t value, void *addr); +void rte_write32_relaxed_w(uint32_t value, void *addr); +void rte_write64_relaxed_w(uint64_t value, void *addr); +uint8_t rte_read8_w(const void *addr); +uint16_t rte_read16_w(const void *addr); +uint32_t rte_read32_w(const void *addr); +uint64_t rte_read64_w(const void *addr); +void rte_write8_w(uint8_t value, void *addr); +void rte_write16_w(uint16_t value, void *addr); +void rte_write32_w(uint32_t value, void *addr); +void rte_write64_w(uint64_t value, void *addr); +void rte_write32_wc_relaxed_w(uint32_t value, void *addr); +void rte_write32_wc_w(uint32_t value, void *addr); +void rte_mcslock_lock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +void rte_mcslock_unlock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +int rte_mcslock_trylock_w(rte_mcslock_t **msl, rte_mcslock_t *me); +int rte_mcslock_is_locked_w(rte_mcslock_t *msl); +void rte_pflock_init_w(struct rte_pflock *pf); +void rte_pflock_read_lock_w(rte_pflock_t *pf); +void rte_pflock_read_unlock_w(rte_pflock_t *pf); +void rte_pflock_write_lock_w(rte_pflock_t *pf); +void rte_pflock_write_unlock_w(rte_pflock_t *pf); +uint32_t rte_reciprocal_divide_w(uint32_t a, struct rte_reciprocal R); +uint64_t rte_reciprocal_divide_u64_w(uint64_t a, + const struct rte_reciprocal_u64 *R); +void rte_seqcount_init_w(rte_seqcount_t *seqcount); +uint32_t rte_seqcount_read_begin_w(const rte_seqcount_t *seqcount); +bool rte_seqcount_read_retry_w(const rte_seqcount_t *seqcount, + uint32_t begin_sn); +void rte_seqcount_write_begin_w(rte_seqcount_t *seqcount); +void rte_seqcount_write_end_w(rte_seqcount_t *seqcount); +void rte_seqlock_init_w(rte_seqlock_t *seqlock); +uint32_t rte_seqlock_read_begin_w(const rte_seqlock_t *seqlock); +bool rte_seqlock_read_retry_w(const rte_seqlock_t *seqlock, uint32_t begin_sn); +void rte_seqlock_write_lock_w(rte_seqlock_t *seqlock); +void 
rte_seqlock_write_unlock_w(rte_seqlock_t *seqlock); +unsigned int rte_stack_push_w(struct rte_stack *s, void *const *obj_table, + unsigned int n); +unsigned int rte_stack_pop_w(struct rte_stack *s, void **obj_table, + unsigned int n); +unsigned int rte_stack_count_w(struct rte_stack *s); +unsigned int rte_stack_free_count_w(struct rte_stack *s); +uint32_t rte_softrss_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key); +uint32_t rte_softrss_be_w(uint32_t *input_tuple, uint32_t input_len, + const uint8_t *rss_key); +void rte_ticketlock_init_w(rte_ticketlock_t *tl); +void rte_ticketlock_lock_w(rte_ticketlock_t *tl); +void rte_ticketlock_unlock_w(rte_ticketlock_t *tl); +int rte_ticketlock_trylock_w(rte_ticketlock_t *tl); +int rte_ticketlock_is_locked_w(rte_ticketlock_t *tl); +void rte_ticketlock_recursive_init_w(rte_ticketlock_recursive_t *tlr); +void rte_ticketlock_recursive_lock_w(rte_ticketlock_recursive_t *tlr); +void rte_ticketlock_recursive_unlock_w(rte_ticketlock_recursive_t *tlr); +int rte_ticketlock_recursive_trylock_w(rte_ticketlock_recursive_t *tlr); +uint64_t rte_cyclecounter_cycles_to_ns_w(struct rte_timecounter *tc, + uint64_t cycles); +uint64_t rte_timecounter_update_w(struct rte_timecounter *tc, + uint64_t cycle_now); +uint64_t rte_timespec_to_ns_w(const struct timespec *ts); +struct timespec rte_ns_to_timespec_w(uint64_t nsec); +bool rte_trace_feature_is_enabled_w(void); diff --git a/dpdk/build.rs b/dpdk/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/dpdk/build.rs +++ b/dpdk/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/hardware/Cargo.toml b/hardware/Cargo.toml index 7d4fbf476..b4f469ce1 100644 --- 
a/hardware/Cargo.toml +++ b/hardware/Cargo.toml @@ -36,9 +36,9 @@ tracing = { workspace = true, features = ["std"] } [dev-dependencies] # internal -fixin = { workspace = true, features = [] } +# fixin = { workspace = true, features = [] } id = { workspace = true, features = ["serde", "rkyv"] } -n-vm = { workspace = true } +# n-vm = { workspace = true } test-utils = { workspace = true, features = [] } # external diff --git a/hardware/build.rs b/hardware/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/hardware/build.rs +++ b/hardware/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/hardware/src/scan.rs b/hardware/src/scan.rs index ef8899164..1e20b49a9 100644 --- a/hardware/src/scan.rs +++ b/hardware/src/scan.rs @@ -225,82 +225,82 @@ mod test { support::{SupportedDevice, SupportedVendor}, }; - #[test] - #[n_vm::in_vm] - fn collect_them_all_and_bind_them() { - let system = Node::scan_all(); - let nics: Vec<_> = system - .iter() - .filter_map(|node| match node.attributes() { - Some(NodeAttributes::Pci(dev)) => { - if dev.vendor_id() == SupportedVendor::RedHat.vendor_id() - && SupportedDevice::VirtioNet - .device_ids() - .contains(&dev.device_id()) - { - let mut nic = PciNic::new(dev.address()).unwrap(); - nic.bind_to_vfio_pci().unwrap(); - Some(nic) - } else { - None - } - } - _ => None, - }) - .collect(); - assert_eq!(nics.len(), 3, "expected exactly 3 virtio network cards"); - } + // #[test] + // #[n_vm::in_vm] + // fn collect_them_all_and_bind_them() { + // let system = Node::scan_all(); + // let nics: Vec<_> = system + // .iter() + // .filter_map(|node| match node.attributes() { + // Some(NodeAttributes::Pci(dev)) => { + // if dev.vendor_id() == 
SupportedVendor::RedHat.vendor_id() + // && SupportedDevice::VirtioNet + // .device_ids() + // .contains(&dev.device_id()) + // { + // let mut nic = PciNic::new(dev.address()).unwrap(); + // nic.bind_to_vfio_pci().unwrap(); + // Some(nic) + // } else { + // None + // } + // } + // _ => None, + // }) + // .collect(); + // assert_eq!(nics.len(), 3, "expected exactly 3 virtio network cards"); + // } - #[test] - #[n_vm::in_vm] - fn bind_fabric_nics_and_skip_mgmt_nic() { - let system = Node::scan_all(); - let mgmt_nic_pci_address = "0000:00:02.0".try_into().unwrap(); - let nics: Vec<_> = system - .iter() - .filter_map(|node| match node.attributes() { - Some(NodeAttributes::Pci(dev)) => { - if dev.vendor_id() == SupportedVendor::RedHat.vendor_id() - && SupportedDevice::VirtioNet - .device_ids() - .contains(&dev.device_id()) - && dev.address() != mgmt_nic_pci_address - { - let mut nic = PciNic::new(dev.address()).unwrap(); - nic.bind_to_vfio_pci().unwrap(); - Some(nic) - } else { - None - } - } - _ => None, - }) - .collect(); - assert_eq!(nics.len(), 2, "expected exactly 2 virtio network cards"); - } + // #[test] + // #[n_vm::in_vm] + // fn bind_fabric_nics_and_skip_mgmt_nic() { + // let system = Node::scan_all(); + // let mgmt_nic_pci_address = "0000:00:02.0".try_into().unwrap(); + // let nics: Vec<_> = system + // .iter() + // .filter_map(|node| match node.attributes() { + // Some(NodeAttributes::Pci(dev)) => { + // if dev.vendor_id() == SupportedVendor::RedHat.vendor_id() + // && SupportedDevice::VirtioNet + // .device_ids() + // .contains(&dev.device_id()) + // && dev.address() != mgmt_nic_pci_address + // { + // let mut nic = PciNic::new(dev.address()).unwrap(); + // nic.bind_to_vfio_pci().unwrap(); + // Some(nic) + // } else { + // None + // } + // } + // _ => None, + // }) + // .collect(); + // assert_eq!(nics.len(), 2, "expected exactly 2 virtio network cards"); + // } - #[test] - #[n_vm::in_vm] - fn bind_nic_test() { - let system = Node::scan_all(); - let 
target_pci_address = "0001:00:02.0".try_into().unwrap(); - let Some(mut nic) = system.iter().find_map(|node| match node.attributes() { - Some(NodeAttributes::Pci(dev)) => { - if dev.address() == target_pci_address - && dev.vendor_id() == SupportedVendor::RedHat.vendor_id() - && SupportedDevice::VirtioNet - .device_ids() - .contains(&dev.device_id()) - { - Some(PciNic::new(dev.address()).unwrap()) - } else { - None - } - } - _ => None, - }) else { - panic!("target nic not found"); - }; - nic.bind_to_vfio_pci().unwrap(); - } + // #[test] + // #[n_vm::in_vm] + // fn bind_nic_test() { + // let system = Node::scan_all(); + // let target_pci_address = "0001:00:02.0".try_into().unwrap(); + // let Some(mut nic) = system.iter().find_map(|node| match node.attributes() { + // Some(NodeAttributes::Pci(dev)) => { + // if dev.address() == target_pci_address + // && dev.vendor_id() == SupportedVendor::RedHat.vendor_id() + // && SupportedDevice::VirtioNet + // .device_ids() + // .contains(&dev.device_id()) + // { + // Some(PciNic::new(dev.address()).unwrap()) + // } else { + // None + // } + // } + // _ => None, + // }) else { + // panic!("target nic not found"); + // }; + // nic.bind_to_vfio_pci().unwrap(); + // } } diff --git a/init/build.rs b/init/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/init/build.rs +++ b/init/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } diff --git a/nix/overlays/dataplane-dev.nix b/nix/overlays/dataplane-dev.nix new file mode 100644 index 000000000..b2c3c3d09 --- /dev/null +++ b/nix/overlays/dataplane-dev.nix @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors +{ + sources, +}: +let + fenix = 
import sources.fenix { }; + rust-toolchain = fenix.fromToolchainFile { + file = ../../rust-toolchain.toml; + sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; + }; +in +final: prev: { + # llvmPackages = final.llvmPackages_21; + + # # TODO: doc this + # rustPlatform = final.makeRustPlatform { + # stdenv = final.llvmPackages.stdenv; + # cargo = rust-toolchain; + # rustc = rust-toolchain; + # }; + + kopium = import ../pkgs/kopium { + src = sources.kopium; + inherit (final) rustPlatform; + }; +} diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 424f244a6..effb3f55c 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -253,6 +253,9 @@ in # This isn't directly required by dataplane, perftest = dataplane-dep (final.callPackage ../pkgs/perftest { src = sources.perftest; }); + inherit rust-toolchain; + cargo = rust-toolchain; + rustc = rust-toolchain; # TODO: doc this rustPlatform = final.makeRustPlatform { stdenv = stdenv'; diff --git a/nix/pkgs/kopium/default.nix b/nix/pkgs/kopium/default.nix new file mode 100644 index 000000000..6b3e2bf56 --- /dev/null +++ b/nix/pkgs/kopium/default.nix @@ -0,0 +1,11 @@ +{ + src, + rustPlatform, +}: +rustPlatform.buildRustPackage (final: { + pname = "kopium"; + version = src.version; + src = src.outPath; + cargoLock.lockFile = "${final.src}/Cargo.lock"; + doCheck = false; +}) diff --git a/nix/pkgs/perftest/default.nix b/nix/pkgs/perftest/default.nix new file mode 100644 index 000000000..0bc0ee783 --- /dev/null +++ b/nix/pkgs/perftest/default.nix @@ -0,0 +1,19 @@ +{ + src, + stdenv, + rdma-core, + autoreconfHook, + pciutils, +}: +stdenv.mkDerivation (final: { + pname = "perftest"; + version = src.version; + src = src.outPath; + nativeBuildInputs = [ + autoreconfHook + ]; + buildInputs = [ + pciutils + rdma-core + ]; +}) diff --git a/nix/profiles.nix b/nix/profiles.nix index eaa14c67f..0c9d1f8a9 100644 --- a/nix/profiles.nix +++ 
b/nix/profiles.nix @@ -8,7 +8,7 @@ }: let common.NIX_CFLAGS_COMPILE = [ - "-glldb" + "-g3" "-gdwarf-5" # odr or strict-aliasing violations are indicative of LTO incompatibility, so check for that "-Werror=odr" diff --git a/npins/sources.json b/npins/sources.json index 0fa1f6258..ba31a0c91 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -1,5 +1,21 @@ { "pins": { + "crane": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "ipetkov", + "repo": "crane" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "submodules": false, + "version": "v0.21.3", + "revision": "4a7cf504d83f7d0460f9cf28fe6cbaa5fb856034", + "url": "https://api.github.com/repos/ipetkov/crane/tarball/v0.21.3", + "hash": "1a27r58apm5arxyydn78j9yz0aa7d20v4ai688d23lbivfs0jaa5" + }, "dpdk": { "type": "Git", "repository": { @@ -34,6 +50,19 @@ "url": "https://api.github.com/repos/kube-rs/kopium/tarball/0.22.5", "hash": "14lr2qgfh50rlpw5wgy6cw1qvkz44bwwx06srlks243hgkw9p2fd" }, + "naersk": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "nix-community", + "repo": "naersk" + }, + "branch": "master", + "submodules": false, + "revision": "d4155d6ebb70fbe2314959842f744aa7cabbbf6a", + "url": "https://github.com/nix-community/naersk/archive/d4155d6ebb70fbe2314959842f744aa7cabbbf6a.tar.gz", + "hash": "1pmj1d3xp3fjz8m3msvmh8jnylwmgk76iah9qnfs8ddgwlij1v5g" + }, "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", diff --git a/sysfs/Cargo.toml b/sysfs/Cargo.toml index 5638e18ef..ab301ec3f 100644 --- a/sysfs/Cargo.toml +++ b/sysfs/Cargo.toml @@ -20,7 +20,7 @@ tracing-subscriber = { workspace = true, features = ["fmt"] } [dev-dependencies] # internal -n-vm = { workspace = true } +# n-vm = { workspace = true } # external diff --git a/sysfs/build.rs b/sysfs/build.rs index 9d7c9069b..1c30f3c66 100644 --- a/sysfs/build.rs +++ b/sysfs/build.rs @@ -2,7 +2,7 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = 
dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); + // let sysroot = dpdk_sysroot_helper::get_sysroot(); + // println!("cargo:rustc-link-search=all={sysroot}/lib"); // println!("cargo:rustc-link-arg=--sysroot={sysroot}"); } From 4a5df4f07acc7743c14782ffdee2ab7887683e80 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Mon, 22 Dec 2025 22:53:57 +0000 Subject: [PATCH 33/35] fix rust build --- default.nix | 10 ++++++++-- nix/overlays/dataplane-dev.nix | 30 +++++++++++++++++++----------- nix/overlays/dataplane.nix | 17 ++++++++++------- npins/sources.json | 4 ++-- 4 files changed, 39 insertions(+), 22 deletions(-) diff --git a/default.nix b/default.nix index e4823a42a..2c832bfd9 100644 --- a/default.nix +++ b/default.nix @@ -75,6 +75,8 @@ let executable = false; destination = "/.clangd"; }; + crane = import sources.crane { }; + craneLib = crane.craneLib.overrideToolchain dataplane-pkgs.rust-toolchain; dev-tools = dataplane-pkgs.symlinkJoin { name = "dataplane-dev-shell"; paths = [ @@ -98,8 +100,6 @@ let npins ]); }; - # crane = import sources.crane { pkgs = dataplane-dev-pkgs; }; - crane = import sources.crane { }; dataplane-src = crane.cleanCargoSource ./.; # Common arguments can be set here to avoid repeating them later @@ -264,6 +264,12 @@ in ; platform = platform'; x = builtins.attrNames crane; + y = { + lib = { + x = (builtins.attrNames crane.craneLib); + }; + }; + z = dataplane-pkgs.runCommand "ls" { } "echo potato > $out"; # y = crane.buildPackage } diff --git a/nix/overlays/dataplane-dev.nix b/nix/overlays/dataplane-dev.nix index b2c3c3d09..46ae9fbef 100644 --- a/nix/overlays/dataplane-dev.nix +++ b/nix/overlays/dataplane-dev.nix @@ -10,18 +10,26 @@ let sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; }; in -final: prev: { - # llvmPackages = final.llvmPackages_21; - - # # TODO: doc this - # rustPlatform = final.makeRustPlatform { - # stdenv = 
final.llvmPackages.stdenv; - # cargo = rust-toolchain; - # rustc = rust-toolchain; - # }; - +final: prev: +let + rustPlatform = final.makeRustPlatform { + stdenv = final.llvmPackages.stdenv; + cargo = rust-toolchain; + rustc = rust-toolchain; + }; +in +{ + inherit rust-toolchain rustPlatform; + llvmPackages = final.llvmPackages_21; kopium = import ../pkgs/kopium { src = sources.kopium; - inherit (final) rustPlatform; + inherit rustPlatform; }; + cargo-bolero = prev.cargo-bolero.override { inherit rustPlatform; }; + cargo-deny = prev.cargo-deny.override { inherit rustPlatform; }; + cargo-pciutils = prev.cargo-deny.override { inherit rustPlatform; }; + cargo-llvm-cov = prev.cargo-deny.override { inherit rustPlatform; }; + cargo-nextest = prev.cargo-deny.override { inherit rustPlatform; }; + just = prev.cargo-deny.override { inherit rustPlatform; }; + npins = prev.cargo-deny.override { inherit rustPlatform; }; } diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index effb3f55c..0faab2ea6 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -254,12 +254,15 @@ in perftest = dataplane-dep (final.callPackage ../pkgs/perftest { src = sources.perftest; }); inherit rust-toolchain; - cargo = rust-toolchain; - rustc = rust-toolchain; - # TODO: doc this - rustPlatform = final.makeRustPlatform { - stdenv = stdenv'; - cargo = rust-toolchain; - rustc = rust-toolchain; + + rustPlatform' = final.makeRustPlatform { + stdenv = final.llvmPackages.stdenv; + cargo = final.rust-toolchain; + rustc = final.rust-toolchain; + }; + + kopium = import ../pkgs/kopium { + src = sources.kopium; + rustPlatform = final.rustPlatform'; }; } diff --git a/npins/sources.json b/npins/sources.json index ba31a0c91..bada50e49 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -66,8 +66,8 @@ "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", - "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre913981.7d853e518814/nixexprs.tar.xz", - "hash": 
"1cpg513zly625rw05kbz1hvfiqcrwbd71c1bqhp61sh6ng8ifg4c" + "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre914780.306ea70f9eb0/nixexprs.tar.xz", + "hash": "1xx42m8amnda6z8nbm2ksv0plpzcdqxzymb1qqnp0xnf0k2bz2d1" }, "perftest": { "type": "GitRelease", From 47bb784dc508dabfa9dc839f9c9e04d1005d7de1 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Tue, 23 Dec 2025 05:17:58 +0000 Subject: [PATCH 34/35] dataplane now builds, but need to undo lots of hacks --- .cargo/config.toml | 11 ++-- Cargo.lock | 70 --------------------- default.nix | 37 +++++------ k8s-intf/Cargo.toml | 4 +- k8s-intf/build.rs | 66 ++++++++----------- k8s-intf/src/generated/gateway_agent_crd.rs | 21 +++++-- nix/overlays/dataplane-dev.nix | 28 ++++++--- nix/overlays/dataplane.nix | 22 ++----- nix/overlays/default.nix | 4 ++ nix/overlays/llvm.nix | 36 +++++++++++ npins/sources.json | 29 ++++++--- rust-toolchain.toml | 39 ++++++------ 12 files changed, 169 insertions(+), 198 deletions(-) create mode 100644 nix/overlays/llvm.nix diff --git a/.cargo/config.toml b/.cargo/config.toml index 02340991f..33ead06f2 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,8 +1,11 @@ [env] -COMPILE_ENV = { value = "sysroot", relative = true, force = false } -#PATH = { value = "compile-env/bin", relative = true, force = true } -#LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = true } -# PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = true } +# COMPILE_ENV = { value = "sysroot", relative = true, force = false } +# PATH = { value = "compile-env/bin", relative = true, force = true } +LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = false } +PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = false } +LIBRARY_PATH = { value = "sysroot/lib", relative = true, force = false } +C_INCLUDE_PATH = { value = "sysroot/include", 
relative = true, force = false } +GW_CRD_PATH = { value = "devroot/src/gateway/config/crd/bases", relative = true, force = false } [build] target = "x86_64-unknown-linux-gnu" diff --git a/Cargo.lock b/Cargo.lock index be2386be5..bdbb45476 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1403,7 +1403,6 @@ dependencies = [ "dataplane-lpm", "dataplane-net", "dataplane-tracectl", - "dotenvy", "futures", "k8s-openapi", "kube", @@ -1416,7 +1415,6 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tracing", - "ureq", ] [[package]] @@ -1883,12 +1881,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dotenvy" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" - [[package]] name = "downcast-rs" version = "2.0.2" @@ -2100,16 +2092,6 @@ dependencies = [ "syn 2.0.111", ] -[[package]] -name = "flate2" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "fnv" version = "1.0.7" @@ -3379,7 +3361,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", - "simd-adler32", ] [[package]] @@ -4583,7 +4564,6 @@ dependencies = [ "aws-lc-rs", "log", "once_cell", - "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -5017,12 +4997,6 @@ dependencies = [ "libc 0.2.178", ] -[[package]] -name = "simd-adler32" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" - [[package]] name = "simdutf8" version = "0.1.5" @@ -5830,35 +5804,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "ureq" -version = "3.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39cb1dbab692d82a977c0392ffac19e188bd9186a9f32806f0aaa859d75585a" -dependencies = [ - "base64 0.22.1", - "flate2", - "log", - "percent-encoding", - "rustls", - "rustls-pki-types", - "ureq-proto", - "utf-8", - "webpki-roots", -] - -[[package]] -name = "ureq-proto" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" -dependencies = [ - "base64 0.22.1", - "http 1.4.0", - "httparse", - "log", -] - [[package]] name = "url" version = "2.5.7" @@ -5871,12 +5816,6 @@ dependencies = [ "serde", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -6043,15 +5982,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "winapi" version = "0.3.9" diff --git a/default.nix b/default.nix index 2c832bfd9..7e15c105b 100644 --- a/default.nix +++ b/default.nix @@ -32,12 +32,14 @@ let }; dataplane-dev-pkgs = import sources.nixpkgs { overlays = [ + overlays.llvm overlays.dataplane-dev ]; }; dataplane-pkgs = (import sources.nixpkgs { overlays = [ + overlays.llvm overlays.dataplane ]; }).pkgsCross.${platform'.info.nixarch}; @@ -75,8 +77,8 @@ let executable = false; destination = "/.clangd"; }; - crane = import sources.crane { }; - craneLib = crane.craneLib.overrideToolchain dataplane-pkgs.rust-toolchain; + crane-base = import sources.crane { }; + crane = 
crane-base.craneLib.overrideToolchain dataplane-dev-pkgs.rust-toolchain; dev-tools = dataplane-pkgs.symlinkJoin { name = "dataplane-dev-shell"; paths = [ @@ -96,6 +98,7 @@ let cargo-llvm-cov cargo-nextest direnv + gateway-crd just npins ]); @@ -116,6 +119,14 @@ let dataplane-pkgs.hwloc ]; + env = { + LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; + C_INCLUDE_PATH = "${sysroot}/include"; + LIBRARY_PATH = "${sysroot}/lib"; + PKG_CONFIG_PATH = "${sysroot}/lib/pkgconfig"; + GW_CRD_PATH = "${dataplane-dev-pkgs.gateway-crd}/src/gateway/config/crd/bases"; + }; + # Additional environment variables can be set directly # MY_CUSTOM_VAR = "some value"; }; @@ -136,11 +147,6 @@ let root = ./.; fileset = lib.fileset.unions [ ./. - ./Cargo.toml - ./Cargo.lock - # (crane.fileset.commonCargoSources ./crates/my-common) - # (crane.fileset.commonCargoSources ./crates/my-workspace-hack) - (crane.fileset.commonCargoSources crate) ]; }; rekon = crane.buildPackage ( @@ -181,11 +187,6 @@ let pname = "dataplane-dpdk-sys"; cargoExtraArgs = "--package dataplane-dpdk-sys"; src = fileSetForCrate ./dpdk-sys; - env = { - LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; - C_INCLUDE_PATH = "${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; - LIBRARY_PATH = "${sysroot}/lib"; - }; nativeBuildInputs = [ dataplane-pkgs.pkg-config dataplane-pkgs.llvmPackages.libclang.lib @@ -203,11 +204,6 @@ let pname = "dataplane-dpdk"; cargoExtraArgs = "--package dataplane-dpdk"; src = fileSetForCrate ./dpdk; - env = { - LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; - C_INCLUDE_PATH = "${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; - LIBRARY_PATH = "${sysroot}/lib"; - }; nativeBuildInputs = [ dataplane-pkgs.pkg-config dataplane-pkgs.llvmPackages.libclang.lib @@ -225,16 +221,12 @@ let pname = "dataplane"; cargoExtraArgs = 
"--package dataplane"; src = fileSetForCrate ./dataplane; - env = { - LIBCLANG_PATH = "${dataplane-pkgs.llvmPackages.libclang.lib}/lib"; - C_INCLUDE_PATH = "${dataplane-pkgs.dpdk.dev}/include:${dataplane-pkgs.libbsd.dev}/include:${dataplane-pkgs.stdenv'.cc.libc.dev}/include"; - LIBRARY_PATH = "${sysroot}/lib"; - }; nativeBuildInputs = [ dataplane-pkgs.pkg-config dataplane-pkgs.llvmPackages.libclang.lib dataplane-pkgs.llvmPackages.clang dataplane-pkgs.llvmPackages.lld + dataplane-dev-pkgs.kopium ]; buildInputs = [ sysroot @@ -269,7 +261,6 @@ in x = (builtins.attrNames crane.craneLib); }; }; - z = dataplane-pkgs.runCommand "ls" { } "echo potato > $out"; # y = crane.buildPackage } diff --git a/k8s-intf/Cargo.toml b/k8s-intf/Cargo.toml index a3efed406..73d542f03 100644 --- a/k8s-intf/Cargo.toml +++ b/k8s-intf/Cargo.toml @@ -37,5 +37,5 @@ lpm = { workspace = true, features = [] } net = { workspace = true, features = ["bolero", "test_buffer"] } [build-dependencies] -dotenvy = { workspace = true, features = [] } -ureq = { workspace = true, features = ["rustls", "gzip"] } +# dotenvy = { workspace = true, features = [] } +# ureq = { workspace = true, features = ["rustls", "gzip"] } diff --git a/k8s-intf/build.rs b/k8s-intf/build.rs index f0477bf9b..8a37ff1e3 100644 --- a/k8s-intf/build.rs +++ b/k8s-intf/build.rs @@ -1,47 +1,10 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Open Network Fabric Authors -use std::env; use std::fs; +use std::io::Read; use std::path::PathBuf; -fn workspace_root() -> PathBuf { - PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set")) - .ancestors() - .nth(1) - .expect("Workspace root not found") - .to_path_buf() -} - -fn get_agent_crd_url() -> String { - let env_file_path = workspace_root().join("scripts").join("k8s-crd.env"); - println!("cargo:rerun-if-changed={}", env_file_path.display()); - - let env_file = - dotenvy::from_path_iter(env_file_path).expect("Failed to read scripts/k8s-crd.env"); - - env_file - 
.filter_map(Result::ok) - .find_map(|(key, value)| { - if key == "K8S_GATEWAY_AGENT_CRD_URL" { - Some(value) - } else { - None - } - }) - .expect("K8S_GATEWAY_AGENT_CRD_URL not found in scripts/k8s-crd.env") -} - -fn fetch_crd(url: &str) -> String { - println!("cargo:note=Fetching CRD from: {url}"); - ureq::get(url) - .call() - .expect("Failed to fetch agent CRD from url") - .body_mut() - .read_to_string() - .expect("Failed to read response body") -} - const LICENSE_PREAMBLE: &str = "// SPDX-License-Identifier: Apache-2.0 // Copyright Open Network Fabric Authors @@ -124,8 +87,31 @@ fn code_needs_regen(new_code: &str) -> bool { } fn main() { - let agent_crd_url = get_agent_crd_url(); - let agent_crd_contents = fetch_crd(&agent_crd_url); + let agent_crd_contents = { + let agent_crd_path = + PathBuf::from(std::env::var("GW_CRD_PATH").expect("GW_CRD_PATH var unset")) + .join("gwint.githedgehog.com_gatewayagents.yaml"); + let mut agent_crd_file = std::fs::OpenOptions::new() + .read(true) + .write(false) + .open(&agent_crd_path) + .unwrap_or_else(|e| { + panic!( + "failed to open {path}: {e}", + path = agent_crd_path.to_str().expect("non unicode crd path") + ) + }); + let mut contents = String::with_capacity( + agent_crd_file + .metadata() + .expect("unable to get crd metadata") + .len() as usize, + ); + agent_crd_file + .read_to_string(&mut contents) + .unwrap_or_else(|e| panic!("unable to read crd data into string: {e}")); + contents + }; let agent_generated_code = generate_rust_for_crd(&agent_crd_contents); if !code_needs_regen(&agent_generated_code) { diff --git a/k8s-intf/src/generated/gateway_agent_crd.rs b/k8s-intf/src/generated/gateway_agent_crd.rs index 828617a4b..6138cedf6 100644 --- a/k8s-intf/src/generated/gateway_agent_crd.rs +++ b/k8s-intf/src/generated/gateway_agent_crd.rs @@ -140,12 +140,10 @@ pub struct GatewayAgentGroups { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct GatewayAgentGroupsMembers { - #[serde(default, 
skip_serializing_if = "Option::is_none")] - pub name: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub priority: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "vtepIP")] - pub vtep_ip: Option, + pub name: String, + pub priority: i32, + #[serde(rename = "vtepIP")] + pub vtep_ip: String, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] @@ -177,18 +175,29 @@ pub struct GatewayAgentPeeringsPeeringExpose { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct GatewayAgentPeeringsPeeringExposeAs { + /// CIDR to include, only one of cidr, not can be set #[serde(default, skip_serializing_if = "Option::is_none")] pub cidr: Option, + /// CIDR to exclude, only one of cidr, not can be set #[serde(default, skip_serializing_if = "Option::is_none")] pub not: Option, + /// Port ranges (e.g. "80, 443, 3000-3100"), used together with exactly one of cidr, not + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ports: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct GatewayAgentPeeringsPeeringExposeIps { + /// CIDR to include, only one of cidr, not, vpcSubnet can be set #[serde(default, skip_serializing_if = "Option::is_none")] pub cidr: Option, + /// CIDR to exclude, only one of cidr, not, vpcSubnet can be set #[serde(default, skip_serializing_if = "Option::is_none")] pub not: Option, + /// Port ranges (e.g. 
"80, 443, 3000-3100"), used together with exactly one of cidr, not, vpcSubnet + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ports: Option, + /// CIDR by VPC subnet name to include, only one of cidr, not, vpcSubnet can be set #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSubnet")] pub vpc_subnet: Option, } diff --git a/nix/overlays/dataplane-dev.nix b/nix/overlays/dataplane-dev.nix index 46ae9fbef..9756d3c71 100644 --- a/nix/overlays/dataplane-dev.nix +++ b/nix/overlays/dataplane-dev.nix @@ -3,15 +3,13 @@ { sources, }: +final: prev: let fenix = import sources.fenix { }; rust-toolchain = fenix.fromToolchainFile { file = ../../rust-toolchain.toml; sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; }; -in -final: prev: -let rustPlatform = final.makeRustPlatform { stdenv = final.llvmPackages.stdenv; cargo = rust-toolchain; @@ -19,17 +17,27 @@ let }; in { - inherit rust-toolchain rustPlatform; - llvmPackages = final.llvmPackages_21; + inherit rust-toolchain; + rustPlatform' = rustPlatform; + kopium = import ../pkgs/kopium { src = sources.kopium; inherit rustPlatform; }; cargo-bolero = prev.cargo-bolero.override { inherit rustPlatform; }; cargo-deny = prev.cargo-deny.override { inherit rustPlatform; }; - cargo-pciutils = prev.cargo-deny.override { inherit rustPlatform; }; - cargo-llvm-cov = prev.cargo-deny.override { inherit rustPlatform; }; - cargo-nextest = prev.cargo-deny.override { inherit rustPlatform; }; - just = prev.cargo-deny.override { inherit rustPlatform; }; - npins = prev.cargo-deny.override { inherit rustPlatform; }; + cargo-llvm-cov = prev.cargo-llvm-cov.override { inherit rustPlatform; }; + cargo-nextest = prev.cargo-nextest.override { inherit rustPlatform; }; + just = prev.just.override { inherit rustPlatform; }; + npins = prev.npins.override { inherit rustPlatform; }; + gateway-crd = + let + path = "config/crd/bases/gwint.githedgehog.com_gatewayagents.yaml"; + 
in + final.writeTextFile { + name = "gateway-crd"; + text = builtins.readFile "${sources.gateway}/${path}"; + executable = false; + destination = "/src/gateway/${path}"; + }; } diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 0faab2ea6..b8b85dd99 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -33,9 +33,14 @@ let file = ../../rust-toolchain.toml; sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; }; + rustPlatform' = final.makeRustPlatform { + stdenv = stdenv'; + cargo = rust-toolchain; + rustc = rust-toolchain; + }; in { - inherit stdenv' added-to-env; + inherit rust-toolchain stdenv' rustPlatform'; # Don't bother adapting ethtool or iproute2's build to our custom flags / env. Failure to null this can trigger # _massive_ builds because ethtool depends on libnl (et al), and we _do_ overlay libnl. Thus, the ethtool / iproute2 # get rebuilt and you end up rebuilding the whole world. @@ -65,8 +70,6 @@ in udev = null; udevCheckHook = null; - llvmPackages = final.llvmPackages_21; - # libmd is used by libbsd (et al) which is an optional dependency of dpdk. # # We _might_ actually care about perf here, so we lto this package. 
@@ -252,17 +255,4 @@ in pciutils = dataplane-dep (prev.pciutils.override { static = true; }); # This isn't directly required by dataplane, perftest = dataplane-dep (final.callPackage ../pkgs/perftest { src = sources.perftest; }); - - inherit rust-toolchain; - - rustPlatform' = final.makeRustPlatform { - stdenv = final.llvmPackages.stdenv; - cargo = final.rust-toolchain; - rustc = final.rust-toolchain; - }; - - kopium = import ../pkgs/kopium { - src = sources.kopium; - rustPlatform = final.rustPlatform'; - }; } diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 8ce3c6604..d7bb9d7f7 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -7,6 +7,10 @@ profile, }: { + llvm = import ./llvm.nix { + inherit sources; + }; + dataplane = import ./dataplane.nix { inherit sources diff --git a/nix/overlays/llvm.nix b/nix/overlays/llvm.nix new file mode 100644 index 000000000..1fb641cfe --- /dev/null +++ b/nix/overlays/llvm.nix @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors +{ + sources, +}: +let + fenix = import sources.fenix { }; + rust-toolchain = fenix.fromToolchainFile { + file = ../../rust-toolchain.toml; + sha256 = (builtins.fromJSON (builtins.readFile ../.rust-toolchain.manifest-lock.json)).hash.sha256; + }; +in +final: prev: { + # It is essential that we always use the same version of llvm that our rustc is backed by. + # To minimize maintenance burden, we explicitly compute the version of LLVM we need by asking rustc + # which version it is using. + # This is significantly less error prone than hunting around for all versions of pkgs.llvmPackages_${version} + # every time rust updates. 
+ llvmPackages = + let + version = builtins.readFile ( + final.runCommandLocal "llvm-version-for-our-rustc" + { + RUSTC = "${rust-toolchain.out}/bin/rustc"; + GREP = "${final.gnugrep}/bin/grep"; + SED = "${final.gnused}/bin/sed"; + } + '' + $RUSTC --version --verbose | \ + $GREP '^LLVM version:' | \ + $SED -z 's|LLVM version: \([0-9]\+\)\.[0-9]\+\.[0-9]\+\n|\1|' > $out + '' + ); + in + final."llvmPackages_${version}"; +} diff --git a/npins/sources.json b/npins/sources.json index bada50e49..ee0f05b92 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -34,6 +34,22 @@ "url": "https://github.com/nix-community/fenix/archive/main.tar.gz", "hash": "1gkd9ppvsxl4jjg1jyw61wm99xhy4hdqx5dxqj06gfxi2zkamvzf" }, + "gateway": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "githedgehog", + "repo": "gateway" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "submodules": false, + "version": "v0.33.0", + "revision": "18913fe34d78af396deb1e6a034d0258168122b1", + "url": "https://api.github.com/repos/githedgehog/gateway/tarball/v0.33.0", + "hash": "0sjdagppmmb8wjpf8p7gpxjsh18xyy1n6zs3hy08h15ldv0ip2sz" + }, "kopium": { "type": "GitRelease", "repository": { @@ -70,20 +86,17 @@ "hash": "1xx42m8amnda6z8nbm2ksv0plpzcdqxzymb1qqnp0xnf0k2bz2d1" }, "perftest": { - "type": "GitRelease", + "type": "Git", "repository": { "type": "GitHub", "owner": "linux-rdma", "repo": "perftest" }, - "pre_releases": false, - "version_upper_bound": null, - "release_prefix": null, + "branch": "master", "submodules": false, - "version": "25.10.0-0.128", - "revision": "8a1d3d7234add23fe006b2ff51d650ff022077a8", - "url": "https://api.github.com/repos/linux-rdma/perftest/tarball/25.10.0-0.128", - "hash": "192m2xlds308y0p2h6f6zciwspgq3k0q93q9lp1j4ijmwrpbcrl0" + "revision": "4e4f6629904ba07bf81f207b31433cb9dabbe8ab", + "url": "https://github.com/linux-rdma/perftest/archive/4e4f6629904ba07bf81f207b31433cb9dabbe8ab.tar.gz", + "hash": 
"0rvwmx9nbhr0m5zmrcpxcqwdjpl4bwsfwc7vmdn31n5gxn0qnn1x" }, "rdma-core": { "type": "Git", diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 610ccfcad..46e38a196 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,27 +1,28 @@ [toolchain] # NOTE: you can and should manually update this on new rust releases - channel = "1.92.0" + components = [ - "rustc", - "cargo", - "rust-std", - "rust-docs", - "rustfmt-preview", - "clippy-preview", - "rust-analyzer-preview", - "rust-src", + "rustc", + "cargo", + "rust-std", + "rust-docs", + "rustfmt-preview", + "clippy-preview", + "rust-analyzer-preview", + "rust-src", - # disabled components - # "rust-mingw", - # "llvm-tools-preview", ## we already have a full llvm in the npins - # "rust-analysis", ## obsolete - # "miri-preview", ## not yet functional for us - # "rustc-codegen-cranelift-preview" ## not relevant to us + ## other (disabled) components ## + # "rust-mingw", ## not relevant to us + # "llvm-tools-preview", ## we already have a full llvm in the npins, no need for another + # "rust-analysis", ## obsolete + # "miri-preview", ## not yet functional for us + # "rustc-codegen-cranelift-preview" ## not relevant to us ] + targets = [ - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-unknown-linux-gnu", - "aarch64-unknown-linux-musl" + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", + "aarch64-unknown-linux-musl" ] From 73ef3191224e5516d481be59c8cbb05fc41a1d82 Mon Sep 17 00:00:00 2001 From: Daniel Noland Date: Tue, 23 Dec 2025 05:28:24 +0000 Subject: [PATCH 35/35] fix cross compile errooooor --- nix/overlays/llvm.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nix/overlays/llvm.nix b/nix/overlays/llvm.nix index 1fb641cfe..9e7049db3 100644 --- a/nix/overlays/llvm.nix +++ b/nix/overlays/llvm.nix @@ -22,8 +22,8 @@ final: prev: { final.runCommandLocal "llvm-version-for-our-rustc" { RUSTC = "${rust-toolchain.out}/bin/rustc"; 
- GREP = "${final.gnugrep}/bin/grep"; - SED = "${final.gnused}/bin/sed"; + GREP = "${final.pkgsBuildHost.gnugrep}/bin/grep"; + SED = "${final.pkgsBuildHost.gnused}/bin/sed"; } '' $RUSTC --version --verbose | \