Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
3fd1b59
github actions: Add kernelCI for rlc-10
roxanan1996 Mar 12, 2026
fef9438
github actions: Use trigger for kernelCI
roxanan1996 Mar 26, 2026
e13283a
tools: hv: Enable debug logs for hv_kvp_daemon
PlaidCat Jun 9, 2025
2a2dc46
RDMA/mana_ib: Add device statistics support
shreeya-patel98 Dec 15, 2025
8cd6809
PCI/MSI: Export pci_msix_prepare_desc() for dynamic MSI-X allocations
shreeya-patel98 Dec 15, 2025
72add67
PCI: hv: Allow dynamic MSI-X vector allocation
shreeya-patel98 Dec 15, 2025
edb4814
net: mana: explain irq_setup() algorithm
shreeya-patel98 Dec 15, 2025
d9008f1
net: mana: Allow irq_setup() to skip cpus for affinity
shreeya-patel98 Dec 15, 2025
db5f535
net: mana: Allocate MSI-X vectors dynamically
shreeya-patel98 Dec 15, 2025
6620e0d
net: mana: Add support for net_shaper_ops
shreeya-patel98 Dec 16, 2025
b4087c6
net: mana: Add speed support in mana_get_link_ksettings
shreeya-patel98 Dec 16, 2025
c0f4ae9
net: mana: Handle unsupported HWC commands
shreeya-patel98 Dec 16, 2025
debbbfc
net: mana: Fix build errors when CONFIG_NET_SHAPER is disabled
shreeya-patel98 Dec 16, 2025
1e44695
RDMA/mana_ib: add additional port counters
shreeya-patel98 Dec 17, 2025
710a771
RDMA/mana_ib: Drain send wrs of GSI QP
shreeya-patel98 Dec 17, 2025
79e4960
net: hv_netvsc: fix loss of early receive events from host during channel open
shreeya-patel98 Dec 17, 2025
95a2562
net: mana: Reduce waiting time if HWC not responding
shreeya-patel98 Dec 17, 2025
acfbe0e
RDMA/mana_ib: Extend modify QP
shreeya-patel98 Dec 17, 2025
6587d70
scsi: storvsc: Prefer returning channel with the same CPU as on the I…
shreeya-patel98 Dec 17, 2025
96da5bd
net: mana: Use page pool fragments for RX buffers instead of full pages to improve memory efficiency
shreeya-patel98 Dec 17, 2025
ded636f
dcache: export shrink_dentry_list() and add new helper d_dispose_if_u…
roxanan1996 Feb 17, 2026
5cc6053
idpf: fix a race in txq wakeup
May 1, 2025
2632622
idpf: add support for Tx refillqs in flow scheduling mode
jahay1 Jul 25, 2025
64bf76e
idpf: improve when to set RE bit logic
jahay1 Jul 25, 2025
95b5c30
idpf: simplify and fix splitq Tx packet rollback error path
jahay1 Jul 25, 2025
1a47355
idpf: replace flow scheduling buffer ring with buffer pool
jahay1 Jul 25, 2025
77de562
idpf: stop Tx if there are insufficient buffer resources
jahay1 Jul 25, 2025
7a10273
idpf: remove obsolete stashing code
jahay1 Jul 25, 2025
bb99c7d
io_uring/cmd: let cmds to know about dying task
PlaidCat Mar 31, 2026
becd999
fuse: don't truncate cached, mutated symlink
PlaidCat Mar 31, 2026
784da2a
fuse: add more control over cache invalidation behaviour
PlaidCat Mar 31, 2026
139a6c1
fuse: fix possibly missing fuse_copy_finish() call in fuse_notify()
PlaidCat Mar 31, 2026
03495b1
fs: fuse: add dev id to /dev/fuse fdinfo
shreeya-patel98 May 4, 2026
fa9cf18
fuse: respect FOPEN_KEEP_CACHE on opendir
shreeya-patel98 May 4, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .container_build_image
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
rocky-10-kernel-builder
11 changes: 11 additions & 0 deletions .github/workflows/kernel-build-and-test-multiarch-trigger.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Trigger workflow: fires on pushes to rlc-10 branches and delegates the
# actual multi-arch kernel build/test to the reusable workflow hosted in
# ctrliq/kernel-src-tree.
name: Trigger Automated kernel build and test (multi-arch)

on:
  push:
    branches:
      # Matches any branch of the form '<prefix>_rlc-10/**'.
      - '*_rlc-10/**'

jobs:
  kernelCI:
    # Reusable workflow call; repository secrets are forwarded via
    # 'secrets: inherit' so the callee can access build credentials.
    uses: ctrliq/kernel-src-tree/.github/workflows/kernel-build-and-test-multiarch-trigger.yml@main
    secrets: inherit
78 changes: 76 additions & 2 deletions drivers/infiniband/hw/mana/counters.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,32 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
[MANA_IB_CURRENT_RATE].name = "current_rate",
[MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
[MANA_IB_TX_BYTES].name = "tx_bytes",
[MANA_IB_RX_BYTES].name = "rx_bytes",
[MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
[MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
[MANA_IB_RX_READ_REQ].name = "rx_read_requests",
[MANA_IB_TX_PKT].name = "tx_packets",
[MANA_IB_RX_PKT].name = "rx_packets",
};

/*
 * Names for the adapter-wide (device) hardware counters, indexed by
 * enum mana_ib_device_counters. Exposed to userspace via the rdma_hw_stats
 * sysfs/netlink interface.
 */
static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
	[MANA_IB_SENT_CNPS].name = "sent_cnps",
	[MANA_IB_RECEIVED_ECNS].name = "received_ecns",
	[MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
	[MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
	[MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
	[MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
};

struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
ARRAY_SIZE(mana_ib_device_stats_desc),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
Expand All @@ -42,8 +66,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index)
/*
 * mana_ib_get_hw_device_stats() - query adapter-wide counters from hardware.
 * @ibdev: IB device to query.
 * @stats: destination rdma_hw_stats; values indexed by
 *         enum mana_ib_device_counters.
 *
 * Sends a MANA_IB_QUERY_DEVICE_COUNTERS request over the GDMA channel and
 * copies each response field into @stats.
 *
 * Return: number of counters filled on success, negative errno on failure.
 */
static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
						ib_dev);
	struct mana_rnic_query_device_cntrs_req req = {};
	struct mana_rnic_query_device_cntrs_resp resp = {};
	int ret;

	/* Build the request header and address it to this adapter. */
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
			     sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;

	ret = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
				   sizeof(resp), &resp);
	if (ret) {
		ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
			  ret);
		return ret;
	}

	/* Mirror every response field into the stats array. */
	stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
	stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
	stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
	stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
	stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
	stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;

	return ARRAY_SIZE(mana_ib_device_stats_desc);
}

static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
Expand All @@ -53,6 +108,7 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,

mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
sizeof(req), sizeof(resp));
req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;

Expand Down Expand Up @@ -101,5 +157,23 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;

stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;

return ARRAY_SIZE(mana_ib_port_stats_desc);
}

/*
 * mana_ib_get_hw_stats() - .get_hw_stats dispatcher.
 * @ibdev:    IB device to query.
 * @stats:    destination counter array.
 * @port_num: 0 selects the device-wide counters, otherwise a port.
 * @index:    unused; the whole group is refreshed at once.
 *
 * Return: number of counters filled on success, negative errno on failure.
 */
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			 u32 port_num, int index)
{
	/* else-after-return dropped per kernel coding style. */
	if (!port_num)
		return mana_ib_get_hw_device_stats(ibdev, stats);

	return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
}
18 changes: 18 additions & 0 deletions drivers/infiniband/hw/mana/counters.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,28 @@ enum mana_ib_port_counters {
MANA_IB_RATE_INC_EVENTS,
MANA_IB_NUM_QPS_RECOVERED,
MANA_IB_CURRENT_RATE,
MANA_IB_DUP_RX_REQ,
MANA_IB_TX_BYTES,
MANA_IB_RX_BYTES,
MANA_IB_RX_SEND_REQ,
MANA_IB_RX_WRITE_REQ,
MANA_IB_RX_READ_REQ,
MANA_IB_TX_PKT,
MANA_IB_RX_PKT,
};

/*
 * Adapter-wide counter indices; each entry names a slot in the
 * rdma_hw_stats value array and corresponds one-to-one with a field of
 * struct mana_rnic_query_device_cntrs_resp.
 */
enum mana_ib_device_counters {
	MANA_IB_SENT_CNPS,
	MANA_IB_RECEIVED_ECNS,
	MANA_IB_RECEIVED_CNP_COUNT,
	MANA_IB_QP_CONGESTED_EVENTS,
	MANA_IB_QP_RECOVERED_EVENTS,
	MANA_IB_DEV_RATE_INC_EVENTS,
};

struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num);
struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
#endif /* _COUNTERS_H_ */
26 changes: 26 additions & 0 deletions drivers/infiniband/hw/mana/cq.c
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,32 @@ static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc
return wc_index;
}

/*
 * mana_drain_gsi_sqs() - flush outstanding GSI send work requests.
 * @mdev: device being torn down.
 *
 * Takes a reference on the GSI QP (fixed QPN MANA_GSI_QPN), marks every
 * shadow send WQE still awaiting completion as IB_WC_GENERAL_ERR under
 * the send CQ lock, then invokes the CQ completion handler (if one is
 * registered) so the consumer polls the errored completions.
 */
void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
{
	struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
	struct ud_sq_shadow_wqe *shadow_wqe;
	struct mana_ib_cq *cq;
	unsigned long flags;

	/* No GSI QP present: nothing to drain. */
	if (!qp)
		return;

	cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Error out every shadow SQ WQE that never got a hardware CQE. */
	while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
			!= NULL) {
		shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
		shadow_queue_advance_next_to_complete(&qp->shadow_sq);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	/* Kick the consumer so the flushed completions are reaped. */
	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

	mana_put_qp_ref(qp);
}

int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
Expand Down
9 changes: 9 additions & 0 deletions drivers/infiniband/hw/mana/device.c
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
.get_hw_stats = mana_ib_get_hw_stats,
};

/*
 * Installed by mana_ib_probe() only when the adapter advertises
 * MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT.
 */
static const struct ib_device_ops mana_ib_device_stats_ops = {
	.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
};

static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
Expand Down Expand Up @@ -153,6 +157,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
}

ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);

ret = mana_ib_create_eqs(dev);
if (ret) {
Expand Down Expand Up @@ -218,6 +224,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);

if (mana_ib_is_rnic(dev))
mana_drain_gsi_sqs(dev);

ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
Expand Down
41 changes: 39 additions & 2 deletions drivers/infiniband/hw/mana/mana_ib.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@
*/
#define MANA_AV_BUFFER_SIZE 64

#define MANA_GSI_QPN (1)

struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
Expand Down Expand Up @@ -210,6 +212,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};

struct mana_ib_query_adapter_caps_req {
Expand All @@ -218,6 +221,7 @@ struct mana_ib_query_adapter_caps_req {

enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
};

struct mana_ib_query_adapter_caps_resp {
Expand Down Expand Up @@ -407,7 +411,7 @@ struct mana_ib_ah_attr {
u8 traffic_class;
u16 src_port;
u16 dest_port;
u32 reserved;
u32 flow_label;
};

struct mana_rnic_set_qp_state_req {
Expand All @@ -424,8 +428,15 @@ struct mana_rnic_set_qp_state_req {
u32 retry_cnt;
u32 rnr_retry;
u32 min_rnr_timer;
u32 reserved;
u32 rate_limit;
struct mana_ib_ah_attr ah_attr;
u64 reserved1;
u32 qkey;
u32 qp_access_flags;
u8 local_ack_timeout;
u8 max_rd_atomic;
u16 reserved2;
u32 reserved3;
}; /* HW Data */

struct mana_rnic_set_qp_state_resp {
Expand Down Expand Up @@ -514,6 +525,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
u64 dup_rx_req;
u64 tx_bytes;
u64 rx_bytes;
u64 rx_send_req;
u64 rx_write_req;
u64 rx_read_req;
u64 tx_pkt;
u64 rx_pkt;
}; /* HW Data */

/* Request for MANA_IB_QUERY_DEVICE_COUNTERS: header plus adapter handle. */
struct mana_rnic_query_device_cntrs_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;	/* taken from mdev->adapter_handle */
}; /* HW Data */

/*
 * Response for MANA_IB_QUERY_DEVICE_COUNTERS; fields map 1:1 onto
 * enum mana_ib_device_counters. reserved1/reserved2 are presumably
 * padding/ABI reserved slots — layout is hardware-defined.
 */
struct mana_rnic_query_device_cntrs_resp {
	struct gdma_resp_hdr hdr;
	u32 sent_cnps;
	u32 received_ecns;
	u32 reserved1;
	u32 received_cnp_count;
	u32 qp_congested_events;
	u32 qp_recovered_events;
	u32 rate_inc_events;
	u32 reserved2;
}; /* HW Data */

static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
Expand Down Expand Up @@ -689,6 +725,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);

void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

Expand Down
9 changes: 9 additions & 0 deletions drivers/infiniband/hw/mana/qp.c
Original file line number Diff line number Diff line change
Expand Up @@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;

mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));

req.hdr.req.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
Expand All @@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.retry_cnt = attr->retry_cnt;
req.rnr_retry = attr->rnr_retry;
req.min_rnr_timer = attr->min_rnr_timer;
req.rate_limit = attr->rate_limit;
req.qkey = attr->qkey;
req.local_ack_timeout = attr->timeout;
req.qp_access_flags = attr->qp_access_flags;
req.max_rd_atomic = attr->max_rd_atomic;

if (attr_mask & IB_QP_AV) {
ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
if (!ndev) {
Expand All @@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
}

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
Expand Down
Loading
Loading