19 changes: 18 additions & 1 deletion fs/smb/client/smbdirect.c
@@ -446,7 +446,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbd_response *response =
container_of(wc->wr_cqe, struct smbd_response, cqe);
struct smbd_connection *info = response->info;
int data_length = 0;
u32 data_offset = 0;
u32 data_length = 0;
u32 remaining_data_length = 0;

log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
response, response->type, wc->status, wc->opcode,
@@ -478,7 +480,22 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
/* SMBD data transfer packet */
case SMBD_TRANSFER_DATA:
data_transfer = smbd_response_payload(response);

if (wc->byte_len <
offsetof(struct smbd_data_transfer, padding))
goto error;

remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
data_offset = le32_to_cpu(data_transfer->data_offset);
data_length = le32_to_cpu(data_transfer->data_length);
if (wc->byte_len < data_offset ||
(u64)wc->byte_len < (u64)data_offset + data_length)
goto error;

if (remaining_data_length > info->max_fragmented_recv_size ||
data_length > info->max_fragmented_recv_size ||
(u64)remaining_data_length + (u64)data_length > (u64)info->max_fragmented_recv_size)
goto error;

/*
* If this is a packet with data playload place the data in
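The checks added to recv_done() above bound the SMBD_TRANSFER_DATA header fields against the bytes actually received, and the (u64) casts are the important part: adding two untrusted u32 values in 32-bit arithmetic can wrap and slip past a naive comparison. A minimal userspace sketch of that wraparound, with made-up values and names (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t byte_len    = 4096;        /* bytes actually received */
	uint32_t data_offset = 24;          /* claimed payload offset */
	uint32_t data_length = 0xFFFFFFF0u; /* hostile, near-UINT32_MAX length */

	/* 32-bit addition wraps: 24 + 0xFFFFFFF0 becomes 8, so a naive
	 * bounds check would accept the packet. */
	if (data_offset + data_length <= byte_len)
		printf("naive u32 check: accepted (wrapped sum = %u)\n",
		       data_offset + data_length);

	/* Widening before the add, as the patch does, keeps the true sum. */
	if ((uint64_t)data_offset + data_length > byte_len)
		printf("u64 check: rejected (true sum = %llu)\n",
		       (unsigned long long)data_offset + data_length);
	return 0;
}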
9 changes: 7 additions & 2 deletions fs/squashfs/super.c
@@ -142,10 +142,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
unsigned short flags;
unsigned int fragments;
u64 lookup_table_start, xattr_id_table_start, next_table;
int err;
int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);

TRACE("Entered squashfs_fill_superblock\n");

if (!devblksize) {
errorf(fc, "squashfs: unable to set blocksize\n");
return -EINVAL;
}

/*
* squashfs provides 'backing_dev_info' in order to disable read-ahead. For
* squashfs, I/O is not deferred, it is done immediately in read_folio,
@@ -169,7 +174,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)

msblk->panic_on_errors = (opts->errors == Opt_errors_panic);

msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
msblk->devblksize = devblksize;
msblk->devblksize_log2 = ffz(~msblk->devblksize);

mutex_init(&msblk->meta_index_mutex);
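For context on the squashfs change above: devblksize_log2 is derived as ffz(~devblksize), which equals log2(devblksize) for a power-of-two block size, so a zero return from sb_min_blocksize() would leave the superblock with a meaningless shift value; the patch now rejects it up front. A small userspace illustration of that derivation (ffz(~x) modelled with POSIX ffs(); not the kernel code):

#include <stdio.h>
#include <strings.h>

static int log2_of_blocksize(unsigned int devblksize)
{
	/* ffz(~x) finds the first zero bit of ~x, i.e. the first set bit
	 * of x; ffs() reports that 1-based, so subtract one. */
	return ffs((int)devblksize) - 1;
}

int main(void)
{
	printf("devblksize=4096 -> log2 = %d\n", log2_of_blocksize(4096)); /* 12 */
	printf("devblksize=0    -> log2 = %d\n", log2_of_blocksize(0));    /* -1, nonsense */
	return 0;
}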
7 changes: 3 additions & 4 deletions io_uring/kbuf.c
@@ -171,9 +171,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
* the transfer completes (or if we get -EAGAIN and must poll of
* retry).
*/
req->flags &= ~REQ_F_BUFFERS_COMMIT;
io_kbuf_commit(req, bl, 1);
req->buf_list = NULL;
bl->head++;
}
return u64_to_user_ptr(buf->addr);
}
@@ -297,8 +296,8 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
* committed them, they cannot be put back in the queue.
*/
if (ret > 0) {
req->flags |= REQ_F_BL_NO_RECYCLE;
req->buf_list->head += ret;
req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
io_kbuf_commit(req, bl, ret);
}
} else {
ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
14 changes: 10 additions & 4 deletions io_uring/kbuf.h
@@ -117,15 +117,21 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
return false;
}

static inline void io_kbuf_commit(struct io_kiocb *req,
struct io_buffer_list *bl, int nr)
{
if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
return;
bl->head += nr;
req->flags &= ~REQ_F_BUFFERS_COMMIT;
}

static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
{
struct io_buffer_list *bl = req->buf_list;

if (bl) {
if (req->flags & REQ_F_BUFFERS_COMMIT) {
bl->head += nr;
req->flags &= ~REQ_F_BUFFERS_COMMIT;
}
io_kbuf_commit(req, bl, nr);
req->buf_index = bl->bgid;
}
req->flags &= ~REQ_F_BUFFER_RING;
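The io_kbuf_commit() helper introduced above gives buffer-ring commits a single definition: advance the ring head only while REQ_F_BUFFERS_COMMIT is set, and clear the flag in the same step, so a second commit of the same selection is a no-op. A standalone sketch of that commit-once pattern (plain C, not io_uring internals):

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int head;
	bool needs_commit;	/* stands in for REQ_F_BUFFERS_COMMIT */
};

static void commit(struct ring *r, unsigned int nr)
{
	if (!r->needs_commit)
		return;		/* nothing pending, or already committed */
	r->head += nr;
	r->needs_commit = false;
}

int main(void)
{
	struct ring r = { .head = 0, .needs_commit = true };

	commit(&r, 2);		/* consumes two buffers */
	commit(&r, 2);		/* duplicate commit is harmless */
	printf("head = %u\n", r.head);	/* prints 2 */
	return 0;
}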
27 changes: 15 additions & 12 deletions io_uring/net.c
@@ -478,6 +478,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
return nbufs;
}

static int io_net_kbuf_recyle(struct io_kiocb *req,
struct io_async_msghdr *kmsg, int len)
{
req->flags |= REQ_F_BL_NO_RECYCLE;
if (req->flags & REQ_F_BUFFERS_COMMIT)
io_kbuf_commit(req, req->buf_list, io_bundle_nbufs(kmsg, len));
return -EAGAIN;
}

static inline bool io_send_finish(struct io_kiocb *req, int *ret,
struct io_async_msghdr *kmsg,
unsigned issue_flags)
@@ -546,8 +555,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_control = NULL;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -635,8 +643,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1018,8 +1025,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1158,8 +1164,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1395,8 +1400,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1455,8 +1459,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)

if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
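All of the net.c retry paths above now go through io_net_kbuf_recyle(): mark the request as not recyclable, commit the buffers the short transfer already used (io_bundle_nbufs() works out how many), and return -EAGAIN so the remainder is retried. A rough userspace analogue of the counting step, with invented names and the assumption that a partially filled last segment also counts:

#include <stdio.h>

struct buf { unsigned int len; };

/* How many buffer segments a short transfer of 'transferred' bytes touched. */
static int bufs_touched(const struct buf *bufs, int nbufs, unsigned int transferred)
{
	int i = 0;

	while (i < nbufs && transferred) {
		unsigned int this_len =
			bufs[i].len < transferred ? bufs[i].len : transferred;

		transferred -= this_len;
		i++;
	}
	return i;
}

int main(void)
{
	struct buf bundle[] = { { 512 }, { 512 }, { 512 }, { 512 } };
	unsigned int sent = 1100;	/* short send: two full buffers plus 76 bytes */

	/* Commit the touched buffers, then ask the caller to retry the rest. */
	printf("commit %d buffers, then return -EAGAIN\n",
	       bufs_touched(bundle, 4, sent));	/* prints 3 */
	return 0;
}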
4 changes: 2 additions & 2 deletions net/bridge/br_mst.c
@@ -80,10 +80,10 @@ static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
if (br_vlan_get_state(v) == state)
return;

br_vlan_set_state(v, state);

if (v->vid == vg->pvid)
br_vlan_set_pvid_state(vg, state);

br_vlan_set_state(v, state);
}

int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
112 changes: 104 additions & 8 deletions net/bridge/br_multicast.c
@@ -2014,10 +2014,19 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port,

void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
struct net_bridge *br = pmctx->port->br;
bool del = false;

#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
del_timer_sync(&pmctx->ip4_mc_router_timer);

spin_lock_bh(&br->multicast_lock);
del |= br_ip6_multicast_rport_del(pmctx);
del |= br_ip4_multicast_rport_del(pmctx);
br_multicast_rport_del_notify(pmctx, del);
spin_unlock_bh(&br->multicast_lock);
}

int br_multicast_add_port(struct net_bridge_port *port)
@@ -2105,12 +2114,17 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
}
}

void br_multicast_enable_port(struct net_bridge_port *port)
static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
struct net_bridge *br = port->br;
struct net_bridge *br = pmctx->port->br;

spin_lock_bh(&br->multicast_lock);
__br_multicast_enable_port_ctx(&port->multicast_ctx);
if (br_multicast_port_ctx_is_vlan(pmctx) &&
!(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
spin_unlock_bh(&br->multicast_lock);
return;
}
__br_multicast_enable_port_ctx(pmctx);
spin_unlock_bh(&br->multicast_lock);
}

@@ -2137,11 +2151,67 @@ static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
br_multicast_rport_del_notify(pmctx, del);
}

static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
struct net_bridge *br = pmctx->port->br;

spin_lock_bh(&br->multicast_lock);
if (br_multicast_port_ctx_is_vlan(pmctx) &&
!(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
spin_unlock_bh(&br->multicast_lock);
return;
}

__br_multicast_disable_port_ctx(pmctx);
spin_unlock_bh(&br->multicast_lock);
}

static void br_multicast_toggle_port(struct net_bridge_port *port, bool on)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;

rcu_read_lock();
vg = nbp_vlan_group_rcu(port);
if (!vg) {
rcu_read_unlock();
return;
}

/* iterate each vlan, toggle vlan multicast context */
list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) {
struct net_bridge_mcast_port *pmctx =
&vlan->port_mcast_ctx;
u8 state = br_vlan_get_state(vlan);
/* enable vlan multicast context when state is
* LEARNING or FORWARDING
*/
if (on && br_vlan_state_allowed(state, true))
br_multicast_enable_port_ctx(pmctx);
else
br_multicast_disable_port_ctx(pmctx);
}
rcu_read_unlock();
return;
}
#endif
/* toggle port multicast context when vlan snooping is disabled */
if (on)
br_multicast_enable_port_ctx(&port->multicast_ctx);
else
br_multicast_disable_port_ctx(&port->multicast_ctx);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
br_multicast_toggle_port(port, true);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
spin_lock_bh(&port->br->multicast_lock);
__br_multicast_disable_port_ctx(&port->multicast_ctx);
spin_unlock_bh(&port->br->multicast_lock);
br_multicast_toggle_port(port, false);
}

static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
@@ -4211,6 +4281,32 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
#endif
}

void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
struct net_bridge *br;

if (!br_vlan_should_use(v))
return;

if (br_vlan_is_master(v))
return;

br = v->port->br;

if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
return;

if (br_vlan_state_allowed(state, true))
br_multicast_enable_port_ctx(&v->port_mcast_ctx);

/* Multicast is not disabled for the vlan when it goes in
* blocking state because the timers will expire and stop by
* themselves without sending more queries.
*/
#endif
}

void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
struct net_bridge *br;
@@ -4304,9 +4400,9 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
__br_multicast_open(&br->multicast_ctx);
list_for_each_entry(p, &br->port_list, list) {
if (on)
br_multicast_disable_port(p);
br_multicast_disable_port_ctx(&p->multicast_ctx);
else
br_multicast_enable_port(p);
br_multicast_enable_port_ctx(&p->multicast_ctx);
}

list_for_each_entry(vlan, &vg->vlan_list, vlist)
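The br_multicast_toggle_port() added above switches between two modes: with per-vlan multicast snooping enabled it walks the port's vlans and enables each vlan context only when that vlan's state is LEARNING or FORWARDING (per the comment in the diff), otherwise it toggles the single port-wide context. A small illustrative decision sketch in plain C, not bridge code:

#include <stdbool.h>
#include <stdio.h>

enum stp_state { STATE_BLOCKING, STATE_LEARNING, STATE_FORWARDING };

/* Mirrors the idea behind br_vlan_state_allowed(state, true) as used above:
 * LEARNING and FORWARDING let the multicast context run. */
static bool state_allowed(enum stp_state state)
{
	return state == STATE_LEARNING || state == STATE_FORWARDING;
}

int main(void)
{
	bool vlan_snooping = true;	/* per-vlan snooping enabled? */
	bool port_enabled = true;	/* the "on" argument of the toggle */
	enum stp_state vlan_states[] = { STATE_FORWARDING, STATE_BLOCKING };
	unsigned int i;

	if (vlan_snooping) {
		/* per-vlan contexts follow each vlan's own state */
		for (i = 0; i < 2; i++)
			printf("vlan %u: %s context\n", i,
			       (port_enabled && state_allowed(vlan_states[i])) ?
			       "enable" : "disable");
	} else {
		/* snooping off: the single port-wide context is toggled */
		printf("port: %s context\n", port_enabled ? "enable" : "disable");
	}
	return 0;
}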
11 changes: 10 additions & 1 deletion net/bridge/br_private.h
@@ -1052,6 +1052,7 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port,
struct net_bridge_vlan *vlan,
struct net_bridge_mcast_port *pmctx);
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx);
void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state);
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on);
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
struct netlink_ext_ack *extack);
@@ -1502,6 +1503,11 @@ static inline void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pm
{
}

static inline void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v,
u8 state)
{
}

static inline void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan,
bool on)
{
@@ -1852,7 +1858,9 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
const struct net_bridge_vlan *v_opts);

/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
/* vlan state manipulation helpers using *_ONCE to annotate lock-free access,
* while br_vlan_set_state() may access data protected by multicast_lock.
*/
static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
{
return READ_ONCE(v->state);
@@ -1861,6 +1869,7 @@ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state)
{
WRITE_ONCE(v->state, state);
br_multicast_update_vlan_mcast_ctx(v, state);
}

static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg)