Merge 'master' into 'os-build'

commit 971ad87901
@@ -18,6 +18,7 @@ Introduction
 both access system memory directly and with the same effective
 addresses.

+**This driver is deprecated and will be removed in a future release.**

 Hardware overview
 =================

@@ -453,7 +454,7 @@ Sysfs Class

 A cxl sysfs class is added under /sys/class/cxl to facilitate
 enumeration and tuning of the accelerators. Its layout is
-described in Documentation/ABI/testing/sysfs-class-cxl
+described in Documentation/ABI/obsolete/sysfs-class-cxl


 Udev rules
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
 =====================================

 :Author: Mickaël Salaün
-:Date: October 2024
+:Date: January 2025

 The goal of Landlock is to enable restriction of ambient rights (e.g. global
 filesystem or network access) for a set of processes. Because Landlock

@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
 A sandboxed process can connect to a non-sandboxed process when its domain is
 not scoped. If a process's domain is scoped, it can only connect to sockets
 created by processes in the same scope.
-Moreover, If a process is scoped to send signal to a non-scoped process, it can
+Moreover, if a process is scoped to send signal to a non-scoped process, it can
 only send signals to processes in the same scope.

 A connected datagram socket behaves like a stream socket when its domain is
-scoped, meaning if the domain is scoped after the socket is connected , it can
+scoped, meaning if the domain is scoped after the socket is connected, it can
 still :manpage:`send(2)` data just like a stream socket. However, in the same
 scenario, a non-connected datagram socket cannot send data (with
 :manpage:`sendto(2)`) outside its scope.
@@ -1669,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
 	if (in_flight)
 		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

-	/*
-	 * Only clear the driver-private command data if the LLD does not supply
-	 * a function to initialize that data.
-	 */
-	if (!shost->hostt->init_cmd_priv)
-		memset(cmd + 1, 0, shost->hostt->cmd_size);
-
 	cmd->prot_op = SCSI_PROT_NORMAL;
 	if (blk_rq_bytes(req))
 		cmd->sc_data_direction = rq_dma_dir(req);

@@ -1842,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
 		goto out_dec_target_busy;

+	/*
+	 * Only clear the driver-private command data if the LLD does not supply
+	 * a function to initialize that data.
+	 */
+	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+		memset(cmd + 1, 0, shost->hostt->cmd_size);
+
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = scsi_prepare_cmd(req);
 		if (ret != BLK_STS_OK)
@@ -194,10 +194,12 @@ out:
 	ufshcd_rpm_put_sync(hba);
 	kfree(buff);
 	bsg_reply->result = ret;
-	job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
 	/* complete the job here only if no error */
-	if (ret == 0)
+	if (ret == 0) {
+		job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+					sizeof(struct ufs_bsg_reply);
 		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+	}

 	return ret;
 }
@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)

 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
 {
-	return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+	return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
 }

 static const struct ufs_dev_quirk ufs_fixups[] = {

@@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
 	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

 	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
-	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
-		hba->outstanding_reqs, hba->outstanding_tasks);
+	dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+		scsi_host_busy(hba->host), hba->outstanding_tasks);
 	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
 		hba->saved_err, hba->saved_uic_err);
 	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",

@@ -8882,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
 	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
 		 __func__, hba->outstanding_tasks);

-	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+	return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
 }

 static const struct attribute_group *ufshcd_driver_groups[] = {

@@ -10431,6 +10431,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	 */
 	spin_lock_init(&hba->clk_gating.lock);

+	/*
+	 * Set the default power management level for runtime and system PM.
+	 * Host controller drivers can override them in their
+	 * 'ufs_hba_variant_ops::init' callback.
+	 *
+	 * Default power saving mode is to keep UFS link in Hibern8 state
+	 * and UFS device in sleep state.
+	 */
+	hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+	hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;

@@ -10544,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		goto out_disable;
 	}

-	/*
-	 * Set the default power management level for runtime and system PM if
-	 * not set by the host controller drivers.
-	 * Default power saving mode is to keep UFS link in Hibern8 state
-	 * and UFS device in sleep state.
-	 */
-	if (!hba->rpm_lvl)
-		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
-							UFS_SLEEP_PWR_MODE,
-							UIC_LINK_HIBERN8_STATE);
-	if (!hba->spm_lvl)
-		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
-							UFS_SLEEP_PWR_MODE,
-							UIC_LINK_HIBERN8_STATE);
-
 	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
 	INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
@@ -203,7 +203,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 		return NULL;
 	}

-	bch2_btree_lock_init(&b->c, 0);
+	bch2_btree_lock_init(&b->c, 0, GFP_KERNEL);

 	__bch2_btree_node_to_freelist(bc, b);
 	return b;

@@ -795,17 +795,18 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	}

 	b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
-	if (!b) {
+	if (b) {
+		bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT);
+	} else {
 		mutex_unlock(&bc->lock);
 		bch2_trans_unlock(trans);
 		b = __btree_node_mem_alloc(c, GFP_KERNEL);
 		if (!b)
 			goto err;
+		bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
 		mutex_lock(&bc->lock);
 	}

-	bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
-
 	BUG_ON(!six_trylock_intent(&b->c.lock));
 	BUG_ON(!six_trylock_write(&b->c.lock));
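The bch2_btree_node_mem_alloc() hunk above follows a common kernel pattern: attempt a GFP_NOWAIT allocation while holding a lock, and only fall back to a sleeping GFP_KERNEL allocation after dropping the lock. A generic sketch of the pattern (illustrative only; the names are hypothetical, not bcachefs code):

    struct foo *foo_alloc_locked(struct mutex *lock)
    {
            /* Fast path: no sleeping allowed while the lock is held. */
            struct foo *f = kzalloc(sizeof(*f), GFP_NOWAIT | __GFP_NOWARN);

            if (!f) {
                    mutex_unlock(lock);
                    /* Slow path: may sleep and trigger reclaim. */
                    f = kzalloc(sizeof(*f), GFP_KERNEL);
                    mutex_lock(lock);
            }
            return f;
    }

Any state guarded by the lock must be revalidated after the relock, which is why the hunk above initializes the lock inside each branch with the matching gfp flags.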
@@ -996,7 +996,7 @@ drop_this_key:
 		}
 got_good_key:
 		le16_add_cpu(&i->u64s, -next_good_key);
-		memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
+		memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
 		set_btree_node_need_rewrite(b);
 	}
 fsck_err:
@@ -156,7 +156,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
 	}

 	if (ck) {
-		bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+		bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
 		ck->c.cached = true;
 		goto lock;
 	}
@@ -7,9 +7,10 @@
 static struct lock_class_key bch2_btree_node_lock_key;

 void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
-			  enum six_lock_init_flags flags)
+			  enum six_lock_init_flags flags,
+			  gfp_t gfp)
 {
-	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
 	lockdep_set_notrack_class(&b->lock);
 }
@@ -13,7 +13,7 @@
 #include "btree_iter.h"
 #include "six.h"

-void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp);

 void bch2_trans_unlock_noassert(struct btree_trans *);
 void bch2_trans_unlock_write(struct btree_trans *);
@@ -340,6 +340,7 @@ restart_drop_extra_replicas:
 			struct printbuf buf = PRINTBUF;

 			prt_str(&buf, "about to insert invalid key in data update path");
+			prt_printf(&buf, "\nop.nonce: %u", m->op.nonce);
 			prt_str(&buf, "\nold: ");
 			bch2_bkey_val_to_text(&buf, c, old);
 			prt_str(&buf, "\nk: ");
@@ -31,11 +31,6 @@ static inline unsigned dirent_val_u64s(unsigned len)
 			    sizeof(u64));
 }

-static inline unsigned int dirent_occupied_size(const struct qstr *name)
-{
-	return (BKEY_U64s + dirent_val_u64s(name->len)) * sizeof(u64);
-}
-
 int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
 			    struct bkey_s_c_dirent, subvol_inum *);
@@ -704,7 +704,7 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
 		ptr1.unwritten == ptr2.unwritten &&
 		ptr1.offset == ptr2.offset &&
 		ptr1.dev == ptr2.dev &&
-		ptr1.dev == ptr2.dev);
+		ptr1.gen == ptr2.gen);
 }

 void bch2_ptr_swab(struct bkey_s);
@@ -152,7 +152,6 @@ int bch2_create_trans(struct btree_trans *trans,
 		if (is_subdir_for_nlink(new_inode))
 			dir_u->bi_nlink++;
 		dir_u->bi_mtime = dir_u->bi_ctime = now;
-		dir_u->bi_size += dirent_occupied_size(name);

 		ret = bch2_inode_write(trans, &dir_iter, dir_u);
 		if (ret)

@@ -221,7 +220,6 @@ int bch2_link_trans(struct btree_trans *trans,
 	}

 	dir_u->bi_mtime = dir_u->bi_ctime = now;
-	dir_u->bi_size += dirent_occupied_size(name);

 	dir_hash = bch2_hash_info_init(c, dir_u);

@@ -324,7 +322,6 @@ int bch2_unlink_trans(struct btree_trans *trans,

 	dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
 	dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
-	dir_u->bi_size -= dirent_occupied_size(name);

 	ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
 				  &dir_hash, &dirent_iter,

@@ -463,14 +460,6 @@ int bch2_rename_trans(struct btree_trans *trans,
 			goto err;
 	}

-	if (mode == BCH_RENAME) {
-		src_dir_u->bi_size -= dirent_occupied_size(src_name);
-		dst_dir_u->bi_size += dirent_occupied_size(dst_name);
-	}
-
-	if (mode == BCH_RENAME_OVERWRITE)
-		src_dir_u->bi_size -= dirent_occupied_size(src_name);
-
 	if (src_inode_u->bi_parent_subvol)
 		src_inode_u->bi_parent_subvol = dst_dir.subvol;

@@ -466,6 +466,7 @@ int bchfs_truncate(struct mnt_idmap *idmap,
 	ret = bch2_truncate_folio(inode, iattr->ia_size);
 	if (unlikely(ret < 0))
 		goto err;
+	ret = 0;

 	truncate_setsize(&inode->v, iattr->ia_size);

@@ -1978,31 +1978,10 @@ fsck_err:
 	return ret;
 }

-static int check_dir_i_size_notnested(struct btree_trans *trans, struct inode_walker *w)
-{
-	struct bch_fs *c = trans->c;
-	int ret = 0;
-
-	darray_for_each(w->inodes, i)
-		if (fsck_err_on(i->inode.bi_size != i->i_size,
-				trans, inode_dir_wrong_nlink,
-				"directory %llu:%u with wrong i_size: got %llu, should be %llu",
-				w->last_pos.inode, i->snapshot, i->inode.bi_size, i->i_size)) {
-			i->inode.bi_size = i->i_size;
-			ret = bch2_fsck_write_inode(trans, &i->inode);
-			if (ret)
-				break;
-		}
-fsck_err:
-	bch_err_fn(c, ret);
-	return ret;
-}
-
 static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w)
 {
 	u32 restart_count = trans->restart_count;
 	return check_subdir_count_notnested(trans, w) ?:
-		check_dir_i_size_notnested(trans, w) ?:
 		trans_was_restarted(trans, restart_count);
 }

@@ -1194,7 +1194,9 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,

 		closure_sync(&cl);

-		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
+		if (ret &&
+		    ret != -BCH_ERR_bucket_alloc_blocked &&
+		    ret != -BCH_ERR_open_buckets_empty)
 			break;
 	}

@@ -90,10 +90,7 @@
 	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),	\
 	  BCH_FSCK_ERR_accounting_mismatch,		\
 	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
-	  BCH_FSCK_ERR_accounting_key_junk_at_end)	\
-	x(directory_size,				\
-	  BIT_ULL(BCH_RECOVERY_PASS_check_dirents),	\
-	  BCH_FSCK_ERR_directory_size_mismatch)	\
+	  BCH_FSCK_ERR_accounting_key_junk_at_end)

 #define DOWNGRADE_TABLE()				\
 	x(bucket_stripe_sectors,			\
@ -850,7 +850,8 @@ void six_lock_exit(struct six_lock *lock)
|
|||
EXPORT_SYMBOL_GPL(six_lock_exit);
|
||||
|
||||
void __six_lock_init(struct six_lock *lock, const char *name,
|
||||
struct lock_class_key *key, enum six_lock_init_flags flags)
|
||||
struct lock_class_key *key, enum six_lock_init_flags flags,
|
||||
gfp_t gfp)
|
||||
{
|
||||
atomic_set(&lock->state, 0);
|
||||
raw_spin_lock_init(&lock->wait_lock);
|
||||
|
@ -873,7 +874,7 @@ void __six_lock_init(struct six_lock *lock, const char *name,
|
|||
* failure if they wish by checking lock->readers, but generally
|
||||
* will not want to treat it as an error.
|
||||
*/
|
||||
lock->readers = alloc_percpu(unsigned);
|
||||
lock->readers = alloc_percpu_gfp(unsigned, gfp);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
|
|
@@ -164,18 +164,19 @@ enum six_lock_init_flags {
 };

 void __six_lock_init(struct six_lock *lock, const char *name,
-		     struct lock_class_key *key, enum six_lock_init_flags flags);
+		     struct lock_class_key *key, enum six_lock_init_flags flags,
+		     gfp_t gfp);

 /**
  * six_lock_init - initialize a six lock
  * @lock: lock to initialize
  * @flags: optional flags, i.e. SIX_LOCK_INIT_PCPU
  */
-#define six_lock_init(lock, flags)					\
+#define six_lock_init(lock, flags, gfp)					\
 do {									\
 	static struct lock_class_key __key;				\
 									\
-	__six_lock_init((lock), #lock, &__key, flags);			\
+	__six_lock_init((lock), #lock, &__key, flags, gfp);		\
 } while (0)

 /**
@@ -780,6 +780,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
 	return 0;
 }

+/**
+ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called to request that the delegation be returned as soon
+ * as the file is closed. If the file is already closed, the delegation is
+ * immediately returned.
+ */
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
+{
+	struct nfs_delegation *delegation;
+	struct nfs_delegation *ret = NULL;
+
+	if (!inode)
+		return;
+	rcu_read_lock();
+	delegation = nfs4_get_valid_delegation(inode);
+	if (!delegation)
+		goto out;
+	spin_lock(&delegation->lock);
+	if (!delegation->inode)
+		goto out_unlock;
+	if (list_empty(&NFS_I(inode)->open_files) &&
+	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+		/* Refcount matched in nfs_end_delegation_return() */
+		ret = nfs_get_delegation(delegation);
+	} else
+		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out_unlock:
+	spin_unlock(&delegation->lock);
+	if (ret)
+		nfs_clear_verifier_delegated(inode);
+out:
+	rcu_read_unlock();
+	nfs_end_delegation_return(inode, ret, 0);
+}
+
 /**
  * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
  * @inode: inode to process
@@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
 				  unsigned long pagemod_limit, u32 deleg_type);
 int nfs4_inode_return_delegation(struct inode *inode);
 void nfs4_inode_return_delegation_on_close(struct inode *inode);
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 void nfs_inode_evict_delegation(struct inode *inode);

@@ -56,6 +56,7 @@
 #include <linux/uaccess.h>
 #include <linux/atomic.h>

+#include "delegation.h"
 #include "internal.h"
 #include "iostat.h"
 #include "pnfs.h"

@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
 		dreq->count = req_start;
 }

+static void nfs_direct_file_adjust_size_locked(struct inode *inode,
+					       loff_t offset, size_t count)
+{
+	loff_t newsize = offset + (loff_t)count;
+	loff_t oldsize = i_size_read(inode);
+
+	if (newsize > oldsize) {
+		i_size_write(inode, newsize);
+		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
+		trace_nfs_size_grow(inode, newsize);
+		nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+	}
+}
+
 /**
  * nfs_swap_rw - NFS address space operation for swap I/O
  * @iocb: target I/O control block

@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 		nfs_direct_count_bytes(dreq, hdr);
 	spin_unlock(&dreq->lock);

+	nfs_update_delegated_atime(dreq->inode);
+
 	while (!list_empty(&hdr->pages)) {
 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 		struct page *page = req->wb_page;

@@ -741,6 +758,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	struct nfs_direct_req *dreq = hdr->dreq;
 	struct nfs_commit_info cinfo;
 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+	struct inode *inode = dreq->inode;
 	int flags = NFS_ODIRECT_DONE;

 	trace_nfs_direct_write_completion(dreq);

@@ -762,6 +780,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	}
 	spin_unlock(&dreq->lock);

+	spin_lock(&inode->i_lock);
+	nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
+	nfs_update_delegated_mtime_locked(dreq->inode);
+	spin_unlock(&inode->i_lock);
+
 	while (!list_empty(&hdr->pages)) {

 		req = nfs_list_entry(hdr->pages.next);
@@ -133,6 +133,7 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
 	if (err)
 		return NULL;

+	label->lsmid = shim.id;
 	label->label = shim.context;
 	label->len = shim.len;
 	return label;

@@ -145,7 +146,7 @@ nfs4_label_release_security(struct nfs4_label *label)
 	if (label) {
 		shim.context = label->label;
 		shim.len = label->len;
-		shim.id = LSM_ID_UNDEF;
+		shim.id = label->lsmid;
 		security_release_secctx(&shim);
 	}
 }

@@ -3906,8 +3907,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,

 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
 {
+	struct dentry *dentry = ctx->dentry;
 	if (ctx->state == NULL)
 		return;
+	if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+		nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
 	if (is_sync)
 		nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
 	else

@@ -6269,7 +6273,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
 					size_t buflen)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
-	struct nfs4_label label = {0, 0, buflen, buf};
+	struct nfs4_label label = {0, 0, 0, buflen, buf};

 	u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
 	struct nfs_fattr fattr = {

@@ -6374,7 +6378,7 @@ static int nfs4_do_set_security_label(struct inode *inode,
 static int
 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
 {
-	struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
+	struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
 	struct nfs_fattr *fattr;
 	int status;

@@ -47,6 +47,7 @@ struct nfs4_acl {
 struct nfs4_label {
 	uint32_t	lfs;
 	uint32_t	pi;
+	u32		lsmid;
 	u32		len;
 	char		*label;
 };
@@ -158,7 +158,6 @@ enum {
 	RPC_TASK_NEED_XMIT,
 	RPC_TASK_NEED_RECV,
 	RPC_TASK_MSG_PIN_WAIT,
-	RPC_TASK_SIGNALLED,
 };

 #define rpc_test_and_set_running(t) \

@@ -171,7 +170,7 @@ enum {

 #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)

-#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t)	(READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)

 /*
  * Task priorities.
@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
 		{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" },			\
 		{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" },		\
 		{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" },		\
-		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" },	\
-		{ (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })

 DECLARE_EVENT_CLASS(rpc_task_running,

@@ -268,7 +268,9 @@ struct landlock_net_port_attr {
  * ~~~~~~~~~~~~~~~~
  *
  * These flags enable to restrict a sandboxed process to a set of network
- * actions. This is supported since the Landlock ABI version 4.
+ * actions.
  *
+ * This is supported since Landlock ABI version 4.
+ *
  * The following access rights apply to TCP port numbers:
  *

@@ -291,11 +293,13 @@ struct landlock_net_port_attr {
 * Setting a flag for a ruleset will isolate the Landlock domain to forbid
 * connections to resources outside the domain.
 *
+ * This is supported since Landlock ABI version 6.
+ *
 * Scopes:
 *
 * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
 *   connecting to an abstract UNIX socket created by a process outside the
- *   related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ *   related Landlock domain (e.g., a parent domain or a non-sandboxed process).
 * - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
 *   to another process outside the domain.
 */
@@ -3117,7 +3117,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
 {
 	struct task_struct *prev = rq->curr;
 	struct task_struct *p;
-	bool prev_on_scx = prev->sched_class == &ext_sched_class;
 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
 	bool kick_idle = false;

@@ -3137,14 +3136,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
 	 * if pick_task_scx() is called without preceding balance_scx().
 	 */
 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
-		if (prev_on_scx) {
+		if (prev->scx.flags & SCX_TASK_QUEUED) {
 			keep_prev = true;
 		} else {
 			keep_prev = false;
 			kick_idle = true;
 		}
-	} else if (unlikely(keep_prev && !prev_on_scx)) {
-		/* only allowed during transitions */
+	} else if (unlikely(keep_prev &&
+			    prev->sched_class != &ext_sched_class)) {
+		/*
+		 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
+		 * conditional on scx_enabled() and may have been skipped.
+		 */
 		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
 		keep_prev = false;
 	}
@@ -2254,8 +2254,10 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 * queues a new work item to a wq after destroy_workqueue(wq).
 	 */
 	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
-		     WARN_ON_ONCE(!is_chained_work(wq))))
+		     WARN_ONCE(!is_chained_work(wq), "workqueue: cannot queue %ps on wq %s\n",
+			       work->func, wq->name))) {
 		return;
+	}
 	rcu_read_lock();
 retry:
 	/* pwq which will be used unless @work is executing elsewhere */
@@ -1674,12 +1674,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
 	}
 }

-#ifdef CONFIG_PROC_FS
 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 {
 	struct proc_dir_entry *p;
 	struct sunrpc_net *sn;

+	if (!IS_ENABLED(CONFIG_PROC_FS))
+		return 0;
+
 	sn = net_generic(net, sunrpc_net_id);
 	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
 	if (cd->procfs == NULL)

@@ -1707,12 +1709,6 @@ out_nomem:
 	remove_cache_proc_entries(cd);
 	return -ENOMEM;
 }
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
-{
-	return 0;
-}
-#endif

 void __init cache_initialize(void)
 {
@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
 	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
 		return;
 	trace_rpc_task_signalled(task, task->tk_action);
-	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
-	smp_mb__after_atomic();
 	queue = READ_ONCE(task->tk_waitqueue);
 	if (queue)
 		rpc_wake_up_queued_task(queue, task);
@@ -2581,7 +2581,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
 	struct sock_xprt *lower_transport =
 		container_of(lower_xprt, struct sock_xprt, xprt);

-	lower_transport->xprt_err = status ? -EACCES : 0;
+	switch (status) {
+	case 0:
+	case -EACCES:
+	case -ETIMEDOUT:
+		lower_transport->xprt_err = status;
+		break;
+	default:
+		lower_transport->xprt_err = -EACCES;
+	}
 	complete(&lower_transport->handshake_done);
 	xprt_put(lower_xprt);
 }
@@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
 }

 /*
- * Dump large security xattr values as a continuous ascii hexademical string.
+ * Dump large security xattr values as a continuous ascii hexadecimal string.
  * (pr_debug is limited to 64 bytes.)
  */
 static void dump_security_xattr_l(const char *prefix, const void *src,
@@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry)
  * and compare it against the stored security.evm xattr.
  *
  * For performance:
- * - use the previoulsy retrieved xattr value and length to calculate the
+ * - use the previously retrieved xattr value and length to calculate the
  *   HMAC.)
  * - cache the verification result in the iint, when available.
  *
@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
 #define IMA_CHECK_BLACKLIST	0x40000000
 #define IMA_VERITY_REQUIRED	0x80000000

+/* Exclude non-action flags which are not rule-specific. */
+#define IMA_NONACTION_RULE_FLAGS	(IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
+
 #define IMA_DO_MASK		(IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
 				 IMA_HASH | IMA_APPRAISE_SUBMASK)
 #define IMA_DONE_MASK		(IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
@@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
 	mutex_lock(&iint->mutex);

 	if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
-		/* reset appraisal flags if ima_inode_post_setattr was called */
+		/*
+		 * Reset appraisal flags (action and non-action rule-specific)
+		 * if ima_inode_post_setattr was called.
+		 */
 		iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
 				 IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
-				 IMA_NONACTION_FLAGS);
+				 IMA_NONACTION_RULE_FLAGS);

 	/*
 	 * Re-evaulate the file if either the xattr has changed or the

@@ -1011,9 +1014,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap,
 	}

 	/*
-	 * Both LSM hooks and auxilary based buffer measurements are
-	 * based on policy. To avoid code duplication, differentiate
-	 * between the LSM hooks and auxilary buffer measurements,
+	 * Both LSM hooks and auxiliary based buffer measurements are
+	 * based on policy. To avoid code duplication, differentiate
+	 * between the LSM hooks and auxiliary buffer measurements,
 	 * retrieving the policy rule information only for the LSM hook
 	 * buffer measurements.
 	 */
@@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock,
 	if (WARN_ON_ONCE(dom->num_layers < 1))
 		return -EACCES;

-	/* Checks if it's a (potential) TCP socket. */
-	if (sock->type != SOCK_STREAM)
+	if (!sk_is_tcp(sock->sk))
 		return 0;

 	/* Checks for minimal header length to safely read sa_family. */
@@ -124,7 +124,7 @@ create_rule(const struct landlock_id id,
 		return ERR_PTR(-ENOMEM);
 	RB_CLEAR_NODE(&new_rule->node);
 	if (is_object_pointer(id.type)) {
-		/* This should be catched by insert_rule(). */
+		/* This should have been caught by insert_rule(). */
 		WARN_ON_ONCE(!id.key.object);
 		landlock_get_object(id.key.object);
 	}
@@ -1,2 +1,4 @@
 /*_test
+/sandbox-and-launch
 /true
+/wait-pipe
@@ -207,6 +207,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
 struct protocol_variant {
 	int domain;
 	int type;
+	int protocol;
 };

 struct service_fixture {
@@ -1,8 +1,11 @@
 CONFIG_AF_UNIX_OOB=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_INET=y
+CONFIG_IPV6=y
 CONFIG_KEYS=y
+CONFIG_MPTCP=y
+CONFIG_MPTCP_IPV6=y
 CONFIG_NET=y
 CONFIG_NET_NS=y
 CONFIG_OVERLAY_FS=y
@@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata)
 	clear_ambient_cap(_metadata, CAP_NET_ADMIN);
 }

+static bool prot_is_tcp(const struct protocol_variant *const prot)
+{
+	return (prot->domain == AF_INET || prot->domain == AF_INET6) &&
+	       prot->type == SOCK_STREAM &&
+	       (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP);
+}
+
 static bool is_restricted(const struct protocol_variant *const prot,
 			  const enum sandbox_type sandbox)
 {
-	switch (prot->domain) {
-	case AF_INET:
-	case AF_INET6:
-		switch (prot->type) {
-		case SOCK_STREAM:
-			return sandbox == TCP_SANDBOX;
-		}
-		break;
-	}
+	if (sandbox == TCP_SANDBOX)
+		return prot_is_tcp(prot);
 	return false;
 }

@@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv)
 	int ret;

 	ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
-		     0);
+		     srv->protocol.protocol);
 	if (ret < 0)
 		return -errno;
 	return ret;
@@ -290,22 +290,70 @@ FIXTURE_TEARDOWN(protocol)
 }

 /* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) {
 	/* clang-format on */
 	.sandbox = NO_SANDBOX,
 	.prot = {
 		.domain = AF_INET,
 		.type = SOCK_STREAM,
+		/* IPPROTO_IP == 0 */
+		.protocol = IPPROTO_IP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) {
+	/* clang-format on */
+	.sandbox = NO_SANDBOX,
+	.prot = {
+		.domain = AF_INET,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_TCP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) {
+	/* clang-format on */
+	.sandbox = NO_SANDBOX,
+	.prot = {
+		.domain = AF_INET,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_MPTCP,
 	},
 };

 /* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) {
 	/* clang-format on */
 	.sandbox = NO_SANDBOX,
 	.prot = {
 		.domain = AF_INET6,
 		.type = SOCK_STREAM,
+		/* IPPROTO_IP == 0 */
+		.protocol = IPPROTO_IP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) {
+	/* clang-format on */
+	.sandbox = NO_SANDBOX,
+	.prot = {
+		.domain = AF_INET6,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_TCP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) {
+	/* clang-format on */
+	.sandbox = NO_SANDBOX,
+	.prot = {
+		.domain = AF_INET6,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_MPTCP,
 	},
 };

@@ -350,22 +398,70 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
 };

 /* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) {
 	/* clang-format on */
 	.sandbox = TCP_SANDBOX,
 	.prot = {
 		.domain = AF_INET,
 		.type = SOCK_STREAM,
+		/* IPPROTO_IP == 0 */
+		.protocol = IPPROTO_IP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) {
+	/* clang-format on */
+	.sandbox = TCP_SANDBOX,
+	.prot = {
+		.domain = AF_INET,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_TCP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) {
+	/* clang-format on */
+	.sandbox = TCP_SANDBOX,
+	.prot = {
+		.domain = AF_INET,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_MPTCP,
 	},
 };

 /* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) {
 	/* clang-format on */
 	.sandbox = TCP_SANDBOX,
 	.prot = {
 		.domain = AF_INET6,
 		.type = SOCK_STREAM,
+		/* IPPROTO_IP == 0 */
+		.protocol = IPPROTO_IP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) {
+	/* clang-format on */
+	.sandbox = TCP_SANDBOX,
+	.prot = {
+		.domain = AF_INET6,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_TCP,
+	},
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) {
+	/* clang-format on */
+	.sandbox = TCP_SANDBOX,
+	.prot = {
+		.domain = AF_INET6,
+		.type = SOCK_STREAM,
+		.protocol = IPPROTO_MPTCP,
 	},
 };