From patchwork Tue Jun 21 08:22:57 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888805 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DA72FCCA473 for ; Tue, 21 Jun 2022 08:23:30 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229872AbiFUIX3 (ORCPT ); Tue, 21 Jun 2022 04:23:29 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41492 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229468AbiFUIXX (ORCPT ); Tue, 21 Jun 2022 04:23:23 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E204CA444; Tue, 21 Jun 2022 01:23:21 -0700 (PDT) Received: from fraeml715-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzvP0xZsz6H7Dt; Tue, 21 Jun 2022 16:19:29 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml715-chm.china.huawei.com (10.206.15.34) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:19 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:19 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 01/17] landlock: renames access mask Date: Tue, 21 Jun 2022 16:22:57 +0800 Message-ID: <20220621082313.3330667-2-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org To support network type rules, this modification extends and renames ruleset's access masks. This patch adds filesystem helper functions to set and get filesystem mask. Also the modification adds a helper structure landlock_access_mask to support managing multiple access mask. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Changes access_mask_t to u32. * Formats code with clang-format-14. Changes since v4: * Deletes struct landlock_access_mask. Changes since v3: * Splits commit. * Adds get_mask, set_mask helpers for filesystem. * Adds new struct landlock_access_mask. --- security/landlock/fs.c | 7 ++++--- security/landlock/ruleset.c | 18 +++++++++--------- security/landlock/ruleset.h | 25 ++++++++++++++++++++----- security/landlock/syscalls.c | 7 ++++--- 4 files changed, 37 insertions(+), 20 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index ec5a6247cd3e..e6da08ed99d1 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -167,7 +167,8 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, return -EINVAL; /* Transforms relative access rights to absolute ones. 
*/ - access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0]; + access_rights |= LANDLOCK_MASK_ACCESS_FS & + ~landlock_get_fs_access_mask(ruleset, 0); object = get_inode_object(d_backing_inode(path->dentry)); if (IS_ERR(object)) return PTR_ERR(object); @@ -286,7 +287,7 @@ get_handled_accesses(const struct landlock_ruleset *const domain) for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { - if (domain->fs_access_masks[layer_level] & + if (landlock_get_fs_access_mask(domain, layer_level) & BIT_ULL(access_bit)) { access_dom |= BIT_ULL(access_bit); break; @@ -316,7 +317,7 @@ init_layer_masks(const struct landlock_ruleset *const domain, for_each_set_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks)) { - if (domain->fs_access_masks[layer_level] & + if (landlock_get_fs_access_mask(domain, layer_level) & BIT_ULL(access_bit)) { (*layer_masks)[access_bit] |= BIT_ULL(layer_level); diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 996484f98bfd..a3fd58d01f09 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -29,7 +29,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) struct landlock_ruleset *new_ruleset; new_ruleset = - kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers), + kzalloc(struct_size(new_ruleset, access_masks, num_layers), GFP_KERNEL_ACCOUNT); if (!new_ruleset) return ERR_PTR(-ENOMEM); @@ -40,22 +40,22 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) /* * hierarchy = NULL * num_rules = 0 - * fs_access_masks[] = 0 + * access_masks[] = 0 */ return new_ruleset; } struct landlock_ruleset * -landlock_create_ruleset(const access_mask_t fs_access_mask) +landlock_create_ruleset(const access_mask_t access_mask) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. */ - if (!fs_access_mask) + if (!access_mask) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); if (!IS_ERR(new_ruleset)) - new_ruleset->fs_access_masks[0] = fs_access_mask; + landlock_set_fs_access_mask(new_ruleset, access_mask, 0); return new_ruleset; } @@ -117,7 +117,7 @@ static void build_check_ruleset(void) .num_rules = ~0, .num_layers = ~0, }; - typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0; + typeof(ruleset.access_masks[0]) fs_access_mask = ~0; BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES); BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS); @@ -281,7 +281,7 @@ static int merge_ruleset(struct landlock_ruleset *const dst, err = -EINVAL; goto out_unlock; } - dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0]; + dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; /* Merges the @src tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root, @@ -340,8 +340,8 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, goto out_unlock; } /* Copies the parent layer stack and leaves a space for the new layer. 
*/ - memcpy(child->fs_access_masks, parent->fs_access_masks, - flex_array_size(parent, fs_access_masks, parent->num_layers)); + memcpy(child->access_masks, parent->access_masks, + flex_array_size(parent, access_masks, parent->num_layers)); if (WARN_ON_ONCE(!parent->hierarchy)) { err = -EINVAL; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index d43231b783e4..bd7ab39859bf 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -19,7 +19,7 @@ #include "limits.h" #include "object.h" -typedef u16 access_mask_t; +typedef u32 access_mask_t; /* Makes sure all filesystem access rights can be stored. */ static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS); /* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */ @@ -110,7 +110,7 @@ struct landlock_ruleset { * section. This is only used by * landlock_put_ruleset_deferred() when @usage reaches zero. * The fields @lock, @usage, @num_rules, @num_layers and - * @fs_access_masks are then unused. + * @access_masks are then unused. */ struct work_struct work_free; struct { @@ -137,7 +137,7 @@ struct landlock_ruleset { */ u32 num_layers; /** - * @fs_access_masks: Contains the subset of filesystem + * @access_masks: Contains the subset of filesystem * actions that are restricted by a ruleset. A domain * saves all layers of merged rulesets in a stack * (FAM), starting from the first layer to the last @@ -148,13 +148,13 @@ struct landlock_ruleset { * layers are set once and never changed for the * lifetime of the ruleset. */ - access_mask_t fs_access_masks[]; + access_mask_t access_masks[]; }; }; }; struct landlock_ruleset * -landlock_create_ruleset(const access_mask_t fs_access_mask); +landlock_create_ruleset(const access_mask_t access_mask); void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); @@ -177,4 +177,19 @@ static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) refcount_inc(&ruleset->usage); } +/* A helper function to set a filesystem mask. */ +static inline void +landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, + const access_mask_t access_maskset, u16 mask_level) +{ + ruleset->access_masks[mask_level] = access_maskset; +} + +/* A helper function to get a filesystem mask. */ +static inline u32 +landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, + u16 mask_level) +{ + return ruleset->access_masks[mask_level]; +} #endif /* _SECURITY_LANDLOCK_RULESET_H */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 735a0865ea11..5836736ce9d7 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -346,10 +346,11 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, } /* * Checks that allowed_access matches the @ruleset constraints - * (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits). + * (ruleset->access_masks[0] is automatically upgraded to 64-bits). 
*/ - if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) != - ruleset->fs_access_masks[0]) { + if ((path_beneath_attr.allowed_access | + landlock_get_fs_access_mask(ruleset, 0)) != + landlock_get_fs_access_mask(ruleset, 0)) { err = -EINVAL; goto out_put_ruleset; } From patchwork Tue Jun 21 08:22:58 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888806 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id ECF60CCA482 for ; Tue, 21 Jun 2022 08:23:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230007AbiFUIXa (ORCPT ); Tue, 21 Jun 2022 04:23:30 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41500 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229695AbiFUIXY (ORCPT ); Tue, 21 Jun 2022 04:23:24 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 57E76BE0E; Tue, 21 Jun 2022 01:23:23 -0700 (PDT) Received: from fraeml710-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzzR0Pf9z6GCp4; Tue, 21 Jun 2022 16:22:59 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml710-chm.china.huawei.com (10.206.15.59) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:21 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:20 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 02/17] landlock: refactors landlock_find/insert_rule Date: Tue, 21 Jun 2022 16:22:58 +0800 Message-ID: <20220621082313.3330667-3-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Adds a new object union to support a socket port rule type. Refactors landlock_insert_rule() and landlock_find_rule() to support coming network modifications. Now adding or searching a rule in a ruleset depends on a rule_type argument provided in refactored functions mentioned above. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * Refactors insert_rule() and create_rule() functions by deleting rule_type from their arguments list, it helps to reduce useless code. Changes since v3: * Splits commit. * Refactors landlock_insert_rule and landlock_find_rule functions. * Rename new_ruleset->root_inode. 
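
[Editor's illustration, not part of the patch: a minimal standalone userspace sketch of why the new object union can serve as a single rb-tree key. The names rule_key and key_cmp and the toy values are invented; the kernel code itself casts the landlock_object pointer to uintptr_t and compares rules through object.data, exactly as the diff below does.]

#include <stdint.h>
#include <stdio.h>

struct object;				/* stands in for struct landlock_object */

union rule_key {
	struct object *ptr;		/* used by LANDLOCK_RULE_PATH_BENEATH rules */
	uintptr_t data;			/* raw value, e.g. a TCP port, for later rule types */
};

/* Both rule kinds compare through the integer view of the key. */
static int key_cmp(const union rule_key a, const union rule_key b)
{
	if (a.data == b.data)
		return 0;
	return a.data < b.data ? -1 : 1;
}

int main(void)
{
	union rule_key inode_key = { .ptr = (struct object *)0x1000 };
	union rule_key port_key = { .data = 443 };

	printf("cmp = %d\n", key_cmp(inode_key, port_key));
	return 0;
}

This is why landlock_insert_rule() and landlock_find_rule() can take either a pointer or a raw object_data value and still walk one ordered tree per rule type.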
--- security/landlock/fs.c | 7 ++- security/landlock/ruleset.c | 105 ++++++++++++++++++++++++++---------- security/landlock/ruleset.h | 27 +++++----- 3 files changed, 96 insertions(+), 43 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index e6da08ed99d1..46aedc2a05a8 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -173,7 +173,8 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, if (IS_ERR(object)) return PTR_ERR(object); mutex_lock(&ruleset->lock); - err = landlock_insert_rule(ruleset, object, access_rights); + err = landlock_insert_rule(ruleset, object, 0, access_rights, + LANDLOCK_RULE_PATH_BENEATH); mutex_unlock(&ruleset->lock); /* * No need to check for an error because landlock_insert_rule() @@ -204,7 +205,9 @@ find_rule(const struct landlock_ruleset *const domain, inode = d_backing_inode(dentry); rcu_read_lock(); rule = landlock_find_rule( - domain, rcu_dereference(landlock_inode(inode)->object)); + domain, + (uintptr_t)rcu_dereference(landlock_inode(inode)->object), + LANDLOCK_RULE_PATH_BENEATH); rcu_read_unlock(); return rule; } diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index a3fd58d01f09..5f13f8a12aee 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -35,7 +35,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) return ERR_PTR(-ENOMEM); refcount_set(&new_ruleset->usage, 1); mutex_init(&new_ruleset->lock); - new_ruleset->root = RB_ROOT; + new_ruleset->root_inode = RB_ROOT; new_ruleset->num_layers = num_layers; /* * hierarchy = NULL @@ -69,7 +69,8 @@ static void build_check_rule(void) } static struct landlock_rule * -create_rule(struct landlock_object *const object, +create_rule(struct landlock_object *const object_ptr, + const uintptr_t object_data, const struct landlock_layer (*const layers)[], const u32 num_layers, const struct landlock_layer *const new_layer) { @@ -90,8 +91,15 @@ create_rule(struct landlock_object *const object, if (!new_rule) return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); - landlock_get_object(object); - new_rule->object = object; + + if (object_ptr) { + landlock_get_object(object_ptr); + new_rule->object.ptr = object_ptr; + } else if (object_ptr && object_data) { + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); + } + new_rule->num_layers = new_num_layers; /* Copies the original layer stack. */ memcpy(new_rule->layers, layers, @@ -107,7 +115,7 @@ static void free_rule(struct landlock_rule *const rule) might_sleep(); if (!rule) return; - landlock_put_object(rule->object); + landlock_put_object(rule->object.ptr); kfree(rule); } @@ -143,26 +151,42 @@ static void build_check_ruleset(void) * access rights. */ static int insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, + struct landlock_object *const object_ptr, + uintptr_t object_data, u16 rule_type, const struct landlock_layer (*const layers)[], size_t num_layers) { struct rb_node **walker_node; struct rb_node *parent_node = NULL; struct landlock_rule *new_rule; + struct rb_root *root; might_sleep(); lockdep_assert_held(&ruleset->lock); - if (WARN_ON_ONCE(!object || !layers)) + if (WARN_ON_ONCE(!layers)) return -ENOENT; - walker_node = &(ruleset->root.rb_node); + if (WARN_ON_ONCE(object_ptr && object_data)) + return -EINVAL; + /* Chooses rb_tree structure depending on a rule type. 
*/ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + if (WARN_ON_ONCE(!object_ptr)) + return -ENOENT; + object_data = (uintptr_t)object_ptr; + root = &ruleset->root_inode; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + walker_node = &root->rb_node; while (*walker_node) { struct landlock_rule *const this = rb_entry(*walker_node, struct landlock_rule, node); - if (this->object != object) { + if (this->object.data != object_data) { parent_node = *walker_node; - if (this->object < object) + if (this->object.data < object_data) walker_node = &((*walker_node)->rb_right); else walker_node = &((*walker_node)->rb_left); @@ -194,11 +218,16 @@ static int insert_rule(struct landlock_ruleset *const ruleset, * Intersects access rights when it is a merge between a * ruleset and a domain. */ - new_rule = create_rule(object, &this->layers, this->num_layers, - &(*layers)[0]); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + new_rule = create_rule(object_ptr, 0, &this->layers, + this->num_layers, &(*layers)[0]); + break; + } if (IS_ERR(new_rule)) return PTR_ERR(new_rule); - rb_replace_node(&this->node, &new_rule->node, &ruleset->root); + rb_replace_node(&this->node, &new_rule->node, + &ruleset->root_inode); free_rule(this); return 0; } @@ -207,11 +236,15 @@ static int insert_rule(struct landlock_ruleset *const ruleset, build_check_ruleset(); if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES) return -E2BIG; - new_rule = create_rule(object, layers, num_layers, NULL); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + new_rule = create_rule(object_ptr, 0, layers, num_layers, NULL); + break; + } if (IS_ERR(new_rule)) return PTR_ERR(new_rule); rb_link_node(&new_rule->node, parent_node, walker_node); - rb_insert_color(&new_rule->node, &ruleset->root); + rb_insert_color(&new_rule->node, &ruleset->root_inode); ruleset->num_rules++; return 0; } @@ -229,8 +262,9 @@ static void build_check_layer(void) /* @ruleset must be locked by the caller. */ int landlock_insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, - const access_mask_t access) + struct landlock_object *const object_ptr, + const uintptr_t object_data, + const access_mask_t access, const u16 rule_type) { struct landlock_layer layers[] = { { .access = access, @@ -239,7 +273,8 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset, } }; build_check_layer(); - return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers)); + return insert_rule(ruleset, object_ptr, object_data, rule_type, &layers, + ARRAY_SIZE(layers)); } static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy) @@ -284,8 +319,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst, dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; /* Merges the @src tree. */ - rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root, - node) { + rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, + &src->root_inode, node) { struct landlock_layer layers[] = { { .level = dst->num_layers, } }; @@ -299,7 +334,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst, goto out_unlock; } layers[0].access = walker_rule->layers[0].access; - err = insert_rule(dst, walker_rule->object, &layers, + err = insert_rule(dst, walker_rule->object.ptr, 0, + LANDLOCK_RULE_PATH_BENEATH, &layers, ARRAY_SIZE(layers)); if (err) goto out_unlock; @@ -327,8 +363,9 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, /* Copies the @parent tree. 
*/ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &parent->root, node) { - err = insert_rule(child, walker_rule->object, + &parent->root_inode, node) { + err = insert_rule(child, walker_rule->object.ptr, 0, + LANDLOCK_RULE_PATH_BENEATH, &walker_rule->layers, walker_rule->num_layers); if (err) @@ -361,7 +398,8 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) struct landlock_rule *freeme, *next; might_sleep(); - rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node) + rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, + node) free_rule(freeme); put_hierarchy(ruleset->hierarchy); kfree(ruleset); @@ -453,20 +491,29 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent, */ const struct landlock_rule * landlock_find_rule(const struct landlock_ruleset *const ruleset, - const struct landlock_object *const object) + const uintptr_t object_data, const u16 rule_type) { const struct rb_node *node; - if (!object) + if (!object_data) return NULL; - node = ruleset->root.rb_node; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + node = ruleset->root_inode.rb_node; + break; + default: + WARN_ON_ONCE(1); + return NULL; + } + while (node) { struct landlock_rule *this = rb_entry(node, struct landlock_rule, node); - if (this->object == object) + if (this->object.data == object_data) return this; - if (this->object < object) + if (this->object.data < object_data) node = node->rb_right; else node = node->rb_left; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index bd7ab39859bf..a22d132c32a7 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -53,15 +53,17 @@ struct landlock_rule { */ struct rb_node node; /** - * @object: Pointer to identify a kernel object (e.g. an inode). This - * is used as a key for this ruleset element. This pointer is set once - * and never modified. It always points to an allocated object because - * each rule increments the refcount of its object. - */ - struct landlock_object *object; - /** - * @num_layers: Number of entries in @layers. + * @object: A union to identify either a kernel object (e.g. an inode) or + * a raw data value (e.g. a network socket port). This is used as a key + * for this ruleset element. This pointer/@object.ptr/ is set once and + * never modified. It always points to an allocated object because each + * rule increments the refcount of its object (for inodes).; */ + union { + struct landlock_object *ptr; + uintptr_t data; + } object; + u32 num_layers; /** * @layers: Stack of layers, from the latest to the newest, implemented @@ -98,7 +100,7 @@ struct landlock_ruleset { * nodes. Once a ruleset is tied to a process (i.e. as a domain), this * tree is immutable until @usage reaches zero. */ - struct rb_root root; + struct rb_root root_inode; /** * @hierarchy: Enables hierarchy identification even when a parent * domain vanishes. This is needed for the ptrace protection. 
@@ -160,8 +162,9 @@ void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); int landlock_insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, - const access_mask_t access); + struct landlock_object *const object_ptr, + const uintptr_t object_data, + const access_mask_t access, const u16 rule_type); struct landlock_ruleset * landlock_merge_ruleset(struct landlock_ruleset *const parent, @@ -169,7 +172,7 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent, const struct landlock_rule * landlock_find_rule(const struct landlock_ruleset *const ruleset, - const struct landlock_object *const object); + const uintptr_t object_data, const u16 rule_type); static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) { From patchwork Tue Jun 21 08:22:59 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888807 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 31EFDCCA473 for ; Tue, 21 Jun 2022 08:23:36 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230347AbiFUIXc (ORCPT ); Tue, 21 Jun 2022 04:23:32 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41554 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229732AbiFUIX2 (ORCPT ); Tue, 21 Jun 2022 04:23:28 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E6F8BCE1A; Tue, 21 Jun 2022 01:23:24 -0700 (PDT) Received: from fraeml712-chm.china.huawei.com (unknown [172.18.147.206]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzvS0n8Qz683Pb; Tue, 21 Jun 2022 16:19:32 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml712-chm.china.huawei.com (10.206.15.61) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:22 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:21 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 03/17] landlock: refactors merge and inherit functions Date: Tue, 21 Jun 2022 16:22:59 +0800 Message-ID: <20220621082313.3330667-4-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Refactors merge_ruleset() and inherit_ruleset() functions to support new rule types. This patch adds tree_merge() and tree_copy() helpers. Each has rule_type argument to choose a particular rb_tree structure in a ruleset. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Refactors some logic errors. 
* Formats code with clang-format-14. Changes since v4: * None --- security/landlock/ruleset.c | 153 +++++++++++++++++++++++++----------- 1 file changed, 105 insertions(+), 48 deletions(-) -- 2.25.1 diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 5f13f8a12aee..820b6e6a4496 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -110,12 +110,16 @@ create_rule(struct landlock_object *const object_ptr, return new_rule; } -static void free_rule(struct landlock_rule *const rule) +static void free_rule(struct landlock_rule *const rule, const u16 rule_type) { might_sleep(); if (!rule) return; - landlock_put_object(rule->object.ptr); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + landlock_put_object(rule->object.ptr); + break; + } kfree(rule); } @@ -222,13 +226,13 @@ static int insert_rule(struct landlock_ruleset *const ruleset, case LANDLOCK_RULE_PATH_BENEATH: new_rule = create_rule(object_ptr, 0, &this->layers, this->num_layers, &(*layers)[0]); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_replace_node(&this->node, &new_rule->node, + &ruleset->root_inode); + free_rule(this, rule_type); break; } - if (IS_ERR(new_rule)) - return PTR_ERR(new_rule); - rb_replace_node(&this->node, &new_rule->node, - &ruleset->root_inode); - free_rule(this); return 0; } @@ -239,13 +243,13 @@ static int insert_rule(struct landlock_ruleset *const ruleset, switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: new_rule = create_rule(object_ptr, 0, layers, num_layers, NULL); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_link_node(&new_rule->node, parent_node, walker_node); + rb_insert_color(&new_rule->node, &ruleset->root_inode); + ruleset->num_rules++; break; } - if (IS_ERR(new_rule)) - return PTR_ERR(new_rule); - rb_link_node(&new_rule->node, parent_node, walker_node); - rb_insert_color(&new_rule->node, &ruleset->root_inode); - ruleset->num_rules++; return 0; } @@ -293,10 +297,54 @@ static void put_hierarchy(struct landlock_hierarchy *hierarchy) } } +static int tree_merge(struct landlock_ruleset *const src, + struct landlock_ruleset *const dst, u16 rule_type) +{ + struct landlock_rule *walker_rule, *next_rule; + struct rb_root *src_root; + int err = 0; + + /* Chooses rb_tree structure depending on a rule type. */ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + src_root = &src->root_inode; + break; + default: + return -EINVAL; + } + /* Merges the @src tree. */ + rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root, + node) { + struct landlock_layer layers[] = { { + .level = dst->num_layers, + } }; + + if (WARN_ON_ONCE(walker_rule->num_layers != 1)) { + err = -EINVAL; + return err; + } + if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) { + err = -EINVAL; + return err; + } + layers[0].access = walker_rule->layers[0].access; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = insert_rule(dst, walker_rule->object.ptr, 0, + rule_type, &layers, + ARRAY_SIZE(layers)); + break; + } + if (err) + return err; + } + return err; +} + static int merge_ruleset(struct landlock_ruleset *const dst, struct landlock_ruleset *const src) { - struct landlock_rule *walker_rule, *next_rule; int err = 0; might_sleep(); @@ -318,28 +366,10 @@ static int merge_ruleset(struct landlock_ruleset *const dst, } dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; - /* Merges the @src tree. 
*/ - rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &src->root_inode, node) { - struct landlock_layer layers[] = { { - .level = dst->num_layers, - } }; - - if (WARN_ON_ONCE(walker_rule->num_layers != 1)) { - err = -EINVAL; - goto out_unlock; - } - if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) { - err = -EINVAL; - goto out_unlock; - } - layers[0].access = walker_rule->layers[0].access; - err = insert_rule(dst, walker_rule->object.ptr, 0, - LANDLOCK_RULE_PATH_BENEATH, &layers, - ARRAY_SIZE(layers)); - if (err) - goto out_unlock; - } + /* Merges the @src inode tree. */ + err = tree_merge(src, dst, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; out_unlock: mutex_unlock(&src->lock); @@ -347,10 +377,40 @@ static int merge_ruleset(struct landlock_ruleset *const dst, return err; } +static int tree_copy(struct landlock_ruleset *const parent, + struct landlock_ruleset *const child, u16 rule_type) +{ + struct landlock_rule *walker_rule, *next_rule; + struct rb_root *parent_root; + int err = 0; + + /* Chooses rb_tree structure depending on a rule type. */ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + parent_root = &parent->root_inode; + break; + default: + return -EINVAL; + } + /* Copies the @parent inode tree. */ + rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, + parent_root, node) { + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = insert_rule(child, walker_rule->object.ptr, 0, + rule_type, &walker_rule->layers, + walker_rule->num_layers); + break; + } + if (err) + return err; + } + return err; +} + static int inherit_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const child) { - struct landlock_rule *walker_rule, *next_rule; int err = 0; might_sleep(); @@ -361,22 +421,19 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, mutex_lock(&child->lock); mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); - /* Copies the @parent tree. */ - rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &parent->root_inode, node) { - err = insert_rule(child, walker_rule->object.ptr, 0, - LANDLOCK_RULE_PATH_BENEATH, - &walker_rule->layers, - walker_rule->num_layers); - if (err) - goto out_unlock; - } + /* Copies the @parent inode tree. */ + err = tree_copy(parent, child, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) { err = -EINVAL; goto out_unlock; } - /* Copies the parent layer stack and leaves a space for the new layer. */ + /* + * Copies the parent layer stack and leaves a space + * for the new layer. 
+ */ memcpy(child->access_masks, parent->access_masks, flex_array_size(parent, access_masks, parent->num_layers)); @@ -400,7 +457,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) might_sleep(); rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) - free_rule(freeme); + free_rule(freeme, LANDLOCK_RULE_PATH_BENEATH); put_hierarchy(ruleset->hierarchy); kfree(ruleset); } From patchwork Tue Jun 21 08:23:00 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888808 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DCABECCA473 for ; Tue, 21 Jun 2022 08:23:39 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229732AbiFUIXf (ORCPT ); Tue, 21 Jun 2022 04:23:35 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41574 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229782AbiFUIX2 (ORCPT ); Tue, 21 Jun 2022 04:23:28 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3AC85D135; Tue, 21 Jun 2022 01:23:26 -0700 (PDT) Received: from fraeml711-chm.china.huawei.com (unknown [172.18.147.201]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxj5HCGz67XGx; Tue, 21 Jun 2022 16:21:29 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml711-chm.china.huawei.com (10.206.15.60) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:24 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:23 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 04/17] landlock: moves helper functions Date: Tue, 21 Jun 2022 16:23:00 +0800 Message-ID: <20220621082313.3330667-5-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch moves unmask_layers(), init_layer_masks() and get_handled_accesses() helpers to ruleset.c to share with landlock network implementation in following commits. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Splits commit. * Moves init_layer_masks() and get_handled_accesses() helpers to ruleset.c and makes then non-static. * Formats code with clang-format-14. 
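
[Editor's illustration, not kernel code: a standalone sketch of the layer-mask pattern that the moved helpers implement together. The array size and bit values are made up. init_layer_masks() sets, for each requested access, one bit per policy layer that handles it; unmask_layers() clears a layer's bit when one of its rules grants that access; the request is allowed once every remaining mask is zero.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ACCESS 3			/* toy number of access rights */

/* Mirrors the check done after the path walk: allowed when no layer
 * still demands a rule for any requested access. */
static bool all_layers_granted(const uint64_t masks[NUM_ACCESS])
{
	for (int i = 0; i < NUM_ACCESS; i++)
		if (masks[i])
			return false;
	return true;
}

int main(void)
{
	/* "init_layer_masks": layers 0 and 1 handle access 0, layer 0 handles access 2. */
	uint64_t layer_masks[NUM_ACCESS] = { 0x3, 0x0, 0x1 };

	/* "unmask_layers": a layer-0 rule grants accesses 0 and 2 ... */
	layer_masks[0] &= ~(uint64_t)0x1;
	layer_masks[2] &= ~(uint64_t)0x1;
	/* ... and a layer-1 rule grants access 0. */
	layer_masks[0] &= ~(uint64_t)0x2;

	printf("granted: %s\n", all_layers_granted(layer_masks) ? "yes" : "no");
	return 0;
}

Moving these helpers into ruleset.c lets the same walk-and-unmask logic be reused unchanged by the network rules added later in the series.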
--- security/landlock/fs.c | 107 ------------------------------------ security/landlock/ruleset.c | 105 +++++++++++++++++++++++++++++++++++ security/landlock/ruleset.h | 12 ++++ 3 files changed, 117 insertions(+), 107 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 46aedc2a05a8..42fb02141b9c 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -212,60 +212,6 @@ find_rule(const struct landlock_ruleset *const domain, return rule; } -/* - * @layer_masks is read and may be updated according to the access request and - * the matching rule. - * - * Returns true if the request is allowed (i.e. relevant layer masks for the - * request are empty). - */ -static inline bool -unmask_layers(const struct landlock_rule *const rule, - const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) -{ - size_t layer_level; - - if (!access_request || !layer_masks) - return true; - if (!rule) - return false; - - /* - * An access is granted if, for each policy layer, at least one rule - * encountered on the pathwalk grants the requested access, - * regardless of its position in the layer stack. We must then check - * the remaining layers for each inode, from the first added layer to - * the last one. When there is multiple requested accesses, for each - * policy layer, the full set of requested accesses may not be granted - * by only one rule, but by the union (binary OR) of multiple rules. - * E.g. /a/b + /a => /a/b - */ - for (layer_level = 0; layer_level < rule->num_layers; layer_level++) { - const struct landlock_layer *const layer = - &rule->layers[layer_level]; - const layer_mask_t layer_bit = BIT_ULL(layer->level - 1); - const unsigned long access_req = access_request; - unsigned long access_bit; - bool is_empty; - - /* - * Records in @layer_masks which layer grants access to each - * requested access. - */ - is_empty = true; - for_each_set_bit(access_bit, &access_req, - ARRAY_SIZE(*layer_masks)) { - if (layer->access & BIT_ULL(access_bit)) - (*layer_masks)[access_bit] &= ~layer_bit; - is_empty = is_empty && !(*layer_masks)[access_bit]; - } - if (is_empty) - return true; - } - return false; -} - /* * Allows access to pseudo filesystems that will never be mountable (e.g. * sockfs, pipefs), but can still be reachable through @@ -278,59 +224,6 @@ static inline bool is_nouser_or_private(const struct dentry *dentry) unlikely(IS_PRIVATE(d_backing_inode(dentry)))); } -static inline access_mask_t -get_handled_accesses(const struct landlock_ruleset *const domain) -{ - access_mask_t access_dom = 0; - unsigned long access_bit; - - for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS; - access_bit++) { - size_t layer_level; - - for (layer_level = 0; layer_level < domain->num_layers; - layer_level++) { - if (landlock_get_fs_access_mask(domain, layer_level) & - BIT_ULL(access_bit)) { - access_dom |= BIT_ULL(access_bit); - break; - } - } - } - return access_dom; -} - -static inline access_mask_t -init_layer_masks(const struct landlock_ruleset *const domain, - const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) -{ - access_mask_t handled_accesses = 0; - size_t layer_level; - - memset(layer_masks, 0, sizeof(*layer_masks)); - /* An empty access request can happen because of O_WRONLY | O_RDWR. */ - if (!access_request) - return 0; - - /* Saves all handled accesses per layer. 
*/ - for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { - const unsigned long access_req = access_request; - unsigned long access_bit; - - for_each_set_bit(access_bit, &access_req, - ARRAY_SIZE(*layer_masks)) { - if (landlock_get_fs_access_mask(domain, layer_level) & - BIT_ULL(access_bit)) { - (*layer_masks)[access_bit] |= - BIT_ULL(layer_level); - handled_accesses |= BIT_ULL(access_bit); - } - } - } - return handled_accesses; -} - /* * Check that a destination file hierarchy has more restrictions than a source * file hierarchy. This is only used for link and rename actions. diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 820b6e6a4496..32ec79d6559a 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -577,3 +577,108 @@ landlock_find_rule(const struct landlock_ruleset *const ruleset, } return NULL; } + +access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain) +{ + access_mask_t access_dom = 0; + unsigned long access_bit; + + for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS; + access_bit++) { + size_t layer_level; + + for (layer_level = 0; layer_level < domain->num_layers; + layer_level++) { + if (landlock_get_fs_access_mask(domain, layer_level) & + BIT_ULL(access_bit)) { + access_dom |= BIT_ULL(access_bit); + break; + } + } + } + return access_dom; +} + +/* + * @layer_masks is read and may be updated according to the access request and + * the matching rule. + * + * Returns true if the request is allowed (i.e. relevant layer masks for the + * request are empty). + */ +bool unmask_layers(const struct landlock_rule *const rule, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +{ + size_t layer_level; + + if (!access_request || !layer_masks) + return true; + if (!rule) + return false; + + /* + * An access is granted if, for each policy layer, at least one rule + * encountered on the pathwalk grants the requested access, + * regardless of its position in the layer stack. We must then check + * the remaining layers for each inode, from the first added layer to + * the last one. When there is multiple requested accesses, for each + * policy layer, the full set of requested accesses may not be granted + * by only one rule, but by the union (binary OR) of multiple rules. + * E.g. /a/b + /a => /a/b + */ + for (layer_level = 0; layer_level < rule->num_layers; layer_level++) { + const struct landlock_layer *const layer = + &rule->layers[layer_level]; + const layer_mask_t layer_bit = BIT_ULL(layer->level - 1); + const unsigned long access_req = access_request; + unsigned long access_bit; + bool is_empty; + + /* + * Records in @layer_masks which layer grants access to each + * requested access. + */ + is_empty = true; + for_each_set_bit(access_bit, &access_req, + ARRAY_SIZE(*layer_masks)) { + if (layer->access & BIT_ULL(access_bit)) + (*layer_masks)[access_bit] &= ~layer_bit; + is_empty = is_empty && !(*layer_masks)[access_bit]; + } + if (is_empty) + return true; + } + return false; +} + +access_mask_t +init_layer_masks(const struct landlock_ruleset *const domain, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +{ + access_mask_t handled_accesses = 0; + size_t layer_level; + + memset(layer_masks, 0, sizeof(*layer_masks)); + /* An empty access request can happen because of O_WRONLY | O_RDWR. */ + if (!access_request) + return 0; + + /* Saves all handled accesses per layer. 
*/ + for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { + const unsigned long access_req = access_request; + unsigned long access_bit; + + for_each_set_bit(access_bit, &access_req, + ARRAY_SIZE(*layer_masks)) { + if (landlock_get_fs_access_mask(domain, layer_level) & + BIT_ULL(access_bit)) { + (*layer_masks)[access_bit] |= + BIT_ULL(layer_level); + handled_accesses |= BIT_ULL(access_bit); + } + } + } + return handled_accesses; +} diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index a22d132c32a7..ea09ab2f27c4 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -195,4 +195,16 @@ landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, { return ruleset->access_masks[mask_level]; } + +access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain); + +bool unmask_layers(const struct landlock_rule *const rule, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]); + +access_mask_t +init_layer_masks(const struct landlock_ruleset *const domain, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]); + #endif /* _SECURITY_LANDLOCK_RULESET_H */ From patchwork Tue Jun 21 08:23:01 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888809 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C00C0C433EF for ; Tue, 21 Jun 2022 08:23:41 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243716AbiFUIXj (ORCPT ); Tue, 21 Jun 2022 04:23:39 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41618 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229955AbiFUIXa (ORCPT ); Tue, 21 Jun 2022 04:23:30 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 02847DE98; Tue, 21 Jun 2022 01:23:28 -0700 (PDT) Received: from fraeml709-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzvW0pVSz6H80V; Tue, 21 Jun 2022 16:19:35 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml709-chm.china.huawei.com (10.206.15.37) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:25 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:24 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 05/17] landlock: refactors helper functions Date: Tue, 21 Jun 2022 16:23:01 +0800 Message-ID: <20220621082313.3330667-6-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: 
X-Mailing-List: netdev@vger.kernel.org Adds new rule_type argument to unmask_layers(), init_layer_masks() and get_handled_accesses() helper functions. This modification supports implementing new rule types in the next landlock versions. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Splits commit. * Formats code with clang-format-14. Changes since v4: * Refactors init_layer_masks(), get_handled_accesses() and unmask_layers() functions to support multiple rule types. * Refactors landlock_get_fs_access_mask() function with LANDLOCK_MASK_ACCESS_FS mask. Changes since v3: * Splits commit. * Refactors landlock_unmask_layers functions. --- security/landlock/fs.c | 45 ++++++++++++++++--------- security/landlock/ruleset.c | 67 +++++++++++++++++++++++-------------- security/landlock/ruleset.h | 16 +++++---- 3 files changed, 80 insertions(+), 48 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 42fb02141b9c..10f6c67f5c3b 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -400,7 +400,8 @@ static int check_access_path_dual( * a superset of the meaningful requested accesses). */ access_masked_parent1 = access_masked_parent2 = - get_handled_accesses(domain); + get_handled_accesses(domain, LANDLOCK_RULE_PATH_BENEATH, + LANDLOCK_NUM_ACCESS_FS); is_dom_check = true; } else { if (WARN_ON_ONCE(dentry_child1 || dentry_child2)) @@ -414,16 +415,22 @@ static int check_access_path_dual( if (unlikely(dentry_child1)) { unmask_layers(find_rule(domain, dentry_child1), init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, - &_layer_masks_child1), - &_layer_masks_child1); + &_layer_masks_child1, + sizeof(_layer_masks_child1), + LANDLOCK_RULE_PATH_BENEATH), + &_layer_masks_child1, + ARRAY_SIZE(_layer_masks_child1)); layer_masks_child1 = &_layer_masks_child1; child1_is_directory = d_is_dir(dentry_child1); } if (unlikely(dentry_child2)) { unmask_layers(find_rule(domain, dentry_child2), init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, - &_layer_masks_child2), - &_layer_masks_child2); + &_layer_masks_child2, + sizeof(_layer_masks_child2), + LANDLOCK_RULE_PATH_BENEATH), + &_layer_masks_child2, + ARRAY_SIZE(_layer_masks_child2)); layer_masks_child2 = &_layer_masks_child2; child2_is_directory = d_is_dir(dentry_child2); } @@ -475,15 +482,16 @@ static int check_access_path_dual( } rule = find_rule(domain, walker_path.dentry); - allowed_parent1 = unmask_layers(rule, access_masked_parent1, - layer_masks_parent1); - allowed_parent2 = unmask_layers(rule, access_masked_parent2, - layer_masks_parent2); + allowed_parent1 = unmask_layers( + rule, access_masked_parent1, layer_masks_parent1, + ARRAY_SIZE(*layer_masks_parent1)); + allowed_parent2 = unmask_layers( + rule, access_masked_parent2, layer_masks_parent2, + ARRAY_SIZE(*layer_masks_parent2)); /* Stops when a rule from each layer grants access. 
*/ if (allowed_parent1 && allowed_parent2) break; - jump_up: if (walker_path.dentry == walker_path.mnt->mnt_root) { if (follow_up(&walker_path)) { @@ -539,7 +547,9 @@ static inline int check_access_path(const struct landlock_ruleset *const domain, { layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; - access_request = init_layer_masks(domain, access_request, &layer_masks); + access_request = init_layer_masks(domain, access_request, &layer_masks, + sizeof(layer_masks), + LANDLOCK_RULE_PATH_BENEATH); return check_access_path_dual(domain, path, access_request, &layer_masks, NULL, 0, NULL, NULL); } @@ -623,7 +633,8 @@ static bool collect_domain_accesses( return true; access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS, - layer_masks_dom); + layer_masks_dom, sizeof(*layer_masks_dom), + LANDLOCK_RULE_PATH_BENEATH); dget(dir); while (true) { @@ -631,7 +642,8 @@ static bool collect_domain_accesses( /* Gets all layers allowing all domain accesses. */ if (unmask_layers(find_rule(domain, dir), access_dom, - layer_masks_dom)) { + layer_masks_dom, + ARRAY_SIZE(*layer_masks_dom))) { /* * Stops when all handled accesses are allowed by at * least one rule in each layer. @@ -747,7 +759,8 @@ static int current_check_refer_path(struct dentry *const old_dentry, */ access_request_parent1 = init_layer_masks( dom, access_request_parent1 | access_request_parent2, - &layer_masks_parent1); + &layer_masks_parent1, sizeof(layer_masks_parent1), + LANDLOCK_RULE_PATH_BENEATH); return check_access_path_dual(dom, new_dir, access_request_parent1, &layer_masks_parent1, NULL, 0, @@ -755,7 +768,9 @@ static int current_check_refer_path(struct dentry *const old_dentry, } /* Backward compatibility: no reparenting support. */ - if (!(get_handled_accesses(dom) & LANDLOCK_ACCESS_FS_REFER)) + if (!(get_handled_accesses(dom, LANDLOCK_RULE_PATH_BENEATH, + LANDLOCK_NUM_ACCESS_FS) & + LANDLOCK_ACCESS_FS_REFER)) return -EXDEV; access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER; diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 32ec79d6559a..cbca85f5cc6d 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -578,23 +578,31 @@ landlock_find_rule(const struct landlock_ruleset *const ruleset, return NULL; } -access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain) +access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain, + u16 rule_type, u16 num_access) { access_mask_t access_dom = 0; unsigned long access_bit; - for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS; - access_bit++) { - size_t layer_level; - - for (layer_level = 0; layer_level < domain->num_layers; - layer_level++) { - if (landlock_get_fs_access_mask(domain, layer_level) & - BIT_ULL(access_bit)) { - access_dom |= BIT_ULL(access_bit); - break; + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS; + access_bit++) { + size_t layer_level; + + for (layer_level = 0; layer_level < domain->num_layers; + layer_level++) { + if (landlock_get_fs_access_mask(domain, + layer_level) & + BIT_ULL(access_bit)) { + access_dom |= BIT_ULL(access_bit); + break; + } } } + break; + default: + break; } return access_dom; } @@ -608,7 +616,7 @@ access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain) */ bool unmask_layers(const struct landlock_rule *const rule, const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) + layer_mask_t (*const layer_masks)[], size_t 
masks_array_size) { size_t layer_level; @@ -640,8 +648,7 @@ bool unmask_layers(const struct landlock_rule *const rule, * requested access. */ is_empty = true; - for_each_set_bit(access_bit, &access_req, - ARRAY_SIZE(*layer_masks)) { + for_each_set_bit(access_bit, &access_req, masks_array_size) { if (layer->access & BIT_ULL(access_bit)) (*layer_masks)[access_bit] &= ~layer_bit; is_empty = is_empty && !(*layer_masks)[access_bit]; @@ -652,15 +659,16 @@ bool unmask_layers(const struct landlock_rule *const rule, return false; } -access_mask_t -init_layer_masks(const struct landlock_ruleset *const domain, - const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) +access_mask_t init_layer_masks(const struct landlock_ruleset *const domain, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[], + size_t masks_size, u16 rule_type) { access_mask_t handled_accesses = 0; size_t layer_level; - memset(layer_masks, 0, sizeof(*layer_masks)); + memset(layer_masks, 0, masks_size); + /* An empty access request can happen because of O_WRONLY | O_RDWR. */ if (!access_request) return 0; @@ -670,14 +678,21 @@ init_layer_masks(const struct landlock_ruleset *const domain, const unsigned long access_req = access_request; unsigned long access_bit; - for_each_set_bit(access_bit, &access_req, - ARRAY_SIZE(*layer_masks)) { - if (landlock_get_fs_access_mask(domain, layer_level) & - BIT_ULL(access_bit)) { - (*layer_masks)[access_bit] |= - BIT_ULL(layer_level); - handled_accesses |= BIT_ULL(access_bit); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + for_each_set_bit(access_bit, &access_req, + LANDLOCK_NUM_ACCESS_FS) { + if (landlock_get_fs_access_mask(domain, + layer_level) & + BIT_ULL(access_bit)) { + (*layer_masks)[access_bit] |= + BIT_ULL(layer_level); + handled_accesses |= BIT_ULL(access_bit); + } } + break; + default: + return 0; } } return handled_accesses; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index ea09ab2f27c4..c1cf7cce2cb5 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -193,18 +193,20 @@ static inline u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level) { - return ruleset->access_masks[mask_level]; + return (ruleset->access_masks[mask_level] & LANDLOCK_MASK_ACCESS_FS); } -access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain); +access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain, + u16 rule_type, u16 num_access); bool unmask_layers(const struct landlock_rule *const rule, const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]); + layer_mask_t (*const layer_masks)[], + size_t masks_array_size); -access_mask_t -init_layer_masks(const struct landlock_ruleset *const domain, - const access_mask_t access_request, - layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]); +access_mask_t init_layer_masks(const struct landlock_ruleset *const domain, + const access_mask_t access_request, + layer_mask_t (*const layer_masks)[], + size_t masks_size, u16 rule_type); #endif /* _SECURITY_LANDLOCK_RULESET_H */ From patchwork Tue Jun 21 08:23:02 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888811 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org 
(vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D9F4FC43334 for ; Tue, 21 Jun 2022 08:23:53 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S241798AbiFUIXv (ORCPT ); Tue, 21 Jun 2022 04:23:51 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41780 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233269AbiFUIXi (ORCPT ); Tue, 21 Jun 2022 04:23:38 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 11D00BE27; Tue, 21 Jun 2022 01:23:29 -0700 (PDT) Received: from fraeml707-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxm45TCz67ZvX; Tue, 21 Jun 2022 16:21:32 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml707-chm.china.huawei.com (10.206.15.35) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:27 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:26 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 06/17] landlock: refactors landlock_add_rule syscall Date: Tue, 21 Jun 2022 16:23:02 +0800 Message-ID: <20220621082313.3330667-7-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Modifies landlock_add_rule syscall to support new rule types in future Landlock versions. Adds add_rule_path_beneath() helper to support current filesystem rules. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Refactors syscall landlock_add_rule() and add_rule_path_beneath() helper to make argument check ordering consistent and get rid of partial revertings in following patches. * Rolls back refactoring base_test.c seltest. * Formats code with clang-format-14. Changes since v4: * Refactors add_rule_path_beneath() and landlock_add_rule() functions to optimize code usage. * Refactors base_test.c seltest: adds LANDLOCK_RULE_PATH_BENEATH rule type in landlock_add_rule() call. Changes since v3: * Split commit. * Refactors landlock_add_rule syscall. --- security/landlock/syscalls.c | 99 +++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 47 deletions(-) -- 2.25.1 diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 5836736ce9d7..a209fd7d10c6 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -274,6 +274,47 @@ static int get_path_from_fd(const s32 fd, struct path *const path) return err; } +static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, + const void *const rule_attr) +{ + struct landlock_path_beneath_attr path_beneath_attr; + struct path path; + int res, err; + u32 mask; + + /* Copies raw user space buffer, only one type for now. 
*/ + res = copy_from_user(&path_beneath_attr, rule_attr, + sizeof(path_beneath_attr)); + if (res) + return -EFAULT; + + /* + * Informs about useless rule: empty allowed_access (i.e. deny rules) + * are ignored in path walks. + */ + if (!path_beneath_attr.allowed_access) + return -ENOMSG; + /* + * Checks that allowed_access matches the @ruleset constraints + * (ruleset->access_masks[0] is automatically upgraded to 64-bits). + */ + mask = landlock_get_fs_access_mask(ruleset, 0); + if ((path_beneath_attr.allowed_access | mask) != mask) + return -EINVAL; + + /* Gets and checks the new rule. */ + err = get_path_from_fd(path_beneath_attr.parent_fd, &path); + if (err) + return err; + + /* Imports the new rule. */ + err = landlock_append_fs_rule(ruleset, &path, + path_beneath_attr.allowed_access); + path_put(&path); + + return err; +} + /** * sys_landlock_add_rule - Add a new rule to a ruleset * @@ -292,13 +333,14 @@ static int get_path_from_fd(const s32 fd, struct path *const path) * * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. - * &landlock_path_beneath_attr.allowed_access is not a subset of the - * ruleset handled accesses); + * &landlock_path_beneath_attr.allowed_access is not a subset of the rule's + * accesses); * - ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access); * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a * member of @rule_attr is not a file descriptor as expected; * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of - * @rule_attr is not the expected file descriptor type; + * @rule_attr is not the expected file descriptor type (e.g. file open + * without O_PATH); * - EPERM: @ruleset_fd has no write access to the underlying ruleset; * - EFAULT: @rule_attr inconsistency. */ @@ -306,10 +348,8 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, const enum landlock_rule_type, rule_type, const void __user *const, rule_attr, const __u32, flags) { - struct landlock_path_beneath_attr path_beneath_attr; - struct path path; struct landlock_ruleset *ruleset; - int res, err; + int err; if (!landlock_initialized) return -EOPNOTSUPP; @@ -323,49 +363,14 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, if (IS_ERR(ruleset)) return PTR_ERR(ruleset); - if (rule_type != LANDLOCK_RULE_PATH_BENEATH) { + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = add_rule_path_beneath(ruleset, rule_attr); + break; + default: err = -EINVAL; - goto out_put_ruleset; - } - - /* Copies raw user space buffer, only one type for now. */ - res = copy_from_user(&path_beneath_attr, rule_attr, - sizeof(path_beneath_attr)); - if (res) { - err = -EFAULT; - goto out_put_ruleset; + break; } - - /* - * Informs about useless rule: empty allowed_access (i.e. deny rules) - * are ignored in path walks. - */ - if (!path_beneath_attr.allowed_access) { - err = -ENOMSG; - goto out_put_ruleset; - } - /* - * Checks that allowed_access matches the @ruleset constraints - * (ruleset->access_masks[0] is automatically upgraded to 64-bits). - */ - if ((path_beneath_attr.allowed_access | - landlock_get_fs_access_mask(ruleset, 0)) != - landlock_get_fs_access_mask(ruleset, 0)) { - err = -EINVAL; - goto out_put_ruleset; - } - - /* Gets and checks the new rule. */ - err = get_path_from_fd(path_beneath_attr.parent_fd, &path); - if (err) - goto out_put_ruleset; - - /* Imports the new rule. 
*/ - err = landlock_append_fs_rule(ruleset, &path, - path_beneath_attr.allowed_access); - path_put(&path); - -out_put_ruleset: landlock_put_ruleset(ruleset); return err; } From patchwork Tue Jun 21 08:23:03 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888812 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A1C80CCA473 for ; Tue, 21 Jun 2022 08:23:55 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1347826AbiFUIXx (ORCPT ); Tue, 21 Jun 2022 04:23:53 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41786 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S234738AbiFUIXi (ORCPT ); Tue, 21 Jun 2022 04:23:38 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 01BAFDF89; Tue, 21 Jun 2022 01:23:30 -0700 (PDT) Received: from fraeml708-chm.china.huawei.com (unknown [172.18.147.206]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzzY6yh9z682wj; Tue, 21 Jun 2022 16:23:05 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml708-chm.china.huawei.com (10.206.15.36) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:28 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:27 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 07/17] landlock: user space API network support Date: Tue, 21 Jun 2022 16:23:03 +0800 Message-ID: <20220621082313.3330667-8-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Refactors user space API to support network actions. Adds new network access flags, network rule and network attributes. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * None Changes since v3: * Splits commit. * Refactors User API for network rule type. --- include/uapi/linux/landlock.h | 49 +++++++++++++++++++++++++++++++++++ security/landlock/syscalls.c | 3 ++- 2 files changed, 51 insertions(+), 1 deletion(-) -- 2.25.1 diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 23df4e0e8ace..2a959b2e283d 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -31,6 +31,13 @@ struct landlock_ruleset_attr { * this access right. */ __u64 handled_access_fs; + + /** + * @handled_access_net: Bitmask of actions (cf. `Network flags`_) + * that is handled by this ruleset and should then be forbidden if no + * rule explicitly allow them. 
+ */ + __u64 handled_access_net; }; /* @@ -54,6 +61,11 @@ enum landlock_rule_type { * landlock_path_beneath_attr . */ LANDLOCK_RULE_PATH_BENEATH = 1, + /** + * @LANDLOCK_RULE_NET_SERVICE: Type of a &struct + * landlock_net_service_attr . + */ + LANDLOCK_RULE_NET_SERVICE = 2, }; /** @@ -79,6 +91,24 @@ struct landlock_path_beneath_attr { */ } __attribute__((packed)); +/** + * struct landlock_net_service_attr - TCP subnet definition + * + * Argument of sys_landlock_add_rule(). + */ +struct landlock_net_service_attr { + /** + * @allowed_access: Bitmask of allowed access network for services + * (cf. `Network flags`_). + */ + __u64 allowed_access; + /** + * @port: Network port. + */ + __u16 port; + +} __attribute__((packed)); + /** * DOC: fs_access * @@ -162,4 +192,23 @@ struct landlock_path_beneath_attr { #define LANDLOCK_ACCESS_FS_REFER (1ULL << 13) /* clang-format on */ +/** + * DOC: net_access + * + * Network flags + * ~~~~~~~~~~~~~~~~ + * + * These flags enable to restrict a sandboxed process to a set of network + * actions. + * + * TCP sockets with allowed actions: + * + * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port. + * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to + * a remote port. + */ +/* clang-format off */ +#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0) +#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1) +/* clang-format on */ #endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index a209fd7d10c6..246bc48deba3 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -82,8 +82,9 @@ static void build_check_abi(void) * struct size. */ ruleset_size = sizeof(ruleset_attr.handled_access_fs); + ruleset_size += sizeof(ruleset_attr.handled_access_net); BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); - BUILD_BUG_ON(sizeof(ruleset_attr) != 8); + BUILD_BUG_ON(sizeof(ruleset_attr) != 16); path_beneath_size = sizeof(path_beneath_attr.allowed_access); path_beneath_size += sizeof(path_beneath_attr.parent_fd); From patchwork Tue Jun 21 08:23:04 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888815 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 3A7D1C43334 for ; Tue, 21 Jun 2022 08:24:47 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348272AbiFUIYn (ORCPT ); Tue, 21 Jun 2022 04:24:43 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41792 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348130AbiFUIYJ (ORCPT ); Tue, 21 Jun 2022 04:24:09 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EADE2140F5; Tue, 21 Jun 2022 01:23:38 -0700 (PDT) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxx33C1z6897v; Tue, 21 Jun 2022 16:21:41 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:35 +0200 Received: from 
mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:28 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 08/17] landlock: adds support network rules Date: Tue, 21 Jun 2022 16:23:04 +0800 Message-ID: <20220621082313.3330667-9-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This commit adds network rules support in internal landlock functions (presented in ruleset.c) and landlock_create_ruleset syscall. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Gets rid of partial revert from landlock_add_rule syscall. * Formats code with clang-format-14. Changes since v4: * Refactors landlock_create_ruleset() - splits ruleset and masks checks. * Refactors landlock_create_ruleset() and landlock mask setters/getters to support two rule types. * Refactors landlock_add_rule syscall add_rule_path_beneath function by factoring out get_ruleset_from_fd() and landlock_put_ruleset(). Changes since v3: * Splits commit. * Adds network rule support for internal landlock functions. * Adds set_mask and get_mask for network. * Adds rb_root root_net_port. --- security/landlock/limits.h | 8 +++- security/landlock/ruleset.c | 78 +++++++++++++++++++++++++++++++----- security/landlock/ruleset.h | 31 ++++++++++++-- security/landlock/syscalls.c | 8 +++- 4 files changed, 111 insertions(+), 14 deletions(-) -- 2.25.1 diff --git a/security/landlock/limits.h b/security/landlock/limits.h index b54184ab9439..23694bf05cb7 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -22,6 +22,12 @@ #define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1) #define LANDLOCK_NUM_ACCESS_FS __const_hweight64(LANDLOCK_MASK_ACCESS_FS) -/* clang-format on */ +#define LANDLOCK_LAST_ACCESS_NET LANDLOCK_ACCESS_NET_CONNECT_TCP +#define LANDLOCK_MASK_ACCESS_NET ((LANDLOCK_LAST_ACCESS_NET << 1) - 1) +#define LANDLOCK_NUM_ACCESS_NET __const_hweight64(LANDLOCK_MASK_ACCESS_NET) +#define LANDLOCK_MASK_SHIFT_NET 16 + +#define LANDLOCK_RULE_TYPE_NUM LANDLOCK_RULE_NET_SERVICE +/* clang-format on */ #endif /* _SECURITY_LANDLOCK_LIMITS_H */ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index cbca85f5cc6d..6ca6373b3950 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -36,6 +36,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) refcount_set(&new_ruleset->usage, 1); mutex_init(&new_ruleset->lock); new_ruleset->root_inode = RB_ROOT; + new_ruleset->root_net_port = RB_ROOT; new_ruleset->num_layers = num_layers; /* * hierarchy = NULL @@ -46,16 +47,21 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) } struct landlock_ruleset * -landlock_create_ruleset(const access_mask_t access_mask) +landlock_create_ruleset(const access_mask_t access_mask_fs, + const access_mask_t access_mask_net) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. 
*/ - if (!access_mask) + if (!access_mask_fs && !access_mask_net) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); - if (!IS_ERR(new_ruleset)) - landlock_set_fs_access_mask(new_ruleset, access_mask, 0); + if (IS_ERR(new_ruleset)) + return new_ruleset; + if (access_mask_fs) + landlock_set_fs_access_mask(new_ruleset, access_mask_fs, 0); + if (access_mask_net) + landlock_set_net_access_mask(new_ruleset, access_mask_net, 0); return new_ruleset; } @@ -92,9 +98,11 @@ create_rule(struct landlock_object *const object_ptr, return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); - if (object_ptr) { + if (object_ptr && !object_data) { landlock_get_object(object_ptr); new_rule->object.ptr = object_ptr; + } else if (object_data && !object_ptr) { + new_rule->object.data = object_data; } else if (object_ptr && object_data) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); @@ -130,10 +138,12 @@ static void build_check_ruleset(void) .num_layers = ~0, }; typeof(ruleset.access_masks[0]) fs_access_mask = ~0; + typeof(ruleset.access_masks[0]) net_access_mask = ~0; BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES); BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS); BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS); + BUILD_BUG_ON(net_access_mask < LANDLOCK_MASK_ACCESS_NET); } /** @@ -179,6 +189,11 @@ static int insert_rule(struct landlock_ruleset *const ruleset, object_data = (uintptr_t)object_ptr; root = &ruleset->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + if (WARN_ON_ONCE(object_ptr)) + return -EINVAL; + root = &ruleset->root_net_port; + break; default: WARN_ON_ONCE(1); return -EINVAL; @@ -232,6 +247,15 @@ static int insert_rule(struct landlock_ruleset *const ruleset, &ruleset->root_inode); free_rule(this, rule_type); break; + case LANDLOCK_RULE_NET_SERVICE: + new_rule = create_rule(NULL, object_data, &this->layers, + this->num_layers, &(*layers)[0]); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_replace_node(&this->node, &new_rule->node, + &ruleset->root_net_port); + free_rule(this, rule_type); + break; } return 0; } @@ -249,6 +273,15 @@ static int insert_rule(struct landlock_ruleset *const ruleset, rb_insert_color(&new_rule->node, &ruleset->root_inode); ruleset->num_rules++; break; + case LANDLOCK_RULE_NET_SERVICE: + new_rule = create_rule(NULL, object_data, layers, num_layers, + NULL); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_link_node(&new_rule->node, parent_node, walker_node); + rb_insert_color(&new_rule->node, &ruleset->root_net_port); + ruleset->num_rules++; + break; } return 0; } @@ -309,6 +342,9 @@ static int tree_merge(struct landlock_ruleset *const src, case LANDLOCK_RULE_PATH_BENEATH: src_root = &src->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + src_root = &src->root_net_port; + break; default: return -EINVAL; } @@ -335,6 +371,11 @@ static int tree_merge(struct landlock_ruleset *const src, rule_type, &layers, ARRAY_SIZE(layers)); break; + case LANDLOCK_RULE_NET_SERVICE: + err = insert_rule(dst, NULL, walker_rule->object.data, + rule_type, &layers, + ARRAY_SIZE(layers)); + break; } if (err) return err; @@ -370,6 +411,10 @@ static int merge_ruleset(struct landlock_ruleset *const dst, err = tree_merge(src, dst, LANDLOCK_RULE_PATH_BENEATH); if (err) goto out_unlock; + /* Merges the @src network tree. 
*/ + err = tree_merge(src, dst, LANDLOCK_RULE_NET_SERVICE); + if (err) + goto out_unlock; out_unlock: mutex_unlock(&src->lock); @@ -389,10 +434,13 @@ static int tree_copy(struct landlock_ruleset *const parent, case LANDLOCK_RULE_PATH_BENEATH: parent_root = &parent->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + parent_root = &parent->root_net_port; + break; default: return -EINVAL; } - /* Copies the @parent inode tree. */ + /* Copies the @parent inode or network tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, parent_root, node) { switch (rule_type) { @@ -401,6 +449,11 @@ static int tree_copy(struct landlock_ruleset *const parent, rule_type, &walker_rule->layers, walker_rule->num_layers); break; + case LANDLOCK_RULE_NET_SERVICE: + err = insert_rule(child, NULL, walker_rule->object.data, + rule_type, &walker_rule->layers, + walker_rule->num_layers); + break; } if (err) return err; @@ -423,6 +476,10 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, /* Copies the @parent inode tree. */ err = tree_copy(parent, child, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; + /* Copies the @parent network tree. */ + err = tree_copy(parent, child, LANDLOCK_RULE_NET_SERVICE); if (err) goto out_unlock; @@ -458,6 +515,9 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) free_rule(freeme, LANDLOCK_RULE_PATH_BENEATH); + rbtree_postorder_for_each_entry_safe(freeme, next, + &ruleset->root_net_port, node) + free_rule(freeme, LANDLOCK_RULE_NET_SERVICE); put_hierarchy(ruleset->hierarchy); kfree(ruleset); } @@ -552,13 +612,13 @@ landlock_find_rule(const struct landlock_ruleset *const ruleset, { const struct rb_node *node; - if (!object_data) - return NULL; - switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: node = ruleset->root_inode.rb_node; break; + case LANDLOCK_RULE_NET_SERVICE: + node = ruleset->root_net_port.rb_node; + break; default: WARN_ON_ONCE(1); return NULL; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index c1cf7cce2cb5..0cedfe65e326 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -101,6 +101,12 @@ struct landlock_ruleset { * tree is immutable until @usage reaches zero. */ struct rb_root root_inode; + /** + * @root_net_port: Root of a red-black tree containing object nodes + * for network port. Once a ruleset is tied to a process (i.e. as a domain), + * this tree is immutable until @usage reaches zero. + */ + struct rb_root root_net_port; /** * @hierarchy: Enables hierarchy identification even when a parent * domain vanishes. This is needed for the ptrace protection. @@ -156,7 +162,8 @@ struct landlock_ruleset { }; struct landlock_ruleset * -landlock_create_ruleset(const access_mask_t access_mask); +landlock_create_ruleset(const access_mask_t access_mask_fs, + const access_mask_t access_mask_net); void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); @@ -183,9 +190,9 @@ static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) /* A helper function to set a filesystem mask. 
*/ static inline void landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, - const access_mask_t access_maskset, u16 mask_level) + const access_mask_t access_mask_fs, u16 mask_level) { - ruleset->access_masks[mask_level] = access_maskset; + ruleset->access_masks[mask_level] = access_mask_fs; } /* A helper function to get a filesystem mask. */ @@ -196,6 +203,24 @@ landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, return (ruleset->access_masks[mask_level] & LANDLOCK_MASK_ACCESS_FS); } +/* A helper function to set a network mask. */ +static inline void +landlock_set_net_access_mask(struct landlock_ruleset *ruleset, + const access_mask_t access_mask_net, + u16 mask_level) +{ + ruleset->access_masks[mask_level] |= + (access_mask_net << LANDLOCK_MASK_SHIFT_NET); +} + +/* A helper function to get a network mask. */ +static inline u32 +landlock_get_net_access_mask(const struct landlock_ruleset *ruleset, + u16 mask_level) +{ + return (ruleset->access_masks[mask_level] >> LANDLOCK_MASK_SHIFT_NET); +} + access_mask_t get_handled_accesses(const struct landlock_ruleset *const domain, u16 rule_type, u16 num_access); diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 246bc48deba3..72fa01ba9de7 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -189,8 +189,14 @@ SYSCALL_DEFINE3(landlock_create_ruleset, LANDLOCK_MASK_ACCESS_FS) return -EINVAL; + /* Checks network content (and 32-bits cast). */ + if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) != + LANDLOCK_MASK_ACCESS_NET) + return -EINVAL; + /* Checks arguments and transforms to kernel struct. */ - ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs); + ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs, + ruleset_attr.handled_access_net); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); From patchwork Tue Jun 21 08:23:05 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888810 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1D6EDCCA482 for ; Tue, 21 Jun 2022 08:23:52 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1346753AbiFUIXt (ORCPT ); Tue, 21 Jun 2022 04:23:49 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41792 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236616AbiFUIXi (ORCPT ); Tue, 21 Jun 2022 04:23:38 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EBFC0101F1; Tue, 21 Jun 2022 01:23:32 -0700 (PDT) Received: from fraeml706-chm.china.huawei.com (unknown [172.18.147.201]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzvb69Npz6H8MG; Tue, 21 Jun 2022 16:19:39 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml706-chm.china.huawei.com (10.206.15.55) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:30 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 
21 Jun 2022 09:23:30 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 09/17] landlock: implements TCP network hooks Date: Tue, 21 Jun 2022 16:23:05 +0800 Message-ID: <20220621082313.3330667-10-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch adds support of socket_bind() and socket_connect() hooks. It's possible to restrict binding and connecting of TCP types of sockets to particular ports. It's just basic idea of how Landlock could support network confinement. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Fixes some logic errors. * Formats code with clang-format-14. Changes since v4: * Factors out CONFIG_INET into make file. * Refactors check_socket_access(). * Adds helper get_port(). * Adds CONFIG_IPV6 in get_port(), hook_socket_bind/connect functions to support AF_INET6 family. * Adds AF_UNSPEC family support in hook_socket_bind/connect functions. * Refactors add_rule_net_service() and landlock_add_rule syscall to support network rule inserting. * Refactors init_layer_masks() to support network rules. Changes since v3: * Splits commit. * Adds SECURITY_NETWORK in config. * Adds IS_ENABLED(CONFIG_INET) if a kernel has no INET configuration. * Adds hook_socket_bind and hook_socket_connect hooks. --- security/landlock/Kconfig | 1 + security/landlock/Makefile | 2 + security/landlock/net.c | 155 +++++++++++++++++++++++++++++++++++ security/landlock/net.h | 26 ++++++ security/landlock/ruleset.c | 12 +++ security/landlock/setup.c | 2 + security/landlock/syscalls.c | 59 ++++++++++++- 7 files changed, 254 insertions(+), 3 deletions(-) create mode 100644 security/landlock/net.c create mode 100644 security/landlock/net.h -- 2.25.1 diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig index 8e33c4e8ffb8..10c099097533 100644 --- a/security/landlock/Kconfig +++ b/security/landlock/Kconfig @@ -3,6 +3,7 @@ config SECURITY_LANDLOCK bool "Landlock support" depends on SECURITY && !ARCH_EPHEMERAL_INODES + select SECURITY_NETWORK select SECURITY_PATH help Landlock is a sandboxing mechanism that enables processes to restrict diff --git a/security/landlock/Makefile b/security/landlock/Makefile index 7bbd2f413b3e..53d3c92ae22e 100644 --- a/security/landlock/Makefile +++ b/security/landlock/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o landlock-y := setup.o syscalls.o object.o ruleset.o \ cred.o ptrace.o fs.o + +landlock-$(CONFIG_INET) += net.o \ No newline at end of file diff --git a/security/landlock/net.c b/security/landlock/net.c new file mode 100644 index 000000000000..da63e4f1dca4 --- /dev/null +++ b/security/landlock/net.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock LSM - Network management and hooks + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. + */ + +#include +#include +#include +#include + +#include "cred.h" +#include "limits.h" +#include "net.h" + +int landlock_append_net_rule(struct landlock_ruleset *const ruleset, u16 port, + u32 access_rights) +{ + int err; + + /* Transforms relative access rights to absolute ones. 
*/ + access_rights |= LANDLOCK_MASK_ACCESS_NET & + ~landlock_get_net_access_mask(ruleset, 0); + + BUILD_BUG_ON(sizeof(port) > sizeof(uintptr_t)); + mutex_lock(&ruleset->lock); + err = landlock_insert_rule(ruleset, NULL, port, access_rights, + LANDLOCK_RULE_NET_SERVICE); + mutex_unlock(&ruleset->lock); + + return err; +} + +static int check_socket_access(const struct landlock_ruleset *const domain, + u16 port, access_mask_t access_request) +{ + bool allowed = false; + layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_NET] = {}; + const struct landlock_rule *rule; + access_mask_t handled_access; + + if (WARN_ON_ONCE(!domain)) + return 0; + if (WARN_ON_ONCE(domain->num_layers < 1)) + return -EACCES; + + rule = landlock_find_rule(domain, port, LANDLOCK_RULE_NET_SERVICE); + + handled_access = init_layer_masks(domain, access_request, &layer_masks, + sizeof(layer_masks), + LANDLOCK_RULE_NET_SERVICE); + allowed = unmask_layers(rule, handled_access, &layer_masks, + ARRAY_SIZE(layer_masks)); + + return allowed ? 0 : -EACCES; +} + +static u16 get_port(const struct sockaddr *const address) +{ + /* Gets port value in host byte order. */ + switch (address->sa_family) { + case AF_UNSPEC: + case AF_INET: { + const struct sockaddr_in *const sockaddr = + (struct sockaddr_in *)address; + return ntohs(sockaddr->sin_port); + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: { + const struct sockaddr_in6 *const sockaddr_ip6 = + (struct sockaddr_in6 *)address; + return ntohs(sockaddr_ip6->sin6_port); + } +#endif + } + WARN_ON_ONCE(1); + return 0; +} + +static int hook_socket_bind(struct socket *sock, struct sockaddr *address, + int addrlen) +{ + const struct landlock_ruleset *const dom = + landlock_get_current_domain(); + + if (!dom) + return 0; + + /* Check if it's a TCP socket. */ + if (sock->type != SOCK_STREAM) + return 0; + + switch (address->sa_family) { + case AF_UNSPEC: + case AF_INET: +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: +#endif + return check_socket_access(dom, get_port(address), + LANDLOCK_ACCESS_NET_BIND_TCP); + default: + return 0; + } +} + +static int hook_socket_connect(struct socket *sock, struct sockaddr *address, + int addrlen) +{ + const struct landlock_ruleset *const dom = + landlock_get_current_domain(); + + if (!dom) + return 0; + + /* Check if it's a TCP socket. */ + if (sock->type != SOCK_STREAM) + return 0; + + /* Check if the hook is AF_INET* socket's action. */ + switch (address->sa_family) { + case AF_INET: +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: +#endif + return check_socket_access(dom, get_port(address), + LANDLOCK_ACCESS_NET_CONNECT_TCP); + case AF_UNSPEC: { + u16 i; + /* + * If just in a layer a mask supports connect access, + * the socket_connect() hook with AF_UNSPEC family flag + * must be banned. This prevents from disconnecting already + * connected sockets. 
+ */ + for (i = 0; i < dom->num_layers; i++) { + if (landlock_get_net_access_mask(dom, i) & + LANDLOCK_ACCESS_NET_CONNECT_TCP) + return -EACCES; + } + } + } + return 0; +} + +static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = { + LSM_HOOK_INIT(socket_bind, hook_socket_bind), + LSM_HOOK_INIT(socket_connect, hook_socket_connect), +}; + +__init void landlock_add_net_hooks(void) +{ + security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks), + LANDLOCK_NAME); +} diff --git a/security/landlock/net.h b/security/landlock/net.h new file mode 100644 index 000000000000..7a79fb4bf3dd --- /dev/null +++ b/security/landlock/net.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock LSM - Network management and hooks + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. + */ + +#ifndef _SECURITY_LANDLOCK_NET_H +#define _SECURITY_LANDLOCK_NET_H + +#include "common.h" +#include "ruleset.h" +#include "setup.h" + +#if IS_ENABLED(CONFIG_INET) +__init void landlock_add_net_hooks(void); + +int landlock_append_net_rule(struct landlock_ruleset *const ruleset, u16 port, + u32 access_hierarchy); +#else /* IS_ENABLED(CONFIG_INET) */ +static inline void landlock_add_net_hooks(void) +{ +} +#endif /* IS_ENABLED(CONFIG_INET) */ + +#endif /* _SECURITY_LANDLOCK_NET_H */ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 6ca6373b3950..469811a77675 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -751,6 +751,18 @@ access_mask_t init_layer_masks(const struct landlock_ruleset *const domain, } } break; + case LANDLOCK_RULE_NET_SERVICE: + for_each_set_bit(access_bit, &access_req, + LANDLOCK_NUM_ACCESS_NET) { + if (landlock_get_net_access_mask(domain, + layer_level) & + BIT_ULL(access_bit)) { + (*layer_masks)[access_bit] |= + BIT_ULL(layer_level); + handled_accesses |= BIT_ULL(access_bit); + } + } + break; default: return 0; } diff --git a/security/landlock/setup.c b/security/landlock/setup.c index f8e8e980454c..8059dc0b47d3 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -14,6 +14,7 @@ #include "fs.h" #include "ptrace.h" #include "setup.h" +#include "net.h" bool landlock_initialized __lsm_ro_after_init = false; @@ -28,6 +29,7 @@ static int __init landlock_init(void) landlock_add_cred_hooks(); landlock_add_ptrace_hooks(); landlock_add_fs_hooks(); + landlock_add_net_hooks(); landlock_initialized = true; pr_info("Up and running.\n"); return 0; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 72fa01ba9de7..5069fac2ecf6 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -29,6 +29,7 @@ #include "cred.h" #include "fs.h" #include "limits.h" +#include "net.h" #include "ruleset.h" #include "setup.h" @@ -74,7 +75,8 @@ static void build_check_abi(void) { struct landlock_ruleset_attr ruleset_attr; struct landlock_path_beneath_attr path_beneath_attr; - size_t ruleset_size, path_beneath_size; + struct landlock_net_service_attr net_service_attr; + size_t ruleset_size, path_beneath_size, net_service_size; /* * For each user space ABI structures, first checks that there is no @@ -90,6 +92,11 @@ static void build_check_abi(void) path_beneath_size += sizeof(path_beneath_attr.parent_fd); BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); + + net_service_size = sizeof(net_service_attr.allowed_access); + net_service_size += sizeof(net_service_attr.port); + BUILD_BUG_ON(sizeof(net_service_attr) != 
net_service_size); + BUILD_BUG_ON(sizeof(net_service_attr) != 10); } /* Ruleset handling */ @@ -322,13 +329,54 @@ static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, return err; } +static int add_rule_net_service(struct landlock_ruleset *ruleset, + const void *const rule_attr) +{ +#if IS_ENABLED(CONFIG_INET) + struct landlock_net_service_attr net_service_attr; + int res; + u32 mask; + + /* Copies raw user space buffer, only one type for now. */ + res = copy_from_user(&net_service_attr, rule_attr, + sizeof(net_service_attr)); + if (res) + return -EFAULT; + + /* + * Informs about useless rule: empty allowed_access (i.e. deny rules) + * are ignored by network actions. + */ + if (!net_service_attr.allowed_access) + return -ENOMSG; + + /* + * Checks that allowed_access matches the @ruleset constraints + * (ruleset->access_masks[0] is automatically upgraded to 64-bits). + */ + mask = landlock_get_net_access_mask(ruleset, 0); + if ((net_service_attr.allowed_access | mask) != mask) + return -EINVAL; + + /* Denies inserting a rule with port 0. */ + if (net_service_attr.port == 0) + return -EINVAL; + + /* Imports the new rule. */ + return landlock_append_net_rule(ruleset, net_service_attr.port, + net_service_attr.allowed_access); +#else /* IS_ENABLED(CONFIG_INET) */ + return -EAFNOSUPPORT; +#endif /* IS_ENABLED(CONFIG_INET) */ +} + /** * sys_landlock_add_rule - Add a new rule to a ruleset * * @ruleset_fd: File descriptor tied to the ruleset that should be extended * with the new rule. - * @rule_type: Identify the structure type pointed to by @rule_attr (only - * LANDLOCK_RULE_PATH_BENEATH for now). + * @rule_type: Identify the structure type pointed to by @rule_attr: + * LANDLOCK_RULE_PATH_BENEATH or LANDLOCK_RULE_NET_SERVICE. * @rule_attr: Pointer to a rule (only of type &struct * landlock_path_beneath_attr for now). * @flags: Must be 0. @@ -339,6 +387,8 @@ static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, * Possible returned errors are: * * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; + * - EAFNOSUPPORT: @rule_type is LANDLOCK_RULE_NET_SERVICE but TCP/IP is not + * supported by the running kernel; * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. 
* &landlock_path_beneath_attr.allowed_access is not a subset of the rule's * accesses); @@ -374,6 +424,9 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, case LANDLOCK_RULE_PATH_BENEATH: err = add_rule_path_beneath(ruleset, rule_attr); break; + case LANDLOCK_RULE_NET_SERVICE: + err = add_rule_net_service(ruleset, rule_attr); + break; default: err = -EINVAL; break; From patchwork Tue Jun 21 08:23:06 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888813 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 400ACCCA473 for ; Tue, 21 Jun 2022 08:24:09 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229736AbiFUIYG (ORCPT ); Tue, 21 Jun 2022 04:24:06 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42116 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229955AbiFUIXr (ORCPT ); Tue, 21 Jun 2022 04:23:47 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 298E611A08; Tue, 21 Jun 2022 01:23:34 -0700 (PDT) Received: from fraeml705-chm.china.huawei.com (unknown [172.18.147.206]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxs3Nxdz67ZvX; Tue, 21 Jun 2022 16:21:37 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml705-chm.china.huawei.com (10.206.15.54) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:31 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:31 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 10/17] seltests/landlock: moves helper function Date: Tue, 21 Jun 2022 16:23:06 +0800 Message-ID: <20220621082313.3330667-11-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This commit moves enforce_ruleset() helper function to common.h so that to be used both by filesystem tests and network ones. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Splits commit. * Moves enforce_ruleset helper into common.h * Formats code with clang-format-14. 
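For context, a minimal sketch of how a network selftest is expected to use the shared helper once it lives in common.h (illustrative only, not part of this patch; the Landlock syscall wrappers and kselftest harness macros provided by common.h are assumed):

	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP,
	};
	int ruleset_fd;

	/*
	 * Creates a ruleset handling TCP bind, then enforces it on the
	 * current thread through the shared helper.
	 */
	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
					     sizeof(ruleset_attr), 0);
	ASSERT_LE(0, ruleset_fd);
	enforce_ruleset(_metadata, ruleset_fd);
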
--- tools/testing/selftests/landlock/common.h | 10 ++++++++++ tools/testing/selftests/landlock/fs_test.c | 10 ---------- 2 files changed, 10 insertions(+), 10 deletions(-) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h index 7ba18eb23783..48870afb054b 100644 --- a/tools/testing/selftests/landlock/common.h +++ b/tools/testing/selftests/landlock/common.h @@ -187,3 +187,13 @@ clear_cap(struct __test_metadata *const _metadata, const cap_value_t caps) { _effective_cap(_metadata, caps, CAP_CLEAR); } + +__attribute__((__unused__)) static void +enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd) +{ + ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) + { + TH_LOG("Failed to enforce ruleset: %s", strerror(errno)); + } +} diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c index 21a2ce8fa739..036dd6f8f9ea 100644 --- a/tools/testing/selftests/landlock/fs_test.c +++ b/tools/testing/selftests/landlock/fs_test.c @@ -551,16 +551,6 @@ static int create_ruleset(struct __test_metadata *const _metadata, return ruleset_fd; } -static void enforce_ruleset(struct __test_metadata *const _metadata, - const int ruleset_fd) -{ - ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); - ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) - { - TH_LOG("Failed to enforce ruleset: %s", strerror(errno)); - } -} - TEST_F_FORK(layout1, proc_nsfs) { const struct rule rules[] = { From patchwork Tue Jun 21 08:23:07 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888819 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 91A91C43334 for ; Tue, 21 Jun 2022 08:24:55 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1347612AbiFUIYx (ORCPT ); Tue, 21 Jun 2022 04:24:53 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42158 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348150AbiFUIYK (ORCPT ); Tue, 21 Jun 2022 04:24:10 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 1EFBE55A1; Tue, 21 Jun 2022 01:23:39 -0700 (PDT) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxx3HK4z6F8gD; Tue, 21 Jun 2022 16:21:41 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:36 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:32 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 11/17] seltests/landlock: adds tests for bind() hooks Date: Tue, 21 Jun 2022 16:23:07 +0800 Message-ID: <20220621082313.3330667-12-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: 
<20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Adds selftests for bind() socket action. The first is with no landlock restrictions: - bind without restrictions for ip4; - bind without restrictions for ip6; The second ones is with mixed landlock rules: - bind with restrictions for ip4; - bind with restrictions for ip6; Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Splits commit. * Adds local address 127.0.0.1. * Adds FIXTURE_VARIANT and FIXTURE_VARIANT_ADD helpers to support both ip4 and ip6 family tests and shorten the code. * Adds create_socket_variant() and bind_variant() helpers. * Gets rid of reuse_addr variable in create_socket_variant. * Formats code with clang-format-14. Changes since v4: * Adds port[MAX_SOCKET_NUM], struct sockaddr_in addr4 and struct sockaddr_in addr6 in FIXTURE. * Refactors FIXTURE_SETUP: - initializing self->port, self->addr4 and self->addr6. - adding network namespace. * Refactors code with self->port, self->addr4 and self->addr6 variables. * Adds selftests for IP6 family: - bind_no_restrictions_ip6. - bind_with_restrictions_ip6. * Refactors selftests/landlock/config * Moves enforce_ruleset() into common.h Changes since v3: * Split commit. * Add helper create_socket. * Add FIXTURE_SETUP. --- tools/testing/selftests/landlock/config | 4 + tools/testing/selftests/landlock/net_test.c | 180 ++++++++++++++++++++ 2 files changed, 184 insertions(+) create mode 100644 tools/testing/selftests/landlock/net_test.c -- 2.25.1 diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config index 0f0a65287bac..71f7e9a8a64c 100644 --- a/tools/testing/selftests/landlock/config +++ b/tools/testing/selftests/landlock/config @@ -1,3 +1,7 @@ +CONFIG_INET=y +CONFIG_IPV6=y +CONFIG_NET=y +CONFIG_NET_NS=y CONFIG_OVERLAY_FS=y CONFIG_SECURITY_LANDLOCK=y CONFIG_SECURITY_PATH=y diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c new file mode 100644 index 000000000000..a0e02b6bd79f --- /dev/null +++ b/tools/testing/selftests/landlock/net_test.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Landlock tests - Network + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. 
+ */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define MAX_SOCKET_NUM 10 + +#define SOCK_PORT_START 3470 +#define SOCK_PORT_ADD 10 + +#define IP_ADDRESS "127.0.0.1" + +FIXTURE(socket) +{ + uint port[MAX_SOCKET_NUM]; + struct sockaddr_in addr4[MAX_SOCKET_NUM]; + struct sockaddr_in6 addr6[MAX_SOCKET_NUM]; +}; + +FIXTURE_VARIANT(socket) +{ + const bool is_ipv4; +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(socket, ipv4) { + /* clang-format on */ + .is_ipv4 = true, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(socket, ipv6) { + /* clang-format on */ + .is_ipv4 = false, +}; + +static int create_socket_variant(const FIXTURE_VARIANT(socket) * const variant, + const int type) +{ + if (variant->is_ipv4) + return socket(AF_INET, type | SOCK_CLOEXEC, 0); + else + return socket(AF_INET6, type | SOCK_CLOEXEC, 0); +} + +static int bind_variant(const FIXTURE_VARIANT(socket) * const variant, + const int sockfd, + const FIXTURE_DATA(socket) * const self, + const size_t index) +{ + if (variant->is_ipv4) + return bind(sockfd, &self->addr4[index], + sizeof(self->addr4[index])); + else + return bind(sockfd, &self->addr6[index], + sizeof(self->addr6[index])); +} + +FIXTURE_SETUP(socket) +{ + int i; + /* Creates IP4 socket addresses. */ + for (i = 0; i < MAX_SOCKET_NUM; i++) { + self->port[i] = SOCK_PORT_START + SOCK_PORT_ADD * i; + self->addr4[i].sin_family = AF_INET; + self->addr4[i].sin_port = htons(self->port[i]); + self->addr4[i].sin_addr.s_addr = inet_addr(IP_ADDRESS); + memset(&(self->addr4[i].sin_zero), '\0', 8); + } + + /* Creates IP6 socket addresses. */ + for (i = 0; i < MAX_SOCKET_NUM; i++) { + self->port[i] = SOCK_PORT_START + SOCK_PORT_ADD * i; + self->addr6[i].sin6_family = AF_INET6; + self->addr6[i].sin6_port = htons(self->port[i]); + inet_pton(AF_INET6, IP_ADDRESS, &(self->addr6[i].sin6_addr)); + } + + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(0, unshare(CLONE_NEWNET)); + ASSERT_EQ(0, system("ip link set dev lo up")); + clear_cap(_metadata, CAP_SYS_ADMIN); +} + +FIXTURE_TEARDOWN(socket) +{ +} + +TEST_F(socket, bind_no_restrictions) +{ + int sockfd; + + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + + /* Binds a socket to port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + ASSERT_EQ(0, close(sockfd)); +} + +TEST_F(socket, bind_with_restrictions) +{ + int sockfd; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = self->port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = self->port[1], + }; + struct landlock_net_service_attr net_service_3 = { + .allowed_access = 0, + .port = self->port[2], + }; + + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows connect and bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and deny bind operations to the port[1] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + /* Empty allowed_access (i.e. 
deny rules) are ignored in network actions + * for port[2] socket. + */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + ASSERT_EQ(ENOMSG, errno); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + /* Binds a socket to port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* Close bounded socket. */ + ASSERT_EQ(0, close(sockfd)); + + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + /* Binds a socket to port[1]. */ + ASSERT_EQ(-1, bind_variant(variant, sockfd, self, 1)); + ASSERT_EQ(EACCES, errno); + + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + /* Binds a socket to port[2]. */ + ASSERT_EQ(-1, bind_variant(variant, sockfd, self, 2)); + ASSERT_EQ(EACCES, errno); +} +TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:08 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888816 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 74784C43334 for ; Tue, 21 Jun 2022 08:24:49 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348306AbiFUIYr (ORCPT ); Tue, 21 Jun 2022 04:24:47 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42226 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236616AbiFUIYi (ORCPT ); Tue, 21 Jun 2022 04:24:38 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E8075E001; Tue, 21 Jun 2022 01:23:40 -0700 (PDT) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxx6TSZz689TY; Tue, 21 Jun 2022 16:21:41 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:36 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:33 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 12/17] seltests/landlock: adds tests for connect() hooks Date: Tue, 21 Jun 2022 16:23:08 +0800 Message-ID: <20220621082313.3330667-13-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Adds selftests for connect socket action. 
The first are with no landlock restrictions: - connect without restrictions for ip4; - connect without restrictions for ip6; The second ones are with mixed landlock rules: - connect with restrictions ip4; - connect with restrictions ip6; Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Adds connect_variant() helper. * Formats code with clang-format-14. Changes since v4: * Adds selftests for IP6 family: - connect_no_restrictions_ip6. - connect_with_restrictions_ip6. * Refactors code with self->port, self->addr4 and self->addr6 variables. Changes since v3: * Split commit. --- tools/testing/selftests/landlock/net_test.c | 174 ++++++++++++++++++++ 1 file changed, 174 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index a0e02b6bd79f..ce6fd51a922d 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -26,6 +26,9 @@ #define IP_ADDRESS "127.0.0.1" +/* Number pending connections queue to be hold */ +#define BACKLOG 10 + FIXTURE(socket) { uint port[MAX_SOCKET_NUM]; @@ -72,6 +75,19 @@ static int bind_variant(const FIXTURE_VARIANT(socket) * const variant, sizeof(self->addr6[index])); } +static int connect_variant(const FIXTURE_VARIANT(socket) * const variant, + const int sockfd, + const FIXTURE_DATA(socket) * const self, + const size_t index) +{ + if (variant->is_ipv4) + return connect(sockfd, &self->addr4[index], + sizeof(self->addr4[index])); + else + return connect(sockfd, &self->addr6[index], + sizeof(self->addr6[index])); +} + FIXTURE_SETUP(socket) { int i; @@ -177,4 +193,162 @@ TEST_F(socket, bind_with_restrictions) ASSERT_EQ(-1, bind_variant(variant, sockfd, self, 2)); ASSERT_EQ(EACCES, errno); } + +TEST_F(socket, connect_no_restrictions) +{ + int sockfd, new_fd; + pid_t child; + int status; + + /* Creates a server socket. */ + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + + /* Binds a socket to port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* Makes listening socket. */ + ASSERT_EQ(0, listen(sockfd, BACKLOG)); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + int child_sockfd; + + /* Closes listening socket for the child. */ + ASSERT_EQ(0, close(sockfd)); + /* Create a stream client socket. */ + child_sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, child_sockfd, self, 0)); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Accepts connection from the child. */ + new_fd = accept(sockfd, NULL, 0); + ASSERT_LE(0, new_fd); + + /* Closes connection. */ + ASSERT_EQ(0, close(new_fd)); + + /* Closes listening socket for the parent. 
*/ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} + +TEST_F(socket, connect_with_restrictions) +{ + int new_fd; + int sockfd_1, sockfd_2; + pid_t child_1, child_2; + int status; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = self->port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = self->port[1], + }; + + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows connect and bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and deny bind operations to the port[1] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket 1. */ + sockfd_1 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_1); + + /* Binds the socket 1 to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd_1, self, 0)); + + /* Makes listening socket 1. */ + ASSERT_EQ(0, listen(sockfd_1, BACKLOG)); + + child_1 = fork(); + ASSERT_LE(0, child_1); + if (child_1 == 0) { + int child_sockfd; + + /* Closes listening socket for the child. */ + ASSERT_EQ(0, close(sockfd_1)); + /* Creates a stream client socket. */ + child_sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, child_sockfd, self, 0)); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Accepts connection from the child 1. */ + new_fd = accept(sockfd_1, NULL, 0); + ASSERT_LE(0, new_fd); + + /* Closes connection. */ + ASSERT_EQ(0, close(new_fd)); + + /* Closes listening socket 1 for the parent. */ + ASSERT_EQ(0, close(sockfd_1)); + + ASSERT_EQ(child_1, waitpid(child_1, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); + + /* Creates a server socket 2. */ + sockfd_2 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_2); + + /* Binds the socket 2 to address with port[1]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd_2, self, 1)); + + /* Makes listening socket 2. */ + ASSERT_EQ(0, listen(sockfd_2, BACKLOG)); + + child_2 = fork(); + ASSERT_LE(0, child_2); + if (child_2 == 0) { + int child_sockfd; + + /* Closes listening socket for the child. */ + ASSERT_EQ(0, close(sockfd_2)); + /* Creates a stream client socket. */ + child_sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket with port[1]. */ + ASSERT_EQ(-1, connect_variant(variant, child_sockfd, self, 1)); + ASSERT_EQ(EACCES, errno); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + + /* Closes listening socket 2 for the parent. 
*/ + ASSERT_EQ(0, close(sockfd_2)); + + ASSERT_EQ(child_2, waitpid(child_2, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:09 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888814 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A2DDECCA473 for ; Tue, 21 Jun 2022 08:24:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348225AbiFUIYl (ORCPT ); Tue, 21 Jun 2022 04:24:41 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41788 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348129AbiFUIYJ (ORCPT ); Tue, 21 Jun 2022 04:24:09 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 92B2BBE0E; Tue, 21 Jun 2022 01:23:39 -0700 (PDT) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzxx6k1nz6F8x6; Tue, 21 Jun 2022 16:21:41 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:36 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:35 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 13/17] seltests/landlock: adds AF_UNSPEC family test Date: Tue, 21 Jun 2022 16:23:09 +0800 Message-ID: <20220621082313.3330667-14-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Adds two selftests for connect() action with AF_UNSPEC family flag. The one is with no landlock restrictions allows to disconnect already connected socket with connect(..., AF_UNSPEC, ...): - connect_afunspec_no_restictions; The second one refuses landlocked process to disconnect already connected socket: - connect_afunspec_with_restictions; Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * Refactors code with self->port, self->addr4 variables. * Adds bind() hook check for with AF_UNSPEC family. Changes since v3: * Adds connect_afunspec_no_restictions test. * Adds connect_afunspec_with_restictions test. 
--- tools/testing/selftests/landlock/net_test.c | 113 ++++++++++++++++++++ 1 file changed, 113 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index ce6fd51a922d..c5fe4284c38f 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -351,4 +351,117 @@ TEST_F(socket, connect_with_restrictions) ASSERT_EQ(1, WIFEXITED(status)); ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); } + +TEST_F(socket, connect_afunspec_no_restictions) +{ + int sockfd; + pid_t child; + int status; + + /* Creates a server socket 1. */ + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + + /* Binds the socket 1 to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* Makes connection to the socket with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, sockfd, self, 0)); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + struct sockaddr addr_unspec = { .sa_family = AF_UNSPEC }; + + /* Child tries to disconnect already connected socket. */ + ASSERT_EQ(0, connect(sockfd, (struct sockaddr *)&addr_unspec, + sizeof(addr_unspec))); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Closes listening socket 1 for the parent. */ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} + +TEST_F(socket, connect_afunspec_with_restictions) +{ + int sockfd; + pid_t child; + int status; + + struct landlock_ruleset_attr ruleset_attr_1 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = self->port[0], + }; + + struct landlock_ruleset_attr ruleset_attr_2 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = self->port[0], + }; + + const int ruleset_fd_1 = landlock_create_ruleset( + &ruleset_attr_1, sizeof(ruleset_attr_1), 0); + ASSERT_LE(0, ruleset_fd_1); + + /* Allows bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_1, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_1); + + /* Creates a server socket 1. */ + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + + /* Binds the socket 1 to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* Makes connection to socket with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, sockfd, self, 0)); + + const int ruleset_fd_2 = landlock_create_ruleset( + &ruleset_attr_2, sizeof(ruleset_attr_2), 0); + ASSERT_LE(0, ruleset_fd_2); + + /* Allows connect and bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_2, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_2); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + struct sockaddr addr_unspec = { .sa_family = AF_UNSPEC }; + + /* Child tries to disconnect already connected socket. 
*/ + ASSERT_EQ(-1, connect(sockfd, (struct sockaddr *)&addr_unspec, + sizeof(addr_unspec))); + ASSERT_EQ(EACCES, errno); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Closes listening socket 1 for the parent. */ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:10 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888818 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B9EA6C43334 for ; Tue, 21 Jun 2022 08:24:53 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1345851AbiFUIYv (ORCPT ); Tue, 21 Jun 2022 04:24:51 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42164 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348155AbiFUIYK (ORCPT ); Tue, 21 Jun 2022 04:24:10 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id BE64315824; Tue, 21 Jun 2022 01:23:40 -0700 (PDT) Received: from fraeml703-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzvk5937z6H7Dt; Tue, 21 Jun 2022 16:19:46 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml703-chm.china.huawei.com (10.206.15.52) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:37 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:36 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 14/17] seltests/landlock: adds rules overlapping test Date: Tue, 21 Jun 2022 16:23:10 +0800 Message-ID: <20220621082313.3330667-15-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch adds overlapping rules for one port. First rule adds just bind() access right for a port. The second one adds both bind() and connect() access rights for the same port. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * Refactors code with self->port, self->addr4 variables. Changes since v3: * Adds ruleset_overlap test. 
--- tools/testing/selftests/landlock/net_test.c | 89 +++++++++++++++++++++ 1 file changed, 89 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index c5fe4284c38f..18ffd36f959c 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -464,4 +464,93 @@ TEST_F(socket, connect_afunspec_with_restictions) ASSERT_EQ(1, WIFEXITED(status)); ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); } + +TEST_F(socket, ruleset_overlap) +{ + int sockfd; + int one = 1; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = self->port[0], + }; + + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = self->port[0], + }; + + int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket. */ + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* Binds the socket to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* Makes connection to socket with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, sockfd, self, 0)); + + /* Closes socket. */ + ASSERT_EQ(0, close(sockfd)); + + /* Creates another ruleset layer. */ + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* + * Allows bind operations to the port[0] socket in + * the new ruleset layer. + */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + + /* Enforces the new ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket. */ + sockfd = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* Binds the socket to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd, self, 0)); + + /* + * Forbids to connect the socket to address with port[0], + * cause just one ruleset layer has connect() access rule. + */ + ASSERT_EQ(-1, connect_variant(variant, sockfd, self, 0)); + ASSERT_EQ(EACCES, errno); + + /* Closes socket. 
*/ + ASSERT_EQ(0, close(sockfd)); +} + TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:11 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888817 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CB9F7C43334 for ; Tue, 21 Jun 2022 08:24:51 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348349AbiFUIYs (ORCPT ); Tue, 21 Jun 2022 04:24:48 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41816 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1346525AbiFUIYi (ORCPT ); Tue, 21 Jun 2022 04:24:38 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 5382A1582E; Tue, 21 Jun 2022 01:23:41 -0700 (PDT) Received: from fraeml702-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzzm3yfnz684Xd; Tue, 21 Jun 2022 16:23:16 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml702-chm.china.huawei.com (10.206.15.51) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:38 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:37 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 15/17] seltests/landlock: adds ruleset expanding test Date: Tue, 21 Jun 2022 16:23:11 +0800 Message-ID: <20220621082313.3330667-16-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch adds expanding rulesets in which rules are gradually added one by one, restricting sockets' connections. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * Refactors code with self->port, self->addr4 variables. Changes since v3: * Adds ruleset_expanding test. 
--- tools/testing/selftests/landlock/net_test.c | 166 ++++++++++++++++++++ 1 file changed, 166 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index 18ffd36f959c..a9cb47836a21 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -553,4 +553,170 @@ TEST_F(socket, ruleset_overlap) ASSERT_EQ(0, close(sockfd)); } +TEST_F(socket, ruleset_expanding) +{ + int sockfd_1, sockfd_2; + int one = 1; + + struct landlock_ruleset_attr ruleset_attr_1 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = self->port[0], + }; + + const int ruleset_fd_1 = landlock_create_ruleset( + &ruleset_attr_1, sizeof(ruleset_attr_1), 0); + ASSERT_LE(0, ruleset_fd_1); + + /* Adds rule to port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_1, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_1); + ASSERT_EQ(0, close(ruleset_fd_1)); + + /* Creates a socket 1. */ + sockfd_1 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_1); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd_1, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* Binds the socket 1 to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd_1, self, 0)); + + /* Makes connection to socket 1 with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, sockfd_1, self, 0)); + + /* Closes socket 1. */ + ASSERT_EQ(0, close(sockfd_1)); + + /* Creates a socket 2. */ + sockfd_2 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_2); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd_2, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* + * Forbids to bind the socket 2 to address with port[1], + * cause there is no rule with bind() access for port[1]. + */ + ASSERT_EQ(-1, bind_variant(variant, sockfd_2, self, 1)); + ASSERT_EQ(EACCES, errno); + + /* Expands network mask. */ + struct landlock_ruleset_attr ruleset_attr_2 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + + /* Adds connect() access to port[0]. */ + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = self->port[0], + }; + /* Adds bind() access to port[1]. */ + struct landlock_net_service_attr net_service_3 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = self->port[1], + }; + + const int ruleset_fd_2 = landlock_create_ruleset( + &ruleset_attr_2, sizeof(ruleset_attr_2), 0); + ASSERT_LE(0, ruleset_fd_2); + + /* Adds rule to port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_2, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + /* Adds rule to port[1] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_2, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_2); + ASSERT_EQ(0, close(ruleset_fd_2)); + + /* Creates a socket 1. */ + sockfd_1 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_1); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd_1, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* Binds the socket 1 to address with port[0]. 
*/ + ASSERT_EQ(0, bind_variant(variant, sockfd_1, self, 0)); + + /* Makes connection to socket 1 with port[0]. */ + ASSERT_EQ(0, connect_variant(variant, sockfd_1, self, 0)); + + /* Closes socket 1. */ + ASSERT_EQ(0, close(sockfd_1)); + + /* Creates a socket 2. */ + sockfd_2 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_2); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd_2, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* + * Forbids to bind the socket 2 to address with port[1], + * cause just one layer has bind() access rule. + */ + ASSERT_EQ(-1, bind_variant(variant, sockfd_1, self, 1)); + ASSERT_EQ(EACCES, errno); + + /* Expands network mask. */ + struct landlock_ruleset_attr ruleset_attr_3 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + + /* Restricts connect() access to port[0]. */ + struct landlock_net_service_attr net_service_4 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = self->port[0], + }; + + const int ruleset_fd_3 = landlock_create_ruleset( + &ruleset_attr_3, sizeof(ruleset_attr_3), 0); + ASSERT_LE(0, ruleset_fd_3); + + /* Adds rule to port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_3, LANDLOCK_RULE_NET_SERVICE, + &net_service_4, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_3); + ASSERT_EQ(0, close(ruleset_fd_3)); + + /* Creates a socket 1. */ + sockfd_1 = create_socket_variant(variant, SOCK_STREAM); + ASSERT_LE(0, sockfd_1); + /* Allows to reuse of local address. */ + ASSERT_EQ(0, setsockopt(sockfd_1, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one))); + + /* Binds the socket 1 to address with port[0]. */ + ASSERT_EQ(0, bind_variant(variant, sockfd_1, self, 0)); + + /* + * Forbids to connect the socket 1 to address with port[0], + * cause just one layer has connect() access rule. + */ + ASSERT_EQ(-1, connect_variant(variant, sockfd_1, self, 0)); + ASSERT_EQ(EACCES, errno); + + /* Closes socket 1. 
*/ + ASSERT_EQ(0, close(sockfd_1)); +} TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:12 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888820 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4D712C43334 for ; Tue, 21 Jun 2022 08:24:58 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348154AbiFUIY4 (ORCPT ); Tue, 21 Jun 2022 04:24:56 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42560 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348173AbiFUIYj (ORCPT ); Tue, 21 Jun 2022 04:24:39 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EC66718B15; Tue, 21 Jun 2022 01:23:42 -0700 (PDT) Received: from fraeml701-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzy16PwGz687Z8; Tue, 21 Jun 2022 16:21:45 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml701-chm.china.huawei.com (10.206.15.50) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:40 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:39 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 16/17] seltests/landlock: adds invalid input data test Date: Tue, 21 Jun 2022 16:23:12 +0800 Message-ID: <20220621082313.3330667-17-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This patch adds rules with invalid user space supplied data: - unhandled allowed access; - zero port value; - zero access value; Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Formats code with clang-format-14. Changes since v4: * Refactors code with self->port variable. Changes since v3: * Adds inval test. --- tools/testing/selftests/landlock/net_test.c | 52 +++++++++++++++++++++ 1 file changed, 52 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index a9cb47836a21..ade834ab6497 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -719,4 +719,56 @@ TEST_F(socket, ruleset_expanding) /* Closes socket 1. 
*/ ASSERT_EQ(0, close(sockfd_1)); } + +TEST_F(socket, inval) +{ + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = self->port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = 0, + }; + struct landlock_net_service_attr net_service_3 = { + .allowed_access = 0, + .port = self->port[1], + }; + struct landlock_net_service_attr net_service_4 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = self->port[2], + }; + + /* Gets ruleset. */ + const int ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Checks unhandled allowed_access. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + ASSERT_EQ(EINVAL, errno); + + /* Checks zero port value. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + ASSERT_EQ(EINVAL, errno); + + /* Checks zero access value. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + ASSERT_EQ(ENOMSG, errno); + + /* Adds with legitimate values. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_4, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); +} TEST_HARNESS_MAIN From patchwork Tue Jun 21 08:23:13 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12888821 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9549ECCA473 for ; Tue, 21 Jun 2022 08:25:36 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1348197AbiFUIZf (ORCPT ); Tue, 21 Jun 2022 04:25:35 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41792 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1348191AbiFUIYl (ORCPT ); Tue, 21 Jun 2022 04:24:41 -0400 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id B6EF5237C9; Tue, 21 Jun 2022 01:23:44 -0700 (PDT) Received: from fraeml745-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4LRzy40VZHz6897v; Tue, 21 Jun 2022 16:21:48 +0800 (CST) Received: from lhreml745-chm.china.huawei.com (10.201.108.195) by fraeml745-chm.china.huawei.com (10.206.15.226) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 10:23:42 +0200 Received: from mscphis00759.huawei.com (10.123.66.134) by lhreml745-chm.china.huawei.com (10.201.108.195) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Tue, 21 Jun 2022 09:23:41 +0100 From: Konstantin Meskhidze To: CC: , , , , , Subject: [PATCH v6 17/17] samples/landlock: adds network demo Date: Tue, 21 Jun 2022 16:23:13 +0800 Message-ID: <20220621082313.3330667-18-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 
In-Reply-To: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> References: <20220621082313.3330667-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.66.134] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To lhreml745-chm.china.huawei.com (10.201.108.195) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This commit adds network demo. It's possible to allow a sandoxer to bind/connect to a list of particular ports restricting networks actions to the rest of ports. Signed-off-by: Konstantin Meskhidze --- Changes since v5: * Makes network ports sandboxing optional. * Fixes some logic errors. * Formats code with clang-format-14. Changes since v4: * Adds ENV_TCP_BIND_NAME "LL_TCP_BIND" and ENV_TCP_CONNECT_NAME "LL_TCP_CONNECT" variables to insert TCP ports. * Renames populate_ruleset() to populate_ruleset_fs(). * Adds populate_ruleset_net() and parse_port_num() helpers. * Refactors main() to support network sandboxing. --- samples/landlock/sandboxer.c | 118 +++++++++++++++++++++++++++++++---- 1 file changed, 107 insertions(+), 11 deletions(-) -- 2.25.1 diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c index 3e404e51ec64..0606c676fded 100644 --- a/samples/landlock/sandboxer.c +++ b/samples/landlock/sandboxer.c @@ -51,6 +51,8 @@ static inline int landlock_restrict_self(const int ruleset_fd, #define ENV_FS_RO_NAME "LL_FS_RO" #define ENV_FS_RW_NAME "LL_FS_RW" +#define ENV_TCP_BIND_NAME "LL_TCP_BIND" +#define ENV_TCP_CONNECT_NAME "LL_TCP_CONNECT" #define ENV_PATH_TOKEN ":" static int parse_path(char *env_path, const char ***const path_list) @@ -71,6 +73,20 @@ static int parse_path(char *env_path, const char ***const path_list) return num_paths; } +static int parse_port_num(char *env_port) +{ + int i, num_ports = 0; + + if (env_port) { + num_ports++; + for (i = 0; env_port[i]; i++) { + if (env_port[i] == ENV_PATH_TOKEN[0]) + num_ports++; + } + } + return num_ports; +} + /* clang-format off */ #define ACCESS_FILE ( \ @@ -80,8 +96,8 @@ static int parse_path(char *env_path, const char ***const path_list) /* clang-format on */ -static int populate_ruleset(const char *const env_var, const int ruleset_fd, - const __u64 allowed_access) +static int populate_ruleset_fs(const char *const env_var, const int ruleset_fd, + const __u64 allowed_access) { int num_paths, i, ret = 1; char *env_path_name; @@ -142,6 +158,48 @@ static int populate_ruleset(const char *const env_var, const int ruleset_fd, return ret; } +static int populate_ruleset_net(const char *const env_var, const int ruleset_fd, + const __u64 allowed_access) +{ + int num_ports, i, ret = 1; + char *env_port_name; + struct landlock_net_service_attr net_service = { + .allowed_access = 0, + .port = 0, + }; + + env_port_name = getenv(env_var); + if (!env_port_name) { + ret = 0; + goto out_free_name; + } + env_port_name = strdup(env_port_name); + unsetenv(env_var); + num_ports = parse_port_num(env_port_name); + + if (num_ports == 1 && (strtok(env_port_name, ENV_PATH_TOKEN) == NULL)) { + ret = 0; + goto out_free_name; + } + + for (i = 0; i < num_ports; i++) { + net_service.allowed_access = allowed_access; + net_service.port = atoi(strsep(&env_port_name, ENV_PATH_TOKEN)); + if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service, 0)) { + fprintf(stderr, + "Failed to update the ruleset with port \"%d\": %s\n", + net_service.port, strerror(errno)); + goto out_free_name; + } + } + ret = 0; + +out_free_name: + 
free(env_port_name); + return ret; +} + /* clang-format off */ #define ACCESS_FS_ROUGHLY_READ ( \ @@ -172,32 +230,50 @@ int main(const int argc, char *const argv[], char *const *const envp) const char *cmd_path; char *const *cmd_argv; int ruleset_fd, abi; + char *env_port_name; __u64 access_fs_ro = ACCESS_FS_ROUGHLY_READ, - access_fs_rw = ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_WRITE; + access_fs_rw = ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_WRITE, + access_net_tcp = 0; struct landlock_ruleset_attr ruleset_attr = { .handled_access_fs = access_fs_rw, + .handled_access_net = access_net_tcp, }; if (argc < 2) { fprintf(stderr, - "usage: %s=\"...\" %s=\"...\" %s [args]...\n\n", - ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]); + "usage: %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\"%s " + " [args]...\n\n", + ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME, + ENV_TCP_CONNECT_NAME, argv[0]); fprintf(stderr, "Launch a command in a restricted environment.\n\n"); - fprintf(stderr, "Environment variables containing paths, " - "each separated by a colon:\n"); + fprintf(stderr, + "Environment variables containing paths and ports " + "each separated by a colon:\n"); fprintf(stderr, "* %s: list of paths allowed to be used in a read-only way.\n", ENV_FS_RO_NAME); fprintf(stderr, - "* %s: list of paths allowed to be used in a read-write way.\n", + "* %s: list of paths allowed to be used in a read-write way.\n\n", ENV_FS_RW_NAME); + fprintf(stderr, + "Environment variables containing ports are optional " + "and could be skipped.\n"); + fprintf(stderr, + "* %s: list of ports allowed to bind (server).\n", + ENV_TCP_BIND_NAME); + fprintf(stderr, + "* %s: list of ports allowed to connect (client).\n", + ENV_TCP_CONNECT_NAME); fprintf(stderr, "\nexample:\n" "%s=\"/bin:/lib:/usr:/proc:/etc:/dev/urandom\" " "%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" " + "%s=\"9418\" " + "%s=\"80:443\" " "%s bash -i\n", - ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]); + ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME, + ENV_TCP_CONNECT_NAME, argv[0]); return 1; } @@ -232,16 +308,36 @@ int main(const int argc, char *const argv[], char *const *const envp) access_fs_rw &= ~ACCESS_ABI_2; } + /* Adds optionally network bind() support. */ + env_port_name = getenv(ENV_TCP_BIND_NAME); + if (env_port_name) { + access_net_tcp |= LANDLOCK_ACCESS_NET_BIND_TCP; + } + /* Adds optionally network connect() support. */ + env_port_name = getenv(ENV_TCP_CONNECT_NAME); + if (env_port_name) { + access_net_tcp |= LANDLOCK_ACCESS_NET_CONNECT_TCP; + } + ruleset_attr.handled_access_net = access_net_tcp; + ruleset_fd = landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); if (ruleset_fd < 0) { perror("Failed to create a ruleset"); return 1; } - if (populate_ruleset(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) { + if (populate_ruleset_fs(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) { + goto err_close_ruleset; + } + if (populate_ruleset_fs(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) { + goto err_close_ruleset; + } + if (populate_ruleset_net(ENV_TCP_BIND_NAME, ruleset_fd, + LANDLOCK_ACCESS_NET_BIND_TCP)) { goto err_close_ruleset; } - if (populate_ruleset(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) { + if (populate_ruleset_net(ENV_TCP_CONNECT_NAME, ruleset_fd, + LANDLOCK_ACCESS_NET_CONNECT_TCP)) { goto err_close_ruleset; } if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {