From patchwork Wed Mar 9 13:44:45 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775086 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1C381C433F5 for ; Wed, 9 Mar 2022 13:45:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232105AbiCINqK (ORCPT ); Wed, 9 Mar 2022 08:46:10 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40690 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231710AbiCINqJ (ORCPT ); Wed, 9 Mar 2022 08:46:09 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 9447417B893; Wed, 9 Mar 2022 05:45:10 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1Y53MVz67gYW; Wed, 9 Mar 2022 21:43:45 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:08 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 01/15] landlock: access mask renaming Date: Wed, 9 Mar 2022 21:44:45 +0800 Message-ID: <20220309134459.6448-2-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Currently Landlock supports filesystem restrictions. To support network type rules, this modification extends and renames ruleset's access masks. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. --- security/landlock/fs.c | 4 ++-- security/landlock/ruleset.c | 18 +++++++++--------- security/landlock/ruleset.h | 8 ++++---- security/landlock/syscalls.c | 6 +++--- 4 files changed, 18 insertions(+), 18 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 97b8e421f617..d727bdab7840 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -163,7 +163,7 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, return -EINVAL; /* Transforms relative access rights to absolute ones. */ - access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0]; + access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->access_masks[0]; object = get_inode_object(d_backing_inode(path->dentry)); if (IS_ERR(object)) return PTR_ERR(object); @@ -252,7 +252,7 @@ static int check_access_path(const struct landlock_ruleset *const domain, /* Saves all layers handling a subset of requested accesses. */ layer_mask = 0; for (i = 0; i < domain->num_layers; i++) { - if (domain->fs_access_masks[i] & access_request) + if (domain->access_masks[i] & access_request) layer_mask |= BIT_ULL(i); } /* An access request not handled by the domain is allowed. 
*/ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index ec72b9262bf3..78341a0538de 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -28,7 +28,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) { struct landlock_ruleset *new_ruleset; - new_ruleset = kzalloc(struct_size(new_ruleset, fs_access_masks, + new_ruleset = kzalloc(struct_size(new_ruleset, access_masks, num_layers), GFP_KERNEL_ACCOUNT); if (!new_ruleset) return ERR_PTR(-ENOMEM); @@ -39,21 +39,21 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) /* * hierarchy = NULL * num_rules = 0 - * fs_access_masks[] = 0 + * access_masks[] = 0 */ return new_ruleset; } -struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask) +struct landlock_ruleset *landlock_create_ruleset(const u32 access_mask) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. */ - if (!fs_access_mask) + if (!access_mask) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); if (!IS_ERR(new_ruleset)) - new_ruleset->fs_access_masks[0] = fs_access_mask; + new_ruleset->access_masks[0] = access_mask; return new_ruleset; } @@ -116,7 +116,7 @@ static void build_check_ruleset(void) .num_rules = ~0, .num_layers = ~0, }; - typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0; + typeof(ruleset.access_masks[0]) fs_access_mask = ~0; BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES); BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS); @@ -279,7 +279,7 @@ static int merge_ruleset(struct landlock_ruleset *const dst, err = -EINVAL; goto out_unlock; } - dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0]; + dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; /* Merges the @src tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, @@ -337,8 +337,8 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, goto out_unlock; } /* Copies the parent layer stack and leaves a space for the new layer. */ - memcpy(child->fs_access_masks, parent->fs_access_masks, - flex_array_size(parent, fs_access_masks, parent->num_layers)); + memcpy(child->access_masks, parent->access_masks, + flex_array_size(parent, access_masks, parent->num_layers)); if (WARN_ON_ONCE(!parent->hierarchy)) { err = -EINVAL; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 2d3ed7ec5a0a..32d90ce72428 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -97,7 +97,7 @@ struct landlock_ruleset { * section. This is only used by * landlock_put_ruleset_deferred() when @usage reaches zero. * The fields @lock, @usage, @num_rules, @num_layers and - * @fs_access_masks are then unused. + * @access_masks are then unused. */ struct work_struct work_free; struct { @@ -124,7 +124,7 @@ struct landlock_ruleset { */ u32 num_layers; /** - * @fs_access_masks: Contains the subset of filesystem + * @access_masks: Contains the subset of filesystem * actions that are restricted by a ruleset. A domain * saves all layers of merged rulesets in a stack * (FAM), starting from the first layer to the last @@ -135,12 +135,12 @@ struct landlock_ruleset { * layers are set once and never changed for the * lifetime of the ruleset. 
*/ - u16 fs_access_masks[]; + u32 access_masks[]; }; }; }; -struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask); +struct landlock_ruleset *landlock_create_ruleset(const u32 access_mask); void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 32396962f04d..f1d86311df7e 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -341,10 +341,10 @@ SYSCALL_DEFINE4(landlock_add_rule, } /* * Checks that allowed_access matches the @ruleset constraints - * (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits). + * (ruleset->access_masks[0] is automatically upgraded to 64-bits). */ - if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) != - ruleset->fs_access_masks[0]) { + if ((path_beneath_attr.allowed_access | ruleset->access_masks[0]) != + ruleset->access_masks[0]) { err = -EINVAL; goto out_put_ruleset; } From patchwork Wed Mar 9 13:44:46 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775088 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DC00EC433F5 for ; Wed, 9 Mar 2022 13:45:21 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232244AbiCINqS (ORCPT ); Wed, 9 Mar 2022 08:46:18 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40846 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231219AbiCINqM (ORCPT ); Wed, 9 Mar 2022 08:46:12 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3CF5917B88E; Wed, 9 Mar 2022 05:45:13 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD2g48CVz67bVT; Wed, 9 Mar 2022 21:44:43 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:10 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 02/15] landlock: filesystem access mask helpers Date: Wed, 9 Mar 2022 21:44:46 +0800 Message-ID: <20220309134459.6448-3-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC This patch adds filesystem helper functions to set and get filesystem mask. Also the modification adds a helper structure landlock_access_mask to support managing multiple access mask. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Add get_mask, set_mask helpers for filesystem. * Add new struct landlock_access_mask. 
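For illustration, the call pattern these helpers introduce can be sketched as follows (simplified from the ruleset.c and syscalls.c hunks below; error handling, locking and build-time checks are omitted):

	/* Callers now group the handled accesses in one structure instead of
	 * passing a raw u32. */
	struct landlock_access_mask access_mask_set = { .fs = 0 };
	struct landlock_ruleset *ruleset;

	access_mask_set.fs = ruleset_attr.handled_access_fs;
	ruleset = landlock_create_ruleset(&access_mask_set);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* Layer 0 is then read back through the getter instead of touching
	 * access_masks[] directly: */
	if ((path_beneath_attr.allowed_access |
	     landlock_get_fs_access_mask(ruleset, 0)) !=
	    landlock_get_fs_access_mask(ruleset, 0))
		return -EINVAL;
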
--- security/landlock/fs.c | 4 ++-- security/landlock/ruleset.c | 20 +++++++++++++++++--- security/landlock/ruleset.h | 19 ++++++++++++++++++- security/landlock/syscalls.c | 9 ++++++--- 4 files changed, 43 insertions(+), 9 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index d727bdab7840..97f5c455f5a7 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -163,7 +163,7 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, return -EINVAL; /* Transforms relative access rights to absolute ones. */ - access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->access_masks[0]; + access_rights |= LANDLOCK_MASK_ACCESS_FS & ~landlock_get_fs_access_mask(ruleset, 0); object = get_inode_object(d_backing_inode(path->dentry)); if (IS_ERR(object)) return PTR_ERR(object); @@ -252,7 +252,7 @@ static int check_access_path(const struct landlock_ruleset *const domain, /* Saves all layers handling a subset of requested accesses. */ layer_mask = 0; for (i = 0; i < domain->num_layers; i++) { - if (domain->access_masks[i] & access_request) + if (landlock_get_fs_access_mask(domain, i) & access_request) layer_mask |= BIT_ULL(i); } /* An access request not handled by the domain is allowed. */ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 78341a0538de..a6212b752549 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -44,16 +44,30 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) return new_ruleset; } -struct landlock_ruleset *landlock_create_ruleset(const u32 access_mask) +/* A helper function to set a filesystem mask */ +void landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, + const struct landlock_access_mask *access_mask_set, + u16 mask_level) +{ + ruleset->access_masks[mask_level] = access_mask_set->fs; +} + +/* A helper function to get a filesystem mask */ +u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level) +{ + return ruleset->access_masks[mask_level]; +} + +struct landlock_ruleset *landlock_create_ruleset(const struct landlock_access_mask *access_mask_set) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. */ - if (!access_mask) + if (!access_mask_set->fs) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); if (!IS_ERR(new_ruleset)) - new_ruleset->access_masks[0] = access_mask; + landlock_set_fs_access_mask(new_ruleset, access_mask_set, 0); return new_ruleset; } diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 32d90ce72428..bc87e5f787f7 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -16,6 +16,16 @@ #include "object.h" +/** + * struct landlock_access_mask - A helper structure to handle different mask types + */ +struct landlock_access_mask { + /** + * @fs: Filesystem access mask. 
+ */ + u16 fs; +}; + /** * struct landlock_layer - Access rights for a given layer */ @@ -140,7 +150,8 @@ struct landlock_ruleset { }; }; -struct landlock_ruleset *landlock_create_ruleset(const u32 access_mask); +struct landlock_ruleset *landlock_create_ruleset(const struct landlock_access_mask + *access_mask_set); void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); @@ -162,4 +173,10 @@ static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) refcount_inc(&ruleset->usage); } +void landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, + const struct landlock_access_mask *access_mask_set, + u16 mask_level); + +u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level); + #endif /* _SECURITY_LANDLOCK_RULESET_H */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index f1d86311df7e..5931b666321d 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -159,6 +159,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset, { struct landlock_ruleset_attr ruleset_attr; struct landlock_ruleset *ruleset; + struct landlock_access_mask access_mask_set = {.fs = 0}; int err, ruleset_fd; /* Build-time checks. */ @@ -185,9 +186,10 @@ SYSCALL_DEFINE3(landlock_create_ruleset, if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != LANDLOCK_MASK_ACCESS_FS) return -EINVAL; + access_mask_set.fs = ruleset_attr.handled_access_fs; /* Checks arguments and transforms to kernel struct. */ - ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs); + ruleset = landlock_create_ruleset(&access_mask_set); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); @@ -343,8 +345,9 @@ SYSCALL_DEFINE4(landlock_add_rule, * Checks that allowed_access matches the @ruleset constraints * (ruleset->access_masks[0] is automatically upgraded to 64-bits). 
*/ - if ((path_beneath_attr.allowed_access | ruleset->access_masks[0]) != - ruleset->access_masks[0]) { + + if ((path_beneath_attr.allowed_access | landlock_get_fs_access_mask(ruleset, 0)) != + landlock_get_fs_access_mask(ruleset, 0)) { err = -EINVAL; goto out_put_ruleset; } From patchwork Wed Mar 9 13:44:47 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775089 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7E343C4167D for ; Wed, 9 Mar 2022 13:45:23 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232861AbiCINqT (ORCPT ); Wed, 9 Mar 2022 08:46:19 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41008 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232619AbiCINqN (ORCPT ); Wed, 9 Mar 2022 08:46:13 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 9FE4B17B898; Wed, 9 Mar 2022 05:45:14 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1d5HCwz67kNP; Wed, 9 Mar 2022 21:43:49 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:12 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 03/15] landlock: landlock_find/insert_rule refactoring Date: Wed, 9 Mar 2022 21:44:47 +0800 Message-ID: <20220309134459.6448-4-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC A new object union added to support a socket port rule type. To support it landlock_insert_rule() and landlock_find_rule() were refactored. Now adding or searching a rule in a ruleset depends on a rule_type argument provided in refactored functions mentioned above. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Refactoring landlock_insert_rule and landlock_find_rule functions. * Rename new_ruleset->root_inode. 
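For illustration, the refactored rule key handling can be sketched as follows (simplified from the ruleset.h and ruleset.c hunks below; only the inode case exists so far, the data member is reserved for later rule types such as socket ports):

	/* The rule key is now a union: a refcounted object pointer for inode
	 * rules, or raw data (e.g. a port number) for future rule types. */
	union {
		struct landlock_object *ptr;	/* LANDLOCK_RULE_PATH_BENEATH */
		uintptr_t data;			/* reserved for new rule types */
	} object;

	/* Insertion and lookup now take the rule type explicitly: */
	err = landlock_insert_rule(ruleset, object_ptr, 0, access_rights,
				   LANDLOCK_RULE_PATH_BENEATH);
	rule = landlock_find_rule(domain, (uintptr_t)object_ptr,
				  LANDLOCK_RULE_PATH_BENEATH);
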
--- security/landlock/fs.c | 5 +- security/landlock/ruleset.c | 108 +++++++++++++++++++++++++----------- security/landlock/ruleset.h | 26 +++++---- 3 files changed, 94 insertions(+), 45 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 97f5c455f5a7..1497948d754f 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -168,7 +168,7 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, if (IS_ERR(object)) return PTR_ERR(object); mutex_lock(&ruleset->lock); - err = landlock_insert_rule(ruleset, object, access_rights); + err = landlock_insert_rule(ruleset, object, 0, access_rights, LANDLOCK_RULE_PATH_BENEATH); mutex_unlock(&ruleset->lock); /* * No need to check for an error because landlock_insert_rule() @@ -195,7 +195,8 @@ static inline u64 unmask_layers( inode = d_backing_inode(path->dentry); rcu_read_lock(); rule = landlock_find_rule(domain, - rcu_dereference(landlock_inode(inode)->object)); + (uintptr_t)rcu_dereference(landlock_inode(inode)->object), + LANDLOCK_RULE_PATH_BENEATH); rcu_read_unlock(); if (!rule) return layer_mask; diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index a6212b752549..971685c48641 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -34,7 +34,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) return ERR_PTR(-ENOMEM); refcount_set(&new_ruleset->usage, 1); mutex_init(&new_ruleset->lock); - new_ruleset->root = RB_ROOT; + new_ruleset->root_inode = RB_ROOT; new_ruleset->num_layers = num_layers; /* * hierarchy = NULL @@ -81,10 +81,12 @@ static void build_check_rule(void) } static struct landlock_rule *create_rule( - struct landlock_object *const object, + struct landlock_object *const object_ptr, + const uintptr_t object_data, const struct landlock_layer (*const layers)[], const u32 num_layers, - const struct landlock_layer *const new_layer) + const struct landlock_layer *const new_layer, + const u16 rule_type) { struct landlock_rule *new_rule; u32 new_num_layers; @@ -103,8 +105,16 @@ static struct landlock_rule *create_rule( if (!new_rule) return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); - landlock_get_object(object); - new_rule->object = object; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + landlock_get_object(object_ptr); + new_rule->object.ptr = object_ptr; + break; + default: + return ERR_PTR(-EINVAL); + } + new_rule->num_layers = new_num_layers; /* Copies the original layer stack. */ memcpy(new_rule->layers, layers, @@ -120,7 +130,7 @@ static void free_rule(struct landlock_rule *const rule) might_sleep(); if (!rule) return; - landlock_put_object(rule->object); + landlock_put_object(rule->object.ptr); kfree(rule); } @@ -156,26 +166,38 @@ static void build_check_ruleset(void) * access rights. 
*/ static int insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, + struct landlock_object *const object_ptr, + const uintptr_t object_data, const struct landlock_layer (*const layers)[], - size_t num_layers) + size_t num_layers, u16 rule_type) { struct rb_node **walker_node; struct rb_node *parent_node = NULL; struct landlock_rule *new_rule; + uintptr_t object; + struct rb_root *root; might_sleep(); lockdep_assert_held(&ruleset->lock); - if (WARN_ON_ONCE(!object || !layers)) - return -ENOENT; - walker_node = &(ruleset->root.rb_node); + /* Choose rb_tree structure depending on a rule type */ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + if (WARN_ON_ONCE(!object_ptr || !layers)) + return -ENOENT; + object = (uintptr_t)object_ptr; + root = &ruleset->root_inode; + break; + default: + return -EINVAL; + } + walker_node = &root->rb_node; while (*walker_node) { struct landlock_rule *const this = rb_entry(*walker_node, struct landlock_rule, node); - if (this->object != object) { + if (this->object.data != object) { parent_node = *walker_node; - if (this->object < object) + if (this->object.data < object) walker_node = &((*walker_node)->rb_right); else walker_node = &((*walker_node)->rb_left); @@ -207,11 +229,15 @@ static int insert_rule(struct landlock_ruleset *const ruleset, * Intersects access rights when it is a merge between a * ruleset and a domain. */ - new_rule = create_rule(object, &this->layers, this->num_layers, - &(*layers)[0]); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + new_rule = create_rule(object_ptr, 0, &this->layers, this->num_layers, + &(*layers)[0], rule_type); + break; + } if (IS_ERR(new_rule)) return PTR_ERR(new_rule); - rb_replace_node(&this->node, &new_rule->node, &ruleset->root); + rb_replace_node(&this->node, &new_rule->node, &ruleset->root_inode); free_rule(this); return 0; } @@ -220,11 +246,15 @@ static int insert_rule(struct landlock_ruleset *const ruleset, build_check_ruleset(); if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES) return -E2BIG; - new_rule = create_rule(object, layers, num_layers, NULL); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + new_rule = create_rule(object_ptr, 0, layers, num_layers, NULL, rule_type); + break; + } if (IS_ERR(new_rule)) return PTR_ERR(new_rule); rb_link_node(&new_rule->node, parent_node, walker_node); - rb_insert_color(&new_rule->node, &ruleset->root); + rb_insert_color(&new_rule->node, &ruleset->root_inode); ruleset->num_rules++; return 0; } @@ -242,7 +272,9 @@ static void build_check_layer(void) /* @ruleset must be locked by the caller. */ int landlock_insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, const u32 access) + struct landlock_object *const object_ptr, + const uintptr_t object_data, + const u32 access, const u16 rule_type) { struct landlock_layer layers[] = {{ .access = access, @@ -251,7 +283,8 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset, }}; build_check_layer(); - return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers)); + return insert_rule(ruleset, object_ptr, object_data, &layers, + ARRAY_SIZE(layers), rule_type); } static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy) @@ -297,7 +330,7 @@ static int merge_ruleset(struct landlock_ruleset *const dst, /* Merges the @src tree. 
*/ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &src->root, node) { + &src->root_inode, node) { struct landlock_layer layers[] = {{ .level = dst->num_layers, }}; @@ -311,8 +344,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst, goto out_unlock; } layers[0].access = walker_rule->layers[0].access; - err = insert_rule(dst, walker_rule->object, &layers, - ARRAY_SIZE(layers)); + err = insert_rule(dst, walker_rule->object.ptr, 0, &layers, + ARRAY_SIZE(layers), LANDLOCK_RULE_PATH_BENEATH); if (err) goto out_unlock; } @@ -323,6 +356,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst, return err; } + + static int inherit_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const child) { @@ -339,9 +374,10 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, /* Copies the @parent tree. */ rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &parent->root, node) { - err = insert_rule(child, walker_rule->object, - &walker_rule->layers, walker_rule->num_layers); + &parent->root_inode, node) { + err = insert_rule(child, walker_rule->object.ptr, 0, + &walker_rule->layers, walker_rule->num_layers, + LANDLOCK_RULE_PATH_BENEATH); if (err) goto out_unlock; } @@ -372,7 +408,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) struct landlock_rule *freeme, *next; might_sleep(); - rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, + rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) free_rule(freeme); put_hierarchy(ruleset->hierarchy); @@ -465,20 +501,28 @@ struct landlock_ruleset *landlock_merge_ruleset( */ const struct landlock_rule *landlock_find_rule( const struct landlock_ruleset *const ruleset, - const struct landlock_object *const object) + const uintptr_t object_data, const u16 rule_type) { const struct rb_node *node; - if (!object) + if (!object_data) return NULL; - node = ruleset->root.rb_node; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + node = ruleset->root_inode.rb_node; + break; + default: + return ERR_PTR(-EINVAL); + } + while (node) { struct landlock_rule *this = rb_entry(node, struct landlock_rule, node); - if (this->object == object) + if (this->object.data == object_data) return this; - if (this->object < object) + if (this->object.data < object_data) node = node->rb_right; else node = node->rb_left; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index bc87e5f787f7..088b8d95f653 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -50,15 +50,17 @@ struct landlock_rule { */ struct rb_node node; /** - * @object: Pointer to identify a kernel object (e.g. an inode). This - * is used as a key for this ruleset element. This pointer is set once - * and never modified. It always points to an allocated object because - * each rule increments the refcount of its object. - */ - struct landlock_object *object; - /** - * @num_layers: Number of entries in @layers. + * @object: A union to identify either a kernel object (e.g. an inode) or + * a socket port object. This is used as a key for this ruleset element. + * This pointer is set once and never modified. It always points to an + * allocated object because each rule increments the refcount of its + * object (for inodes); */ + union { + struct landlock_object *ptr; + uintptr_t data; + } object; + u32 num_layers; /** * @layers: Stack of layers, from the latest to the newest, implemented @@ -95,7 +97,7 @@ struct landlock_ruleset { * nodes. 
Once a ruleset is tied to a process (i.e. as a domain), this * tree is immutable until @usage reaches zero. */ - struct rb_root root; + struct rb_root root_inode; /** * @hierarchy: Enables hierarchy identification even when a parent * domain vanishes. This is needed for the ptrace protection. @@ -157,7 +159,9 @@ void landlock_put_ruleset(struct landlock_ruleset *const ruleset); void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset); int landlock_insert_rule(struct landlock_ruleset *const ruleset, - struct landlock_object *const object, const u32 access); + struct landlock_object *const object_ptr, + const uintptr_t object_data, + const u32 access, const u16 rule_type); struct landlock_ruleset *landlock_merge_ruleset( struct landlock_ruleset *const parent, @@ -165,7 +169,7 @@ struct landlock_ruleset *landlock_merge_ruleset( const struct landlock_rule *landlock_find_rule( const struct landlock_ruleset *const ruleset, - const struct landlock_object *const object); + const uintptr_t object_data, const u16 rule_type); static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) { From patchwork Wed Mar 9 13:44:48 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775090 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 21AEEC46467 for ; Wed, 9 Mar 2022 13:45:24 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232910AbiCINqV (ORCPT ); Wed, 9 Mar 2022 08:46:21 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41324 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232717AbiCINqS (ORCPT ); Wed, 9 Mar 2022 08:46:18 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0C61417B8A8; Wed, 9 Mar 2022 05:45:16 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.201]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD2k2p0gz67bVT; Wed, 9 Mar 2022 21:44:46 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:13 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 04/15] landlock: merge and inherit function refactoring Date: Wed, 9 Mar 2022 21:44:48 +0800 Message-ID: <20220309134459.6448-5-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Merge_ruleset() and inherit_ruleset() functions were refactored to support new rule types. This patch adds tree_merge() and tree_copy() helpers. Each has rule_type argument to choose a particular rb_tree structure in a ruleset. 
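For illustration only, the tree selection that both new helpers perform can be pictured as a small dispatch (get_rule_root() is a hypothetical name, not part of this patch; the actual code open-codes the switch inside tree_merge() and tree_copy()):

	static struct rb_root *get_rule_root(struct landlock_ruleset *ruleset,
					     const u16 rule_type)
	{
		switch (rule_type) {
		case LANDLOCK_RULE_PATH_BENEATH:
			return &ruleset->root_inode;	/* inode-keyed rules */
		default:
			/* Later patches add more trees (e.g. network ports). */
			return NULL;
		}
	}
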
Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Refactoring functions: -insert_rule. -merge_ruleset. -tree_merge. -inherit_ruleset. -tree_copy. -free_rule. --- security/landlock/ruleset.c | 141 +++++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 44 deletions(-) -- 2.25.1 diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 971685c48641..f2baa1c96b16 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -125,12 +125,16 @@ static struct landlock_rule *create_rule( return new_rule; } -static void free_rule(struct landlock_rule *const rule) +static void free_rule(struct landlock_rule *const rule, const u16 rule_type) { might_sleep(); if (!rule) return; - landlock_put_object(rule->object.ptr); + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + landlock_put_object(rule->object.ptr); + break; + } kfree(rule); } @@ -233,12 +237,12 @@ static int insert_rule(struct landlock_ruleset *const ruleset, case LANDLOCK_RULE_PATH_BENEATH: new_rule = create_rule(object_ptr, 0, &this->layers, this->num_layers, &(*layers)[0], rule_type); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_replace_node(&this->node, &new_rule->node, &ruleset->root_inode); + free_rule(this, rule_type); break; } - if (IS_ERR(new_rule)) - return PTR_ERR(new_rule); - rb_replace_node(&this->node, &new_rule->node, &ruleset->root_inode); - free_rule(this); return 0; } @@ -249,13 +253,13 @@ static int insert_rule(struct landlock_ruleset *const ruleset, switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: new_rule = create_rule(object_ptr, 0, layers, num_layers, NULL, rule_type); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_link_node(&new_rule->node, parent_node, walker_node); + rb_insert_color(&new_rule->node, &ruleset->root_inode); + ruleset->num_rules++; break; } - if (IS_ERR(new_rule)) - return PTR_ERR(new_rule); - rb_link_node(&new_rule->node, parent_node, walker_node); - rb_insert_color(&new_rule->node, &ruleset->root_inode); - ruleset->num_rules++; return 0; } @@ -303,10 +307,53 @@ static void put_hierarchy(struct landlock_hierarchy *hierarchy) } } +static int tree_merge(struct landlock_ruleset *const src, + struct landlock_ruleset *const dst, u16 rule_type) +{ + struct landlock_rule *walker_rule, *next_rule; + struct rb_root *src_root; + int err = 0; + + /* Choose rb_tree structure depending on a rule type */ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + src_root = &src->root_inode; + break; + default: + return -EINVAL; + } + /* Merges the @src tree. */ + rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, + src_root, node) { + struct landlock_layer layers[] = {{ + .level = dst->num_layers, + }}; + + if (WARN_ON_ONCE(walker_rule->num_layers != 1)) { + err = -EINVAL; + return err; + } + if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) { + err = -EINVAL; + return err; + } + layers[0].access = walker_rule->layers[0].access; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = insert_rule(dst, walker_rule->object.ptr, 0, &layers, + ARRAY_SIZE(layers), rule_type); + break; + } + if (err) + return err; + } + return err; +} + static int merge_ruleset(struct landlock_ruleset *const dst, struct landlock_ruleset *const src) { - struct landlock_rule *walker_rule, *next_rule; int err = 0; might_sleep(); @@ -328,27 +375,10 @@ static int merge_ruleset(struct landlock_ruleset *const dst, } dst->access_masks[dst->num_layers - 1] = src->access_masks[0]; - /* Merges the @src tree. 
*/ - rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &src->root_inode, node) { - struct landlock_layer layers[] = {{ - .level = dst->num_layers, - }}; - - if (WARN_ON_ONCE(walker_rule->num_layers != 1)) { - err = -EINVAL; - goto out_unlock; - } - if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) { - err = -EINVAL; - goto out_unlock; - } - layers[0].access = walker_rule->layers[0].access; - err = insert_rule(dst, walker_rule->object.ptr, 0, &layers, - ARRAY_SIZE(layers), LANDLOCK_RULE_PATH_BENEATH); - if (err) - goto out_unlock; - } + /* Merges the @src inode tree. */ + err = tree_merge(src, dst, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; out_unlock: mutex_unlock(&src->lock); @@ -356,12 +386,40 @@ static int merge_ruleset(struct landlock_ruleset *const dst, return err; } +static int tree_copy(struct landlock_ruleset *const parent, + struct landlock_ruleset *const child, u16 rule_type) +{ + struct landlock_rule *walker_rule, *next_rule; + struct rb_root *parent_root; + int err = 0; + /* Choose rb_tree structure depending on a rule type */ + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + parent_root = &parent->root_inode; + break; + default: + return -EINVAL; + } + /* Copies the @parent inode tree. */ + rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, + parent_root, node) { + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = insert_rule(child, walker_rule->object.ptr, 0, + &walker_rule->layers, walker_rule->num_layers, + rule_type); + break; + } + if (err) + return err; + } + return err; +} static int inherit_ruleset(struct landlock_ruleset *const parent, struct landlock_ruleset *const child) { - struct landlock_rule *walker_rule, *next_rule; int err = 0; might_sleep(); @@ -372,15 +430,10 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, mutex_lock(&child->lock); mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); - /* Copies the @parent tree. */ - rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, - &parent->root_inode, node) { - err = insert_rule(child, walker_rule->object.ptr, 0, - &walker_rule->layers, walker_rule->num_layers, - LANDLOCK_RULE_PATH_BENEATH); - if (err) - goto out_unlock; - } + /* Copies the @parent inode tree. 
*/ + err = tree_copy(parent, child, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) { err = -EINVAL; @@ -410,7 +463,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) might_sleep(); rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) - free_rule(freeme); + free_rule(freeme, LANDLOCK_RULE_PATH_BENEATH); put_hierarchy(ruleset->hierarchy); kfree(ruleset); } From patchwork Wed Mar 9 13:44:49 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775092 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7A693C433F5 for ; Wed, 9 Mar 2022 13:45:26 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232912AbiCINqX (ORCPT ); Wed, 9 Mar 2022 08:46:23 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41358 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232807AbiCINqS (ORCPT ); Wed, 9 Mar 2022 08:46:18 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 567D517B8B1; Wed, 9 Mar 2022 05:45:17 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1h3RFfz67gYW; Wed, 9 Mar 2022 21:43:52 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:14 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 05/15] landlock: unmask_layers() function refactoring Date: Wed, 9 Mar 2022 21:44:49 +0800 Message-ID: <20220309134459.6448-6-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Unmask_layers() helper function moves to ruleset.c and rule_type argument is added. This modification supports implementing new rule types into next landlock versions. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Refactoring landlock_unmask_layers functions. 
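For illustration, the way check_access_path() consumes the relocated helper can be sketched as follows (simplified from the fs.c hunk below; the path walk, reference handling and the jump_up label are omitted):

	/* Bits of layer_mask stand for policy layers that have not yet
	 * granted access_request; each path component may clear some. */
	layer_mask = landlock_unmask_layers(domain, object_ptr,
					    access_request, layer_mask,
					    LANDLOCK_RULE_PATH_BENEATH);
	if (layer_mask == 0) {
		/* Every layer granted the request somewhere on the walk. */
		allowed = true;
	}
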
--- security/landlock/fs.c | 67 +++++++++---------------------------- security/landlock/ruleset.c | 44 ++++++++++++++++++++++++ security/landlock/ruleset.h | 5 +++ 3 files changed, 64 insertions(+), 52 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 1497948d754f..75ebdce5cd16 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -178,51 +178,6 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset, return err; } -/* Access-control management */ - -static inline u64 unmask_layers( - const struct landlock_ruleset *const domain, - const struct path *const path, const u32 access_request, - u64 layer_mask) -{ - const struct landlock_rule *rule; - const struct inode *inode; - size_t i; - - if (d_is_negative(path->dentry)) - /* Ignore nonexistent leafs. */ - return layer_mask; - inode = d_backing_inode(path->dentry); - rcu_read_lock(); - rule = landlock_find_rule(domain, - (uintptr_t)rcu_dereference(landlock_inode(inode)->object), - LANDLOCK_RULE_PATH_BENEATH); - rcu_read_unlock(); - if (!rule) - return layer_mask; - - /* - * An access is granted if, for each policy layer, at least one rule - * encountered on the pathwalk grants the requested accesses, - * regardless of their position in the layer stack. We must then check - * the remaining layers for each inode, from the first added layer to - * the last one. - */ - for (i = 0; i < rule->num_layers; i++) { - const struct landlock_layer *const layer = &rule->layers[i]; - const u64 layer_level = BIT_ULL(layer->level - 1); - - /* Checks that the layer grants access to the full request. */ - if ((layer->access & access_request) == access_request) { - layer_mask &= ~layer_level; - - if (layer_mask == 0) - return layer_mask; - } - } - return layer_mask; -} - static int check_access_path(const struct landlock_ruleset *const domain, const struct path *const path, u32 access_request) { @@ -268,15 +223,23 @@ static int check_access_path(const struct landlock_ruleset *const domain, */ while (true) { struct dentry *parent_dentry; + const struct inode *inode; + struct landlock_object *object_ptr; - layer_mask = unmask_layers(domain, &walker_path, - access_request, layer_mask); - if (layer_mask == 0) { - /* Stops when a rule from each layer grants access. */ - allowed = true; - break; + /* Ignore nonexistent leafs. */ + if (!d_is_negative(walker_path.dentry)) { + + inode = d_backing_inode(walker_path.dentry); + object_ptr = landlock_inode(inode)->object; + layer_mask = landlock_unmask_layers(domain, object_ptr, + access_request, layer_mask, + LANDLOCK_RULE_PATH_BENEATH); + if (layer_mask == 0) { + /* Stops when a rule from each layer grants access. 
*/ + allowed = true; + break; + } } - jump_up: if (walker_path.dentry == walker_path.mnt->mnt_root) { if (follow_up(&walker_path)) { diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index f2baa1c96b16..7179b10f3538 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -582,3 +582,47 @@ const struct landlock_rule *landlock_find_rule( } return NULL; } + +/* Access-control management */ +u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, + const struct landlock_object *object_ptr, + const u32 access_request, u64 layer_mask, + const u16 rule_type) +{ + const struct landlock_rule *rule; + size_t i; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + rcu_read_lock(); + rule = landlock_find_rule(domain, + (uintptr_t)rcu_dereference(object_ptr), + LANDLOCK_RULE_PATH_BENEATH); + rcu_read_unlock(); + break; + } + + if (!rule) + return layer_mask; + + /* + * An access is granted if, for each policy layer, at least one rule + * encountered on the pathwalk grants the requested accesses, + * regardless of their position in the layer stack. We must then check + * the remaining layers for each inode, from the first added layer to + * the last one. + */ + for (i = 0; i < rule->num_layers; i++) { + const struct landlock_layer *const layer = &rule->layers[i]; + const u64 layer_level = BIT_ULL(layer->level - 1); + + /* Checks that the layer grants access to the full request. */ + if ((layer->access & access_request) == access_request) { + layer_mask &= ~layer_level; + + if (layer_mask == 0) + return layer_mask; + } + } + return layer_mask; +} diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 088b8d95f653..0a7d4b1f51fd 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -183,4 +183,9 @@ void landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level); +u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, + const struct landlock_object *object_ptr, + const u32 access_request, u64 layer_mask, + const u16 rule_type); + #endif /* _SECURITY_LANDLOCK_RULESET_H */ From patchwork Wed Mar 9 13:44:50 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775091 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 75C65C35276 for ; Wed, 9 Mar 2022 13:45:25 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232911AbiCINqV (ORCPT ); Wed, 9 Mar 2022 08:46:21 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41324 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232835AbiCINqS (ORCPT ); Wed, 9 Mar 2022 08:46:18 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id CAE4D17B8B8; Wed, 9 Mar 2022 05:45:18 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1Z3HVNz6H6mm; Wed, 9 Mar 2022 21:43:46 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server 
(version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:16 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 06/15] landlock: landlock_add_rule syscall refactoring Date: Wed, 9 Mar 2022 21:44:50 +0800 Message-ID: <20220309134459.6448-7-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Landlock_add_rule syscall was refactored to support new rule types in future Landlock versions. Add_rule_path_beneath() helper was added to support current filesystem rules. It is called by the switch case. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Refactoring landlock_add_rule syscall. --- security/landlock/syscalls.c | 95 ++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 42 deletions(-) -- 2.25.1 diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 5931b666321d..8c0f6165fe3a 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -274,54 +274,13 @@ static int get_path_from_fd(const s32 fd, struct path *const path) return err; } -/** - * sys_landlock_add_rule - Add a new rule to a ruleset - * - * @ruleset_fd: File descriptor tied to the ruleset that should be extended - * with the new rule. - * @rule_type: Identify the structure type pointed to by @rule_attr (only - * LANDLOCK_RULE_PATH_BENEATH for now). - * @rule_attr: Pointer to a rule (only of type &struct - * landlock_path_beneath_attr for now). - * @flags: Must be 0. - * - * This system call enables to define a new rule and add it to an existing - * ruleset. - * - * Possible returned errors are: - * - * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; - * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. - * &landlock_path_beneath_attr.allowed_access is not a subset of the rule's - * accesses); - * - ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access); - * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a - * member of @rule_attr is not a file descriptor as expected; - * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of - * @rule_attr is not the expected file descriptor type (e.g. file open - * without O_PATH); - * - EPERM: @ruleset_fd has no write access to the underlying ruleset; - * - EFAULT: @rule_attr inconsistency. - */ -SYSCALL_DEFINE4(landlock_add_rule, - const int, ruleset_fd, const enum landlock_rule_type, rule_type, - const void __user *const, rule_attr, const __u32, flags) +static int add_rule_path_beneath(const int ruleset_fd, const void *const rule_attr) { struct landlock_path_beneath_attr path_beneath_attr; struct path path; struct landlock_ruleset *ruleset; int res, err; - if (!landlock_initialized) - return -EOPNOTSUPP; - - /* No flag for now. */ - if (flags) - return -EINVAL; - - if (rule_type != LANDLOCK_RULE_PATH_BENEATH) - return -EINVAL; - /* Copies raw user space buffer, only one type for now. 
*/ res = copy_from_user(&path_beneath_attr, rule_attr, sizeof(path_beneath_attr)); @@ -367,6 +326,58 @@ SYSCALL_DEFINE4(landlock_add_rule, return err; } +/** + * sys_landlock_add_rule - Add a new rule to a ruleset + * + * @ruleset_fd: File descriptor tied to the ruleset that should be extended + * with the new rule. + * @rule_type: Identify the structure type pointed to by @rule_attr (only + * LANDLOCK_RULE_PATH_BENEATH for now). + * @rule_attr: Pointer to a rule (only of type &struct + * landlock_path_beneath_attr for now). + * @flags: Must be 0. + * + * This system call enables to define a new rule and add it to an existing + * ruleset. + * + * Possible returned errors are: + * + * - EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; + * - EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. + * &landlock_path_beneath_attr.allowed_access is not a subset of the rule's + * accesses); + * - ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access); + * - EBADF: @ruleset_fd is not a file descriptor for the current thread, or a + * member of @rule_attr is not a file descriptor as expected; + * - EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of + * @rule_attr is not the expected file descriptor type (e.g. file open + * without O_PATH); + * - EPERM: @ruleset_fd has no write access to the underlying ruleset; + * - EFAULT: @rule_attr inconsistency. + */ +SYSCALL_DEFINE4(landlock_add_rule, + const int, ruleset_fd, const enum landlock_rule_type, rule_type, + const void __user *const, rule_attr, const __u32, flags) +{ + int err; + + if (!landlock_initialized) + return -EOPNOTSUPP; + + /* No flag for now. */ + if (flags) + return -EINVAL; + + switch (rule_type) { + case LANDLOCK_RULE_PATH_BENEATH: + err = add_rule_path_beneath(ruleset_fd, rule_attr); + break; + default: + err = -EINVAL; + } + return err; +} + /* Enforcement */ /** From patchwork Wed Mar 9 13:44:51 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775095 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id DBDE8C433F5 for ; Wed, 9 Mar 2022 13:45:39 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233199AbiCINqe (ORCPT ); Wed, 9 Mar 2022 08:46:34 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41528 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232881AbiCINqU (ORCPT ); Wed, 9 Mar 2022 08:46:20 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 1E0AD17BC49; Wed, 9 Mar 2022 05:45:20 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1l1Mp2z67gYW; Wed, 9 Mar 2022 21:43:55 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:17 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 07/15] landlock: user space API network support Date: Wed, 9 Mar 2022 21:44:51 +0800 Message-ID: 
<20220309134459.6448-8-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC User space API was refactored to support network actions. New network access flags, network rule and network attributes were added. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Refactoring User API for network rule type. --- include/uapi/linux/landlock.h | 48 +++++++++++++++++++++++++++++++++++ security/landlock/syscalls.c | 5 ++-- 2 files changed, 51 insertions(+), 2 deletions(-) -- 2.25.1 diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index b3d952067f59..4fc6c793fdf4 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -25,6 +25,13 @@ struct landlock_ruleset_attr { * compatibility reasons. */ __u64 handled_access_fs; + + /** + * @handled_access_net: Bitmask of actions (cf. `Network flags`_) + * that is handled by this ruleset and should then be forbidden if no + * rule explicitly allow them. + */ + __u64 handled_access_net; }; /* @@ -46,6 +53,11 @@ enum landlock_rule_type { * landlock_path_beneath_attr . */ LANDLOCK_RULE_PATH_BENEATH = 1, + /** + * @LANDLOCK_RULE_NET_SERVICE: Type of a &struct + * landlock_net_service_attr . + */ + LANDLOCK_RULE_NET_SERVICE = 2, }; /** @@ -70,6 +82,24 @@ struct landlock_path_beneath_attr { */ } __attribute__((packed)); +/** + * struct landlock_net_service_attr - TCP subnet definition + * + * Argument of sys_landlock_add_rule(). + */ +struct landlock_net_service_attr { + /** + * @allowed_access: Bitmask of allowed access network for services + * (cf. `Network flags`_). + */ + __u64 allowed_access; + /** + * @port: Network port + */ + __u16 port; + +} __attribute__((packed)); + /** * DOC: fs_access * @@ -134,4 +164,22 @@ struct landlock_path_beneath_attr { #define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11) #define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12) +/** + * DOC: net_access + * + * Network flags + * ~~~~~~~~~~~~~~~~ + * + * These flags enable to restrict a sandboxed process to a set of network + * actions. + * + * TCP sockets with allowed actions: + * + * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port. + * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to + * a remote port. + */ +#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0) +#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1) + #endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 8c0f6165fe3a..fcbce83d64ef 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -81,8 +81,9 @@ static void build_check_abi(void) * struct size. */ ruleset_size = sizeof(ruleset_attr.handled_access_fs); + ruleset_size += sizeof(ruleset_attr.handled_access_net); BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); - BUILD_BUG_ON(sizeof(ruleset_attr) != 8); + BUILD_BUG_ON(sizeof(ruleset_attr) != 16); path_beneath_size = sizeof(path_beneath_attr.allowed_access); path_beneath_size += sizeof(path_beneath_attr.parent_fd); @@ -184,7 +185,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset, /* Checks content (and 32-bits cast). 
*/ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != - LANDLOCK_MASK_ACCESS_FS) + LANDLOCK_MASK_ACCESS_FS) return -EINVAL; access_mask_set.fs = ruleset_attr.handled_access_fs; From patchwork Wed Mar 9 13:44:52 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775093 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B5C90C433FE for ; Wed, 9 Mar 2022 13:45:36 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232871AbiCINqd (ORCPT ); Wed, 9 Mar 2022 08:46:33 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41326 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232932AbiCINqa (ORCPT ); Wed, 9 Mar 2022 08:46:30 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A07CB17B8A5; Wed, 9 Mar 2022 05:45:21 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1m5Dr0z67Nc8; Wed, 9 Mar 2022 21:43:56 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:19 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 08/15] landlock: add support network rules Date: Wed, 9 Mar 2022 21:44:52 +0800 Message-ID: <20220309134459.6448-9-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC This modification adds network rules support in internal landlock functions (presented in ruleset.c) and landlock_create_ruleset syscall. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Add network rule support for internal landlock functions. * Add set_masks and get_masks for network. * Add rb_root root_net_port. 
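For illustration, the layout the new set/get helpers implement can be sketched as follows (based on the ruleset.c hunk below; pack_access_masks() is a hypothetical name used only to show the bit layout):

	/* One access_masks[] entry now carries both mask types: filesystem
	 * bits in the low half, network bits shifted above
	 * LANDLOCK_MASK_SHIFT_NET (16). */
	static inline u32 pack_access_masks(const u16 fs_mask, const u16 net_mask)
	{
		return (u32)fs_mask | ((u32)net_mask << LANDLOCK_MASK_SHIFT_NET);
	}

	/* landlock_get_net_access_mask() recovers the network half again: */
	net_mask = ruleset->access_masks[layer] >> LANDLOCK_MASK_SHIFT_NET;
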
--- security/landlock/fs.c | 2 +- security/landlock/limits.h | 6 +++ security/landlock/ruleset.c | 88 +++++++++++++++++++++++++++++++++--- security/landlock/ruleset.h | 14 +++++- security/landlock/syscalls.c | 10 +++- 5 files changed, 109 insertions(+), 11 deletions(-) -- 2.25.1 diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 75ebdce5cd16..5cd339061cdb 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -231,7 +231,7 @@ static int check_access_path(const struct landlock_ruleset *const domain, inode = d_backing_inode(walker_path.dentry); object_ptr = landlock_inode(inode)->object; - layer_mask = landlock_unmask_layers(domain, object_ptr, + layer_mask = landlock_unmask_layers(domain, object_ptr, 0, access_request, layer_mask, LANDLOCK_RULE_PATH_BENEATH); if (layer_mask == 0) { diff --git a/security/landlock/limits.h b/security/landlock/limits.h index 2a0a1095ee27..fdbef85e4de0 100644 --- a/security/landlock/limits.h +++ b/security/landlock/limits.h @@ -18,4 +18,10 @@ #define LANDLOCK_LAST_ACCESS_FS LANDLOCK_ACCESS_FS_MAKE_SYM #define LANDLOCK_MASK_ACCESS_FS ((LANDLOCK_LAST_ACCESS_FS << 1) - 1) +#define LANDLOCK_LAST_ACCESS_NET LANDLOCK_ACCESS_NET_CONNECT_TCP +#define LANDLOCK_MASK_ACCESS_NET ((LANDLOCK_LAST_ACCESS_NET << 1) - 1) +#define LANDLOCK_MASK_SHIFT_NET 16 + +#define LANDLOCK_RULE_TYPE_NUM LANDLOCK_RULE_NET_SERVICE + #endif /* _SECURITY_LANDLOCK_LIMITS_H */ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 7179b10f3538..1cecca59a942 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -35,6 +35,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers) refcount_set(&new_ruleset->usage, 1); mutex_init(&new_ruleset->lock); new_ruleset->root_inode = RB_ROOT; + new_ruleset->root_net_port = RB_ROOT; new_ruleset->num_layers = num_layers; /* * hierarchy = NULL @@ -58,16 +59,32 @@ u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask return ruleset->access_masks[mask_level]; } +/* A helper function to set a network mask */ +void landlock_set_net_access_mask(struct landlock_ruleset *ruleset, + const struct landlock_access_mask *access_mask_set, + u16 mask_level) +{ + ruleset->access_masks[mask_level] |= (access_mask_set->net << LANDLOCK_MASK_SHIFT_NET); +} + +/* A helper function to get a network mask */ +u32 landlock_get_net_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level) +{ + return (ruleset->access_masks[mask_level] >> LANDLOCK_MASK_SHIFT_NET); +} + struct landlock_ruleset *landlock_create_ruleset(const struct landlock_access_mask *access_mask_set) { struct landlock_ruleset *new_ruleset; /* Informs about useless ruleset. 
*/ - if (!access_mask_set->fs) + if (!access_mask_set->fs && !access_mask_set->net) return ERR_PTR(-ENOMSG); new_ruleset = create_ruleset(1); - if (!IS_ERR(new_ruleset)) + if (!IS_ERR(new_ruleset) && access_mask_set->fs) landlock_set_fs_access_mask(new_ruleset, access_mask_set, 0); + if (!IS_ERR(new_ruleset) && access_mask_set->net) + landlock_set_net_access_mask(new_ruleset, access_mask_set, 0); return new_ruleset; } @@ -111,6 +128,9 @@ static struct landlock_rule *create_rule( landlock_get_object(object_ptr); new_rule->object.ptr = object_ptr; break; + case LANDLOCK_RULE_NET_SERVICE: + new_rule->object.data = object_data; + break; default: return ERR_PTR(-EINVAL); } @@ -145,10 +165,12 @@ static void build_check_ruleset(void) .num_layers = ~0, }; typeof(ruleset.access_masks[0]) fs_access_mask = ~0; + typeof(ruleset.access_masks[0]) net_access_mask = ~0; BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES); BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS); BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS); + BUILD_BUG_ON(net_access_mask < LANDLOCK_MASK_ACCESS_NET); } /** @@ -191,6 +213,12 @@ static int insert_rule(struct landlock_ruleset *const ruleset, object = (uintptr_t)object_ptr; root = &ruleset->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + if (WARN_ON_ONCE(!object_data || !layers)) + return -ENOENT; + object = object_data; + root = &ruleset->root_net_port; + break; default: return -EINVAL; } @@ -242,6 +270,14 @@ static int insert_rule(struct landlock_ruleset *const ruleset, rb_replace_node(&this->node, &new_rule->node, &ruleset->root_inode); free_rule(this, rule_type); break; + case LANDLOCK_RULE_NET_SERVICE: + new_rule = create_rule(NULL, object_data, &this->layers, this->num_layers, + &(*layers)[0], rule_type); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_replace_node(&this->node, &new_rule->node, &ruleset->root_net_port); + free_rule(this, rule_type); + break; } return 0; } @@ -259,6 +295,14 @@ static int insert_rule(struct landlock_ruleset *const ruleset, rb_insert_color(&new_rule->node, &ruleset->root_inode); ruleset->num_rules++; break; + case LANDLOCK_RULE_NET_SERVICE: + new_rule = create_rule(NULL, object_data, layers, num_layers, NULL, rule_type); + if (IS_ERR(new_rule)) + return PTR_ERR(new_rule); + rb_link_node(&new_rule->node, parent_node, walker_node); + rb_insert_color(&new_rule->node, &ruleset->root_net_port); + ruleset->num_rules++; + break; } return 0; } @@ -319,6 +363,9 @@ static int tree_merge(struct landlock_ruleset *const src, case LANDLOCK_RULE_PATH_BENEATH: src_root = &src->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + src_root = &src->root_net_port; + break; default: return -EINVAL; } @@ -338,11 +385,14 @@ static int tree_merge(struct landlock_ruleset *const src, return err; } layers[0].access = walker_rule->layers[0].access; - switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: err = insert_rule(dst, walker_rule->object.ptr, 0, &layers, - ARRAY_SIZE(layers), rule_type); + ARRAY_SIZE(layers), rule_type); + break; + case LANDLOCK_RULE_NET_SERVICE: + err = insert_rule(dst, NULL, walker_rule->object.data, &layers, + ARRAY_SIZE(layers), rule_type); break; } if (err) @@ -379,6 +429,10 @@ static int merge_ruleset(struct landlock_ruleset *const dst, err = tree_merge(src, dst, LANDLOCK_RULE_PATH_BENEATH); if (err) goto out_unlock; + /* Merges the @src network tree. 
*/ + err = tree_merge(src, dst, LANDLOCK_RULE_NET_SERVICE); + if (err) + goto out_unlock; out_unlock: mutex_unlock(&src->lock); @@ -398,6 +452,9 @@ static int tree_copy(struct landlock_ruleset *const parent, case LANDLOCK_RULE_PATH_BENEATH: parent_root = &parent->root_inode; break; + case LANDLOCK_RULE_NET_SERVICE: + parent_root = &parent->root_net_port; + break; default: return -EINVAL; } @@ -410,6 +467,11 @@ static int tree_copy(struct landlock_ruleset *const parent, &walker_rule->layers, walker_rule->num_layers, rule_type); break; + case LANDLOCK_RULE_NET_SERVICE: + err = insert_rule(child, NULL, walker_rule->object.data, + &walker_rule->layers, walker_rule->num_layers, + rule_type); + break; } if (err) return err; @@ -432,6 +494,10 @@ static int inherit_ruleset(struct landlock_ruleset *const parent, /* Copies the @parent inode tree. */ err = tree_copy(parent, child, LANDLOCK_RULE_PATH_BENEATH); + if (err) + goto out_unlock; + /* Copies the @parent inode tree. */ + err = tree_copy(parent, child, LANDLOCK_RULE_NET_SERVICE); if (err) goto out_unlock; @@ -464,6 +530,9 @@ static void free_ruleset(struct landlock_ruleset *const ruleset) rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode, node) free_rule(freeme, LANDLOCK_RULE_PATH_BENEATH); + rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_net_port, + node) + free_rule(freeme, LANDLOCK_RULE_NET_SERVICE); put_hierarchy(ruleset->hierarchy); kfree(ruleset); } @@ -565,6 +634,9 @@ const struct landlock_rule *landlock_find_rule( case LANDLOCK_RULE_PATH_BENEATH: node = ruleset->root_inode.rb_node; break; + case LANDLOCK_RULE_NET_SERVICE: + node = ruleset->root_net_port.rb_node; + break; default: return ERR_PTR(-EINVAL); } @@ -586,8 +658,8 @@ const struct landlock_rule *landlock_find_rule( /* Access-control management */ u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, const struct landlock_object *object_ptr, - const u32 access_request, u64 layer_mask, - const u16 rule_type) + const u16 port, const u32 access_request, + u64 layer_mask, const u16 rule_type) { const struct landlock_rule *rule; size_t i; @@ -600,6 +672,10 @@ u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, LANDLOCK_RULE_PATH_BENEATH); rcu_read_unlock(); break; + case LANDLOCK_RULE_NET_SERVICE: + rule = landlock_find_rule(domain, (uintptr_t)port, + LANDLOCK_RULE_NET_SERVICE); + break; } if (!rule) diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 0a7d4b1f51fd..abf3e09a65cd 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -24,6 +24,10 @@ struct landlock_access_mask { * @fs: Filesystem access mask. */ u16 fs; + /** + * @net: Network access mask. + */ + u16 net; }; /** @@ -98,6 +102,12 @@ struct landlock_ruleset { * tree is immutable until @usage reaches zero. */ struct rb_root root_inode; + /** + * @root_net_port: Root of a red-black tree containing object nodes + * for network port. Once a ruleset is tied to a process (i.e. as a domain), + * this tree is immutable until @usage reaches zero. + */ + struct rb_root root_net_port; /** * @hierarchy: Enables hierarchy identification even when a parent * domain vanishes. This is needed for the ptrace protection. 
@@ -185,7 +195,7 @@ u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, const struct landlock_object *object_ptr, - const u32 access_request, u64 layer_mask, - const u16 rule_type); + const u16 port, const u32 access_request, + u64 layer_mask, const u16 rule_type); #endif /* _SECURITY_LANDLOCK_RULESET_H */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index fcbce83d64ef..b91455a19356 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -160,7 +160,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset, { struct landlock_ruleset_attr ruleset_attr; struct landlock_ruleset *ruleset; - struct landlock_access_mask access_mask_set = {.fs = 0}; + struct landlock_access_mask access_mask_set = {.fs = 0, .net = 0}; int err, ruleset_fd; /* Build-time checks. */ @@ -187,8 +187,14 @@ SYSCALL_DEFINE3(landlock_create_ruleset, if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != LANDLOCK_MASK_ACCESS_FS) return -EINVAL; - access_mask_set.fs = ruleset_attr.handled_access_fs; + /* Checks network content (and 32-bits cast). */ + if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) != + LANDLOCK_MASK_ACCESS_NET) + return -EINVAL; + + access_mask_set.fs = ruleset_attr.handled_access_fs; + access_mask_set.net = ruleset_attr.handled_access_net; /* Checks arguments and transforms to kernel struct. */ ruleset = landlock_create_ruleset(&access_mask_set); if (IS_ERR(ruleset)) From patchwork Wed Mar 9 13:44:53 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775094 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 35A2BC4167B for ; Wed, 9 Mar 2022 13:45:37 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232965AbiCINqd (ORCPT ); Wed, 9 Mar 2022 08:46:33 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42290 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232940AbiCINqa (ORCPT ); Wed, 9 Mar 2022 08:46:30 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id D2AF917B8AB; Wed, 9 Mar 2022 05:45:22 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.206]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD2s1GqCz67bVT; Wed, 9 Mar 2022 21:44:53 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:20 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 09/15] landlock: TCP network hooks implementation Date: Wed, 9 Mar 2022 21:44:53 +0800 Message-ID: <20220309134459.6448-10-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) 
X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Support of socket_bind() and socket_connect() hooks. Its possible to restrict binding and connecting of TCP types of sockets to particular ports. Its just basic idea how Landlock could support network confinement. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Add SECURITY_NETWORK in config. * Add IS_ENABLED(CONFIG_INET) if a kernel has no INET configuration. * Add hook_socket_bind and hook_socket_connect hooks. --- security/landlock/Kconfig | 1 + security/landlock/Makefile | 2 +- security/landlock/net.c | 180 +++++++++++++++++++++++++++++++++++ security/landlock/net.h | 22 +++++ security/landlock/ruleset.h | 6 ++ security/landlock/setup.c | 2 + security/landlock/syscalls.c | 61 +++++++++++- 7 files changed, 271 insertions(+), 3 deletions(-) create mode 100644 security/landlock/net.c create mode 100644 security/landlock/net.h -- 2.25.1 diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig index 8e33c4e8ffb8..2741f97169a7 100644 --- a/security/landlock/Kconfig +++ b/security/landlock/Kconfig @@ -4,6 +4,7 @@ config SECURITY_LANDLOCK bool "Landlock support" depends on SECURITY && !ARCH_EPHEMERAL_INODES select SECURITY_PATH + select SECURITY_NETWORK help Landlock is a sandboxing mechanism that enables processes to restrict themselves (and their future children) by gradually enforcing diff --git a/security/landlock/Makefile b/security/landlock/Makefile index 7bbd2f413b3e..afa44baaa83a 100644 --- a/security/landlock/Makefile +++ b/security/landlock/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o landlock-y := setup.o syscalls.o object.o ruleset.o \ - cred.o ptrace.o fs.o + cred.o ptrace.o fs.o net.o diff --git a/security/landlock/net.c b/security/landlock/net.c new file mode 100644 index 000000000000..7fbb857c39e2 --- /dev/null +++ b/security/landlock/net.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Landlock LSM - Network management and hooks + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. + * Author: Konstantin Meskhidze + * + */ + +#include +#include +#include +#include + +#include "cred.h" +#include "limits.h" +#include "net.h" + +int landlock_append_net_rule(struct landlock_ruleset *const ruleset, + u16 port, u32 access_rights) +{ + int err; + + /* Transforms relative access rights to absolute ones. */ + access_rights |= LANDLOCK_MASK_ACCESS_NET & + ~landlock_get_net_access_mask(ruleset, 0); + + mutex_lock(&ruleset->lock); + err = landlock_insert_rule(ruleset, NULL, (uintptr_t)port, access_rights, + LANDLOCK_RULE_NET_SERVICE); + mutex_unlock(&ruleset->lock); + + return err; +} + +static int check_socket_access(const struct landlock_ruleset *const domain, + u16 port, u32 access_request) +{ + bool allowed = false; + u64 layer_mask; + size_t i; + + /* Make sure all layers can be checked. */ + BUILD_BUG_ON(BITS_PER_TYPE(layer_mask) < LANDLOCK_MAX_NUM_LAYERS); + + if (WARN_ON_ONCE(!domain)) + return 0; + if (WARN_ON_ONCE(domain->num_layers < 1)) + return -EACCES; + + /* + * Saves all layers handling a subset of requested + * socket access rules. + */ + layer_mask = 0; + for (i = 0; i < domain->num_layers; i++) { + if (landlock_get_net_access_mask(domain, i) & access_request) + layer_mask |= BIT_ULL(i); + } + /* An access request not handled by the domain is allowed. */ + if (layer_mask == 0) + return 0; + + /* + * We need to walk through all the hierarchy to not miss any relevant + * restriction. 
+ */ + layer_mask = landlock_unmask_layers(domain, NULL, port, + access_request, layer_mask, + LANDLOCK_RULE_NET_SERVICE); + if (layer_mask == 0) + allowed = true; + + return allowed ? 0 : -EACCES; +} + +static int hook_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) +{ +#if IS_ENABLED(CONFIG_INET) + short socket_type; + struct sockaddr_in *sockaddr; + struct sockaddr_in6 *sockaddr_ip6; + u16 port; + const struct landlock_ruleset *const dom = landlock_get_current_domain(); + + if (!dom) + return 0; + + /* Check if the hook is AF_INET* socket's action */ + if ((address->sa_family != AF_INET) && (address->sa_family != AF_INET6)) + return 0; + + socket_type = sock->type; + /* Check if it's a TCP socket */ + if (socket_type != SOCK_STREAM) + return 0; + + /* Get port value in host byte order */ + switch (address->sa_family) { + case AF_INET: + sockaddr = (struct sockaddr_in *)address; + port = ntohs(sockaddr->sin_port); + break; + case AF_INET6: + sockaddr_ip6 = (struct sockaddr_in6 *)address; + port = ntohs(sockaddr_ip6->sin6_port); + break; + } + + return check_socket_access(dom, port, LANDLOCK_ACCESS_NET_BIND_TCP); +#else + return 0; +#endif +} + +static int hook_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen) +{ +#if IS_ENABLED(CONFIG_INET) + short socket_type; + struct sockaddr_in *sockaddr; + struct sockaddr_in6 *sockaddr_ip6; + u16 port; + const struct landlock_ruleset *const dom = landlock_get_current_domain(); + + if (!dom) + return 0; + + /* Check if the hook is AF_INET* socket's action */ + if ((address->sa_family != AF_INET) && (address->sa_family != AF_INET6)) { + /* Check if the socket_connect() hook has AF_UNSPEC flag*/ + if (address->sa_family == AF_UNSPEC) { + u16 i; + /* + * If just in a layer a mask supports connect access, + * the socket_connect() hook with AF_UNSPEC family flag + * must be banned. This prevents from disconnecting already + * connected sockets. + */ + for (i = 0; i < dom->num_layers; i++) { + if (landlock_get_net_access_mask(dom, i) & + LANDLOCK_ACCESS_NET_CONNECT_TCP) + return -EACCES; + } + } + return 0; + } + + socket_type = sock->type; + /* Check if it's a TCP socket */ + if (socket_type != SOCK_STREAM) + return 0; + + /* Get port value in host byte order */ + switch (address->sa_family) { + case AF_INET: + sockaddr = (struct sockaddr_in *)address; + port = ntohs(sockaddr->sin_port); + break; + case AF_INET6: + sockaddr_ip6 = (struct sockaddr_in6 *)address; + port = ntohs(sockaddr_ip6->sin6_port); + break; + } + + return check_socket_access(dom, port, LANDLOCK_ACCESS_NET_CONNECT_TCP); +#else + return 0; +#endif +} + +static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = { + LSM_HOOK_INIT(socket_bind, hook_socket_bind), + LSM_HOOK_INIT(socket_connect, hook_socket_connect), +}; + +__init void landlock_add_net_hooks(void) +{ + security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks), + LANDLOCK_NAME); +} diff --git a/security/landlock/net.h b/security/landlock/net.h new file mode 100644 index 000000000000..345bdc1dc84f --- /dev/null +++ b/security/landlock/net.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Landlock LSM - Network management and hooks + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. 
+ * Author: Konstantin Meskhidze + * + */ + +#ifndef _SECURITY_LANDLOCK_NET_H +#define _SECURITY_LANDLOCK_NET_H + +#include "common.h" +#include "ruleset.h" +#include "setup.h" + +__init void landlock_add_net_hooks(void); + +int landlock_append_net_rule(struct landlock_ruleset *const ruleset, + u16 port, u32 access_hierarchy); + +#endif /* _SECURITY_LANDLOCK_NET_H */ diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index abf3e09a65cd..74e9d3d26bd6 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -193,6 +193,12 @@ void landlock_set_fs_access_mask(struct landlock_ruleset *ruleset, u32 landlock_get_fs_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level); +void landlock_set_net_access_mask(struct landlock_ruleset *ruleset, + const struct landlock_access_mask *access_mask_set, + u16 mask_level); + +u32 landlock_get_net_access_mask(const struct landlock_ruleset *ruleset, u16 mask_level); + u64 landlock_unmask_layers(const struct landlock_ruleset *const domain, const struct landlock_object *object_ptr, const u16 port, const u32 access_request, diff --git a/security/landlock/setup.c b/security/landlock/setup.c index f8e8e980454c..8059dc0b47d3 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -14,6 +14,7 @@ #include "fs.h" #include "ptrace.h" #include "setup.h" +#include "net.h" bool landlock_initialized __lsm_ro_after_init = false; @@ -28,6 +29,7 @@ static int __init landlock_init(void) landlock_add_cred_hooks(); landlock_add_ptrace_hooks(); landlock_add_fs_hooks(); + landlock_add_net_hooks(); landlock_initialized = true; pr_info("Up and running.\n"); return 0; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index b91455a19356..2d45ea94e6d2 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -29,6 +29,7 @@ #include "cred.h" #include "fs.h" #include "limits.h" +#include "net.h" #include "ruleset.h" #include "setup.h" @@ -73,7 +74,8 @@ static void build_check_abi(void) { struct landlock_ruleset_attr ruleset_attr; struct landlock_path_beneath_attr path_beneath_attr; - size_t ruleset_size, path_beneath_size; + struct landlock_net_service_attr net_service_attr; + size_t ruleset_size, path_beneath_size, net_service_size; /* * For each user space ABI structures, first checks that there is no @@ -89,6 +91,11 @@ static void build_check_abi(void) path_beneath_size += sizeof(path_beneath_attr.parent_fd); BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); + + net_service_size = sizeof(net_service_attr.allowed_access); + net_service_size += sizeof(net_service_attr.port); + BUILD_BUG_ON(sizeof(net_service_attr) != net_service_size); + BUILD_BUG_ON(sizeof(net_service_attr) != 10); } /* Ruleset handling */ @@ -311,7 +318,6 @@ static int add_rule_path_beneath(const int ruleset_fd, const void *const rule_at * Checks that allowed_access matches the @ruleset constraints * (ruleset->access_masks[0] is automatically upgraded to 64-bits). 
*/ - if ((path_beneath_attr.allowed_access | landlock_get_fs_access_mask(ruleset, 0)) != landlock_get_fs_access_mask(ruleset, 0)) { err = -EINVAL; @@ -333,6 +339,50 @@ static int add_rule_path_beneath(const int ruleset_fd, const void *const rule_at return err; } +static int add_rule_net_service(const int ruleset_fd, const void *const rule_attr) +{ + struct landlock_net_service_attr net_service_attr; + struct landlock_ruleset *ruleset; + int res, err; + + /* Copies raw user space buffer, only one type for now. */ + res = copy_from_user(&net_service_attr, rule_attr, + sizeof(net_service_attr)); + if (res) + return -EFAULT; + + /* Gets and checks the ruleset. */ + ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + /* + * Informs about useless rule: empty allowed_access (i.e. deny rules) + * are ignored by network actions + */ + if (!net_service_attr.allowed_access) { + err = -ENOMSG; + goto out_put_ruleset; + } + /* + * Checks that allowed_access matches the @ruleset constraints + * (ruleset->access_masks[0] is automatically upgraded to 64-bits). + */ + if ((net_service_attr.allowed_access | landlock_get_net_access_mask(ruleset, 0)) != + landlock_get_net_access_mask(ruleset, 0)) { + err = -EINVAL; + goto out_put_ruleset; + } + + /* Imports the new rule. */ + err = landlock_append_net_rule(ruleset, net_service_attr.port, + net_service_attr.allowed_access); + +out_put_ruleset: + landlock_put_ruleset(ruleset); + return err; +} + /** * sys_landlock_add_rule - Add a new rule to a ruleset * @@ -379,6 +429,13 @@ SYSCALL_DEFINE4(landlock_add_rule, case LANDLOCK_RULE_PATH_BENEATH: err = add_rule_path_beneath(ruleset_fd, rule_attr); break; + case LANDLOCK_RULE_NET_SERVICE: +#if IS_ENABLED(CONFIG_INET) + err = add_rule_net_service(ruleset_fd, rule_attr); +#else + err = -EOPNOTSUPP; +#endif + break; default: err = -EINVAL; } From patchwork Wed Mar 9 13:44:54 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775096 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8D1DBC433FE for ; Wed, 9 Mar 2022 13:46:04 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230109AbiCINrB (ORCPT ); Wed, 9 Mar 2022 08:47:01 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41532 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233019AbiCINqb (ORCPT ); Wed, 9 Mar 2022 08:46:31 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3182D17B8BE; Wed, 9 Mar 2022 05:45:24 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.207]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1g5hg6z6H6mm; Wed, 9 Mar 2022 21:43:51 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:21 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 10/15] seltest/landlock: add tests for bind() hooks Date: Wed, 9 Mar 2022 21:44:54 +0800 Message-ID: 
<20220309134459.6448-11-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Adds two selftests for bind socket action. The one is with no landlock restrictions: - bind_no_restrictions; The second one is with mixed landlock rules: - bind_with_restrictions; Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. * Add helper create_socket. * Add FIXTURE_SETUP. --- .../testing/selftests/landlock/network_test.c | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 tools/testing/selftests/landlock/network_test.c -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c new file mode 100644 index 000000000000..4c60f6d973a8 --- /dev/null +++ b/tools/testing/selftests/landlock/network_test.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Landlock tests - Network + * + * Copyright (C) 2022 Huawei Tech. Co., Ltd. + * Author: Konstantin Meskhidze + * + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define MAX_SOCKET_NUM 10 + +#define SOCK_PORT_START 3470 +#define SOCK_PORT_ADD 10 + +#define IP_ADDRESS "127.0.0.1" + +uint port[MAX_SOCKET_NUM]; +struct sockaddr_in addr[MAX_SOCKET_NUM]; + +const int one = 1; + +/* Number pending connections queue to be hold */ +#define BACKLOG 10 + +static int create_socket(struct __test_metadata *const _metadata) +{ + + int sockfd; + + sockfd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, 0); + ASSERT_LE(0, sockfd); + /* Allows to reuse of local address */ + ASSERT_EQ(0, setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))); + + return sockfd; +} + +static void enforce_ruleset(struct __test_metadata *const _metadata, + const int ruleset_fd) +{ + ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) { + TH_LOG("Failed to enforce ruleset: %s", strerror(errno)); + } +} + +FIXTURE(socket) { }; + +FIXTURE_SETUP(socket) +{ + int i; + /* Creates socket addresses */ + for (i = 0; i < MAX_SOCKET_NUM; i++) { + port[i] = SOCK_PORT_START + SOCK_PORT_ADD*i; + addr[i].sin_family = AF_INET; + addr[i].sin_port = htons(port[i]); + addr[i].sin_addr.s_addr = inet_addr(IP_ADDRESS); + memset(&(addr[i].sin_zero), '\0', 8); + } +} + +FIXTURE_TEARDOWN(socket) +{ } + +TEST_F_FORK(socket, bind_no_restrictions) { + + int sockfd; + + sockfd = create_socket(_metadata); + ASSERT_LE(0, sockfd); + + /* Binds a socket to port[0] */ + ASSERT_EQ(0, bind(sockfd, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + ASSERT_EQ(0, close(sockfd)); +} + +TEST_F_FORK(socket, bind_with_restrictions) { + + int sockfd_1, sockfd_2, sockfd_3; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = 
LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = port[1], + }; + struct landlock_net_service_attr net_service_3 = { + .allowed_access = 0, + .port = port[2], + }; + + const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows connect and bind operations to the port[0] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and deny bind operations to the port[1] socket. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + /* Empty allowed_access (i.e. deny rules) are ignored in network actions + * for port[2] socket. + */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + ASSERT_EQ(ENOMSG, errno); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + sockfd_1 = create_socket(_metadata); + ASSERT_LE(0, sockfd_1); + /* Binds a socket to port[0] */ + ASSERT_EQ(0, bind(sockfd_1, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Close bounded socket*/ + ASSERT_EQ(0, close(sockfd_1)); + + sockfd_2 = create_socket(_metadata); + ASSERT_LE(0, sockfd_2); + /* Binds a socket to port[1] */ + ASSERT_EQ(-1, bind(sockfd_2, (struct sockaddr *)&addr[1], sizeof(addr[1]))); + ASSERT_EQ(EACCES, errno); + + sockfd_3 = create_socket(_metadata); + ASSERT_LE(0, sockfd_3); + /* Binds a socket to port[2] */ + ASSERT_EQ(-1, bind(sockfd_3, (struct sockaddr *)&addr[2], sizeof(addr[2]))); + ASSERT_EQ(EACCES, errno); +} +TEST_HARNESS_MAIN From patchwork Wed Mar 9 13:44:55 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775100 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D7156C433F5 for ; Wed, 9 Mar 2022 13:46:12 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233015AbiCINrH (ORCPT ); Wed, 9 Mar 2022 08:47:07 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42252 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233103AbiCINqb (ORCPT ); Wed, 9 Mar 2022 08:46:31 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 8158717BC67; Wed, 9 Mar 2022 05:45:25 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.206]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1r4FJxz67gYW; Wed, 9 Mar 2022 21:44:00 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:23 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 11/15] seltest/landlock: add tests for connect() hooks Date: Wed, 9 Mar 2022 21:44:55 +0800 Message-ID: <20220309134459.6448-12-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) 
To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Adds two selftests for connect socket action. The one is with no landlock restrictions: - connect_no_restrictions; The second one is with mixed landlock rules: - connect_with_restrictions; Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Split commit. --- .../testing/selftests/landlock/network_test.c | 162 ++++++++++++++++++ 1 file changed, 162 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c index 4c60f6d973a8..20f2d94d6d85 100644 --- a/tools/testing/selftests/landlock/network_test.c +++ b/tools/testing/selftests/landlock/network_test.c @@ -150,4 +150,166 @@ TEST_F_FORK(socket, bind_with_restrictions) { ASSERT_EQ(-1, bind(sockfd_3, (struct sockaddr *)&addr[2], sizeof(addr[2]))); ASSERT_EQ(EACCES, errno); } + +TEST_F_FORK(socket, connect_no_restrictions) { + + int sockfd, new_fd; + pid_t child; + int status; + + /* Creates a server socket */ + sockfd = create_socket(_metadata); + ASSERT_LE(0, sockfd); + + /* Binds a socket to port[0] */ + ASSERT_EQ(0, bind(sockfd, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes listening socket */ + ASSERT_EQ(0, listen(sockfd, BACKLOG)); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + int child_sockfd; + + /* Closes listening socket for the child */ + ASSERT_EQ(0, close(sockfd)); + /* Create a stream client socket */ + child_sockfd = create_socket(_metadata); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket */ + ASSERT_EQ(0, connect(child_sockfd, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Accepts connection from the child */ + new_fd = accept(sockfd, NULL, 0); + ASSERT_LE(0, new_fd); + + /* Closes connection */ + ASSERT_EQ(0, close(new_fd)); + + /* Closes listening socket for the parent*/ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} + +TEST_F_FORK(socket, connect_with_restrictions) { + + int new_fd; + int sockfd_1, sockfd_2; + pid_t child_1, child_2; + int status; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = port[1], + }; + + const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows connect and bind operations to the port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and deny bind operations to the port[1] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + + /* Enforces the ruleset. 
*/ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket 1 */ + sockfd_1 = create_socket(_metadata); + ASSERT_LE(0, sockfd_1); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd_1, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes listening socket 1 */ + ASSERT_EQ(0, listen(sockfd_1, BACKLOG)); + + child_1 = fork(); + ASSERT_LE(0, child_1); + if (child_1 == 0) { + int child_sockfd; + + /* Closes listening socket for the child */ + ASSERT_EQ(0, close(sockfd_1)); + /* Creates a stream client socket */ + child_sockfd = create_socket(_metadata); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket */ + ASSERT_EQ(0, connect(child_sockfd, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Accepts connection from the child 1 */ + new_fd = accept(sockfd_1, NULL, 0); + ASSERT_LE(0, new_fd); + + /* Closes connection */ + ASSERT_EQ(0, close(new_fd)); + + /* Closes listening socket 1 for the parent*/ + ASSERT_EQ(0, close(sockfd_1)); + + ASSERT_EQ(child_1, waitpid(child_1, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); + + /* Creates a server socket 2 */ + sockfd_2 = create_socket(_metadata); + ASSERT_LE(0, sockfd_2); + + /* Binds the socket 2 to address with port[1] */ + ASSERT_EQ(0, bind(sockfd_2, (struct sockaddr *)&addr[1], sizeof(addr[1]))); + + /* Makes listening socket 2 */ + ASSERT_EQ(0, listen(sockfd_2, BACKLOG)); + + child_2 = fork(); + ASSERT_LE(0, child_2); + if (child_2 == 0) { + int child_sockfd; + + /* Closes listening socket for the child */ + ASSERT_EQ(0, close(sockfd_2)); + /* Creates a stream client socket */ + child_sockfd = create_socket(_metadata); + ASSERT_LE(0, child_sockfd); + + /* Makes connection to the listening socket */ + ASSERT_EQ(-1, connect(child_sockfd, (struct sockaddr *)&addr[1], + sizeof(addr[1]))); + ASSERT_EQ(EACCES, errno); + _exit(_metadata->passed ? 
EXIT_SUCCESS : EXIT_FAILURE); + return; + } + + /* Closes listening socket 2 for the parent*/ + ASSERT_EQ(0, close(sockfd_2)); + + ASSERT_EQ(child_2, waitpid(child_2, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} + TEST_HARNESS_MAIN From patchwork Wed Mar 9 13:44:56 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775098 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id E21ECC3525B for ; Wed, 9 Mar 2022 13:46:07 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233379AbiCINrD (ORCPT ); Wed, 9 Mar 2022 08:47:03 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43032 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232881AbiCINqj (ORCPT ); Wed, 9 Mar 2022 08:46:39 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E01B417C400; Wed, 9 Mar 2022 05:45:26 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.201]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1k3PFDz6H7L5; Wed, 9 Mar 2022 21:43:54 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:24 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 12/15] seltest/landlock: connect() with AF_UNSPEC tests Date: Wed, 9 Mar 2022 21:44:56 +0800 Message-ID: <20220309134459.6448-13-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC Adds two selftests for connect() action with AF_UNSPEC family flag. The one is with no landlock restrictions allows to disconnect already conneted socket with connect(..., AF_UNSPEC, ...): - connect_afunspec_no_restictions; The second one refuses landlocked process to disconnect already connected socket: - connect_afunspec_with_restictions; Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Add connect_afunspec_no_restictions test. * Add connect_afunspec_with_restictions test. 
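For context on the tests added below: user space commonly tears down an established association by calling connect() with an AF_UNSPEC address. The sketch below is an illustration only (sockfd is assumed to be an already connected TCP socket of the sandboxed process); it shows the idiom that hook_socket_connect() now refuses whenever a layer of the domain handles LANDLOCK_ACCESS_NET_CONNECT_TCP.

#include <string.h>
#include <sys/socket.h>

/* The usual "disconnect via AF_UNSPEC" idiom. */
static int tcp_disconnect(const int sockfd)
{
	struct sockaddr unspec;

	memset(&unspec, 0, sizeof(unspec));
	unspec.sa_family = AF_UNSPEC;
	/* Without Landlock this resets the association; with a domain that
	 * handles LANDLOCK_ACCESS_NET_CONNECT_TCP it fails with EACCES and
	 * the socket stays connected. */
	return connect(sockfd, &unspec, sizeof(unspec));
}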
--- .../testing/selftests/landlock/network_test.c | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c index 20f2d94d6d85..6fce31cad368 100644 --- a/tools/testing/selftests/landlock/network_test.c +++ b/tools/testing/selftests/landlock/network_test.c @@ -312,4 +312,98 @@ TEST_F_FORK(socket, connect_with_restrictions) { ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); } +TEST_F_FORK(socket, connect_afunspec_no_restictions) { + + int sockfd; + pid_t child; + int status; + + /* Creates a server socket 1 */ + sockfd = create_socket(_metadata); + ASSERT_LE(0, sockfd); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes connection to socket with port[0] */ + ASSERT_EQ(0, connect(sockfd, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + struct sockaddr addr_unspec = {.sa_family = AF_UNSPEC}; + + /* Child tries to disconnect already connected socket */ + ASSERT_EQ(0, connect(sockfd, (struct sockaddr *)&addr_unspec, + sizeof(addr_unspec))); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Closes listening socket 1 for the parent*/ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} + +TEST_F_FORK(socket, connect_afunspec_with_restictions) { + + int sockfd; + pid_t child; + int status; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = port[0], + }; + + const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows connect and bind operations to the port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket 1 */ + sockfd = create_socket(_metadata); + ASSERT_LE(0, sockfd); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes connection to socket with port[0] */ + ASSERT_EQ(0, connect(sockfd, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + struct sockaddr addr_unspec = {.sa_family = AF_UNSPEC}; + + /* Child tries to disconnect already connected socket */ + ASSERT_EQ(-1, connect(sockfd, (struct sockaddr *)&addr_unspec, + sizeof(addr_unspec))); + ASSERT_EQ(EACCES, errno); + _exit(_metadata->passed ? 
EXIT_SUCCESS : EXIT_FAILURE); + return; + } + /* Closes listening socket 1 for the parent*/ + ASSERT_EQ(0, close(sockfd)); + + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); +} TEST_HARNESS_MAIN From patchwork Wed Mar 9 13:44:57 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775097 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8418EC433F5 for ; Wed, 9 Mar 2022 13:46:06 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233331AbiCINrB (ORCPT ); Wed, 9 Mar 2022 08:47:01 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42420 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233187AbiCINqc (ORCPT ); Wed, 9 Mar 2022 08:46:32 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3666E17C40D; Wed, 9 Mar 2022 05:45:28 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.200]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1v1J61z67Lqc; Wed, 9 Mar 2022 21:44:03 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:25 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 13/15] seltest/landlock: rules overlapping test Date: Wed, 9 Mar 2022 21:44:57 +0800 Message-ID: <20220309134459.6448-14-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC This patch adds overlapping rules for one port. First rule adds just bind() access right for a port. The second one adds both bind() and connect() access rights for the same port. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Add ruleset_overlap test. 
--- .../testing/selftests/landlock/network_test.c | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c index 6fce31cad368..e1f219fd9f31 100644 --- a/tools/testing/selftests/landlock/network_test.c +++ b/tools/testing/selftests/landlock/network_test.c @@ -406,4 +406,55 @@ TEST_F_FORK(socket, connect_afunspec_with_restictions) { ASSERT_EQ(1, WIFEXITED(status)); ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); } + +TEST_F_FORK(socket, ruleset_overlap) { + + int sockfd; + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = port[0], + }; + + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = port[0], + }; + + const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Allows bind operations to the port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + /* Allows connect and bind operations to the port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + + /* Creates a server socket */ + sockfd = create_socket(_metadata); + ASSERT_LE(0, sockfd); + + /* Binds the socket to address with port[0] */ + ASSERT_EQ(0, bind(sockfd, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes connection to socket with port[0] */ + ASSERT_EQ(0, connect(sockfd, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + + /* Closes socket */ + ASSERT_EQ(0, close(sockfd)); +} + TEST_HARNESS_MAIN From patchwork Wed Mar 9 13:44:58 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775099 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id F173AC433F5 for ; Wed, 9 Mar 2022 13:46:09 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231390AbiCINrE (ORCPT ); Wed, 9 Mar 2022 08:47:04 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:43592 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232996AbiCINqp (ORCPT ); Wed, 9 Mar 2022 08:46:45 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 98B9317C40E; Wed, 9 Mar 2022 05:45:29 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.226]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD1n16wNz6H76Z; Wed, 9 Mar 2022 21:43:57 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:27 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 14/15] seltest/landlock: ruleset expanding test Date: Wed, 9 Mar 
2022 21:44:58 +0800 Message-ID: <20220309134459.6448-15-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC This patch adds expanding rulesets in which rules are gradually added one by one, restricting sockets' connections. Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Add ruleset_expanding test. --- .../testing/selftests/landlock/network_test.c | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c index e1f219fd9f31..8fa2a349329c 100644 --- a/tools/testing/selftests/landlock/network_test.c +++ b/tools/testing/selftests/landlock/network_test.c @@ -457,4 +457,157 @@ TEST_F_FORK(socket, ruleset_overlap) { ASSERT_EQ(0, close(sockfd)); } +TEST_F_FORK(socket, ruleset_expanding) { + + int sockfd_1, sockfd_2; + + struct landlock_ruleset_attr ruleset_attr_1 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP, + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = port[0], + }; + + const int ruleset_fd_1 = landlock_create_ruleset(&ruleset_attr_1, + sizeof(ruleset_attr_1), 0); + ASSERT_LE(0, ruleset_fd_1); + + /* Adds rule to port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_1, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_1); + ASSERT_EQ(0, close(ruleset_fd_1)); + + /* Creates a socket 1 */ + sockfd_1 = create_socket(_metadata); + ASSERT_LE(0, sockfd_1); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd_1, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes connection to socket 1 with port[0] */ + ASSERT_EQ(0, connect(sockfd_1, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + + /* Closes socket 1 */ + ASSERT_EQ(0, close(sockfd_1)); + + /* Creates a socket 2 */ + sockfd_2 = create_socket(_metadata); + ASSERT_LE(0, sockfd_2); + + /* + * Forbids to bind the socket 2 to address with port[1], + * cause there is no rule with bind() access for port[1]. 
+ */ + ASSERT_EQ(-1, bind(sockfd_2, (struct sockaddr *)&addr[1], sizeof(addr[1]))); + ASSERT_EQ(EACCES, errno); + + /* Expands network mask */ + struct landlock_ruleset_attr ruleset_attr_2 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + + /* Adds connect() access to port[0] */ + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + + .port = port[0], + }; + /* Adds bind() access to port[1] */ + struct landlock_net_service_attr net_service_3 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = port[1], + }; + + const int ruleset_fd_2 = landlock_create_ruleset(&ruleset_attr_2, + sizeof(ruleset_attr_2), 0); + ASSERT_LE(0, ruleset_fd_2); + + /* Adds rule to port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_2, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + /* Adds rule to port[1] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_2, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_2); + ASSERT_EQ(0, close(ruleset_fd_2)); + + /* Creates a socket 1 */ + sockfd_1 = create_socket(_metadata); + ASSERT_LE(0, sockfd_1); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd_1, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* Makes connection to socket 1 with port[0] */ + ASSERT_EQ(0, connect(sockfd_1, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + /* Closes socket 1 */ + ASSERT_EQ(0, close(sockfd_1)); + + /* Creates a socket 2 */ + sockfd_2 = create_socket(_metadata); + ASSERT_LE(0, sockfd_2); + + /* + * Forbids to bind the socket 2 to address with port[1], + * cause just one layer has bind() access rule. + */ + ASSERT_EQ(-1, bind(sockfd_2, (struct sockaddr *)&addr[1], sizeof(addr[1]))); + ASSERT_EQ(EACCES, errno); + + /* Expands network mask */ + struct landlock_ruleset_attr ruleset_attr_3 = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + }; + + /* Restricts connect() access to port[0] */ + struct landlock_net_service_attr net_service_4 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + + .port = port[0], + }; + + const int ruleset_fd_3 = landlock_create_ruleset(&ruleset_attr_3, + sizeof(ruleset_attr_3), 0); + ASSERT_LE(0, ruleset_fd_3); + + /* Adds rule to port[0] socket */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd_3, LANDLOCK_RULE_NET_SERVICE, + &net_service_4, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd_3); + ASSERT_EQ(0, close(ruleset_fd_3)); + + /* Creates a socket 1 */ + sockfd_1 = create_socket(_metadata); + ASSERT_LE(0, sockfd_1); + + /* Binds the socket 1 to address with port[0] */ + ASSERT_EQ(0, bind(sockfd_1, (struct sockaddr *)&addr[0], sizeof(addr[0]))); + + /* + * Forbids to bind the socket 1 to address with port[0], + * cause just one layer has connect() access rule. 
+ */ + ASSERT_EQ(-1, connect(sockfd_1, (struct sockaddr *)&addr[0], + sizeof(addr[0]))); + ASSERT_EQ(EACCES, errno); + + /* Closes socket 1 */ + ASSERT_EQ(0, close(sockfd_1)); +} + TEST_HARNESS_MAIN From patchwork Wed Mar 9 13:44:59 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Konstantin Meskhidze (A)" X-Patchwork-Id: 12775101 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9005DC4332F for ; Wed, 9 Mar 2022 13:46:13 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233309AbiCINrK (ORCPT ); Wed, 9 Mar 2022 08:47:10 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:44450 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232343AbiCINrA (ORCPT ); Wed, 9 Mar 2022 08:47:00 -0500 Received: from frasgout.his.huawei.com (frasgout.his.huawei.com [185.176.79.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id B972E17C41E; Wed, 9 Mar 2022 05:45:30 -0800 (PST) Received: from fraeml704-chm.china.huawei.com (unknown [172.18.147.201]) by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4KDD310cfvz67bVT; Wed, 9 Mar 2022 21:45:01 +0800 (CST) Received: from mscphispre00059.huawei.com (10.123.71.64) by fraeml704-chm.china.huawei.com (10.206.15.53) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.2308.21; Wed, 9 Mar 2022 14:45:28 +0100 From: Konstantin Meskhidze To: CC: , , , , , , Subject: [RFC PATCH v4 15/15] seltest/landlock: invalid user input data test Date: Wed, 9 Mar 2022 21:44:59 +0800 Message-ID: <20220309134459.6448-16-konstantin.meskhidze@huawei.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> References: <20220309134459.6448-1-konstantin.meskhidze@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.123.71.64] X-ClientProxiedBy: mscpeml500001.china.huawei.com (7.188.26.142) To fraeml704-chm.china.huawei.com (10.206.15.53) X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org X-Patchwork-State: RFC This patch adds rules with invalid user space supplied data: - unhandled allowed access; - zero port value; - zero access value; Signed-off-by: Konstantin Meskhidze --- Changes since v3: * Add inval test. 
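One more error path worth noting next to the EINVAL, ENOENT and ENOMSG cases exercised below: on kernels built without CONFIG_INET, landlock_add_rule() returns EOPNOTSUPP for LANDLOCK_RULE_NET_SERVICE (see the syscall change earlier in this series). A hypothetical caller-side fallback, with a made-up helper name and policy, could look like this:

#include <errno.h>
#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Tries to allow bind() on one TCP port; treats a kernel without network
 * rule support as a soft failure so that filesystem rules still apply. */
static int try_allow_tcp_bind(const int ruleset_fd, const __u16 port)
{
	const struct landlock_net_service_attr net_service = {
		.allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP,
		.port = port,
	};

	if (!syscall(__NR_landlock_add_rule, ruleset_fd,
		     LANDLOCK_RULE_NET_SERVICE, &net_service, 0))
		return 0;
	/* CONFIG_INET=n: network rules are not available on this kernel. */
	if (errno == EOPNOTSUPP)
		return 0;
	return -1;
}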
--- .../testing/selftests/landlock/network_test.c | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) -- 2.25.1 diff --git a/tools/testing/selftests/landlock/network_test.c b/tools/testing/selftests/landlock/network_test.c index 8fa2a349329c..f06b9d02128a 100644 --- a/tools/testing/selftests/landlock/network_test.c +++ b/tools/testing/selftests/landlock/network_test.c @@ -610,4 +610,56 @@ TEST_F_FORK(socket, ruleset_expanding) { ASSERT_EQ(0, close(sockfd_1)); } +TEST_F_FORK(socket, inval) { + + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP + }; + struct landlock_net_service_attr net_service_1 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP | + LANDLOCK_ACCESS_NET_CONNECT_TCP, + .port = port[0], + }; + struct landlock_net_service_attr net_service_2 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = 0, + }; + struct landlock_net_service_attr net_service_3 = { + .allowed_access = 0, + .port = port[1], + }; + struct landlock_net_service_attr net_service_4 = { + .allowed_access = LANDLOCK_ACCESS_NET_BIND_TCP, + .port = port[2], + }; + + /* Gets ruleset. */ + const int ruleset_fd = landlock_create_ruleset(&ruleset_attr, + sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + + /* Checks unhandled allowed_access. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_1, 0)); + ASSERT_EQ(EINVAL, errno); + + /* Checks zero port value. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_2, 0)); + ASSERT_EQ(ENOENT, errno); + + /* Checks zero access value. */ + ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_3, 0)); + ASSERT_EQ(ENOMSG, errno); + + /* Adds with legitimate values. */ + ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_SERVICE, + &net_service_4, 0)); + + /* Enforces the ruleset. */ + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); +} + TEST_HARNESS_MAIN
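Taken together, the series lets one ruleset carry both filesystem and TCP network restrictions. As a closing illustration only (not part of the patches), a self-contained sandbox combining the two rule types could look like the sketch below; it assumes the patched <linux/landlock.h> and a libc exposing the landlock syscall numbers, and uses /usr and port 443 purely as example values.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	const struct landlock_ruleset_attr ruleset_attr = {
		/* Handled accesses are denied unless explicitly allowed. */
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_READ_DIR,
		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
				      LANDLOCK_ACCESS_NET_CONNECT_TCP,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
	};
	const struct landlock_net_service_attr tcp_443 = {
		.allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
		.port = 443,
	};
	int ruleset_fd;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return 1;

	/* Read-only access below /usr. */
	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(__NR_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return 1;
	close(path_beneath.parent_fd);

	/* Outgoing TCP connections are only allowed to port 443. */
	if (syscall(__NR_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_NET_SERVICE, &tcp_443, 0))
		return 1;

	/* Enforces the ruleset on this thread and its future children. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_landlock_restrict_self, ruleset_fd, 0))
		return 1;
	close(ruleset_fd);

	/* From now on, the filesystem is readable only below /usr, bind()
	 * on any TCP port is denied, and connect() is limited to port 443. */
	return 0;
}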