@@ -191,12 +191,14 @@ static void partition_struct(tree *fields, unsigned long length, struct partitio
 
 static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
 {
-	unsigned long i, x;
+	unsigned long i, x, index;
 	struct partition_group size_group[length];
 	unsigned long num_groups = 0;
 	unsigned long randnum;
 
 	partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
+
+	/* FIXME: this group shuffle is currently a no-op. */
 	for (i = num_groups - 1; i > 0; i--) {
 		struct partition_group tmp;
 		randnum = ranval(prng_state) % (i + 1);
@@ -206,11 +208,14 @@ static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
 	}
 
 	for (x = 0; x < num_groups; x++) {
-		for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
+		for (index = size_group[x].length - 1; index > 0; index--) {
 			tree tmp;
+
+			i = size_group[x].start + index;
 			if (DECL_BIT_FIELD_TYPE(newtree[i]))
 				continue;
-			randnum = ranval(prng_state) % (i + 1);
+			randnum = ranval(prng_state) % (index + 1);
+			randnum += size_group[x].start;
 			// we could handle this case differently if desired
 			if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
 				continue;
The performance mode of the gcc-plugin randstruct was shuffling struct
members outside of the cache-line groups. Limit the range to the
specified group indexes.

Cc: linux-hardening@vger.kernel.org
Reported-by: Lukas Loidolt <e1634039@student.tuwien.ac.at>
Closes: https://lore.kernel.org/all/f3ca77f0-e414-4065-83a5-ae4c4d25545d@student.tuwien.ac.at
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 scripts/gcc-plugins/randomize_layout_plugin.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
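Below the cut, a worked example of the bounds change for reviewers: with a
group at start = 4 and length = 3, the old inner loop ran i over {6, 5} and
drew randnum from [0, i], so a swap could reach members 0-3 of an earlier
group. The new code draws randnum from [0, index] and then rebases it by
size_group[x].start, so every swap stays within [4, 6]. Here is a minimal
standalone sketch of the corrected in-group Fisher-Yates shuffle; the names
(struct group, shuffle_groups) and the use of stdlib rand() are illustrative
stand-ins, not the plugin's partition_group/ranval() machinery:

/*
 * Minimal sketch of an in-group Fisher-Yates shuffle, mirroring the
 * fixed index arithmetic above. All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct group {
	unsigned long start;	/* index of the group's first member */
	unsigned long length;	/* number of members in the group */
};

static void shuffle_groups(int *elems, const struct group *groups,
			   unsigned long num_groups)
{
	unsigned long x, index;

	for (x = 0; x < num_groups; x++) {
		for (index = groups[x].length - 1; index > 0; index--) {
			unsigned long i = groups[x].start + index;
			/*
			 * Draw from [0, index] and rebase by the group's
			 * start, so randnum always lands inside the group.
			 * The old "% (i + 1)" drew from [0, i], which can
			 * reach any earlier group.
			 */
			unsigned long randnum = rand() % (index + 1);
			int tmp;

			randnum += groups[x].start;
			tmp = elems[i];
			elems[i] = elems[randnum];
			elems[randnum] = tmp;
		}
	}
}

int main(void)
{
	int elems[] = { 0, 1, 2, 3, 4, 5, 6 };
	/* Two "cache-line" groups: members 0-3 and members 4-6. */
	const struct group groups[] = {
		{ .start = 0, .length = 4 },
		{ .start = 4, .length = 3 },
	};
	unsigned long i;

	srand(1);
	shuffle_groups(elems, groups, 2);
	/* 0-3 stay permuted within 0-3, and 4-6 within 4-6. */
	for (i = 0; i < 7; i++)
		printf("%d ", elems[i]);
	printf("\n");
	return 0;
}

The plugin applies the same bounds to tree pointers rather than ints, with
the extra wrinkle that bitfield members are skipped on both sides of the
swap (the two DECL_BIT_FIELD_TYPE checks in the hunk above).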