@@ -158,11 +158,9 @@ Float:
expr->type = EXPR_FVALUE;
}
-static int check_shift_count(struct expression *expr, struct symbol *ctype, unsigned int count)
+static void check_shift_count(struct expression *expr, struct symbol *ctype, unsigned int count)
{
warning(expr->pos, "shift too big (%u) for type %s", count, show_typename(ctype));
- count &= ctype->bit_size-1;
- return count;
}
/*
@@ -186,8 +184,7 @@ static int simplify_int_binop(struct expression *expr, struct symbol *ctype)
if (r >= ctype->bit_size) {
if (conservative)
return 0;
- r = check_shift_count(expr, ctype, r);
- right->value = r;
+ check_shift_count(expr, ctype, r);
}
}
if (left->type != EXPR_VALUE)
Currently, at expansion time, shift expressions with an amount corresponding to undefined behaviour (larger than or equal to the type's width, or negative) are 'reformed' by reducing the amount modulo the width. This corresponds in fact to the run-time behaviour of several CPU families (x86[-64], arm64, mips, ...) but not all of them (arm, ppc, ...). However, it is desirable for the front-end to modify neither the execution model, the virtual one given by the C standard, nor the effective one of the target machine. Change this by no longer doing the reduction modulo the width and leaving these expressions as-is (which leaves the possibility to do something else, maybe target-specific, at a later stage). Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com> --- expand.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-)