// builtin-align.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
/// Check the code generation for the alignment builtins
/// To make the test case easier to read, run SROA after generating IR to remove the alloca instructions.
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_VOID_PTR \
// RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-VOID_PTR \
// RUN:   -enable-var-scope '-D$PTRTYPE=i8'
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_FLOAT_PTR \
// RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-FLOAT_PTR \
// RUN:   -enable-var-scope '-D$PTRTYPE=f32'
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_LONG \
// RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-LONG -enable-var-scope
/// Check that we can handle the case where the alignment parameter is wider
/// than the source type (generate a trunc on alignment instead of zext)
// RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_USHORT \
// RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-USHORT -enable-var-scope


/// Select the type under test from the -D macro passed by each RUN line above,
/// so the same function bodies are compiled once per type configuration.
#ifdef TEST_VOID_PTR
#define TYPE void *
#elif defined(TEST_FLOAT_PTR)
#define TYPE float *
#elif defined(TEST_LONG)
#define TYPE long
#elif defined(TEST_USHORT)
#define TYPE unsigned short
#else
#error MISSING TYPE
#endif

/// Check that constant initializers work and are correct
/// 1024 is a multiple of 512, so this folds to true at compile time.
_Bool aligned_true = __builtin_is_aligned(1024, 512);
// CHECK: @aligned_true = global i8 1, align 1
/// 123 is not a multiple of 512, so this folds to false.
_Bool aligned_false = __builtin_is_aligned(123, 512);
// CHECK: @aligned_false = global i8 0, align 1

/// align_down rounds down to the previous multiple of 32 (1023 -> 992).
int down_1 = __builtin_align_down(1023, 32);
// CHECK: @down_1 = global i32 992, align 4
/// An already-aligned value is returned unchanged.
int down_2 = __builtin_align_down(256, 32);
// CHECK: @down_2 = global i32 256, align 4

/// align_up rounds up to the next multiple of 32 (1023 -> 1024).
int up_1 = __builtin_align_up(1023, 32);
// CHECK: @up_1 = global i32 1024, align 4
/// An already-aligned value is returned unchanged.
int up_2 = __builtin_align_up(256, 32);
// CHECK: @up_2 = global i32 256, align 4

/// Capture the IR type here to use in the remaining FileCheck captures:
// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@get_type() #0
// CHECK-VOID_PTR-NEXT:  entry:
// CHECK-VOID_PTR-NEXT:    ret i8* null
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@get_type() #0
// CHECK-FLOAT_PTR-NEXT:  entry:
// CHECK-FLOAT_PTR-NEXT:    ret float* null
//
// CHECK-LONG-LABEL: define {{[^@]+}}@get_type() #0
// CHECK-LONG-NEXT:  entry:
// CHECK-LONG-NEXT:    ret i64 0
//
// CHECK-USHORT-LABEL: define {{[^@]+}}@get_type() #0
// CHECK-USHORT-NEXT:  entry:
// CHECK-USHORT-NEXT:    ret i16 0
//
/// Return a zero value of the type under test; the CHECK lines above capture
/// the corresponding IR type (i8*/float*/i64/i16) for each configuration.
TYPE get_type(void) {
  return (TYPE)0;
}

// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@is_aligned
// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-VOID_PTR-NEXT:  entry:
// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-VOID_PTR-NEXT:    [[SRC_ADDR:%.*]] = ptrtoint i8* [[PTR]] to i64
// CHECK-VOID_PTR-NEXT:    [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
// CHECK-VOID_PTR-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// CHECK-VOID_PTR-NEXT:    ret i1 [[IS_ALIGNED]]
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@is_aligned
// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-FLOAT_PTR-NEXT:  entry:
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-FLOAT_PTR-NEXT:    [[SRC_ADDR:%.*]] = ptrtoint float* [[PTR]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
// CHECK-FLOAT_PTR-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// CHECK-FLOAT_PTR-NEXT:    ret i1 [[IS_ALIGNED]]
//
// CHECK-LONG-LABEL: define {{[^@]+}}@is_aligned
// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-LONG-NEXT:  entry:
// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-LONG-NEXT:    [[SET_BITS:%.*]] = and i64 [[PTR]], [[MASK]]
// CHECK-LONG-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// CHECK-LONG-NEXT:    ret i1 [[IS_ALIGNED]]
//
// CHECK-USHORT-LABEL: define {{[^@]+}}@is_aligned
// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-USHORT-NEXT:  entry:
// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
// CHECK-USHORT-NEXT:    [[SET_BITS:%.*]] = and i16 [[PTR]], [[MASK]]
// CHECK-USHORT-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i16 [[SET_BITS]], 0
// CHECK-USHORT-NEXT:    ret i1 [[IS_ALIGNED]]
//
/// __builtin_is_aligned: masks the (pointer-sized) value with align-1 and
/// compares against zero. Pointers go through ptrtoint; for the ushort case
/// the i32 alignment is truncated to i16 instead of the value being widened.
_Bool is_aligned(TYPE ptr, unsigned align) {
  return __builtin_is_aligned(ptr, align);
}

// NOTYET-POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0i8.i64(i8* [[OVER_BOUNDARY]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_up
// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-VOID_PTR-NEXT:  entry:
// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-VOID_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64
// CHECK-VOID_PTR-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
// CHECK-VOID_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-VOID_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// CHECK-VOID_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-VOID_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
// CHECK-VOID_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-VOID_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
// CHECK-VOID_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
// CHECK-VOID_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-VOID_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-VOID_PTR-NEXT:    ret i8* [[ALIGNED_RESULT]]
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up
// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-FLOAT_PTR-NEXT:  entry:
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-FLOAT_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
// CHECK-FLOAT_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// CHECK-FLOAT_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-FLOAT_PTR-NEXT:    [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
// CHECK-FLOAT_PTR-NEXT:    [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
// CHECK-FLOAT_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-FLOAT_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
// CHECK-FLOAT_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-FLOAT_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-FLOAT_PTR-NEXT:    ret float* [[TMP1]]
//
// CHECK-LONG-LABEL: define {{[^@]+}}@align_up
// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-LONG-NEXT:  entry:
// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-LONG-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[PTR]], [[MASK]]
// CHECK-LONG-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-LONG-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// CHECK-LONG-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
// CHECK-USHORT-LABEL: define {{[^@]+}}@align_up
// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-USHORT-NEXT:  entry:
// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
// CHECK-USHORT-NEXT:    [[OVER_BOUNDARY:%.*]] = add i16 [[PTR]], [[MASK]]
// CHECK-USHORT-NEXT:    [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
// CHECK-USHORT-NEXT:    [[ALIGNED_RESULT:%.*]] = and i16 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// CHECK-USHORT-NEXT:    ret i16 [[ALIGNED_RESULT]]
//
/// __builtin_align_up: adds align-1 then masks with ~(align-1). For pointer
/// types the result is produced via an inbounds GEP of the aligned-vs-original
/// difference, followed by an llvm.assume of the new alignment (see CHECKs).
TYPE align_up(TYPE ptr, unsigned align) {
  return __builtin_align_up(ptr, align);
}

// NOTYET-POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0[[$PTRTYPE]].i64([[$TYPE]] [[PTR]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_down
// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-VOID_PTR-NEXT:  entry:
// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-VOID_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64
// CHECK-VOID_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-VOID_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
// CHECK-VOID_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-VOID_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
// CHECK-VOID_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-VOID_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
// CHECK-VOID_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
// CHECK-VOID_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-VOID_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-VOID_PTR-NEXT:    ret i8* [[ALIGNED_RESULT]]
//
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down
// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-FLOAT_PTR-NEXT:  entry:
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-FLOAT_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
// CHECK-FLOAT_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// CHECK-FLOAT_PTR-NEXT:    [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
// CHECK-FLOAT_PTR-NEXT:    [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
// CHECK-FLOAT_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-FLOAT_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
// CHECK-FLOAT_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
// CHECK-FLOAT_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-FLOAT_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-FLOAT_PTR-NEXT:    ret float* [[TMP1]]
//
// CHECK-LONG-LABEL: define {{[^@]+}}@align_down
// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-LONG-NEXT:  entry:
// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
// CHECK-LONG-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
// CHECK-LONG-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[PTR]], [[INVERTED_MASK]]
// CHECK-LONG-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
// CHECK-USHORT-LABEL: define {{[^@]+}}@align_down
// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
// CHECK-USHORT-NEXT:  entry:
// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
// CHECK-USHORT-NEXT:    [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
// CHECK-USHORT-NEXT:    [[ALIGNED_RESULT:%.*]] = and i16 [[PTR]], [[INVERTED_MASK]]
// CHECK-USHORT-NEXT:    ret i16 [[ALIGNED_RESULT]]
//
/// __builtin_align_down: masks with ~(align-1). As with align_up, pointer
/// results are rebuilt with an inbounds GEP plus an llvm.assume so later
/// passes can rely on the stronger alignment (see CHECK lines above).
TYPE align_down(TYPE ptr, unsigned align) {
  return __builtin_align_down(ptr, align);
}