Logo Questions Linux Laravel Mysql Ubuntu Git Menu
 

Tool for simplifying/optimizing logic? [closed]

Usually I would let the compiler do its magic of optimizing complicated logical expressions; however, in this case the compiler I have to use is not very good at this (basically all it can do is replace things like /64 with bit-shifts and %512 with bitwise-and).

Is there any tool available that can analyze and provide optimized versions of expressions (i.e. the same way good optimizing compilers do)?

e.g. I would like to optimize the following:

int w = 2 - z/2;

int y0 = y + (((v % 512) / 64) / 4) * 8           + ((v / 512) / mb)*16;
int x0 = x + (((v % 512) / 64) % 4) * 8 * (w - 1) + ((v / 512) % mb)*8 * w;

int i = x0 * (w ^ 3) * 2 + y0 * mb * 16 * 2 + (2*z - 3) * (z/2);
like image 734
ronag Avatar asked Dec 29 '25 06:12

ronag


1 Answer

Here's a test:

typedef int MyInt; // or unsigned int

/*
 * Test function from the question: combines x, y, z, v and mb into a
 * single integer result i.  Locals renamed and the two repeated
 * sub-expressions (v % 512) / 64 and v / 512 hoisted into named
 * temporaries; every arithmetic operation is otherwise unchanged.
 */
MyInt get(MyInt x, MyInt y, MyInt z, MyInt v, MyInt mb)
{
    MyInt scale = 2 - z / 2;        /* the original's `w` */

    MyInt lo = (v % 512) / 64;      /* used by both y0 and x0 */
    MyInt hi = v / 512;             /* used by both y0 and x0 */

    MyInt y0 = y + (lo / 4) * 8 + (hi / mb) * 16;
    MyInt x0 = x + (lo % 4) * 8 * (scale - 1) + (hi % mb) * 8 * scale;

    /* note: `scale ^ 3` is bitwise XOR, not exponentiation */
    return x0 * (scale ^ 3) * 2 + y0 * mb * 16 * 2 + (2 * z - 3) * (z / 2);
}

I compiled with GCC 4.7.0 with -O3.

With int:

# GCC 4.7.0 -O3 output of get() with MyInt = int (signed).
# NOTE(review): register roles inferred from the C source above — presumably
# SysV AMD64 with edi = x, esi = y, edx = z, ecx = v, r8d = mb; confirm.
.LFB0:
        movl    %ecx, %eax
        # Leaf function: callee-saved registers are spilled into the red
        # zone (negative rsp offsets, no stack-pointer adjustment).
        movq    %r12, -24(%rsp)
.LCFI0:
        movl    %edx, %r12d
        # Signed power-of-two div/mod must round toward zero, so GCC
        # extracts sign bits (sarl/shrl $31) and adds bias terms
        # (e.g. leal 511(%rcx) below) before shifting.
        sarl    $31, %eax
        shrl    $31, %r12d
        movq    %r13, -16(%rsp)
        shrl    $23, %eax
        addl    %edx, %r12d
        movq    %rbx, -40(%rsp)
        leal    (%rcx,%rax), %r9d
        movl    %r12d, %r11d
        movq    %r14, -8(%rsp)
        sarl    %r11d
        movq    %rbp, -32(%rsp)
.LCFI1:
        movl    %edx, %ebp
        andl    $511, %r9d              # v % 512 (with sign correction)
        negl    %r11d
        subl    %eax, %r9d
        leal    511(%rcx), %eax         # bias for signed v / 512
        testl   %ecx, %ecx
        leal    2(%r11), %r13d          # w = 2 - z/2
        leal    63(%r9), %ebx
        cmovns  %ecx, %eax
        sarl    $9, %eax                # v / 512
        movl    %r13d, %r14d
        xorl    $3, %r14d               # w ^ 3
        movl    %eax, %edx
        testl   %r9d, %r9d
        cmovns  %r9d, %ebx
        sarl    $31, %edx               # sign-extend dividend into edx
        addl    $1, %r11d
        # The only true division: eax = (v/512)/mb, edx = (v/512)%mb.
        idivl   %r8d
        movl    %ebx, %r10d
        sarl    $31, %ebx
        shrl    $30, %ebx
        sarl    $6, %r10d               # (v % 512) / 64
        addl    %ebx, %r10d
        andl    $3, %r10d               # ... % 4 (signed correction around it)
        subl    %ebx, %r10d
        movq    -40(%rsp), %rbx         # restores interleaved with the tail
        sall    $3, %r10d               # * 8
        sall    $3, %edx                # remainder * 8
        imull   %r11d, %r10d
        imull   %r13d, %edx
        movq    -16(%rsp), %r13
        addl    %edi, %r10d             # + x
        addl    %edx, %r10d
        leal    255(%r9), %edx
        imull   %r10d, %r14d            # x0 * (w ^ 3)
        testl   %r9d, %r9d
        cmovs   %edx, %r9d
        sall    $4, %eax                # quotient * 16
        sarl    %r12d
        sarl    $8, %r9d
        leal    (%rsi,%r9,8), %ecx      # + y
        addl    %eax, %ecx
        leal    -3(%rbp,%rbp), %eax     # 2*z - 3
        movq    -32(%rsp), %rbp
        imull   %r8d, %ecx              # y0 * mb
        imull   %r12d, %eax             # * (z/2)
        movq    -24(%rsp), %r12
        sall    $4, %ecx                # * 16
        addl    %r14d, %ecx
        movq    -8(%rsp), %r14
        leal    (%rax,%rcx,2), %eax     # final i; doubled terms via *2 scale
        ret

With unsigned int:

# GCC 4.7.0 -O3 output of get() with MyInt = unsigned int.
# Much shorter than the signed variant: unsigned power-of-two div/mod
# lower directly to plain shifts (shrl) and masks (andl) with no
# sign-correction sequences.
# NOTE(review): register roles inferred from the C source above — confirm.
.LFB0:
        movl    %ecx, %eax
        movq    %rbp, -16(%rsp)         # red-zone spill, leaf function
        movl    %edx, %r11d
.LCFI0:
        movl    %edx, %ebp
        shrl    $9, %eax                # v / 512
        xorl    %edx, %edx              # zero edx before unsigned divide
        # The only true division: eax = (v>>9)/mb, edx = (v>>9)%mb.
        divl    %r8d
        movq    %r12, -8(%rsp)
.LCFI1:
        movl    %ecx, %r12d
        shrl    %r11d                   # z / 2
        andl    $511, %r12d             # v % 512
        movq    %rbx, -24(%rsp)
.LCFI2:
        movl    $2, %r10d
        movl    %r12d, %r9d
        movl    $1, %ebx
        subl    %r11d, %r10d            # w = 2 - z/2
        shrl    $6, %r9d                # (v % 512) / 64
        subl    %r11d, %ebx             # w - 1
        shrl    $8, %r12d
        andl    $3, %r9d                # ... % 4
        sall    $4, %r8d                # mb * 16
        imull   %ebx, %r9d
        leal    (%r12,%rax,2), %eax
        movq    -24(%rsp), %rbx         # restores interleaved with the tail
        imull   %r10d, %edx             # remainder * w
        xorl    $3, %r10d               # w ^ 3
        movq    -8(%rsp), %r12
        leal    (%rsi,%rax,8), %eax     # + y
        addl    %edx, %r9d
        leal    (%rdi,%r9,8), %edi      # + x
        imull   %eax, %r8d              # y0 * (mb * 16)
        leal    -3(%rbp,%rbp), %eax     # 2*z - 3
        movq    -16(%rsp), %rbp
        imull   %r10d, %edi             # x0 * (w ^ 3)
        imull   %r11d, %eax             # * (z/2)
        addl    %edi, %r8d
        leal    (%rax,%r8,2), %eax      # final i
        ret

"Optimizing" further by folding constants manually has (predictably) no further effect.

like image 93
Kerrek SB Avatar answered Dec 31 '25 22:12

Kerrek SB