# ----------------------------------------------------------------------
# Vectorized SSE2 multiply-accumulate loop: 5 packed accumulators x
# 2 doubles = 10 elements per iteration, summing b[i] * second[i] into
# the packed accumulators %xmm1, %xmm4, %xmm5, %xmm11, %xmm6.
# In:  %rax = element index (scaled by 8 in every address below)
#      %rdx = presumably the base of the second array, whose element i
#             is addressed as (%rdx,%rax,8) — confirm against caller
#      %rbx = remaining iteration count (loop continues while > 0)
# NOTE(review): the dispatch only tests %rdx & 15; the aligned path's
# mulpd memory operands additionally need %rax even on entry —
# presumably guaranteed upstream; TODO confirm.
# ----------------------------------------------------------------------
        movq    %rdx, %r10
        andq    $15, %r10                   # %rdx array 16-byte aligned?
        jne     ___dcox86_wl_3_             # no -> unaligned path

# Aligned path: b gathered via movsd/movhpd pairs; the %rdx array is
# consumed directly as 16-byte-aligned mulpd memory operands.
___dcox86_wl_5_:
        movsd   b(,%rax,8), %xmm8           # xmm8  = { b[i],   b[i+1] }
        addq    $10, %rax                   # advance index early; all
                                            # offsets below are relative
                                            # to the post-increment %rax
        movhpd  b-72(,%rax,8), %xmm8
        subq    $1, %rbx                    # decrement trip count; the SSE
                                            # ops below leave RFLAGS intact
                                            # for the jg at the bottom
        movsd   b-64(,%rax,8), %xmm7        # xmm7  = { b[i+2], b[i+3] }
        movhpd  b-56(,%rax,8), %xmm7
        movsd   b-48(,%rax,8), %xmm12       # xmm12 = { b[i+4], b[i+5] }
        movhpd  b-40(,%rax,8), %xmm12
        movsd   b-32(,%rax,8), %xmm13       # xmm13 = { b[i+6], b[i+7] }
        movhpd  b-24(,%rax,8), %xmm13
        movsd   b-16(,%rax,8), %xmm10       # xmm10 = { b[i+8], b[i+9] }
        movhpd  b-8(,%rax,8), %xmm10
        mulpd   -80(%rdx,%rax,8), %xmm8     # multiply by matching pairs of
        mulpd   -64(%rdx,%rax,8), %xmm7     # the %rdx array (aligned
        mulpd   -48(%rdx,%rax,8), %xmm12    # 16-byte memory operands)
        mulpd   -32(%rdx,%rax,8), %xmm13
        mulpd   -16(%rdx,%rax,8), %xmm10
        addpd   %xmm8, %xmm1                # fold products into the five
        addpd   %xmm7, %xmm4                # independent accumulators
        addpd   %xmm12, %xmm5
        addpd   %xmm13, %xmm11
        addpd   %xmm10, %xmm6
        jg      ___dcox86_wl_5_             # flags still from subq $1,%rbx
        jmp     ___dcox86_cc_7_

# Unaligned path: both arrays gathered with movsd/movhpd pairs and
# multiplied register-to-register (no aligned memory operands needed).
___dcox86_wl_3_:
        movsd   (%rdx,%rax,8), %xmm9
        addq    $10, %rax                   # same early-increment scheme
        movhpd  -72(%rdx,%rax,8), %xmm9
        subq    $1, %rbx                    # flags consumed by jg below
        movsd   b-80(,%rax,8), %xmm13
        movhpd  b-72(,%rax,8), %xmm13
        movsd   -64(%rdx,%rax,8), %xmm8
        movhpd  -56(%rdx,%rax,8), %xmm8
        movsd   b-64(,%rax,8), %xmm7
        movhpd  b-56(,%rax,8), %xmm7
        movsd   -48(%rdx,%rax,8), %xmm15
        movhpd  -40(%rdx,%rax,8), %xmm15
        movsd   b-48(,%rax,8), %xmm14
        movhpd  b-40(,%rax,8), %xmm14
        movsd   -32(%rdx,%rax,8), %xmm2
        movhpd  -24(%rdx,%rax,8), %xmm2
        mulpd   %xmm13, %xmm9               # multiplies/adds interleaved
        movsd   b-32(,%rax,8), %xmm13       # with the remaining loads
        movhpd  b-24(,%rax,8), %xmm13       # (software-pipelined schedule;
        movsd   -16(%rdx,%rax,8), %xmm12    # xmm13 is deliberately reused
        movhpd  -8(%rdx,%rax,8), %xmm12     # after its product is taken)
        mulpd   %xmm7, %xmm8
        movsd   b-16(,%rax,8), %xmm10
        addpd   %xmm9, %xmm1
        movhpd  b-8(,%rax,8), %xmm10
        mulpd   %xmm14, %xmm15
        addpd   %xmm8, %xmm4
        mulpd   %xmm13, %xmm2
        addpd   %xmm15, %xmm5
        mulpd   %xmm10, %xmm12
        addpd   %xmm2, %xmm11
        addpd   %xmm12, %xmm6
        jg      ___dcox86_wl_3_
___dcox86_cc_7_:
# ----------------------------------------------------------------------
# Scalar multiply-accumulate loop: 10 unrolled movsd/mulsd/addsd steps
# per iteration, all summed into %xmm1 (b[i] * second[i] for
# i = %rax .. 511, where second[i] is addressed as (%rdx,%rax,8)).
# FIX(review): the label was written `L50:` while the branch targets
# `.L50` — an undefined symbol as extracted; the label is corrected to
# the local-label form `.L50:` to match the branch.
# NOTE(review): the exit test is `cmpq $512 / jne`, so %rax must land on
# 512 exactly — it must enter this loop congruent to 2 mod 10;
# presumably guaranteed upstream; TODO confirm.
# ----------------------------------------------------------------------
.L50:
        movsd   (%rdx,%rax,8), %xmm13
        mulsd   b(,%rax,8), %xmm13          # b[i]   * second[i]
        addsd   %xmm13, %xmm1
        movsd   8(%rdx,%rax,8), %xmm12
        mulsd   b+8(,%rax,8), %xmm12        # b[i+1] * second[i+1]
        addsd   %xmm12, %xmm1
        movsd   16(%rdx,%rax,8), %xmm11
        mulsd   b+16(,%rax,8), %xmm11       # b[i+2] * second[i+2]
        addsd   %xmm11, %xmm1
        movsd   24(%rdx,%rax,8), %xmm10
        mulsd   b+24(,%rax,8), %xmm10       # b[i+3] * second[i+3]
        addsd   %xmm10, %xmm1
        movsd   32(%rdx,%rax,8), %xmm9
        mulsd   b+32(,%rax,8), %xmm9        # b[i+4] * second[i+4]
        addsd   %xmm9, %xmm1
        movsd   40(%rdx,%rax,8), %xmm8
        mulsd   b+40(,%rax,8), %xmm8        # b[i+5] * second[i+5]
        addsd   %xmm8, %xmm1
        movsd   48(%rdx,%rax,8), %xmm7
        mulsd   b+48(,%rax,8), %xmm7        # b[i+6] * second[i+6]
        addsd   %xmm7, %xmm1
        movsd   56(%rdx,%rax,8), %xmm6
        mulsd   b+56(,%rax,8), %xmm6        # b[i+7] * second[i+7]
        addsd   %xmm6, %xmm1
        movsd   64(%rdx,%rax,8), %xmm5
        mulsd   b+64(,%rax,8), %xmm5        # b[i+8] * second[i+8]
        addsd   %xmm5, %xmm1
        movsd   72(%rdx,%rax,8), %xmm4
        mulsd   b+72(,%rax,8), %xmm4        # b[i+9] * second[i+9]
        addsd   %xmm4, %xmm1
        addq    $10, %rax                   # 10 elements consumed
        cmpq    $512, %rax
        jne     .L50                        # loop until index hits 512