
Lines Matching refs:AL

94 MLA(AL, 0, c, x.reg, dvdx, c);
102 MOV(AL, 0, end, reg_imm(parts.count.reg, LSR, 16));
103 MLA(AL, 1, end, dvdx, end, c);
105 BIC(AL, 0, c, c, reg_imm(c, ASR, 31));
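The BIC at line 105 is the standard branch-free clamp-to-zero: an arithmetic right shift by 31 turns a negative value into an all-ones mask and anything else into zero, so bit-clearing with that mask zeroes negative inputs and leaves everything else untouched. A minimal C++ sketch of the operation (names are illustrative, not the JIT's registers):

    #include <cstdint>

    // BIC(AL, 0, c, c, reg_imm(c, ASR, 31)):
    //   c >> 31          is all ones when c < 0, else all zeros
    //   c & ~(c >> 31)   therefore clamps negatives to zero without a branch
    // (relies on arithmetic right shift of signed values, as ASR does)
    static inline int32_t clamp_to_zero(int32_t c) {
        return c & ~(c >> 31);
    }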
162 AND(AL, 0, parts.iterated.reg,
165 MOV(AL, 0, parts.iterated.reg,
206 ADD(AL, 0, dx, fragment.reg, dx);
223 BIC(AL, 0, fragment.reg, fragment.reg,
353 ADD(AL, 0, Rx, Rx, reg_imm(txPtr.reg, ASR, 16)); // x += (s>>16)
355 ADD(AL, 0, Ry, Ry, reg_imm(txPtr.reg, ASR, 16)); // y += (t>>16)
358 SMLABB(AL, Rx, Ry, txPtr.reg, Rx); // x+y*stride
382 MLA(AL, 0, s.reg, Rx, s.reg, ydsdy);
383 MLA(AL, 0, t.reg, Rx, t.reg, ydtdy);
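Lines 353-383 derive the texel address and the per-scanline gradient start values from 16.16 fixed-point coordinates; judging from the comments, txPtr.reg is reused as a scratch register holding s, then t, then the stride, and SMLABB folds x + y*stride into a single multiply-accumulate. A hedged C++ restatement of the arithmetic (variable names are mine):

    #include <cstdint>

    // x += s >> 16; y += t >> 16;   // integer parts of the 16.16 coords
    // offset = x + y * stride;      // SMLABB(AL, Rx, Ry, stride, Rx)
    static inline int32_t texel_offset(int32_t x, int32_t y,
                                       int32_t s, int32_t t, int32_t stride) {
        x += s >> 16;
        y += t >> 16;
        return x + y * stride;
    }

    // Per-scanline start value, as in MLA(AL, 0, s.reg, Rx, s.reg, ydsdy),
    // assuming s.reg holds dsdx on entry (an inference from the listing):
    static inline int32_t scanline_start(int32_t x, int32_t dsdx, int32_t ydsdy) {
        return x * dsdx + ydsdy;
    }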
523 SUB(AL, 0, u, u, imm(1<<(FRAC_BITS-1)));
524 SUB(AL, 0, v, v, imm(1<<(FRAC_BITS-1)));
527 AND(AL, 0, U, u, imm((1<<FRAC_BITS)-1));
528 AND(AL, 0, V, v, imm((1<<FRAC_BITS)-1));
531 SUB(AL, 0, width, width, imm(1));
532 SUB(AL, 0, height, height, imm(1));
538 MOV(AL, 1, u, reg_imm(u, ASR, FRAC_BITS));
540 CMP(AL, u, width);
559 CMP(AL, width, reg_imm(u, ASR, FRAC_BITS));
563 MOV(AL, 1, u, reg_imm(u, ASR, FRAC_BITS));
573 MOV(AL, 1, v, reg_imm(v, ASR, FRAC_BITS));
575 CMP(AL, v, height);
580 MUL(AL, 0, height, stride, height);
583 CMP(AL, height, reg_imm(v, ASR, FRAC_BITS));
591 MOV(AL, 1, v, reg_imm(v, ASR, FRAC_BITS));
611 ADD(AL, 0, s.reg, s.reg, dsdx);
612 ADD(AL, 0, t.reg, t.reg, dtdx);
633 SMLABB(AL, u, v, stride, u); // u+v*stride
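Lines 523-633 are the bilinear setup: the coordinates are pulled back by half a texel, the sub-texel fractions U and V are masked out, width and height are pre-decremented so the CMPs test against the last valid texel, and the MOVs with the S bit set (second argument 1) expose the sign of the shifted coordinate to the flags. A compact C++ restatement, with FRAC_BITS as in the listing and the clamp-to-edge resolution of the compares assumed (the conditional instructions would not match "AL" and so are absent here):

    #include <algorithm>
    #include <cstdint>

    static const int FRAC_BITS = 7;  // illustrative value, not from the listing

    static inline int32_t bilinear_setup(int32_t u, int32_t v,
                                         int32_t width, int32_t height,
                                         int32_t stride,
                                         int32_t& U, int32_t& V) {
        u -= 1 << (FRAC_BITS - 1);            // step back half a texel
        v -= 1 << (FRAC_BITS - 1);
        U = u & ((1 << FRAC_BITS) - 1);       // sub-texel fractions
        V = v & ((1 << FRAC_BITS) - 1);
        u >>= FRAC_BITS;                      // integer texel coordinates
        v >>= FRAC_BITS;
        u = std::min(std::max(u, (int32_t)0), width - 1);   // clamp to edge (assumed)
        v = std::min(std::max(v, (int32_t)0), height - 1);
        return u + v * stride;                // SMLABB(AL, u, v, stride, u)
    }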
671 ADD(AL, 0, txPtr.reg, txPtr.reg, imm(txPtr.size>>3));
686 ADD(AL, 0, s, s, dsdx);
687 ADD(AL, 0, t, t, dtdx);
708 LDRB(AL, texel.reg, txPtr.reg);
729 ADD(AL, 0, offset, lb, rt);
730 LDRB(AL, pixel, txPtr.reg, reg_scale_pre(offset));
731 SMULBB(AL, u, U, V);
732 SMULBB(AL, d, pixel, u);
733 RSB(AL, 0, k, u, imm(1<<(FRAC_BITS*2)));
736 RSB(AL, 0, U, U, imm(1<<FRAC_BITS));
737 LDRB(AL, pixel, txPtr.reg, reg_scale_pre(lb));
738 SMULBB(AL, u, U, V);
739 SMLABB(AL, d, pixel, u, d);
740 SUB(AL, 0, k, k, u);
743 RSB(AL, 0, V, V, imm(1<<FRAC_BITS));
744 LDRB(AL, pixel, txPtr.reg);
745 SMULBB(AL, u, U, V);
746 SMLABB(AL, d, pixel, u, d);
749 LDRB(AL, pixel, txPtr.reg, reg_scale_pre(rt));
750 SUB(AL, 0, u, k, u);
751 SMLABB(AL, texel.reg, pixel, u, d);
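Lines 729-751 are the complete 8-bit bilinear blend. Four texels are fetched (the current one at txPtr plus the lb, rt, and lb+rt neighbors) and combined with weights built from U and V; the running complement k ensures the last weight is exactly the remainder, so the four weights always sum to 1<<(FRAC_BITS*2). A C++ sketch that mirrors the SMULBB/SMLABB sequence step for step (the pointer naming is mine):

    #include <cstdint>

    static const int FRAC_BITS = 7;  // illustrative value

    static inline uint32_t filter8(const uint8_t* p, int32_t lb, int32_t rt,
                                   int32_t U, int32_t V) {
        int32_t u = U * V;                          // weight of p[lb + rt]
        int32_t d = p[lb + rt] * u;
        int32_t k = (1 << (2 * FRAC_BITS)) - u;     // running complement
        U = (1 << FRAC_BITS) - U;
        u = U * V;                                  // weight of p[lb]
        d += p[lb] * u;
        k -= u;
        V = (1 << FRAC_BITS) - V;
        u = U * V;                                  // weight of p[0]
        d += p[0] * u;
        u = k - u;                                  // p[rt] gets the exact remainder
        return d + p[rt] * u;                       // still scaled by 1 << (2*FRAC_BITS)
    }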
799 LDRH(AL, texel.reg, txPtr.reg);
830 ADD(AL, 0, offset, offset, u);
832 LDRH(AL, pixel, txPtr.reg, reg_pre(offset));
833 SMULBB(AL, u, U, V);
834 ORR(AL, 0, pixel, pixel, reg_imm(pixel, LSL, shift));
838 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
839 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
841 MUL(AL, 0, d, pixel, u);
842 RSB(AL, 0, k, u, imm(1<<prec));
846 RSB(AL, 0, U, U, imm(1<<FRAC_BITS));
847 LDRH(AL, pixel, txPtr.reg, reg_pre(offset));
848 SMULBB(AL, u, U, V);
849 ORR(AL, 0, pixel, pixel, reg_imm(pixel, LSL, shift));
853 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
854 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
856 MLA(AL, 0, d, pixel, u, d);
857 SUB(AL, 0, k, k, u);
860 RSB(AL, 0, V, V, imm(1<<FRAC_BITS));
861 LDRH(AL, pixel, txPtr.reg);
862 SMULBB(AL, u, U, V);
863 ORR(AL, 0, pixel, pixel, reg_imm(pixel, LSL, shift));
867 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
868 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
870 MLA(AL, 0, d, pixel, u, d);
874 LDRH(AL, pixel, txPtr.reg, reg_pre(offset));
875 SUB(AL, 0, u, k, u);
876 ORR(AL, 0, pixel, pixel, reg_imm(pixel, LSL, shift));
878 MLA(AL, 0, texel.reg, pixel, u, d);
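Lines 799-878 are the RGB565 version of the same filter. The repeated ORR(pixel, pixel, pixel << shift) spreads one 565 pixel across a 32-bit register so a single multiply scales all three components at once (the isolating AND is emitted elsewhere and does not appear in this AL match list), and the ADD/MOV pair with `adjust` rounds the U*V product down to the weight precision `prec`. A sketch of the underlying trick, assuming shift == 16, the usual 0x07E0F81F spread mask, and prec == 5 (all assumptions):

    #include <cstdint>

    // Spread a 565 pixel: R and B land in the low half, G in the high half.
    static inline uint32_t spread565(uint16_t pix) {
        return ((uint32_t)pix | ((uint32_t)pix << 16)) & 0x07E0F81F;
    }

    // Fold a weighted accumulation back to 565; weights must sum to 32.
    static inline uint16_t fold565(uint32_t acc) {
        acc = (acc >> 5) & 0x07E0F81F;       // normalize by the total weight
        return (uint16_t)(acc | (acc >> 16));
    }

Accumulating spread565(p) * w across the four taps and calling fold565 at the end corresponds to the MUL at 841 and the MLAs at 856/870/878, which keep the taps in spread form until the final normalization.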
922 MOV(AL, 0, U, reg_imm(U, LSL, prescale));
924 ADD(AL, 0, u, offsetrt, offsetlb);
926 LDR(AL, pixel, txPtr.reg, reg_scale_pre(u));
928 SMULBB(AL, u, U, V);
929 RSB(AL, 0, U, U, imm(1<<FRAC_BITS));
931 SMULWB(AL, u, U, V);
932 RSB(AL, 0, U, U, imm(1<<(FRAC_BITS+prescale)));
934 UXTB16(AL, temp, pixel, 0);
936 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
937 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
939 LDR(AL, pixellb, txPtr.reg, reg_scale_pre(offsetlb));
940 MUL(AL, 0, dh, temp, u);
941 UXTB16(AL, temp, pixel, 8);
942 MUL(AL, 0, dl, temp, u);
943 RSB(AL, 0, k, u, imm(0x100));
947 SMULBB(AL, u, U, V);
949 SMULWB(AL, u, U, V);
951 UXTB16(AL, temp, pixellb, 0);
953 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
954 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
956 MLA(AL, 0, dh, temp, u, dh);
957 UXTB16(AL, temp, pixellb, 8);
958 MLA(AL, 0, dl, temp, u, dl);
959 SUB(AL, 0, k, k, u);
962 RSB(AL, 0, V, V, imm(1<<FRAC_BITS));
963 LDR(AL, pixel, txPtr.reg);
965 SMULBB(AL, u, U, V);
967 SMULWB(AL, u, U, V);
969 UXTB16(AL, temp, pixel, 0);
971 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
972 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
974 MLA(AL, 0, dh, temp, u, dh);
975 UXTB16(AL, temp, pixel, 8);
976 MLA(AL, 0, dl, temp, u, dl);
979 LDR(AL, pixel, txPtr.reg, reg_scale_pre(offsetrt));
980 SUB(AL, 0, u, k, u);
981 UXTB16(AL, temp, pixel, 0);
982 MLA(AL, 0, dh, temp, u, dh);
983 UXTB16(AL, temp, pixel, 8);
984 MLA(AL, 0, dl, temp, u, dl);
986 UXTB16(AL, dh, dh, 8);
987 UXTB16(AL, dl, dl, 8);
988 ORR(AL, 0, texel.reg, dh, reg_imm(dl, LSL, 8));
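Lines 922-988 are the ARMv6 (UXTB16) 32-bit path. UXTB16 with rotation 0 pulls bytes 0 and 2 of a pixel into two 16-bit lanes, and with rotation 8 pulls bytes 1 and 3, so each MUL/MLA scales two color components at once; the closing UXTB16 ... ROR #8 pair takes the high byte of every lane and the ORR reassembles the pixel. The equivalent lane arithmetic in C++ (a sketch, not the emitted code):

    #include <cstdint>

    static inline uint32_t even_lanes(uint32_t p) { return p & 0x00FF00FF; }        // UXTB16 r, p (ROR #0)
    static inline uint32_t odd_lanes(uint32_t p)  { return (p >> 8) & 0x00FF00FF; } // UXTB16 r, p, ROR #8

    // One weighted tap, w in [0, 256]; two byte-wide products per multiply.
    static inline void tap(uint32_t pixel, uint32_t w, uint32_t& dh, uint32_t& dl) {
        dh += even_lanes(pixel) * w;
        dl += odd_lanes(pixel) * w;
    }

    // UXTB16(dh, dh, 8); UXTB16(dl, dl, 8); ORR(texel, dh, dl LSL 8):
    static inline uint32_t combine(uint32_t dh, uint32_t dl) {
        return ((dh >> 8) & 0x00FF00FF) | (((dl >> 8) & 0x00FF00FF) << 8);
    }

The weights are first reduced to 8 bits (the ADD/MOV rounding by `adjust`, with the last weight again formed as a remainder from k, here against 0x100), so each accumulated lane stays within 16 bits per component.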
1013 MOV(AL, 0, mask, imm(0xFF));
1014 ORR(AL, 0, mask, mask, imm(0xFF0000));
1020 ADD(AL, 0, offset, offset, u);
1022 LDR(AL, pixel, txPtr.reg, reg_scale_pre(offset));
1023 SMULBB(AL, u, U, V);
1024 AND(AL, 0, temp, mask, pixel);
1027 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
1028 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
1030 MUL(AL, 0, dh, temp, u);
1031 AND(AL, 0, temp, mask, reg_imm(pixel, LSR, 8));
1032 MUL(AL, 0, dl, temp, u);
1033 RSB(AL, 0, k, u, imm(0x100));
1037 RSB(AL, 0, U, U, imm(1<<FRAC_BITS));
1038 LDR(AL, pixel, txPtr.reg, reg_scale_pre(offset));
1039 SMULBB(AL, u, U, V);
1040 AND(AL, 0, temp, mask, pixel);
1043 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
1044 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
1046 MLA(AL, 0, dh, temp, u, dh);
1047 AND(AL, 0, temp, mask, reg_imm(pixel, LSR, 8));
1048 MLA(AL, 0, dl, temp, u, dl);
1049 SUB(AL, 0, k, k, u);
1052 RSB(AL, 0, V, V, imm(1<<FRAC_BITS));
1053 LDR(AL, pixel, txPtr.reg);
1054 SMULBB(AL, u, U, V);
1055 AND(AL, 0, temp, mask, pixel);
1058 ADD(AL, 0, u, u, imm(1<<(adjust-1)));
1059 MOV(AL, 0, u, reg_imm(u, LSR, adjust));
1061 MLA(AL, 0, dh, temp, u, dh);
1062 AND(AL, 0, temp, mask, reg_imm(pixel, LSR, 8));
1063 MLA(AL, 0, dl, temp, u, dl);
1067 LDR(AL, pixel, txPtr.reg, reg_scale_pre(offset));
1068 SUB(AL, 0, u, k, u);
1069 AND(AL, 0, temp, mask, pixel);
1070 MLA(AL, 0, dh, temp, u, dh);
1071 AND(AL, 0, temp, mask, reg_imm(pixel, LSR, 8));
1072 MLA(AL, 0, dl, temp, u, dl);
1074 AND(AL, 0, dh, mask, reg_imm(dh, LSR, 8));
1075 AND(AL, 0, dl, dl, reg_imm(mask, LSL, 8));
1076 ORR(AL, 0, texel.reg, dh, dl);
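Lines 1013-1076 repeat the same algorithm for cores without UXTB16: the 0x00FF00FF lane mask is materialized with MOV/ORR at 1013-1014, the byte extraction becomes an AND against that mask, and the recombination at 1074-1076 masks before merging instead of after. Only the extraction and the final merge differ from the sketch above:

    #include <cstdint>

    static const uint32_t kMask = 0x00FF00FF; // MOV mask, #0xFF; ORR mask, mask, #0xFF0000

    // AND(dh, mask, dh LSR 8); AND(dl, dl, mask LSL 8); ORR(texel, dh, dl)
    static inline uint32_t combine_no_uxtb16(uint32_t dh, uint32_t dl) {
        return (kMask & (dh >> 8)) | (dl & (kMask << 8));
    }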
1151 // UMULL(AL, 0, size, d, c, size);
1153 MOV(AL, 0, d, reg_imm(c, LSR, 16-tx_linear));
1154 SMULWB(AL, d, d, size);
1158 MOV(AL, 0, d, reg_imm(coord, ASR, 16-tx_linear));
1161 MOV(AL, 0, d, reg_imm(coord, ASR, 16));
1162 BIC(AL, 0, d, d, reg_imm(d, ASR, 31));
1163 CMP(AL, d, size);
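Lines 1151-1163 compute the integer texel coordinate for the wrap modes: the repeat path scales the fractional coordinate by the texture size (SMULWB after pre-shifting for linear filtering), while the clamp path shifts the 16.16 coordinate down, zeroes negatives with the same BIC/ASR-31 idiom, and compares against the size; the conditional cap after the CMP is presumably emitted under a non-AL condition and so is missing from this listing. A hedged sketch of the clamp path:

    #include <cstdint>

    static inline int32_t wrap_clamp(int32_t coord /* 16.16 */, int32_t size) {
        int32_t d = coord >> 16;        // MOV(AL, 0, d, reg_imm(coord, ASR, 16))
        d &= ~(d >> 31);                // BIC(AL, 0, d, d, reg_imm(d, ASR, 31))
        if (d >= size) d = size - 1;    // CMP(AL, d, size) + conditional cap (assumed)
        return d;
    }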
1190 RSB(AL, 0, dest.reg, texel.reg, reg_imm(texel.reg, LSL, incoming.h));
1191 AND(AL, 0, dest.reg, dest.reg, incoming.reg);
1196 MOV(AL, 0, dest.reg, reg_imm(incoming.reg, LSL, 31-incoming.h));
1197 AND(AL, 0, dest.reg, texel.reg, reg_imm(dest.reg, ASR, 31));
1217 MOV(AL, 0, dest.reg, reg_imm(inReg, LSR, shift));
1224 ADD(AL, 0, dest.reg, inReg, reg_imm(inReg, LSR, Ni-1));
1225 if (Nt<16 && Ni<16) SMULBB(AL, dest.reg, texel.reg, dest.reg);
1226 else MUL(AL, 0, dest.reg, texel.reg, dest.reg);
1232 MOV(AL, 0, dest.reg, reg_imm(inReg, LSR, shift));
1243 ADD(AL, 0, t, texel.reg, reg_imm(texel.reg, LSR, Nt-1));
1245 if (shift==16) SMULBT(AL, dest.reg, t, inReg);
1246 else SMULBB(AL, dest.reg, t, inReg);
1247 } else MUL(AL, 0, dest.reg, t, inReg);
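Lines 1190-1247 carry the texture-environment arithmetic. The recurring ADD(x, x, x >> (N-1)) idiom (lines 1224 and 1243) widens a component from [0, 2^N - 1] to [0, 2^N], so that multiplying two full-scale components and shifting by N yields exactly full scale again, and the SMULBB/SMULBT variants are chosen when both operands fit in 16 bits. A sketch of a modulate-style combine under those assumptions (the final shift placement is inferred, not shown in the listing):

    #include <cstdint>

    // ADD(AL, 0, dest, v, reg_imm(v, LSR, N-1)): map [0, 2^N - 1] onto [0, 2^N].
    static inline uint32_t expand(uint32_t v, int N) {
        return v + (v >> (N - 1));
    }

    // texel * incoming, renormalized so 1.0 * 1.0 == 1.0:
    static inline uint32_t modulate(uint32_t texel, uint32_t in, int Ni) {
        return (texel * expand(in, Ni)) >> Ni;   // SMULBB when both fit in 16 bits
    }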
1284 MOV(AL, 0, dest.reg, reg_imm(incomingNorm.reg, LSR, shift));
1288 ADD(AL, 0, factor.reg, factor.reg, reg_imm(factor.reg, LSR, factor.s-1));
1309 LDRB(AL, color.reg, mBuilderContext.Rctx,
1322 MOV(AL, 0, dest.reg, reg_imm(incomingNorm.reg, LSR, shift));
1326 ADD(AL, 0, factor.reg, factor.reg, reg_imm(factor.reg, LSR, factor.s-1));
1360 ADD(AL, 0, dest.reg, texel.reg,
1363 ADD(AL, 0, dest.reg, texel.reg, incomingTemp.reg);
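Lines 1284-1363 cover the remaining combine modes: the blend factor gets the same [0, 2^s] expansion at 1288/1326, a constant color is loaded from the context for the GL_BLEND-style path at 1309, and the final ADDs at 1360-1363 fold the scaled incoming value into the texel. The blends these lines feed typically reduce to a one-multiply lerp; a sketch under that assumption (the multiply itself is emitted by code that does not match "AL" textually here):

    #include <cstdint>

    // x + (y - x) * f, with f pre-expanded to [0, 2^s] as above.
    static inline uint32_t lerp(uint32_t x, uint32_t y, uint32_t f, int s) {
        return x + (uint32_t)(((int32_t)(y - x) * (int32_t)f) >> s);
    }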