Lines matching refs:parts (cross-references to the fragment_parts_t "parts" structure used by the GGLAssembler scanline code generator)
188 fragment_parts_t parts;
194 build_scanline_prolog(parts, needs);
207 MOV(AL, 0, parts.count.reg,
208 reg_imm(parts.count.reg, ROR, GGL_DITHER_ORDER_SHIFT));
209 ADD(AL, 0, parts.count.reg, parts.count.reg,
211 MOV(AL, 0, parts.count.reg,
212 reg_imm(parts.count.reg, ROR, 32 - GGL_DITHER_ORDER_SHIFT));
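Note: the three instructions above advance the per-pixel dither index kept in the low bits of parts.count.reg without disturbing the pixel count held in the high half. Rotating right by GGL_DITHER_ORDER_SHIFT parks the x-part of the dither index at the top of the register, adding one at that position increments it with any overflow falling off bit 31, and rotating back restores the layout. The ADD's second operand is not shown here (it does not reference parts); it is assumed below to be an immediate of 1 << (32 - GGL_DITHER_ORDER_SHIFT). A minimal C++ sketch of the same arithmetic, with kDitherOrderShift as an illustrative stand-in for GGL_DITHER_ORDER_SHIFT:

    #include <cstdint>

    constexpr unsigned kDitherOrderShift = 3;   // stand-in value, for illustration only

    static inline uint32_t ror32(uint32_t v, unsigned n) {
        return (v >> n) | (v << (32u - n));
    }

    // Advance the dither index packed into the low bits of 'count'.
    uint32_t advance_dither_index(uint32_t count) {
        count = ror32(count, kDitherOrderShift);        // x-bits of the index now at the top
        count += 1u << (32u - kDitherOrderShift);       // increment; carry past bit 31 is discarded
        count = ror32(count, 32u - kDitherOrderShift);  // rotate back: index wraps, count untouched
        return count;
    }

Packing the dither index and the loop counter into one register saves a scratch register, and the rotate/add/rotate sequence increments the index without needing a mask.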
221 build_depth_test(parts, Z_TEST|Z_WRITE);
225 build_depth_test(parts, Z_TEST);
232 build_textures(parts, regs);
246 load(parts.cbPtr, mDstPixel);
254 if (directTex | parts.packed) {
257 pixel = directTex ? parts.texel[directTex-1] : parts.iterated;
263 parts.dither = reg_t(regs.obtain());
264 AND(AL, 0, parts.dither.reg, parts.count.reg, imm(mask));
265 ADDR_ADD(AL, 0, parts.dither.reg, ctxtReg, parts.dither.reg);
266 LDRB(AL, parts.dither.reg, parts.dither.reg,
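Note: the dither value itself is fetched with a byte load: the low bits of parts.count.reg are masked, added to the context pointer, and used to index the dither matrix stored in the rasterization context. A hedged C++ equivalent, where the Context layout and table size are illustrative only (the real offset comes from the immediate on the LDRB, which this listing omits):

    #include <cstdint>

    struct Context {                 // hypothetical stand-in for the GGL context
        uint8_t ditherMatrix[64];    // size is an assumption
    };

    uint8_t load_dither(const Context* ctxt, uint32_t count, uint32_t mask) {
        uint32_t index = count & mask;        // AND: isolate the dither index
        return ctxt->ditherMatrix[index];     // LDRB at ctxt + offset(ditherMatrix) + index
    }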
273 build_component(pixel, parts, GGLFormat::ALPHA, regs);
278 build_depth_test(parts, Z_WRITE);
281 build_component(pixel, parts, GGLFormat::RED, regs);
282 build_component(pixel, parts, GGLFormat::GREEN, regs);
283 build_component(pixel, parts, GGLFormat::BLUE, regs);
305 store(parts.cbPtr, pixel, WRITE_BACK);
313 if (parts.reload != 3) {
314 build_smooth_shade(parts);
318 build_iterate_z(parts);
321 build_iterate_f(parts);
323 SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
331 build_iterate_texture_coordinates(parts);
334 build_smooth_shade(parts);
335 build_iterate_z(parts);
336 build_iterate_f(parts);
338 ADDR_ADD(AL, 0, parts.cbPtr.reg, parts.cbPtr.reg, imm(parts.cbPtr.size>>3));
340 SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16));
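Note: at the bottom of the generated inner loop the color-buffer pointer advances by one pixel (parts.cbPtr.size is the pixel size in bits, so size>>3 is bytes), and the pixel counter, which lives in the high 16 bits of parts.count.reg, is decremented by 1<<16 with the flags set so the following branch (omitted here, it does not reference parts) can loop while pixels remain. The same packed-counter decrement appears earlier in this listing on the short path. A small C++ sketch of the bookkeeping:

    #include <cstdint>

    // Per-pixel loop tail: advance the destination pointer and decrement the
    // counter packed into the high half of 'count'.
    void scanline_tail(uint8_t*& cbPtr, uint32_t& count, uint32_t cbBitsPerPixel) {
        cbPtr += cbBitsPerPixel >> 3;   // ADDR_ADD: one pixel forward (bits -> bytes)
        count -= 1u << 16;              // SUB ...,S: drop the count in the high 16 bits
        // the generated code then branches back while the result stays non-negative
    }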
351 fragment_parts_t& parts, const needs_t& needs)
357 parts.count.setTo(obtainReg());
361 CONTEXT_LOAD(parts.count.reg, iterators.xr);
364 // parts.count = iterators.xr - Rx
365 SUB(AL, 0, parts.count.reg, parts.count.reg, Rx);
366 SUB(AL, 0, parts.count.reg, parts.count.reg, imm(1));
369 // parts.count.reg = 0xNNNNXXDD
379 ORR(AL, 0, parts.count.reg, tx, reg_imm(parts.count.reg, LSL, 16));
381 // parts.count.reg = 0xNNNN0000
383 MOV(AL, 0, parts.count.reg, reg_imm(parts.count.reg, LSL, 16));
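Note: the prolog computes the number of pixels to process (xr - Rx - 1) and packs it into the high half of parts.count.reg; with dithering enabled the low bits also carry the initial dither index derived from the low bits of x and y, giving the 0xNNNNXXDD / 0xNNNN0000 layouts described in the comments above. The masking of x and y happens in code this listing omits, so the sketch below reconstructs it under that assumption, with illustrative constants standing in for GGL_DITHER_MASK and GGL_DITHER_ORDER_SHIFT and xl/xr/y as illustrative names for the span iterators:

    #include <cstdint>

    constexpr uint32_t kDitherMask       = 0x7;  // stand-in for GGL_DITHER_MASK
    constexpr unsigned kDitherOrderShift = 3;    // stand-in for GGL_DITHER_ORDER_SHIFT

    // Pack "pixels-to-process minus one" into the high 16 bits; optionally seed
    // the low bits with the dither index of the first pixel in the span.
    uint32_t pack_count(uint32_t xl, uint32_t xr, uint32_t y, bool dithering) {
        uint32_t count = xr - xl - 1;                       // SUB, SUB ... imm(1)
        if (dithering) {
            uint32_t tx  = xl & kDitherMask;
            uint32_t ty  = y  & kDitherMask;
            uint32_t idx = tx + (ty << kDitherOrderShift);  // dither matrix index
            return idx | (count << 16);                     // ORR ... LSL 16  -> 0xNNNNXXDD
        }
        return count << 16;                                 // MOV ... LSL 16  -> 0xNNNN0000
    }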
391 parts.cbPtr.setTo(obtainReg(), cb_bits);
393 CONTEXT_ADDR_LOAD(parts.cbPtr.reg, state.buffers.color.data);
395 base_offset(parts.cbPtr, parts.cbPtr, Rs);
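Note: parts.cbPtr is initialized with the color-buffer base address plus the pixel size in bits (cb_bits), and base_offset() then biases it by the linear pixel offset held in Rs. The Rs computation is omitted by this listing; the sketch assumes it is x + y*stride, and that base_offset() scales the offset to bytes from the pixel size:

    #include <cstdint>

    // Assumed equivalent of the color-buffer pointer setup.
    uint8_t* color_buffer_ptr(uint8_t* base, uint32_t x, uint32_t y,
                              uint32_t stride, uint32_t bitsPerPixel) {
        uint32_t pixelOffset = x + y * stride;            // Rs (computed by omitted code)
        return base + pixelOffset * (bitsPerPixel >> 3);  // base_offset(): scale to bytes
    }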
415 parts.z = reg_t(obtainReg());
419 int ydzdy = parts.z.reg;
422 MLA(AL, 0, parts.z.reg, Rx, dzdx, ydzdy);
424 // we're going to index zbase of parts.count
431 ADD(AL, 0, Rs, Rs, reg_imm(parts.count.reg, LSR, 16));
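Note: the iterated depth is seeded with a single MLA, parts.z.reg = Rx*dzdx + ydzdy (ydzdy having been loaded into the same register beforehand). The depth-buffer base is then biased by the packed pixel count (parts.count.reg >> 16) so that, inside the loop, the address of the current 16-bit depth sample can be recovered as zbase minus twice the remaining count, with no separate pointer increment. The stride multiply and the store of the biased base are in code this listing omits; the sketch below reconstructs the intent under those assumptions:

    #include <cstdint>

    // Depth setup in the scanline prolog. 16-bit depth samples and the
    // x + y*stride term are assumptions consistent with the depth-test addressing.
    struct DepthSetup {
        uint32_t z;       // iterated depth for the first pixel (32-bit fixed point)
        uint8_t* zbase;   // biased base: current sample at zbase - 2*remaining_count
    };

    DepthSetup init_depth(uint8_t* depthBase, uint32_t stride,
                          uint32_t x, uint32_t y, uint32_t count,
                          uint32_t dzdx, uint32_t ydzdy) {
        DepthSetup d;
        d.z = x * dzdx + ydzdy;                 // MLA: z = Rx*dzdx + ydzdy
        uint32_t rs = x + y * stride + count;   // linear offset biased by the pixel count
        d.zbase = depthBase + (rs << 1);        // two bytes per depth sample
        return d;
    }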
437 init_textures(parts.coords, reg_t(Rx), reg_t(Ry));
441 init_iterated_color(parts, reg_t(Rx));
445 parts.covPtr.setTo(obtainReg(), 16);
446 CONTEXT_ADDR_LOAD(parts.covPtr.reg, state.buffers.coverage);
447 ADDR_ADD(AL, 0, parts.covPtr.reg, parts.covPtr.reg, reg_imm(Rx, LSL, 1));
454 const fragment_parts_t& parts,
467 parts, component, scratches, regs);
475 downshift(pixel, component, temp, parts
482 const fragment_parts_t& parts,
507 (parts.texel[i].component_size(component) < dst_size)) {
533 build_iterated_color(fragment, parts, component, regs);
536 build_texture_environment(fragment, parts, component, regs);
560 build_coverage_application(fragment, parts, regs);
563 build_alpha_test(fragment, parts);
602 temp = component_t(parts.iterated, component);
608 temp = component_t(parts.texel[i], component);
627 void GGLAssembler::build_smooth_shade(const fragment_parts_t& parts)
629 if (mSmooth && !parts.iterated_packed) {
634 const int reload = parts.reload;
639 int c = parts.argb[i].reg;
640 int dx = parts.argb_dx[i].reg;
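Note: build_smooth_shade() only runs this path when smooth shading is on and the iterated color is not packed; for each of the four components it adds the per-pixel increment (argb_dx[i]) to the current value (argb[i]), reloading from the context first when parts.reload marks the registers as clobbered. The ADD itself is in code this listing omits; the assumed per-pixel effect is simply:

    #include <cstdint>

    // Minimal sketch of the per-pixel color iteration; reload handling omitted.
    void smooth_shade_step(uint32_t argb[4], const uint32_t argb_dx[4]) {
        for (int i = 0; i < 4; ++i) {
            argb[i] += argb_dx[i];   // one ADD per component in the generated code
        }
    }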
669 const fragment_parts_t& parts, Scratch& regs)
681 LDRH(AL, cf, parts.covPtr.reg, immed8_post(2));
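Note: with anti-aliasing enabled, a 16-bit coverage factor is read for each pixel through parts.covPtr with a post-incremented halfword load (the pointer advances by 2 bytes per pixel; the prolog pointed it at coverage + 2*x). The fragment value is then scaled by that factor. The exact fixed-point multiply is not shown in this listing, so the sketch assumes the coverage value is a 1.15 fixed-point fraction:

    #include <cstdint>

    // Hedged sketch of coverage application; the 1.15 scaling is an assumption,
    // the listing only shows the halfword load with post-increment.
    uint32_t apply_coverage(uint32_t fragment, const uint16_t*& covPtr) {
        uint16_t cf = *covPtr++;                              // LDRH ..., immed8_post(2)
        return (uint32_t)(((uint64_t)fragment * cf) >> 15);   // scale fragment by coverage
    }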
695 const fragment_parts_t& /*parts*/)
722 const fragment_parts_t& parts, uint32_t mask)
764 int z = parts.z.reg;
767 ADDR_SUB(AL, 0, zbase, zbase, reg_imm(parts.count.reg, LSR, 15));
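Note: build_depth_test() reconstructs the address of the current depth sample from the biased base saved in the prolog. Subtracting parts.count.reg >> 15 is the same as subtracting twice the count held in the high 16 bits, because the low half of the packed register only carries the small dither index, so the single shift directly yields the byte offset for 16-bit samples. The comparison and the optional write use parts.z.reg >> 16 and the configured depth function, neither of which appears in this listing; the sketch below uses a placeholder comparison:

    #include <cstdint>

    // Hedged sketch of the Z_TEST / Z_WRITE step; the >= comparison is a
    // placeholder for whatever depth function the state selects.
    bool depth_test(uint8_t* zbase, uint32_t count, uint32_t z,
                    bool doTest, bool doWrite) {
        uint16_t* zptr = reinterpret_cast<uint16_t*>(zbase - ((count >> 16) << 1));
        uint16_t  newz = static_cast<uint16_t>(z >> 16);
        if (doTest && !(newz >= *zptr))
            return false;          // fragment discarded
        if (doWrite)
            *zptr = newz;          // store z >> 16
        return true;
    }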
786 void GGLAssembler::build_iterate_z(const fragment_parts_t& parts)
793 ADD(AL, 0, parts.z.reg, parts.z.reg, dzdx);
797 void GGLAssembler::build_iterate_f(const fragment_parts_t& /*parts*/)