/prebuilts/python/linux-x86/2.7.5/lib/python2.7/site-packages/setoolsgui/networkx/linalg/tests/
  test_laplacian.py
    57   Lsl = numpy.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0.],
    67   assert_almost_equal(nx.normalized_laplacian_matrix(self.Gsl), Lsl, decimal=3)
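
An unrelated hit: here Lsl is a numpy array, the expected normalized Laplacian of a test graph with self-loops (self.Gsl), not the ARM shift. networkx's normalized_laplacian_matrix implements the symmetrically normalized Laplacian

    L_sym = I - D^(-1/2) A D^(-1/2)

for adjacency matrix A and diagonal degree matrix D; a diagonal entry of 0.75 is consistent with a degree-4 node carrying a unit self-loop (1 - 1/4).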

/art/compiler/utils/
  assembler_thumb_test.cc
    340  __ mov(R3, ShifterOperand(R4, LSL, 4));
    347  __ mov(R8, ShifterOperand(R4, LSL, 4));
    [all...]

/art/compiler/utils/arm/
  assembler_arm32.h
    205  void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    216  void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_thumb2.h
    244  void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    255  void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_arm.h
    187  am_(am), is_immed_offset_(true), shift_(LSL) {
    191  am_(am), is_immed_offset_(false), shift_(LSL) {
    204  am_(Offset), is_immed_offset_(false), shift_(LSL) {
    622  virtual void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    633  virtual void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_arm32.cc
    [all...]
  assembler_thumb2.cc
    727  // However, there is no actual shift available, neither for ADD nor for MOV (ASR/LSR/LSL/ROR).
    917  case LSL: thumb_opcode = 0U /* 0b00 */; break;
    [all...]
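
The paired Lsl declarations above mirror ARM's two encodings: an immediate form whose shift amount is fixed in the instruction, and a register form whose amount is read at run time; setcc selects the flag-setting variant. A minimal plain-C++ sketch of the value semantics, assuming standard AArch32 shift behaviour (only the low byte of the shift register is used, and an LSL by 32 or more yields 0); the helper names are illustrative, not ART's:

    #include <cstdint>

    // Immediate form: Lsl(rd, rm, shift_imm) -- amount encoded in the
    // instruction (0..31 on AArch32).
    uint32_t LslImm(uint32_t rm, uint32_t shift_imm) {
      return rm << shift_imm;
    }

    // Register form: Lsl(rd, rm, rn) -- only the low byte of rn is used;
    // an amount of 32 or more shifts every bit out and yields 0.
    uint32_t LslReg(uint32_t rm, uint32_t rn) {
      uint32_t amount = rn & 0xffu;
      return amount < 32 ? rm << amount : 0;
    }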

/external/v8/src/compiler/arm64/
  code-generator-arm64.cc
    277  ASSEMBLE_SHIFT(Lsl, 64);
    280  ASSEMBLE_SHIFT(Lsl, 32);
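
ASSEMBLE_SHIFT is a local helper macro in code-generator-arm64.cc that emits Lsl for the given operand width; its exact expansion is not shown in these hits. A hedged sketch of the decision such a macro has to encode, assuming AArch64 semantics where register shift amounts wrap modulo the operand width (names are illustrative, not V8's):

    #include <cstdint>

    // Illustrative only: dispatch between register and immediate shift
    // amounts for a 32- or 64-bit logical shift left.
    uint64_t AssembleLsl(uint64_t src, uint64_t amount,
                         bool amount_is_register, unsigned width) {
      unsigned shift = amount_is_register
                           ? static_cast<unsigned>(amount % width)
                           : static_cast<unsigned>(amount);
      uint64_t result = src << shift;
      return width == 64 ? result : (result & 0xffffffffu);
    }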

/external/v8/src/arm64/
  builtins-arm64.cc
    447  Operand(prealloc_fields, LSL, kPointerSizeLog2));
    451  __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
    465  __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
    598  __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
    726  __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
    [all...]
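
Every hit above is the same idiom: Add(dst, base, Operand(n, LSL, kPointerSizeLog2)) folds the scale into the add, computing base + n * sizeof(pointer), typically the end address of n pointer-sized slots. In plain C++, with kPointerSizeLog2 assumed to be 3 (8-byte pointers):

    #include <cstdint>

    constexpr unsigned kPointerSizeLog2 = 3;  // assumption: 64-bit target

    // __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
    // computes the address just past argc pointer-sized arguments.
    uintptr_t ArgvEnd(uintptr_t argv, uint64_t argc) {
      return argv + (argc << kPointerSizeLog2);  // argv + argc * 8
    }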

  macro-assembler-arm64-inl.h
    888  void MacroAssembler::Lsl(const Register& rd,
    893  lsl(rd, rn, shift);
    897  void MacroAssembler::Lsl(const Register& rd,
    [all...]
  macro-assembler-arm64.h
    440  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
    441  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
    [all...]
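
Like ART's assembler above, V8's macro assembler declares an immediate overload and a register overload, but the AArch64 register form (LSLV) takes its amount modulo the register width rather than saturating to zero. The contrast in plain C++ (illustrative helper names):

    #include <cstdint>

    // Lsl(rd, rn, shift): amount fixed at assembly time.
    uint64_t LslImm64(uint64_t rn, unsigned shift) { return rn << shift; }

    // Lsl(rd, rn, rm): LSLV semantics -- amount is rm modulo the register
    // width (64 for x registers, 32 for w registers).
    uint64_t LslReg64(uint64_t rn, uint64_t rm) { return rn << (rm & 63); }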

  codegen-arm64.cc
    170  __ Lsl(array_size, length, kDoubleSizeLog2);
    200  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
    279  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
    297  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
    611  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
    613  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  code-stubs-arm64.cc
    193  __ Lsl(result, mantissa, exponent);
    [all...]
  lithium-codegen-arm64.cc
    [all...]
  macro-assembler-arm64.cc
    461  return Operand(dst, LSL, shift_low);
    [all...]
  full-codegen-arm64.cc
    [all...]

/art/compiler/optimizing/
  code_generator_arm.cc
    [all...]
  code_generator_arm64.cc
    [all...]

/external/v8/test/cctest/
  test-assembler-arm64.cc
    297  __ Mvn(w2, Operand(w0, LSL, 1));
    298  __ Mvn(x3, Operand(x1, LSL, 2));
    370  __ Mov(w13, Operand(w11, LSL, 1));
    371  __ Mov(x14, Operand(x12, LSL, 2));
    527  __ Orr(w3, w0, Operand(w1, LSL, 28));
    528  __ Orr(x4, x0, Operand(x1, LSL, 32));
    624  __ Orn(w3, w0, Operand(w1, LSL, 4));
    625  __ Orn(x4, x0, Operand(x1, LSL, 4));
    693  __ And(w3, w0, Operand(w1, LSL, 4));
    694  __ And(x4, x0, Operand(x1, LSL, 4))
    [all...]
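
These tests exercise the shifted-register operand form: the second source is shifted left before the logical operation is applied, at no extra instruction cost. What three of the hits compute, as plain C++ (illustrative):

    #include <cstdint>

    // __ Orr(w3, w0, Operand(w1, LSL, 28));  =>  w3 = w0 | (w1 << 28)
    uint32_t OrrShifted(uint32_t w0, uint32_t w1) { return w0 | (w1 << 28); }

    // __ Mvn(w2, Operand(w0, LSL, 1));       =>  w2 = ~(w0 << 1)
    uint32_t MvnShifted(uint32_t w0) { return ~(w0 << 1); }

    // __ Orn(w3, w0, Operand(w1, LSL, 4));   =>  w3 = w0 | ~(w1 << 4)
    uint32_t OrnShifted(uint32_t w0, uint32_t w1) { return w0 | ~(w1 << 4); }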

/external/v8/src/ic/arm64/
  ic-arm64.cc
    201  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
    292  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
    298  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
    330  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
    522  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
    555  __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    570  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
    579  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
    [all...]
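
In these hits LSL sits inside MemOperand, i.e. the scaled register-offset addressing mode: the load reads from base + (index << scale) in a single instruction. With kPointerSizeLog2 again assumed to be 3, that is ordinary array indexing:

    #include <cstdint>

    // __ Ldr(result, MemOperand(base, index, LSL, kPointerSizeLog2));
    // effective address = base + (index << 3), i.e. base[index] for
    // 8-byte slots (the pointer size is an assumption).
    uint64_t LoadSlot(const uint64_t* base, uint64_t index) {
      return base[index];
    }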

/external/vixl/src/vixl/a64/
  macro-assembler-a64.h
    [all...]

/external/vixl/test/
  test-assembler-a64.cc
    290  __ Mvn(w2, Operand(w0, LSL, 1));
    291  __ Mvn(x3, Operand(x1, LSL, 2));
    463  __ Mov(w13, Operand(w11, LSL, 1));
    464  __ Mov(x14, Operand(x12, LSL, 2));
    522  __ Orr(w3, w0, Operand(w1, LSL, 28));
    523  __ Orr(x4, x0, Operand(x1, LSL, 32));
    616  __ Orn(w3, w0, Operand(w1, LSL, 4));
    617  __ Orn(x4, x0, Operand(x1, LSL, 4));
    683  __ And(w3, w0, Operand(w1, LSL, 4));
    684  __ And(x4, x0, Operand(x1, LSL, 4))
    [all...]