
Lines Matching refs:Addr

261 Int find_chunk_for_OLD ( Addr       ptr, 
267 Addr a_lo, a_hi;
272 a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
284 Int find_chunk_for ( Addr ptr,
288 Addr a_mid_lo, a_mid_hi;
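
The two lookups above appear to be the old linear scan (find_chunk_for_OLD, with its per-chunk a_lo/a_hi bounds) and its replacement find_chunk_for, a binary search over chunks ordered by start address, where a_mid_lo/a_mid_hi bound the middle chunk. A minimal standalone sketch of that search, using illustrative stand-ins (uintptr_t for Addr, a simplified chunk struct) rather than memcheck's actual MC_Chunk layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for memcheck's Addr and MC_Chunk. */
    typedef uintptr_t addr_t;
    typedef struct { addr_t data; size_t szB; } chunk_t;

    /* Return the index of the chunk whose [data, data+szB) range contains
       ptr, or -1 if none does.  Assumes chunks[] is sorted by ascending
       start address and the ranges do not overlap. */
    static int find_chunk_for_sketch(addr_t ptr, chunk_t **chunks, int n_chunks)
    {
       int lo = 0, hi = n_chunks - 1;
       while (lo <= hi) {
          int mid = lo + (hi - lo) / 2;
          addr_t a_mid_lo = chunks[mid]->data;
          addr_t a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
          if (ptr < a_mid_lo)       hi = mid - 1;  /* ptr lies below this chunk */
          else if (ptr >= a_mid_hi) lo = mid + 1;  /* ptr lies above this chunk */
          else                      return mid;    /* a_mid_lo <= ptr < a_mid_hi */
       }
       return -1;
    }

The real function carries extra handling (e.g. for zero-sized blocks) that this sketch leaves out.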
522 lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
611 static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
629 Addr scan;
630 Addr scan_max;
641 scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
643 if (scan_max > seg->end - sizeof(Addr))
644 scan_max = seg->end - sizeof(Addr);
645 for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
646 Addr pot_fn = *((Addr *)scan);
658 pot_fn = *((Addr *)pot_fn);
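
Lines 629-658 are the vtable heuristic: starting at ptr, up to VTABLE_MAX_CHECK word-sized slots are read, capped so the scan never runs past seg->end, and each potential function pointer is checked for plausibility; the second dereference at line 658 evidently exists for targets where function pointers go through descriptors. A rough, self-contained sketch of the bounded scan shape only (memcheck's actual accept/reject rule is more involved), with the plausibility tests supplied by the caller as placeholders for the real address-space-manager queries:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t addr_t;

    /* Caller-supplied predicates, e.g. "this word is readable" and "this
       value points into executable code"; placeholders only. */
    typedef bool (*addr_pred_t)(addr_t);

    enum { VTABLE_MAX_CHECK_SKETCH = 20 };   /* illustrative bound */

    /* Does ptr look like a vtable pointer?  Accept it if the first few
       word-sized slots it points at hold zero or plausible code addresses,
       never scanning past the end of the containing segment. */
    static bool looks_like_vtable(addr_t ptr, addr_t seg_end,
                                  addr_pred_t word_is_readable,
                                  addr_pred_t is_plausible_code_addr)
    {
       addr_t scan_max = ptr + VTABLE_MAX_CHECK_SKETCH * sizeof(addr_t);
       if (scan_max > seg_end - sizeof(addr_t))
          scan_max = seg_end - sizeof(addr_t);

       for (addr_t scan = ptr; scan <= scan_max; scan += sizeof(addr_t)) {
          if (!word_is_readable(scan))
             return false;
          addr_t pot_fn = *(addr_t *)scan;
          if (pot_fn == 0)
             continue;                       /* empty slot: keep scanning */
          if (!is_plausible_code_addr(pot_fn))
             return false;                   /* not a believable function pointer */
       }
       return true;
    }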
684 static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
746 Addr first_addr;
747 Addr inner_addr;
749 // Avoid the call to is_vtable_addr when the addr is not
757 inner_addr = *((Addr*)ptr);
759 && inner_addr >= (Addr)VKI_PAGE_SIZE
761 first_addr = *((Addr*)ch->data);
763 && first_addr >= (Addr)VKI_PAGE_SIZE
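
The reads at lines 757 and 761 fetch the word stored at the candidate pointer and the first word of the block, and both are rejected early when they fall below VKI_PAGE_SIZE; together with the alignment requirement implied by the function name above, this filters out small integers before the comparatively expensive vtable scan runs. A tiny sketch of that pre-filter (the page size value is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t addr_t;

    #define PAGE_SIZE_SKETCH 4096u   /* stand-in for VKI_PAGE_SIZE */

    /* Only candidates that are word-aligned and above page 0 are worth
       the vtable scan; anything else is almost certainly a small integer
       or a misaligned value read out of the block. */
    static bool worth_vtable_check(addr_t candidate)
    {
       return (candidate % sizeof(addr_t)) == 0
              && candidate >= (addr_t)PAGE_SIZE_SKETCH;
    }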
780 lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
838 lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
847 lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
889 lc_push_if_a_chunk_ptr(Addr ptr,
900 static volatile Addr bad_scanned_addr;
903 void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
906 VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
908 bad_scanned_addr = addr;
943 lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
945 Addr searched, SizeT szB)
948 on a multiple of sizeof(Addr). So, we can (and must) skip the begin and
949 end portions of the block if they are not aligned on sizeof(Addr):
957 Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
958 const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
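
The scan boundaries at lines 957-958 are word-aligned: the start is rounded up and the end rounded down to a multiple of sizeof(Addr), so the ragged edges of the block, which cannot hold an aligned pointer, are never read. A small worked example of that rounding, with VG_ROUNDUP/VG_ROUNDDN approximated by power-of-two bit masks (sizeof(Addr) is a power of two on all supported targets):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t addr_t;

    /* Approximations of VG_ROUNDDN / VG_ROUNDUP for power-of-two alignment. */
    #define ROUNDDN(a, sz) ((addr_t)(a) & ~((addr_t)(sz) - 1))
    #define ROUNDUP(a, sz) ROUNDDN((addr_t)(a) + (sz) - 1, sz)

    int main(void)
    {
       /* With 8-byte words, a block at [0x1003, 0x1025) is scanned as
          [0x1008, 0x1020): the ragged 5 bytes at the front and 5 at the
          back can never hold an aligned pointer, so they are skipped. */
       assert(ROUNDUP(0x1003, 8) == 0x1008);
       assert(ROUNDDN(0x1003 + 0x22, 8) == 0x1020);
       return 0;
    }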
981 } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
989 between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
1015 // On other platforms, just skip one Addr.
1016 lc_sig_skipped_szB += sizeof(Addr);
1017 tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
1018 tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
1019 ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable, - skip it.
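
Lines 900-908 and 1015-1019 together show how unreadable words are survived: a fault catcher records the faulting address in bad_scanned_addr, control returns into lc_scan_memory, and the loop resumes one word past the fault (the asserts check the recorded address really lies inside the rounded scan range). A self-contained sketch of the same pattern using POSIX signals and sigsetjmp, which is only an approximation of Valgrind's internal fault-catcher machinery:

    #include <setjmp.h>
    #include <signal.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef uintptr_t addr_t;

    static sigjmp_buf memscan_jmpbuf;
    static volatile addr_t bad_scanned_addr;

    /* Fault catcher: remember where the bad access happened and jump back
       into the scan loop, which resumes one word further on. */
    static void scan_catcher(int signo, siginfo_t *info, void *uctx)
    {
       (void)uctx;
       bad_scanned_addr = (addr_t)info->si_addr;
       siglongjmp(memscan_jmpbuf, signo);
    }

    /* Scan [start, start+len) one word at a time, skipping words that turn
       out to be unaddressable.  Returns how many nonzero words were seen;
       the real scanner instead classifies each word as a possible pointer. */
    static size_t scan_words(addr_t start, size_t len)
    {
       struct sigaction sa, old_segv, old_bus;
       memset(&sa, 0, sizeof sa);
       sa.sa_sigaction = scan_catcher;
       sa.sa_flags = SA_SIGINFO;
       sigemptyset(&sa.sa_mask);
       sigaction(SIGSEGV, &sa, &old_segv);
       sigaction(SIGBUS,  &sa, &old_bus);

       /* Round start up and end down to a word boundary, as at 957-958. */
       addr_t ptr = (start + sizeof(addr_t) - 1) & ~(sizeof(addr_t) - 1);
       addr_t end = (start + len) & ~(sizeof(addr_t) - 1);
       size_t found = 0;

       if (sigsetjmp(memscan_jmpbuf, 1) != 0)
          ptr = bad_scanned_addr + sizeof(addr_t);   /* skip the bad word */

       for (; ptr < end; ptr += sizeof(addr_t))
          if (*(volatile addr_t *)ptr != 0)
             found++;

       sigaction(SIGSEGV, &old_segv, NULL);
       sigaction(SIGBUS,  &old_bus, NULL);
       return found;
    }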
1023 Addr addr;
1035 if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
1042 lc_scanned_szB += sizeof(Addr);
1044 addr = *(Addr *)ptr;
1048 if (addr >= searched && addr < searched + szB) {
1049 if (addr == searched) {
1057 ptr, (long unsigned) addr - searched, searched);
1059 if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
1062 if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
1065 ch->data, addr, pp_heuristic(h));
1077 lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
1082 ptr += sizeof(Addr);
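
Inside the loop, each word read at line 1044 is first tested against the optional searched range (lines 1048-1057 report direct and interior hits) and then handed to the chunk-pointer logic at 1059-1077. A small sketch of the hit test alone, with illustrative message text rather than memcheck's exact output:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;

    /* Does the word stored at 'ptr' point into [searched, searched+szB)?
       Report a direct hit, or an interior hit with its offset. */
    static bool report_if_points_in(addr_t ptr, addr_t word,
                                    addr_t searched, size_t szB)
    {
       if (word < searched || word >= searched + szB)
          return false;
       if (word == searched)
          printf("*%#lx points at %#lx\n",
                 (unsigned long)ptr, (unsigned long)searched);
       else
          printf("*%#lx points %lu bytes inside %#lx\n",
                 (unsigned long)ptr,
                 (unsigned long)(word - searched), (unsigned long)searched);
       return true;
    }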
1581 static void scan_memory_root_set(Addr searched, SizeT szB)
1585 Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );
1645 tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
1646 tl_assert((SM_SIZE % sizeof(Addr)) == 0);
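
scan_memory_root_set (line 1581) walks the client's mapped segments, obtained from VG_(get_segment_starts), and feeds the scannable ones to the memory scanner; the asserts at 1645-1646 guarantee that the page and secondary-map sizes are word multiples, so the scan loop's alignment reasoning holds. As a stand-in for that segment walk, here is a Linux-only sketch that enumerates readable mappings from /proc/self/maps and hands each one to a hypothetical scan_segment callback (memcheck does the equivalent through its address-space manager and additionally excludes segments it must not scan):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t addr_t;

    /* Hypothetical per-segment scanner; memcheck calls lc_scan_memory here. */
    static void scan_segment(addr_t start, size_t len)
    {
       (void)start; (void)len;   /* placeholder */
    }

    /* Enumerate readable mappings of the current process and scan each. */
    static void scan_root_set_sketch(void)
    {
       FILE *maps = fopen("/proc/self/maps", "r");
       if (!maps)
          return;
       unsigned long lo, hi;
       char perms[8];
       while (fscanf(maps, "%lx-%lx %7s%*[^\n]", &lo, &hi, perms) == 3)
          if (perms[0] == 'r')                 /* readable segment */
             scan_segment((addr_t)lo, (size_t)(hi - lo));
       fclose(maps);
    }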
1710 Addr start1 = ch1->data;
1711 Addr start2 = ch2->data;
1712 Addr end1 = ch1->data + ch1->szB - 1;
1713 Addr end2 = ch2->data + ch2->szB - 1;
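
Lines 1710-1713 compute inclusive start/end addresses for a pair of chunks, which appears to be the setup for detecting blocks that overlap each other. A minimal sketch of that interval test (zero-sized blocks would need the special-casing memcheck applies elsewhere):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t addr_t;
    typedef struct { addr_t data; size_t szB; } chunk_t;

    /* Two blocks [data, data+szB-1] overlap iff neither one ends before
       the other begins. */
    static bool chunks_overlap(const chunk_t *ch1, const chunk_t *ch2)
    {
       addr_t start1 = ch1->data;
       addr_t start2 = ch2->data;
       addr_t end1   = ch1->data + ch1->szB - 1;
       addr_t end2   = ch2->data + ch2->szB - 1;
       return !(end1 < start2 || end2 < start1);
    }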
1837 static Addr searched_wpa;
1840 search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
1856 void MC_(who_points_at) ( Addr address, SizeT szB)
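
The last three lines belong to the who-points-at machinery: searched_wpa holds the address being looked for, search_address_in_GP_reg is the per-register callback applied to the guest general-purpose registers, and MC_(who_points_at) drives the search over the scanned root set and heap blocks. This backs memcheck's who_points_at monitor command; a gdb session attached through vgdb would issue something like the following (address and length illustrative, output omitted):

    (gdb) monitor who_points_at 0x4a2e050 16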