/* -*- mode: C; c-basic-offset: 3; -*- */
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2010 Bart Van Assche <bvanassche (at) acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#ifndef __DRD_THREAD_BITMAP_H
#define __DRD_THREAD_BITMAP_H


#include "drd_bitmap.h"
#include "drd_thread.h" /* running_thread_get_segment() */
#include "pub_drd_bitmap.h"


static __inline__
Bool bm_access_load_1_triggers_conflict(const Addr a1)
{
   DRD_(bm_access_load_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
   return DRD_(bm_load_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                            a1);
}

static __inline__
Bool bm_access_load_2_triggers_conflict(const Addr a1)
{
   if ((a1 & 1) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                             a1, 2);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 2);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 2, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 2, eLoad);
   }
}

static __inline__
Bool bm_access_load_4_triggers_conflict(const Addr a1)
{
   if ((a1 & 3) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                             a1, 4);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 4);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 4, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 4, eLoad);
   }
}

static __inline__
Bool bm_access_load_8_triggers_conflict(const Addr a1)
{
   if ((a1 & 7) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                             a1, 8);
      return bm_aligned_load_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                               a1, 8);
   }
   else if ((a1 & 3) == 0)
   {
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                             a1 + 0, 4);
      bm_access_aligned_load(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                             a1 + 4, 4);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eLoad);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 8, eLoad);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eLoad);
   }
}

static __inline__
Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
{
   DRD_(bm_access_range_load)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1, a2);
   return DRD_(bm_load_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                          a1, a2);
}
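
/*
 * The bm_access_load_*_triggers_conflict() helpers above and the
 * bm_access_store_*_triggers_conflict() helpers below follow the same
 * pattern: record the access in the bitmap of the running thread's current
 * segment and return True if the access conflicts with the thread's
 * conflict set. The 2, 4 and 8-byte variants take an aligned fast path
 * when the address is suitably aligned and fall back to a range-based
 * update otherwise; the 8-byte variant additionally splits a
 * 4-byte-aligned access into two aligned 4-byte accesses.
 *
 * Illustrative use from load/store instrumentation (the exact reporting
 * call depends on the rest of the tool and is not shown here):
 *
 *    if (bm_access_load_4_triggers_conflict(addr))
 *       ... report a data race on the four bytes starting at addr ...
 */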
static __inline__
Bool bm_access_store_1_triggers_conflict(const Addr a1)
{
   DRD_(bm_access_store_1)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()), a1);
   return DRD_(bm_store_1_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                             a1);
}

static __inline__
Bool bm_access_store_2_triggers_conflict(const Addr a1)
{
   if ((a1 & 1) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1, 2);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 2);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 2, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 2, eStore);
   }
}

static __inline__
Bool bm_access_store_4_triggers_conflict(const Addr a1)
{
   if ((a1 & 3) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1, 4);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 4);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 4, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 4, eStore);
   }
}

static __inline__
Bool bm_access_store_8_triggers_conflict(const Addr a1)
{
   if ((a1 & 7) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1, 8);
      return bm_aligned_store_has_conflict_with(DRD_(thread_get_conflict_set)(),
                                                a1, 8);
   }
   else if ((a1 & 3) == 0)
   {
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1 + 0, 4);
      bm_access_aligned_store(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                              a1 + 4, 4);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eStore);
   }
   else
   {
      DRD_(bm_access_range)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                            a1, a1 + 8, eStore);
      return DRD_(bm_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                        a1, a1 + 8, eStore);
   }
}

static __inline__
Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
{
   DRD_(bm_access_range_store)(DRD_(sg_bm)(DRD_(running_thread_get_segment)()),
                               a1, a2);
   return DRD_(bm_store_has_conflict_with)(DRD_(thread_get_conflict_set)(),
                                           a1, a2);
}

#endif // __DRD_THREAD_BITMAP_H