/*
 * Copyright (c) 2017 Richard Palethorpe <rpalethorpe@suse.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * A basic regression test for tst_atomic_{load,store}. It also provides a
 * limited check that atomic stores and loads order non-atomic memory
 * accesses, that is, that they implement memory fences or barriers.
 *
 * Many architectures/machines will still pass the test even if you remove
 * the atomic functions. x86 in particular has strong memory ordering by
 * default, so it should always pass (provided volatile is used). However,
 * AArch64 (e.g. a Raspberry Pi 3 Model B) has been observed to fail without
 * the atomic functions.
 *
 * A failure can occur if an update to seq_n is not made globally visible by
 * the time the next thread needs to use it.
 */

#include <stdint.h>
#include <pthread.h>
#include "tst_test.h"
#include "tst_atomic.h"

#define THREADS 64
#define FILLER (1 << 20)

/* Uncomment these to see what happens without atomics. To prevent the
 * compiler from removing or reordering accesses to atomic and seq_n, also
 * mark those variables as volatile.
 */
/* #define tst_atomic_load(v) (*(v)) */
/* #define tst_atomic_store(i, v) *(v) = (i) */
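
/* For comparison, on toolchains that provide the GCC/Clang __atomic
 * builtins, the real tst_atomic.h helpers use sequentially consistent
 * operations, roughly equivalent to the following sketch (not the exact
 * implementation):
 *
 * #define tst_atomic_load(v) __atomic_load_n((v), __ATOMIC_SEQ_CST)
 * #define tst_atomic_store(i, v) __atomic_store_n((v), (i), __ATOMIC_SEQ_CST)
 */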

struct block {
	int seq_n;
	intptr_t id;
	intptr_t filler[FILLER];
};

static int atomic;
/* Instead of storing seq_n on the stack (probably next to the atomic variable
 * above), we store it in the middle of some anonymously mapped memory and keep
 * a pointer to it. This should decrease the probability that the value of
 * seq_n will be synchronised between processors as a byproduct of the atomic
 * variable being updated.
 */
static int *seq_n;
static struct block *m;

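/* Each worker waits for its turn, appends its id to the sequence and then
 * hands over to the next worker: worker 'id' spins until atomic == id,
 * writes its id at index *seq_n, increments *seq_n and finally stores
 * id + 1 to atomic. The atomic store must publish the two plain writes
 * before the next worker's atomic load observes the new value, otherwise
 * that worker may read a stale *seq_n and overwrite an earlier entry.
 */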
static void *worker_load_store(void *aid)
{
	int id = (intptr_t)aid, i;

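	/* Busy-wait until it is this worker's turn, i.e. atomic == id. */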
	for (i = tst_atomic_load(&atomic);
	     i != id;
	     i = tst_atomic_load(&atomic))
		;

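	/* Append our id at the current sequence position, bump the sequence
	 * counter and release worker id + 1 via the atomic store.
	 */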
	(m + (*seq_n))->id = id;
	*seq_n += 1;
	tst_atomic_store(i + 1, &atomic);

	return NULL;
}

/* Attempt to stress the memory subsystem so that memory operations are
 * contended and less predictable. This should increase the likelihood of a
 * failure if a memory fence is missing.
 */
static void *mem_spam(void *vp LTP_ATTRIBUTE_UNUSED)
{
	intptr_t i = 0, j;
	struct block *cur = m;

	tst_res(TINFO, "Memory spammer started");
	while (tst_atomic_load(&atomic) > 0) {
		for (j = 0; j < FILLER; j++)
			cur->filler[j] = j;

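		/* Move on to the next block, wrapping around to the first. */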
		if (i < THREADS - 1) {
			cur = m + (++i);
		} else {
			i = 0;
			cur = m;
		}
	}

	return NULL;
}

static void do_test(void)
{
	intptr_t i, id;
	pthread_t threads[THREADS + 1];

	atomic = 0;
	m = SAFE_MMAP(NULL, sizeof(*m) * THREADS,
		      PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS,
		      -1, 0);
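	/* Keep seq_n in the middle block of the mapping, well away from the
	 * atomic variable, as described above.
	 */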
	seq_n = &((m + THREADS / 2)->seq_n);

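	/* Start the memory spammer, then create the workers in reverse order;
	 * worker 0 is created last and can take its turn straight away since
	 * atomic is still 0.
	 */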
	pthread_create(&threads[THREADS], NULL, mem_spam, NULL);
	for (i = THREADS - 1; i >= 0; i--)
		pthread_create(&threads[i], NULL, worker_load_store, (void *)i);

	for (i = 0; i < THREADS; i++) {
		tst_res(TINFO, "Joining thread %li", i);
		pthread_join(threads[i], NULL);
	}
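	/* All workers are done; a negative value makes mem_spam() exit. */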
	tst_atomic_store(-1, &atomic);
	pthread_join(threads[THREADS], NULL);

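	/* Each turn i should have been taken by worker i, so m[i].id == i. */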
	tst_res(TINFO, "Expected\tFound");
	for (i = 0; i < THREADS; i++) {
		id = (m + i)->id;
		if (id != i)
			tst_res(TFAIL, "%d\t\t%d", (int)i, (int)id);
		else
			tst_res(TPASS, "%d\t\t%d", (int)i, (int)id);
	}

	SAFE_MUNMAP(m, sizeof(*m) * THREADS);
}

static struct tst_test test = {
	.test_all = do_test,
};