21 #this example will not run if the one-sided operations are simply
22 #implemented on top of MPI_Isends and Irecvs
35 lockall_dt 4 timeLimit=240
36 lockall_dt_flush 4 timeLimit=240
37 lockall_dt_flushall 4 timeLimit=240
38 lockall_dt_flushlocal 4 timeLimit=240
39 lockall_dt_flushlocalall 4 timeLimit=240
40 lock_contention_dt 4 timeLimit=240
53 #fetchandadd_tree_am 7
55 test1_dt 2 timeLimit=30
59 #Needs MPI_Win_call_errhandler
71 #needs MPI_Type_create_subarray
72 #strided_acc_subarray 2
74 strided_putget_indexed 4
75 #strided_putget_indexed_shared 4 mpiversion=3.0
76 strided_getacc_indexed 4
77 #strided_getacc_indexed_shared 4 mpiversion=3.0
83 #win_flavors 4 mpiversion=3.0
84 #win_flavors 3 mpiversion=3.0
85 manyrma2 2 timeLimit=500
87 #win_shared 4 mpiversion=3.0
88 #win_shared_create_allocshm 4 mpiversion=3.0
89 #win_shared_create_no_allocshm 4 mpiversion=3.0
90 #win_shared_noncontig 4 mpiversion=3.0
91 #win_shared_noncontig_put 4 mpiversion=3.0
92 #win_zero 4 mpiversion=3.0
95 #issues with concurrent updates..
96 #linked_list 4 mpiversion=3.0
97 #linked_list_fop 4 mpiversion=3.0
100 #fetch_and_op_char 4 mpiversion=3.0
101 #fetch_and_op_short 4 mpiversion=3.0
102 #fetch_and_op_int 4 mpiversion=3.0
103 #fetch_and_op_long 4 mpiversion=3.0
104 #fetch_and_op_double 4 mpiversion=3.0
105 #fetch_and_op_long_double 4 mpiversion=3.0
106 #get_accumulate_double 4 mpiversion=3.0
107 #get_accumulate_double_derived 4 mpiversion=3.0
109 #get_accumulate_int_derived 4 mpiversion=3.0
110 #get_accumulate_long 4 mpiversion=3.0
111 #get_accumulate_long_derived 4 mpiversion=3.0
112 #get_accumulate_short 4 mpiversion=3.0
113 #get_accumulate_short_derived 4 mpiversion=3.0
117 rput_local_comp 2 mpiversion=3.0
118 racc_local_comp 2 mpiversion=3.0
120 #issues with concurrent updates..
121 #linked_list_lockall 4 mpiversion=3.0
123 linked_list_bench_lock_all 4
124 linked_list_bench_lock_excl 4 mpiversion=3.0
125 #linked_list_bench_lock_shr 4 mpiversion=3.0
126 #linked_list_bench_lock_shr_nocheck 4 mpiversion=3.0
127 #mutex_bench_shm 4 mpiversion=3.0
128 #mutex_bench_shm_ordered 4 mpiversion=3.0
129 rma-contig 2 timeLimit=720
132 #fence_shm 2 mpiversion=3.0
133 #mutex_bench 4 mpiversion=3.0
134 #mutex_bench_shared 4 mpiversion=3.0
135 win_shared_zerobyte 4 mpiversion=3.0
136 win_shared_put_flush_get 4 mpiversion=3.0
142 #atomic_get 3 mpiversion=3.0 timeLimit=300
143 #aint 2 mpiversion=3.1
146 #derived-acc-flush_local 3 mpiversion=3.0
147 #large-acc-flush_local 3 mpiversion=3.0
149 #win_shared_put_flush_load 3 mpiversion=3.0
150 #win_shared_acc_flush_load 3 mpiversion=3.0
151 #win_shared_gacc_flush_load 3 mpiversion=3.0
152 #win_shared_fop_flush_load 3 mpiversion=3.0
153 #win_shared_cas_flush_load 3 mpiversion=3.0
154 #put_flush_get 3 mpiversion=3.0
155 #acc_flush_get 3 mpiversion=3.0
156 #gacc_flush_get 3 mpiversion=3.0
157 #fop_flush_get 3 mpiversion=3.0
158 #cas_flush_get 3 mpiversion=3.0
159 #We still have an issue here, unlock should finish R* calls, but this causes issues.
168 ## This test is not strictly correct. This was meant to test out the
169 ## case when MPI_Test is not nonblocking. However, we ended up
170 ## assuming that MPI_Win_lock will be nonblocking. That is not
171 ## specified by the standard and might not be true. Commenting this
172 ## out till we find a better way to test the original problem with
174 # nb_test 2 mpiversion=3.0 xfail=ticket1910