- have Group::rank check for the parent process in case it is called by a subprocess.
- add SMPI_thread_create, which is rather empty, as there is apparently no need for user_data anymore (?) (see the usage sketch below).
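For illustration only (not part of the patch): a minimal sketch of how an extra, non-MPI actor spawned by an MPI rank could combine the two changes. Assumptions: the helper calls SMPI_thread_create() once before its first MPI call (the patch only defines that function and shows no call site), and helper()/spawn_helper() are made-up names.

#include <mpi.h>
#include <simgrid/actor.h>
#include <simgrid/host.h>
#include <smpi/smpi.h>

/* Hypothetical helper, run as an extra actor next to an MPI rank. */
static int helper(int argc, char* argv[])
{
  (void)argc;
  (void)argv;
  SMPI_thread_create(); /* assumption: announce this extra actor to the tracing machinery */
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* resolves to the parent's rank via the Group::rank fallback */
  /* ... MPI calls such as MPI_Wait() on requests shared with the parent ... */
  return 0;
}

/* Called from inside an MPI rank: start the helper on the same host. */
static void spawn_helper(void)
{
  sg_actor_t actor = sg_actor_init("helper", sg_host_self());
  sg_actor_start(actor, helper, 0, NULL);
}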
XBT_PUBLIC void SMPI_app_instance_register(const char* name, xbt_main_func_t code, int num_processes);
XBT_PUBLIC void SMPI_init();
XBT_PUBLIC void SMPI_finalize();
+XBT_PUBLIC void SMPI_thread_create();
if(smpi_init_sleep > 0)
simgrid::s4u::this_actor::sleep_for(smpi_init_sleep);
}
+
+void SMPI_thread_create() {
+ TRACE_smpi_init(simgrid::s4u::this_actor::get_pid());
+}
int Group::rank(s4u::Actor* actor)
{
auto iterator = actor_to_rank_map_.find(actor);
+ // I'm not in the communicator... but maybe my parent is?
+ if (iterator == actor_to_rank_map_.end())
+ iterator = actor_to_rank_map_.find(s4u::Actor::by_pid(actor->get_ppid()).get());
return (iterator == actor_to_rank_map_.end()) ? MPI_UNDEFINED : (*iterator).second;
}
struct threadwrap* t = (struct threadwrap*)sg_actor_self_data();
XBT_INFO("new thread has parameter rank %d and global variable rank %d", ((struct param*)(t->param))->rank,
the_global_rank);
- sg_actor_self_data_set(t->father_data);
+ sg_actor_self_data_set(NULL);
free(t);
}
static void mpi_thread_create(const char* name, void* (*f)(void*), void* param)
{
struct threadwrap* threadwrap = (struct threadwrap*)malloc(sizeof(*threadwrap));
- threadwrap->father_data = sg_actor_self_data();
threadwrap->f = f;
threadwrap->param = param;
sg_actor_t actor = sg_actor_init(name, sg_host_self());
struct param* param = (struct param*)bar;
int rank;
MPI_Status status;
+ char err_string[1024];
+ int length = 1024;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
XBT_INFO("%d has MPI rank %d and global variable rank %d", param->rank, rank, global_rank);
XBT_INFO("%d waiting request", rank);
- MPI_Wait(param->req, &status);
- XBT_INFO("%d request done", rank);
+ int ret = MPI_Wait(param->req, &status);
+ MPI_Error_string(ret, err_string, &length);
+ XBT_INFO("%d request done, return %s", rank, err_string);
XBT_INFO("%d still has MPI rank %d and global variable %d", param->rank, rank, global_rank);
free(param);
return NULL;
XBT_INFO("%d still has MPI rank %d and global variable %d", param->rank, rank, global_rank);
free(param);
return NULL;
> [Tremblay:0:(1) 0.000000] [smpi_test/INFO] I'm 0/2
> [Jupiter:1:(2) 0.000000] [smpi_test/INFO] I'm 1/2
> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] new thread has parameter rank 0 and global variable rank 0
-> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 has MPI rank -333 and global variable rank 0
-> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] -333 waiting request
-> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] -333 request done
-> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 still has MPI rank -333 and global variable 0
+> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 has MPI rank 0 and global variable rank 0
+> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 waiting request
+> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 request done, return MPI_SUCCESS
+> [Tremblay:wait send:(3) 0.000000] [smpi_test/INFO] 0 still has MPI rank 0 and global variable 0
> [Jupiter:wait recv:(4) 0.000000] [smpi_test/INFO] new thread has parameter rank 1 and global variable rank 1
-> [Jupiter:wait recv:(4) 0.000000] [smpi_test/INFO] 1 has MPI rank -333 and global variable rank 1
-> [Jupiter:wait recv:(4) 0.000000] [smpi_test/INFO] -333 waiting request
-> [Jupiter:wait recv:(4) 0.002945] [smpi_test/INFO] -333 request done
-> [Jupiter:wait recv:(4) 0.002945] [smpi_test/INFO] 1 still has MPI rank -333 and global variable 1
+> [Jupiter:wait recv:(4) 0.000000] [smpi_test/INFO] 1 has MPI rank 1 and global variable rank 1
+> [Jupiter:wait recv:(4) 0.000000] [smpi_test/INFO] 1 waiting request
+> [Jupiter:wait recv:(4) 0.002945] [smpi_test/INFO] 1 request done, return MPI_SUCCESS
+> [Jupiter:wait recv:(4) 0.002945] [smpi_test/INFO] 1 still has MPI rank 1 and global variable 1
> [Tremblay:0:(1) 1.000000] [smpi_test/INFO] finally 42
> [Jupiter:1:(2) 2.000000] [smpi_test/INFO] finally 42