mpi4pyExample.py

You can view and download this file on GitHub: mpi4pyExample.py

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This is an EXUDYN example
#
# Details:  This is an example for mpi4py
#
# on Linux/WSL run with:
#           mpiexec -n 9 python3 -m mpi4py.futures mpi4pyExample.py
#           n=9 means 8 workers plus 1 process running the main script
#           on a 4-core/8-thread CPU the optimum is reached with n=9 (1 core running at 15%, all other cores at around 95%)
#
# troubleshooting: you need to install mpi4py with conda; if your code starts n times, uninstall
#               all mpi4py versions (also if installed with pip; remove it with python -m pip uninstall mpi4py);
#               MAY NOT run with virtual environments (best results with conda base, Python 3.9 under Linux/WSL);
#               see the commented version check below this header
#
# Author:   Johannes Gerstmayr
# Date:     2023-03-17
#
# Copyright:This file is part of Exudyn. Exudyn is free software. You can redistribute it and/or modify it under the terms of the Exudyn license. See 'LICENSE.txt' for more details.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
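
# NOTE (added sketch, not part of the original example): to verify which mpi4py/MPI
# build is actually active, the following commented check can be run once in a plain
# (serial) Python session; mpi4py.__version__ and MPI.Get_library_version() are
# standard mpi4py attributes/functions:
#
# import mpi4py
# from mpi4py import MPI
# print('mpi4py version:', mpi4py.__version__)
# print('MPI library   :', MPI.Get_library_version())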


import exudyn as exu
from exudyn.utilities import *
from exudyn.processing import *
import time

import numpy as np
import sys

useMPI = True #True requires mpi4py to be installed




#function which creates and runs the model; executed in parallel!
def TestExudyn(parameterDict):

    #create an environment for mini example
    SC = exu.SystemContainer()
    mbs = SC.AddSystem()

    #default values, overridden by the parameter dictionary:
    x=1
    y=1000
    computationIndex = 0
    x = parameterDict['mass']
    y = parameterDict['stiffness']

    oGround=mbs.AddObject(ObjectGround(referencePosition= [0,0,0]))
    nGround = mbs.AddNode(NodePointGround(referenceCoordinates=[0,0,0]))

    node = mbs.AddNode(Node1D(referenceCoordinates = [0],
                              initialCoordinates=[(x-0.5)**2],
                              initialVelocities=[(y-0.2)**2]))
    mass = mbs.AddObject(Mass1D(nodeNumber = node, physicsMass=1))

    #assemble and solve system for default parameters
    mbs.Assemble()
    #exu.SolveDynamic(mbs, exu.SimulationSettings())

    h=1e-3
    tEnd = 100 #nominal: 10
    #tEnd = 1000
    simulationSettings = exu.SimulationSettings()
    simulationSettings.timeIntegration.numberOfSteps = int(tEnd/h)
    simulationSettings.timeIntegration.endTime = tEnd
    simulationSettings.solutionSettings.writeSolutionToFile = False #no concurrent writing to files ...!
    #exu.StartRenderer() #don't do this in parallelization: will crash
    exu.SolveDynamic(mbs, simulationSettings)
    #exu.StopRenderer() #don't do this in parallelization: will crash

    #check result: get current mass position at local position [0,0,0]
    result = mbs.GetObjectOutputBody(mass, exu.OutputVariableType.Position, [0,0,0])[0]
    #print("result ",x, "=",result)

    del mbs #don't forget to delete variables, otherwise memory may leak significantly
    del SC
    return result
    #final x-coordinate of position shall be 2

#now run parallelized parameter variation;
#make sure that this only runs in the main process:
if __name__ == '__main__':
    n=640
    start_time = time.time()
    print('parameter variation '+'with MPI'*useMPI)
    [p,v]=ParameterVariation(parameterFunction=TestExudyn,
                             parameters={'mass':(1.,1.,1), 'stiffness':(1000,2000,n)},
                             # debugMode=True,
                             addComputationIndex=True,
                             useMultiProcessing=True,
                             #numberOfThreads=8, #automatically determined by mpi4py routines in ParameterVariationList(...)
                             resultsFile='solution/resultsMPI.txt',
                             useMPI = useMPI,
                             )
    print("--- %s seconds ---" % (time.time() - start_time))
    #print("values=",v)
    print('sum=',np.array(v).sum()) #gives sum= 14931163024.24202 with default values

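# Hedged sketch (added, not part of the original example): for a quick serial
# debugging run without MPI or multiprocessing, a single parameter set can be
# evaluated directly in the main process, e.g.:
#
# if __name__ == '__main__':
#     print('single run:', TestExudyn({'mass':1., 'stiffness':1000., 'computationIndex':0}))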

# old, manual implementation of parameter variation with MPI
# if useMPI:
#     import mpi4py
#     from mpi4py import MPI
#
#     comm = MPI.COMM_WORLD
#     nprocs = comm.Get_size()
#     rank   = comm.Get_rank()
#     print('rank=', rank, ', size=', nprocs)
#
#     from mpi4py.futures import MPIPoolExecutor
#
#
# if __name__ == '__main__' and useMPI:
#     #MPI.Init()      # manual initialization of the MPI environment
#     print('mpi4py test program\n')
#     x=[]
#     y=np.arange(1,10)
#     #executor = MPIPoolExecutor(max_workers=8)
#     executor = MPIPoolExecutor()
#     #for result in executor.map(fmpi, [1,2,3,4]):
#     for i in range(n):
#         x+=[{'mass':1,
#              'stiffness':1000+1000*i/(n-1),
#              'computationIndex':i}]
#     #print('x=',x)
#     v=[]
#     if False:
#         start_time = time.time()
#         for result in executor.map(TestExudyn, x):
#             v.append(result)
#         print("--- %s seconds ---" % (time.time() - start_time))
#     else:
#         nVariations=n
#         import tqdm #progress bar
#         try: #_instances only available after first run!
#             tqdm.tqdm._instances.clear() #if there are open instances of tqdm, which lead to nasty newlines
#         except:
#             pass
#         useTQDM = True

#         start_time = time.time()
#         #for v in (tqdm.tqdm(p.imap(parameterFunction, vInput), total=nVariations)):
#         for result in (tqdm.tqdm(executor.map(TestExudyn, x), total=nVariations)):
#             v.append(result)
#         print("--- %s seconds ---" % (time.time() - start_time))

#     #print('rank=',rank)
#     print('sum=',np.array(v).sum())
#     #MPI.Finalize()  # manual finalization of the MPI environment
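
# Hedged sketch (added, not part of the original example): the bare mpi4py.futures
# pattern used above, reduced to a standalone form; MPIPoolExecutor and its map()
# method are part of the standard mpi4py.futures API. Run it the same way as this
# example, e.g. mpiexec -n 9 python3 -m mpi4py.futures script.py:
#
# from mpi4py.futures import MPIPoolExecutor
#
# def Square(x):
#     return x**2
#
# if __name__ == '__main__':
#     with MPIPoolExecutor() as executor:
#         results = list(executor.map(Square, range(8)))
#     print('results=', results)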