# coding: utf-8

# In[17]:

# Writing EnSight Gold binary (compatible with VTK 8).
# Parallelization borrowed from https://gist.github.com/fspaolo/51eaf5a20d6d418bd4d0

import numpy as np              # needs install
import os
import pandas                   # needs install
import sys
from mpi4py import MPI          # needs install
from multiprocessing import Queue

WORKTAG = 1
DIETAG = 0

comm = MPI.COMM_WORLD
my_rank = comm.Get_rank()
my_name = MPI.Get_processor_name()


# In[9]:

#-------------------------------------------------
#
# Parse arguments
#
#-------------------------------------------------
inputfolder = None
project_name = None
outputfolder = None
# If True, read mpio; if False, read alyabin
MPIO = None

try:
    if my_rank == 0:
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument("task_name", help='Name of the alya task')
        parser.add_argument("input_folder", help='Folder with input alyabins')
        parser.add_argument("output_folder", help='Folder for the output ensight case')
        parser.add_argument("--format", help='Format of the data to expect: mpio, alyabin, auto (default)',
                            default='auto')
        args = parser.parse_args()

        inputfolder = args.input_folder
        project_name = args.task_name
        outputfolder = args.output_folder

        if args.format == 'alyabin':
            MPIO = False
        elif args.format == 'mpio':
            MPIO = True
        elif args.format == 'auto':
            # Check what kind of files are present
            filename_mpio = os.path.join(inputfolder, project_name + '-LNODS.post.mpio.bin')
            filename_alyabin = os.path.join(inputfolder, project_name + '-LNODS.post.alyabin')
            mpio_file_present = os.path.isfile(filename_mpio)
            alyabin_file_present = os.path.isfile(filename_alyabin)
            assert mpio_file_present != alyabin_file_present, \
                "Found both alyabin and mpio files (or neither). Specify the format to use in the --format argument"
            if mpio_file_present:
                MPIO = True
            else:
                MPIO = False
        else:
            assert False, 'Unsupported format: ' + str(args.format)

        # Check that the input path exists
        import pathlib
        path = pathlib.Path(inputfolder)
        assert path.exists(), inputfolder + ' does not exist'

        # Create output folder
        #path = pathlib.Path(outputfolder)
        #if not path.exists:
        #    path.mkdir()
        if not os.path.exists(outputfolder):
            os.mkdir(outputfolder)

        print('-------------------------------------------------------')
        print('Alya task: ' + str(project_name))
        print('Input path: ' + str(inputfolder))
        print('Output path: ' + str(outputfolder))
        if MPIO:
            print('Format: MPIO')
        else:
            print('Format: ALYABIN')
        print('-------------------------------------------------------')
        sys.stdout.flush()
finally:
    inputfolder = comm.bcast(inputfolder, root=0)
    project_name = comm.bcast(project_name, root=0)
    outputfolder = comm.bcast(outputfolder, root=0)
    MPIO = comm.bcast(MPIO, root=0)

if MPIO:
    file_suffix = '.post.mpio.bin'
else:
    file_suffix = '.post.alyabin'


#-------------------------------------------------
#
# Important variables to adjust if needed
#
#-------------------------------------------------
ensight_id_type = np.int32
ensight_float_type = np.float32
iterationid_number_of_digits = 6  # how many digits to use for the iteration id in variable files
#-------------------------------------------------
#
# Important variables to adjust if needed
#
#-------------------------------------------------


# # Functions

# In[10]:
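# Illustration only (not part of the converter): the Alya readers below unpack
# Fortran "unformatted sequential" records, where every record is framed by a
# byte count before and after the payload. A minimal round-trip sketch,
# assuming 4-byte record markers:
#
#   import io
#   payload = np.arange(5, dtype=np.int32)
#   buf = io.BytesIO()
#   buf.write(np.int32(payload.nbytes).tobytes())  # leading byte count
#   buf.write(payload.tobytes())                   # payload
#   buf.write(np.int32(payload.nbytes).tobytes())  # trailing byte count
#   buf.seek(0)
#   n = np.frombuffer(buf.read(4), np.int32)[0]    # leading count -> 20 bytes
#   data = np.frombuffer(buf.read(n), np.int32)    # the five int32 values
#   buf.read(4)                                    # skip the trailing count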
def identify_alya_id_type(project_name):
    # Read the header where the element ids are stored and see whether they are 32- or 64-bit
    filename = os.path.join(inputfolder, project_name + '-LNODS' + str(file_suffix))
    with open(filename, 'rb') as f:
        header = read_header(f)

    if '32' in header['DataType']:
        alya_id_type = np.int32
    elif '64' in header['DataType']:
        alya_id_type = np.int64
    else:
        assert False, 'Alya id type ' + str(header['DataType']) + ' is not supported'

    return alya_id_type


def read_one_fp90_record(file_object, number_of_elements, datatype):
    # Fortran stores the record length with every block, at the beginning and at the end
    count_read = 0
    record = []
    # NOTE: the body of this loop was lost from the listing; the lines below
    # are an assumed reconstruction based on the comment above (each record is
    # framed by a 4-byte byte count before and after the payload).
    while count_read < number_of_elements:
        block_length = np.fromfile(file_object, np.int32, 1)[0]      # leading byte count
        count = block_length // np.dtype(datatype).itemsize
        record.append(np.fromfile(file_object, datatype, count))     # payload
        np.fromfile(file_object, np.int32, 1)                        # skip the trailing byte count
        count_read = count_read + count
    return np.concatenate(record)


# NOTE: the definitions of read_header() and read_alya_array(), and the code
# that reads the mesh partitions and fills pt_ids, inverse_pt_correspondence,
# number_of_blocks and variable_info, are missing from this listing.

assert (pt_ids > 0).all(), "Some points in the mesh do not have a correspondence in the partitions"
pt_ids = None  # free memory

# LEINV = read_alya_array(os.path.join(inputfolder, f'{project_name}-LEINV.post.alyabin'), \
#                         number_of_blocks, alya_id_type)
# inverse_el_correspondence = (LEINV['tuples']-1).ravel()  # convert ids to python

LTYPE = read_alya_array(os.path.join(inputfolder, project_name + '-LTYPE' + file_suffix),
                        number_of_blocks)
element_types = LTYPE['tuples'].ravel()

inverse_pt_correspondence = comm.bcast(inverse_pt_correspondence, root=0)
element_types = comm.bcast(element_types, root=0)
#inverse_el_correspondence = comm.bcast(inverse_el_correspondence, root=0)


# # Unknown stuff

# In[67]:

# God knows what these are
#LNINV = read_alya_array(os.path.join(inputfolder, f'{project_name}-LNINV.post.alyabin'), \
#                        number_of_blocks, alya_id_type)
#LELCH = read_alya_array(os.path.join(inputfolder, f'{project_name}-LELCH.post.alyabin'), \
#                        number_of_blocks, alya_id_type)
#LEINV = read_alya_array(os.path.join(inputfolder, f'{project_name}-LEINV.post.alyabin'), \
#                        number_of_blocks, alya_id_type)


# # Write geometry and variables

# In[7]:

# Blocks are mesh partitions
#
#for index, row in variable_info.iterrows():
#    info = write_variable_pernode(row.field, row.iteration)
#    variable_info.loc[index, 'time_real'] = info['time_real']
#    variable_info.loc[index, 'time_int'] = info['time_int']
#    variable_info.loc[index, 'variabletype'] = info['variable_type']
#    variable_info.loc[index, 'association'] = info['variable_association']


# # MPI stuff

# In[ ]:

class Work(object):
    def __init__(self, variable_info, number_of_blocks):
        # Work items are dicts describing either the geometry or one variable file
        q = Queue()
        q.put({'filetype': 'geometry', 'number_of_blocks': number_of_blocks})
        for index, row in variable_info.iterrows():
            q.put({'filetype': 'variable', 'name': row.field,
                   'iteration': row.iteration,
                   'number_of_blocks': number_of_blocks,
                   'table_index': index})
        self.work = q

    def get_next(self):
        if self.work.qsize() == 0:
            return None
        return self.work.get()

    def get_size(self):
        return self.work.qsize()


# In[ ]:

def do_work(work):
    #print(f'Node: Using Alya id type {alya_id_type}')
    info = {}
    print(work)
    if work['filetype'] == 'geometry':
        write_geometry(work['number_of_blocks'])
    elif work['filetype'] == 'variable':
        if 'MATER' in work['name']:
            info = write_variable_percell(work['name'], work['iteration'], work['number_of_blocks'])
        else:
            info = write_variable_pernode(work['name'], work['iteration'], work['number_of_blocks'])
        info['table_index'] = work['table_index']
    else:
        assert False, 'Unsupported file type ' + str(work["filetype"])

    info['filetype'] = work['filetype']
    return info


def process_result(result):
    if result['filetype'] == 'variable':
        index = result['table_index']
        variable_info.loc[index, 'time_real'] = result['time_real']
        variable_info.loc[index, 'time_int'] = result['time_int']
        variable_info.loc[index, 'variabletype'] = result['variable_type']
        variable_info.loc[index, 'association'] = result['variable_association']


# In[ ]:
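# master() and slave() below implement a classic MPI work pool: rank 0 seeds
# every worker with one queue item (tag WORKTAG), then hands the next item to
# whichever worker returns a result first, so faster workers automatically
# receive more files; an empty message tagged DIETAG tells a worker to exit.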
def master(comm):
    status = MPI.Status()

    # Generate the work queue
    print('Variables')
    print(variable_info)
    print('number_of_blocks')
    print(number_of_blocks)
    wq = Work(variable_info, number_of_blocks)

    num_procs = min(wq.get_size(), comm.Get_size())
    print('Number of processes: ' + str(num_procs))
    sys.stdout.flush()

    # Seed the slaves: send one unit of work to each slave (rank)
    sent_jobs = 0
    for rank in range(1, num_procs):
        work = wq.get_next()
        comm.send(work, dest=rank, tag=WORKTAG)
        sent_jobs = sent_jobs + 1
    print('Submitted the first batch')
    sys.stdout.flush()

    # Loop over getting new work requests until there is no more work to be done
    while True:
        print('Jobs in the queue: ' + str(wq.get_size()))
        sys.stdout.flush()
        work = wq.get_next()
        if not work:
            break

        # Receive results from a slave
        result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        sent_jobs = sent_jobs - 1
        process_result(result)

        # Send the slave a new work unit
        comm.send(work, dest=status.Get_source(), tag=WORKTAG)
        sent_jobs = sent_jobs + 1

    # No more work to be done; receive all outstanding results from the slaves
    for rank in range(sent_jobs):
        #print(f'Receiving {rank}')
        result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        #print(f'Received {rank}')
        process_result(result)

    print('Shutting down')
    # Tell all the slaves to exit by sending an empty message with DIETAG
    for rank in range(1, comm.Get_size()):
        print('Shutting down ' + str(rank))
        comm.send(0, dest=rank, tag=DIETAG)


# In[ ]:

def slave(comm):
    my_rank = comm.Get_rank()
    status = MPI.Status()
    print('running processor: ' + str(my_rank))
    while True:
        # Receive a message from the master
        #print(f'Proc {my_rank}: receiving')
        work = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        #print(f'Proc {my_rank}: received')
        #print(f'Process {my_rank}, kill signal: {status.Get_tag()==DIETAG}')
        # Check the tag of the received message
        if status.Get_tag() == DIETAG:
            break
        # Do the work
        result = do_work(work)
        # Send the result back
        comm.send(result, dest=0, tag=0)
        #print(f'Proc {my_rank}: sent')


# In[ ]:

comm.Barrier()
if my_rank == 0:
    master(comm)
else:
    slave(comm)


# # Write ensight case file

# In[ ]:

comm.Barrier()
if my_rank == 0:
    print(variable_info)
    print('Saving case')
    sys.stdout.flush()

    case_file = project_name + '.ensi.case'
    with open(os.path.join(outputfolder, case_file), 'w') as f:
        f.write('# Converted from Alya\n')
        f.write('# Ensight Gold Format\n')
        f.write('#\n')
        f.write('# Problem name: ' + str(project_name) + '\n')
        f.write('FORMAT\n')
        f.write('type: ensight gold\n')
        f.write('\n')
        f.write('GEOMETRY\n')
        f.write('model: 1 ' + str(project_name) + '.ensi.geo\n')
        f.write('\n')
        f.write('VARIABLE\n')

        variables = variable_info.field.unique()

        # Define a dataframe for each variable, and a timeline for each
        # variable based on its number of timesteps
        variable_info_per_variable = []
        max_timeline_id = 1
        timelines = {}  # each timeline is identified only by its number of timesteps
        c = 0
        for varname in variables:
            df = variable_info[variable_info.field == varname]
            ntimesteps = df.shape[0]
            if ntimesteps in timelines:
                timeline_id = timelines[ntimesteps]['timeline']
            else:
                timeline_id = max_timeline_id
                timelines[ntimesteps] = {'timeline': timeline_id, 'array_loc': c}
                max_timeline_id = max_timeline_id + 1

            variable_info_per_variable = variable_info_per_variable + \
                [{'varname': varname,
                  'data': variable_info[variable_info.field == varname].sort_values(by='iteration'),
                  'timeline': timeline_id}]
            c = c + 1
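        # Each variable line below references a "time set" id. Variables that
        # were written for the same number of timesteps share one time set, so
        # the TIME section at the end needs only one entry per distinct
        # timestep count.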
        for var_data in variable_info_per_variable:
            varname = var_data['varname']
            timeline_id = var_data['timeline']
            print('Varname ' + str(varname))
            df = var_data['data'].iloc[0]  # get one record with this variable
            line = str(df.variabletype) + ' per ' + str(df.association) + ': ' + \
                str(timeline_id) + '\t' + str(varname) + '\t' + str(project_name) + \
                '.ensi.' + str(varname) + '-' + '*' * iterationid_number_of_digits + '\n'
            f.write(line)

        # Make sure the whole array gets printed; otherwise numpy converts
        # to string only a summary, e.g. (1,2,3,...,5,6,7)
        np.set_printoptions(threshold=np.inf)

        f.write('\n')
        f.write('TIME\n')
        for timeset in timelines:
            var_data = variable_info_per_variable[timelines[timeset]['array_loc']]
            f.write('time set: ' + str(var_data["timeline"]) + '\n')
            number_of_timesteps = var_data['data'].shape[0]
            f.write('number of steps: ' + str(number_of_timesteps) + '\n')
            f.write('filename numbers:\n')
            f.write(str(var_data['data'].time_int.values)[1:-1] + '\n')
            f.write('time values:\n')
            f.write(str(var_data['data'].time_real.values)[1:-1] + '\n')


# In[33]:
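# Example invocation (the script file name alya2ensight.py is assumed here;
# the positional arguments follow the argparse definition at the top):
#
#   mpirun -np 8 python alya2ensight.py task_name /path/to/alya/results \
#       /path/to/ensight/case --format auto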