# forward or adjoint simulation
SIMULATION_TYPE                 = 1        # set to 1 for forward simulations, 2 for adjoint simulations for sources, and 3 for kernel simulations
NOISE_TOMOGRAPHY                = 0        # flag for noise tomography, which proceeds in three steps (1,2,3); for an earthquake simulation, set it to 0
SAVE_FORWARD                    = .false.  # save the last frame of the forward simulation or not
# number of chunks (1, 2, 3 or 6)
NCHUNKS                         = 6
# angular width of the first chunk (not used if running the full sphere with six chunks)
ANGULAR_WIDTH_XI_IN_DEGREES     = 15.d0    # angular size of a chunk
ANGULAR_WIDTH_ETA_IN_DEGREES    = 15.d0
CENTER_LATITUDE_IN_DEGREES      = 4.5d0
CENTER_LONGITUDE_IN_DEGREES     = 136.d0
GAMMA_ROTATION_AZIMUTH          = 0.d0
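# illustrative regional setting: with NCHUNKS = 1 instead of 6, the values above would mesh a single
# 15 x 15 degree chunk centered at (4.5 N, 136 E); with NCHUNKS = 6 as set above, they are ignored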
# number of elements at the surface along the two sides of the first chunk
# (must be a multiple of 16 and 8 * a multiple of NPROC below)
NEX_XI                          = 32
NEX_ETA                         = 32
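# quick check of the constraint above: NEX_XI = 32 is a multiple of 16, and 32 = 8 * 4
# with 4 a multiple of NPROC_XI = 2 below, so this value is valid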
# number of MPI processors along the two sides of the first chunk
NPROC_XI                        = 2
NPROC_ETA                       = 2
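# resulting core count: the solver runs on NCHUNKS * NPROC_XI * NPROC_ETA MPI processes,
# i.e. 6 * 2 * 2 = 24 processes with the values above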
#-----------------------------------------------------------
#
# Model
#
#-----------------------------------------------------------
# 1D models with real structure:
# 1D_isotropic_prem, 1D_transversely_isotropic_prem, 1D_iasp91, 1D_1066a, 1D_ak135f_no_mud, 1D_ref, 1D_ref_iso, 1D_jp3d, 1D_sea99
#
# 1D models with only one fictitious averaged crustal layer:
# 1D_isotropic_prem_onecrust, 1D_transversely_isotropic_prem_onecrust, 1D_iasp91_onecrust, 1D_1066a_onecrust, 1D_ak135f_no_mud_onecrust
#
# fully 3D models:
# transversely_isotropic_prem_plus_3D_crust_2.0, 3D_anisotropic, 3D_attenuation,
# s20rts, s40rts, s362ani, s362iso, s362wmani, s362ani_prem, s362ani_3DQ, s362iso_3DQ,
# s29ea, sea99_jp3d1994, sea99, jp3d1994, heterogen, full_sh, sgloberani_aniso, sgloberani_iso
#
# 3D crustal models:
# crust1.0, crust2.0, EPcrust, EuCRUST, crustmaps, crustSH
#
# Mars models:
# 1D_Sohl, 1D_Sohl_3D_crust, 1D_case65TAY, 1D_case65TAY_3D_crust, mars_1D, mars_1D_3D_crust
#
# Moon models:
# vpremoon
#
# 3D models with 3D crust: append "_crustname" to the mantle model name
# to take a 3D crustal model (by default crust2.0 is taken for 3D mantle models),
# e.g. s20rts_crust1.0, s362ani_crustmaps, full_sh_crustSH, sglobe_EPcrust, etc.
#
# 3D models with 1D crust: append "_1Dcrust" to the 3D model name
# to take the 1D crustal model from the associated reference model
# rather than the default 3D crustal model,
# e.g. s20rts_1Dcrust, s362ani_1Dcrust, etc.
#
#MODEL                          = 1D_Sohl_3D_crust
#MODEL                          = mars_1D
#MODEL                          = 1D_case65TAY_3D_crust
MODEL                           = 1D_isotropic_prem

# parameters describing the Earth model
OCEANS                          = .false.
ELLIPTICITY                     = .false.
TOPOGRAPHY                      = .false.
GRAVITY                         = .true.
ROTATION                        = .true.
ATTENUATION                     = .true.
# record length in minutes
#RECORD_LENGTH_IN_MINUTES       = 2.5d0
RECORD_LENGTH_IN_MINUTES        = 10.d0
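# rough sizing example (the DT value here is hypothetical; the mesher chooses the actual one):
# the number of time steps is about the record length divided by DT, e.g. with DT = 0.12 s a
# 10-minute record needs roughly 600 / 0.12 = 5000 steps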
## regional mesh cut-off
# Using this flag will cut off the mesh in the mantle at a layer matching the given cut-off depth.
# This flag only has an effect for regional simulations, i.e., for NCHUNKS values less than 6.
REGIONAL_MESH_CUTOFF            = .false.
# regional mesh cut-off depth (in km)
# possible selections are: 24.4d0, 80.d0, 220.d0, 400.d0, 600.d0, 670.d0, 771.d0
REGIONAL_MESH_CUTOFF_DEPTH      = 400.d0
# regional mesh cut-off with a second doubling layer below the 220 km interface
# (by default, a first doubling layer is added below the Moho and a second one below the 771 km-depth layer;
# setting this flag to .true. moves the second one below the 220 km-depth layer, for regional mesh cut-offs only)
REGIONAL_MESH_ADD_2ND_DOUBLING  = .false.
# absorbing boundary conditions for a regional simulation
ABSORBING_CONDITIONS            = .false.
# run a global simulation for a circular region and apply high attenuation to the rest of the model;
# this creates an absorbing boundary with less reflection than Stacey conditions, at roughly 6x the computational cost.
# NCHUNKS must be set to 6 to enable this flag.
ABSORB_USING_GLOBAL_SPONGE      = .false.
# location and size of the region with no sponge (the region in which to run the simulation)
SPONGE_LATITUDE_IN_DEGREES      = 40.d0
SPONGE_LONGITUDE_IN_DEGREES     = 25.d0
SPONGE_RADIUS_IN_DEGREES        = 25.d0
# to undo attenuation for sensitivity kernel calculations or forward runs with SAVE_FORWARD,
# use one (and only one) of the two flags below. UNDO_ATTENUATION is much better (it is exact)
# but requires a significant amount of disk space for temporary storage.
PARTIAL_PHYS_DISPERSION_ONLY    = .false.
UNDO_ATTENUATION                = .true.
## undo attenuation memory
# How much memory (in GB) is installed on your machine per CPU core
# (only used for UNDO_ATTENUATION, can be ignored otherwise).
# Beware, this value MUST be given per core, i.e. per MPI thread, i.e. per MPI rank, NOT per node.
# This value is for instance:
# - 4 GB on Tiger at Princeton
# - 4 GB on TGCC Curie in Paris
# - 4 GB on Titan at ORNL when using CPUs only (no GPUs); start your run with "aprun -n$NPROC -N8 -S4 -j1"
# - 2 GB on the machine used by Christina Morency
# - 2 GB on the TACC machine used by Min Chen
# - 1.5 GB on the GPU cluster in Marseille
# When running on GPU machines, it is simpler to set PERCENT_OF_MEM_TO_USE_PER_CORE = 100.d0
# and then set MEMORY_INSTALLED_PER_CORE_IN_GB to the amount of memory that you estimate is free (rather than installed)
# on the host of the GPU card while running your GPU job.
# For GPU runs on Titan at ORNL, use PERCENT_OF_MEM_TO_USE_PER_CORE = 100.d0 and MEMORY_INSTALLED_PER_CORE_IN_GB = 25.d0
# and run your job with "aprun -n$NPROC -N1 -S1 -j1"
# (each host has 32 GB on Titan and each GPU has 6 GB, thus even if all the GPU arrays are duplicated on the host
# this leaves 32 - 6 = 26 GB free on the host; leaving 1 GB for the Linux system, we can safely use 100% of 25 GB).
MEMORY_INSTALLED_PER_CORE_IN_GB = 4.d0

# What percentage of this total do you allow us to use for arrays to undo attenuation, keeping in mind that you
# need to leave some memory available for the GNU/Linux system to run
# (a typical value is 85%; any value below is fine but the code will then save a lot of data to disk;
# values above, say 90% or 92%, can be OK on some systems but can make the adjoint code run out of memory
# on other systems, depending on how much memory per node the GNU/Linux system needs for itself; thus you can try
# a higher value and if the adjoint run crashes then try again with a lower value).
PERCENT_OF_MEM_TO_USE_PER_CORE  = 85.d0
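# worked example with the values above: 85% of 4 GB leaves 4 * 0.85 = 3.4 GB per MPI rank for
# the undo-attenuation buffers; whatever does not fit in that budget is saved to disk instead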
## exact mass matrices for rotation
# Three mass matrices instead of one are needed to handle rotation very accurately;
# otherwise rotation is handled slightly less accurately (but still reasonably well).
# Set this to .true. if you are interested in precise effects related to rotation;
# set it to .false. if you are solving very large inverse problems at high frequency and also undoing
# attenuation exactly using the UNDO_ATTENUATION flag above, in which case saving as much memory as
# possible can be a good idea.
# You can also safely set it to .false. if you are not in a period range in which rotation matters,
# e.g. if you are targeting very short-period body waves. If in doubt, set it to .true.
# You can safely set it to .true. if you use ABSORBING_CONDITIONS above, because in that case the code
# uses three mass matrices anyway and thus there is no additional memory cost.
# This flag is of course unused if ROTATION above is set to .false.
EXACT_MASS_MATRIX_FOR_ROTATION  = .false.
#-----------------------------------------------------------
#
# LDDRK time scheme
#
#-----------------------------------------------------------
# use the LDDRK high-order time scheme instead of the Newmark scheme
USE_LDDRK                       = .false.
# The maximum CFL number of LDDRK is significantly higher than that of the Newmark scheme,
# in a ratio that is theoretically 1.327 / 0.697 = 1.15 / 0.604 = 1.903 for a solid with Poisson's ratio = 0.25
# and for a fluid (see Tables 4.1 and 4.2 in the manual of the 2D code, SPECFEM2D; the ratio does not
# depend on whether we are in 2D or in 3D). However, in practice a ratio of about 1.5 to 1.7 is often safer
# (for instance for models with a large range of Poisson's ratio values).
# Since the code computes the time step using the Newmark scheme, for LDDRK we simply
# multiply that time step by this ratio when LDDRK is on and when flag INCREASE_CFL_FOR_LDDRK is true.
INCREASE_CFL_FOR_LDDRK          = .true.
RATIO_BY_WHICH_TO_INCREASE_IT   = 1.5d0
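# illustrative effect of the two flags above: if the Newmark-based estimate were DT = 0.10 s
# (hypothetical value), the LDDRK run would use 0.10 * 1.5 = 0.15 s per step,
# i.e. about 33% fewer time steps for the same record length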
# save AVS or OpenDX movies
# MOVIE_COARSE saves the movie only at the corners of the elements (SURFACE OR VOLUME);
# MOVIE_COARSE does not work with create_movie_AVS_DX
MOVIE_SURFACE                   = .false.
MOVIE_VOLUME                    = .false.
MOVIE_COARSE                    = .true.
NTSTEP_BETWEEN_FRAMES           = 50
HDUR_MOVIE                      = 0.d0
# save movie in volume; an element is saved if its center lies in the prescribed volume
# top/bottom: depth in km; use MOVIE_TOP = -100 to make sure the surface is stored
# west/east: longitude, degrees East [-180/180]; north/south: latitude, degrees North [-90/90]
# start/stop: frames will be stored at MOVIE_START + i*NTSTEP_BETWEEN_FRAMES, where i = (0,1,2,..) and i*NTSTEP_BETWEEN_FRAMES <= MOVIE_STOP
# movie_volume_type: 1=strain, 2=time integral of strain, 3=\mu*time integral of strain;
# type 4 saves the trace and deviatoric stress in the whole volume, 5=displacement, 6=velocity
MOVIE_VOLUME_TYPE               = 2
MOVIE_TOP_KM                    = -100.0
MOVIE_BOTTOM_KM                 = 1000.0
MOVIE_WEST_DEG                  = -90.0
MOVIE_EAST_DEG                  = 90.0
MOVIE_NORTH_DEG                 = 90.0
MOVIE_SOUTH_DEG                 = -90.0
MOVIE_START                     = 0
MOVIE_STOP                      = 40000
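# worked example with the values above (if volume movies were enabled): frames would be stored at
# time steps 0, 50, 100, ..., up to the largest i*NTSTEP_BETWEEN_FRAMES <= 40000, i.e. at most
# 801 frames (fewer if the run has fewer than 40000 steps)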
# save mesh files to check the mesh
SAVE_MESH_FILES                 = .true.
# restart files (number of runs can be 1 or higher; choose 1 for no restart files)
NUMBER_OF_RUNS                  = 1
NUMBER_OF_THIS_RUN              = 1
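# illustrative restart setup: to split a long simulation into three consecutive jobs, set
# NUMBER_OF_RUNS = 3 and launch the solver three times with NUMBER_OF_THIS_RUN = 1, then 2, then 3;
# each job restarts from the files saved by the previous one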
# path to store the local database files on each node
LOCAL_PATH                      = ./DATABASES_MPI
# temporary wavefield/kernel/movie files
LOCAL_TMP_PATH                  = ./DATABASES_MPI
# interval at which we output time step info and max of norm of displacement
NTSTEP_BETWEEN_OUTPUT_INFO      = 500
# use a (tilted) FORCESOLUTION force point source (or several) instead of a CMTSOLUTION moment-tensor source.
# This can be useful e.g. for asteroid simulations
# in which the source is a vertical force, normal force, tilted force, impact etc.
# If this flag is turned on, the FORCESOLUTION file must be edited by giving:
# - the corresponding time-shift parameter,
# - the half duration parameter of the source,
# - the coordinates of the source,
# - the source time function of the source,
# - the magnitude of the force source,
# - the components of a (not necessarily unitary) direction vector for the force source in the E/N/Z_UP basis.
# The direction vector is made unitary internally in the code and thus only its direction matters here;
# its norm is ignored and the norm of the force used is the "factor force source" times the source time function.
USE_FORCE_POINT_SOURCE          = .false.
# use a monochromatic source time function for the CMTSOLUTION moment-tensor source;
# the half duration is then interpreted as a PERIOD, just to avoid changing the CMTSOLUTION file format.
# The default is .false., which uses a Heaviside function.
USE_MONOCHROMATIC_CMT_SOURCE    = .false.
# print the source time function
PRINT_SOURCE_TIME_FUNCTION      = .false.
# interval in time steps for temporary writing of seismograms
NTSTEP_BETWEEN_OUTPUT_SEISMOS   = 5000000
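# note on the value above: an interval larger than the total number of time steps (as this very
# large value is intended to be) means the seismograms are only written once, at the end of the run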
# set to n to reduce the sampling rate of output seismograms by a factor of n
# (defaults to 1, which means no down-sampling)
NTSTEP_BETWEEN_OUTPUT_SAMPLE    = 1
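# for instance, setting NTSTEP_BETWEEN_OUTPUT_SAMPLE = 4 would keep every 4th sample, so the
# sampling interval of the output seismograms becomes 4 * DT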
# option to save strain seismograms
# (useful for strain Green's tensors; this feature is currently under development)
SAVE_SEISMOGRAMS_STRAIN         = .false.
# save seismograms also when running the adjoint runs for an inverse problem
# (usually they are unused and not very meaningful, so leave this off in almost all cases)
SAVE_SEISMOGRAMS_IN_ADJOINT_RUN = .false.
# output format for the seismograms (one can use any or all of the formats below)
OUTPUT_SEISMOS_ASCII_TEXT       = .true.
OUTPUT_SEISMOS_SAC_ALPHANUM     = .false.
OUTPUT_SEISMOS_SAC_BINARY       = .false.
OUTPUT_SEISMOS_ASDF             = .false.
OUTPUT_SEISMOS_3D_ARRAY         = .false.
# rotate seismograms to the Radial-Transverse-Z frame, or use the default North-East-Z reference frame
ROTATE_SEISMOGRAMS_RT           = .false.
# decide whether the main process writes all the seismograms or all processes write them in parallel
WRITE_SEISMOGRAMS_BY_MAIN       = .false.
# save all seismograms in one large combined file instead of one file per seismogram,
# to avoid overloading shared non-local file systems such as LUSTRE or GPFS
SAVE_ALL_SEISMOS_IN_ONE_FILE    = .false.
USE_BINARY_FOR_LARGE_FILE       = .false.
# flag to impose receivers at the surface or allow them to be buried
RECEIVERS_CAN_BE_BURIED         = .true.
# interval in time steps for reading adjoint traces
# (0 = read the whole adjoint sources at start time)
NTSTEP_BETWEEN_READ_ADJSRC      = 1000000
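# for instance, with the large value above the whole adjoint traces are read in one pass at the
# start of the run for any typical number of time steps, which is equivalent to setting it to 0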
# use the ASDF format for reading the adjoint sources
READ_ADJSRC_ASDF                = .false.
# this parameter must be set to .true. to compute anisotropic kernels
# in the crust and mantle (related to the 21 Cij in geographical coordinates);
# the default is .false., which computes isotropic kernels (related to alpha and beta)
ANISOTROPIC_KL                  = .false.
# output only transversely isotropic kernels (alpha_v, alpha_h, beta_v, beta_h, eta, rho)
# rather than fully anisotropic kernels when ANISOTROPIC_KL above is set to .true.,
# i.e. save radially anisotropic kernels such as the sensitivity kernels for beta_v, beta_h, etc.
SAVE_TRANSVERSE_KL_ONLY         = .false.
# output only the kernels used for the current azimuthally anisotropic inversions of surface waves,
# i.e. bulk_c, bulk_betav, bulk_betah, eta, Gc_prime, Gs_prime and rho
# (Gc' & Gs' are the Gc & Gs kernels normalized by the isotropic \rho\beta of the 1D reference model)
SAVE_AZIMUTHAL_ANISO_KL_ONLY    = .false.
# output the approximate Hessian in the crust-mantle region,
# i.e. save the preconditioner for the gradients, obtained as the cross-correlation
# between the forward and adjoint accelerations
APPROXIMATE_HESS_KL             = .false.
# force transverse isotropy for all mantle elements
# (the default is to use transverse isotropy only between the Moho and 220 km depth),
# i.e. allow radial anisotropy throughout the whole crust/mantle region
USE_FULL_TISO_MANTLE            = .false.
# output a kernel mask to zero out the source region,
# to remove large values near the sources in the sensitivity kernels
SAVE_SOURCE_MASK                = .false.
# output kernels on a regular grid instead of on the GLL mesh points (a bit expensive)
SAVE_REGULAR_KL                 = .false.
# compute steady-state kernels for source encoding
STEADY_STATE_KERNEL             = .false.
STEADY_STATE_LENGTH_IN_MINUTES  = 0.d0
# Dimitri Komatitsch, July 2014, CNRS Marseille, France:
# added the ability to run several calculations (several earthquakes)
# in an embarrassingly-parallel fashion from within the same run;
# this can be useful when using a very large supercomputer to compute
# many earthquakes in a catalog, in which case it can be better from
# a batch job submission point of view to start fewer and much larger jobs,
# each of them computing several earthquakes in parallel.
# To turn this option on, set parameter NUMBER_OF_SIMULTANEOUS_RUNS to a value greater than 1.
# To implement it, we create NUMBER_OF_SIMULTANEOUS_RUNS MPI sub-communicators,
# each of them labeled "my_local_mpi_comm_world", and we use them
# in all the routines in "src/shared/parallel.f90", except in MPI_ABORT() because in that case
# we need to kill the entire run.
# When this option is on, the number of processor cores used to start
# the code in the batch system must of course be a multiple of NUMBER_OF_SIMULTANEOUS_RUNS,
# all the individual runs must use the same number of processor cores
# (which as usual is NPROC in the Par_file),
# and thus the total number of processor cores to request from the batch system
# should be NUMBER_OF_SIMULTANEOUS_RUNS * NPROC.
# All the runs to perform must be placed in directories called run0001, run0002, run0003 and so on
# (with exactly four digits).
#
# Imagine you have 10 independent calculations to do, each of them on 100 cores; you have three options:
#
# 1/ submit 10 jobs to the batch system
#
# 2/ submit a single job on 1000 cores to the batch, and in that script create a sub-array of jobs to start 10 jobs,
#    each running on 100 cores (see e.g. http://www.schedmd.com/slurmdocs/job_array.html )
#
# 3/ submit a single job on 1000 cores to the batch, start SPECFEM3D on 1000 cores, create 10 sub-communicators,
#    cd into one of the 10 subdirectories (called e.g. run0001, run0002, ..., run0010) depending on the sub-communicator
#    your MPI rank belongs to, and run normally on 100 cores using that sub-communicator.
#
# The option below implements 3/.
#
NUMBER_OF_SIMULTANEOUS_RUNS     = 1
# If we perform simultaneous runs in parallel, and only the source and receivers vary between these runs
# but not the mesh nor the model (velocity and density), then we can also read the mesh and model files
# from a single run at the beginning and broadcast them to all the others; for a large number of simultaneous
# runs, for instance when solving inverse problems iteratively, this can DRASTICALLY reduce I/Os to disk in the solver
# (by a factor equal to NUMBER_OF_SIMULTANEOUS_RUNS), and reducing I/Os is crucial in the case of huge runs.
# Thus, always set this option to .true. if the mesh and the model are the same for all simultaneous runs.
# In that case there is no need to duplicate the mesh and model file database (the content of the DATABASES_MPI
# directories) in each of the run0001, run0002, ... directories; it is sufficient to have one in run0001
# and the code will broadcast it to the others.
BROADCAST_SAME_MESH_AND_MODEL   = .false.
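# for example, with NUMBER_OF_SIMULTANEOUS_RUNS = 4 and this flag set to .true., only
# run0001/DATABASES_MPI needs to be populated; run0002 to run0004 receive the mesh and model
# via MPI broadcast instead of reading their own copies from disk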
# set to true to use GPUs
GPU_MODE                        = .false.
# Only used if GPU_MODE = .true.:
GPU_RUNTIME                     = 1        # 2 (OpenCL), 1 (CUDA) or 0 (compile-time -- does not work if configured with --with-cuda *AND* --with-opencl)
GPU_PLATFORM                    = NVIDIA
GPU_DEVICE                      = Tesla
# set to true to use the ADIOS library for I/Os
ADIOS_ENABLED                   = .false.
ADIOS_FOR_FORWARD_ARRAYS        = .true.
ADIOS_FOR_MPI_ARRAYS            = .true.
ADIOS_FOR_ARRAYS_SOLVER         = .true.
ADIOS_FOR_SOLVER_MESHFILES      = .true.
ADIOS_FOR_AVS_DX                = .true.
ADIOS_FOR_KERNELS               = .true.
ADIOS_FOR_MODELS                = .true.
ADIOS_FOR_UNDO_ATTENUATION      = .true.