From 56eeca46ab0cefb8b83500ea70e2eb3c0a4d278b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franck=20P=C3=A9rignon?= <franck.perignon@imag.fr> Date: Thu, 22 Sep 2016 14:23:33 +0200 Subject: [PATCH] Fix/update install process. Try to fix my local mess... --- CMakeLists.txt | 64 ++++++- cmake/HySoPInstallSetup.cmake | 1 - cmake/PythonInstallSetup.cmake | 6 +- cmake/fortran_utils.cmake | 2 +- docs/config/hysop.doxyfile.in | 113 +++++-------- docs/sphinx/users_guide/stretching.rst | 6 + hysop/constants.py | 20 ++- hysop/fields/tests/test_variable.py | 9 +- hysop/fields/variable_parameter.py | 7 +- hysop/methods_keys.py | 52 +++--- hysop/mpi/topology.py | 60 +++++++ hysop/numerics/interpolation.py | 159 +++++++++++++++++- hysop/numerics/odesolvers.py | 2 +- hysop/numerics/remeshing.py | 1 + .../numerics/tests/test_finite_differences.py | 9 - hysop/numerics/tests/test_remesh.py | 27 --- hysop/numerics/utils.py | 6 + .../{absorption_BC.py => absorption_bc.py} | 0 hysop/operator/discrete/absorption_BC.py | 132 --------------- hysop/operator/discrete/absorption_bc.py | 159 ++++++++++++++++++ .../discrete/multiresolution_filter.py | 106 +++++++----- hysop/operator/discrete/scales_advection.py | 61 ++++--- hysop/operator/discrete/stretching.py | 1 + hysop/problem/problem_with_GLRendering.py | 5 +- setup.py.in | 2 +- 25 files changed, 654 insertions(+), 356 deletions(-) mode change 100755 => 100644 hysop/constants.py rename hysop/operator/{absorption_BC.py => absorption_bc.py} (100%) delete mode 100755 hysop/operator/discrete/absorption_BC.py create mode 100755 hysop/operator/discrete/absorption_bc.py diff --git a/CMakeLists.txt b/CMakeLists.txt index d295a9c9b..f9ca15fbf 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,7 +49,7 @@ option(WITH_GOOGLE_TESTS "Enable google tests (c++). Default = OFF." OFF) option(FORTRAN_LAYOUT "Choose default data layout ('fortran', column-major or 'C' order, row-major) for arrays. Default = column-major." ON) option(WITH_DOCUMENTATION "Build Documentation. Default = OFF" OFF) option(ENABLE_LONG_TESTS "Enable tests that may run for long time with important memory print. Default = OFF." OFF) - +option(DEV_MODE "Enable devel mode (aggressive checking of warnings ..). Default = ON." ON) # Set python install mode: # - user --> behave as 'python setup.py install --user' # - standard --> install in python site-package (ie behave as python setup.py install) @@ -276,6 +276,46 @@ include(HySoPInstallSetup) # Remark : this must be done before add_subdir below, since install process in src needs CMAKE_INSTALL_PREFIX # to be properly set. 
+# # ============= Extra setup ============= +# set precision for real numbers +# used to generate precision.f95 file/module and constant.py +if(DOUBLEPREC) + set(WORKING_PRECISION DP) + set(MPI_PRECISION MPI_DOUBLE_PRECISION) + set(PYTHON_PREC np.float64) + set(MPI_PYTHON_PREC MPI.DOUBLE) +else() + set(WORKING_PRECISION SP) + set(MPI_PRECISION MPI_FLOAT) + set(PYTHON_PREC np.float32) + set(MPI_PYTHON_PREC MPI.FLOAT) +endif() + +# set data layout ('fortran' order or 'C' order) +if(FORTRAN_LAYOUT) + set(DATA_LAYOUT 'F') + set(MPI_DATA_LAYOUT MPI.ORDER_F) +else() + set(DATA_LAYOUT 'C') + set(MPI_DATA_LAYOUT MPI.ORDER_C) +endif() +if(EXISTS ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/constants.py.in) + message(STATUS "Generate constant.py file ...") + configure_file(${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/constants.py.in + ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/constants.py) +endif() +if(EXISTS ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/.f2py_f2cmap) + message(STATUS "Generate f2py map file ...") + configure_file(${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/.f2py_f2cmap + ${CMAKE_BINARY_DIR}/.f2py_f2cmap) +endif() +if(EXISTS ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/f2hysop.pyf.in) + message(STATUS "Generate f2hysop.pyf (f2py main signature file) ...") + configure_file(${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/f2hysop.pyf.in + ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/f2hysop.pyf) +endif() + + if(USE_FORTRAN) if(EXISTS ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/.f2py_f2cmap) message(STATUS "Generate f2py map file ...") @@ -304,8 +344,14 @@ if(USE_FORTRAN) list(APPEND HYSOP_LINK_LIBRARIES ${MPI_Fortran_LIBRARIES} ) endif(USE_MPI) - #set(Fortran_FLAGS ${CMAKE_Fortran_FLAGS}) - #append_flags(Fortran_FLAGS ${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}) + set(Fortran_FLAGS ${CMAKE_Fortran_FLAGS}) + append_flags(Fortran_FLAGS ${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}) + + if(EXISTS ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/precision.conf.in) + message(STATUS "Generate precision.f95 file ...") + configure_file(${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/precision.conf.in + ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/utils_f/precision.f95) + endif() endif() if(WITH_COMPILED_LIB) @@ -317,7 +363,10 @@ endif() if(WITH_LIB_CXX) #C++ variables used by setup.py.in for swig - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wall -Wextra -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-parameter ${FFTW_COMPILE_FLAGS} -fPIC -std=c++11") + if(DEV_MODE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wall -Wextra -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-parameter") + endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FFTW_COMPILE_FLAGS} -fPIC -std=c++11") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") @@ -519,6 +568,11 @@ if(VERBOSE_MODE) message(STATUS " Project uses GPU : ${WITH_GPU}") message(STATUS " ${PROJECT_NAME} debug mode : ${DEBUG}") message(STATUS " Enable -OO run? : ${OPTIM}") + if(DOUBLEPREC) + message(STATUS " Real numbers precision : double.") + else() + message(STATUS " Real numbers precision : simple.") + endif() message(STATUS "====================== ======= ======================") message(STATUS " ") message(STATUS "Try :") @@ -530,5 +584,5 @@ if(VERBOSE_MODE) message(STATUS " 'make uninstall' to clean install directory. 
Dry-run (make -n uninstall) is advisable to check what will really be deleted.") message(STATUS "\n\n/!\\ Warning /!\\ : depending on your python environment configuration, you may need to set PYTHONPATH.") message("Try to run python -c 'import hysop'. If it fails, add ${HYSOP_PYTHON_INSTALL_DIR} to PYTHONPATH environment variable.") - message("Example : \n export PYTHONPATH=${HYSOP_PYTHON_INSTALL_DIR}:\${PYTHONPATH}\n") + message("Example : \n export PYTHONPATH=${HYSOP_PYTHON_INSTALL_DIR}/:\${PYTHONPATH}\n") endif() diff --git a/cmake/HySoPInstallSetup.cmake b/cmake/HySoPInstallSetup.cmake index 537bea581..a21e4a94f 100755 --- a/cmake/HySoPInstallSetup.cmake +++ b/cmake/HySoPInstallSetup.cmake @@ -67,7 +67,6 @@ else() COMMENT "build/install hysop package") add_custom_target(uninstall echo >> ${CMAKE_CURRENT_BINARY_DIR}/install_manifest.txt - #COMMAND cat ${CMAKE_CURRENT_BINARY_DIR}/python_install_manifest.txt >> ${CMAKE_CURRENT_BINARY_DIR}/install_manifest.txt COMMAND pip uninstall ${PACKAGE_NAME} COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) endif() diff --git a/cmake/PythonInstallSetup.cmake b/cmake/PythonInstallSetup.cmake index 313f0b88f..c7b1ec86c 100755 --- a/cmake/PythonInstallSetup.cmake +++ b/cmake/PythonInstallSetup.cmake @@ -78,9 +78,9 @@ function(set_python_install_path) # "import site; print(site.getsitepackages()[0])") # --> this does not work properly: the order in resulting # list depends on the OS, the python version ... - configure_file(CMake/fake/setup.py tmp/setup.py) - configure_file(CMake/fake/__init__.py tmp/fake/__init__.py) - configure_file(CMake/find_python_install.py tmp/find_python_install.py) + configure_file(cmake/fake/setup.py tmp/setup.py) + configure_file(cmake/fake/__init__.py tmp/fake/__init__.py) + configure_file(cmake/find_python_install.py tmp/find_python_install.py) execute_process( COMMAND ${PYTHON_EXECUTABLE} find_python_install.py WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/tmp/ diff --git a/cmake/fortran_utils.cmake b/cmake/fortran_utils.cmake index 79c56db9b..8f5679445 100644 --- a/cmake/fortran_utils.cmake +++ b/cmake/fortran_utils.cmake @@ -15,7 +15,7 @@ # --> create f2hysop.pyf that will be used to generate hysop.f2hysop module. # function(write_main_pyf_file filename) - set(_file ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/${filename}.pyf) + set(_file ${CMAKE_SOURCE_DIR}/${PACKAGE_NAME}/${filename}.pyf.in) file(WRITE ${_file} "! -*- f90 -*-\n ! Generated file - Do not edit.\n diff --git a/docs/config/hysop.doxyfile.in b/docs/config/hysop.doxyfile.in index f38f4ce8f..db08da3ed 100644 --- a/docs/config/hysop.doxyfile.in +++ b/docs/config/hysop.doxyfile.in @@ -51,7 +51,7 @@ PROJECT_BRIEF = "Particle Methods simulation on hybrid architectures" # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. -PROJECT_LOGO = +PROJECT_LOGO = @CMAKE_CURRENT_SOURCE_DIR@/sphinx/figures/logo_hysop_nb.png # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is @@ -152,7 +152,7 @@ FULL_PATH_NAMES = NO # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. 
-STRIP_FROM_PATH = +STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which @@ -250,13 +250,13 @@ OPTIMIZE_OUTPUT_FOR_C = NO # qualified scopes will look different, etc. # The default value is: NO. -OPTIMIZE_OUTPUT_JAVA = YES +OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. -OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_FOR_FORTRAN = YES # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. @@ -291,7 +291,7 @@ EXTENSION_MAPPING = # case of backward compatibilities issues. # The default value is: YES. -MARKDOWN_SUPPORT = NO +MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can @@ -428,7 +428,7 @@ EXTRACT_PRIVATE = NO # scope will be included in the documentation. # The default value is: NO. -EXTRACT_PACKAGE = YES +EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. @@ -450,7 +450,7 @@ EXTRACT_LOCAL_CLASSES = YES # included. # The default value is: NO. -EXTRACT_LOCAL_METHODS = YES +EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called @@ -489,7 +489,7 @@ HIDE_FRIEND_COMPOUNDS = NO # blocks will be appended to the function's detailed documentation block. # The default value is: NO. -HIDE_IN_BODY_DOCS = NO +HIDE_IN_BODY_DOCS = YES # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation @@ -551,7 +551,7 @@ INLINE_INFO = YES # name. If set to NO, the members will appear in declaration order. # The default value is: YES. -SORT_MEMBER_DOCS = NO +SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member @@ -559,7 +559,7 @@ SORT_MEMBER_DOCS = NO # this will also influence the order of the classes in the class list. # The default value is: NO. -SORT_BRIEF_DOCS = NO +SORT_BRIEF_DOCS = YES # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and @@ -588,7 +588,7 @@ SORT_GROUP_NAMES = NO # list. # The default value is: NO. -SORT_BY_SCOPE_NAME = YES +SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between @@ -647,7 +647,7 @@ MAX_INITIALIZER_LINES = 29 # list will mention the files that were used to generate the documentation. # The default value is: YES. -SHOW_USED_FILES = YES +SHOW_USED_FILES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View @@ -661,7 +661,7 @@ SHOW_FILES = YES # Folder Tree View (if specified). # The default value is: YES. 
-SHOW_NAMESPACES = NO +SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from @@ -705,7 +705,7 @@ CITE_BIB_FILES = # messages are off. # The default value is: NO. -QUIET = NO +QUIET = @DOXY_QUIET@ # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES @@ -714,14 +714,14 @@ QUIET = NO # Tip: Turn warnings on while writing the documentation. # The default value is: YES. -WARNINGS = YES +WARNINGS = @DOXY_WARNINGS@ # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. -WARN_IF_UNDOCUMENTED = YES +WARN_IF_UNDOCUMENTED = @DOXY_WARNINGS@ # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters @@ -729,7 +729,7 @@ WARN_IF_UNDOCUMENTED = YES # markup commands wrongly. # The default value is: YES. -WARN_IF_DOC_ERROR = YES +WARN_IF_DOC_ERROR = @DOXY_WARNINGS@ # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return @@ -737,13 +737,7 @@ WARN_IF_DOC_ERROR = YES # parameter documentation, but not about the absence of documentation. # The default value is: NO. -WARN_NO_PARAMDOC = YES - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO +WARN_NO_PARAMDOC = @DOXY_WARNINGS@ # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which @@ -771,9 +765,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = @CMAKE_SOURCE_DIR@/hysop \ - @CMAKE_SOURCE_DIR@/DoxyConf/mainpage.doxygen \ - @CMAKE_SOURCE_DIR@/src/fftw +INPUT =@DOXYGEN_INPUTS@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -799,9 +791,11 @@ INPUT_ENCODING = UTF-8 # *.vhd, *.vhdl, *.ucf, *.qsf, *.as and *.js. FILE_PATTERNS = *.doxygen \ - *.py \ *.cl \ - *.f90 + *.f90 \ + *.f95 \ + *.F90 \ + *.F95 # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. @@ -816,14 +810,14 @@ RECURSIVE = YES # Note that relative paths are relative to the directory from which doxygen is # run. -EXCLUDE = +EXCLUDE = @EXCLUDE_DIRS@ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. 
-EXCLUDE_SYMLINKS = NO +EXCLUDE_SYMLINKS = YES # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude @@ -832,8 +826,7 @@ EXCLUDE_SYMLINKS = NO # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* -EXCLUDE_PATTERNS = */.svn/* \ - */tests/* +EXCLUDE_PATTERNS = */tests/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the @@ -857,7 +850,7 @@ EXAMPLE_PATH = # *.h) to filter out the source-files in the directories. If left blank all # files are included. -EXAMPLE_PATTERNS = * +EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands @@ -886,10 +879,6 @@ IMAGE_PATH = # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. INPUT_FILTER = @@ -899,12 +888,8 @@ INPUT_FILTER = # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. -FILTER_PATTERNS = *.py=doxypy.py +FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for @@ -939,7 +924,7 @@ USE_MDFILE_AS_MAINPAGE = # also VERBATIM_HEADERS is set to NO. # The default value is: NO. -SOURCE_BROWSER = NO +SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. @@ -958,13 +943,13 @@ STRIP_CODE_COMMENTS = YES # function all documented functions referencing it will be listed. # The default value is: NO. -REFERENCED_BY_RELATION = NO +REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. -REFERENCES_RELATION = NO +REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and @@ -1047,7 +1032,7 @@ IGNORE_PREFIX = # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. -GENERATE_HTML = YES +GENERATE_HTML = @GENERATE_HTML@ # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of @@ -1082,7 +1067,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. 
-HTML_HEADER = +HTML_HEADER = @CMAKE_SOURCE_DIR@/docs/doxygen_layout/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1092,7 +1077,7 @@ HTML_HEADER = # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_FOOTER = +HTML_FOOTER = @CMAKE_SOURCE_DIR@/docs/doxygen_layout/footer.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of @@ -1138,7 +1123,7 @@ HTML_EXTRA_FILES = # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE_HUE = 115 +HTML_COLORSTYLE_HUE = 192 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A @@ -1146,7 +1131,7 @@ HTML_COLORSTYLE_HUE = 115 # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE_SAT = 115 +HTML_COLORSTYLE_SAT = 30 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 @@ -1157,7 +1142,7 @@ HTML_COLORSTYLE_SAT = 115 # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE_GAMMA = 124 +HTML_COLORSTYLE_GAMMA = 67 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this @@ -1174,7 +1159,7 @@ HTML_TIMESTAMP = YES # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_DYNAMIC_SECTIONS = NO +HTML_DYNAMIC_SECTIONS = @GENERATE_HTML@ # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand @@ -1481,7 +1466,7 @@ MATHJAX_FORMAT = HTML-CSS # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. -MATHJAX_RELPATH = http://www.mathjax.org/mathjax +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example @@ -1517,7 +1502,7 @@ MATHJAX_CODEFILE = # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. -SEARCHENGINE = YES +SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. There @@ -1625,7 +1610,7 @@ MAKEINDEX_CMD_NAME = makeindex # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. -COMPACT_LATEX = YES +COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used by the # printer. @@ -1634,7 +1619,7 @@ COMPACT_LATEX = YES # The default value is: a4. # This tag requires that the tag GENERATE_LATEX is set to YES. -PAPER_TYPE = a4 +PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names # that should be included in the LaTeX output. The package can be specified just @@ -1646,7 +1631,7 @@ PAPER_TYPE = a4 # If left blank no extra packages will be included. 
# This tag requires that the tag GENERATE_LATEX is set to YES. -EXTRA_PACKAGES = +EXTRA_PACKAGES = amsmath # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first @@ -1745,14 +1730,6 @@ LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain -# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_TIMESTAMP = NO - #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- @@ -1871,7 +1848,7 @@ MAN_LINKS = NO # captures the structure of the code including all documentation. # The default value is: NO. -GENERATE_XML = NO +GENERATE_XML = @GENERATE_XML@ # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of @@ -2068,7 +2045,7 @@ TAGFILES = # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. -GENERATE_TAGFILE = +GENERATE_TAGFILE = @CMAKE_BINARY_DIR@/docs/build/tags/@PROJECT_NAME@.tag # If the ALLEXTERNALS tag is set to YES, all external class will be listed in # the class index. If set to NO, only the inherited external classes will be diff --git a/docs/sphinx/users_guide/stretching.rst b/docs/sphinx/users_guide/stretching.rst index 6fa98012b..79d00d7f0 100644 --- a/docs/sphinx/users_guide/stretching.rst +++ b/docs/sphinx/users_guide/stretching.rst @@ -28,6 +28,8 @@ The default is 'conservative'. Usage ----- + +TO BE REVIEWED .. code:: m_guv = {Formulation:'GradUW', TimeIntegrator='RK3', SpaceDiscretisation='FDC4'} @@ -36,6 +38,10 @@ Usage stretch_conservative = Stretching(velocity=v, vorticity=w, discretisation=some_topo, method=m_cons) + stretch = Conservative(velocity=v, vorticity=w, discretisation=some_topo, + method={Formulation:'GradUW', TimeIntegrator='RK3', SpaceDiscretisation='FDC4') + + Linearized Stretching --------------------- diff --git a/hysop/constants.py b/hysop/constants.py old mode 100755 new mode 100644 index 48e29b8bf..7013c65cf --- a/hysop/constants.py +++ b/hysop/constants.py @@ -4,11 +4,16 @@ in hysop package. Mostly deals with float/int numbers precision and some labels for numerical methods and operators. +This file is generated from constants.py.in +and it's probably a bad idea to modify it. 
+ """ from hysop import __DEBUG__, __PROFILE__ import numpy as np import math -from hysop.mpi import MPI +__MPI_ENABLED__ = "ON" is "ON" +if __MPI_ENABLED__: + from hysop.mpi import MPI PI = math.pi @@ -27,14 +32,15 @@ HYSOP_INTEGER = np.int32 HYSOP_DIM = np.int16 """integer used for arrays dimensions""" -HYSOP_MPI_REAL = MPI.DOUBLE -"""float type used in MPI""" +if __MPI_ENABLED__: + HYSOP_MPI_REAL = MPI.DOUBLE + """float type used in MPI""" -HYSOP_MPI_INTEGER = MPI.INT -"""integer type used in MPI""" + HYSOP_MPI_INTEGER = MPI.INT + """integer type used in MPI""" -ORDERMPI = MPI.ORDER_F -"""Default array layout for MPI""" + ORDERMPI = MPI.ORDER_F + """Default array layout for MPI""" ORDER = 'F' diff --git a/hysop/fields/tests/test_variable.py b/hysop/fields/tests/test_variable.py index 8068ecd51..e4fcd09d1 100755 --- a/hysop/fields/tests/test_variable.py +++ b/hysop/fields/tests/test_variable.py @@ -8,6 +8,9 @@ import numpy as np def test_constant_var(): + """Declare and update a 'variable' holding + constant values + """ uinf = 1. var = VariableParameter(data=uinf) var.data *= 3 @@ -21,11 +24,16 @@ def test_constant_var(): def func(s): + """test function, depends on simu + """ time = s.time return np.asarray((sin(time), cos(time))) def test_time_var(): + """Set and test variable parameter defined + with a function + """ var = VariableParameter(formula=func) simu = Simulation(start=0., end=1., time_step=0.1) simu.initialize() @@ -33,4 +41,3 @@ def test_time_var(): var.update(simu) assert np.allclose(var.data, [sin(simu.time), cos(simu.time)]) simu.advance() - diff --git a/hysop/fields/variable_parameter.py b/hysop/fields/variable_parameter.py index d4f1b788c..a2131b48c 100644 --- a/hysop/fields/variable_parameter.py +++ b/hysop/fields/variable_parameter.py @@ -46,8 +46,11 @@ class VariableParameter(object): self.data = data # Formula used to compute data (a python function) if formula is None: - formula = lambda simu: self.data - self.formula = formula + def _formula(simu): + return self.data + self.formula = _formula + else: + self.formula = formula def update(self, simu=None): """Update parameter value for the current simulation diff --git a/hysop/methods_keys.py b/hysop/methods_keys.py index c00043a2d..95e6d8a43 100644 --- a/hysop/methods_keys.py +++ b/hysop/methods_keys.py @@ -1,43 +1,49 @@ -""" -@file methods_keys.py -A list of authorized keys that may be used to set methods +"""A list of authorized keys that may be used to set methods in operators. -Usage: + +Usage +------ + method = {key: value, ...} + Key must be one of the constants given below. Value is usually a class name and sometimes a string. See details in each operator. -Example, the stretching case : -method = {TimeIntegrator: RK3, Formulation: Conservative, - SpaceDiscretisation: FDC4} +Example +-------- +For the stretching case, you may use: + +.. code:: + + method = {TimeIntegrator: RK3, Formulation: Conservative, + SpaceDiscretisation: FDC4} """ -## Authorized keys for method. 
-## Time integrator scheme (see hysop.numerics.integrators for -## available names) +# Time integrator scheme (see hysop.numerics.integrators for +# available names) TimeIntegrator = 11111 -## Remeshing scheme (hysop.numerics.remeshing) +# Remeshing scheme (hysop.numerics.remeshing) Remesh = 22222 -## Interpolation scheme (hysop.numerics.interpolation) +# Interpolation scheme (hysop.numerics.interpolation) Interpolation = 33333 -## Formulation (example in stretching : either Conservative or GradUW) +# Formulation (example in stretching : either Conservative or GradUW) Formulation = 44444 -## Space discretisation method -## (see for example hysop.numerics.finite_differences) +# Space discretisation method +# (see for example hysop.numerics.finite_differences) SpaceDiscretisation = 55555 -## Splitting method +# Splitting method Splitting = 77777 -## Device (either GPU or CPU) +# Device (either GPU or CPU) Support = 88888 -## method[GhostUpdate] = True if the operator deals with -## its ghost points update +# method[GhostUpdate] = True if the operator deals with +# its ghost points update GhostUpdate = 99999 -## List of criterions for adaptative time step computation +# List of criterions for adaptative time step computation dtCrit = 0 -## Multiscale method for multiscale advection +# Multiscale method for multiscale advection MultiScale = 12345 -## Float precision for advection +# Float precision for advection Precision = 12346 -## Extra arguments for discrete operators +# Extra arguments for discrete operators ExtraArgs = 9876 diff --git a/hysop/mpi/topology.py b/hysop/mpi/topology.py index 0734c357e..ed8dd6685 100644 --- a/hysop/mpi/topology.py +++ b/hysop/mpi/topology.py @@ -14,6 +14,7 @@ from hysop.tools.parameters import Discretization, MPIParams import numpy as np import hysop.tools.numpywrappers as npw from hysop.tools.misc import Utils +import math class Cartesian(object): @@ -763,3 +764,62 @@ class TopoTools(object): subtypes[rk].Commit() return subtypes + + @staticmethod + def set_group_size(topo): + """Set default size for groups of lines of particles, + depending on the local resolution + + Parameters + ---------- + topo: :class:`~hysop.mpi.topology.Cartesian` + + Notes + ----- + group_size shape == (problem dim - 1, problem dim) + group_size[i, d] = size of groups in direction i when + advection is performed in direction d. + + """ + resolution = topo.mesh.resolution - topo.ghosts() + size_ref = [8, 5, 4, 2, 1] + group_size = npw.int_ones((topo.domain.dimension - 1, + topo.domain.dimension)) + # Default --> same size in each dir. + # Note that a more complex mechanism is provided in scales + # to compute group size. + for i in size_ref: + if (resolution % i == 0).all(): + group_size[...] = i + return group_size + + @staticmethod + def initialize_tag_parameters(topo, group_size): + """Initialize tag_rank and tag_size, some + parameters used to compute unique tag/ids for MPI messages. + + Parameters + ---------- + topo: :class:`~hysop.mpi.topology.Cartesian` + group_size : numpy array + size of groups of lines of particles in other directions + than the current advection dir. + + Notes + ----- + group_size shape == (problem dim - 1, problem dim) + group_size[i, d] = size of groups in direction i when + advection is performed in direction d. 
+ """ + # tag_size = smallest power of ten to ensure tag_size > max ind_group + reduced_res = np.asarray(topo.mesh.resolution - topo.ghosts()) + n_group = npw.zeros_like(group_size) + dimension = topo.domain.dimension + for i in xrange(dimension): + ind = [j for j in xrange(len(reduced_res)) if j != i] + n_group[:, i] = reduced_res[ind] / group_size[:, i] + + tag_size = npw.asintarray(np.ceil(np.log10(n_group))) + tag_rank = max(2, math.ceil(math.log10(3 * max(topo.shape)))) + + return tag_size, tag_rank diff --git a/hysop/numerics/interpolation.py b/hysop/numerics/interpolation.py index d8099c7d5..b77f333da 100644 --- a/hysop/numerics/interpolation.py +++ b/hysop/numerics/interpolation.py @@ -257,13 +257,160 @@ class Interpolation(object): work[0][ic][ilist] = work[1][ic][ilist] return True - def __str__(self): - """Print method + def _xy(self, vin, vout): + """Interpolate a field along X and Y-axis + + Parameters + ---------- + vin : list of numpy arrays + field on the source grid + vout : list of numpy arrays + field on the target/source grid + Notes + ----- + * vout and vin must have the same resolution along + the first direction. + """ + # Permutation to prepare first interpolation + for i in xrange(3): + self._rwork[2][:, i, :] = vin[:, :, i] + # Interpolation along last dir = Y + # in : vpermut(nc_x, nc_z, nc_y) + # out : vm(nc_x, nf_y, nc_z) + finterpol.interpol_lastdir_permut(self._rwork[2], self.hsource[:2], + self._rwork[3], self.htarget[:2], + self.neighbours[:, YDIR], + self.sub_comms[YDIR]) + # Interpolation along X + # in : vm(nc_x, nf_y, nc_z) + # out : vout(nf_x, nf_y, nc_z) + finterpol.interpol_firstdir_no_com(self._rwork[3], + self.hsource[:2], + vout, self.htarget[:2]) + + def _yz(self, vin, vout): + """Interpolate a field along Y and Z-axis + + Parameters + ---------- + vin : list of numpy arrays + field on the source grid + vout : list of numpy arrays + field on the target/source grid + Notes + ----- + * vout and vin must have the same resolution along + the first direction. + """ + # Interpolation along Z + permutation between Y and Z + # in : vin(nc_x, nc_y, nc_z) + # out : vtmp(nc_x, nf_z, nc_y) + finterpol.interpol_lastdir_permut(vin, self.hsource[1:], + self._rwork[0], self.htarget[1:], + self.neighbours[:, ZDIR], + self.sub_comms[ZDIR]) + # Interpolation along Y + # (=third direction thanks to previous permutation) + # + permutation between Y and Z + # in : vtmp(nc_x, nf_z, nc_y) + # out : vout(nc_x, nf_y, nf_z) + finterpol.interpol_lastdir_permut(self._rwork[0], self.hsource[XDIR], + vout, self.htarget[XDIR], + self.neighbours[YDIR, :], + self.sub_comms[YDIR]) + + def _xz_permut(self, vin, vout): + """Interpolate a field along X and Z-axis, result order = Y,X,Z + + Parameters + ---------- + vin : list of numpy arrays + field on the source grid + vout : list of numpy arrays + field on the target grid + + Notes + ----- + * vout and vin must have the same resolution along + the first direction. 
+ """ + # Interpolation along Z + # Interpolation along Z + # in : vin(nc_x, nc_y, nc_z) + # out : vtmp(nc_x, nc_y, nf_z) + finterpol.interpol_lastdir(vin, self.hsource[::2], + self._rwork[1], self.htarget[::2], + self.neighbours[:, ZDIR], + self.sub_comms[ZDIR]) + # Interpolation along X + # in : vtmp(nc_x, nc_y, nf_z) + # out : vout(nc_y, nf_x, nf_z) + finterpol.interpol_firstdir_permut_no_com(self._rwork[1], + self.hsource[::2], + vout, self.htarget[::2]) + + def _xy_permut(self, vin, vout): + """Interpolate a field along X and Y-axis, result order = Z,X,Y + + Parameters + ---------- + vin : list of numpy arrays + field on the source grid + vout : list of numpy arrays + field on the target grid + + Notes + ----- + * vout and vin must have the same resolution along + the first direction. + """ + # Permutation to prepare first interpolation + for i in xrange(3): + self._rwork[2][:, i, :] = vin[:, :, i] + # Interpolation along last dir = Y + # in : vpermut(nc_x, nc_z, nc_y) + # out : vm(nc_x, nc_z, nf_y) + finterpol.interpol_lastdir(self._rwork[2], self.hsource[:2], + self._rwork[3], self.htarget[:2], + self.neighbours[:, YDIR], + self.sub_comms[YDIR]) + # Interpolation along X + # in : vm(nc_x, nc_z, nf_y) + # out : vout(nc_z, nf_x, nf_y) + finterpol.interpol_firstdir_permut_no_com(self._rwork[3], + self.hsource[:2], + vout, self.htarget[:2]) + + def _2d_3d_vect(self, vin, vout): + """Interpolate each component of a vector along a transverse direction + """ + # For Vx, interpolation along Y and Z + self._yz(vin[XDIR], vtmp[XDIR]) + # For Vy, interpolation along Z (with communications) + # then along X (no communication required) + self._xz_permut(vin[YDIR], vtmp[YDIR]) + # For Vz, interpolation along Y (with communications) + # then along X (no communication required) + self._xy_permut(vin[ZDIR], vtmp[ZDIR]) + + finterpol.interpol_firstdir_no_com(vtmp[XDIR], self.hsource[XDIR], + vout[XDIR], self.htarget[XDIR]) + finterpol.interpol_firstdir(vtmp[YDIR], self.hsource[YDIR], + vout[YDIR], self.htarget[YDIR], + self.neighbours[:, YDIR], + self.sub_comms[YDIR]) + finterpol.interpol_firstdir(vtmp[ZDIR], self.hsource[ZDIR], + vout[ZDIR], self.htarget[ZDIR], + self.neighbours[:, ZDIR], + self.sub_comms[ZDIR]) + + def _3d(self, vin, vout): + """3D interpolation of a field to a targetr grid - no transpositions. """ - disp = self.__class__.__name__ + ' interpolation scheme,' - disp += ' from grid of shape ' + str(self.topo_source.mesh.resolution) - disp += ' to grid of shape ' + str(self.topo_target.mesh.resolution) - return disp + # rwork = vtmp(nf_x, nc_y, nc_z) + finterpol.interpol_firstdir_no_com(vin, self.hsource[XDIR], + self._rwork[0], self.htarget[XDIR]) + self._yz(self._rwork[0], vout) class Linear(Interpolation): diff --git a/hysop/numerics/odesolvers.py b/hysop/numerics/odesolvers.py index d54eaebc0..33ab3027f 100755 --- a/hysop/numerics/odesolvers.py +++ b/hysop/numerics/odesolvers.py @@ -2,7 +2,7 @@ .. 
currentmodule hysop.numerics -* :class:`~hysop.numerics.odesolvers.Euler`, +* :class:`~odesolvers.Euler`, * :class:`~odesolvers.RK2`, * :class:`~odesolvers.RK3`, * :class:`~odesolvers.RK4`, diff --git a/hysop/numerics/remeshing.py b/hysop/numerics/remeshing.py index 6fea76787..f63f52eca 100644 --- a/hysop/numerics/remeshing.py +++ b/hysop/numerics/remeshing.py @@ -406,6 +406,7 @@ class L6_5(RemeshFormula): 0, 0, 0, 0, 0, 0]) / 720., ] + class L6_6(RemeshFormula): """L6_6 kernel.""" diff --git a/hysop/numerics/tests/test_finite_differences.py b/hysop/numerics/tests/test_finite_differences.py index 584df6a81..832d72cef 100644 --- a/hysop/numerics/tests/test_finite_differences.py +++ b/hysop/numerics/tests/test_finite_differences.py @@ -188,12 +188,3 @@ def test_fd_3d_reduce_all(): def test_fd_3d_setiout(): check_fd_schemes_setiout(d3d, [f3d, ref_f3d, ref2_f3d]) - - -if __name__ == '__main__': - test_fd_2d() - test_fd_3d() - test_fd_3d_reduce_input() - test_fd_3d_reduce_output() - test_fd_3d_reduce_all() - test_fd_3d_setiout() diff --git a/hysop/numerics/tests/test_remesh.py b/hysop/numerics/tests/test_remesh.py index 8ff48ed3f..66e1031bc 100644 --- a/hysop/numerics/tests/test_remesh.py +++ b/hysop/numerics/tests/test_remesh.py @@ -194,30 +194,3 @@ def test_l21_remesh_3d_wk_velocity_null(): def test_l21_remesh_3d_wk(): """L2_1 remesh on 3d domain, external work arrays""" run_remesh(L2_1, 3, 0.3 * dx[2], True) - - -if __name__ == "__main__": - test_linear_remesh_1d() - test_linear_remesh_1d_velocity_null() - test_linear_remesh_2d() - test_linear_remesh_2d_velocity_null() - test_linear_remesh_3d() - test_linear_remesh_3d_velocity_null() - test_linear_remesh_1d_wk() - test_linear_remesh_1d_wk_velocity_null() - test_linear_remesh_2d_wk() - test_linear_remesh_2d_wk_velocity_null() - test_linear_remesh_3d_wk() - test_linear_remesh_3d_wk_velocity_null() - test_l21_remesh_1d() - test_l21_remesh_1d_velocity_null() - test_l21_remesh_2d() - test_l21_remesh_2d_velocity_null() - test_l21_remesh_3d() - test_l21_remesh_3d_velocity_null() - test_l21_remesh_1d_wk() - test_l21_remesh_1d_wk_velocity_null() - test_l21_remesh_2d_wk() - test_l21_remesh_2d_wk_velocity_null() - test_l21_remesh_3d_wk() - test_l21_remesh_3d_wk_velocity_null() diff --git a/hysop/numerics/utils.py b/hysop/numerics/utils.py index 34cd0b8ea..d2cd2def2 100644 --- a/hysop/numerics/utils.py +++ b/hysop/numerics/utils.py @@ -1,3 +1,9 @@ +"""Tools (static methods) to manage slices or arrays. + +.. currentmodule hysop.numerics.utils + + +""" from hysop.constants import XDIR, YDIR, ZDIR import hysop.tools.numpywrappers as npw import numpy as np diff --git a/hysop/operator/absorption_BC.py b/hysop/operator/absorption_bc.py similarity index 100% rename from hysop/operator/absorption_BC.py rename to hysop/operator/absorption_bc.py diff --git a/hysop/operator/discrete/absorption_BC.py b/hysop/operator/discrete/absorption_BC.py deleted file mode 100755 index 16f9b98b3..000000000 --- a/hysop/operator/discrete/absorption_BC.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@file operator/discrete/absorption_BC.py - -Operator to kill the vorticity at the outlet boundary -(i.e. 
removal of the periodic BC in the flow direction -by vorticity absorption in order to set the far field -velocity to u_inf at the inlet) -""" - -from hysop.constants import debug, np -from hysop.operator.discrete.discrete import DiscreteOperator -from hysop.fields.variable_parameter import VariableParameter -from hysop.tools.profiler import profile -import hysop.tools.numpywrappers as npw -from hysop.constants import XDIR, YDIR, ZDIR - - -class AbsorptionBC_D(DiscreteOperator): - """ - The periodic boundary condition is modified at the outlet - in the flow direction in order to discard - in the downstream region the eddies coming - periodically from the outlet. - The vorticity absorption conserves div(omega)=0. - The far field velocity is set to u_inf at the inlet. - """ - - @debug - def __init__(self, velocity, vorticity, req_flowrate, - x_coords_absorp, cb, **kwds): - """ - @param[in] velocity field - @param[in, out] vorticity field to absorbe - @param[in] req_flowrate : required value for the flowrate - (used to set the u_inf velocity value at the inlet) - @param x_coords_absorp : array containing the x-coordinates delimitating - the absorption domain ([x_beginning, x_end]) - @param cb : control box for inlet surface computation - """ - assert 'variables' not in kwds, 'variables parameter is useless.' - super(AbsorptionBC_D, self).__init__( - variables=[velocity, vorticity], **kwds) - ## velocity discrete field - self.velocity = velocity - ## vorticity discrete field - self.vorticity = vorticity - ## domain dimension - self.dim = self.velocity.domain.dimension - # If 2D problem, vorticity must be a scalar - if self.dim == 2: - assert self.vorticity.nb_components == 1 - assert (self.dim >= 2),\ - "Wrong problem dimension: only 2D and 3D cases are implemented." - - self.input = self.variables - self.output = [self.vorticity] - ## A reference topology - self.topo = self.vorticity.topology - ## Volume of control - self.cb = cb - self.cb.discretize(self.topo) - # A reference surface, i.e. input surface for flow in x direction - self._in_surf = cb.surf[XDIR] - - sdirs = self._in_surf.t_dir - # Compute 1./ds and 1./dv ... - cb_length = self.cb.real_length[self.topo] - self._inv_ds = 1. / npw.prod(cb_length[sdirs]) - self._inv_dvol = 1. / npw.prod(cb_length) - ## Expected value for the flow rate through self.surfRef - self.req_flowrate = req_flowrate - assert isinstance(self.req_flowrate, VariableParameter),\ - "the required flowrate must be a VariableParameter object." - self.req_flowrate_val = None - ## x-coordinates delimitating the absorption band at the outlet - self.x_coords_absorp = x_coords_absorp - - ## setup for the absorption filter definition - self.xb = self.x_coords_absorp[0] - self.xe = self.x_coords_absorp[1] - self.xc = self.xb + (self.xe - self.xb) / 2.0 - self.eps = 10.0 - self.coeff = 1.0 / (np.tanh(self.eps * (self.xb - self.xc)) - - np.tanh(self.eps * (self.xe - self.xc))) - self.coords = self.topo.mesh.coords - - @debug - @profile - def apply(self, simulation=None): - # the required flowrate value is updated (depending on time) - self.req_flowrate.update(simulation) - # \warning : the flow rate value is divided by area of input surf. 
- self.req_flowrate_val = self.req_flowrate[self.req_flowrate.name] \ - * self._inv_ds - - # Definition of the filter function (for smooth vorticity absorption) - self._filter = npw.ones_like(self.vorticity.data[0]) - indFilter = np.where(np.logical_and(self.coords[0][:,0,0] >= self.xb, - self.coords[0][:,0,0] <= self.xe)) -# indFilterZero = np.where(self.coords[0][:,0,0] > self.xe) - FiltFormula = np.tanh(self.eps * (self.coords[0][indFilter] - - self.xc)) - FiltFormula -= np.tanh(self.eps * (self.xe - self.xc)) - FiltFormula *= self.coeff - self._filter[indFilter,:,:] = FiltFormula -# self._filter[indFilterZero] = 0.0 - - # Beginning of divergence free vorticity absorption - # for non-periodic inlet BC - for d in xrange(self.vorticity.nb_components): - self.vorticity[d][...] *= self._filter[...] - - # Definition of the X-derivative of the filter function - self._filter = npw.zeros_like(self.vorticity.data[0]) - indFilter = np.where(np.logical_and(self.coords[0][:,0,0] >= self.xb, - self.coords[0][:,0,0] <= self.xe)) - FiltFormula = self.eps * (1.0 - np.tanh(self.eps * - (self.coords[0][indFilter] - - self.xc)) ** 2) - FiltFormula *= self.coeff - self._filter[indFilter] = FiltFormula - - # End of divergence free vorticity absorption for non-periodic inlet BC - self.vorticity.data[YDIR][...] += (- self._filter[...] * - self.velocity[ZDIR][...]) + \ - (self._filter[...] * - self.req_flowrate_val[ZDIR]) - self.vorticity.data[ZDIR][...] += (self._filter[...] * - self.velocity[YDIR][...]) - \ - (self._filter[...] * - self.req_flowrate_val[YDIR]) diff --git a/hysop/operator/discrete/absorption_bc.py b/hysop/operator/discrete/absorption_bc.py new file mode 100755 index 000000000..179798a08 --- /dev/null +++ b/hysop/operator/discrete/absorption_bc.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +"""Operator to kill the vorticity at the outlet boundary +(i.e. removal of the periodic BC in the flow direction +by vorticity absorption in order to set the far field +velocity to u_inf at the inlet) +""" + +from hysop.constants import debug, np +from hysop.operator.discrete.discrete import DiscreteOperator +from hysop.fields.variable_parameter import VariableParameter +from hysop.tools.profiler import profile +import hysop.tools.numpywrappers as npw +from hysop.constants import YDIR, ZDIR +from hysop.tools.misc import WorkSpaceTools + + +class AbsorptionBC(DiscreteOperator): + """ + The periodic boundary condition is modified at the outlet + in the flow direction in order to discard + in the downstream region the eddies coming + periodically from the outlet. + The vorticity absorption conserves div(omega)=0. + The far field velocity is set to u_inf at the inlet. + """ + + @debug + def __init__(self, velocity, vorticity, req_flowrate, + x_coords_absorp, cb, **kwds): + """ + Parameters + ---------- + velocity, vorticity : :class:`~hysop.fields.discrete.DiscreteField` + req_flowrate : double + required value for the flow rate + absorption_box : :class:`~hysop.subsets.SubBox` + a box representing the area where filter is applied. + filter_func: list of python functions, optional + functions used to compute the filter and its differential. + **kwds : extra parameters for base class + + + Notes + ----- + * if set, filter_func[0] and filter_func[1] must be python function + returning a numpy array. For example to apply a sine inside + the absorption area use : + + .. code:: + + def func(x): + return np.sin(x) + + """ + assert 'variables' not in kwds, 'variables parameter is useless.' 
+ # velocity discrete field + self.velocity = velocity + # vorticity discrete field + self.vorticity = vorticity + self.absorption_box = absorption_box + super(AbsorptionBC, self).__init__( + variables=[velocity, vorticity], **kwds) + # If 2D problem, vorticity must be a scalar + if self._dim == 2: + assert self.vorticity.nb_components == 1 + assert (self._dim > 2),\ + "Wrong problem dimension: only 3D cases are implemented." + topo = self.vorticity.topology + xcoords = topo.mesh.coords[0] + self.input = self.variables + self.output = [self.vorticity] + # Expected value for the flow rate through self.surfRef + msg = 'Wrong type or length for input req_flowrate.' + if not isinstance(req_flowrate, VariableParameter): + self.req_flowrate = VariableParameter( + data=np.asarray(req_flowrate)) + assert np.asarray(req_flowrate).size == self._dim, msg + else: + self.req_flowrate = req_flowrate + + t_dir = self.absorption_box.t_dir + dsurf = npw.prod(self.absorption_box.real_length[topo][t_dir]) + self._inv_ds = 1. / dsurf + ind = self.absorption_box.ind[topo][0] + + if filter_func is None: + self.absorption_filter = None + self.diff_filter = None + self._set_default_filter(xcoords[ind[0]]) + else: + assert isinstance(filter_func, list) + self.absorption_filter = filter_func[0](xcoords[ind[0]]) + self.diff_filter = filter_func[1](xcoords[ind[0]]) + + def _set_default_filter(self, x): + """Default values for the filter in the absorption box + """ + xb = x[0] + xe = x[-1] # [1] ???? A VERIFIER + xc = xb + (xe - xb) / 2.0 + eps = 10. + form = np.tanh(eps * (x - xc)) + self.absorption_filter = form - np.tanh(eps * (xe - xc)) + coeff = 1.0 / (np.tanh(eps * (xb - xc)) - np.tanh(eps * (xe - xc))) + self.absorption_filter[...] *= coeff + self.diff_filter = eps * (1.0 - form ** 2) + self.diff_filter *= coeff + + def _set_work_arrays(self, rwork=None, iwork=None): + + # Reference shape comes from the box at the end of the domain + subshape = self.absorption_box.mesh[self.vorticity.topology].resolution + self._rwork = WorkSpaceTools.check_work_array(1, subshape, rwork) + + @debug + @profile + def apply(self, simulation=None): + # the required flowrate value is updated (depending on time) + self.req_flowrate.update(simulation) + # \warning : the flow rate value is divided by area of input surf. 
+ req_flowrate_val = self.req_flowrate.data * self._inv_ds + + # Definition of the filter function (for smooth vorticity absorption) + self._filter = npw.ones_like(self.vorticity.data[0]) + indFilter = np.where(np.logical_and(self.coords[0][:,0,0] >= self.xb, + self.coords[0][:,0,0] <= self.xe)) +# indFilterZero = np.where(self.coords[0][:,0,0] > self.xe) + FiltFormula = np.tanh(self.eps * (self.coords[0][indFilter] - + self.xc)) + FiltFormula -= np.tanh(self.eps * (self.xe - self.xc)) + FiltFormula *= self.coeff + self._filter[indFilter,:,:] = FiltFormula +# self._filter[indFilterZero] = 0.0 + + # Beginning of divergence free vorticity absorption + # for non-periodic inlet BC + for d in xrange(self.vorticity.nb_components): + np.multiply(self.vorticity[d][ind], self.absorption_filter, + self.vorticity[d][ind]) + + # Definition of the X-derivative of the filter function + self._filter = npw.zeros_like(self.vorticity.data[0]) + indFilter = np.where(np.logical_and(self.coords[0][:,0,0] >= self.xb, + self.coords[0][:,0,0] <= self.xe)) + FiltFormula = self.eps * (1.0 - np.tanh(self.eps * + (self.coords[0][indFilter] - + self.xc)) ** 2) + FiltFormula *= self.coeff + self._filter[indFilter] = FiltFormula + + # End of divergence free vorticity absorption for non-periodic inlet BC + self.vorticity.data[YDIR][...] += (- self._filter[...] * + self.velocity[ZDIR][...]) + \ + (self._filter[...] * + self.req_flowrate_val[ZDIR]) + self.vorticity.data[ZDIR][...] += (self._filter[...] * + self.velocity[YDIR][...]) - \ + (self._filter[...] * + self.req_flowrate_val[YDIR]) diff --git a/hysop/operator/discrete/multiresolution_filter.py b/hysop/operator/discrete/multiresolution_filter.py index 4639aa25b..157dbbb10 100644 --- a/hysop/operator/discrete/multiresolution_filter.py +++ b/hysop/operator/discrete/multiresolution_filter.py @@ -1,6 +1,5 @@ -""" -@file multiresolution_filter.py -Filter values from a fine grid to a coarse grid. +"""Filter values from a fine grid to a coarse grid. + """ from hysop.constants import debug, np, HYSOP_MPI_REAL from hysop.operator.discrete.discrete import DiscreteOperator @@ -15,23 +14,36 @@ class FilterFineToCoarse(DiscreteOperator): """ Discretized operator for filtering from fine to coarse grid. """ + + authorized_methods = [Linear, L21] + def __init__(self, field_in, field_out, **kwds): + """ + Parameters + ---------- + field_in, field_out : lists of + :class:`~hysop.fields.discrete.DiscreteFields` + kwds : base class parameters + + """ self.field_in, self.field_out = field_in, field_out + # Note : since discrete operators are usually + # supposed to work on one single topology, + # only field_in is passed to base class. 
+ + # mesh_out must be set before base class init, + # to set rwork properly + + self._mesh_out = self.field_out[0].topology.mesh + self.gh_out = self.field_out[0].topology.ghosts() super(FilterFineToCoarse, self).__init__( variables=self.field_in, **kwds) self.input = [self.field_in] self.output = [self.field_out] self._mesh_in = self.field_in[0].topology.mesh - self._mesh_out = self.field_out[0].topology.mesh - self.gh_out = self.field_out[0].topology.ghosts() gh_in = self.field_in[0].topology.ghosts() - if self.method[Remesh] is Rmsh_Linear: - assert (self.gh_out >= 1).all() - elif self.method[Remesh] is L2_1: - assert (self.gh_out >= 2).all() - else: - raise ValueError('This scheme (' + self.method[Remesh].__name__ + - ') is not yet implemented for filter.') + self._rmsh_method = self._check_method() + resol_in = self._mesh_in.resolution - 2 * gh_in resol_out = self._mesh_out.resolution - 2 * self.gh_out pts_per_cell = resol_in / resol_out @@ -91,24 +103,6 @@ class FilterFineToCoarse(DiscreteOperator): self._sl_coarse = [] # Weights associated to offsets in coarse grid self._w_coarse = [] - try: - assert issubclass(self.method[Remesh], RemeshFormula), \ - "This operator works with a RemeshingFormula." - self._rmsh_method = self.method[Remesh]() - except KeyError: - self._rmsh_method = Rmsh_Linear() - if isinstance(self._rmsh_method, Rmsh_Linear): - # Linear interpolation - assert np.all(self.gh_out >= 1), "Output data must have at least" \ - "1 ghosts in all directions (" + str(self.gh_out) + " given)" - elif isinstance(self._rmsh_method, L2_1): - # L2_1 interpolation - assert np.all(self.gh_out >= 2), "Output data must have at least" \ - "2 ghosts in all directions (" + str(self.gh_out) + " given)" - else: - raise ValueError("The multiresolution filter is implemented for " + - "Linear or L2_1 interpolation (" + - str(self.method[Remesh]) + " given).") for i_x in xrange(len(self._rmsh_method.weights)): for i_y in xrange(len(self._rmsh_method.weights)): for i_z in xrange(len(self._rmsh_method.weights)): @@ -129,16 +123,44 @@ class FilterFineToCoarse(DiscreteOperator): self._rmsh_method.shift + i_z, None) )) - self._w_coarse.append( - (i_x, i_y, i_z)) - if self._rwork is None: - self._rwork = npw.zeros(resol_out) + self._w_coarse.append((i_x, i_y, i_z)) + + def _set_work_arrays(self, rwork=None, iwork=None): + subshape = tuple(self._mesh_out.resolution - 2 * self.gh_out) + subsize = np.prod(subshape) + if rwork is None: + self._rwork = [npw.zeros(subshape)] + else: + self._rwork = [] + assert isinstance(rwork, list), 'rwork must be a list.' + assert len(rwork) >= 1, 'Wrong length for work arrays list.' + for wk in rwork[:1]: + assert wk.size >= subsize + self._rwork.append(wk.ravel()[:subsize].reshape(subshape)) + + def _check_method(self): + """Check remeshing method parameters. + """ + try: + assert issubclass(self.method[Remesh], RemeshFormula), \ + "This operator works with a RemeshingFormula." + rmsh_method = self.method[Remesh]() + except KeyError: + rmsh_method = Linear() + + msg = 'Ghost layer must be increased for the chosen scheme.' + assert (self.gh_out >= + self.method[Remesh].ghosts_layer_size).all(), msg + msg = 'Remesh scheme not yet implemented.' + assert self.method[Remesh] in self.authorized_methods, msg + return rmsh_method @debug @profile def apply(self, simulation=None): assert simulation is not None, \ "Missing simulation value for computation." 
+ wk = self._rwork[0] for v_in, v_out in zip(self.field_in, self.field_out): for d in xrange(v_in.nb_components): for sl_coarse, iw_fun in zip(self._sl_coarse, self._w_coarse): @@ -150,14 +172,14 @@ class FilterFineToCoarse(DiscreteOperator): iw_fun[2], self.dist_coords[2], self._work_weight[2]) # Loop over fine grid points sharing the same coarse cell for sl in self._sl: - self._rwork[...] = v_in[d][sl] + wk[...] = v_in[d][sl] # Compute weights - self._rwork[...] *= self._work_weight[0][sl[0], :, :] - self._rwork[...] *= self._work_weight[1][:, sl[1], :] - self._rwork[...] *= self._work_weight[2][:, :, sl[2]] - self._rwork[...] *= self.scale_factor + wk[...] *= self._work_weight[0][sl[0], :, :] + wk[...] *= self._work_weight[1][:, sl[1], :] + wk[...] *= self._work_weight[2][:, :, sl[2]] + wk[...] *= self.scale_factor # Add contributions in data - v_out[d][sl_coarse] += self._rwork[...] + v_out[d][sl_coarse] += wk[...] self._exchange_ghosts() def _exchange_ghosts_local_d(self, d): @@ -167,8 +189,12 @@ class FilterFineToCoarse(DiscreteOperator): sl_gh = [slice(None) for _ in xrange(self._dim)] sl[d] = slice(1 * s_gh, 2 * s_gh) sl_gh[d] = slice(-1 * s_gh, None) + # Add ghost points values at the end of the domain in direction + # d into corresponding points at the beginning of the domain. for v_out in self.field_out: v_out.data[0][tuple(sl)] += v_out.data[0][tuple(sl_gh)] + # Add ghost points values at the beginning of the domain in direction + # d into corresponding points at the end of the domain. sl[d] = slice(-2 * s_gh, -1 * s_gh) sl_gh[d] = slice(0, 1 * s_gh) for v_out in self.field_out: diff --git a/hysop/operator/discrete/scales_advection.py b/hysop/operator/discrete/scales_advection.py index 79064cecc..96ee6d4ac 100644 --- a/hysop/operator/discrete/scales_advection.py +++ b/hysop/operator/discrete/scales_advection.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -""" -@file scales_advection.py -Discrete Advection operator based on scales library (Jean-Baptiste) +"""Interface to scales solver (fortran) """ try: @@ -20,39 +18,52 @@ class ScalesAdvection(DiscreteOperator): Advection process, based on scales library. """ + _scales_remesh_kernels = ['p_M4', 'p_M8', 'p_44', 'p_64', + 'p_66', 'p_84', 'p_M6'] + """Kernels available in scales""" + _scales_remesh_corrected_kernels = ['p_O2', 'p_O4', 'p_L2'] + """Corrected kernels available in scales""" + @debug def __init__(self, velocity, advected_fields, **kwds): """ - Constructor. - @param velocity discrete field - @param advected_fields : list of discrete fields to be advected + Parameters + ---------- + velocity : :class:`~hysop.fields.discrete.DiscreteField` + advection velocity (discretized) + advected_fields : list of :class:`~hysop.fields.discrete.DiscreteField` + discrete field(s) to be advected + direction : int + advection direction + kwds : extra parameters for base class """ - ## Advection velocity + msg = 'Scales advection init : variables parameter is useless.' + msg += 'See user guide for details on the proper' + msg += ' way to build the operator.' + assert 'variables' not in kwds, msg + # Advection velocity self.velocity = velocity - if 'variables' in kwds: - assert advected_fields is None, 'too many input arguments.' 
-            self.advected_fields = kwds['variables'].keys()
-            kwds['variables'][self.velocity] = kwds['discretization']
-            kwds.pop('discretization')
-            super(ScalesAdvection, self).__init__(**kwds)
+        variables = [self.velocity]
+        if isinstance(advected_fields, list):
+            self.advected_fields = advected_fields
         else:
-            v = [self.velocity]
-            if isinstance(advected_fields, list):
-                self.advected_fields = advected_fields
-            else:
-                self.advected_fields = [advected_fields]
-            v += self.advected_fields
-            super(ScalesAdvection, self).__init__(variables=v, **kwds)
+            self.advected_fields = [advected_fields]
+        variables += self.advected_fields
+
+        msg = 'Scales advection init : method parameter is compulsory.'
+        assert 'method' in kwds, msg
+
+        super(ScalesAdvection, self).__init__(variables=variables, **kwds)
 
-        self.input = [self.velocity]
+        self.input = self.variables
         self.output = self.advected_fields
 
-        # Scales functions for each field (depending if vector)
+        # Connection to the proper scales functions.
         self._scales_func = []
-        isMultiscale = self.method[MultiScale] is not None
+        is_multi_scale = self.method[MultiScale] is not None
         for adF in self.advected_fields:
             if adF.nb_components == 3:
-                if isMultiscale:
+                if is_multi_scale:
                     # 3D interpolation of the velocity before advection
                     self._scales_func.append(
                         scales2py.solve_advection_inter_basic_vect)
@@ -62,7 +73,7 @@ class ScalesAdvection(DiscreteOperator):
             else:
-                if isMultiscale:
+                if is_multi_scale:
                     self._scales_func.append(
                         scales2py.solve_advection_inter_basic)
                 else:
diff --git a/hysop/operator/discrete/stretching.py b/hysop/operator/discrete/stretching.py
index 7e96a901d..f35e2d07b 100755
--- a/hysop/operator/discrete/stretching.py
+++ b/hysop/operator/discrete/stretching.py
@@ -155,6 +155,7 @@ class Conservative(Stretching):
 
     @profile
     def __init__(self, **kwds):
+        # Right-hand side for time integration
         def rhs(t, y, result):
             """rhs used in time integrator
             """
diff --git a/hysop/problem/problem_with_GLRendering.py b/hysop/problem/problem_with_GLRendering.py
index 94c885428..ac13a5078 100644
--- a/hysop/problem/problem_with_GLRendering.py
+++ b/hysop/problem/problem_with_GLRendering.py
@@ -1,7 +1,4 @@
-"""
-@file problem_with_GLRendering.py
-
-Extends Problem description to handel real time rendering wit OpenGL.
+"""Extend Problem description to handle real time rendering with OpenGL.
 """
 from hysop.constants import debug
 from hysop.mpi import main_rank
diff --git a/setup.py.in b/setup.py.in
index 4f3336ba8..e92172482 100644
--- a/setup.py.in
+++ b/setup.py.in
@@ -28,7 +28,7 @@ def parseCMakeVar(var):
     if var != "":
         return var.split(';')
     else:
-        return None
+        return []
 
 def parseCMakeDefines(var):
     defines = parseCMakeVar(var)
-- 
GitLab
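
For reference, the tanh-shaped absorption profile built by the new `_set_default_filter` method of `AbsorptionBC` (hysop/operator/discrete/absorption_bc.py, added above) can be reproduced on its own. The sketch below only illustrates that formula: the helper name `default_absorption_filter` and the sample coordinates are illustrative and not part of the library API; `eps=10.` is the default used in the patch.

.. code::

    import numpy as np

    def default_absorption_filter(x, eps=10.):
        """Smooth tanh-based absorption profile and its x-derivative,
        mirroring AbsorptionBC._set_default_filter."""
        xb, xe = x[0], x[-1]          # bounds of the absorption box
        xc = xb + (xe - xb) / 2.0     # center of the box
        form = np.tanh(eps * (x - xc))
        coeff = 1.0 / (np.tanh(eps * (xb - xc)) - np.tanh(eps * (xe - xc)))
        absorption_filter = (form - np.tanh(eps * (xe - xc))) * coeff
        diff_filter = eps * (1.0 - form ** 2) * coeff
        return absorption_filter, diff_filter

    # hypothetical 1D sample of x-coordinates spanning the absorption box
    x = np.linspace(8., 10., 65)
    filt, dfilt = default_absorption_filter(x)
    # the profile decays smoothly from 1 at the box entry to 0 at the outlet
    assert np.isclose(filt[0], 1.) and np.isclose(filt[-1], 0.)

The profile goes smoothly from 1 at the entry of the absorption box to 0 at the outlet, so multiplying the vorticity by it damps the periodic wake without a sharp cut-off; `diff_filter` is the x-derivative used by the operator for the divergence-free correction terms.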