diff --git a/hysop/core/graph/computational_graph.py b/hysop/core/graph/computational_graph.py
index d8a5eba08c01846028b2aaf699380e476b969cd2..156d922841f637ab548d0f6fffee1e6d44ef4c6a 100644
--- a/hysop/core/graph/computational_graph.py
+++ b/hysop/core/graph/computational_graph.py
@@ -73,24 +73,24 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
             self._profiler += node._profiler
 
     def node_requirements_report(self, requirements):
-        values = [(u'OPERATOR', u'TOPOLOGY', u'TSTATE', u'GHOSTS', u'MEMORY ORDER',
-                   u'NODE.MRO[0]', u'NODE.MRO[1]', u'NODE.MRO[2]')]
+        values = [('OPERATOR', 'TOPOLOGY', 'TSTATE', 'GHOSTS', 'MEMORY ORDER',
+                   'NODE.MRO[0]', 'NODE.MRO[1]', 'NODE.MRO[2]')]
         for node in self.nodes:
             reqs = node.get_node_requirements()
             if not isinstance(reqs, OperatorRequirements):
                 continue
-            opname = node.pretty_name.decode('utf-8')
+            opname = node.pretty_name
             optypes = type(node).__mro__
             n = len(optypes)
-            optypes = tuple(_.__name__ for _ in optypes[:min(3, n)]) + (u'',)*(3-n)
+            optypes = tuple(_.__name__ for _ in optypes[:min(3, n)]) + ('',)*(3-n)
             vals = (opname,
                     reqs.enforce_unique_topology_shape, reqs.enforce_unique_transposition_state,
                     reqs.enforce_unique_ghosts, reqs.enforce_unique_memory_order,
                     ) + optypes
-            vals = tuple(unicode(x) for x in vals)
+            vals = tuple(str(x) for x in vals)
             values.append(vals)
-        template = u'\n {:<{name_size}} {:^{topology_size}} {:^{tstates_size}} {:^{ghosts_size}} {:^{order_size}} {:<{type_size0}} {:<{type_size1}} {:<{type_size2}}'
+        template = '\n {:<{name_size}} {:^{topology_size}} {:^{tstates_size}} {:^{ghosts_size}} {:^{order_size}} {:<{type_size0}} {:<{type_size1}} {:<{type_size2}}'
         name_size = max(strlen(s[0]) for s in values)
         topology_size = max(strlen(s[1]) for s in values)
         tstates_size = max(strlen(s[2]) for s in values)
@@ -100,7 +100,7 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
         type_size1 = max(strlen(s[6]) for s in values)
         type_size2 = max(strlen(s[7]) for s in values)
 
-        ss = u''
+        ss = ''
         for (opname, enforce_unique_topology_shape, enforce_unique_transposition_state,
              enforce_unique_ghosts, enforce_unique_memory_order, optype0, optype1, optype2) in values:
             ss += template.format(
@@ -119,9 +119,9 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
                 type_size1=type_size1,
                 type_size2=type_size2)
 
-        title = u'ComputationalGraph {} node requirements report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:])).encode('utf-8')
+        title = 'ComputationalGraph {} node requirements report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def field_requirements_report(self, requirements):
         inputs, outputs = {}, {}
@@ -138,17 +138,17 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
                 sin = sinputs.setdefault(td, [])
                 for field, reqs in td_reqs.items():
                     for req in sorted_reqs(reqs):
-                        opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8')
-                        fname = getattr(req.field, 'pretty_name', 'UnknownField').decode('utf-8')
+                        opname = getattr(req.operator, 'pretty_name', 'UnknownOperator')
+                        fname = getattr(req.field, 'pretty_name', 'UnknownField')
                         min_ghosts = req.ghost_str(req.min_ghosts)
                         max_ghosts = req.ghost_str(req.max_ghosts+1)
                         discr = str(req.operator.input_fields[field].grid_resolution)
-                        ghosts = u'{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
+                        ghosts = '{}<=ghosts<{}'.format(min_ghosts, max_ghosts)
                         can_split = req.can_split.view(npw.int8)
-                        memory_order = 
u'{}'.format(req.memory_order) if req.memory_order else u'ANY' - can_split = u'[{}]'.format( - u','.join('1' if cs else '0' for cs in req.can_split)) - tstates = u'{}'.format(u','.join(str(ts) for ts in req.tstates)) \ + memory_order = '{}'.format(req.memory_order) if req.memory_order else 'ANY' + can_split = '[{}]'.format( + ','.join('1' if cs else '0' for cs in req.can_split)) + tstates = '{}'.format(','.join(str(ts) for ts in req.tstates)) \ if req.tstates else 'ANY' sin.append((opname, fname, discr, ghosts, memory_order, can_split, tstates)) for field, mreqs in requirements.output_field_requirements.items(): @@ -159,22 +159,22 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): sout = soutputs.setdefault(td, []) for field, reqs in td_reqs.items(): for req in sorted_reqs(reqs): - opname = getattr(req.operator, 'pretty_name', 'UnknownOperator').decode('utf-8') - fname = getattr(req.field, 'pretty_name', 'UnknownField').decode('utf-8') + opname = getattr(req.operator, 'pretty_name', 'UnknownOperator') + fname = getattr(req.field, 'pretty_name', 'UnknownField') min_ghosts = req.ghost_str(req.min_ghosts) max_ghosts = req.ghost_str(req.max_ghosts+1) discr = str(req.operator.output_fields[field].grid_resolution) - ghosts = u'{}<=ghosts<{}'.format(min_ghosts, max_ghosts) + ghosts = '{}<=ghosts<{}'.format(min_ghosts, max_ghosts) can_split = req.can_split.view(npw.int8) - memory_order = u'{}'.format(req.memory_order) if req.memory_order else u'ANY' - can_split = u'[{}]'.format( - u','.join('1' if cs else '0' for cs in req.can_split)) - tstates = u'{}'.format(u','.join(str(ts) for ts in req.tstates)) \ - if req.tstates else u'ANY' + memory_order = '{}'.format(req.memory_order) if req.memory_order else 'ANY' + can_split = '[{}]'.format( + ','.join('1' if cs else '0' for cs in req.can_split)) + tstates = '{}'.format(','.join(str(ts) for ts in req.tstates)) \ + if req.tstates else 'ANY' sout.append((opname, fname, discr, ghosts, memory_order, can_split, tstates)) - titles = [[(u'OPERATOR', u'FIELD', u'DISCRETIZATION', u'GHOSTS', - u'MEMORY ORDER', u'CAN_SPLIT', u'TSTATES')]] + titles = [[('OPERATOR', 'FIELD', 'DISCRETIZATION', 'GHOSTS', + 'MEMORY ORDER', 'CAN_SPLIT', 'TSTATES')]] name_size = max(len(s[0]) for ss in sinputs.values()+soutputs.values()+titles for s in ss) field_size = max(len(s[1]) for ss in sinputs.values()+soutputs.values()+titles for s in ss) discr_size = max(len(s[2]) for ss in sinputs.values()+soutputs.values()+titles for s in ss) @@ -185,15 +185,15 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): tstates_size = max(len(s[6]) for ss in sinputs.values() + soutputs.values()+titles for s in ss) - template = u'\n {:<{name_size}} {:^{field_size}} {:^{discr_size}} {:^{ghosts_size}} {:^{order_size}} {:^{cansplit_size}} {:^{tstates_size}}' + template = '\n {:<{name_size}} {:^{field_size}} {:^{discr_size}} {:^{ghosts_size}} {:^{order_size}} {:^{cansplit_size}} {:^{tstates_size}}' - ss = u'>INPUTS:' + ss = '>INPUTS:' if sinputs: for (td, sreqs) in sinputs.items(): if isinstance(td, Topology): - ss += u'\n {}'.format(td.short_description()) + ss += '\n {}'.format(td.short_description()) else: - ss += u'\n {}'.format(td) + ss += '\n {}'.format(td) ss += template.format(*titles[0][0], name_size=name_size, field_size=field_size, discr_size=discr_size, ghosts_size=ghosts_size, @@ -207,14 +207,14 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): order_size=order_size, cansplit_size=cansplit_size, tstates_size=tstates_size) else: - 
ss += u' None' - ss += u'\n>OUTPUTS:' + ss += ' None' + ss += '\n>OUTPUTS:' if soutputs: for (td, sreqs) in soutputs.items(): if isinstance(td, Topology): - ss += u'\n {}'.format(td.short_description()) + ss += '\n {}'.format(td.short_description()) else: - ss += u'\n {}'.format(td) + ss += '\n {}'.format(td) ss += template.format(*titles[0][0], name_size=name_size, field_size=field_size, discr_size=discr_size, ghosts_size=ghosts_size, @@ -228,11 +228,11 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): order_size=order_size, cansplit_size=cansplit_size, tstates_size=tstates_size) else: - ss += u' None' + ss += ' None' - title = u'ComputationalGraph {} field requirements report '.format( - self.pretty_name.decode('utf-8')) - return u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8') + title = 'ComputationalGraph {} field requirements report '.format( + self.pretty_name) + return '\n{}\n'.format(framed_str(title=title, msg=ss)) def domain_report(self): domains = self.get_domains() @@ -247,30 +247,30 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): if (domain is None): continue for op in sorted(operators, key=lambda x: x.pretty_name): - finputs = u','.join(sorted([f.pretty_name.decode('utf-8') + finputs = ','.join(sorted([f.pretty_name for f in op.iter_input_fields() if f.domain is domain])) - foutputs = u','.join(sorted([f.pretty_name.decode('utf-8') + foutputs = ','.join(sorted([f.pretty_name for f in op.iter_output_fields() if f.domain is domain])) - pinputs = u','.join(sorted([p.pretty_name.decode('utf-8') + pinputs = ','.join(sorted([p.pretty_name for p in op.input_params.values()])) - poutputs = u','.join(sorted([p.pretty_name.decode('utf-8') + poutputs = ','.join(sorted([p.pretty_name for p in op.output_params.values()])) - infields = u'[{}]'.format(finputs) if finputs else u'' - outfields = u'[{}]'.format(foutputs) if foutputs else u'' - inparams = u'[{}]'.format(pinputs) if pinputs else u'' - outparams = u'[{}]'.format(poutputs) if poutputs else u'' - - inputs = u'{}{}{}'.format( - infields, u'x' if infields and inparams else u'', inparams) - outputs = u'{}{}{}'.format( - outfields, u'x' if outfields and outparams else u'', outparams) - - if inputs == u'': - inputs = u'no inputs' - if outputs == u'': - outputs = u'no outputs' - - opname = op.pretty_name.decode('utf-8') + infields = '[{}]'.format(finputs) if finputs else '' + outfields = '[{}]'.format(foutputs) if foutputs else '' + inparams = '[{}]'.format(pinputs) if pinputs else '' + outparams = '[{}]'.format(poutputs) if poutputs else '' + + inputs = '{}{}{}'.format( + infields, 'x' if infields and inparams else '', inparams) + outputs = '{}{}{}'.format( + outfields, 'x' if outfields and outparams else '', outparams) + + if inputs == '': + inputs = 'no inputs' + if outputs == '': + outputs = 'no outputs' + + opname = op.pretty_name optype = type(op).__name__ strdata = (opname, inputs, '->', outputs, optype) @@ -280,20 +280,20 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): if (None in domains): operators = domains[None] for op in sorted(operators, key=lambda x: x.pretty_name): - pinputs = u','.join(sorted([p.pretty_name.decode('utf-8') + pinputs = ','.join(sorted([p.pretty_name for p in op.input_params.values()])) - poutputs = u','.join(sorted([p.pretty_name.decode('utf-8') + poutputs = ','.join(sorted([p.pretty_name for p in op.output_params.values()])) - inparams = u'[{}]'.format(pinputs) if pinputs else '' - outparams = u'[{}]'.format(poutputs) 
if poutputs else ''
+                inparams = '[{}]'.format(pinputs) if pinputs else ''
+                outparams = '[{}]'.format(poutputs) if poutputs else ''
 
-                inputs = u'{}'.format(inparams)
-                outputs = u'{}'.format(outparams)
+                inputs = '{}'.format(inparams)
+                outputs = '{}'.format(outparams)
                 if inputs == '':
-                    inputs = u'no inputs'
+                    inputs = 'no inputs'
                 if outputs == '':
-                    outputs = u'no outputs'
-                opname = op.pretty_name.decode('utf-8')
+                    outputs = 'no outputs'
+                opname = op.pretty_name
                 optype = type(op).__name__
 
                 strdata = (opname, inputs, '->', outputs, optype)
@@ -306,49 +306,49 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
         out_size = max(strlen(s[3]) for ss in ops.values() for s in ss)
         type_size = max(strlen(s[4]) for ss in ops.values() for s in ss)
 
-        ss = u''
+        ss = ''
         for (domain, dops) in ops.items():
             if (domain is None):
                 continue
-            ss += u'\n>{}'.format(domain.short_description())
-            ss += u'\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
+            ss += '\n>{}'.format(domain.short_description())
+            ss += '\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
                 'OPERATOR', 'INPUTS', '', 'OUTPUTS', 'OPERATOR TYPE',
                 name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size)
             for (opname, inputs, arrow, outputs, optype) in dops:
-                ss += u'\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
+                ss += '\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
                     opname, inputs, arrow, outputs, optype,
                     name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size)
 
         if (None in domains):
-            ss += u'\n>Domainless operators:'
-            ss += u'\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
+            ss += '\n>Domainless operators:'
+            ss += '\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
                 'OPERATOR', 'INPUTS', '', 'OUTPUTS', 'OPERATOR TYPE',
                 name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size)
             for (opname, inputs, arrow, outputs, optype) in ops[None]:
-                ss += u'\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
+                ss += '\n {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format(
                     opname, inputs, arrow, outputs, optype,
                     name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size)
 
-        title = u'ComputationalGraph {} domain and operator report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:])).encode('utf-8')
+        title = 'ComputationalGraph {} domain and operator report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def topology_report(self):
         ss = ''
         for (backend, topologies) in self.get_topologies().items():
-            ss += u'\n {}:'.format(backend.short_description())
-            ss += u'\n *'+'\n *'.join(t.short_description()
+            ss += '\n {}:'.format(backend.short_description())
+            ss += '\n *'+'\n *'.join(t.short_description()
                                       for t in sorted(topologies, key=lambda x: x.id))
-        title = u'ComputationalGraph {} topology report '.format(
-            self.pretty_name.decode('utf-8'))
-        return u'\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
+        title = 'ComputationalGraph {} topology report '.format(
+            self.pretty_name)
+        return '\n{}\n'.format(framed_str(title=title, msg=ss[1:]))
 
     def 
variable_report(self): fields = self.fields @@ -364,14 +364,14 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): topo = node.output_fields[field] field_topologies.setdefault(topo, []).append(node) for topo in sorted(field_topologies.keys(), key=lambda x: x.tag): - pnames = set(node.pretty_name.decode('utf-8') for node in field_topologies[topo]) + pnames = set(node.pretty_name for node in field_topologies[topo]) pnames = sorted(pnames) nbyline = 4 nentries = len(pnames)//nbyline n0 = len(str(topo.backend.kind).lower()) n1 = len(str(topo.tag)) for i in range(nentries): - sops = u', '.join(pnames[nbyline*i:nbyline*(i+1)]) + sops = ', '.join(pnames[nbyline*i:nbyline*(i+1)]) if (i != nentries-1) or (len(pnames) % nbyline != 0): sops += ',' if (i == 0): @@ -380,32 +380,32 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): entries = ('', '-'*n1, sops) topologies.setdefault(field, []).append(entries) if (len(pnames) % nbyline != 0): - sops = u', '.join(pnames[nbyline*nentries:]) + sops = ', '.join(pnames[nbyline*nentries:]) if (nentries == 0): entries = (str(topo.backend.kind).lower(), topo.tag, sops) else: entries = ('', '-'*n1, sops) topologies.setdefault(field, []).append(entries) - titles = [[(u'BACKEND', u'TOPOLOGY', u'OPERATORS')]] + titles = [[('BACKEND', 'TOPOLOGY', 'OPERATORS')]] backend_size = max(len(s[0]) for ss in topologies.values()+titles for s in ss) topo_size = max(len(s[1]) for ss in topologies.values()+titles for s in ss) - template = u'\n {:<{backend_size}} {:<{topo_size}} {}' + template = '\n {:<{backend_size}} {:<{topo_size}} {}' sizes = {'backend_size': backend_size, 'topo_size': topo_size} - ss = u'' + ss = '' for field in sorted(self.fields, key=lambda x: x.name): - ss += u'\n>FIELD {}::{}'.format(field.name, field.pretty_name.decode('utf-8')) + ss += '\n>FIELD {}::{}'.format(field.name, field.pretty_name) ss += template.format(*titles[0][0], **sizes) field_topologies = topologies[field] for entries in field_topologies: ss += template.format(*entries, **sizes) - title = u'ComputationalGraph {} fields report '.format( - self.pretty_name.decode('utf-8')) - ss = u'\n{}\n'.format(framed_str(title=title, msg=ss[1:])) - return ss.encode('utf-8') + title = 'ComputationalGraph {} fields report '.format( + self.pretty_name) + ss = '\n{}\n'.format(framed_str(title=title, msg=ss[1:])) + return ss def operator_report(self): maxlen = (None, None, 40, None, 40, None) @@ -422,46 +422,46 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): f0 = f.fields[0] t0 = node.input_fields[f0] if all((node.input_fields[fi] is t0) for fi in f.fields): - finputs.append(u'{}.{}'.format(f.pretty_name.decode('utf-8'), - t0.pretty_tag.decode('utf-8'))) + finputs.append('{}.{}'.format(f.pretty_name, + t0.pretty_tag)) handled_inputs += f.fields for f in node.output_tensor_fields: f0 = f.fields[0] t0 = node.output_fields[f0] if all((node.output_fields[fi] is t0) for fi in f.fields): - foutputs.append(u'{}.{}'.format(f.pretty_name.decode('utf-8'), - t0.pretty_tag.decode('utf-8'))) + foutputs.append('{}.{}'.format(f.pretty_name, + t0.pretty_tag)) handled_outputs += f.fields - finputs += [u'{}.{}'.format(f.pretty_name.decode('utf-8'), - t.pretty_tag.decode('utf-8')) + finputs += ['{}.{}'.format(f.pretty_name, + t.pretty_tag) for (f, t) in node.input_fields.items() if f not in handled_inputs] - foutputs += [u'{}.{}'.format(f.pretty_name.decode('utf-8'), - t.pretty_tag.decode('utf-8')) + foutputs += ['{}.{}'.format(f.pretty_name, + t.pretty_tag) for 
(f, t) in node.output_fields.items() if f not in handled_outputs] - finputs = u','.join(sorted(finputs)) - foutputs = u','.join(sorted(foutputs)) + finputs = ','.join(sorted(finputs)) + foutputs = ','.join(sorted(foutputs)) - pinputs = u','.join(sorted([p.pretty_name.decode('utf-8') + pinputs = ','.join(sorted([p.pretty_name for p in node.input_params.values()])) - poutputs = u','.join(sorted([p.pretty_name.decode('utf-8') + poutputs = ','.join(sorted([p.pretty_name for p in node.output_params.values()])) - infields = u'[{}]'.format(finputs) if finputs else u'' - outfields = u'[{}]'.format(foutputs) if foutputs else u'' - inparams = u'[{}]'.format(pinputs) if pinputs else u'' - outparams = u'[{}]'.format(poutputs) if poutputs else u'' + infields = '[{}]'.format(finputs) if finputs else '' + outfields = '[{}]'.format(foutputs) if foutputs else '' + inparams = '[{}]'.format(pinputs) if pinputs else '' + outparams = '[{}]'.format(poutputs) if poutputs else '' - inputs = u'{}{}{}'.format(infields, u'x' if infields and inparams else u'', inparams) - outputs = u'{}{}{}'.format( - outfields, u'x' if outfields and outparams else u'', outparams) + inputs = '{}{}{}'.format(infields, 'x' if infields and inparams else '', inparams) + outputs = '{}{}{}'.format( + outfields, 'x' if outfields and outparams else '', outparams) if inputs == '': - inputs = u'no inputs' + inputs = 'no inputs' if outputs == '': - outputs = u'no outputs' + outputs = 'no outputs' - opname = node.pretty_name.decode('utf-8') + opname = node.pretty_name optype = type(node).__name__ strdata = (str(i), opname, inputs, '->', outputs, optype) @@ -474,23 +474,23 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): out_size = max(strlen(s[4]) for s in ops) type_size = max(strlen(s[5]) for s in ops) - ss = u' {:<{isize}} {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format( + ss = ' {:<{isize}} {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format( 'ID', 'OPERATOR', 'INPUTS', '', 'OUTPUTS', 'OPERATOR TYPE', isize=isize, name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size) for (i, opname, inputs, arrow, outputs, optype) in ops: - ss += u'\n {:>{isize}} {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format( + ss += '\n {:>{isize}} {:<{name_size}} {:<{in_size}} {:<{arrow_size}} {:<{out_size}} {:<{type_size}}'.format( i, opname, inputs, arrow, outputs, optype, isize=isize, name_size=name_size, in_size=in_size, arrow_size=arrow_size, out_size=out_size, type_size=type_size) - title = u'ComputationalGraph {} discrete operator report '.format( - self.pretty_name.decode('utf-8')) - return u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8') + title = 'ComputationalGraph {} discrete operator report '.format( + self.pretty_name) + return '\n{}\n'.format(framed_str(title=title, msg=ss)) def get_domains(self): domains = {} @@ -542,9 +542,9 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta): def available_methods(self): avail_methods = {} if not self.nodes: - msg = u'No nodes present in ComputationalGraph {}.'.format( - self.pretty_name.decode('utf-8')) - raise RuntimeError(msg.encode('utf-8')) + msg = 'No nodes present in ComputationalGraph {}.'.format( + self.pretty_name) + raise RuntimeError(msg) for node in self.nodes: for (k, v) in node.available_methods().items(): v = to_set(v) @@ -570,8 +570,8 @@ class ComputationalGraph(ComputationalGraphNode, 
metaclass=ABCMeta):
         if is_root:
             self.pre_initialize(**kwds)
 
-        msg = u'ComputationalGraph {} is empty.'
-        assert len(self.nodes) > 0, msg.format(self.pretty_name.decode('utf-8')).encode('utf-8')
+        msg = 'ComputationalGraph {} is empty.'
+        assert len(self.nodes) > 0, msg.format(self.pretty_name)
 
         for node in self.nodes:
             node.pre_initialize(**kwds)
@@ -846,10 +846,10 @@ class ComputationalGraph(ComputationalGraphNode, metaclass=ABCMeta):
             requests += node.get_work_properties()
         if __DEBUG__ or (__VERBOSE__ and self.level == 0) or self.__FORCE_REPORTS__:
             srequests = requests.sreport()
-            ss = (srequests if (srequests != u'') else u' *no extra work requested*')
-            title = u'ComputationalGraph {} work properties report '.format(
-                self.pretty_name.decode('utf-8'))
-            vprint(u'\n{}\n'.format(framed_str(title=title, msg=ss)).encode('utf-8'))
+            ss = (srequests if (srequests != '') else ' *no extra work requested*')
+            title = 'ComputationalGraph {} work properties report '.format(
+                self.pretty_name)
+            vprint('\n{}\n'.format(framed_str(title=title, msg=ss)))
         return requests
 
     @debug
diff --git a/hysop/core/graph/computational_node.py b/hysop/core/graph/computational_node.py
index bd394fa0682682c62e500fc18264a673b7463492..a5f1257573641bb5d082abd8dbae55ea194ff95f 100644
--- a/hysop/core/graph/computational_node.py
+++ b/hysop/core/graph/computational_node.py
@@ -83,7 +83,7 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         name: str, optional
             name of this node (string), optional, defaults to top class name.
             Default name will be the top class name (ie. self.__class__.__name__).
-        pretty_name: str or unicode, optional
+        pretty_name: str, optional
             Pretty name of this node (string), optional, defaults to name.
         method: dict, optional
             user method specification for this graph node, optional, defaults to None.
@@ -199,13 +199,10 @@ class ComputationalGraphNode(OperatorBase, metaclass=ABCMeta):
         name = first_not_None(name, self.__class__.__name__)
         pretty_name = first_not_None(pretty_name, name)
 
-        if isinstance(pretty_name, unicode):
-            pretty_name = pretty_name.encode('utf-8')
-
         if not isinstance(name, str):
             msg = 'name is not a string but a {}.'
             raise ValueError(msg.format(name.__class__))
-        if not isinstance(pretty_name, (str, unicode)):
+        if not isinstance(pretty_name, str):
             msg = 'pretty_name is not a string but a {}.'
raise ValueError(msg.format(name.__class__)) if not isinstance(input_fields, dict): diff --git a/hysop/core/graph/computational_operator.py b/hysop/core/graph/computational_operator.py index afccabccc2afb225a3e4276396e32034db064ad2..1e16b96af3b978562c212d2d034ac480b43ceb31 100644 --- a/hysop/core/graph/computational_operator.py +++ b/hysop/core/graph/computational_operator.py @@ -341,29 +341,29 @@ class ComputationalGraphOperator(ComputationalGraphNode, metaclass=ABCMeta): try: ireq.check_topology(itopo) except RuntimeError: - msg=u'\nFATAL ERROR: {}::{}.handle_topologies() input topology mismatch.\n\n' + msg='\nFATAL ERROR: {}::{}.handle_topologies() input topology mismatch.\n\n' msg=msg.format(type(self).__name__, self.name) - msg+=u'Operator expected field {} topology to match the following requirements\n' + msg+='Operator expected field {} topology to match the following requirements\n' msg=msg.format(ifield.name) - msg+=u' {}\n'.format(str(ireq).decode('utf-8')) - msg+=u'but input effective discrete field topology is\n' - msg+=u' {}\n'.format(itopo) - msg+=u'\n' + msg+=' {}\n'.format(str(ireq)) + msg+='but input effective discrete field topology is\n' + msg+=' {}\n'.format(itopo) + msg+='\n' print(msg) raise try: ireq.check_discrete_topology_state(istate) except RuntimeError: - msg=u'\nFATAL ERROR: {}::{}.handle_topologies() input state mismatch.\n\n' + msg='\nFATAL ERROR: {}::{}.handle_topologies() input state mismatch.\n\n' msg=msg.format(type(self).__name__, self.name) - msg+=u'Operator expected field {} on topology id {} to match the following ' - msg+=u'requirements\n' + msg+='Operator expected field {} on topology id {} to match the following ' + msg+='requirements\n' msg=msg.format(ifield.name, itopo.id) - msg+=u' {}\n'.format(str(ireq).decode('utf-8')) - msg+=u'but input discrete topology state determined by the graph builder is\n' - msg+=u' {}\n'.format(istate) - msg+=u'\n' + msg+=' {}\n'.format(str(ireq)) + msg+='but input discrete topology state determined by the graph builder is\n' + msg+=' {}\n'.format(istate) + msg+='\n' raise istate_report = 'Field {}: {} (topo={})'.format(ifield.name, istate, itopo.id) diff --git a/hysop/core/memory/memory_request.py b/hysop/core/memory/memory_request.py index 5c4671f2731139784d7feeff4940829ccef163b4..e14a85c3dc1b10e4c19cebb3d34da0a40dbe21ed 100644 --- a/hysop/core/memory/memory_request.py +++ b/hysop/core/memory/memory_request.py @@ -468,7 +468,7 @@ class MultipleOperatorMemoryRequests(object): sop_request = all_requests.setdefault(backend, {}).setdefault(op, []) local_total=0 try: - opname=u'{}'.format(op.pretty_name.decode('utf-8')) + opname='{}'.format(op.pretty_name) except AttributeError: opname = None for req in op_requests: @@ -480,36 +480,36 @@ class MultipleOperatorMemoryRequests(object): if len(all_requests): sizes = {} - template = u'\n' - titles=(u'OPERATOR', u'REQUEST_ID', u'SIZE', u'COMPONENTS', u'SHAPE', u'DTYPE', u'ALIGNMENT') + template = '\n' + titles=('OPERATOR', 'REQUEST_ID', 'SIZE', 'COMPONENTS', 'SHAPE', 'DTYPE', 'ALIGNMENT') for (i,k) in enumerate(titles): k=k.lower() - template += u' ' + template += ' ' size = max(len(req[i]) for breqs in all_requests.values() for reqs in breqs.values() for req in reqs) size = max(size, len(k)) - name=k+u'_len' + name=k+'_len' sizes[name] = size - template += u'{:'+(u'<' if i==0 else u'^')+u'{'+name+u'}}' + template += '{:'+('<' if i==0 else '^')+'{'+name+'}}' ss='' for (backend, backend_srequests) in all_requests.items(): total = totals[backend] kind = backend.kind if (kind == 
Backend.OPENCL): - precision = u' on device {}'.format(backend.device.name.strip()) + precision = ' on device {}'.format(backend.device.name.strip()) else: - precision = u'' - ss+= u'\n {}{}:'.format(backend.full_tag, precision) + precision = '' + ss+= '\n {}{}:'.format(backend.full_tag, precision) ss+= template.format(*titles, **sizes) for op in sorted(backend_srequests.keys(), key=lambda op: getattr(op, 'name', None)): sop_reqs = backend_srequests[op] for sreq in sop_reqs: ss+= template.format(*sreq, **sizes) - ss +=u'\n Total extra work buffers requested: {} ({})'.format( + ss +='\n Total extra work buffers requested: {} ({})'.format( bytes2str(total,decimal=False), bytes2str(total,decimal=True)) - ss += u'\n' + ss += '\n' return ss[1:-1] else: - return u' No extra buffers have been requested.' + return ' No extra buffers have been requested.' diff --git a/hysop/core/memory/mempool.py b/hysop/core/memory/mempool.py index 7b0ffa361b4a5b3aa7ddbbf9a15d645026c50fd6..8521b58967a2d69c707746fa28ff87e08858ebce 100644 --- a/hysop/core/memory/mempool.py +++ b/hysop/core/memory/mempool.py @@ -76,8 +76,6 @@ class MemoryPool(object, metaclass=ABCMeta): max_alloc_bytes = max_alloc_bytes or default_limit verbose = verbose if isinstance(verbose,bool) else __DEBUG__ - if isinstance(name, unicode): - name = str(name) check_instance(name, str) check_instance(mantissa_bits, int) check_instance(max_alloc_bytes,(int,long), allow_none=True) diff --git a/hysop/fields/cartesian_discrete_field.py b/hysop/fields/cartesian_discrete_field.py index 3f678f565f6744cafb9eac517ccfa9160bc237c9..b98a727034736cb96e6fd629dcc5e5ac557fe554 100644 --- a/hysop/fields/cartesian_discrete_field.py +++ b/hysop/fields/cartesian_discrete_field.py @@ -1062,7 +1062,7 @@ class CartesianDiscreteScalarFieldView(CartesianDiscreteScalarFieldViewContainer from hysop.tools.sympy_utils import subscript default_name = '{}__{}'.format(self.name, self._dfield._clone_id) default_pname = '{}__{}'.format(self.pretty_name, - subscript(self._dfield._clone_id).encode('utf-8')) + subscript(self._dfield._clone_id)) default_vname = '{}__{}'.format(self.var_name, self._dfield._clone_id) default_lname = '{}__{}'.format(self.latex_name, self._dfield._clone_id) self._dfield._clone_id += 1 diff --git a/hysop/fields/continuous_field.py b/hysop/fields/continuous_field.py index 8eb8e09c08b03a079bfcd7820f76ec50e03a95c1..dbd3ae56fd193e8ba4d1bf6df026e034cc326f64 100644 --- a/hysop/fields/continuous_field.py +++ b/hysop/fields/continuous_field.py @@ -121,13 +121,9 @@ class FieldContainerI(TaggedObject): raise NotImplementedError('Call self.from_sympy_expression instead.') check_instance(exprs, npw.ndarray, values=sm.Expr) check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(scalar_name_prefix, str, allow_none=True) - check_instance(scalar_pretty_name_prefix, (str, unicode), allow_none=True) - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') - if isinstance(scalar_pretty_name_prefix, unicode): - scalar_pretty_name_prefix = scalar_pretty_name_prefix.encode('utf-8') + check_instance(scalar_pretty_name_prefix, str, allow_none=True) fields = npw.empty(shape=exprs.shape, dtype=object) fields[...] 
= None @@ -208,7 +204,7 @@ class FieldContainerI(TaggedObject): shape = (ndirs,) name = first_not_None(name, 'grad_{}'.format(self.name)) - pretty_name = first_not_None(pretty_name, '{}{}'.format(nabla.encode('utf8'), + pretty_name = first_not_None(pretty_name, '{}{}'.format(nabla, self.pretty_name)) if shape==(1,): @@ -242,8 +238,8 @@ class FieldContainerI(TaggedObject): exprs = laplacian(self.symbol(*frame.vars), frame) name = first_not_None(name, 'laplacian_{}'.format(self.name)) - pretty_name = first_not_None(pretty_name, u'\u0394{}'.format( - self.pretty_name.decode('utf-8'))) + pretty_name = first_not_None(pretty_name, 'Δ{}'.format( + self.pretty_name)) if isinstance(exprs, npw.ndarray): if (exprs.size == 1): @@ -277,8 +273,8 @@ class FieldContainerI(TaggedObject): exprs = npw.asarray(div(self.symbol(*frame.vars), frame)) name = first_not_None(name, 'div_{}'.format(self.name)) - pretty_name = first_not_None(pretty_name, u'{}\u22c5{}'.format(nabla, - self.pretty_name.decode('utf-8'))) + pretty_name = first_not_None(pretty_name, '{}â‹…{}'.format(nabla, + self.pretty_name)) if exprs.size in (0,1): expr = npw.asscalar(exprs) @@ -323,8 +319,8 @@ class FieldContainerI(TaggedObject): exprs = curl(self.symbol(*frame.vars), frame) name = first_not_None(name, 'curl_{}'.format(self.name)) - pretty_name = first_not_None(pretty_name, u'{}\u2227{}'.format(nabla, - self.pretty_name.decode('utf-8'))) + pretty_name = first_not_None(pretty_name, '{}∧{}'.format(nabla, + self.pretty_name)) if isinstance(exprs, npw.ndarray): return self.from_sympy_expressions( @@ -541,7 +537,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI): Physical domain where this field is defined. name : string A name for the field. - pretty_name: string or unicode, optional. + pretty_name: str, optional. A pretty name used for display whenever possible. Defaults to name. var_name: string, optional. @@ -587,7 +583,7 @@ class ScalarField(NamedScalarContainerI, FieldContainerI): Numpy array mask, True is axis is periodic, else False. 
""" check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(latex_name, str, allow_none=True) check_instance(var_name, str, allow_none=True) check_instance(is_tmp, bool) @@ -606,8 +602,6 @@ class ScalarField(NamedScalarContainerI, FieldContainerI): # Name and pretty name pretty_name = first_not_None(pretty_name, name) - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') check_instance(pretty_name, str) # Initial values @@ -904,9 +898,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI): base_kwds=None, **kwds): pretty_name = first_not_None(pretty_name, name) check_instance(name, str) - check_instance(pretty_name, (str, unicode)) - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') + check_instance(pretty_name, str) check_instance(shape, tuple, values=int) if (len(shape)==1) and not issubclass(cls, VectorField): @@ -985,7 +977,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI): check_instance(fields, tuple, values=(ScalarField,), minsize=1) check_instance(shape, tuple, values=int) check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) cls._check_fields(*fields) @@ -1002,7 +994,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI): """Create a TensorField from numpy.ndarray of fields.""" assert (fields.size > 1) check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(fields, npw.ndarray, dtype=object, values=ScalarField) shape = fields.shape @@ -1039,7 +1031,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI): def default_pretty_name_formatter(cls, basename, idx): check_instance(basename, str) assert len(basename)>0 - pname = basename + subscripts(ids=idx, sep='').encode('utf-8') + pname = basename + subscripts(ids=idx, sep='') return pname @classmethod @@ -1129,9 +1121,7 @@ class TensorField(NamedTensorContainerI, FieldContainerI): pretty_name = first_not_None(pretty_name, name) check_instance(name, str) - check_instance(pretty_name, (str,unicode)) - if not isinstance(pretty_name, str): - pretty_name = pretty_name.encode('utf-8') + check_instance(pretty_name, str) if (nb_components == 1): return getattr(self.fields[0], fn)(name=name, pretty_name=pretty_name, **kwds) diff --git a/hysop/fields/discrete_field.py b/hysop/fields/discrete_field.py index aafa62d9994c11db36448b5fa1a490de7221ecd9..16432e85314daeb1ea435e38a9723479b5530ecc 100644 --- a/hysop/fields/discrete_field.py +++ b/hysop/fields/discrete_field.py @@ -444,7 +444,7 @@ class DiscreteScalarFieldView(DiscreteScalarFieldViewContainerI, TaggedObjectVie """Check properties and types.""" check_instance(self.dtype, np.dtype) check_instance(self.name, str) - check_instance(self.pretty_name, (str,unicode)) + check_instance(self.pretty_name, str) check_instance(self.dim, int, minval=1) check_instance(self.topology, TopologyView) check_instance(self.backend, ArrayBackend) @@ -632,7 +632,7 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject, metaclass=ABCMeta If set register input topology to input continuous field. name : string, optional A name for the field. - pretty_name: string or unicode, optional. + pretty_name: str, optional. A pretty name used for display whenever possible. Defaults to name. var_name: string, optional. 
@@ -647,7 +647,8 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject, metaclass=ABCMeta check_instance(field, Field) check_instance(topology, Topology) check_instance(name, str, allow_none=True) - check_instance(pretty_name, (str,unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) + _name, _pretty_name, _var_name, _latex_name = \ cls.format_discrete_names(field.name, @@ -697,8 +698,8 @@ class DiscreteScalarField(NamedScalarContainerI, TaggedObject, metaclass=ABCMeta # Scalar discrete field names name = '{}_t{}'.format(name, topology.id) pretty_name = '{}.{}{}'.format(pretty_name, - u'\u209c'.encode('utf-8'), - subscript(topology.id).encode('utf-8')) + 'â‚œ', + subscript(topology.id)) var_name = var_name + '_t{}'.format(topology.id) latex_name = latex_name + '.t_{{{}}}'.format(0) return (name, pretty_name, var_name, latex_name) @@ -801,7 +802,7 @@ class DiscreteTensorField(NamedTensorContainerI, DiscreteScalarFieldViewContaine Create a TensorField and a DiscreteTensorField from np.ndarray of discrete fields. """ check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(dfields, npw.ndarray, dtype=object, values=DiscreteScalarFieldView) shape = dfields.shape diff --git a/hysop/fields/field_requirements.py b/hysop/fields/field_requirements.py index b87f520988a3dc8a7edbbd930216fe05065bd7e8..03bd48cd15aa30ee0059d4f2714a493a2dca3cda 100644 --- a/hysop/fields/field_requirements.py +++ b/hysop/fields/field_requirements.py @@ -114,20 +114,20 @@ class DiscreteFieldRequirements(object): self.memory_order, self.tstates)) def ghost_str(self, array): - inf = u'+\u221e' - vals = [u''+str(x) if np.isfinite(x) else inf for x in array] - return u'[{}]'.format(u','.join(vals)).strip() + inf = '+∞' + vals = [''+str(x) if np.isfinite(x) else inf for x in array] + return '[{}]'.format(','.join(vals)).strip() def __str__(self): - return u'{:15s} {:>10s}<=ghosts<{:<10s} memory_order={} can_split={} tstates={}'.format( - u'{}::{}'.format(getattr(self.operator, 'name', u'UnknownOperator'), - getattr(self.field, 'name', u'UnknownField')), + return '{:15s} {:>10s}<=ghosts<{:<10s} memory_order={} can_split={} tstates={}'.format( + '{}::{}'.format(getattr(self.operator, 'name', 'UnknownOperator'), + getattr(self.field, 'name', 'UnknownField')), self.ghost_str(self.min_ghosts), self.ghost_str(self.max_ghosts+1), self.memory_order, - u''+str(self.can_split.view(np.int8)), - u'[{}]'.format(u','.join(u''+str(ts) for ts in self.tstates)) - if self.tstates else u'ANY').encode('utf-8') + ''+str(self.can_split.view(np.int8)), + '[{}]'.format(','.join(''+str(ts) for ts in self.tstates)) + if self.tstates else 'ANY') def get_axes(self): return self._axes diff --git a/hysop/operator/base/derivative.py b/hysop/operator/base/derivative.py index 5b860814b449b6bb6a10b5ddba618af00e0a2c68..fd6446bb20e1b812ff02679fc23dcc20bc12844a 100644 --- a/hysop/operator/base/derivative.py +++ b/hysop/operator/base/derivative.py @@ -120,7 +120,7 @@ class SpaceDerivativeBase(object, metaclass=ABCMeta): variables = first_not_None(variables, {}) check_instance(name, str) - check_instance(pretty_name, (str, unicode)) + check_instance(pretty_name, str) check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors) input_fields = { F: variables.get(F, None) } diff --git a/hysop/operator/base/enstrophy.py b/hysop/operator/base/enstrophy.py index 
f2c401b0548c3e6451d54cf6a7f76ba12b1d354f..9321cb45642126ff61e18327a5cb503a711c344e 100644
--- a/hysop/operator/base/enstrophy.py
+++ b/hysop/operator/base/enstrophy.py
@@ -49,8 +49,8 @@ class EnstrophyBase(object, metaclass=ABCMeta):
         check_instance(rho, Field, allow_none=True)
         check_instance(rho_0, float)
 
-        W = vorticity.pretty_name.decode('utf-8')
-        wdotw = u'{}\u22c5{}'.format(W,W).encode('utf-8')
+        W = vorticity.pretty_name
+        wdotw = '{}⋅{}'.format(W,W)
         if (WdotW is None):
             assert (vorticity in variables), variables
             WdotW = vorticity.tmp_like(name='WdotW', pretty_name=wdotw, nb_components=1)
@@ -65,7 +65,7 @@ class EnstrophyBase(object, metaclass=ABCMeta):
             input_fields[rho] = variables[rho]
 
         default_name = 'enstrophy'
-        default_pname = u'\u222b{}'.format(wdotw.decode('utf-8')).encode('utf-8')
+        default_pname = '∫{}'.format(wdotw)
         pretty_name = first_not_None(pretty_name, name, default_pname)
         name = first_not_None(name, default_name)
 
diff --git a/hysop/operator/base/integrate.py b/hysop/operator/base/integrate.py
index 2a95335576cffc9f5c7e04b32bf80d9371848dc7..aef3d35b2c57aa4e1ef69216df35d14518bb94e2 100644
--- a/hysop/operator/base/integrate.py
+++ b/hysop/operator/base/integrate.py
@@ -78,7 +78,7 @@ class IntegrateBase(object, metaclass=ABCMeta):
             output_params = {parameter.name: parameter}
 
         default_name = 'integrate_{}'.format(field.name)
-        default_pname = u'\u222b{}'.format(field.pretty_name.decode('utf-8')).encode('utf-8')
+        default_pname = '∫{}'.format(field.pretty_name)
         pretty_name = first_not_None(pretty_name, name, default_pname)
         name = first_not_None(name, default_name)
 
diff --git a/hysop/operator/base/memory_reordering.py b/hysop/operator/base/memory_reordering.py
index c07791e5cf586468e31f5b17660f9a94e4608233..66ec7b3dd553798225788c2e1df899ff7749bed7 100644
--- a/hysop/operator/base/memory_reordering.py
+++ b/hysop/operator/base/memory_reordering.py
@@ -55,10 +55,10 @@ class MemoryReorderingBase(object, metaclass=ABCMeta):
             raise NotImplementedError(target_memory_order)
 
         default_name = '{}_{}'.format(mr, input_field.name)
-        default_pname = u'{}_{}'.format(mr, input_field.pretty_name.decode('utf-8'))
+        default_pname = '{}_{}'.format(mr, input_field.pretty_name)
         if (output_field.name != input_field.name):
             default_name += '_{}'.format(output_field.name)
-            default_pname += u'_{}'.format(output_field.pretty_name.decode('utf-8'))
+            default_pname += '_{}'.format(output_field.pretty_name)
 
         name = first_not_None(name, default_name)
         pname = first_not_None(pretty_name, default_pname)
diff --git a/hysop/operator/base/min_max.py b/hysop/operator/base/min_max.py
index 9aadc64bca4bc579c49d4381188a8d14b1788341..d932ff6e8cffb0fc7f7651e8cac5dd684fc7a599 100644
--- a/hysop/operator/base/min_max.py
+++ b/hysop/operator/base/min_max.py
@@ -41,7 +41,7 @@ class MinMaxFieldStatisticsBase(object):
         if (field is not None):
             dtype = first_not_None(dtype, field.dtype)
             pbasename = first_not_None(pbasename, field.name)
-            ppbasename = first_not_None(ppbasename, field.pretty_name.decode('utf-8'))
+            ppbasename = first_not_None(ppbasename, field.pretty_name)
 
         def make_param(k, quiet):
             return TensorParameter(name=names[k], pretty_name=pretty_names[k],
@@ -54,9 +54,9 @@ class MinMaxFieldStatisticsBase(object):
         }
 
         pretty_names = {
-            'Fmin': u'{}\u208b'.format(ppbasename),
-            'Fmax': u'{}\u208a'.format(ppbasename),
-            'Finf': u'|{}|\u208a'.format(ppbasename),
+            'Fmin': '{}₋'.format(ppbasename),
+            'Fmax': '{}₊'.format(ppbasename),
+            'Finf': '|{}|₊'.format(ppbasename),
         }
 
         if (field is not None):
@@ -170,8 +170,8 @@ class 
MinMaxFieldStatisticsBase(object): coeffs = first_not_None(coeffs, {}) name = first_not_None(name, 'MinMax[{}]'.format(field.name)) - pretty_name = first_not_None(pretty_name, u'|{}|\u221e'.format( - field.pretty_name.decode('utf-8'))) + pretty_name = first_not_None(pretty_name, '|{}|∞'.format( + field.pretty_name)) variables = first_not_None(variables, {field: None}) all_quiet = first_not_None(all_quiet, False) diff --git a/hysop/operator/base/poisson.py b/hysop/operator/base/poisson.py index f88d987ff070761e59f4ed4b46e3db6f75fed0d7..99c3380b15b5ccea38b01ffa1aa4c71a7842bec9 100644 --- a/hysop/operator/base/poisson.py +++ b/hysop/operator/base/poisson.py @@ -12,19 +12,19 @@ class PoissonOperatorBase(SpectralOperatorBase): """ Solves the poisson equation using a specific implementation. """ - + @debug - def __init__(self, Fin, Fout, variables, + def __init__(self, Fin, Fout, variables, name=None, pretty_name=None, dump_energy=None, dump_input_energy=None, dump_output_energy=None, plot_energy=None, plot_input_energy=None, plot_output_energy=None, plot_inout_energy=None, - **kwds): + **kwds): """ Initialize a n-dimensional Poisson operator base (using spectral methods). Solves: Laplacian(Fout) = Fin - + Parameters ---------- Fout: Field @@ -46,7 +46,7 @@ class PoissonOperatorBase(SpectralOperatorBase): plot_output_energy: IOParams, optional, defaults to None Plot output field energy in a custom file. Defaults to no plot. plot_inout_energy: IOParams, optional, defaults to None - Plot input and output field energy on the same graph in a custom file. + Plot input and output field energy on the same graph in a custom file. Defaults to no plot. kwds: dict, optional Base class arguments. @@ -62,21 +62,21 @@ class PoissonOperatorBase(SpectralOperatorBase): check_instance(plot_output_energy, (IOParams,int), allow_none=True) check_instance(plot_inout_energy, (IOParams,int), allow_none=True) assert Fin.nb_components == Fout.nb_components - + input_fields = { Fin: variables[Fin] } output_fields = { Fout: variables[Fout] } - + default_name = 'Poisson_{}_{}'.format(Fin.name, Fout.name) default_pretty_name = 'Poisson_{}_{}'.format(Fin.pretty_name, Fout.pretty_name) name = first_not_None(name, default_name) pretty_name = first_not_None(name, default_pretty_name) - + dump_input_E = first_not_None(dump_input_energy, dump_energy) dump_output_E = first_not_None(dump_output_energy, dump_energy) plot_input_E = first_not_None(plot_input_energy, plot_energy) plot_output_E = first_not_None(plot_output_energy, plot_energy) plot_inout_E = first_not_None(plot_inout_energy, plot_energy) - + do_plot_inout_E = isinstance(plot_inout_E, IOParams) and (plot_inout_E.frequency>=0) _, compute_input_E_freqs = EnergyDumper.do_compute_energy(dump_input_E, plot_input_E, plot_inout_E) _, compute_output_E_freqs = EnergyDumper.do_compute_energy(dump_output_E, plot_output_E, plot_inout_E) @@ -87,10 +87,10 @@ class PoissonOperatorBase(SpectralOperatorBase): forward_transforms = () backward_transforms = () wave_numbers = () - + tg = self.new_transform_group() for (Fi,Fo) in zip(Fin.fields, Fout.fields): - Ft = tg.require_forward_transform(Fi, custom_output_buffer='auto', + Ft = tg.require_forward_transform(Fi, custom_output_buffer='auto', dump_energy=dump_input_E, plot_energy=plot_input_E, compute_energy_frequencies=compute_input_E_freqs) Bt = tg.require_backward_transform(Fo, custom_input_buffer='auto', matching_forward_transform=Ft, @@ -99,7 +99,7 @@ class PoissonOperatorBase(SpectralOperatorBase): assert (Ft.output_dtype == 
Bt.input_dtype) expr = Assignment(Bt.s, laplacian(Ft.s, Ft.s.frame)) kds = tg.push_expressions(expr) - + forward_transforms += (Ft,) backward_transforms += (Bt,) wave_numbers += (kds,) @@ -115,7 +115,7 @@ class PoissonOperatorBase(SpectralOperatorBase): self.plot_inout_energy_ioparams = plot_inout_E self.input_energy_params = tuple(Ft._energy_parameter for Ft in self.forward_transforms) self.output_energy_params = tuple(Bt._energy_parameter for Bt in self.backward_transforms) - + @debug def discretize(self): super(PoissonOperatorBase, self).discretize() @@ -132,15 +132,15 @@ class PoissonOperatorBase(SpectralOperatorBase): all_nd_dkds += (all_dkds,) self.all_dkds = all_dkds self.all_nd_dkds = all_nd_dkds - + inout_energy_plotters = () if self.do_plot_inout_energy: for (dFin, dFout, Pin, Pout) in zip( self.dFin.dfields, self.dFout.dfields, self.input_energy_params, self.output_energy_params): fname = '{}_{}'.format(dFin.name, dFout.name) - iname = u'{}.input.{}'.format(type(self).__name__, dFin.pretty_name.decode('utf-8')) - oname = u'{}.output.{}'.format(type(self).__name__, dFout.pretty_name.decode('utf-8')) + iname = '{}.input.{}'.format(type(self).__name__, dFin.pretty_name) + oname = '{}.output.{}'.format(type(self).__name__, dFout.pretty_name) plt = EnergyPlotter(energy_parameters={iname: Pin, oname: Pout}, fname=fname, io_params=self.plot_inout_energy_ioparams) inout_energy_plotters += (plt,) diff --git a/hysop/operator/base/poisson_curl.py b/hysop/operator/base/poisson_curl.py index 68f0ad50e18f5691d0a5351e8217aa08aa5f927c..c7c6e4e2c5c466c69fc92f045f36f3fe3561fcc6 100644 --- a/hysop/operator/base/poisson_curl.py +++ b/hysop/operator/base/poisson_curl.py @@ -312,9 +312,9 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB return super(SpectralPoissonCurlOperatorBase, self).discretize() - fig_titles = {'Win': u'Energy of input vorticity {}', - 'Uout': u'Energy of output velocity {}', - 'Wout': u'Energy of output vorticity {}' } + fig_titles = {'Win': 'Energy of input vorticity {}', + 'Uout': 'Energy of output velocity {}', + 'Wout': 'Energy of output vorticity {}' } get_transforms = { 'Win': self.W_forward_transforms, 'Wout': self.W_backward_transforms, 'Uout': self.U_backward_transforms } @@ -351,11 +351,11 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB if self.do_plot_energy[k]: iop = self.plot_energy_ioparams[k] assert (iop is not None) - pname = u'{}.{}'.format(self.__class__.__name__, dfield.pretty_name.decode('utf-8')) + pname = '{}.{}'.format(self.__class__.__name__, dfield.pretty_name) energy_parameters = { pname : Ep } plt = EnergyPlotter(energy_parameters=energy_parameters, io_params=iop, fname=k, - fig_title=fig_titles[k].format(dfield.pretty_name.decode('utf-8'))) + fig_title=fig_titles[k].format(dfield.pretty_name)) else: plt = None @@ -379,8 +379,8 @@ class SpectralPoissonCurlOperatorBase(PoissonCurlOperatorBase, SpectralOperatorB if self.do_plot_energy['Winout']: iop = self.plot_energy_ioparams['Winout'] assert (iop is not None) - pname_in = u'{}.input.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8')) - pname_out = u'{}.output.{}'.format(self.__class__.__name__, self.dW.pretty_name.decode('utf-8')) + pname_in = '{}.input.{}'.format(self.__class__.__name__, self.dW.pretty_name) + pname_out = '{}.output.{}'.format(self.__class__.__name__, self.dW.pretty_name) energy_parameters = { pname_in: self.compute_energy_parameters['Win'], pname_out: self.compute_energy_parameters['Wout'] } 
plt = EnergyPlotter(energy_parameters=energy_parameters, diff --git a/hysop/operator/base/redistribute_operator.py b/hysop/operator/base/redistribute_operator.py index 0a7ebdf3a7aa5f692b9c6a4eaf2ea9d0ff31bee7..5da99e91b4d15467e52eddde461681dcd8bec5e6 100644 --- a/hysop/operator/base/redistribute_operator.py +++ b/hysop/operator/base/redistribute_operator.py @@ -55,12 +55,11 @@ class RedistributeOperatorBase(ComputationalGraphOperator, metaclass=ABCMeta): default_name = 'R{},{}_{}'.format(source_topo.id, target_topo.id, variable.name) - default_pname = u'R{}{}{}_{}'.format( + default_pname = 'R{}{}{}_{}'.format( subscript(source_topo.id), - u'\u2192', + '→', subscript(target_topo.id), - variable.pretty_name.decode('utf-8')) - default_pname = default_pname.encode('utf-8') + variable.pretty_name) pretty_name = first_not_None(pretty_name, name, default_pname) name = first_not_None(name, default_name) diff --git a/hysop/operator/base/spectral_operator.py b/hysop/operator/base/spectral_operator.py index d099078ab9738e4982586f841d9156aebe08ca27..3fea604efcec3cfc9c7d3e9f1a62ff2971c6d99f 100644 --- a/hysop/operator/base/spectral_operator.py +++ b/hysop/operator/base/spectral_operator.py @@ -1300,9 +1300,9 @@ class PlannedSpectralTransform(object): if self._do_plot_energy: piop = self._plot_energy_ioparams assert (piop is not None) - pname = u'{}.{}.{}'.format(self.op.__class__.__name__, + pname = '{}.{}.{}'.format(self.op.__class__.__name__, 'forward'if is_forward else 'backward', - dfield.pretty_name.decode('utf-8')) + dfield.pretty_name) energy_parameters = {pname: self._energy_parameter} self._energy_plotter = EnergyPlotter(energy_parameters=energy_parameters, io_params=self._plot_energy_ioparams, diff --git a/hysop/operator/base/transpose_operator.py b/hysop/operator/base/transpose_operator.py index e08f3fcd4707c9e2562bc06151380f5660af9ad7..905b7f4c2385de1c12256754c37f788c8e53aa31 100644 --- a/hysop/operator/base/transpose_operator.py +++ b/hysop/operator/base/transpose_operator.py @@ -55,10 +55,10 @@ class TransposeOperatorBase(object, metaclass=ABCMeta): saxes = ''.join([DirectionLabels[i] for i in axes]).lower() default_name = 'T{}_{}'.format(saxes, input_field.name) - default_pname = u'T{}_{}'.format(saxes, input_field.pretty_name.decode('utf-8')) + default_pname = 'T{}_{}'.format(saxes, input_field.pretty_name) if (output_field.name != input_field.name): default_name += '_{}'.format(output_field.name) - default_pname += u'_{}'.format(output_field.pretty_name.decode('utf-8')) + default_pname += '_{}'.format(output_field.pretty_name) name = first_not_None(name, default_name) pname = first_not_None(pretty_name, default_pname) diff --git a/hysop/operator/derivative.py b/hysop/operator/derivative.py index b7c8a91ac7ede6ba52518dda069b8f8dc4d811a8..cc475284f2010284ffba413ad2160b1ef485de67 100644 --- a/hysop/operator/derivative.py +++ b/hysop/operator/derivative.py @@ -106,7 +106,7 @@ class SpaceDerivative(ComputationalGraphNodeFrontend): check_instance(implementation, Implementation, allow_none=True) check_instance(base_kwds, dict, keys=str, allow_none=True) check_instance(name, str, allow_none=True) - check_instance(pretty_name, (str,unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) assert F in variables assert dF in variables diff --git a/hysop/operator/directional/directional.py b/hysop/operator/directional/directional.py index 6ef83b3a38c52509832534266acfc2ff75f32bba..96d6e53f6c9f97b4f8f1c021199df3715441bfb2 100644 --- a/hysop/operator/directional/directional.py 
+++ b/hysop/operator/directional/directional.py @@ -241,9 +241,8 @@ class DirectionalOperatorGenerator(DirectionalOperatorGeneratorI, metaclass=ABCM kargs.update(self.custom_directional_kwds(i)) name = '{}_{}_{}'.format(basename, DirectionLabels[i], self._direction_counter[i]) - pname = u'{}_{}{}'.format(basepname, DirectionLabels[i], + pname = '{}_{}{}'.format(basepname, DirectionLabels[i], subscript(self._direction_counter[i])) - pname = pname.encode('utf-8') try: op = self._operator(name=name, pretty_name=pname, diff --git a/hysop/operator/external_force.py b/hysop/operator/external_force.py index 0709bbfbe17bb743a2cce46336f34a92358116b4..d0c9744f65137276e3933a3ddaed0209aa17f08f 100644 --- a/hysop/operator/external_force.py +++ b/hysop/operator/external_force.py @@ -23,14 +23,14 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend): Implementation.OPENCL: OpenClSpectralExternalForce, } return __implementations - + @classmethod def default_implementation(cls): return Implementation.OPENCL @debug def __init__(self, vorticity, Fext, dt, variables, - Fmin=None, Fmax=None, Finf=None, + Fmin=None, Fmax=None, Finf=None, all_quiet=False, pbasename=None, ppbasename=None, implementation=None, base_kwds=None, **kwds): """ @@ -46,7 +46,7 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend): Fmax = max(tmp) Finf = max(abs(Fmin), abs(Fmax)) W += dt*tmp - + where Fext is computed from user given ExternalForce. Parameters @@ -63,7 +63,7 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend): If set to True, the TensorParameter will be generated automatically. all_quiet: bool Force all autogenerated TensorParameter to be quiet. - By default, only the autogenerated TensorParameters that are not required + By default, only the autogenerated TensorParameters that are not required by the user are set to be quiet. pbasename: str, optional Parameters basename for created parameters. @@ -96,29 +96,29 @@ class SpectralExternalForce(ComputationalGraphNodeFrontend): check_instance(Fext, ExternalForce) check_instance(dt, ScalarParameter) check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors) - + # Pregenerate parameters so that we can directly store them in self. 
        default_pbasename = 'curl_{}'.format(Fext.name)
-        default_ppbasename = u'{}x{}'.format(nabla, Fext.pretty_name)
+        default_ppbasename = '{}x{}'.format(nabla, Fext.pretty_name)
 
         pbasename = first_not_None(pbasename, default_pbasename)
         ppbasename = first_not_None(ppbasename, default_ppbasename)
 
-        parameters = MinMaxFieldStatisticsBase.build_parameters(field=vorticity,
-                components=None, dtype=None, all_quiet=all_quiet,
-                Fmin=Fmin, Fmax=Fmax, Finf=Finf,
+        parameters = MinMaxFieldStatisticsBase.build_parameters(field=vorticity,
+                components=None, dtype=None, all_quiet=all_quiet,
+                Fmin=Fmin, Fmax=Fmax, Finf=Finf,
                 pbasename=pbasename, ppbasename=ppbasename)
 
         (Fmin, Fmax, Finf) = tuple(parameters[k] for k in ('Fmin', 'Fmax', 'Finf'))
-        
+
         check_instance(Fmin, TensorParameter, allow_none=True)
         check_instance(Fmax, TensorParameter, allow_none=True)
         check_instance(Finf, TensorParameter, allow_none=True)
 
-        super(SpectralExternalForce, self).__init__(vorticity=vorticity,
+        super(SpectralExternalForce, self).__init__(vorticity=vorticity,
                 Fext=Fext, dt=dt, variables=variables,
                 Fmin=Fmin, Fmax=Fmax, Finf=Finf,
                 candidate_input_tensors=(vorticity,),
                 candidate_output_tensors=(vorticity,),
-                implementation=implementation,
+                implementation=implementation,
                 base_kwds=base_kwds, **kwds)
 
         self.Fmin = Fmin
diff --git a/hysop/operator/gradient.py b/hysop/operator/gradient.py
index 0aaf9da377950a6dd70b03766007f4fb8af54bac..9eb9c1bbe1a0206399d4da7d855e1bc6557cd95d 100644
--- a/hysop/operator/gradient.py
+++ b/hysop/operator/gradient.py
@@ -274,7 +274,7 @@ class MinMaxGradientStatistics(Gradient):
                        allow_none=True)
         check_instance(name, str, allow_none=True)
         check_instance(pbasename, str, allow_none=True)
-        check_instance(ppbasename, (str,unicode), allow_none=True)
+        check_instance(ppbasename, str, allow_none=True)
         check_instance(implementation, Implementation, allow_none=True)
         check_instance(base_kwds, dict, allow_none=True)
         check_instance(all_quiet, bool, allow_none=True)
@@ -313,16 +313,16 @@ class MinMaxGradientStatistics(Gradient):
        }
 
        _pretty_names = {
-            'Fmin': u'{}\u208b',
-            'Fmax': u'{}\u208a',
-            'Finf': u'|{}|\u208a'
+            'Fmin': '{}₋',
+            'Fmax': '{}₊',
+            'Finf': '|{}|₊'
        }
 
        pbasename = first_not_None(pbasename, gradF.name)
        ppbasename = first_not_None(ppbasename, gradF.pretty_name)
 
        names = { k: v.format(pbasename) for (k,v) in _names.items() }
-       pretty_names = { k: v.format(ppbasename.decode('utf-8'))
+       pretty_names = { k: v.format(ppbasename)
                for (k,v) in _pretty_names.items() }
 
        def make_param(k, quiet):
@@ -355,7 +355,7 @@ class MinMaxGradientStatistics(Gradient):
             raise ValueError(unused_coeffs)
 
         name = first_not_None(name, 'MinMax({})')
-        pretty_name = first_not_None(pretty_name, u'|\u00b1{}|')
+        pretty_name = first_not_None(pretty_name, '|±{}|')
         extra_params = {
                 'name': gradF.new_empty_array(),
                 'pretty_name': gradF.new_empty_array(),
@@ -367,13 +367,13 @@ class MinMaxGradientStatistics(Gradient):
             if (stat is None):
                 continue
             pname  = _names[statname].format(Fi.name)
-            ppname = _pretty_names[statname].format(Fi.pretty_name.decode('utf-8'))
+            ppname = _pretty_names[statname].format(Fi.pretty_name)
             S = stat.view(idx=idx, name=pname, pretty_name=ppname)
             stats = extra_params.setdefault(statname, gradF.new_empty_array())
             stats[idx] = S
             extra_params['name'][idx] = name.format(Fi.name)
             extra_params['pretty_name'][idx] = pretty_name.format(
-                    Fi.pretty_name.decode('utf-8')).encode('utf-8')
+                    Fi.pretty_name)
 
         super(MinMaxGradientStatistics, self).__init__(F=F, gradF=gradF,
                 directions=directions, extra_params=extra_params, 
@@ -404,7 +404,7 @@ class MinMaxGradientStatistics(Gradient): _phony_input_params.update({p.name:p for p in extra_params[pname].ravel()}) _phony_output_params[param.name] = param op = MergeTensorViewsOperator(name=name.format(gradF.name), - pretty_name=pretty_name.format(gradF.pretty_name.decode('utf-8')), + pretty_name=pretty_name.format(gradF.pretty_name), input_params=_phony_input_params, output_params=_phony_output_params) self._phony_op = op diff --git a/hysop/operator/hdf_io.py b/hysop/operator/hdf_io.py index a62bd7a8e38401a7f53e2cef8573f62ff11a8977..3cc7e687c1564289108664f0464f41892d38fb8b 100755 --- a/hysop/operator/hdf_io.py +++ b/hysop/operator/hdf_io.py @@ -308,9 +308,9 @@ class HDF_Writer(HDF_IO): check_instance(variables, dict, keys=Field, values=CartesianTopologyDescriptors) vnames = ['{}'.format(field.name) for field in variables.keys()] - vpnames = [field.pretty_name.decode('utf-8') for field in variables.keys()] + vpnames = [field.pretty_name for field in variables.keys()] name = first_not_None(name, 'write_{}'.format('_'.join(vnames))) - pname = first_not_None(pretty_name, u'write_{}'.format(u'_'.join(vpnames))) + pname = first_not_None(pretty_name, 'write_{}'.format('_'.join(vpnames))) super(HDF_Writer, self).__init__(input_fields=variables, output_fields=None, name=name, pretty_name=pname, **kwds) diff --git a/hysop/operator/min_max.py b/hysop/operator/min_max.py index c15b66cf2a1abb12f421f825e2434e8367f60679..b353d42344d7a75087fb7d7fe526c458a7ba4cdf 100644 --- a/hysop/operator/min_max.py +++ b/hysop/operator/min_max.py @@ -135,7 +135,7 @@ class MinMaxFieldStatistics(ComputationalGraphNodeFrontend): values=CartesianTopologyDescriptors, allow_none=True) check_instance(name, str, allow_none=True) check_instance(pbasename, str, allow_none=True) - check_instance(ppbasename, (str, unicode), allow_none=True) + check_instance(ppbasename, str, allow_none=True) check_instance(implementation, Implementation, allow_none=True) check_instance(base_kwds, dict, keys=str, allow_none=True) @@ -323,7 +323,7 @@ class MinMaxDerivativeStatistics(ComputationalGraphNodeFrontend): allow_none=True) check_instance(name, str, allow_none=True) check_instance(pbasename, str, allow_none=True) - check_instance(ppbasename, (str, unicode), allow_none=True) + check_instance(ppbasename, str, allow_none=True) check_instance(implementation, Implementation, allow_none=True) check_instance(base_kwds, dict, keys=str, allow_none=True) diff --git a/hysop/operator/tests/test_fd_derivative.py b/hysop/operator/tests/test_fd_derivative.py index 15e794282e3fc597de0390fe0b29b841e5651200..02f26f68a1e0f8b3abe1f6fcaa60017b52586e66 100644 --- a/hysop/operator/tests/test_fd_derivative.py +++ b/hysop/operator/tests/test_fd_derivative.py @@ -135,10 +135,10 @@ class TestFiniteDifferencesDerivative(object): from hysop.tools.sympy_utils import subscript, partial for (i,j) in npw.ndindex(*self.analytic_expressions[dim]['gradF'].shape): dFij = self.analytic_expressions[dim]['gradF'][i,j] - print(' *{p}F{}/{p}x{}(x,t) = {}'.format(subscript(i).encode('utf-8'), - subscript(j).encode('utf-8'), + print(' *{p}F{}/{p}x{}(x,t) = {}'.format(subscript(i), + subscript(j), dFij, - p=partial.encode('utf-8'))) + p=partial)) self.t.value = random.random() print(' >Parameter t has been set to {}.'.format(self.t())) print(' >Testing all implementations:') diff --git a/hysop/parameters/default_parameters.py b/hysop/parameters/default_parameters.py index 2e920f0c510d197b576aa835ff761e5a42253c89..d1ffec6ff1eb81da5ef11402c76205c050e29de8 100644 
--- a/hysop/parameters/default_parameters.py +++ b/hysop/parameters/default_parameters.py @@ -26,7 +26,7 @@ def EnstrophyParameter(name=None, pretty_name=None, **kwds): def KineticEnergyParameter(name=None, pretty_name=None, **kwds): name = first_not_None(name, 'kinetic_energy') - pretty_name = first_not_None(pretty_name, u'E\u2096') + pretty_name = first_not_None(pretty_name, 'Eₖ') return ScalarParameter(name=name, pretty_name=pretty_name, **kwds) def VolumicIntegrationParameter(name=None, pretty_name=None, field=None, dtype=None, **kwds): @@ -36,7 +36,7 @@ def VolumicIntegrationParameter(name=None, pretty_name=None, field=None, dtype=N dtype = first_not_None(dtype, field.dtype) dtype = first_not_None(dtype, HYSOP_REAL) name += '_v' - pretty_name += u'\u1d65'.encode('utf-8') + pretty_name += 'ᵥ' if (field.nb_components>1): return TensorParameter(name=name, pretty_name=pretty_name, shape=(field.nb_components,), diff --git a/hysop/parameters/parameter.py b/hysop/parameters/parameter.py index 33c42064a1913ee15a10da4d32e1c65fa5c3db1a..74559bdfba02ecaabe206d017e40051e42af060a 100644 --- a/hysop/parameters/parameter.py +++ b/hysop/parameters/parameter.py @@ -61,7 +61,7 @@ class Parameter(TaggedObject, VariableTag, metaclass=ABCMeta): Return allowed parameter types for this parameter. """ check_instance(name, str) - check_instance(pretty_name, (str, unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(var_name, str, allow_none=True) check_instance(allow_None, bool) check_instance(const, bool) @@ -86,8 +86,6 @@ class Parameter(TaggedObject, VariableTag, metaclass=ABCMeta): tag_prefix='p', variable_kind=Variable.PARAMETER, **kwds) pretty_name = first_not_None(pretty_name, name) - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') var_name = first_not_None(var_name, name) diff --git a/hysop/parameters/tensor_parameter.py b/hysop/parameters/tensor_parameter.py index 9ac2c0767244c69d2368b85dad6b9f8c9c1e7451..f7a2f59eb63798c58785c306dbc85e226d8221b4 100644 --- a/hysop/parameters/tensor_parameter.py +++ b/hysop/parameters/tensor_parameter.py @@ -12,8 +12,8 @@ class TensorParameter(Parameter): that may change value as simulation advances. """ - def __new__(cls, name, shape, dtype=HYSOP_REAL, - pretty_name=None, initial_value=None, + def __new__(cls, name, shape, dtype=HYSOP_REAL, + pretty_name=None, initial_value=None, min_value=None, max_value=None, ignore_nans=False, **kwds): """ Create or get an existing TensorParameter with a specific name @@ -25,7 +25,7 @@ class TensorParameter(Parameter): A name for the parameter that uniquely identifies it. pretty_name: string A pretty name for the parameter. - shape: array like of ints + shape: array like of ints Shape of this TensorParameter. dtype: type convertible to np.dtype, optional Underlying dtype of this TensorParameter. @@ -41,10 +41,10 @@ class TensorParameter(Parameter): Set this to True to allow NaN values. kwds: dict Base class arguments. - + Attributes ---------- - shape: array like of ints + shape: array like of ints Shape of this TensorParameter. dtype: type convertible to np.dtype Underlying dtype of this TensorParameter. @@ -55,13 +55,13 @@ class TensorParameter(Parameter): ignore_nans: bool True if this TensorParameter can have NaN values. """ - + if ('allow_None' in kwds) and (kwds['allow_None'] is True): msg='A TensorParameter cannot be allowed to be set to None.'
raise ValueError(msg) check_instance(name, (str, SymbolicBase)) - + if isinstance(name, SymbolicBase): symbol = name name = symbol._name @@ -70,7 +70,7 @@ class TensorParameter(Parameter): symbol = None pretty_name = first_not_None(pretty_name, name) check_instance(name, str) - check_instance(pretty_name, (str, unicode)) + check_instance(pretty_name, str) check_instance(shape, (list,tuple), values=(int,long,np.integer), allow_none=True) check_instance(ignore_nans, bool) assert (min_value is None) or (max_value is None) or (min_value <= max_value) @@ -84,11 +84,11 @@ class TensorParameter(Parameter): parameter_types += ( np.float16, np.float32, np.float64, np.longdouble, float) elif is_complex(dtype): parameter_types += ( np.complex64, np.complex128, np.clongdouble, complex ) - + initial_value = cls._compute_initial_value(shape, dtype, initial_value, min_value, max_value, ignore_nans) - obj = super(TensorParameter,cls).__new__(cls, + obj = super(TensorParameter,cls).__new__(cls, name=name, pretty_name=pretty_name, parameter_types=parameter_types, allow_None=False, initial_value=initial_value, **kwds) @@ -96,7 +96,7 @@ class TensorParameter(Parameter): obj._min_value = min_value obj._max_value = max_value obj._ignore_nans = ignore_nans - + from hysop.symbolic.parameter import SymbolicTensorParameter, SymbolicScalarParameter if obj.__class__ is TensorParameter: if symbol: @@ -112,7 +112,7 @@ class TensorParameter(Parameter): obj._symbol = SymbolicScalarParameter(parameter=obj) return obj - + @classmethod def _compute_initial_value(cls, shape, dtype, initial_value, min_value=None, max_value=None, ignore_nans=None): @@ -151,36 +151,36 @@ class TensorParameter(Parameter): def _update_symbol(self): raise NotImplementedError - + def view(self, idx, name=None, pretty_name=None, **kwds): """Take a view on a scalar contained in the Parameter.""" assert (self._value is not None) initial_value = self._value[tuple(slice(k,k+1) for k in idx)] _name = self.name + '_' + '_'.join(str(i) for i in idx) - _pretty_name = self.pretty_name + subscripts(ids=idx, sep='').encode('utf-8') + _pretty_name = self.pretty_name + subscripts(ids=idx, sep='') name = first_not_None(name, _name) pretty_name = first_not_None(pretty_name, _pretty_name) if initial_value.size == 1: from scalar_parameter import ScalarParameter return ScalarParameter(name=name, pretty_name=pretty_name, - initial_value=initial_value.ravel(), dtype=self.dtype, - min_value=self.min_value, max_value=self.max_value, - ignore_nans=self.ignore_nans, - const=self.const, quiet=self.quiet, + initial_value=initial_value.ravel(), dtype=self.dtype, + min_value=self.min_value, max_value=self.max_value, + ignore_nans=self.ignore_nans, + const=self.const, quiet=self.quiet, is_view=True, **kwds) else: return TensorParameter(name=name, pretty_name=pretty_name, initial_value=initial_value, dtype=self.dtype, shape=initial_value.shape, - min_value=self.min_value, max_value=self.max_value, - ignore_nans=self.ignore_nans, - const=self.const, quiet=self.quiet, + min_value=self.min_value, max_value=self.max_value, + ignore_nans=self.ignore_nans, + const=self.const, quiet=self.quiet, is_view=True, **kwds) def iterviews(self): """Iterate over all parameters views to yield scalarparameters.""" for idx in np.ndindex(self.shape): yield (idx, self.view(idx)) - + @classmethod def __check_values(cls, a, min_value, max_value, ignore_nans, dtype): if np.isscalar(a): @@ -202,7 +202,7 @@ class TensorParameter(Parameter): assert np.all(np.max(a) <= max_value), 'max value constraint 
failed.' def check_values(self, a): - return self.__check_values(a, + return self.__check_values(a, dtype=self.dtype, min_value=self.min_value, max_value=self.max_value, @@ -247,7 +247,7 @@ class TensorParameter(Parameter): even for ScalarParameter parameters. """ return self._value.copy() - + def _set_value_impl(self, value): """Given value will be copied into internal buffer.""" assert (self._value is not None) @@ -295,7 +295,7 @@ TensorParameter[name={}, pname={}] self.min_value, self.max_value, self.ignore_nans, self.value) return ss - + def short_description(self): attrs=('name', 'pretty_name', 'shape', 'dtype', 'min_value', 'max_value') info = [] diff --git a/hysop/symbolic/__init__.py b/hysop/symbolic/__init__.py index 36dc81b562d0bfe0b8ba3544336805e509b132cb..bdf15020c14ff94f0f4494b23d92dbf4c791eae4 100644 --- a/hysop/symbolic/__init__.py +++ b/hysop/symbolic/__init__.py @@ -35,7 +35,7 @@ freq_symbols = tuple(SpaceSymbol( dspace_symbols = tuple(SpaceSymbol( name='dx_{}'.format(i), - pretty_name=u'd'+xsymbol+subscript(i), + pretty_name='d'+xsymbol+subscript(i), var_name='dx_{}'.format(i), latex_name='dx_{{{}}}'.format(i)) for i in range(16)) @@ -43,7 +43,7 @@ dspace_symbols = tuple(SpaceSymbol( local_indices_symbols = tuple(SpaceSymbol( name = 'i{}'.format(i), - pretty_name=u'i'+subscript(i), + pretty_name='i'+subscript(i), var_name='i{}'.format(i), latex_name='i_{{{}}}'.format(i)) for i in range(16)) @@ -51,7 +51,7 @@ local_indices_symbols = tuple(SpaceSymbol( global_indices_symbols = tuple(SpaceSymbol( name = 'I{}'.format(i), - pretty_name=u'I'+subscript(i), + pretty_name='I'+subscript(i), var_name='I{}'.format(i), latex_name='I_{{{}}}'.format(i)) for i in range(16)) diff --git a/hysop/symbolic/array.py b/hysop/symbolic/array.py index 45aa2c44edbe5dbf0003db431ea9ae42d9843804..067d0efd84b1da5f8b4a502d155c390bb685b0c0 100644 --- a/hysop/symbolic/array.py +++ b/hysop/symbolic/array.py @@ -125,7 +125,7 @@ class SymbolicArray(SymbolicMemoryObject): name=name, **kwds) msg='Dimension could not be deduced from memory_object, ' msg+='please specify a dimension for symbolic array {}.' - msg=msg.format(name.encode('utf-8')) + msg=msg.format(name) if (memory_object is None): if (dim is None): raise RuntimeError(msg) @@ -197,7 +197,7 @@ class SymbolicNdBuffer(SymbolicBuffer): name=name, **kwds) msg='Dimension could not be deduced from memory_object, ' msg+='please specify a dimension for symbolic array {}.' 
- msg=msg.format(name.encode('utf-8')) + msg=msg.format(name) if (memory_object is None): if (dim is None): raise RuntimeError(msg) diff --git a/hysop/symbolic/base.py b/hysop/symbolic/base.py index 232c6c8708671489745570c264759ab2d2c97b23..052d9d34a294d1c864a77d38aa98941cd33d59ed 100644 --- a/hysop/symbolic/base.py +++ b/hysop/symbolic/base.py @@ -137,7 +137,7 @@ class TensorBase(npw.ndarray): for idx in npw.ndindex(*shape): name = '{}_{}'.format(name, vsep.join(str(i) for i in idx)) - pname = u'{}{}'.format(pretty_name.decode('utf-8'), u''.join(subscript(i) + pname = '{}{}'.format(pretty_name, ''.join(subscript(i) for i in idx)) vname = '{}_{}'.format(name, vsep.join(str(i) for i in idx)) diff --git a/hysop/symbolic/constant.py b/hysop/symbolic/constant.py index 3ff7c109dc9d22a9929bd75555bbd3c11dffd6d5..4e6e7a431aff04ac0dc3b65463416abadac76be9 100644 --- a/hysop/symbolic/constant.py +++ b/hysop/symbolic/constant.py @@ -7,7 +7,7 @@ class SymbolicConstant(DummySymbolicScalar): def __new__(cls, name, **kwds): if ('dtype' not in kwds): msg='dtype has not been specified for SymbolicConstant {}.' - msg=msg.format(name).encode('utf-8') + msg=msg.format(name) raise RuntimeError(msg) dtype = kwds.pop('dtype') value = kwds.pop('value', None) @@ -27,7 +27,7 @@ class SymbolicConstant(DummySymbolicScalar): msg='{}::{} value has not been bound yet.' msg=msg.format(self.__class__.__name__, self.name) raise RuntimeError(msg) - + def bind_value(self, value, force=False): if (not force) and (self._value is not None): msg='A value has already been bound to SymbolicConstant {}.'.format(self.name) @@ -57,7 +57,7 @@ class SymbolicConstant(DummySymbolicScalar): self.assert_bound() return '{}[dtype={}, ctype={}]'.format( self.__class__.__name__, self.dtype, self.ctype) - + def __eq__(self, other): return id(self) == id(other) def __hash__(self): diff --git a/hysop/symbolic/tmp.py b/hysop/symbolic/tmp.py index 04ab6629d32a58a1a4c48f90a6aecafa59fb1622..1605e214880a5b9f74fd91daa88ee732fd24fe6b 100644 --- a/hysop/symbolic/tmp.py +++ b/hysop/symbolic/tmp.py @@ -5,17 +5,17 @@ class TmpScalar(SymbolicScalar): def __new__(cls, *args, **kwds): if ('dtype' not in kwds): msg='dtype has not been specified for TmpScalar {}.' 
- msg=msg.format(kwds.get('name','unknown').encode('utf-8')) + msg=msg.format(kwds.get('name','unknown')) raise RuntimeError(msg) dtype = kwds.pop('dtype') obj = super(TmpScalar,cls).__new__(cls, *args, **kwds) obj._dtype = dtype return obj - + @property def dtype(self): return self._dtype - + @property def ctype(self): from hysop.backend.device.codegen.base.variables import dtype_to_ctype diff --git a/hysop/tools/decorators.py b/hysop/tools/decorators.py index 2e0556a5013ed82409f438061cb206ec8abd9d61..2bbe977f20f6d7eb29605a30ff6d6417467ab266 100644 --- a/hysop/tools/decorators.py +++ b/hysop/tools/decorators.py @@ -69,7 +69,7 @@ def debug(f): '{} '.format(fw.__code__.co_firstlineno) if verbose else '', '>'*debug.call_depth if not verbose else '', '{}::'.format(cls) if (cls is not None) else '', - fw.__name__) + fw.__name__)) if 'name' in kw: print('with name {}.'.format(kw['name'])) diff --git a/hysop/tools/field_utils.py b/hysop/tools/field_utils.py index 5dca138946ced291dade9789f6047cb7651de708..10090f4849052fb08db9b44120c9fb5f7dc71117 100644 --- a/hysop/tools/field_utils.py +++ b/hysop/tools/field_utils.py @@ -9,12 +9,12 @@ from sympy.printing.latex import LatexPrinter class BasePrinter(object): def print_Derivative(self, expr): (bvar, pvar, vvar, lvar) = print_all_names(expr.args[0]) - pvar = pvar.decode('utf-8') + pvar = pvar all_xvars = get_derivative_variables(expr) xvars = tuple(set(all_xvars)) varpows = tuple(all_xvars.count(x) for x in xvars) bxvars = tuple(print_name(x) for x in xvars) - pxvars = tuple(print_pretty_name(x).decode('utf-8') for x in xvars) + pxvars = tuple(print_pretty_name(x) for x in xvars) vxvars = tuple(print_var_name(x) for x in xvars) lxvars = tuple(print_latex_name(x) for x in xvars) return DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, @@ -148,39 +148,36 @@ def to_str(*args): if len(args)==1: args=to_tuple(args[0]) def _to_str(x): - if isinstance(x, unicode): - return x.encode('utf-8') - else: - return str(x) + return str(x) return tuple(_to_str(y) for y in args) # exponents formatting functions bexp_fn = lambda x: '^{}'.format(x) if (x>1) else '' -pexp_fn = lambda x, sep=',': exponents(x, sep=sep) if (x>1) else u'' +pexp_fn = lambda x, sep=',': exponents(x, sep=sep) if (x>1) else '' vexp_fn = lambda x: 'e{}'.format(x) if (x>1) else '' lexp_fn = lambda x: '^<LBRACKET>{}<RBRACKET>'.format(x) if (x>1) else '' # powers formatting functions bpow_fn = lambda x: '**{}'.format(x) if (x>1) else '' -ppow_fn = lambda x, sep=',': exponents(x,sep=sep) if (x>1) else u'' +ppow_fn = lambda x, sep=',': exponents(x,sep=sep) if (x>1) else '' vpow_fn = lambda x: 'p{}'.format(x) if (x>1) else '' lpow_fn = lambda x: '^<LBRACKET>{}<RBRACKET>'.format(x) if (x>1) else '' # subcripts formatting functions bsub_fn = lambda x: '_{}'.format(x) if (x is not None) else '' -psub_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else u'' +psub_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else '' vsub_fn = lambda x: 's{}'.format(x) if (x is not None) else '' lsub_fn = lambda x: '_<LBRACKET>{}<RBRACKET>'.format(x) if (x is not None) else '' # components formatting functions bcomp_fn = lambda x: ','.join(to_str(x)) if (x is not None) else '' -pcomp_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else u'' +pcomp_fn = lambda x, sep=',': subscripts(x,sep=sep) if (x is not None) else '' vcomp_fn = lambda x: '_'+'_'.join(to_str(x)) if (x is not None) else '' lcomp_fn = lambda x: 
'_<LBRACKET>{}<RBRACKET>'.format(','.join(to_str(x))) if (x is not None) else '' # join formatting functions bjoin_fn = lambda x: '_'.join(to_str(x)) if (x is not None) else '' -pjoin_fn = lambda x: ''.join(to_str(x)) if (x is not None) else u'' +pjoin_fn = lambda x: ''.join(to_str(x)) if (x is not None) else '' vjoin_fn = lambda x: '_'.join(to_str(x)) if (x is not None) else '' ljoin_fn = lambda x: ''.join(to_str(x)) if (x is not None) else '' @@ -225,8 +222,6 @@ class DifferentialStringFormatter(object): } for (k,v) in special_characters.items(): ss = ss.replace(k,v) - if isinstance(ss, unicode): - ss = ss.encode('utf-8') return ss @classmethod @@ -267,7 +262,7 @@ class DifferentialStringFormatter(object): vrp = '' if len(vvar) <= trigp else vrp llp = '' if len(lvar) <= trigp else llp lrp = '' if len(lvar) <= trigp else lrp - template=u'{d}{dpow}{lp}{var}{components}{rp}{varpow}' + template='{d}{dpow}{lp}{var}{components}{rp}{varpow}' bname = template.format(d=bd, dpow=bpow_fn(dpow), components=bcomp_fn(components), var=bvar, varpow=bpow_fn(varpow), @@ -360,10 +355,10 @@ if __name__ == '__main__': for a in args: print(a) else: - print(u', '.join(a.decode('utf-8') for a in args)).encode('utf-8') + print(', '.join(a for a in args)) print - bvar, pvar, vvar, lvar = 'Fext', u'F\u1d49xt', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>' + bvar, pvar, vvar, lvar = 'Fext', 'Fₑₓₜ', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>' _print(DifferentialStringFormatter.return_names(bvar, pvar, vvar, lvar)) print @@ -396,7 +391,7 @@ if __name__ == '__main__': _print(DifferentialStringFormatter.format_partial_names(bvar, pvar, vvar, lvar, varpows=(2,2), components=((0,1),(1,0)))) print - bvar, pvar, vvar, lvar = 'Fext', u'F\u1d49xt', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>' + bvar, pvar, vvar, lvar = 'Fext', 'Fₑₓₜ', 'Fext', '<LBRACKET>F_<LBRACKET>ext<RBRACKET><RBRACKET>' bxvars, pxvars, vxvars, lxvars = (('x','y'),)*4 _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar)) _print(DifferentialStringFormatter.format_pd(bvar, pvar, vvar, lvar, varpows=2)) diff --git a/hysop/tools/handle.py b/hysop/tools/handle.py index 5a51e25e39c3497e605fa6c43f7d04a7d755ef01..4a148a5e8a38da4535b6f3732a851ba73ff19e24 100644 --- a/hysop/tools/handle.py +++ b/hysop/tools/handle.py @@ -156,10 +156,10 @@ class TaggedObject(object, metaclass=ABCMeta): tag_formatter = first_not_None(self.__tag_formatter, lambda x: x) tag_prefix = first_not_None(self.__tag_prefix, '') tag_postfix = first_not_None(self.__tag_postfix, '') - return u'{}{}{}'.format( + return '{}{}{}'.format( tag_prefix, tag_formatter(subscript(self.__tag_id)), - tag_postfix).encode('utf-8') + tag_postfix) def __get_object_full_tag(self): """ diff --git a/hysop/tools/interface.py b/hysop/tools/interface.py index d49604237fbe4ea0ecf133829678ae600af2eb17..789b52f20d5cf15ac8d0d7b5a904e00059f62f12 100644 --- a/hysop/tools/interface.py +++ b/hysop/tools/interface.py @@ -25,8 +25,8 @@ class NamedObjectI(object, metaclass=ABCMeta): Create an abstract named object that contains a symbolic value. name : string A name for the field. - pretty_name: string or unicode, optional. - A pretty name used for display whenever possible (unicode supported). + pretty_name: string, optional. + A pretty name used for display whenever possible. Defaults to name. kwds: dict Keywords arguments for base class.
@@ -40,14 +40,12 @@ class NamedObjectI(object, metaclass=ABCMeta): def rename(self, name, pretty_name=None, latex_name=None, var_name=None): """Change the names of this object.""" check_instance(name, str) - check_instance(pretty_name, (str,unicode), allow_none=True) + check_instance(pretty_name, str, allow_none=True) check_instance(latex_name, str, allow_none=True) pretty_name = first_not_None(pretty_name, name) latex_name = first_not_None(latex_name, name) - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') check_instance(pretty_name, str) self._name = name diff --git a/hysop/tools/spectral_utils.py b/hysop/tools/spectral_utils.py index cb1a2f2886ad4dc98bef0c407adda0a21b3398e5..d6ca0fb8b58fa9e0fd85e0ade8f2cf2aaf3b44e1 100644 --- a/hysop/tools/spectral_utils.py +++ b/hysop/tools/spectral_utils.py @@ -735,8 +735,8 @@ class EnergyDumper(object): os.remove(filename) f = open(filename, 'a') - header = u'# Evolution of the power spectrum of {}'.format(energy_parameter.pretty_name.decode('utf-8')) - f.write(header.encode('utf-8')) + header = '# Evolution of the power spectrum of {}'.format(energy_parameter.pretty_name) + f.write(header) f.write('\n# with mean removed (first coefficient) and values clamped to ulp = epsilon^4 = {}'.format(ulp)) f.write('\n# ITERATION TIME log10(max(POWER_SPECTRUM[1:], ulp)))') f.flush() @@ -822,11 +822,11 @@ class EnergyPlotter(object): ax = axes[0] if len(energy_parameters)==1: - default_fig_title = u'Energy parameter {}'.format( - energy_parameters.values()[0].pretty_name.decode('utf-8')) + default_fig_title = 'Energy parameter {}'.format( + energy_parameters.values()[0].pretty_name) else: - default_fig_title = u'Energy parameters {}'.format( - u' | '.join(p.pretty_name.decode('utf-8') for p in energy_parameters.values())) + default_fig_title = 'Energy parameters {}'.format( + ' | '.join(p.pretty_name for p in energy_parameters.values())) fig_title = first_not_None(fig_title, default_fig_title) self.fig_title = fig_title @@ -933,7 +933,7 @@ class EnergyPlotter(object): ax.set_ylim(ymin, ymax) ax.yaxis.set_ticks(major_yticks) ax.yaxis.set_ticks(minor_yticks, minor=True) - ax.set_title(u'{}\niteration={}, t={}'.format(self.fig_title, iteration, time)) + ax.set_title('{}\niteration={}, t={}'.format(self.fig_title, iteration, time)) ax.relim() def _draw(self): diff --git a/hysop/tools/string_utils.py b/hysop/tools/string_utils.py index e8fce574b005831591301432e5d522976b1b2406..0db67a4e152365581eb77a356d65a987da004052 100644 --- a/hysop/tools/string_utils.py +++ b/hysop/tools/string_utils.py @@ -48,11 +48,10 @@ def framed_str(title, msg, c='=', at_border=2): title = c*at_border + title + c*at_border header = title + c*max(0, length-len(title)) footer = c*len(header) - return u'{}\n{}\n{}'.format(header, msg, footer) + return '{}\n{}\n{}'.format(header, msg, footer) def strlen(s): """Like length but replace unicode characters by space before applying len()""" - #res = len(s.decode('utf-8')) return len(s) def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None): @@ -75,9 +74,9 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None): replace: replacement when the string is too short (here ----) newline_prefix: prefix for each newline split, per column - All string inputs can be of type unicode or str. + All string inputs can be of type str. 
""" - sstr = (str, unicode) + sstr = str check_instance(strdata, tuple, values=sstr) ndata = len(strdata) @@ -105,7 +104,7 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None): if (ml is None) or strlen(s)<ml: data = [s] else: - s = s.encode('utf-8') + s = s split = [s] for sep in ss: split = list(y+(sep if (i!=len(x.split(sep))-1) else '') @@ -114,9 +113,9 @@ def multiline_split(strdata, maxlen, split_sep, replace, newline_prefix=None): data = [] s='' while split: - while split and (strlen(s.decode('utf-8')) < ml): + while split and (strlen(s) < ml): s += split.pop(0) - data.append(s.decode('utf-8')) + data.append(s) s=nlp splitted_data.append(data) diff --git a/hysop/tools/sympy_utils.py b/hysop/tools/sympy_utils.py index e49030b6e436317a4e39c3ac604c9c1aea69a91d..442367c8882dfd36cb62c3e0dbbce46d1fcf97f3 100644 --- a/hysop/tools/sympy_utils.py +++ b/hysop/tools/sympy_utils.py @@ -7,16 +7,15 @@ from sympy.printing.str import StrPrinter, StrReprPrinter from sympy.printing.latex import LatexPrinter # unicode subscripts for decimal numbers, signs and parenthesis -decimal_subscripts = tuple('\u208{}'.format(i).decode('unicode-escape') for i in range(10)) -decimal_exponents = (u'\u2070', u'\u00b9', u'\u00b2', u'\u00b3') -decimal_exponents += tuple('\u207{}'.format(i).decode('unicode-escape') for i in range(4,10)) -greak = tuple('\u{:04x}'.format(i).decode('unicode-escape') for i in range(945, 970)) -Greak = tuple('\u{:04x}'.format(i).decode('unicode-escape') for i in range(913, 938)) -signs = (u'\u208a',u'\u208b') -parenthesis = (u'\u208d', u'\u208e') -partial = u'\u2202' -nabla = u'\u2207' -xsymbol = u'x' +decimal_subscripts = 'â‚€â‚₂₃₄₅₆₇₈₉' +decimal_exponents = 'â°Â¹Â²Â³â´âµâ¶â·â¸â¹' +greak = 'αβγδεζηθικλμνξοπÏςστυφχψω' +Greak = 'ΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡ΢ΣΤΥΦΧΨΩ' +signs = 'â‚Šâ‚‹' +parenthesis = 'â‚â‚Ž' +partial = '∂' +nabla = '∇' +xsymbol = 'x' freq_symbol = greak[12] # nu def round_expr(expr, num_digits=3): @@ -35,7 +34,6 @@ def truncate_expr(expr, maxlen=80): class CustomStrPrinter(StrPrinter): def _print_Derivative(self, expr): - _partial = partial.encode('utf-8') syms = list(reversed(expr.variables)) nvars = len(syms) @@ -44,10 +42,10 @@ class CustomStrPrinter(StrPrinter): else: content = '[{}]'.format(self._print(expr.expr)) - prefix = '{}{}{}/{}'.format(_partial, exponent(nvars).encode('utf-8') if nvars>1 else '', - content, _partial) + prefix = '{}{}{}/{}'.format(partial, exponent(nvars) if nvars>1 else '', + content, partial) for (sym, num) in group(syms, multiple=False): - prefix += '{}{}'.format(sym, exponent(num).encode('utf-8') if num>1 else '') + prefix += '{}{}'.format(sym, exponent(num) if num>1 else '') return prefix class CustomStrReprPrinter(StrReprPrinter): @@ -74,12 +72,6 @@ def enable_pretty_printing(): class SymbolicBase(object): def __new__(cls, name, var_name=None, latex_name=None, pretty_name=None, **kwds): - if isinstance(name, unicode): - name = name.encode('utf-8') - if isinstance(pretty_name, unicode): - pretty_name = pretty_name.encode('utf-8') - if isinstance(latex_name, unicode): - latex_name = latex_name.encode('utf-8') check_instance(name, str) check_instance(var_name, str, allow_none=True) check_instance(latex_name, str, allow_none=True) @@ -167,7 +159,7 @@ def subscript(i, with_sign=False, disable_unicode=False): if disable_unicode: out = snumber else: - out =u'' + out ='' for s in snumber: if s in decimals: out += decimal_subscripts[int(s)] @@ -189,7 +181,7 @@ def exponent(i, with_sign=False): s0 = snumber[0] if s0 in decimals: 
snumber = '+'+snumber - out = u'' + out = '' for s in snumber: if s in decimals: out += decimal_exponents[int(s)] @@ -210,11 +202,11 @@ def subscripts(ids,sep,with_sign=False,with_parenthesis=False,prefix='',disable_ if with_parenthesis: lparen = '(' if disable_unicode else parenthesis[0] rparen = ')' if disable_unicode else parenthesis[1] - base = '{}{}{}{}' if disable_unicode else u'{}{}{}{}' + base = '{}{}{}{}' if disable_unicode else '{}{}{}{}' return base.format(prefix,lparen,sep.join([subscript(i,with_sign,disable_unicode) for i in ids]),rparen) else: - base = '{}{}' if disable_unicode else u'{}{}' + base = '{}{}' if disable_unicode else '{}{}' return base.format(prefix,sep.join([subscript(i,with_sign,disable_unicode) for i in ids])) def exponents(ids,sep,with_sign=False,with_parenthesis=False,prefix=''): @@ -224,9 +216,9 @@ def exponents(ids,sep,with_sign=False,with_parenthesis=False,prefix=''): """ ids = to_tuple(ids) if with_parenthesis: - return u'{}{}{}{}'.format(prefix,parenthesis[0],sep.join([exponent(i,with_sign) for i in ids]),parenthesis[1]) + return '{}{}{}{}'.format(prefix,parenthesis[0],sep.join([exponent(i,with_sign) for i in ids]),parenthesis[1]) else: - return u'{}{}'.format(prefix,sep.join([exponent(i,with_sign) for i in ids])) + return '{}{}'.format(prefix,sep.join([exponent(i,with_sign) for i in ids])) def tensor_symbol(prefix,shape,origin=None,mask=None, sep=None,with_parenthesis=False,force_sign=False): @@ -239,7 +231,7 @@ def tensor_symbol(prefix,shape,origin=None,mask=None, It also returns all generated Symbols as a list. """ origin = np.asarray(origin) if origin is not None else np.asarray([0]*len(shape)) - sep = sep if sep is not None else u',' + sep = sep if sep is not None else ',' with_sign = force_sign or ((origin>0).any() and len(shape)>1) tensor = np.empty(shape=shape,dtype=object) @@ -248,7 +240,7 @@ def tensor_symbol(prefix,shape,origin=None,mask=None, ids = idx-origin sname = subscripts(ids,sep,with_sign=with_sign, with_parenthesis=with_parenthesis,prefix=prefix) - tensor[idx] = sm.Symbol(sname.encode('utf-8'), real=True) + tensor[idx] = sm.Symbol(sname, real=True) else: tensor[idx] = 0 tensor_vars = tensor.ravel().tolist() diff --git a/hysop/tools/types.py b/hysop/tools/types.py index a58467525f57cc9ab690f2e85502efea37bf1a08..a9cd5dcbfbf136e0bd6fd3f54bb95dfb55de70ff 100644 --- a/hysop/tools/types.py +++ b/hysop/tools/types.py @@ -67,8 +67,6 @@ def check_instance(val, cls, allow_none=False, ', ...' if (len(val)>5) else '') except: types = type(val) - if isinstance(val, unicode): - val = val.encode('utf-8') msg='\nFATAL ERROR: Type did not match any of types {} for the following value:\n{}\n' msg+='which is of type {}.\n' msg=msg.format(all_cls, val, types) diff --git a/hysop_examples/examples/bubble/periodic_bubble.py b/hysop_examples/examples/bubble/periodic_bubble.py index 1b25b879d72ddc0c6a48f2c964ff16c025edef0c..adabd532a91588d41c3bc18343f7d6b7155ecb4c 100644 --- a/hysop_examples/examples/bubble/periodic_bubble.py +++ b/hysop_examples/examples/bubble/periodic_bubble.py @@ -1,7 +1,7 @@ ## HySoP Example: Bubble nD -## Osher1995 (first part): -## A Level Set Formulation of Eulerian Interface Capturing Methods for +## Osher1995 (first part): +## A Level Set Formulation of Eulerian Interface Capturing Methods for ## Incompressible Fluid flows. ## ## This example is without levelset ! 
@@ -20,7 +20,7 @@ def init_rho(data, coords, Br, Bc, rho1, rho2, eps, component): # initialize density with the levelset init_phi(data=data, coords=coords, component=component, Br=Br, Bc=Bc) data[...] = regularize(data, rho1, rho2, eps) - + def init_mu(data, coords, Br, Bc, mu1, mu2, eps, component): assert (component==0) # initialize viscosity with the levelset @@ -51,7 +51,7 @@ def H_eps(x, eps): H = np.empty_like(x) H[np.where(x<-eps)] = 0.0 H[np.where(x>+eps)] = 1.0 - + ie = np.where(np.abs(x)<=eps) xe = x[ie] H[ie] = (xe + eps)/(2*eps) + np.sin(np.pi*xe/eps)/(2*np.pi) @@ -79,11 +79,11 @@ def compute(args): dim = args.ndim npts = args.npts box = Box(origin=args.box_origin, length=args.box_length, dim=dim) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) - + # Setup usual implementation specific variables impl = args.impl extra_op_kwds = {'mpi_params': mpi_params} @@ -92,40 +92,40 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. - + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number cl_env = get_or_create_opencl_env( mpi_params=mpi_params, - platform_id=args.cl_platform_id, + platform_id=args.cl_platform_id, device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: msg='Unknown implementation \'{}\'.'.format(impl) raise ValueError(msg) - + # Define parameters and field (time, timestep, velocity, vorticity, enstrophy) t, dt = TimeParameters(dtype=args.dtype) velo = VelocityField(domain=box, dtype=args.dtype) vorti = VorticityField(velocity=velo) - rho = DensityField(domain=box, dtype=args.dtype) + rho = DensityField(domain=box, dtype=args.dtype) mu = ViscosityField(domain=box, dtype=args.dtype, mu=True) enstrophy = EnstrophyParameter(dtype=args.dtype) rhov = VolumicIntegrationParameter(field=rho) muv = VolumicIntegrationParameter(field=mu) - + ### Build the directional operators - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advec', - velocity = velo, + velocity = velo, advected_fields = (vorti, rho, mu), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, rho: npts, mu: npts}, @@ -148,14 +148,14 @@ def compute(args): pretty_name='sdiff', formulation = args.stretching_formulation, viscosity = mu, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts, mu: npts}, dt=dt, **extra_op_kwds) elif (dim==2): stretch_diffuse = DirectionalDiffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='diff{}'.format(vorti.pretty_name), coeffs = mu, fields = vorti, variables = {vorti: npts, mu: npts}, @@ -163,7 +163,7 @@ def compute(args): else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> External force rot(-rho*g) from hysop.symbolic import space_symbols from hysop.symbolic.base import SymbolicTensor @@ -178,7 +178,7 @@ def 
compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, variables={vorti: npts, @@ -187,14 +187,14 @@ def compute(args): #> Directional splitting operator subgraph splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) - splitting.push_operators(advec, - diffuse, + splitting.push_operators(advec, + diffuse, stretch_diffuse, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, - variables={velo:npts, vorti: npts}, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + variables={velo:npts, vorti: npts}, projection=args.reprojection_frequency, implementation=impl, **extra_op_kwds) @@ -202,11 +202,11 @@ def compute(args): io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={velo: npts, - vorti: npts, - rho: npts, + variables={velo: npts, + vorti: npts, + rho: npts, mu: npts}) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -235,7 +235,7 @@ def compute(args): integrate_mu = Integrate(name='integrate_mu', field=mu, variables={mu: npts}, parameter=muv, scaling='normalize', implementation=impl, **extra_op_kwds) - + ### Adaptive timestep operator adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-3) @@ -243,13 +243,13 @@ def compute(args): equivalent_CFL=True) dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF) - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -257,11 +257,11 @@ def compute(args): } ) problem = Problem(method=method) - problem.insert(poisson, + problem.insert(poisson, dump_fields, - splitting, + splitting, # integrate_enstrophy, integrate_rho, integrate_mu, - min_max_rho, min_max_mu, min_max_U, min_max_W, + min_max_rho, min_max_mu, min_max_U, min_max_W, adapt_dt) problem.build(args) @@ -269,22 +269,22 @@ def compute(args): # display the graph on the given process rank. 
if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, + simu.write_parameters(t, dt_cfl, dt_advec, dt, enstrophy, rhov, muv, - min_max_U.Finf, min_max_W.Finf, + min_max_U.Finf, min_max_W.Finf, min_max_rho.Fmin, min_max_rho.Fmax, min_max_mu.Fmin, min_max_mu.Fmax, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, viscosity and density on all topologies Bc, Br = args.Bc, args.Br dx = np.max(np.divide(box.length, np.asarray(args.npts)-1)) @@ -295,10 +295,10 @@ def compute(args): problem.initialize_field(field=mu, formula=init_mu, mu1=args.mu1, mu2=args.mu2, Bc=Bc, Br=Br, reorder='Bc', eps=eps) # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -309,7 +309,7 @@ if __name__=='__main__': class PeriodicBubbleArgParser(HysopArgParser): def __init__(self): prog_name = 'periodic_bubble' - default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold') @@ -320,7 +320,7 @@ if __name__=='__main__': description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.' - + super(PeriodicBubbleArgParser, self).__init__( prog_name=prog_name, description=description, @@ -354,7 +354,7 @@ if __name__=='__main__': self._check_default(args, vars_, float, allow_none=False) self._check_positive(args, vars_, strict=False, allow_none=False) self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False) - + Bc, Br = args.Bc, args.Br if len(Bc)!=len(Br): msg='Specified {} bubble positions and {} bubble radi.' @@ -368,7 +368,7 @@ if __name__=='__main__': msg='Specified bubble radius is not a float, got {} which is of type {}.' msg=msg.format(br, type(br).__name__) self.error(msg) - + def _add_graphical_io_args(self): graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args() graphical_io.add_argument('-pp', '--plot-parameters', action='store_true', @@ -376,16 +376,16 @@ if __name__=='__main__': help=('Plot the density and viscosity integrals during simulation. 
'+ 'Simulation will stop at each time of interest and '+ 'the plot will be updated every specified freq iterations.')) - graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, + graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, dest='plot_freq', help='Plotting update frequency in terms of iterations.') - + def _check_file_io_args(self, args): super(PeriodicBubbleArgParser, self)._check_file_io_args(args) self._check_default(args, 'plot_parameters', bool, allow_none=False) self._check_default(args, 'plot_freq', int, allow_none=False) self._check_positive(args, 'plot_freq', strict=True, allow_none=False) - + def _setup_parameters(self, args): super(PeriodicBubbleArgParser, self)._setup_parameters(args) dim = args.ndim @@ -399,10 +399,10 @@ if __name__=='__main__': parser = PeriodicBubbleArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(256,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=0.51, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=0.51, dt=1e-5, cfl=0.5, lcfl=0.125, - dump_freq=10, + dump_freq=10, dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50), rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050, Bc = ((0.5,0.15,0.5),(0.5,0.45,0.5),), Br = (0.1,0.15,)) diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset.py b/hysop_examples/examples/bubble/periodic_bubble_levelset.py index d3ba8e80fda7fec5edd53930232ca6a90f20f174..ee4204e6374da8fd1453ccfe2232d388b523ae53 100644 --- a/hysop_examples/examples/bubble/periodic_bubble_levelset.py +++ b/hysop_examples/examples/bubble/periodic_bubble_levelset.py @@ -1,7 +1,7 @@ ## HySoP Example: Bubble nD -## Osher1995 (first part): -## A Level Set Formulation of Eulerian Interface Capturing Methods for +## Osher1995 (first part): +## A Level Set Formulation of Eulerian Interface Capturing Methods for ## Incompressible Fluid flows. import os @@ -15,7 +15,7 @@ def init_velocity(data, **kwds): def init_rho(data, **kwds): data[...] = 0.0 - + def init_mu(data, **kwds): data[...] = 0.0 @@ -53,7 +53,7 @@ def compute(args): from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \ ComputeGranularity, Interpolation - + from hysop.symbolic import sm, space_symbols from hysop.symbolic.base import SymbolicTensor from hysop.symbolic.field import curl @@ -65,11 +65,11 @@ def compute(args): dim = args.ndim npts = args.npts box = Box(origin=args.box_origin, length=args.box_length, dim=dim) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) - + # Setup usual implementation specific variables impl = args.impl extra_op_kwds = {'mpi_params': mpi_params} @@ -78,49 +78,49 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. 
- + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number cl_env = get_or_create_opencl_env( mpi_params=mpi_params, - platform_id=args.cl_platform_id, + platform_id=args.cl_platform_id, device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: msg='Unknown implementation \'{}\'.'.format(impl) raise ValueError(msg) - + # Define parameters and field (time, timestep, velocity, vorticity, enstrophy) t, dt = TimeParameters(dtype=args.dtype) velo = VelocityField(domain=box, dtype=args.dtype) vorti = VorticityField(velocity=velo) phi = LevelSetField(domain=box, dtype=args.dtype) - rho = DensityField(domain=box, dtype=args.dtype) + rho = DensityField(domain=box, dtype=args.dtype) mu = ViscosityField(domain=box, dtype=args.dtype, mu=True) enstrophy = EnstrophyParameter(dtype=args.dtype) rhov = VolumicIntegrationParameter(field=rho) muv = VolumicIntegrationParameter(field=mu) - + # Symbolic fields frame = rho.domain.frame phis = phi.s(*frame.vars) rhos = rho.s(*frame.vars) mus = mu.s(*frame.vars) Ws = vorti.s(*frame.vars) - + ### Build the directional operators - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advection', pretty_name='Adv', - velocity = velo, + velocity = velo, advected_fields = (vorti, phi), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, phi: npts}, @@ -141,15 +141,15 @@ def compute(args): e0 = Assignment(pi, np.pi) e1 = Assignment(eps, 2.5*dx) e2 = Assignment(x, phis) - e3 = Assignment(H, H_eps) + e3 = Assignment(H, H_eps) e4 = Assignment(rhos, rho1 + (rho2-rho1)*H) e5 = Assignment(mus, mu1 + (mu2-mu1)*H) exprs = (e0,e1,e2,e3,e4,e5) - eval_fields = DirectionalSymbolic(name='eval_fields', - pretty_name=u'{}({},{})'.format( - phi.pretty_name.decode('utf-8'), - rho.pretty_name.decode('utf-8'), - mu.pretty_name.decode('utf-8')), + eval_fields = DirectionalSymbolic(name='eval_fields', + pretty_name='{}({},{})'.format( + phi.pretty_name, + rho.pretty_name, + mu.pretty_name), no_split=True, implementation=impl, exprs=exprs, dt=dt, @@ -161,17 +161,17 @@ def compute(args): if (dim==3): stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl, name='stretch_diffuse', - pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='SD{}'.format(vorti.pretty_name), formulation = args.stretching_formulation, viscosity = mu, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts, mu: npts}, dt=dt, **extra_op_kwds) elif (dim==2): stretch_diffuse = DirectionalDiffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='D{}'.format(vorti.pretty_name), coeffs = mu, fields = vorti, variables = {vorti: npts, mu: npts}, @@ -179,7 +179,7 @@ def compute(args): else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> External force rot(-rho*g) Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor) Fext[1] = -1.0 #-9.8196 @@ -187,7 +187,7 @@ def compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = 
Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, variables={vorti: npts, @@ -197,11 +197,11 @@ def compute(args): #> Directional splitting operator subgraph splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) splitting.push_operators(advec, eval_fields, stretch_diffuse, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, - variables={velo:npts, vorti: npts}, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + variables={velo:npts, vorti: npts}, projection=args.reprojection_frequency, implementation=impl, **extra_op_kwds) @@ -209,12 +209,12 @@ def compute(args): io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={velo: npts, - vorti: npts, + variables={velo: npts, + vorti: npts, phi: npts, - rho: npts, + rho: npts, mu: npts}) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -233,24 +233,24 @@ def compute(args): integrate_mu = Integrate(field=mu, variables={mu: npts}, parameter=muv, scaling='normalize', implementation=impl, **extra_op_kwds) - + ### Adaptive timestep operator adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-3, name='merge_dt', pretty_name='dt', ) dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, Finf=min_max_U.Finf, - equivalent_CFL=True, + equivalent_CFL=True, name='dt_cfl', pretty_name='CFL') dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF, name='dt_lcfl', pretty_name='LCFL') - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -258,11 +258,11 @@ def compute(args): } ) problem = Problem(method=method) - problem.insert(poisson, - splitting, + problem.insert(poisson, + splitting, dump_fields, integrate_enstrophy, integrate_rho, integrate_mu, - min_max_U, min_max_W, + min_max_U, min_max_W, adapt_dt) problem.build(args) @@ -270,20 +270,20 @@ def compute(args): # display the graph on the given process rank. 
if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, + simu.write_parameters(t, dt_cfl, dt_advec, dt, enstrophy, rhov, muv, - min_max_U.Finf, min_max_W.Finf, + min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, viscosity and density on all topologies Bc, Br = args.Bc, args.Br problem.initialize_field(field=velo, formula=init_velocity) @@ -293,10 +293,10 @@ def compute(args): problem.initialize_field(field=phi, formula=init_phi, Bc=Bc, Br=Br, reorder='Bc') # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -307,7 +307,7 @@ if __name__=='__main__': class PeriodicBubbleArgParser(HysopArgParser): def __init__(self): prog_name = 'periodic_bubble_levelset' - default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold') @@ -318,7 +318,7 @@ if __name__=='__main__': description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.' - + super(PeriodicBubbleArgParser, self).__init__( prog_name=prog_name, description=description, @@ -352,7 +352,7 @@ if __name__=='__main__': self._check_default(args, vars_, float, allow_none=False) self._check_positive(args, vars_, strict=False, allow_none=False) self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False) - + Bc, Br = args.Bc, args.Br if len(Bc)!=len(Br): msg='Specified {} bubble positions and {} bubble radi.' @@ -366,7 +366,7 @@ if __name__=='__main__': msg='Specified bubble radius is not a float, got {} which is of type {}.' msg=msg.format(br, type(br).__name__) self.error(msg) - + def _add_graphical_io_args(self): graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args() graphical_io.add_argument('-pp', '--plot-parameters', action='store_true', @@ -374,16 +374,16 @@ if __name__=='__main__': help=('Plot the density and viscosity integrals during simulation. 
'+ 'Simulation will stop at each time of interest and '+ 'the plot will be updated every specified freq iterations.')) - graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, + graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, dest='plot_freq', help='Plotting update frequency in terms of iterations.') - + def _check_file_io_args(self, args): super(PeriodicBubbleArgParser, self)._check_file_io_args(args) self._check_default(args, 'plot_parameters', bool, allow_none=False) self._check_default(args, 'plot_freq', int, allow_none=False) self._check_positive(args, 'plot_freq', strict=True, allow_none=False) - + def _setup_parameters(self, args): super(PeriodicBubbleArgParser, self)._setup_parameters(args) dim = args.ndim @@ -397,10 +397,10 @@ if __name__=='__main__': parser = PeriodicBubbleArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(256,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=0.51, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=0.51, dt=1e-6, cfl=0.5, lcfl=0.125, - dump_freq=0, + dump_freq=0, dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50), rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050, Bc = ((0.5,0.15,0.5),(0.5,0.45,0.5),), Br = (0.1,0.15,)) diff --git a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py index 126bf71ca0da7114e8b3827d085f5a581767a62a..8701627f5283e62dc9ecfa8d7a008aa13271ec9e 100644 --- a/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py +++ b/hysop_examples/examples/bubble/periodic_bubble_levelset_penalization.py @@ -1,7 +1,7 @@ ## HySoP Example: Bubble nD -## Osher1995 (first part): -## A Level Set Formulation of Eulerian Interface Capturing Methods for +## Osher1995 (first part): +## A Level Set Formulation of Eulerian Interface Capturing Methods for ## Incompressible Fluid flows. import os @@ -15,7 +15,7 @@ def init_velocity(data, **kwds): def init_rho(data, **kwds): data[...] = 0.0 - + def init_mu(data, **kwds): data[...] = 0.0 @@ -61,7 +61,7 @@ def compute(args): from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \ ComputeGranularity, Interpolation from hysop.numerics.odesolvers.runge_kutta import Euler, RK2, RK3, RK4 - + from hysop.symbolic import sm, space_symbols, local_indices_symbols from hysop.symbolic.base import SymbolicTensor from hysop.symbolic.field import curl @@ -74,11 +74,11 @@ def compute(args): dim = args.ndim npts = args.npts box = Box(origin=args.box_origin, length=args.box_length, dim=dim) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) - + # Setup usual implementation specific variables impl = args.impl extra_op_kwds = {'mpi_params': mpi_params} @@ -87,37 +87,37 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. 
- + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number cl_env = get_or_create_opencl_env( mpi_params=mpi_params, - platform_id=args.cl_platform_id, + platform_id=args.cl_platform_id, device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: msg='Unknown implementation \'{}\'.'.format(impl) raise ValueError(msg) - + # Define parameters and field (time, timestep, velocity, vorticity, enstrophy) t, dt = TimeParameters(dtype=args.dtype) velo = VelocityField(domain=box, dtype=args.dtype) vorti = VorticityField(velocity=velo) phi = LevelSetField(domain=box, dtype=args.dtype) _lambda = PenalizationField(domain=box, dtype=args.dtype) - rho = DensityField(domain=box, dtype=args.dtype) + rho = DensityField(domain=box, dtype=args.dtype) mu = ViscosityField(domain=box, dtype=args.dtype, mu=True) enstrophy = EnstrophyParameter(dtype=args.dtype) rhov = VolumicIntegrationParameter(field=rho) muv = VolumicIntegrationParameter(field=mu) - + # Symbolic fields frame = rho.domain.frame phis = phi.s(*frame.vars) @@ -127,7 +127,7 @@ def compute(args): Ws = vorti.s(*frame.vars) lambdas = _lambda.s(*frame.vars) dts = dt.s - + ### Build the directional operators #> Directional penalization penalization = -dts*lambdas*Us / (1+lambdas*dts) @@ -135,18 +135,18 @@ def compute(args): lhs = Ws rhs = curl(penalization, frame) exprs = Assignment.assign(lhs, rhs) - penalization = DirectionalSymbolic(name='penalization', + penalization = DirectionalSymbolic(name='penalization', implementation=impl, exprs=exprs, fixed_residue=Ws, variables={vorti: npts, velo: npts, _lambda: npts}, method={TimeIntegrator: Euler}, dt=dt, **extra_op_kwds) - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advection', pretty_name='Adv', - velocity = velo, + velocity = velo, advected_fields = (vorti, phi), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, phi: npts}, @@ -167,15 +167,15 @@ def compute(args): e0 = Assignment(pi, np.pi) e1 = Assignment(eps, 2.5*dx) e2 = Assignment(x, phis) - e3 = Assignment(H, H_eps) + e3 = Assignment(H, H_eps) e4 = Assignment(rhos, rho1 + (rho2-rho1)*H) e5 = Assignment(mus, mu1 + (mu2-mu1)*H) exprs = (e0,e1,e2,e3,e4,e5) - eval_fields = DirectionalSymbolic(name='eval_fields', - pretty_name=u'{}({},{})'.format( - phi.pretty_name.decode('utf-8'), - rho.pretty_name.decode('utf-8'), - mu.pretty_name.decode('utf-8')), + eval_fields = DirectionalSymbolic(name='eval_fields', + pretty_name='{}({},{})'.format( + phi.pretty_name, + rho.pretty_name, + mu.pretty_name), no_split=True, implementation=impl, exprs=exprs, dt=dt, @@ -187,17 +187,17 @@ def compute(args): if (dim==3): stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl, name='stretch_diffuse', - pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='SD{}'.format(vorti.pretty_name), formulation = args.stretching_formulation, viscosity = mu, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts, mu: npts}, dt=dt, **extra_op_kwds) elif (dim==2): stretch_diffuse = DirectionalDiffusion(implementation=impl, 
name='diffuse_{}'.format(vorti.name), - pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='D{}'.format(vorti.pretty_name), coeffs = mu, fields = vorti, variables = {vorti: npts, mu: npts}, @@ -205,7 +205,7 @@ def compute(args): else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> External force rot(-rho*g) Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor) Fext[1] = -1.0 #-9.8196 @@ -213,7 +213,7 @@ def compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, variables={vorti: npts, @@ -223,11 +223,11 @@ def compute(args): #> Directional splitting operator subgraph splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) splitting.push_operators(penalization, advec, eval_fields, stretch_diffuse, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, - variables={velo:npts, vorti: npts}, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + variables={velo:npts, vorti: npts}, projection=args.reprojection_frequency, implementation=impl, **extra_op_kwds) @@ -235,17 +235,17 @@ def compute(args): io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={#velo: npts, - vorti: npts, + variables={#velo: npts, + vorti: npts, #phi: npts, - rho: npts, + rho: npts, #mu: npts, }) io_params = IOParams(filename='lambda', frequency=0) dump_lambda = HDF_Writer(name='dump_lambda', io_params=io_params, variables = {_lambda: npts}) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -264,7 +264,7 @@ def compute(args): integrate_mu = Integrate(field=mu, variables={mu: npts}, parameter=muv, scaling='normalize', implementation=impl, **extra_op_kwds) - + ### Adaptive timestep operator dx = np.min(np.divide(box.length, np.asarray(npts)-1)) @@ -277,21 +277,21 @@ def compute(args): max_dt = min(W0_dt, W1_dt) adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=max_dt, name='merge_dt', pretty_name='dt', ) - dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, + dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, Fmin=min_max_U.Fmin, Fmax=min_max_U.Fmax, - equivalent_CFL=True, + equivalent_CFL=True, name='dt_cfl', pretty_name='CFL') dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF, name='dt_lcfl', pretty_name='LCFL') - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. 
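# The 'penalization' DirectionalSymbolic defined earlier in this example uses the
# velocity increment -dt*lambda*u/(1 + lambda*dt), which is exactly what implicit
# (backward) Euler gives for the Brinkman term du/dt = -lambda*u; the operator then
# feeds the curl of that increment to the vorticity.  A quick scalar check of the
# closed form:
lam, dt, u0 = 1.0e4, 1.0e-3, 2.5
u_implicit = u0 / (1.0 + lam * dt)                    # solve (u1 - u0)/dt = -lam*u1 for u1
du_closed_form = -dt * lam * u0 / (1.0 + lam * dt)    # increment used by the operator
assert abs((u0 + du_closed_form) - u_implicit) < 1e-12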
method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -299,11 +299,11 @@ def compute(args): } ) problem = Problem(method=method) - problem.insert(poisson, - splitting, + problem.insert(poisson, + splitting, dump_fields, dump_lambda, integrate_enstrophy, integrate_rho, integrate_mu, - min_max_U, min_max_W, + min_max_U, min_max_W, adapt_dt) problem.build(args) @@ -311,20 +311,20 @@ def compute(args): # display the graph on the given process rank. if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, + simu.write_parameters(t, dt_cfl, dt_advec, dt, enstrophy, rhov, muv, - min_max_U.Finf, min_max_W.Finf, + min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, viscosity and density on all topologies Bc, Br = args.Bc, args.Br problem.initialize_field(field=velo, formula=init_velocity) @@ -335,10 +335,10 @@ def compute(args): problem.initialize_field(field=_lambda, formula=init_lambda) # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -349,7 +349,7 @@ if __name__=='__main__': class PeriodicBubbleArgParser(HysopArgParser): def __init__(self): prog_name = 'periodic_bubble_levelset_penalization' - default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold') @@ -360,7 +360,7 @@ if __name__=='__main__': description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.' - + super(PeriodicBubbleArgParser, self).__init__( prog_name=prog_name, description=description, @@ -394,7 +394,7 @@ if __name__=='__main__': self._check_default(args, vars_, float, allow_none=False) self._check_positive(args, vars_, strict=False, allow_none=False) self._check_default(args, ('Bc', 'Br'), tuple, allow_none=False) - + Bc, Br = args.Bc, args.Br if len(Bc)!=len(Br): msg='Specified {} bubble positions and {} bubble radi.' @@ -408,7 +408,7 @@ if __name__=='__main__': msg='Specified bubble radius is not a float, got {} which is of type {}.' msg=msg.format(br, type(br).__name__) self.error(msg) - + def _add_graphical_io_args(self): graphical_io = super(PeriodicBubbleArgParser, self)._add_graphical_io_args() graphical_io.add_argument('-pp', '--plot-parameters', action='store_true', @@ -416,16 +416,16 @@ if __name__=='__main__': help=('Plot the density and viscosity integrals during simulation. 
'+ 'Simulation will stop at each time of interest and '+ 'the plot will be updated every specified freq iterations.')) - graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, + graphical_io.add_argument('-pf', '--plot-freq', type=int, default=100, dest='plot_freq', help='Plotting update frequency in terms of iterations.') - + def _check_file_io_args(self, args): super(PeriodicBubbleArgParser, self)._check_file_io_args(args) self._check_default(args, 'plot_parameters', bool, allow_none=False) self._check_default(args, 'plot_freq', int, allow_none=False) self._check_positive(args, 'plot_freq', strict=True, allow_none=False) - + def _setup_parameters(self, args): super(PeriodicBubbleArgParser, self)._setup_parameters(args) dim = args.ndim @@ -439,10 +439,10 @@ if __name__=='__main__': parser = PeriodicBubbleArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(256,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=1.75, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=1.75, dt=1e-6, cfl=0.5, lcfl=0.125, - dump_freq=25, + dump_freq=25, dump_times=(0.0, 0.1, 0.20, 0.30, 0.325, 0.4, 0.45, 0.50), rho1=1.0, rho2=10.0, mu1=0.00025, mu2=0.00050, Bc = ((0.5,0.15,0.5),), Br = (0.1,)) diff --git a/hysop_examples/examples/bubble/periodic_jet_levelset.py b/hysop_examples/examples/bubble/periodic_jet_levelset.py index 3bc7b83db0d2920f27c66d8ee74d7b775a2e2ff9..885613e711b6564a4716eeaf1079cf75da30d9d3 100644 --- a/hysop_examples/examples/bubble/periodic_jet_levelset.py +++ b/hysop_examples/examples/bubble/periodic_jet_levelset.py @@ -1,7 +1,7 @@ ## HySoP Example: Periodic jet 2D -## Osher1995 (second part): -## A Level Set Formulation of Eulerian Interface Capturing Methods for +## Osher1995 (second part): +## A Level Set Formulation of Eulerian Interface Capturing Methods for ## Incompressible Fluid flows. import os @@ -46,7 +46,7 @@ def compute(args): from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \ ComputeGranularity, Interpolation - + from hysop.symbolic import sm, space_symbols from hysop.symbolic.base import SymbolicTensor from hysop.symbolic.field import curl @@ -58,11 +58,11 @@ def compute(args): dim = args.ndim npts = args.npts box = Box(origin=args.box_origin, length=args.box_length, dim=dim) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) - + # Setup usual implementation specific variables impl = args.impl extra_op_kwds = {'mpi_params': mpi_params} @@ -71,46 +71,46 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. 
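# Both level-set examples recover the density (and, in the penalization case, the
# viscosity) from phi through a smoothed Heaviside H with eps = 2.5*dx, e.g.
# rho = rho1 + (rho2 - rho1)*H(phi).  H_eps itself is defined outside these hunks,
# so the classical mollified Heaviside below is an assumption used only to
# illustrate the mapping:
import numpy as np

def smoothed_heaviside(x, eps):
    # 0 below -eps, 1 above +eps, smooth sine-based ramp in between.
    return np.where(x < -eps, 0.0,
           np.where(x > eps, 1.0,
                    0.5 * (1.0 + x / eps + np.sin(np.pi * x / eps) / np.pi)))

dx = 1.0 / 255                      # roughly box.length/(npts-1) for the 256-point defaults
eps = 2.5 * dx
phi = np.linspace(-0.05, 0.05, 11)
rho1, rho2 = 1.0, 10.0
rho = rho1 + (rho2 - rho1) * smoothed_heaviside(phi, eps)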
- + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env, get_device_number cl_env = get_or_create_opencl_env( mpi_params=mpi_params, - platform_id=args.cl_platform_id, + platform_id=args.cl_platform_id, device_id=box.machine_rank%get_device_number() if args.cl_device_id is None else None) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: msg='Unknown implementation \'{}\'.'.format(impl) raise ValueError(msg) - + # Define parameters and field (time, timestep, velocity, vorticity, enstrophy) t, dt = TimeParameters(dtype=args.dtype) velo = VelocityField(domain=box, dtype=args.dtype) vorti = VorticityField(velocity=velo) phi = LevelSetField(domain=box, dtype=args.dtype) - rho = DensityField(domain=box, dtype=args.dtype) + rho = DensityField(domain=box, dtype=args.dtype) enstrophy = EnstrophyParameter(dtype=args.dtype) rhov = VolumicIntegrationParameter(field=rho) - + # Symbolic fields frame = rho.domain.frame phis = phi.s(*frame.vars) rhos = rho.s(*frame.vars) Ws = vorti.s(*frame.vars) - + ### Build the directional operators - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advection', pretty_name='Adv', - velocity = velo, + velocity = velo, advected_fields = (vorti, phi), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, phi: npts}, @@ -130,13 +130,13 @@ def compute(args): e0 = Assignment(pi, np.pi) e1 = Assignment(eps, 2.5*dx) e2 = Assignment(x, phis) - e3 = Assignment(H, H_eps) + e3 = Assignment(H, H_eps) e4 = Assignment(rhos, rho1 + (rho2-rho1)*H) exprs = (e0,e1,e2,e3,e4) - eval_fields = DirectionalSymbolic(name='eval_fields', - pretty_name=u'{}({})'.format( - phi.pretty_name.decode('utf-8'), - rho.pretty_name.decode('utf-8')), + eval_fields = DirectionalSymbolic(name='eval_fields', + pretty_name='{}({})'.format( + phi.pretty_name, + rho.pretty_name), no_split=True, implementation=impl, exprs=exprs, dt=dt, @@ -146,7 +146,7 @@ def compute(args): #> Directional stretching + diffusion diffuse = DirectionalDiffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='D{}'.format(vorti.pretty_name), coeffs = (args.mu/10.0,), fields = (phi,), variables = {vorti: npts , phi: npts}, @@ -155,17 +155,17 @@ def compute(args): if (dim==3): stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl, name='stretch_diffuse', - pretty_name=u'SD{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='SD{}'.format(vorti.pretty_name), formulation = args.stretching_formulation, viscosity = args.mu, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts}, dt=dt, **extra_op_kwds) elif (dim==2): stretch_diffuse = DirectionalDiffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'D{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='D{}'.format(vorti.pretty_name), coeffs = args.mu, fields = vorti, variables = {vorti: npts}, @@ -173,7 +173,7 @@ def compute(args): else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> External force rot(-rho*g) Fext = np.zeros(shape=(dim,), dtype=object).view(SymbolicTensor) Fext[1] = +1 @@ 
-181,7 +181,7 @@ def compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, variables={vorti: npts, @@ -191,11 +191,11 @@ def compute(args): #> Directional splitting operator subgraph splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) splitting.push_operators(advec, diffuse, stretch_diffuse, eval_fields, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, - variables={velo:npts, vorti: npts}, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + variables={velo:npts, vorti: npts}, projection=args.reprojection_frequency, implementation=impl, **extra_op_kwds) @@ -203,11 +203,11 @@ def compute(args): io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={#velo: npts, - vorti: npts, + variables={#velo: npts, + vorti: npts, #phi: npts, rho: npts}) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -228,18 +228,18 @@ def compute(args): adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1e-2, name='merge_dt', pretty_name='dt', ) dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, Finf=min_max_U.Finf, - equivalent_CFL=True, + equivalent_CFL=True, name='dt_cfl', pretty_name='CFL') dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF, name='dt_lcfl', pretty_name='LCFL') - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -247,11 +247,11 @@ def compute(args): } ) problem = Problem(method=method) - problem.insert(poisson, - splitting, + problem.insert(poisson, + splitting, dump_fields, integrate_enstrophy, integrate_rho, - min_max_U, min_max_W, + min_max_U, min_max_W, adapt_dt) problem.build(args) @@ -259,31 +259,31 @@ def compute(args): # display the graph on the given process rank. 
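# The '#> External force rot(-rho*g)' blocks add the curl of the buoyancy force to
# the vorticity right-hand side.  In 2D only horizontal density gradients generate
# vorticity (the buoyancy/baroclinic torque), which a short sympy check confirms;
# the examples build the same expression with hysop.symbolic.field.curl, and g here
# is a generic gravity magnitude (the scripts use a unit force):
import sympy as sm

x, y, g = sm.symbols('x y g')
rho = sm.Function('rho')(x, y)
Fx, Fy = sm.Integer(0), -rho * g
omega_source = sm.diff(Fy, x) - sm.diff(Fx, y)    # 2D curl: dFy/dx - dFx/dy
print(omega_source)                               # -> -g*Derivative(rho(x, y), x)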
if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, - enstrophy, rhov, - min_max_U.Finf, min_max_W.Finf, + simu.write_parameters(t, dt_cfl, dt_advec, dt, + enstrophy, rhov, + min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, viscosity and density on all topologies problem.initialize_field(field=velo, formula=init_velocity) problem.initialize_field(field=vorti, formula=init_vorticity) problem.initialize_field(field=rho, formula=init_rho) - problem.initialize_field(field=phi, formula=init_phi, L=box.length) + problem.initialize_field(field=phi, formula=init_phi, L=box.length) # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -294,7 +294,7 @@ if __name__=='__main__': class PeriodicJetArgParser(HysopArgParser): def __init__(self): prog_name = 'periodic_jet' - default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Periodic Bubble Example: ', fg='blue', style='bold') @@ -305,7 +305,7 @@ if __name__=='__main__': description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method for varying densities without using a levelset function.' - + super(PeriodicJetArgParser, self).__init__( prog_name=prog_name, description=description, @@ -329,7 +329,7 @@ if __name__=='__main__': vars_ = ('rho1', 'rho2', 'mu') self._check_default(args, vars_, float, allow_none=False) self._check_positive(args, vars_, strict=False, allow_none=False) - + def _setup_parameters(self, args): super(PeriodicJetArgParser, self)._setup_parameters(args) dim = args.ndim @@ -341,10 +341,10 @@ if __name__=='__main__': parser = PeriodicJetArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(128,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=0.66, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=0.66, dt=1e-5, cfl=0.5, lcfl=0.125, - dump_freq=10, + dump_freq=10, dump_times=(0.0, 0.1, 0.3, 0.45, 0.55, 0.65), rho1=10.0, rho2=20.0, mu=0.00025) diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py index e4cc7645791a927b0e2b9ed7fe960b943513e8b4..1e990519294414af1d792743e922a0755f5cb6af 100644 --- a/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py +++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_periodic.py @@ -1,6 +1,6 @@ ## See Meiburg 2012 & 2014 ## Sediment-laden fresh water above salt water. 
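# Each example hands times_of_interest (populated from the dump_times defaults) to
# Simulation, and the bubble parsers' plotting help states that the run stops at
# each such time.  The clamping lives inside hysop's Simulation, not in the example
# scripts; the helper below is only a hypothetical sketch of the idea of never
# stepping past the next time of interest:
def clamp_dt(t, dt, times_of_interest):
    upcoming = [toi - t for toi in times_of_interest if toi > t]
    return min([dt] + upcoming)

print(clamp_dt(t=0.095, dt=1e-2, times_of_interest=(0.0, 0.1, 0.3)))   # ~0.005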
- + import numpy as np import scipy as sp import sympy as sm @@ -21,7 +21,7 @@ def delta(Ys, l0): for Yi in Ys: Y0 = Y0*Yi return 0.1*l0*(np.random.rand(*Y0.shape)-0.5) - + def init_concentration(data, coords, l0, component): assert (component==0) X = coords[-1] @@ -60,7 +60,7 @@ def compute(args): from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \ ComputeGranularity, Interpolation - + from hysop.symbolic import sm, space_symbols, local_indices_symbols from hysop.symbolic.base import SymbolicTensor from hysop.symbolic.field import curl @@ -80,7 +80,7 @@ def compute(args): dim = args.ndim npts = args.npts box = Box(origin=Xo, length=np.subtract(Xn,Xo)) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) @@ -93,17 +93,17 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. - + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env - cl_env = get_or_create_opencl_env(mpi_params=mpi_params, - platform_id=args.cl_platform_id, + cl_env = get_or_create_opencl_env(mpi_params=mpi_params, + platform_id=args.cl_platform_id, device_id=args.cl_device_id) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: @@ -117,7 +117,7 @@ def compute(args): C = Field(domain=box, name='C', dtype=args.dtype) S = Field(domain=box, name='S', dtype=args.dtype) _lambda = PenalizationField(domain=box, dtype=args.dtype) - + # Symbolic fields frame = velo.domain.frame Us = velo.s(*frame.vars) @@ -134,33 +134,33 @@ def compute(args): lhs = Ws rhs = curl(penalization, frame) exprs = Assignment.assign(lhs, rhs) - penalization = DirectionalSymbolic(name='penalization', + penalization = DirectionalSymbolic(name='penalization', implementation=impl, exprs=exprs, fixed_residue=Ws, variables={vorti: npts, velo: npts, _lambda: npts}, **extra_op_kwds) - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advec', - velocity = velo, + velocity = velo, advected_fields = (vorti,S), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, S: npts}, dt=dt, **extra_op_kwds) - + V0 = [0]*dim VP = [0]*dim VP[-1] = Vp advec_C = DirectionalAdvection(implementation=impl, name='advec_C', - velocity = velo, + velocity = velo, advected_fields = (C,), relative_velocity = VP, velocity_cfl = args.cfl, variables = {velo: npts, C: npts}, dt=dt, **extra_op_kwds) - + #> Stretch and diffuse vorticity if (dim==3): stretch_diffuse = DirectionalStretchingDiffusion(implementation=impl, @@ -168,14 +168,14 @@ def compute(args): pretty_name='sdiff', formulation = args.stretching_formulation, viscosity = 1.0, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts}, dt=dt, **extra_op_kwds) elif (dim==2): stretch_diffuse = DirectionalDiffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='diff{}'.format(vorti.pretty_name), coeffs = 1.0, fields = vorti, variables = {vorti: npts}, @@ -183,7 +183,7 @@ def compute(args): 
else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> Diffusion of S and C diffuse_S = DirectionalDiffusion(implementation=impl, name='diffuse_S', @@ -207,25 +207,25 @@ def compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, variables={vorti: npts, S: npts, C: npts}, **extra_op_kwds) - - splitting = StrangSplitting(splitting_dim=dim, + + splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) - splitting.push_operators(penalization, advec, advec_C, stretch_diffuse, + splitting.push_operators(penalization, advec, advec_C, stretch_diffuse, diffuse_S, diffuse_C, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, variables={velo:npts, vorti: npts}, implementation=impl, **extra_op_kwds) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -234,14 +234,14 @@ def compute(args): min_max_W = MinMaxFieldStatistics(field=vorti, Finf=True, implementation=impl, variables={vorti:npts}, **extra_op_kwds) - + #> Operators to dump all fields io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={velo: npts, - vorti: npts, - C: npts, + variables={velo: npts, + vorti: npts, + C: npts, S: npts, _lambda: npts}) @@ -251,7 +251,7 @@ def compute(args): view[0] = (-200.0,+200.0) view = tuple(view) io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0) - compute_mean_fields = ComputeMeanField(name='mean', + compute_mean_fields = ComputeMeanField(name='mean', fields={C: (view, axes), S: (view, axes)}, variables={C: npts, S: npts}, io_params=io_params) @@ -269,23 +269,23 @@ def compute(args): adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=max_dt, name='merge_dt', pretty_name='dt', ) - dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, + dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, Fmin=min_max_U.Fmin, Fmax=min_max_U.Fmax, - equivalent_CFL=True, + equivalent_CFL=True, relative_velocities=[V0, VP], name='dt_cfl', pretty_name='CFL') dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF, name='dt_lcfl', pretty_name='LCFL') - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -294,43 +294,43 @@ def compute(args): ) problem = Problem(method=method) - problem.insert(poisson, - splitting, + problem.insert(poisson, + splitting, dump_fields, compute_mean_fields, - min_max_U, min_max_W, + min_max_U, min_max_W, adapt_dt) problem.build(args) - + # If a visu_rank was provided, and show_graph was set, # display the graph on the given process rank. 
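# advec_C above transports the concentration C with the fluid velocity plus a
# constant settling velocity VP, and the same list of relative velocities is handed
# to adapt_dt.push_cfl_criteria, so the CFL is evaluated against the fastest
# advected quantity.  A simplified scalar sketch of that criterion (Vp's value and
# the use of a single velocity magnitude are assumptions):
def cfl_dt(cfl, dx, u_max, relative_velocities):
    fastest = max(abs(u_max) + max(abs(v) for v in vp) for vp in relative_velocities)
    return cfl * dx / fastest

dx, Vp = 1.0 / 255, 0.04
V0, VP = [0.0, 0.0], [0.0, Vp]
print(cfl_dt(cfl=0.5, dx=dx, u_max=1.2, relative_velocities=[V0, VP]))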
if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, + simu.write_parameters(t, dt_cfl, dt_advec, dt, min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, S and C on all topologies problem.initialize_field(field=velo, formula=init_velocity) problem.initialize_field(field=vorti, formula=init_vorticity) problem.initialize_field(field=C, formula=init_concentration, l0=l0) problem.initialize_field(field=S, formula=init_salinity, l0=l0) problem.initialize_field(field=_lambda, formula=init_lambda) - + # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -341,18 +341,18 @@ if __name__=='__main__': class ParticleAboveSaltArgParser(HysopArgParser): def __init__(self): prog_name = 'particle_above_salt_periodic' - default_dump_dir = '{}/hysop_examples/periodic_{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/periodic_{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Particles Above Salt Example: ', fg='blue', style='bold') description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold') - description+=colors.color('\nSediment-laden fresh water above salt water.', + description+=colors.color('\nSediment-laden fresh water above salt water.', fg='yellow') description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method in the Boussinesq approximation.' - + super(ParticleAboveSaltArgParser, self).__init__( prog_name=prog_name, description=description, @@ -369,8 +369,8 @@ if __name__=='__main__': parser = ParticleAboveSaltArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(64,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=500.0, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=500.0, dt=1e-6, cfl=0.5, lcfl=0.125, dump_times=tuple(float(x) for x in range(0,500,5)), dump_freq=0) diff --git a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py index 9f9f71677e11fa636e23d83a673badec93dd7e6b..cf3aa0b13613dea02d60b91c3abe6e44522da698 100644 --- a/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py +++ b/hysop_examples/examples/particles_above_salt/particles_above_salt_symmetrized.py @@ -1,6 +1,6 @@ ## See Meiburg 2012 & 2014 ## Sediment-laden fresh water above salt water. 
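# Both particles_above_salt examples seed the instability by jittering the initial
# C/S interface with the small zero-mean delta() helper (amplitude 0.1*l0).  A
# standalone sketch; the sharp step profile and the l0 value are assumptions, only
# delta() mirrors what the diff shows:
import numpy as np

def delta(Ys, l0):
    Y0 = 1.0
    for Yi in Ys:
        Y0 = Y0 * Yi
    return 0.1 * l0 * (np.random.rand(*Y0.shape) - 0.5)

X, Y = np.meshgrid(np.linspace(0.0, 1.0, 64),
                   np.linspace(0.0, 1.0, 64), indexing='ij')
C = np.where(X > 0.5 + delta((Y,), l0=1.5), 1.0, 0.0)   # perturbed sharp interface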
- + import numpy as np import scipy as sp import sympy as sm @@ -21,7 +21,7 @@ def delta(Ys, l0): for Yi in Ys: Y0 = Y0*Yi return 0.1*l0*(np.random.rand(*Y0.shape)-0.5) - + def init_concentration(data, coords, component, l0): assert (component==0) X = coords[-1].copy() @@ -55,7 +55,7 @@ def compute(args): from hysop.methods import SpaceDiscretization, Remesh, TimeIntegrator, \ ComputeGranularity, Interpolation - + from hysop.symbolic import sm, space_symbols, local_indices_symbols from hysop.symbolic.base import SymbolicTensor from hysop.symbolic.field import curl @@ -79,7 +79,7 @@ def compute(args): Xo = (0,0) Xn = (2400,750) box = Box(origin=Xo, length=np.subtract(Xn,Xo)) - + # Get default MPI Parameters from domain (even for serial jobs) mpi_params = MPIParams(comm=box.task_comm, task_id=box.current_task()) @@ -92,19 +92,19 @@ def compute(args): elif (impl is Implementation.OPENCL): # For the OpenCL implementation we need to setup the compute device # and configure how the code is generated and compiled at runtime. - + # Create an explicit OpenCL context from user parameters from hysop.backend.device.opencl.opencl_tools import get_or_create_opencl_env - cl_env = get_or_create_opencl_env(mpi_params=mpi_params, - platform_id=args.cl_platform_id, + cl_env = get_or_create_opencl_env(mpi_params=mpi_params, + platform_id=args.cl_platform_id, device_id=args.cl_device_id) tg = cl_env.build_typegen(args.dtype, 'dec', False, False) - + # Configure OpenCL kernel generation and tuning (already done by HysopArgParser) from hysop.methods import OpenClKernelConfig method = { OpenClKernelConfig: args.opencl_kernel_config } - + # Setup opencl specific extra operator keyword arguments extra_op_kwds['cl_env'] = cl_env else: @@ -117,7 +117,7 @@ def compute(args): vorti = VorticityField(velocity=velo) C = Field(domain=box, name='C', dtype=args.dtype) S = Field(domain=box, name='S', dtype=args.dtype) - + # Symbolic fields frame = velo.domain.frame Us = velo.s(*frame.vars) @@ -127,22 +127,22 @@ def compute(args): dts = dt.s ### Build the directional operators - #> Directional advection + #> Directional advection advec = DirectionalAdvection(implementation=impl, name='advec', - velocity = velo, + velocity = velo, advected_fields = (vorti,S), velocity_cfl = args.cfl, variables = {velo: npts, vorti: npts, S: npts}, dt=dt, **extra_op_kwds) - + # mirror sediment settling speed at Y=1200 (Y in [0..2400]) VP = [0]*dim VP[-1] = 'select({}, {}, ({})(X<{}))'.format(tg.dump(+Vp), tg.dump(-Vp), 'int' if tg.fbtype=='float' else 'long', tg.dump(1200.0)) - + V0 = [0]*dim pVP = [0]*dim mVP = [0]*dim @@ -151,33 +151,33 @@ def compute(args): advec_C = DirectionalAdvection(implementation=impl, name='advec_C', - velocity = velo, + velocity = velo, advected_fields = (C,), relative_velocity = VP, velocity_cfl = args.cfl, variables = {velo: npts, C: npts}, dt=dt, **extra_op_kwds) - + #> Stretch vorticity if (dim==3): stretch = DirectionalStretching(implementation=impl, name='stretch', pretty_name='stretch', formulation = args.stretching_formulation, - velocity = velo, + velocity = velo, vorticity = vorti, variables = {velo: npts, vorti: npts}, dt=dt, **extra_op_kwds) elif (dim==2): - stretch = None + stretch = None else: msg='Unsupported dimension {}.'.format(dim) raise RuntimeError(msg) - + #> Diffusion of vorticity, S and C diffuse_W = Diffusion(implementation=impl, name='diffuse_{}'.format(vorti.name), - pretty_name=u'diff{}'.format(vorti.pretty_name.decode('utf-8')), + pretty_name='diff{}'.format(vorti.pretty_name), nu = nu_W, 
Fin = vorti, variables = {vorti: npts}, @@ -206,7 +206,7 @@ def compute(args): lhs = Ws.diff(frame.time) rhs = curl(Fext, frame) exprs = Assignment.assign(lhs, rhs) - external_force = DirectionalSymbolic(name='Fext', + external_force = DirectionalSymbolic(name='Fext', implementation=impl, exprs=exprs, dt=dt, force_residue=0, @@ -214,16 +214,16 @@ def compute(args): S: npts, C: npts}, **extra_op_kwds) - - splitting = StrangSplitting(splitting_dim=dim, + + splitting = StrangSplitting(splitting_dim=dim, order=args.strang_order) splitting.push_operators(advec, advec_C, stretch, external_force) - + ### Build standard operators #> Poisson operator to recover the velocity from the vorticity - poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, + poisson = PoissonCurl(name='poisson', velocity=velo, vorticity=vorti, variables={velo:npts, vorti: npts}) - + #> Operator to compute the infinite norm of the velocity min_max_U = MinMaxFieldStatistics(name='min_max_U', field=velo, Finf=True, implementation=impl, variables={velo:npts}, @@ -232,14 +232,14 @@ def compute(args): min_max_W = MinMaxFieldStatistics(field=vorti, Finf=True, implementation=impl, variables={vorti:npts}, **extra_op_kwds) - + #> Operators to dump all fields io_params = IOParams(filename='fields', frequency=args.dump_freq) dump_fields = HDF_Writer(name='dump', io_params=io_params, - variables={velo: npts, - vorti: npts, - C: npts, + variables={velo: npts, + vorti: npts, + C: npts, S: npts}) #> Operator to compute and save mean fields @@ -248,7 +248,7 @@ def compute(args): view[0] = (+400.0,+800.0) view = tuple(view) io_params = IOParams(filename='horizontally_averaged_profiles', frequency=0) - compute_mean_fields = ComputeMeanField(name='mean', + compute_mean_fields = ComputeMeanField(name='mean', fields={C: (view, axes), S: (view, axes)}, variables={C: npts, S: npts}, io_params=io_params) @@ -256,23 +256,23 @@ def compute(args): ### Adaptive timestep operator adapt_dt = AdaptiveTimeStep(dt, equivalent_CFL=True, max_dt=1.0, name='merge_dt', pretty_name='dt', ) - dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, + dt_cfl = adapt_dt.push_cfl_criteria(cfl=args.cfl, Fmin=min_max_U.Fmin, Fmax=min_max_U.Fmax, - equivalent_CFL=True, + equivalent_CFL=True, relative_velocities=[V0, pVP, mVP], name='dt_cfl', pretty_name='CFL') dt_advec = adapt_dt.push_advection_criteria(lcfl=args.lcfl, Finf=min_max_W.Finf, criteria=AdvectionCriteria.W_INF, name='dt_lcfl', pretty_name='LCFL') - - ## Create the problem we want to solve and insert our + + ## Create the problem we want to solve and insert our # directional splitting subgraph and the standard operators. # The method dictionnary passed to this graph will be dispatched # accross all operators contained in the graph. method.update( - { + { ComputeGranularity: args.compute_granularity, SpaceDiscretization: args.fd_order, TimeIntegrator: args.time_integrator, @@ -281,43 +281,43 @@ def compute(args): ) problem = Problem(method=method) - problem.insert(poisson, + problem.insert(poisson, diffuse_W, diffuse_S, diffuse_C, - splitting, + splitting, dump_fields, compute_mean_fields, - min_max_U, min_max_W, + min_max_U, min_max_W, adapt_dt) problem.build(args) - + # If a visu_rank was provided, and show_graph was set, # display the graph on the given process rank. 
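# The symmetrized variant builds the relative settling velocity as the OpenCL
# expression select(+Vp, -Vp, (int)(X < 1200)), i.e. -Vp below the mid-plane of the
# 2400-long domain and +Vp above it, so the settling speed is mirrored at Y=1200.
# Numpy equivalent of that expression (Vp's value is an assumption):
import numpy as np

Vp = 0.04
X = np.linspace(0.0, 2400.0, 9)
settling = np.where(X < 1200.0, -Vp, +Vp)
print(settling)   # -> [-0.04 -0.04 -0.04 -0.04  0.04  0.04  0.04  0.04  0.04]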
if args.display_graph: problem.display(args.visu_rank) - + # Create a simulation # (do not forget to specify the t and dt parameters here) - simu = Simulation(start=args.tstart, end=args.tend, + simu = Simulation(start=args.tstart, end=args.tend, nb_iter=args.nb_iter, max_iter=args.max_iter, dt0=args.dt, times_of_interest=args.times_of_interest, t=t, dt=dt) - simu.write_parameters(t, dt_cfl, dt_advec, dt, + simu.write_parameters(t, dt_cfl, dt_advec, dt, min_max_U.Finf, min_max_W.Finf, adapt_dt.equivalent_CFL, filename='parameters.txt', precision=8) - + # Initialize vorticity, velocity, S and C on all topologies problem.initialize_field(field=velo, formula=init_velocity) problem.initialize_field(field=vorti, formula=init_vorticity) problem.initialize_field(field=C, formula=init_concentration, l0=l0) problem.initialize_field(field=S, formula=init_salinity, l0=l0) - + # Finally solve the problem - problem.solve(simu, dry_run=args.dry_run, + problem.solve(simu, dry_run=args.dry_run, debug_dumper=args.debug_dumper, checkpoint_handler=args.checkpoint_handler) - + # Finalize problem.finalize() @@ -328,18 +328,18 @@ if __name__=='__main__': class ParticleAboveSaltArgParser(HysopArgParser): def __init__(self): prog_name = 'particle_above_salt_symmetrized' - default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), + default_dump_dir = '{}/hysop_examples/{}'.format(HysopArgParser.tmp_dir(), prog_name) description=colors.color('HySoP Particles Above Salt Example: ', fg='blue', style='bold') description+=colors.color('[Meiburg 2014]', fg='yellow', style='bold') - description+=colors.color('\nSediment-laden fresh water above salt water.', + description+=colors.color('\nSediment-laden fresh water above salt water.', fg='yellow') description+='\n' description+='\nThis example focuses on a validation study for the ' description+='hybrid particle-mesh vortex method in the Boussinesq approximation.' - + super(ParticleAboveSaltArgParser, self).__init__( prog_name=prog_name, description=description, @@ -356,8 +356,8 @@ if __name__=='__main__': parser = ParticleAboveSaltArgParser() parser.set_defaults(impl='cl', ndim=2, npts=(64,), - box_origin=(0.0,), box_length=(1.0,), - tstart=0.0, tend=500.0, + box_origin=(0.0,), box_length=(1.0,), + tstart=0.0, tend=500.0, dt=1e-6, cfl=0.5, lcfl=0.125, dump_times=tuple(float(x) for x in range(0,500,5)), dump_freq=0) diff --git a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py index 0616e7240792c2fa8294ba2a53e6be1c86288ef5..14a1c1a6868b74916bc4dfd05336ecfed1e5a54b 100644 --- a/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py +++ b/hysop_examples/examples/sediment_deposit/sediment_deposit_levelset.py @@ -241,9 +241,9 @@ def compute(args): #e = Assignment(Ss, 0.5*LogicalLE(phis, 0)) exprs = (e,) eval_fields = DirectionalSymbolic(name='eval_fields', - pretty_name=u'{}({})'.format( - phi.pretty_name.decode('utf-8'), - S.pretty_name.decode('utf-8')), + pretty_name='{}({})'.format( + phi.pretty_name, + S.pretty_name), no_split=True, implementation=impl, exprs=exprs, dt=dt,