PRAgMaTIc (master branch)
adaptivity.py
1 #!/usr/bin/env python
2 
3 # Copyright (C) 2010 Imperial College London and others.
4 #
5 # Please see the AUTHORS file in the main source directory for a
6 # full list of copyright holders.
7 #
8 # Gerard Gorman
9 # Applied Modelling and Computation Group
10 # Department of Earth Science and Engineering
11 # Imperial College London
12 #
13 # g.gorman@imperial.ac.uk
14 #
15 # Redistribution and use in source and binary forms, with or without
16 # modification, are permitted provided that the following conditions
17 # are met:
18 # 1. Redistributions of source code must retain the above copyright
19 # notice, this list of conditions and the following disclaimer.
20 # 2. Redistributions in binary form must reproduce the above
21 # copyright notice, this list of conditions and the following
22 # disclaimer in the documentation and/or other materials provided
23 # with the distribution.
24 #
25 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
26 # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
27 # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
30 # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
32 # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
34 # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
36 # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 # SUCH DAMAGE.
38 #
39 # Many thanks to:
40 # Patrick Farrell for mesh based metric used for coarsening.
41 # James Maddinson for the original version of Dolfin interface.
42 # Davide Longoni for p-norm function.
43 # Kristian E. Jensen for ellipse function, test cases, vectorization opt., 3D glue
44 
45 """@package PRAgMaTIc
46 
47 The python interface to PRAgMaTIc (Parallel anisotRopic Adaptive Mesh
48 ToolkIt) provides anisotropic mesh adaptivity for meshes of
49 simplexes. The target applications are finite element and finite
50 volume methods, although it can also be used as a lossy compression
51 algorithm for data (e.g. image compression). It takes as its input the
52 mesh and a metric tensor field which encodes the desired mesh element size
53 anisotropically.
54 """
55 
56 import ctypes, ctypes.util, numpy, scipy.sparse, scipy.sparse.linalg
57 from numpy import array, zeros, ones, any, arange, isnan
58 from numpy.linalg import eig as pyeig
59 from dolfin import *
import dolfin # the star import does not bind the module name, which is needed for the dolfin.__version__ checks below
from time import time # wall-clock timings used in set_mesh() and adapt()
60 
61 __all__ = ["_libpragmatic",
62  "InvalidArgumentException",
63  "LibraryException",
64  "NotImplementedException",
65  "ParameterException",
66  "adapt",
67  "edge_lengths",
68  "mesh_metric",
69  "refine_metric",
70  "metric_pnorm"]
71 
72 class InvalidArgumentException(TypeError):
73  pass
74 class LibraryException(SystemError):
75  pass
76 class NotImplementedException(Exception):
77  pass
78 class ParameterException(Exception):
79  pass
80 
81 try:
82  _libpragmatic = ctypes.cdll.LoadLibrary("libpragmatic.so")
83 except OSError:
84  raise LibraryException("Failed to load libpragmatic.so")
85 
86 def refine_metric(M, factor):
87  class RefineExpression(Expression):
88  def eval(self, value, x):
89  value[:] = M(x) * (factor * factor)
90  return
91  def value_shape(self):
92  return M.ufl_shape
93 
94  space = M.function_space()
95  M2 = interpolate(RefineExpression(), space)
96  name = "mesh_metric_refined_x%.6g" % factor
97  M2.rename(name, name)
98 
99  return M2
100 
101 def edge_lengths(M):
102  class EdgeLengthExpression(Expression):
103  def eval(self, value, x):
104  mat = M(x)
105  mat.shape = (2, 2)
106  evals, evecs = numpy.linalg.eig(mat)
107  value[:] = 1.0 / numpy.sqrt(evals)
108  return
109  def value_shape(self):
110  return (2,)
111  e = interpolate(EdgeLengthExpression(), VectorFunctionSpace(M.function_space().mesh(), "CG", 1))
112  name = "%s_edge_lengths" % M.name()
113  e.rename(name, name)
114 
115  return e
116 
117 def polyhedron_surfmesh(mesh):
118  #this function calculates a surface mesh assuming a polyhedral geometry, i.e. it is not suitable
119  #for curved geometries, and the output will have to be modified for problems with colinear faces.
120  #a surface mesh is required for the adaptation, so this function is called if no surface mesh
121  #is provided by the user, but the user can also call this function herself, modify the output
122  #and provide the modified surface mesh to adapt() (see the usage sketch after this function)
123  #INPUT : DOLFIN MESH
124  #OUTPUT: bfaces.shape = (n,3)
125  #OUTPUT: IDs.shape = (n,)
126  cells = mesh.cells()
127  coords = mesh.coordinates()
128  ntri = len(cells)
129  v1 = coords[cells[:,1],:]-coords[cells[:,0],:]
130  v2 = coords[cells[:,2],:]-coords[cells[:,0],:]
131  v3 = coords[cells[:,3],:]-(coords[cells[:,0],:]+coords[cells[:,1],:]+coords[cells[:,2],:])/3.
132  crossprod = array([v1[:,1]*v2[:,2]-v1[:,2]*v2[:,1], \
133  v1[:,2]*v2[:,0]-v1[:,0]*v2[:,2], \
134  v1[:,0]*v2[:,1]-v1[:,1]*v2[:,0]]).T
135  badcells = (crossprod*v3).sum(1)>0
136  tri = cells.flatten()
137  R = range(0,4*ntri,4); m1 = ones(ntri,dtype=numpy.int64)
138  tri = array([tri[R+badcells],tri[R+m1-badcells],tri[R+2*m1],tri[R+3*m1]])
139  fac = array([tri[0],tri[1],tri[2],tri[3],\
140  tri[1],tri[0],tri[3],tri[2],\
141  tri[3],tri[2],tri[1],tri[0]]).reshape([3,ntri*4])
142  #putting large node number in later row, smaller in first
143  C = fac.argsort(0)
144  Cgood = (C[0,:] == 0)*(C[0,:] == 1)+(C[1,:] == 1)*(C[0,:] == 2)+(C[1,:] == 2)*(C[0,:] == 0)
145  Cinv = Cgood==False
146  R = arange(ntri*4)
147  fac = fac.transpose().flatten()
148  fac = array([fac[R*3+C[0,:]],fac[R*3+C[1,:]],fac[R*3+C[2,:]]])
149  #sort according to first node number (with fall back to second node number)
150  I2 = numpy.argsort(array(zip(fac[0,:],fac[1,:],fac[2,:]),dtype=[('e1',int),('e2',int),('e3',int)]),order=['e1','e2','e3'])
151  fac = fac[:,I2]
152  #find unique faces
153  d = array([any(fac[:,0] != fac[:,1])] +\
154  (any((fac[:,range(1,ntri*4-1)] != fac[:,range(2,ntri*4)]),0) *\
155  any((fac[:,range(1,ntri*4-1)] != fac[:,range(0,ntri*4-2)]),0)).tolist() +\
156  [any(fac[:,ntri*4-1] != fac[:,ntri*4-2])])
157  fac = fac[:,d]
158  #rearrange face to correct orientation
159  R = arange(sum(d))
160  fac = fac.transpose().flatten()
161  bfaces = array([fac[R*3+Cgood[I2[d]]],fac[R*3+Cinv[I2[d]]],fac[R*3+2]]).transpose()
162  # compute normal vector (n)
163  v1 = coords[bfaces[:,1],:]-coords[bfaces[:,0],:]
164  v2 = coords[bfaces[:,2],:]-coords[bfaces[:,0],:]
165  n = array([v1[:,1]*v2[:,2]-v1[:,2]*v2[:,1], \
166  v1[:,2]*v2[:,0]-v1[:,0]*v2[:,2], \
167  v1[:,0]*v2[:,1]-v1[:,1]*v2[:,0]]).T
168  n = n/numpy.sqrt(n[:,0]**2+n[:,1]**2+n[:,2]**2).repeat(3).reshape([len(n),3])
169  # compute sets of co-linear faces (this is specific to polyhedral geometries)
170  IDs = zeros(len(n), dtype = numpy.intc)
171  while True:
172  nn = IDs.argmin()
173  IDs[nn] = IDs.max() + 1
174  I = arange(0,len(IDs))
175  notnset = I != nn * ones(len(I),dtype=numpy.int64)
176  dists = abs(n[notnset,0]*(coords[bfaces[notnset,0],0] - ones(sum(notnset))*coords[bfaces[nn,0],0])+\
177  n[notnset,1]*(coords[bfaces[notnset,0],1] - ones(sum(notnset))*coords[bfaces[nn,0],1])+\
178  n[notnset,2]*(coords[bfaces[notnset,0],2] - ones(sum(notnset))*coords[bfaces[nn,0],2])) < 1e-12
179  angles = ones(sum(notnset))-abs(n[notnset,0]*n[nn,0]+n[notnset,1]*n[nn,1]+n[notnset,2]*n[nn,2])<1e-12 # angles = arccos(abs(t[notnset,0]*t[n,0]+t[notnset,1]*t[n,1]))<1e-12
180  IDs[I[notnset][angles*dists]] = IDs[nn]
181  if all(IDs != zeros(len(IDs),dtype=numpy.int64)):
182  info("Found %i co-linear faces" % IDs.max())
183  break
184 
185  #compatibility fixes
186  IDs += 1
187  bfaces_pair = zip(bfaces[:,0],bfaces[:,1],bfaces[:,2])
188  return [bfaces,IDs]
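# Usage sketch (assumes a legacy DOLFIN build matching this module; the metric is illustrative).
# It follows the workflow described above: extract the polyhedral surface mesh, optionally edit
# the face IDs, and pass both to adapt():
#
#   mesh = UnitCubeMesh(10, 10, 10)
#   [bfaces, bfaces_IDs] = polyhedron_surfmesh(mesh)   # a box yields six co-planar face groups
#   # ...edit bfaces_IDs here if the geometry requires it...
#   # new_mesh = adapt(metric, bfaces=bfaces, bfaces_IDs=bfaces_IDs)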
189 
190 def polygon_surfmesh(mesh):
191  #calculates a surface mesh assuming a polygonal geometry, i.e. not suitable for
192  #curved geometries and the output will have to be modified for problems with colinear faces.
193  #a surface mesh is required for the adaptation, so this function is called if no surface mesh
194  #is provided by the user, but the user can also call this function herself, modify the output
195  #and provide the modified surface mesh to adapt() (see the usage sketch after this function)
196  #INPUT : DOLFIN MESH
197  #OUTPUT: bfaces.shape = (n,2)
198  #OUTPUT: IDs.shape = (n,)
199  cells = mesh.cells()
200  coords = mesh.coordinates()
201  ntri = len(cells)
202  v1 = coords[cells[:,1],:]-coords[cells[:,0],:]
203  v2 = coords[cells[:,2],:]-coords[cells[:,0],:]
204  badcells = v1[:,0]*v2[:,1]-v1[:,1]*v2[:,0]>0
205  tri = cells.flatten()
206  R = range(0,3*ntri,3); m1 = ones(ntri,dtype=numpy.int64)
207  tri = array([tri[R+badcells],tri[R+m1-badcells],tri[R+2*m1]])
208  edg = array([tri[0],tri[1],tri[2],\
209  tri[1],tri[2],tri[0]]).reshape([2,ntri*3])
210  #putting large node number in later row
211  C = edg.argsort(0)
212  R = arange(ntri*3)
213  edg = edg.transpose().flatten()
214  edg = array([edg[R*2+C[0,:]],edg[R*2+C[1,:]]])
215  #sort according to first node number (with fall back to second node number)
216  I2 = numpy.argsort(array(zip(edg[0,:],edg[1,:]),dtype=[('e1',int),('e2',int)]),order=['e1','e2'])
217  edg = edg[:,I2]
218  #find unique edges
219  d = array([any(edg[:,0] != edg[:,1])] +\
220  (any((edg[:,range(1,ntri*3-1)] != edg[:,range(2,ntri*3)]),0) *\
221  any((edg[:,range(1,ntri*3-1)] != edg[:,range(0,ntri*3-2)]),0)).tolist() +\
222  [any(edg[:,ntri*3-1] != edg[:,ntri*3-2])])
223  edg = edg[:,d]
224  #put correct node number back in later row
225  R = arange(sum(d))
226  edg = edg.transpose().flatten()
227  bfaces = array([edg[R*2+C[0,I2[d]]],edg[R*2+C[1,I2[d]]]]).transpose()
228  # compute normalized tangent (t)
229  t = coords[bfaces[:,0],:]-coords[bfaces[:,1],:]
230  t = t/numpy.sqrt(t[:,0]**2+t[:,1]**2).repeat(2).reshape([len(t),2])
231  # compute sets of co-linear edges (this is specific to polygonal geometries)
232  IDs = zeros(len(t), dtype = numpy.intc)
233  while True:
234  n = IDs.argmin()
235  IDs[n] = IDs.max() + 1
236  I = arange(0,len(IDs))
237  notnset = I != n * ones(len(I),dtype=numpy.int64)
238  dists = abs(t[notnset,1]*(coords[bfaces[notnset,0],0] - ones(sum(notnset))*coords[bfaces[n,0],0])-\
239  t[notnset,0]*(coords[bfaces[notnset,0],1] - ones(sum(notnset))*coords[bfaces[n,0],1])) < 1e-12
240  angles = ones(sum(notnset))-abs(t[notnset,0]*t[n,0]+t[notnset,1]*t[n,1])<1e-12 # angles = arccos(abs(t[notnset,0]*t[n,0]+t[notnset,1]*t[n,1]))<1e-12
241  IDs[I[notnset][angles*dists]] = IDs[n]
242  if all(IDs != zeros(len(IDs),dtype=numpy.int64)):
243  info("Found %i co-linear edges" % IDs.max())
244  break
245  #compatibility fixes
246  IDs += 1
247  #bfaces_pair = zip(bfaces[:,0],bfaces[:,1])
248  return [bfaces,IDs]
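# Usage sketch for the 2D case (assumes legacy DOLFIN; the metric and the ID remapping are
# illustrative). The colinear-edge IDs can be remapped before being handed to adapt(), which is
# the intended way to handle boundaries that the co-linearity test splits too finely:
#
#   mesh = UnitSquareMesh(20, 20)
#   [bfaces, bfaces_IDs] = polygon_surfmesh(mesh)   # one ID per side of the square
#   bfaces_IDs[bfaces_IDs == 4] = 3                 # example: merge two boundary IDs into one
#   # new_mesh = adapt(metric, bfaces=bfaces, bfaces_IDs=bfaces_IDs)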
249 
250 def set_mesh(n_xy, n_enlist, mesh=None, dx=None, debugon=False):
251  #this function generates a 1D, 2D or 3D DOLFIN mesh given coordinates (n_xy) and cells (n_enlist).
252  #it is used in the adaptation, but can also be used for debugging, e.g. if one saves the
253  #mesh coordinates and cells using pickle between iterations (see the sketch after this function).
254  #INPUT : n_xy.shape = (1,N), (2,N) or (3,N) for 1D, 2D and 3D, respectively.
255  #INPUT : n_enlist.shape = (2*M,), (3*M,) or (4*M,) correspondingly
256  #INPUT : mesh is the old mesh, used for checking area/volume conservation
257  #INPUT : dx, the integration measure used for the area/volume conservation check
258  #INPUT : debugon, flag for checking area/volume preservation,
259  # should be off for curved geometries.
260  #OUTPUT: DOLFIN MESH
261  startTime = time()
262  nvtx = n_xy.shape[1]
263  n_mesh = Mesh()
264  ed = MeshEditor()
265  ed.open(n_mesh, len(n_xy), len(n_xy))
266  ed.init_vertices(nvtx) #n_NNodes.value
267  if len(n_xy) == 1:
268  for i in range(nvtx):
269  ed.add_vertex(i, n_xy[0,i])
270  ed.init_cells(len(n_enlist)/2)
271  for i in range(len(n_enlist)/2): #n_NElements.value
272  ed.add_cell(i, n_enlist[i * 2], n_enlist[i * 2 + 1])
273  elif len(n_xy) == 2:
274  for i in range(nvtx): #n_NNodes.value
275  ed.add_vertex(i, n_xy[0,i], n_xy[1,i])
276  ed.init_cells(len(n_enlist)/3) #n_NElements.value
277  for i in range(len(n_enlist)/3): #n_NElements.value
278  ed.add_cell(i, n_enlist[i * 3], n_enlist[i * 3 + 1], n_enlist[i * 3 + 2])
279  else: #3D
280  for i in range(nvtx): #n_NNodes.value
281  ed.add_vertex(i, n_xy[0,i], n_xy[1,i], n_xy[2,i])
282  ed.init_cells(len(n_enlist)/4) #n_NElements.value
283  for i in range(len(n_enlist)/4): #n_NElements.value
284  ed.add_cell(i, n_enlist[i * 4], n_enlist[i * 4 + 1], n_enlist[i * 4 + 2], n_enlist[i * 4 + 3])
285  ed.close()
286  info("mesh definition took %0.1fs (not vectorized)" % (time()-startTime))
287  if debugon==True and dx is not None:
288  # Sanity check to be deleted or made optional
289  area = assemble(interpolate(Constant(1.0),FunctionSpace(mesh,'DG',0)) * dx)
290  n_area = assemble(interpolate(Constant(1.0),FunctionSpace(n_mesh,'DG',0)) * dx)
291  err = abs(area - n_area)
292  info("Donor mesh area : %.17e" % area)
293  info("Target mesh area: %.17e" % n_area)
294  info("Change : %.17e" % err)
295  info("Relative change : %.17e" % (err / area))
296 
297  assert(err < 2.0e-11 * area)
298  return n_mesh
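# Debugging sketch (the file name and the variables n_xy/n_enlist are illustrative): as noted
# above, set_mesh() can rebuild a mesh from coordinates and cells saved with pickle between
# adaptation iterations:
#
#   import pickle
#   pickle.dump([n_xy, n_enlist], open('mesh_dump.pkl', 'wb'))    # inside a run
#   [n_xy, n_enlist] = pickle.load(open('mesh_dump.pkl', 'rb'))   # later, offline
#   mesh = set_mesh(n_xy, n_enlist)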
299 
300 
301 def impose_maxN(metric, maxN):
302  #enforces an upper complexity constraint on the metric
303  #INPUT : metric, DOLFIN SPD TENSOR VARIABLE
304  #INPUT : maxN upper complexity (~ number of nodes) constraint
305  #OUTPUT: metric, DOLFIN SPD TENSOR VARIABLE
306  #OUTPUT: fak, factor by which the metric was coarsened - useful for
307  #throwing warnings
308  gdim = metric.function_space().ufl_element().cell().geometric_dimension()
309  targetN = assemble(sqrt(det(metric))*dx)
310  fak = 1.
311  if targetN > maxN:
312  fak = (targetN/maxN)**(2./gdim) #dividing the metric by fak scales its complexity by fak**(-gdim/2)
313  metric.vector().set_local(metric.vector().array()/fak)
314  info('metric coarsened to meet target node number')
315  return [metric,fak]
316 
317 def adapt(metric, bfaces=None, bfaces_IDs=None, debugon=True, eta=1e-2, grada=None, maxN=None):
318  #this is the actual adapt function. It currently works with vertex
319  #numbers rather than DOF numbers, but as of DOLFIN.__VERSION__ >= "1.3.0",
320  #there is no difference.
321  # INPUT : metric is a DG0 or CG1 SPD DOLFIN TENSOR VARIABLE or
322  # a DOLFIN SCALAR VARIABLE. In the latter case metric_pnorm is called
323  # to calculate a DOLFIN CG1 TENSOR VARIABLE
324  # INPUT : bfaces.shape = (n,2) or bfaces.shape = (n,3) is a list of edges or
325  # faces for the mesh boundary. If not specified, it will be calculated
326  # using the polygon_surfmesh or polyhedron_surfmesh functions.
327  # If specified, it should be together with
328  # INPUT : bfaces_IDs.shape = (n,), which gives each edge or face an ID, so
329  # that corners are implicitly specified in 2D and edges in 3D.
330  # All corners can be specified this way in 3D, but it can require
331  # definition of dummy IDs, if the corner is in the middle of a face and
332  # thus only related to two IDs.
333  # INPUT : debugon=True (default) checks for conservation of area/volume
334  # INPUT : eta is the scaling factor used, if the metric input is a
335  # SCALAR DOLFIN FUNCTION
336  # INPUT : grada enables gradation of the input metric, (1 for slight gradation,
337  # 2 for more etc... off by default)
338  # INPUT : maxN facilitates rescaling of the input metric to meet a
339  # mesh complexity constraint (~number of nodes). This can prevent
340  # OUT OF MEMORY ERRORS in the context of direct solvers, but it can
341  # also be a headache for convergence analysis, which is why a warning
342  # is thrown if the constraint is active
343  # OUTPUT: DOLFIN MESH
344  mesh = metric.function_space().mesh()
345 
346  #if the input is a scalar field rather than a metric, derive a metric from it
347  if metric.function_space().ufl_element().num_sub_elements() == 0:
348  metric = metric_pnorm(metric, eta=eta, CG1out=True)
349 
350  if metric.function_space().ufl_element().degree() == 0 and metric.function_space().ufl_element().family()[0] == 'D':
351  metric = project(metric, TensorFunctionSpace(mesh, "CG", 1)) #metric = logproject(metric)
352  metric = fix_CG1_metric(metric) #fixes negative eigenvalues
353  if grada is not None:
354  metric = gradate(metric,grada)
355  if maxN is not None:
356  [metric,fak] = impose_maxN(metric, maxN)
357 
358  # warn before generating huge mesh
359  targetN = assemble(sqrt(det(metric))*dx)
360  if targetN < 1e6:
361  info("target mesh has %0.0f nodes" % targetN)
362  else:
363  warning("target mesh has %0.0f nodes" % targetN)
364 
365  space = metric.function_space() #FunctionSpace(mesh, "CG", 1)
366  element = space.ufl_element()
367 
368  # Sanity checks
369  if not (mesh.geometry().dim() == 2 or mesh.geometry().dim() == 3)\
370  or not (element.cell().geometric_dimension() == 2 \
371  or element.cell().geometric_dimension() == 3) \
372  or not (element.cell().topological_dimension() == 2 \
373  or element.cell().topological_dimension() == 3) \
374  or not element.family() == "Lagrange" \
375  or not element.degree() == 1:
376  raise InvalidArgumentException("Require a 2D or 3D P1 function space for the metric tensor field")
377 
378  nodes = array(range(0,mesh.num_vertices()),dtype=numpy.intc)
379  cells = mesh.cells()
380  coords = mesh.coordinates()
381  # create boundary mesh and associated list of co-linear edges
382  if bfaces is None:
383  if element.cell().geometric_dimension() == 2:
384  [bfaces,bfaces_IDs] = polygon_surfmesh(mesh)
385  else:
386  [bfaces,bfaces_IDs] = polyhedron_surfmesh(mesh)
387 
388  x = coords[nodes,0]
389  y = coords[nodes,1]
390  if element.cell().geometric_dimension() == 3:
391  z = coords[nodes,2]
392  cells = array(cells,dtype=numpy.intc)
393 
394  # Dolfin stores the tensor as:
395  # |dxx dxy|
396  # |dyx dyy|
397  ## THE (CG1-)DOF NUMBERS ARE DIFFERENT FROM THE VERTEX NUMBERS (and we wish to work with the former)
398  if dolfin.__version__ != '1.2.0':
399  dof2vtx = vertex_to_dof_map(FunctionSpace(mesh, "CG" ,1))
400  else:
401  dof2vtx = FunctionSpace(mesh,'CG',1).dofmap().vertex_to_dof_map(mesh).argsort()
402 
403  metric_arr = numpy.empty(metric.vector().array().size, dtype = numpy.float64)
404  if element.cell().geometric_dimension() == 2:
405  metric_arr[range(0,metric.vector().array().size,4)] = metric.vector().array()[arange(0,metric.vector().array().size,4)[dof2vtx]]
406  metric_arr[range(1,metric.vector().array().size,4)] = metric.vector().array()[arange(2,metric.vector().array().size,4)[dof2vtx]]
407  metric_arr[range(2,metric.vector().array().size,4)] = metric.vector().array()[arange(2,metric.vector().array().size,4)[dof2vtx]]
408  metric_arr[range(3,metric.vector().array().size,4)] = metric.vector().array()[arange(3,metric.vector().array().size,4)[dof2vtx]]
409  else:
410  metric_arr[range(0,metric.vector().array().size,9)] = metric.vector().array()[arange(0,metric.vector().array().size,9)[dof2vtx]]
411  metric_arr[range(1,metric.vector().array().size,9)] = metric.vector().array()[arange(3,metric.vector().array().size,9)[dof2vtx]]
412  metric_arr[range(2,metric.vector().array().size,9)] = metric.vector().array()[arange(6,metric.vector().array().size,9)[dof2vtx]]
413  metric_arr[range(3,metric.vector().array().size,9)] = metric.vector().array()[arange(3,metric.vector().array().size,9)[dof2vtx]]
414  metric_arr[range(4,metric.vector().array().size,9)] = metric.vector().array()[arange(4,metric.vector().array().size,9)[dof2vtx]]
415  metric_arr[range(5,metric.vector().array().size,9)] = metric.vector().array()[arange(7,metric.vector().array().size,9)[dof2vtx]]
416  metric_arr[range(6,metric.vector().array().size,9)] = metric.vector().array()[arange(6,metric.vector().array().size,9)[dof2vtx]]
417  metric_arr[range(7,metric.vector().array().size,9)] = metric.vector().array()[arange(7,metric.vector().array().size,9)[dof2vtx]]
418  metric_arr[range(8,metric.vector().array().size,9)] = metric.vector().array()[arange(8,metric.vector().array().size,9)[dof2vtx]]
419  info("Beginning PRAgMaTIc adapt")
420  info("Initialising PRAgMaTIc ...")
421  NNodes = ctypes.c_int(x.shape[0])
422 
423  NElements = ctypes.c_int(cells.shape[0])
424 
425  if element.cell().geometric_dimension() == 2:
426  _libpragmatic.pragmatic_2d_init(ctypes.byref(NNodes),
427  ctypes.byref(NElements),
428  cells.ctypes.data,
429  x.ctypes.data,
430  y.ctypes.data)
431  else:
432  _libpragmatic.pragmatic_3d_init(ctypes.byref(NNodes),
433  ctypes.byref(NElements),
434  cells.ctypes.data,
435  x.ctypes.data,
436  y.ctypes.data,
437  z.ctypes.data)
438  info("Setting surface ...")
439  nfacets = ctypes.c_int(len(bfaces))
440  facets = array(bfaces.flatten(),dtype=numpy.intc)
441 
442  _libpragmatic.pragmatic_set_boundary(ctypes.byref(nfacets),
443  facets.ctypes.data,
444  bfaces_IDs.ctypes.data)
445 
446  info("Setting metric tensor field ...")
447  _libpragmatic.pragmatic_set_metric(metric_arr.ctypes.data)
448 
449  info("Entering adapt ...")
450  startTime = time()
451  _libpragmatic.pragmatic_adapt()
452 
453  info("adapt took %0.1fs" % (time()-startTime))
454  n_NNodes = ctypes.c_int()
455  n_NElements = ctypes.c_int()
456  n_NSElements = ctypes.c_int()
457 
458  info("Querying output ...")
459  _libpragmatic.pragmatic_get_info(ctypes.byref(n_NNodes),
460  ctypes.byref(n_NElements),
461  ctypes.byref(n_NSElements))
462 
463  if element.cell().geometric_dimension() == 2:
464  n_enlist = numpy.empty(3 * n_NElements.value, numpy.intc)
465  else:
466  n_enlist = numpy.empty(4 * n_NElements.value, numpy.intc)
467 
468  info("Extracting output ...")
469  n_x = numpy.empty(n_NNodes.value)
470  n_y = numpy.empty(n_NNodes.value)
471  if element.cell().geometric_dimension() == 3:
472  n_z = numpy.empty(n_NNodes.value)
473  _libpragmatic.pragmatic_get_coords_3d(n_x.ctypes.data,
474  n_y.ctypes.data,
475  n_z.ctypes.data)
476  else:
477  _libpragmatic.pragmatic_get_coords_2d(n_x.ctypes.data,
478  n_y.ctypes.data)
479 
480  _libpragmatic.pragmatic_get_elements(n_enlist.ctypes.data)
481 
482  info("Finalising PRAgMaTIc ...")
483  _libpragmatic.pragmatic_finalize()
484  info("PRAgMaTIc adapt complete")
485 
486  if element.cell().geometric_dimension() == 2:
487  n_mesh = set_mesh(array([n_x,n_y]),n_enlist,mesh=mesh,dx=dx,debugon=debugon)
488  else:
489  n_mesh = set_mesh(array([n_x,n_y,n_z]),n_enlist,mesh=mesh,dx=dx,debugon=debugon)
490 
491  return n_mesh
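# Minimal end-to-end sketch (assumes legacy DOLFIN under Python 2; the Expression string is
# purely illustrative). A scalar CG2 field is passed directly to adapt(), which converts it to
# a metric via metric_pnorm() using the given eta:
#
#   mesh = UnitSquareMesh(20, 20)
#   V = FunctionSpace(mesh, "CG", 2)
#   f = interpolate(Expression("tanh(20*(x[1]-0.5-0.25*sin(2*pi*x[0])))"), V)
#   new_mesh = adapt(f, eta=1e-2)
#
# Alternatively a CG1 SPD tensor metric (e.g. from metric_pnorm, metric_ellipse or logproject)
# can be passed as the first argument instead of a scalar field.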
492 
493 def consistent_interpolation(mesh, fields=[]):
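 #interpolates the given fields onto the supplied (e.g. adapted) 2D mesh by pointwise evaluation
 #and returns the list of interpolated CG1 fields, or the mesh itself if no fields are given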
494  if not isinstance(fields, list):
495  return consistent_interpolation(mesh, [fields])
496 
497  n_space = FunctionSpace(mesh, "CG", 1)
498  n_fields = []
499  for field in fields:
500  n_field = Function(n_space)
501  n_field.rename(field.name(), field.name())
502  val = numpy.empty(1)
503  coord = numpy.empty(2)
504  nx = interpolate(Expression("x[0]"), n_space).vector().array()
505  ny = interpolate(Expression("x[1]"), n_space).vector().array()
506  n_field_arr = numpy.empty(mesh.num_vertices())
507  for i in range(mesh.num_vertices()):
508  coord[0] = nx[i]
509  coord[1] = ny[i]
510  field.eval(val, coord)
511  n_field_arr[i] = val
512  n_field.vector().set_local(n_field_arr)
513  n_field.vector().apply("insert")
514  n_fields.append(n_field)
515 
516  if len(n_fields) > 0:
517  return n_fields
518  else:
519  return mesh
520 
521 def fix_CG1_metric(Mp):
522  #makes the eigenvalues of a metric positive (this property can be lost during
523  #the projection step)
524  #INPUT and OUTPUT: DOLFIN TENSOR VARIABLE
525  [H,cell2dof] = get_dofs(Mp)
526  [eigL,eigR] = analytic_eig(H)
527 # if any(lambda1<zeros(len(lambda2))) or any(lambda2<zeros(len(lambda2))):
528 # warning('negative eigenvalue in metric fixed')
529  eigL = numpy.abs(eigL)
530  H = analyt_rot(fulleig(eigL),eigR)
531  out = sym2asym(H).transpose().flatten()
532  Mp.vector().set_local(out)
533  return Mp
534 
535 def metric_pnorm(f, eta, max_edge_length=None, min_edge_length=None, max_edge_ratio=10, p=2, CG1out=False, CG0H=3):
536  # p-norm scaling to the metric, as in Chen, Sun and Xu, Mathematics of
537  # Computation, Volume 76, Number 257, January 2007, pp. 179-204.
538  # INPUT : f, SCALAR DOLFIN VARIABLE
539  # INPUT : eta, scaling factor (0.04-0.005 are good values for engineering tolerances)
540  # INPUT : max_edge_length is an optional upper bound on the edge length, i.e. a lower bound on the metric eigenvalues
541  # INPUT : min_edge_length is an optional lower bound on the edge length, i.e. an upper bound on the metric eigenvalues
542  # INPUT : max_edge_ratio is an optional local lower bound on the metric eigenvalues,
543  # which enforces a maximum ratio between the smallest and largest eigenvalue
544  # INPUT : p is the interpolation norm to be minimised (defaults to 2)
545  # INPUT : CG1out enables projection of Hessian to CG1 space, such that this
546  # projection does not have to be performed at a later stage
547  # INPUT : CG0H controls how a DG0 Hessian is extracted from a SCALAR DOLFIN CG2 VARIABLE
548  # OUTPUT: DOLFIN (CG1 or DG0) SPD TENSOR VARIABLE
549  mesh = f.function_space().mesh()
550  # Sanity checks
551  if max_edge_ratio is not None and max_edge_ratio < 1.0:
552  raise InvalidArgumentException("The maximum edge ratio must be greater than or equal to 1")
553  else:
554  if max_edge_ratio is not None:
555  max_edge_ratio = max_edge_ratio**2 # ie we are going to be looking at eigenvalues
556 
557  n = mesh.geometry().dim()
558 
559  if f.function_space().ufl_element().degree() == 2 and f.function_space().ufl_element().family() == 'Lagrange':
560  if CG0H == 0:
561  S = VectorFunctionSpace(mesh,'DG',1) #False and
562  A = assemble(inner(TrialFunction(S), TestFunction(S))*dx)
563  b = assemble(inner(grad(f), TestFunction(S))*dx)
564  ones_ = Function(S)
565  ones_.vector()[:] = 1
566  A_diag = A * ones_.vector()
567  A_diag.set_local(1.0/A_diag.array())
568  gradf = Function(S)
569  gradf.vector()[:] = b * A_diag
570 
571  S = TensorFunctionSpace(mesh,'DG',0)
572  A = assemble(inner(TrialFunction(S), TestFunction(S))*dx)
573  b = assemble(inner(grad(gradf), TestFunction(S))*dx)
574  ones_ = Function(S)
575  ones_.vector()[:] = 1
576  A_diag = A * ones_.vector()
577  A_diag.set_local(1.0/A_diag.array())
578  elif CG0H == 1:
579  S = TensorFunctionSpace(mesh,'DG',0)
580  A = assemble(inner(TrialFunction(S), TestFunction(S))*dx)
581  b = assemble(inner(grad(grad(f)), TestFunction(S))*dx)
582  ones_ = Function(S)
583  ones_.vector()[:] = 1
584  A_diag = A * ones_.vector()
585  A_diag.set_local(1.0/A_diag.array())
586  H = Function(S)
587  H.vector()[:] = b * A_diag
588  else:
589  H = project(grad(grad(f)), TensorFunctionSpace(mesh, "DG", 0))
590  else:
591  gradf = project(grad(f), VectorFunctionSpace(mesh, "CG", 1))
592  H = project(sym(grad(gradf)), TensorFunctionSpace(mesh, "DG", 0))
593 
594  if CG1out or dolfin.__version__ >= '1.4.0':
595  H = project(H,TensorFunctionSpace(mesh,'CG',1))
596  # EXTRACT HESSIAN
597  [HH,cell2dof] = get_dofs(H)
598  # add DOLFIN_EPS on the diagonal to avoid zero eigenvalues
599  HH[0,:] += DOLFIN_EPS
600  HH[2,:] += DOLFIN_EPS
601  if n==3: #3D
602  HH[5,:] += DOLFIN_EPS
603 
604  # CALCULATE EIGENVALUES
605  [eigL,eigR] = analytic_eig(HH)
606 
607  # Make H positive definite and calculate the p-norm.
608  #enforce hardcoded min and max constraints
609  min_eigenvalue = 1e-20; max_eigenvalue = 1e20
610  onesC = ones(eigL.shape)
611  eigL = array([numpy.abs(eigL),onesC*min_eigenvalue]).max(0)
612  eigL = array([numpy.abs(eigL),onesC*max_eigenvalue]).min(0)
613  #enforce constraint on aspect ratio
614  if max_edge_ratio is not None:
615  RR = arange(HH.shape[1])
616  CC = eigL.argmax(0)
617  I_ = array([False]).repeat(array(eigL.shape).prod())
618  I_[CC+(RR-1)*eigL.shape[0]] = True
619  I_ = I_.reshape(eigL.shape)
620  eigL[I_==False] = array([eigL[I_==False],eigL[I_].repeat(eigL.shape[0]-1)/max_edge_ratio]).max(0)
621 
622  #check (will not trigger with min_eigenvalue > 0)
623  det = eigL.prod(0)
624  if any(det==0):
625  raise FloatingPointError("Eigenvalues are zero")
626 
627  #compute metric
628  exponent = -1.0/(2*p + n)
629  eigL *= 1./eta*(det**exponent).repeat(eigL.shape[0]).reshape([eigL.shape[1],eigL.shape[0]]).T
630 
631 # HH = analyt_rot(fulleig(eigL),eigR)
632 # HH *= 1./eta*det**exponent
633 # [eigL,eigR] = analytic_eig(HH)
634 
635  #enforce min and max constraints
636  if max_edge_length is not None:
637  min_eigenvalue = 1.0/max_edge_length**2
638  if eigL.flatten().min()<min_eigenvalue:
639  info('upper bound on element edge length is active')
640  if min_edge_length is not None:
641  max_eigenvalue = 1.0/min_edge_length**2
642  if eigL.flatten().max()>max_eigenvalue:
643  info('lower bound on element edge length is active')
644  eigL = array([eigL,onesC*min_eigenvalue]).max(0)
645  eigL = array([eigL,onesC*max_eigenvalue]).min(0)
646  HH = analyt_rot(fulleig(eigL),eigR)
647 
648  Hfinal = sym2asym(HH)
649  cbig=zeros((H.vector().array()).size)
650  cbig[cell2dof.flatten()] = Hfinal.transpose().flatten()
651  H.vector().set_local(cbig)
652  return H
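# Usage sketch (the field f is illustrative; assumes legacy DOLFIN): build a metric from a
# scalar field with explicit bounds on the resulting edge lengths, then adapt:
#
#   mesh = UnitSquareMesh(20, 20)
#   f = interpolate(Expression("exp(-50*(x[0]*x[0]+x[1]*x[1]))"), FunctionSpace(mesh, "CG", 2))
#   M = metric_pnorm(f, eta=5e-3, max_edge_length=0.2, min_edge_length=1e-3, p=2)
#   # new_mesh = adapt(M)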
653 
654 def metric_ellipse(H1, H2, method='in', qualtesting=False):
655  # calculates the inner or outer ellipse (depending on the value of the method input)
656  # of the two input metrics.
657  # INPUT : H1 is a DOLFIN SPD TENSOR VARIABLE (CG1 or DG0)
658  # INPUT : H2 is a DOLFIN SPD TENSOR VARIABLE (CG1 or DG0)
659  # INPUT : method determines calculation method, 'in' for inner ellipse (default)
660  # INPUT : qualtesting is a flag that can be used to trigger the return of a scalar variable
661  # that indicates if one ellipse is entirely within the other (-1,1) or if they
662  # intersect (0)
663  # OUTPUT: H1, DOLFIN SPD TENSOR VARIABLE (CG1 or DG0)
664  [HH1,cell2dof] = get_dofs(H1)
665  [HH2,cell2dof] = get_dofs(H2)
666  cbig = zeros((H1.vector().array()).size)
667 
668  # CALCULATE EIGENVALUES using an analytic expression (numpy.__version__ >= 1.8.0 can do this more elegantly)
669  [eigL1,eigR1] = analytic_eig(HH1)
670  # convert metric2 to metric1 space
671  tmp = analyt_rot(HH2, transpose_eigR(eigR1))
672  tmp = prod_eig(tmp, 1/eigL1)
673  [eigL2,eigR2] = analytic_eig(tmp)
674  # enforce inner or outer ellipse
675  if method == 'in':
676  if qualtesting:
677  HH = Function(FunctionSpace(H1.function_space().mesh(),'DG',0))
678  HH.vector().set_local((eigL2<ones(eigL2.shape)).sum(0)-ones(eigL2.shape[1]))
679  return HH
680  else:
681  eigL2 = array([eigL2 ,ones(eigL2.shape)]).max(0)
682  else:
683  eigL2 = array([eigL2, ones(eigL2.shape)]).min(0)
684 
685  #convert metric2 back to original space
686  tmp = analyt_rot(fulleig(eigL2), eigR2)
687  tmp = prod_eig(tmp, eigL1)
688  HH = analyt_rot(tmp,eigR1)
689  HH = sym2asym(HH)
690  #set metric
691  cbig[cell2dof.flatten()] = HH.transpose().flatten()
692  H1.vector().set_local(cbig)
693  return H1
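# Usage sketch (f1 and f2 are illustrative fields defined on the same mesh): intersect the
# metrics derived from two fields so that the adapted mesh resolves both of them:
#
#   M1 = metric_pnorm(f1, eta=1e-2, CG1out=True)
#   M2 = metric_pnorm(f2, eta=1e-2, CG1out=True)
#   M  = metric_ellipse(M1, M2)      # 'in' (default) gives the pointwise intersection
#   # new_mesh = adapt(M)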
694 
695 def get_dofs(H):
696  #converts a DOLFIN SPD TENSOR VARIABLE to a numpy array, see sym2asym for storage convention
697  #OUTPUT: argout.shape = (3,N) or argout.shape = (6,N) for 2D and 3D, respectively.
698  #INPUT : DOLFIN TENSOR VARIABLE
699  mesh = H.function_space().mesh()
700  n = mesh.geometry().dim()
701  if H.function_space().ufl_element().degree() == 0 and H.function_space().ufl_element().family()[0] == 'D':
702  cell2dof = c_cell_dofs(mesh,H.function_space())
703  cell2dof = cell2dof.reshape([mesh.num_cells(),n**2])
704  else: #CG1 metric
705  cell2dof = arange(mesh.num_vertices()*n**2)
706  cell2dof = cell2dof.reshape([mesh.num_vertices(),n**2])
707  if n == 2:
708  H11 = H.vector().array()[cell2dof[:,0]]
709  H12 = H.vector().array()[cell2dof[:,1]] #;H21 = H.vector().array()[cell2dof[:,2]]
710  H22 = H.vector().array()[cell2dof[:,3]]
711  return [array([H11,H12,H22]),cell2dof]
712  else: #n==3
713  H11 = H.vector().array()[cell2dof[:,0]]
714  H12 = H.vector().array()[cell2dof[:,1]] #;H21 = H.vector().array()[cell2dof[:,3]]
715  H13 = H.vector().array()[cell2dof[:,2]] #;H31 = H.vector().array()[cell2dof[:,6]]
716  H22 = H.vector().array()[cell2dof[:,4]]
717  H23 = H.vector().array()[cell2dof[:,5]] #H32 = H.vector().array()[cell2dof[:,7]]
718  H33 = H.vector().array()[cell2dof[:,8]]
719  return [array([H11,H12,H22,H13,H23,H33]),cell2dof]
720 
721 def transpose_eigR(eigR):
722  #transposes a rotation matrix (eigenvectors)
723  #INPUT and OUTPUT: .shape = (4,N) or .shape = (9,N)
724  if eigR.shape[0] == 4:
725  return array([eigR[0,:],eigR[2,:],\
726  eigR[1,:],eigR[3,:]])
727  else: #3D
728  return array([eigR[0,:],eigR[3,:],eigR[6,:],\
729  eigR[1,:],eigR[4,:],eigR[7,:],\
730  eigR[2,:],eigR[5,:],eigR[8,:]])
731 
732 def sym2asym(HH):
733  #converts from upper triangular storage to full storage of a
734  #SPD tensor
735  #INPUT : HH.shape = (3,N) or HH.shape = (6,N) for 2D and 3D, respectively.
736  #OUTPUT: argout.shape = (4,N) or argout.shape = (9,N) for 2D and 3D, respectively.
737  if HH.shape[0] == 3:
738  return array([HH[0,:],HH[1,:],\
739  HH[1,:],HH[2,:]])
740  else:
741  return array([HH[0,:],HH[1,:],HH[3,:],\
742  HH[1,:],HH[2,:],HH[4,:],\
743  HH[3,:],HH[4,:],HH[5,:]])
744 
745 def fulleig(eigL):
746  #creates a diagonal tensor from a vector
747  #INPUT : eigL.shape = (2,N) or eigL.shape = (3,N) for 2D and 3D, respectively.
748  #OUTPUT: outarg.shape = (3,N) or outarg.shape = (6,N) for 2D and 3D, respectively.
749  zeron = zeros(eigL.shape[1])
750  if eigL.shape[0] == 2:
751  return array([eigL[0,:],zeron,eigL[1,:]])
752  else: #3D
753  return array([eigL[0,:],zeron,eigL[1,:],zeron,zeron,eigL[2,:]])
754 
755 def analyt_rot(H,eigR):
756  #rotates a symmetric matrix, i.e. it calculates the tensor product
757  # R*h*R.T, where
758  #INPUT : H.shape = (3,N) or H.shape = (6,N) for 2D and 3D matrices, respectively.
759  #INPUT : eigR.shape = (4,N) or eigR.shape = (9,N) for 2D and 3D, respectively.
760  #OUTPUT: A.shape = (3,N) or A.shape = (6,N) for 2D and 3D, respectively
761  if H.shape[0] == 3: #2D
762  inds = array([[0,1],[1,2]])
763  indA = array([[0,1],[2,3]])
764  else: #3D
765  inds = array([[0,1,3],[1,2,4],[3,4,5]])
766  indA = array([[0,1,2],[3,4,5],[6,7,8]])
767  indB = indA.T
768  A = zeros(H.shape)
769  for i in range(len(inds)):
770  for j in range(len(inds)):
771  for m in range(len(inds)):
772  for n in range(len(inds)):
773  if i<n:
774  continue
775  A[inds[i,n],:] += eigR[indB[i,j],:]*H[inds[j,m],:]*eigR[indA[m,n],:]
776  return A
777 
778 def prod_eig(H, eigL):
779  #calculates the symmetry-preserving product sqrt(diag(eigL))*H*sqrt(diag(eigL)), where
780  #H is a tensor and eigL is a vector (diag(eigL) is a diagonal tensor).
781  #INPUT : H.shape = (3,N) or H.shape = (6,N) for 2D and 3D, respectively and
782  #INPUT : eigL.shape = (2,N) or eigL.shape = (3,N) for 2D and 3D, respectively
783  #OUTPUT: argout.shape = (3,N) or argout.shape = (6,N) for 2D and 3D, respectively
784  if H.shape[0] == 3:
785  return array([H[0,:]*eigL[0,:], H[1,:]*numpy.sqrt(eigL[0,:]*eigL[1,:]), \
786  H[2,:]*eigL[1,:]])
787  else:
788  return array([H[0,:]*eigL[0,:], H[1,:]*numpy.sqrt(eigL[0,:]*eigL[1,:]), H[2,:]*eigL[1,:], \
789  H[3,:]*numpy.sqrt(eigL[0,:]*eigL[2,:]), H[4,:]*numpy.sqrt(eigL[2,:]*eigL[1,:]),\
790  H[5,:]*eigL[2,:]])
791 
792 def analytic_eig(H, tol=1e-12):
793  #calculates the eigenvalues and eigenvectors using explicit analytical
794  #expressions for an array of 2x2 or 3x3 symmetric matrices.
795  #if numpy.__version__ >= "1.8.0", the vectorisation functionality of numpy.linalg.eig is used
796  #INPUT: H.shape = (3,N) or H.shape = (6,N) for 2x2 and 3x3, respectively. Refer to sym2asym for ordering convention
797  # tol is an optional numerical tolerance for identifying diagonal matrices
798  #OUTPUT: eigL.shape = (2,N) or eigL.shape = (3,N) for 2x2 and 3x3, respectively.
799  #OUTPUT: eigR.shape = (4,N) or eigR.shape = (9,N) for 2x2 and 3x3, respectively. Refer to transpose_eigR for ordering convention
800  H11 = H[0,:]
801  H12 = H[1,:]
802  H22 = H[2,:]
803  onesC = ones(len(H11))
804  if H.shape[0] == 3:
805  if numpy.__version__ < "1.8.0":
806  lambda1 = 0.5*(H11+H22-numpy.sqrt((H11-H22)**2+4*H12**2))
807  lambda2 = 0.5*(H11+H22+numpy.sqrt((H11-H22)**2+4*H12**2))
808  v1x = ones(len(H11)); v1y = zeros(len(H11))
809  #identical eigenvalues
810  I2 = numpy.abs(lambda1-lambda2)<onesC*tol;
811  #diagonal matrix
812  I1 = numpy.abs(H12)<onesC*tol
813  lambda1[I1] = H11[I1]
814  lambda2[I1] = H22[I1]
815  #general case
816  nI = (I1==False)*(I2==False)
817  v1x[nI] = -H12[nI]
818  v1y[nI] = H11[nI]-lambda1[nI]
819  L1 = numpy.sqrt(v1x**2+v1y**2)
820  v1x /= L1
821  v1y /= L1
822  eigL = array([lambda1,lambda2])
823  eigR = array([v1x,v1y,-v1y,v1x])
824  else:
825  Hin = zeros([len(H11),2,2])
826  Hin[:,0,0] = H11; Hin[:,0,1] = H12
827  Hin[:,1,0] = H12; Hin[:,1,1] = H22
828  else: #3D
829  H13 = H[3,:]
830  H23 = H[4,:]
831  H33 = H[5,:]
832  if numpy.__version__ < "1.8.0":
833  p1 = H12**2 + H13**2 + H23**2
834  zeroC = zeros(len(H11))
835  eig1 = array(H11); eig2 = array(H22); eig3 = array(H33) #do not modify input
836  v1 = array([onesC, zeroC, zeroC])
837  v2 = array([zeroC, onesC, zeroC])
838  v3 = array([zeroC, zeroC, onesC])
839  # A is not diagonal.
840  nI = (numpy.abs(p1) > tol**2)
841  p1 = p1[nI]
842  H11 = H11[nI]; H12 = H12[nI]; H22 = H22[nI];
843  H13 = H13[nI]; H23 = H23[nI]; H33 = H33[nI];
844  q = array((H11+H22+H33)/3.)
845 # H11 /= q; H12 /= q; H22 /= q; H13 /= q; H23 /= q; H33 /= q
846 # p1 /= q**2; qold = q; q = ones(len(H11))
847  p2 = (H11-q)**2 + (H22-q)**2 + (H33-q)**2 + 2.*p1
848  p = numpy.sqrt(p2 / 6.)
849  I = array([onesC,zeroC,onesC,zeroC,zeroC,onesC])#I = array([1., 0., 1., 0., 0., 1.]).repeat(len(H11)).reshape(6,len(H11)) #identity matrix
850  HH = array([H11,H12,H22,H13,H23,H33])
851  B = (1./p) * (HH-q.repeat(6).reshape(len(H11),6).T*I[:,nI])
852  #detB = B11*B22*B33+2*(B12*B23*B13)-B13*B22*B13-B12*B12*B33-B11*B23*B23
853  detB = B[0,:]*B[2,:]*B[5,:]+2*(B[1,:]*B[4,:]*B[3,:])-B[3,:]*B[2,:]*B[3,:]-B[1,:]*B[1,:]*B[5,:]-B[0,:]*B[4,:]*B[4,:]
854 
855  #calc r
856  r = detB / 2.
857  rsmall = r<=-1.
858  rbig = r>= 1.
859  rgood = (rsmall==False)*(rbig==False)
860  phi = zeros(len(H11))
861  phi[rsmall] = pi / 3.
862  phi[rbig] = 0.
863  phi[rgood] = numpy.arccos(r[rgood]) / 3.
864 
865  eig1[nI] = q + 2.*p*numpy.cos(phi)
866  eig3[nI] = q + 2.*p*numpy.cos(phi + (2.*pi/3.))
867  eig2[nI] = array(3.*q - eig1[nI] - eig3[nI])
868 # eig1[nI] *= qold; eig2[nI] *= qold; eig3[nI] *= qold
869  v1[0,nI] = H22*H33 - H23**2 + eig1[nI]*(eig1[nI]-H33-H22)
870  v1[1,nI] = H12*(eig1[nI]-H33)+H13*H23
871  v1[2,nI] = H13*(eig1[nI]-H22)+H12*H23
872  v2[0,nI] = H12*(eig2[nI]-H33)+H23*H13
873  v2[1,nI] = H11*H33 - H13**2 + eig2[nI]*(eig2[nI]-H11-H33)
874  v2[2,nI] = H23*(eig2[nI]-H11)+H12*H13
875  v3[0,nI] = H13*(eig3[nI]-H22)+H23*H12
876  v3[1,nI] = H23*(eig3[nI]-H11)+H13*H12
877  v3[2,nI] = H11*H22 - H12**2 + eig3[nI]*(eig3[nI]-H11-H22)
878  L1 = numpy.sqrt((v1[:,nI]**2).sum(0))
879  L2 = numpy.sqrt((v2[:,nI]**2).sum(0))
880  L3 = numpy.sqrt((v3[:,nI]**2).sum(0))
881  v1[:,nI] /= L1.repeat(3).reshape(len(L1),3).T
882  v2[:,nI] /= L2.repeat(3).reshape(len(L1),3).T
883  v3[:,nI] /= L3.repeat(3).reshape(len(L1),3).T
884  eigL = array([eig1,eig2,eig3])
885  eigR = array([v1[0,:],v1[1,:],v1[2,:],\
886  v2[0,:],v2[1,:],v2[2,:],\
887  v3[0,:],v3[1,:],v3[2,:]])
888  bad = (numpy.abs(analyt_rot(fulleig(eigL),eigR)-H).sum(0) > tol) | isnan(eigR).any(0) | isnan(eigL).any(0)
889  if any(bad):
890  log(INFO,'%0.0f problems in eigendecomposition' % bad.sum())
891  for i in numpy.where(bad)[0]:
892  [eigL_,eigR_] = pyeig(array([[H[0,i],H[1,i],H[3,i]],\
893  [H[1,i],H[2,i],H[4,i]],\
894  [H[3,i],H[4,i],H[5,i]]]))
895  eigL[:,i] = eigL_
896  eigR[:,i] = eigR_.T.flatten()
897  else:
898  Hin = zeros([len(H11),3,3])
899  Hin[:,0,0] = H11; Hin[:,0,1] = H12; Hin[:,0,2] = H13
900  Hin[:,1,0] = H12; Hin[:,1,1] = H22; Hin[:,1,2] = H23
901  Hin[:,2,0] = H13; Hin[:,2,1] = H23; Hin[:,2,2] = H33
902  if numpy.__version__ >= "1.8.0":
903  [eigL,eigR] = pyeig(Hin)
904  eigL = eigL.T
905  eigR = eigR.transpose([0,2,1]).reshape([len(H11),array(Hin.shape[1:3]).prod()]).T
906  return [eigL,eigR]
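# Consistency sketch (illustrative check, using only functions from this module): the storage
# conventions of sym2asym, fulleig, analyt_rot and analytic_eig can be cross-checked on a single
# 2x2 SPD matrix [[2,1],[1,3]], stored column-wise as H = array([[2.],[1.],[3.]]):
#
#   H = array([[2.], [1.], [3.]])
#   [eigL, eigR] = analytic_eig(H)
#   Hback = analyt_rot(fulleig(eigL), eigR)    # reconstructs [2, 1, 3] up to round-off
#   assert numpy.abs(Hback - H).max() < 1e-10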
907 
908 def logexpmetric(Mp,logexp='log'):
909  #calculates various tensor transformations in the principal frame
910  #INPUT : DOLFIN TENSOR VARIABLE
911  #INPUT : logexp is an optional argument specifying the transformation,
912  # valid values are:
913  # 'log' , natural logarithm (default)
914  # 'exp' , exponential
915  # 'inv' , inverse
916  # 'sqr' , square
917  # 'sqrt' , square root
918  # 'sqrtinv', inverse square root
919  # 'sqrinv' , inverse square
920 
921  #OUTPUT: DOLFIN TENSOR VARIABLE
922  [H,cell2dof] = get_dofs(Mp)
923  [eigL,eigR] = analytic_eig(H)
924  if logexp=='log':
925  eigL = numpy.log(eigL)
926  elif logexp=='sqrt':
927  eigL = numpy.sqrt(eigL)
928  elif logexp=='inv':
929  eigL = 1./eigL
930  elif logexp=='sqr':
931  eigL = eigL**2
932  elif logexp=='sqrtinv':
933  eigL = numpy.sqrt(1./eigL)
934  elif logexp=='sqrinv':
935  eigL = 1./eigL**2
936  elif logexp=='exp':
937  eigL = numpy.exp(eigL)
938  else:
939  error('logexp='+logexp+' is an invalid value')
940  HH = analyt_rot(fulleig(eigL),eigR)
941  out = sym2asym(HH).transpose().flatten()
942  Mp.vector().set_local(out)
943  return Mp
944 
945 def minimum_eig(Mp):
946  # calculates the minimum eigenvalue of a DOLFIN TENSOR VARIABLE
947  # INPUT : DOLFIN TENSOR VARIABLE
948  # OUTPUT: DOLFIN SCALAR VARIABLE
949  mesh = Mp.function_space().mesh()
950  element = Mp.function_space().ufl_element()
951  [H,cell2dof] = get_dofs(Mp)
952  [eigL,eigR] = analytic_eig(H)
953  out = Function(FunctionSpace(mesh,element.family(),element.degree()))
954  out.vector().set_local(eigL.min(0))
955  return out
956 
957 def get_rot(Mp):
958  mesh = Mp.function_space().mesh()
959  element = Mp.function_space().ufl_element()
960  [H,cell2dof] = get_dofs(Mp)
961  [eigL,eigR] = analytic_eig(H)
962  out = Function(TensorFunctionSpace(mesh,element.family(),element.degree()))
963  out.vector().set_local(eigR.transpose().flatten())
964  return out
965 
966 def logproject(Mp):
967  # provides projection to a CG1 tensor space in log-space.
968  # That is,
969  # #1 An eigen decomposition is calculated for the input tensor
970  # #2 This is used to calculate the tensor logarithm
971  # #3 which is then projected onto the CG1 tensor space
972  # #4 Finally, the inverse operation, a tensor exponential is performed.
973  # This approach requires SPD input, but also preserves this attribute.
974  # INPUT : DOLFIN SPD TENSOR VARIABLE
975  # OUTPUT: DOLFIN SPD CG1 TENSOR VARIABLE
976  mesh = Mp.function_space().mesh()
977  logMp = project(logexpmetric(Mp),TensorFunctionSpace(mesh,'CG',1))
978  return logexpmetric(logMp,logexp='exp')
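# Usage sketch (mesh is an illustrative DOLFIN mesh): project a DG0 metric, such as the one
# returned by mesh_metric(), onto CG1 while keeping it symmetric positive definite:
#
#   M_dg0 = mesh_metric(mesh)
#   M_cg1 = logproject(M_dg0)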
979 
980 def mesh_metric(mesh):
981  # calculates a mesh metric (that is, it has units of inverse length squared;
982  # use mesh_metric2 to get units of length)
983  # For each simplex, the function solves the linear problem (written here for 2D)
984  #
985  # |L1x*L1x 2*L1x*L1y L1y*L1y| |Mxx|   |1|
986  # |L2x*L2x 2*L2x*L2y L2y*L2y| |Mxy| = |1|
987  # |L3x*L3x 2*L3x*L3y L3y*L3y| |Myy|   |1|, where
988  #
989  #L1x is p1x-p2x and p1x is the x-coordinate of the 1st vertex in the simplex.
990  #The problem is vectorised by construction of a block diagonal problem.
991  #INPUT : DOLFIN MESH
992  #OUTPUT: DOLFIN DG0 SPD TENSOR VARIABLE
993  #
994  #NOTE: For stabilising advective problems where the local velocity direction
995  # is vdir (unit length), the local element size, Lh, can be taken as
996  # htmp = dot(vdir,M); Lh = 1/sqrt(dot(htmp,htmp)), where M is the mesh metric
997  cell2dof = c_cell_dofs(mesh,TensorFunctionSpace(mesh, "DG", 0))
998  cells = mesh.cells()
999  coords = mesh.coordinates()
1000  p1 = coords[cells[:,0],:];
1001  p2 = coords[cells[:,1],:];
1002  p3 = coords[cells[:,2],:];
1003  r1 = p1-p2; r2 = p1-p3; r3 = p2-p3
1004  Nedg = 3
1005  if mesh.geometry().dim() == 3:
1006  Nedg = 6
1007  p4 = coords[cells[:,3],:];
1008  r4 = p1-p4; r5 = p2-p4; r6 = p3-p4
1009  rall = zeros([p1.shape[0],p1.shape[1],Nedg])
1010  rall[:,:,0] = r1; rall[:,:,1] = r2; rall[:,:,2] = r3;
1011  if mesh.geometry().dim() == 3:
1012  rall[:,:,3] = r4; rall[:,:,4] = r5; rall[:,:,5] = r6
1013  All = zeros([p1.shape[0],Nedg**2])
1014  inds = arange(Nedg**2).reshape([Nedg,Nedg])
1015  for i in range(Nedg):
1016  All[:,inds[i,0]] = rall[:,0,i]**2; All[:,inds[i,1]] = 2.*rall[:,0,i]*rall[:,1,i]; All[:,inds[i,2]] = rall[:,1,i]**2
1017  if mesh.geometry().dim() == 3:
1018  All[:,inds[i,3]] = 2.*rall[:,0,i]*rall[:,2,i]; All[:,inds[i,4]] = 2.*rall[:,1,i]*rall[:,2,i]; All[:,inds[i,5]] = rall[:,2,i]**2
1019  Ain = zeros([Nedg*2-1,Nedg*p1.shape[0]])
1020  ndia = zeros(Nedg*2-1)
1021  for i in range(Nedg):
1022  for j in range(i,Nedg):
1023  iks1 = arange(j,Ain.shape[1],Nedg)
1024  if i==0:
1025  Ain[i,iks1] = All[:,inds[j,j]]
1026  else:
1027  iks2 = arange(j-i,Ain.shape[1],Nedg)
1028  Ain[2*i-1,iks1] = All[:,inds[j-i,j]]
1029  Ain[2*i,iks2] = All[:,inds[j,j-i]]
1030  ndia[2*i-1] = i
1031  ndia[2*i] = -i
1032 
1033  A = scipy.sparse.spdiags(Ain, ndia, Ain.shape[1], Ain.shape[1]).tocsr()
1034  b = ones(Ain.shape[1])
1035  X = scipy.sparse.linalg.spsolve(A,b)
1036  #set solution
1037  XX = sym2asym(X.reshape([mesh.num_cells(),Nedg]).transpose())
1038  M = Function(TensorFunctionSpace(mesh,"DG", 0))
1039  M.vector().set_local(XX.transpose().flatten()[cell2dof])
1040  return M
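# Verification sketch (assumes legacy DOLFIN): by construction every element edge has unit
# length when measured in the mesh metric M, i.e. dot(e, dot(M, e)) == 1. For the structured
# mesh below every cell contains a horizontal edge of length 1/5:
#
#   mesh = UnitSquareMesh(5, 5)
#   M = mesh_metric(mesh)
#   e = Constant((0.2, 0.0))
#   lengths = project(dot(e, dot(M, e)), FunctionSpace(mesh, "DG", 0))
#   # lengths.vector().array() is 1.0 in every cell, up to round-off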
1041 
1042 def mesh_metric1(mesh):
1043  #this is just the inverse of mesh_metric2
1044  M = mesh_metric(mesh)
1045  #M = logexpmetric(M,logexp='sqrt')
1046  [MM,cell2dof] = get_dofs(M)
1047  [eigL,eigR] = analytic_eig(MM)
1048  eigL = numpy.sqrt(eigL)
1049  MM = analyt_rot(fulleig(eigL),eigR)
1050  MM = sym2asym(MM).transpose().flatten()
1051  M.vector().set_local(MM[cell2dof.flatten()])
1052  return M
1053 
1054 def mesh_metric2(mesh):
1055  #calculates a metric field, which when divided by sqrt(3) corresponds to the Steiner
1056  #ellipse for the individual elements, see the test case mesh_metric2_example
1057  #the sqrt(3) ensures that the unit element maps to the identity tensor
1058  M = mesh_metric(mesh)
1059  #M = logexpmetric(M,logexp='sqrtinv')
1060  [MM,cell2dof] = get_dofs(M)
1061  [eigL,eigR] = analytic_eig(MM)
1062  eigL = numpy.sqrt(1./eigL)
1063  MM = analyt_rot(fulleig(eigL),eigR)
1064  MM = sym2asym(MM).transpose().flatten()
1065  M.vector().set_local(MM[cell2dof.flatten()])
1066  return M
1067 
1068 def gradate(H, grada, itsolver=False):
1069  # provides anisotropic Helmholtz smoothing on the logarithm
1070  #of a metric, based on the metric of the mesh times a scaling factor (grada)
1071  if itsolver:
1072  solverp = {"linear_solver": "cg", "preconditioner": "ilu"}
1073  else:
1074  solverp = {"linear_solver": "lu"}
1075  mesh = H.function_space().mesh()
1076  grada = Constant(grada)
1077  mm2 = mesh_metric2(mesh)
1078  mm2sq = dot(grada*mm2,grada*mm2)
1079  Hold = Function(H); H = logexpmetric(H) #avoid logexpmetric side-effect
1080  V = TensorFunctionSpace(mesh,'CG',1); H_trial = TrialFunction(V); H_test = TestFunction(V); Hnew=Function(V)
1081  a = (inner(grad(H_test),dot(mm2sq,grad(H_trial)))+inner(H_trial,H_test))*dx
1082  L = inner(H,H_test)*dx
1083  solve(a==L,Hnew,[], solver_parameters=solverp)
1084  Hnew = metric_ellipse(logexpmetric(Hnew,logexp='exp'), Hold)
1085  return Hnew
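# Usage sketch (M is an illustrative CG1 metric): limit how fast the metric varies between
# neighbouring elements before adapting; a larger grada gives stronger smoothing. adapt()
# exposes the same functionality through its grada argument:
#
#   M = gradate(M, 1.0)
#   # new_mesh = adapt(M)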
1086 
1087 
1088 def c_cell_dofs(mesh,V):
1089  #returns the degree of freedom numbers in each cell (for DG0 input) or at each
1090  # vertex (for CG1 input).
1091  # INPUT : DOLFIN TENSOR VARIABLE (CG1 or DG0)
1092  # OUTPUT: outarg.shape = (4*N,) or outarg.shape = (9*N,)
1093  # The DOLFIN storage CONVENTION was greatly simplified at 1.3.0 :
1094  if dolfin.__version__ >= '1.3.0':
1095  if V.ufl_element().is_cellwise_constant():
1096  return arange(mesh.num_cells()*mesh.coordinates().shape[1]**2)
1097  else:
1098  return arange(mesh.num_vertices()*mesh.coordinates().shape[1]**2)
1099  else:
1100  #returns the degrees of freedom numbers in a cell
1101  code = """
1102  void cell_dofs(boost::shared_ptr<GenericDofMap> dofmap,
1103  const std::vector<std::size_t>& cell_indices,
1104  std::vector<std::size_t>& dofs)
1105  {
1106  assert(dofmap);
1107  std::size_t local_dof_size = dofmap->cell_dofs(0).size();
1108  const std::size_t size = cell_indices.size()*local_dof_size;
1109  dofs.resize(size);
1110  for (std::size_t i=0; i<cell_indices.size(); i++)
1111  for (std::size_t j=0; j<local_dof_size;j++)
1112  dofs[i*local_dof_size+j] = dofmap->cell_dofs(cell_indices[i])[j];
1113  }
1114  """
1115  module = compile_extension_module(code)
1116  return module.cell_dofs(V.dofmap(), arange(mesh.num_cells(), dtype=numpy.uintp))
1117 
1118 
1119 if __name__=="__main__":
1120  testcase = 3
1121  if testcase == 0:
1122  from minimal_example import minimal_example
1123  minimal_example(width=5e-2)
1124  elif testcase == 1:
1125  from minimal_example_minell import check_metric_ellipse
1126  check_metric_ellipse(width=2e-2)
1127  elif testcase == 2:
1128  from play_multigrid import test_refine_metric
1129  test_refine_metric()
1130  elif testcase == 3:
1131  from mesh_metric2_example import test_mesh_metric
1132  test_mesh_metric()
1133  elif testcase == 4:
1134  from circle_convergence import circle_convergence
1135  circle_convergence()
1136  elif testcase == 5:
1137  from maximal_example import maximal_example
1138  maximal_example()