# -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 17:22:29 2021
@author: adtzlr
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import bmat
import skfem
from skfem.helpers import grad
mu = 1.0
bulk = 5000.0
# Newton-Raphson parameters
tol = 1e-12
maxiter = 8
# array with values of "External Displacement Z" on boundary "move"
move_uz = -np.arange(0,0.6,0.1)
print("Init Stage")
print("==========")
print("* Init mesh.")
m = skfem.mesh.MeshHex().refined(2)
eu = skfem.element.ElementVectorH1(skfem.element.ElementHex1())
ep = skfem.element.ElementHex0()
eJ = skfem.element.ElementHex0()
e = {
    "u": eu,
    "p": ep,
    "J": eJ
}
print("* Init basis for (Hu-Washizu) Three-Field-Variation (u-p-J̅)")
print(" for nearly-incompressible hyperelastic materials.\n")
print(" Π_int = ∫ ̂ψ(̂F) dV + ∫ U(J̅) dV + ∫ p ((det(F) - J̅) dV\n")
basis = {"u": skfem.InteriorBasis(m,eu, intorder=1),
"p": skfem.InteriorBasis(m,ep, intorder=0),
"J": skfem.InteriorBasis(m,ep, intorder=0)
}
def transpose(A):
    "Transpose of 2d and Major-transpose of 4d Arrays with trailing axes."
    if len(np.shape(A))-2 == 0:
        return A.T
    elif len(np.shape(A))-2 == 2:
        return np.transpose(A, (1, 0, 2, 3))
    elif len(np.shape(A))-2 == 4:
        return np.transpose(A, (2, 3, 0, 1, 4, 5))
    elif len(np.shape(A))-2 > 4:
        return np.transpose(A, (2, 3, 0, 1, *range(len(A.shape))[4:]))
    else:
        raise NotImplementedError("Unknown shape of array.")
def identity(A):
    "Identity Matrix of same shape as A with trailing axes."
    if isinstance(A, np.ndarray):
        ndim = A.shape[0]
        a, b = A.shape[-2:]
    else:
        # scikit-fem
        ndim = A.value.shape[0]
        a, b = A.value.shape[-2:]
    return np.tile(np.identity(ndim), (b, a, 1, 1)).T
def dya(A, B=None):
    "Dyadic (outer tensor) product of two Tensors."
    if B is None:
        B = A
    return np.einsum("ij...,kl...->ijkl...", A, B)
def cdyau(A, B=None):
    "ik-crossed dyadic product of two Tensors."
    if B is None:
        B = A
    return np.einsum("ij...,kl...->ikjl...", A, B)
def cdyal(A, B=None):
    "il-crossed dyadic product of two Arrays."
    if B is None:
        B = A
    return np.einsum("ij...,kl...->ilkj...", A, B)
def cdya(A, B=None):
    "Symmetric crossed dyadic product of two Arrays."
    if B is None:
        B = A
    return (cdyau(A, B) + cdyal(A, B)) / 2
def __dot22(A, B):
    "Dot product of a 2d and a 2d Array."
    return np.einsum("ij...,jk...->ik...", A, B)
def __dot42(A, B):
    "Dot product of a 4d and a 2d Array."
    return np.einsum("ijkl...,lm...->ijkm...", A, B)
def __dot24(A, B):
    "Dot product of a 2d and a 4d Array."
    return np.einsum("ij...,jklm...->iklm...", A, B)
def __dot44(A, B):
    "Dot product of a 4d and a 4d Array."
    return np.einsum("ijkl...,lmnp...->ijkmnp...", A, B)
def _singledot(A, B):
    "Dot product of two Arrays."
    if len(np.shape(A))-2 == 2 and len(np.shape(B))-2 == 4:
        return __dot24(A, B)
    elif len(np.shape(A))-2 == 4 and len(np.shape(B))-2 == 2:
        return __dot42(A, B)
    else:
        return __dot22(A, B)
def dot(A, B, *args):
    "Multi-argument dot product of two Arrays."
    C = _singledot(A, B)
    for i in range(len(args)):
        C = _singledot(C, args[i])
    return C
def __ddot22(A, B):
    "Double dot product of a 2d and a 2d Array."
    return np.einsum("ij...,ij...->...", A, B)
def __ddot24(A, B):
    "Double dot product of a 2d and a 4d Array."
    return np.einsum("ij...,ijkl...->kl...", A, B)
def __ddot42(A, B):
    "Double dot product of a 4d and a 2d Array."
    return np.einsum("ijkl...,kl...->ij...", A, B)
def __ddot44(A, B):
    "Double dot product of a 4d and a 4d Array."
    return np.einsum("ijkl...,klmn...->ijmn...", A, B)
def _singledoubledot(A, B):
    "Double-Dot product of two Arrays."
    if len(np.shape(A))-2 == 2 and len(np.shape(B))-2 == 4:
        return __ddot24(A, B)
    elif len(np.shape(A))-2 == 4 and len(np.shape(B))-2 == 2:
        return __ddot42(A, B)
    else:
        return __ddot22(A, B)
def ddot(A, B, *args):
    "Multi-argument double-dot product of two Arrays."
    C = _singledoubledot(A, B)
    for i in range(len(args)):
        C = _singledoubledot(C, args[i])
    return C
def det(A):
    "Determinant with optional trailing axes."
    return np.linalg.det(A.T).T
def inv(A):
    "Inverse with optional trailing axes."
    return np.linalg.inv(A.T).T
def trace(A):
    "Trace with optional trailing axes."
    return np.trace(A)
def vol(A):
    "Spherical (volumetric) part of A (with optional trailing axes)."
    return trace(A)/3 * identity(A)
def dev(A):
    "Deviatoric part of A (with optional trailing axes)."
    return A - vol(A)
def fields(w):
    return w["displacements"], w["pressure"].value, w["volumeratio"].value
def F1(w):
    """1st Piola-Kirchhoff stress tensor obtained from
    variation of the potential w.r.t. the displacements."""
    u, p, theta = fields(w)
    F = grad(u) + identity(u)
    J = det(F)
    Pdev = mu * J**(-2/3) * (F - ddot(F, F)/3 * transpose(inv(F)))
    Pvol = p * J * transpose(inv(F))
    return Pdev + Pvol
def F2(w):
    """Equilibrium equation obtained from variation
    of the potential w.r.t. the pressure."""
    u, p, theta = fields(w)
    F = grad(u) + identity(u)
    J = det(F)
    return J - theta
def F3(w):
    """Equilibrium equation obtained from variation
    of the potential w.r.t. the volume ratio 'theta'."""
    u, p, theta = fields(w)
    return bulk * (theta - 1) - p
def A11(w):
    """Elasticity tensor obtained from the linearization
    w.r.t. the displacements of the variation of the
    potential w.r.t. the displacements."""
    u, p, theta = fields(w)
    eye = identity(u)
    F = grad(u) + eye
    J = det(F)
    iFT = transpose(inv(F))
    A4_dev = mu * J**(-2/3) * (cdyau(eye)
                               - 2/3 * dya(F, iFT)
                               - 2/3 * dya(iFT, F)
                               + 2/9 * ddot(F, F) * dya(iFT)
                               + 1/3 * ddot(F, F) * cdyal(iFT)
                               )
    A4_vol = p * J * (dya(iFT) - cdyal(iFT))
    return A4_dev + A4_vol
def A12(w):
    """Linearization w.r.t. the displacements of the variation of the
    potential w.r.t. the pressure."""
    u, p, theta = fields(w)
    F = grad(u) + identity(u)
    J = det(F)
    return J * transpose(inv(F))
def A13(w):
    """Linearization w.r.t. the displacements of the variation of the
    potential w.r.t. the volume ratio."""
    u, p, theta = fields(w)
    return np.zeros_like(grad(u))
def A22(w):
    """Linearization w.r.t. the pressure of the variation of the
    potential w.r.t. the pressure."""
    u, p, theta = fields(w)
    return np.zeros_like(p)
def A23(w):
    """Linearization w.r.t. the volume ratio of the variation of the
    potential w.r.t. the pressure."""
    u, p, theta = fields(w)
    return -np.ones_like(p)
def A33(w):
    """Linearization w.r.t. the volume ratio of the variation of the
    potential w.r.t. the volume ratio."""
    u, p, theta = fields(w)
    return np.ones_like(theta) * bulk
print("* Init forms.")
@skfem.LinearForm
def a1(v, w):
    return ddot(F1(w), grad(v))
@skfem.LinearForm
def a2(v, w):
    return F2(w) * v
@skfem.LinearForm
def a3(v, w):
    return F3(w) * v
@skfem.BilinearForm
def b11(u, v, w):
    return ddot(grad(u), A11(w), grad(v))
@skfem.BilinearForm
def b22(u, v, w):
    return A22(w) * u * v
@skfem.BilinearForm
def b33(u, v, w):
    return A33(w) * u * v
@skfem.BilinearForm
def b12(u, v, w):
    return ddot(A12(w), grad(v)) * u
@skfem.BilinearForm
def b13(u, v, w):
    return ddot(A13(w), grad(v)) * u
@skfem.BilinearForm
def b23(u, v, w):
    return A23(w) * u * v
print("* Init Boundary conditions.")
dofs_left = basis["u"].find_dofs(
{"left": m.facets_satisfying(lambda x: x[0] <= 0.0)},
skip=["u^2", "u^3"]
)
dofs_bottom = basis["u"].find_dofs(
{"bottom": m.facets_satisfying(lambda x: x[1] <= 0.0)},
skip=["u^1", "u^3"]
)
dofs_back = basis["u"].find_dofs(
{"back": m.facets_satisfying(lambda x: x[2] <= 0.0)},
skip=["u^1", "u^2"]
)
dofs_front = basis["u"].find_dofs(
{"front": m.facets_satisfying(lambda x: np.abs(x[2] - 1.0) <= 0.0)},
skip=["u^3"]
)
dofs_move = basis["u"].find_dofs(
{"move": m.facets_satisfying(lambda x: np.abs(x[2] - 1.0) <= 0.0)},
skip=["u^1", "u^2"]
)
print("* Assemble boundaries conditions.")
dofs = {}
for dof in [dofs_left,
dofs_bottom,
dofs_back,
dofs_front,
dofs_move,
]:
dofs.update(dof)
print("* Obtain degrees of freedom to keep.")
I = np.hstack((
basis["u"].complement_dofs(dofs),
basis["u"].N + np.arange(basis["p"].N),
basis["u"].N + basis["p"].N + np.arange(basis["J"].N)
))
print("* Init total and incremental unknowns.")
u = basis["u"].zeros()
p = basis["p"].zeros()
J = basis["J"].zeros()
δu = basis["u"].zeros()
δp = basis["p"].zeros()
δJ = basis["J"].zeros()
print("* Initial assembly of stiffness matrix.")
asmprops = {
    "displacements": basis["u"].interpolate(u),
    "pressure": basis["p"].interpolate(p),
    "volumeratio": basis["J"].interpolate(J)
}
K11 = skfem.asm(b11, basis["u"], basis["u"], **asmprops)
K22 = skfem.asm(b22, basis["p"], basis["p"], **asmprops)
K33 = skfem.asm(b33, basis["J"], basis["J"], **asmprops)
K12 = skfem.asm(b12, basis["p"], basis["u"], **asmprops)
K13 = skfem.asm(b13, basis["J"], basis["u"], **asmprops)
K23 = skfem.asm(b23, basis["J"], basis["p"], **asmprops)
K = bmat(
    [[K11, K12, K13],
     [K12.T, K22, K23],
     [K13.T, K23.T, K33]], "csr"
)
print("* Initial assembly of equilibrium equations.\n")
f1 = skfem.asm(a1, basis["u"], **asmprops)
f2 = skfem.asm(a2, basis["p"], **asmprops)
f3 = skfem.asm(a3, basis["J"], **asmprops)
f = np.concatenate((f1,f2,f3))
# init array for "Reaction Force Z" on boundary "move" for all increments
move_fz = np.zeros_like(move_uz)
# increment loop for ramping up "External Displacement Z" on "move"
for increment, move in enumerate(move_uz[1:]):
    print(f"Increment {(increment+1):2d}")
    print("============")
    print(f'* prescribed displacement Z on boundary "move" U_Z={move:1.3e}')
    print("\nNewton-Raphson iterations")
    print("-------------------------")
    # init converged flag
    converged = False
    # Newton-Raphson iteration loop
    for iteration in range(maxiter):
        # enforce displacements on boundary "move"
        δupJ_prescribed = np.hstack((u, p, J))
        δupJ_prescribed[dofs["move"].all()] = move - δupJ_prescribed[dofs["move"].all()]
        if not np.allclose(δupJ_prescribed[dofs["move"].all()], 0):
            print(f'* increase incremental displacements on boundary "move" δU_Z={δupJ_prescribed[dofs["move"].all()][0]:1.3e}\n')
        # solve system
        system = skfem.condense(K, -f, x=δupJ_prescribed, I=I)
        uvpJ = skfem.solve(*system, use_umfpack=True)
        δu, pJ = np.split(uvpJ, [u.shape[0]])
        δp, δJ = np.split(pJ, [p.shape[0]])
        # calculate norms of unknowns
        norm_δu = np.linalg.norm(δu)
        norm_δp = np.linalg.norm(δp)
        norm_δJ = np.linalg.norm(δJ)
        # check if solution is valid
        if np.isnan(norm_δu) or np.isnan(norm_δp) or np.isnan(norm_δJ):
            break
        # update unknowns
        u += δu
        p += δp
        J += δJ
        # keyword arguments for assembly
        asmprops = {
            "displacements": basis["u"].interpolate(u),
            "pressure": basis["p"].interpolate(p),
            "volumeratio": basis["J"].interpolate(J)
        }
        # update equilibrium equations
        f1 = skfem.asm(a1, basis["u"], **asmprops)
        f2 = skfem.asm(a2, basis["p"], **asmprops)
        f3 = skfem.asm(a3, basis["J"], **asmprops)
        f = np.concatenate((f1, f2, f3))
        # get active displacement dofs
        dof1 = basis["u"].complement_dofs(dofs)
        # reference force as norm of nodal reaction forces at prescribed dofs
        f1_ref = np.linalg.norm(np.delete(f1, dof1))
        # check nodal equilibrium for nodal force residuals at active dofs
        norm_f1 = np.linalg.norm(f1[dof1]) / f1_ref
        # update stiffness
        K11 = skfem.asm(b11, basis["u"], basis["u"], **asmprops)
        K22 = skfem.asm(b22, basis["p"], basis["p"], **asmprops)
        K33 = skfem.asm(b33, basis["J"], basis["J"], **asmprops)
        K12 = skfem.asm(b12, basis["p"], basis["u"], **asmprops)
        K13 = skfem.asm(b13, basis["J"], basis["u"], **asmprops)
        K23 = skfem.asm(b23, basis["J"], basis["p"], **asmprops)
        # build sparse stiffness matrix from sparse sub-blocks
        K = bmat(
            [[K11, K12, K13],
             [K12.T, K22, K23],
             [K13.T, K23.T, K33]], "csr"
        )
        print(f"#{iteration+1:2d}: |f|={norm_f1:1.3e} (|δu|={norm_δu:1.3e} |δp|={norm_δp:1.3e} |δJ|={norm_δJ:1.3e})")
        if norm_f1 < tol:
            converged = True
            move_fz[increment+1] = np.sum(f1[dofs["move"].all()])
            print(f'\n* final "Reaction Force Z" on boundary "move" F_Z={move_fz[increment+1]:1.3e}\n')
            break
    if np.isnan(norm_δu) or not converged:
        # reset counter for last converged increment and break
        increment = increment - 1
        break
    else:
        # export results to xdmf-file and go to next increment
        savedata = {"u": u[basis["u"].nodal_dofs].T,
                    "p": p[basis["p"].element_dofs[0]],
                    "J": J[basis["J"].element_dofs[0]]}
        m.save("results.xdmf", savedata)
# plot force-displacement curve on boundary "move"
plt.plot(move_uz[:increment+2], move_fz[:increment+2], 'o-')
plt.xlabel('Displacement Z on "move"')
plt.ylabel('Total Reaction Force Z on "move"')
plt.grid()
Just a hint: if you stick to m = skfem.mesh.MeshHex().refined(2) (a cube of 4x4x4 hex elements), the script runs quite fast. With more elements it becomes really slow!
Oh, and another note: The resulting force-displacement curve of the deformed cube matches perfectly the solution obtained with the commercial FEA package "MSC.Marc" 😄
Thanks for this; a little outside my area of expertise but it looks good.
(Thanks also for the heads-up on contique in nschloe/pacopy#21; sorry for not replying to that yet. I am very interested in continuation; it's on my TODO list.)
Is it possible to export several timesteps into a single XDMF file or do I have to use the native meshio interface for that?
No, there's no special support for initial value problems in scikit-fem thus far. It's not clear whether there should be or, if so, what form it should take. My opinion is that getting the abstraction right is tricky: so many existing packages muck this up completely. My hope is that a good clean abstract Python package for dynamical systems, bifurcation theory, &c., will come along and then I can just hook scikit-fem and it together with neither really needing to know about the other. I'd then just demonstrate that with something like ex19. In the absence of that dream package, it might be worth developing a skfem.ivp module #531.
But specifically for your question about XDMF, no and yes, I do explicitly use meshio.xdmf.TimeSeriesWriter; see, e.g.,
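A minimal sketch of what such a time-series export can look like with meshio (this is not the example referenced above, which is elided here; the names points, cells, time_steps and displacement_steps are placeholders for this illustration):

import meshio

# hypothetical sketch: several time steps written into a single XDMF file;
# 'points' and 'cells' are assumed to be meshio-compatible arrays of the mesh
with meshio.xdmf.TimeSeriesWriter("result.xdmf") as writer:
    writer.write_points_cells(points, cells)
    for t, u_t in zip(time_steps, displacement_steps):
        # one nodal field per time step; cell_data works analogously
        writer.write_data(t, point_data={"u": u_t})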
How can I export elemental values like the Deformation Gradient or a stress tensor inside mesh.save() in order to visualize it (in ParaView)?
If they're not in finite element spaces that are understood by meshio, XDMF (or some other format), or ParaView (or whatever other viewer), the easiest approach is to project onto one that is.
Sometimes dirty tricks are possible, like faking that using e.g. .nodal_dofs to get something in a linear Lagrangian space, though that might not work for the deformation gradient or stress tensor; I'm not sure what function space they're in in your example.
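A hedged sketch of the projection described above, using the helpers defined in the script: assemble a mass matrix on a linear nodal basis and an L2 right-hand side of the quantity evaluated at the quadrature points, then solve for nodal values. The form name rhs, the keyword disp and the basis names are made up for this illustration; the integration order is chosen identically for both bases so that the interpolated field and the projection basis share quadrature points.

# hypothetical sketch: L2-project F[0, 0] onto the linear nodal (Lagrangian) basis
basis_p1 = skfem.InteriorBasis(m, skfem.ElementHex1(), intorder=2)
basis_u2 = skfem.InteriorBasis(m, eu, intorder=2)

@skfem.BilinearForm
def mass(u, v, w):
    return u * v

@skfem.LinearForm
def rhs(v, w):
    # deformation gradient evaluated at the quadrature points
    F = grad(w["disp"]) + identity(w["disp"])
    return F[0, 0] * v

M = skfem.asm(mass, basis_p1)
b = skfem.asm(rhs, basis_p1, disp=basis_u2.interpolate(u))
F00_nodal = skfem.solve(M, b)  # one value per node, ready for point-data export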
Thank you for your hints @gdmcbain!
(Thanks also for the heads-up on contique in nschloe/pacopy#21; sorry for not replying to that yet. I am very interested in continuation; it's on my TODO list.)
In the end it would be great if contique and something like scikit-fem could be coupled. Unfortunately contique is not as flexible as pacopy and it was designed for relatively small problems. I first heard about numeric continuation in a course held by my PhD supervisor, where I created trusspy as a tool to calculate my homework examples of nonlinear truss structures. It was also my first Python package 🚀. Some years later I thought of extracting the numeric continuation part of it, as I'm forced to use this technique in continuum mechanics of solids in my current research.
If they're not in finite element spaces that are understood by meshio, XDMF (or some other format), or ParaView (or whatever other viewer), the easiest approach is to project onto one that is.
Are there any easy-to-understand examples which use the project feature? I think it is used in the contact example for the von Mises stress, if I remember correctly. But I did not really understand what this code does. I even do not understand the first line of the postprocessing section. What are DG elements in example 04? 😊
Sometimes dirty tricks are possible like faking that using e.g. .nodal_dofs to get something in a linear Lagrangian space;
Yeah, I already did that for p and J as elemental dofs because they are constant fields. Is it possible to just "shift" (without interpolation) a function value at the integration points to the nodal displacement dofs and then perform a nodal averaging of the "shifted" values? Or is an interpolation easier (and more correct...)? For example, let's take the first component of the deformation gradient F[0,0] as a simple scalar example which is evaluated at the integration points. How can I project it to the nodal degrees of freedom?
But specifically for your question about XDMF, no and yes, I do explicitly use meshio.xdmf.TimeSeriesWriter; see, e.g.,
Thank you for your hint, will have a look at it!
I think I will split my code up into several examples and I'm going to collect them in a new repo. One example would be the condense function (and probably another one for defining custom helper functions...). The last two could be left out, or are way easier, if I manage to couple contique with scikit-fem.
I have another question: How often is A11(w) actually called in the assembly process of the stiffness matrix for a mesh consisting of linear hexahedrons? Is there any chance of speeding this process up?
My function is actually quite fast because I used a simple nearly-incompressible neo-Hookean material formulated directly on the work-conjugate pair of first Piola-Kirchhoff stress and deformation gradient, instead of calculating the stress and elasticity matrix first in a Lagrangian or Eulerian setting and then performing some push-forward and pull-back operations. A little side note: one benefit of doing this is that the (unsymmetric) elasticity tensor ∂P/∂F = ∂²ψ/∂F∂F, appearing in δF : ∂²ψ/∂F∂F : ΔF, automatically contains the geometric or initial-stress part of the fourth-order elasticity tensor, because ΔδF = 0 (at least in Cartesian coordinates...).
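For context, the reasoning behind that remark, written in the same notation as the potential printed by the script (a standard continuum-mechanics identity, not something specific to this issue): the second variation of the internal potential is

Δδ(Π_int) = ∫ δF : ∂²ψ/∂F∂F : ΔF dV + ∫ P : ΔδF dV

and since δF = Grad(δu) does not depend on the current displacements, ΔδF = 0; the second integral drops out and everything, including what would appear as a separate geometric (initial-stress) term in a formulation based on S and E, is carried by ∂²ψ/∂F∂F = ∂P/∂F.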
In the end it would be great if contique and something like scikit-fem could be coupled. Unfortunately contique is not as flexible as pacopy and it was designed for relatively small problems.
I did take a stab at writing a small natural parameter continuation function for scikit-fem as part of #119: skfem.algorithms.continuation.natural_jfnk. It's on the shelf for now though, more for difficulties with JFNK in #119 than any shortcomings of it. One feature that I did like in it and would recommend is generating the successive solutions rather than building a list and returning it; this means that they can be postprocessed on the fly and that the continuation function doesn't have to worry so much about termination criteria. A generator is also used for an initial value problem in #531, which works well with matplotlib.animation.FuncAnimation: the solution is displayed as it evolves rather than at the end.
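A minimal sketch of that generator pattern (the names natural, solve_at, lambdas, u0, newton_solve and export_or_plot are placeholders, not part of scikit-fem, pacopy or contique):

import numpy as np

def natural(solve_at, lambdas, u0):
    """Yield (load factor, solution) pairs one at a time so each converged
    step can be post-processed (plotted, exported, animated) on the fly."""
    u = u0
    for lam in lambdas:
        u = solve_at(lam, u)  # e.g. a Newton solve started from the previous state
        yield lam, u

# consume the generator step by step instead of collecting a list first:
# for lam, u in natural(newton_solve, np.linspace(0.0, 1.0, 11), u0):
#     export_or_plot(lam, u)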
There are many questions but I'll start answering one by one.
I have another question: How often is A11(w) actually called in the assembly process of the stiffness matrix for a mesh consisting of linear hexahedrons? Is there any chance of speeding this process up?
As many times as there are entries in the local stiffness matrix. I think a (tri)linear hexahedron has 8 basis functions, and since this is a vectorial 3D problem it should be 24 x 24 = 576 times in total.
We have thought many times about doing the form evaluations in parallel, but unfortunately a suitably general approach has not presented itself yet. Usually in complex forms significant savings can also be obtained by making the form evaluation itself faster, which can be done in different ways. In theory you could also write the form function in some low-level language such as C or Fortran and decorate that. Some POCs did use numba to make the evaluation faster.
Edit: Let me add that the arguments to the form are largish numpy arrays, which is an attempt to reduce the number of times the form gets called.
Edit 2: I calculated wrong; added the correct total number.
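Given that count, one cheap workaround is to memoize A11 for the duration of one assembly. This is only a sketch under the assumption, suggested by the edit above, that the same interpolated-field object w is reused for all of those local evaluations within a single skfem.asm call; the helper name A11_cached is made up here:

# hypothetical memoization: A11 depends only on w, which is assumed to be
# the same object for every local basis-function pair in one asm call
_A11_cache = {}

def A11_cached(w):
    key = id(w["displacements"])
    if key not in _A11_cache:
        _A11_cache.clear()          # keep only the current assembly's result
        _A11_cache[key] = A11(w)
    return _A11_cache[key]

@skfem.BilinearForm
def b11(u, v, w):
    return ddot(grad(u), A11_cached(w), grad(v))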
There are many questions but I'll start answering one by one.
Sorry for that. I know there are a lot, next time I'll split it up to have one or two related questions per issue 😊
(One could in theory hook ffcx from FEniCS to scikit-fem and use the forms compiled to C in the assembly, but the interface is not very well documented I think.)
(One could in theory hook ffcx from FEniCS to scikit-fem and use the forms compiled to C in the assembly, but the interface is not very well documented I think.)
...and if you have to use FEniCS for that, you can use it directly (in my opinion). One of the essential benefits of scikit-fem is the easy and fast installation with pip.
Sure. There are some finite elements that we have and FEniCS does not, but I think easy install is the main benefit.
There could perhaps exist another package for creating these more complicated forms, possibly with linearization support, so that the forms are faster to evaluate. Maybe not in as much detail as ffcx, which is doing very nontrivial optimizations, but something simpler and easier to use.
In the meantime I experimented a bit with numba, but besides raising the CPU workload the solution isn't achieved significantly faster 😠. I essentially tried to compile functions called det_numba and inv_numba with the help of numba (optionally with the parallel keyword prange). They seem to be essentially faster if I run %timeit inv_numba(A) compared to the default %timeit np.linalg.inv(A.T).T. Compared to the explicitly typed scikit-fem helper for the inverse of a 3x3 matrix, the speedup in a scikit-fem problem isn't really worth the effort for my test cases. So I'll stick to the default helpers of skfem as they give good results without the hassle of compiling anything.
I still haven't managed to project e.g. the Cauchy stress to the nodal displacement degrees of freedom. If I have success I'll add a little code snippet and then I'll close this issue. Is it ok for you to keep this issue open till then?
I still haven't managed to project e.g. the Cauchy stress to the nodal displacement degrees of freedom. If I have success I'll add a little code snippet and then I'll close this issue. Is it ok for you to keep this issue open till then?
There was a related question recently, maybe it is of help: https://github.com/kinnala/scikit-fem/issues/618
In the meantime I experimented a bit with numba, but besides raising the CPU workload the solution isn't achieved significantly faster. I essentially tried to compile functions called det_numba and inv_numba with the help of numba (optionally with the parallel keyword prange). They seem to be essentially faster if I run %timeit inv_numba(A) compared to the default %timeit np.linalg.inv(A.T).T. Compared to the explicitly typed scikit-fem helper for the inverse of a 3x3 matrix, the speedup in a scikit-fem problem isn't really worth the effort for my test cases. So I'll stick to the default helpers of skfem as they give good results without the hassle of compiling anything.
If you want faster performance with numba, you often have to "unvectorize" the expressions and use explicit for-loops instead of matrix operations. Readability will suffer a lot.
There was a related question recently, maybe it is of help: #618
Thank you, I'm already having a look at it.
If you want faster performance with numba, you often have to "unvectorize" the expressions and use explicit for-loops instead of matrix operations. Readability will suffer a lot.
Yes, I tried to define compiled functions for common operations (like yours in skfem.helpers): dot and double-dot products, determinants of 2x2 and 3x3 matrices and their inverses. Then the code in the forms would look (nearly) the same as before - only different helpers are imported. Anything else results in lengthy code and the produced code (at least for me) is error-prone. But the increase in speed was barely noticeable - the only thing which was noticeable was my CPU fan 😊
For example I tried these helper functions:
import numpy as np
from numba import njit, prange

@njit(parallel=True, fastmath=True, cache=True, nogil=True)
def det(A):
    dim, dim, na, nb = A.shape
    detA = np.zeros((na, nb))
    if dim == 2:
        for a in range(na):
            for b in range(nb):
                detA[a, b] = A[0, 0, a, b] * A[1, 1, a, b] - A[0, 1, a, b] * A[1, 0, a, b]
    elif dim == 3:
        for a in prange(na):
            for b in range(nb):
                detA[a, b] = (  A[0, 0, a, b] * A[1, 1, a, b] * A[2, 2, a, b]
                              + A[0, 1, a, b] * A[1, 2, a, b] * A[2, 0, a, b]
                              + A[0, 2, a, b] * A[1, 0, a, b] * A[2, 1, a, b]
                              - A[0, 2, a, b] * A[1, 1, a, b] * A[2, 0, a, b]
                              - A[0, 0, a, b] * A[1, 2, a, b] * A[2, 1, a, b]
                              - A[2, 2, a, b] * A[1, 0, a, b] * A[0, 1, a, b]
                              )
    else:
        raise NotImplementedError("Wrong Dimension.")
    return detA

@njit(parallel=True, fastmath=True, cache=True, nogil=True)
def inv(A, detA):
    dim, dim, na, nb = A.shape
    B = np.zeros_like(A)
    if dim == 2:
        for a in prange(na):
            for b in range(nb):
                B[0, 0, a, b] = A[1, 1, a, b] / detA[a, b]
                B[1, 1, a, b] = A[0, 0, a, b] / detA[a, b]
                B[0, 1, a, b] = -A[1, 0, a, b] / detA[a, b]
                B[1, 0, a, b] = -A[0, 1, a, b] / detA[a, b]
    elif dim == 3:
        for a in prange(na):
            for b in range(nb):
                B[0, 0, a, b] = (A[1, 1, a, b] * A[2, 2, a, b] - A[1, 2, a, b] * A[2, 1, a, b]) / detA[a, b]
                B[1, 1, a, b] = (A[0, 0, a, b] * A[2, 2, a, b] - A[0, 2, a, b] * A[2, 0, a, b]) / detA[a, b]
                B[2, 2, a, b] = (A[0, 0, a, b] * A[1, 1, a, b] - A[0, 1, a, b] * A[1, 0, a, b]) / detA[a, b]
                B[0, 1, a, b] = -(A[0, 1, a, b] * A[2, 2, a, b] - A[0, 2, a, b] * A[2, 1, a, b]) / detA[a, b]
                B[1, 0, a, b] = -(A[1, 0, a, b] * A[2, 2, a, b] - A[2, 0, a, b] * A[1, 2, a, b]) / detA[a, b]
                B[1, 2, a, b] = -(A[0, 0, a, b] * A[1, 2, a, b] - A[0, 2, a, b] * A[1, 0, a, b]) / detA[a, b]
                B[2, 1, a, b] = -(A[0, 0, a, b] * A[2, 1, a, b] - A[2, 0, a, b] * A[0, 1, a, b]) / detA[a, b]
                B[0, 2, a, b] = (A[0, 1, a, b] * A[1, 2, a, b] - A[0, 2, a, b] * A[1, 1, a, b]) / detA[a, b]
                B[2, 0, a, b] = (A[1, 0, a, b] * A[2, 1, a, b] - A[2, 0, a, b] * A[1, 1, a, b]) / detA[a, b]
    else:
        raise NotImplementedError("Wrong Dimension.")
    return B
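For completeness, a hedged usage note: dropped into the forms above, the only visible difference to the numpy-based helpers is that this inv takes the precomputed determinant as a second argument, e.g.

# hypothetical drop-in inside F1/A11; results match the numpy helpers
F = grad(u) + identity(u)
J = det(F)        # compiled determinant
iF = inv(F, J)    # compiled inverse reuses the determinant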
Hi,

I wrote an example for the so-called "Hu-Washizu" three-field variation (u, p, J) for nearly-incompressible hyperelasticity. As an example, linear hexahedron elements are used for the displacement field u along with constant p and J fields. They can easily be changed to quadratic tetrahedral elements with linear pressure and volume-ratio interpolations if that's preferred.

I'm actually surprised how easy the implementation of this mixed formulation was. I do not have any prior experience with FEniCS and I'm more familiar with the "engineering approach of finite elements". Anyway, I really like how scikit-fem works!

I'll append the whole lengthy script as an additional answer. I used my own helpers just to learn how it works. Should I change it to make use of skfem.helpers? Could you have a look at the code to see if I'm using scikit-fem the way it should be used? Is it worth making an "official" example out of that? Or could it make sense to take some parts of the code for shorter examples?

A thing which could be of interest: I used an (easy to calculate) relative norm of the "nodal force residuals" - the norm divided by the norm of the reaction forces on the boundaries. Although not complicated, I think this is not covered in the examples yet. If I remember correctly, the checks in the examples are only performed on the incremental values of the unknowns u and no check of the resulting equilibrium norm(f) is done (in example 36).

The most difficult parts for me were

a) the definition of the degrees of freedom to keep for the additional fields (personally I also did not really understand why this is called I - of course I read the docs section about the I and D keywords):

b) the use of the x keyword in the condense function for the boundary condition. Without x (if example 36 was extended to several increments) you have to use really - and I mean really - small increments on ramping up external displacements.

To sum up, I have some questions:

1) Is it possible to export several timesteps into a single XDMF file or do I have to use the native meshio interface for that?
2) How can I export elemental values like the deformation gradient or a stress tensor inside mesh.save() in order to visualize them (in ParaView)?
3) Are there any easy speed-ups of the K11 assembly process for hyperelastic material formulations?

More a ParaView-specific question, but maybe you know an answer:

4) Is it possible to visualize quadratic elements with curved edges from a scikit-fem exported "XDMF" file in ParaView?