PolyFEM
NeoHookeanElasticity.cpp
#include "NeoHookeanElasticity.hpp"

#include <type_traits>
7
8namespace polyfem::assembler
9{
10 namespace
11 {
12 bool delta(int i, int j)
13 {
14 return (i == j) ? true : false;
15 }
16 } // namespace
17
	void NeoHookeanElasticity::add_multimaterial(const int index, const json &params, const Units &units)
	{
		assert(size() == 2 || size() == 3);

		params_.add_multimaterial(index, params, size() == 3, units.stress());
	}

	Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1>
	NeoHookeanElasticity::compute_rhs(const AutodiffHessianPt &pt) const
	{
		assert(pt.size() == size());
		Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1> res;

		double lambda, mu;
		// TODO!
		params_.lambda_mu(0, 0, 0, pt(0).getValue(), pt(1).getValue(), size() == 2 ? 0. : pt(2).getValue(), 0, 0, lambda, mu);

		if (size() == 2)
			autogen::neo_hookean_2d_function(pt, lambda, mu, res);
		else if (size() == 3)
			autogen::neo_hookean_3d_function(pt, lambda, mu, res);
		else
			assert(false);

		return res;
	}

	Eigen::VectorXd
	NeoHookeanElasticity::assemble_gradient(const NonLinearAssemblerData &data) const
	{
		Eigen::Matrix<double, Eigen::Dynamic, 1> gradient;

		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
			{
				gradient.resize(6);
				compute_energy_aux_gradient_fast<3, 2>(data, gradient);
				break;
			}
			case 6:
			{
				gradient.resize(12);
				compute_energy_aux_gradient_fast<6, 2>(data, gradient);
				break;
			}
			case 10:
			{
				gradient.resize(20);
				compute_energy_aux_gradient_fast<10, 2>(data, gradient);
				break;
			}
			default:
			{
				gradient.resize(data.vals.basis_values.size() * 2);
				compute_energy_aux_gradient_fast<Eigen::Dynamic, 2>(data, gradient);
				break;
			}
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
			{
				gradient.resize(12);
				compute_energy_aux_gradient_fast<4, 3>(data, gradient);
				break;
			}
			case 10:
			{
				gradient.resize(30);
				compute_energy_aux_gradient_fast<10, 3>(data, gradient);
				break;
			}
			case 20:
			{
				gradient.resize(60);
				compute_energy_aux_gradient_fast<20, 3>(data, gradient);
				break;
			}
			default:
			{
				gradient.resize(data.vals.basis_values.size() * 3);
				compute_energy_aux_gradient_fast<Eigen::Dynamic, 3>(data, gradient);
				break;
			}
			}
		}

		return gradient;
	}
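
	// The switch above (and the analogous ones in assemble_hessian and compute_energy)
	// dispatches on the number of local bases: 3/6/10 nodes in 2D and 4/10/20 nodes in 3D
	// correspond to linear, quadratic, and cubic simplicial elements, so the common cases
	// use fixed-size Eigen matrices and avoid dynamic allocation; anything else falls back
	// to the Eigen::Dynamic instantiation.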

	void NeoHookeanElasticity::compute_stiffness_value(const double t,
		const assembler::ElementAssemblyValues &vals,
		const Eigen::MatrixXd &local_pts,
		const Eigen::MatrixXd &displacement,
		Eigen::MatrixXd &tensor) const
	{
		tensor.resize(local_pts.rows(), size() * size() * size() * size());
		assert(displacement.cols() == 1);

		Eigen::MatrixXd displacement_grad(size(), size());

		for (long p = 0; p < local_pts.rows(); ++p)
		{
			double lambda, mu;
			params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

			compute_diplacement_grad(size(), vals, local_pts, p, displacement, displacement_grad);
			const Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(size(), size()) + displacement_grad;
			const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
			const Eigen::VectorXd FmT_vec = utils::flatten(FmT);
			const double J = def_grad.determinant();
			const double tmp1 = mu - lambda * std::log(J);
			for (int i = 0, idx = 0; i < size(); i++)
				for (int j = 0; j < size(); j++)
					for (int k = 0; k < size(); k++)
						for (int l = 0; l < size(); l++)
						{
							tensor(p, idx) = mu * delta(i, k) * delta(j, l) + tmp1 * FmT(i, l) * FmT(k, j);
							idx++;
						}

			tensor.row(p) += lambda * utils::flatten(FmT_vec * FmT_vec.transpose());

			// {
			// 	Eigen::MatrixXd hess = utils::unflatten(tensor.row(p), size() * size());
			// 	Eigen::MatrixXd fhess;
			// 	Eigen::VectorXd x0 = utils::flatten(def_grad);
			// 	fd::finite_jacobian(
			// 		x0, [this, lambda, mu](const Eigen::VectorXd &x1) -> Eigen::VectorXd
			// 		{
			// 			Eigen::MatrixXd def_grad = utils::unflatten(x1, this->size());
			// 			const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
			// 			const double J = def_grad.determinant();
			// 			Eigen::MatrixXd stress_tensor = mu * (def_grad - FmT) + lambda * std::log(J) * FmT;
			// 			return utils::flatten(stress_tensor);
			// 		}, fhess);

			// 	if (!fd::compare_hessian(hess, fhess))
			// 	{
			// 		logger().trace("Hessian: {}", hess);
			// 		logger().trace("Finite hessian: {}", fhess);
			// 		log_and_throw_error("Hessian in Neohookean mismatch");
			// 	}
			// }
		}
	}
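
	// The per-point rows assembled above are the flattened derivative of the first
	// Piola-Kirchhoff stress P = μ (F - F⁻ᵀ) + λ log(J) F⁻ᵀ with respect to F:
	//   ∂P_ij/∂F_kl = μ δ_ik δ_jl + (μ - λ log J) F⁻ᵀ_il F⁻ᵀ_kj + λ F⁻ᵀ_ij F⁻ᵀ_kl,
	// i.e. the fourth-order elasticity tensor evaluated at the current deformation gradient.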

	Eigen::MatrixXd
	NeoHookeanElasticity::assemble_hessian(const NonLinearAssemblerData &data) const
	{
		Eigen::MatrixXd hessian;

		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
			{
				hessian.resize(6, 6);
				hessian.setZero();
				compute_energy_hessian_aux_fast<3, 2>(data, hessian);
				break;
			}
			case 6:
			{
				hessian.resize(12, 12);
				hessian.setZero();
				compute_energy_hessian_aux_fast<6, 2>(data, hessian);
				break;
			}
			case 10:
			{
				hessian.resize(20, 20);
				hessian.setZero();
				compute_energy_hessian_aux_fast<10, 2>(data, hessian);
				break;
			}
			default:
			{
				hessian.resize(data.vals.basis_values.size() * 2, data.vals.basis_values.size() * 2);
				hessian.setZero();
				compute_energy_hessian_aux_fast<Eigen::Dynamic, 2>(data, hessian);
				break;
			}
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
			{
				hessian.resize(12, 12);
				hessian.setZero();
				compute_energy_hessian_aux_fast<4, 3>(data, hessian);
				break;
			}
			case 10:
			{
				hessian.resize(30, 30);
				hessian.setZero();
				compute_energy_hessian_aux_fast<10, 3>(data, hessian);
				break;
			}
			case 20:
			{
				hessian.resize(60, 60);
				hessian.setZero();
				compute_energy_hessian_aux_fast<20, 3>(data, hessian);
				break;
			}
			default:
			{
				hessian.resize(data.vals.basis_values.size() * 3, data.vals.basis_values.size() * 3);
				hessian.setZero();
				compute_energy_hessian_aux_fast<Eigen::Dynamic, 3>(data, hessian);
				break;
			}
			}
		}

		return hessian;
	}

	void NeoHookeanElasticity::assign_stress_tensor(const OutputData &data,
		const int all_size,
		const ElasticityTensorType &type,
		Eigen::MatrixXd &all,
		const std::function<Eigen::MatrixXd(const Eigen::MatrixXd &)> &fun) const
	{
		const auto &displacement = data.fun;
		const auto &local_pts = data.local_pts;
		const auto &bs = data.bs;
		const auto &gbs = data.gbs;
		const auto el_id = data.el_id;
		const auto t = data.t;

		Eigen::MatrixXd displacement_grad(size(), size());

		assert(displacement.cols() == 1);

		all.resize(local_pts.rows(), all_size);

		ElementAssemblyValues vals;
		vals.compute(el_id, size() == 3, local_pts, bs, gbs);
		const auto I = Eigen::MatrixXd::Identity(size(), size());

		for (long p = 0; p < local_pts.rows(); ++p)
		{
			compute_diplacement_grad(size(), bs, vals, local_pts, p, displacement, displacement_grad);

			const Eigen::MatrixXd def_grad = I + displacement_grad;
			if (type == ElasticityTensorType::F)
			{
				all.row(p) = fun(def_grad);
				continue;
			}
			const double J = def_grad.determinant();
			const Eigen::MatrixXd b = def_grad * def_grad.transpose();

			double lambda, mu;
			params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

			Eigen::MatrixXd stress_tensor = (lambda * std::log(J) * I + mu * (b - I)) / J;
			if (type == ElasticityTensorType::PK1)
				stress_tensor = pk1_from_cauchy(stress_tensor, def_grad);
			else if (type == ElasticityTensorType::PK2)
				stress_tensor = pk2_from_cauchy(stress_tensor, def_grad);

			all.row(p) = fun(stress_tensor);
		}
	}
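
	// The tensor written per quadrature point above is the Neo-Hookean Cauchy stress
	//   σ = (μ (F Fᵀ - I) + λ log(J) I) / J,
	// optionally converted to the first or second Piola-Kirchhoff stress through
	// pk1_from_cauchy / pk2_from_cauchy, or replaced by the raw deformation gradient F.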

	double NeoHookeanElasticity::compute_energy(const NonLinearAssemblerData &data) const
	{
		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
				return compute_energy_aux<double, 3, 2>(data);
			case 6:
				return compute_energy_aux<double, 6, 2>(data);
			case 10:
				return compute_energy_aux<double, 10, 2>(data);
			default:
				return compute_energy_aux<double, Eigen::Dynamic, 2>(data);
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
				return compute_energy_aux<double, 4, 3>(data);
			case 10:
				return compute_energy_aux<double, 10, 3>(data);
			case 20:
				return compute_energy_aux<double, 20, 3>(data);
			default:
				return compute_energy_aux<double, Eigen::Dynamic, 3>(data);
			}
		}
	}

	// Compute ∫ ½μ (tr(FᵀF) - dim - 2ln(J)) + ½λ ln²(J) du
	template <typename T, int n_basis, int dim>
	T NeoHookeanElasticity::compute_energy_aux(const NonLinearAssemblerData &data) const
	{
		if constexpr (std::is_same_v<T, double>)
		{
			Eigen::Matrix<T, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
			local_disp.setZero();
			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				const auto &bs = data.vals.basis_values[i];
				for (size_t ii = 0; ii < bs.global.size(); ++ii)
				{
					for (int d = 0; d < dim; ++d)
					{
						local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
					}
				}
			}

			Eigen::VectorXd jacs;
			if (use_robust_jacobian)
				jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

			Eigen::Matrix<T, dim, dim> def_grad(size(), size());

			T energy = T(0.0);

			const int n_pts = data.da.size();
			for (long p = 0; p < n_pts; ++p)
			{
				Eigen::Matrix<T, n_basis, dim> grad(data.vals.basis_values.size(), size());

				for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
				{
					grad.row(i) = data.vals.basis_values[i].grad.row(p);
				}

				const Eigen::Matrix<T, dim, dim> jac_it = data.vals.jac_it[p];
				// Id + grad d
				def_grad = (local_disp.transpose() * grad) * jac_it + Eigen::Matrix<T, dim, dim>::Identity(size(), size());

				double lambda, mu;
				params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

				const T J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
				const T log_det_j = log(J);
				const T val = mu / 2 * (def_grad.squaredNorm() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;
				energy += val * data.da(p);
			}
			return energy;
		}
		else
		{
			typedef Eigen::Matrix<T, Eigen::Dynamic, 1> AutoDiffVect;
			typedef Eigen::Matrix<T, dim, dim> AutoDiffGradMat;

			AutoDiffVect local_disp;
			get_local_disp(data, size(), local_disp);

			AutoDiffGradMat def_grad(size(), size());

			T energy = T(0.0);

			const int n_pts = data.da.size();
			for (long p = 0; p < n_pts; ++p)
			{
				compute_disp_grad_at_quad(data, local_disp, p, size(), def_grad);

				// Id + grad d
				for (int d = 0; d < dim; ++d)
					def_grad(d, d) += T(1);

				double lambda, mu;
				params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

				const T log_det_j = log(polyfem::utils::determinant(def_grad));
				const T val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

				energy += val * data.da(p);
			}
			return energy;
		}
	}
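
	// compute_energy_aux has two code paths: for T = double it assembles the deformation
	// gradient with fixed-size matrices and (optionally) the robust Jacobian evaluation,
	// while for autodiff scalar types it goes through get_local_disp /
	// compute_disp_grad_at_quad so derivatives propagate through the same energy density
	//   Ψ(F) = μ/2 (‖F‖² - dim - 2 log J) + λ/2 log²(J).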

	template <int dim>
	Eigen::Matrix<double, dim, dim> hat(const Eigen::Matrix<double, dim, 1> &x)
	{
		Eigen::Matrix<double, dim, dim> prod;
		prod.setZero();

		prod(0, 1) = -x(2);
		prod(0, 2) = x(1);
		prod(1, 0) = x(2);
		prod(1, 2) = -x(0);
		prod(2, 0) = -x(1);
		prod(2, 1) = x(0);

		return prod;
	}

	template <int dim>
	Eigen::Matrix<double, dim, 1> cross(const Eigen::Matrix<double, dim, 1> &x, const Eigen::Matrix<double, dim, 1> &y)
	{
		Eigen::Matrix<double, dim, 1> z;
		z.setZero();

		z(0) = x(1) * y(2) - x(2) * y(1);
		z(1) = x(2) * y(0) - x(0) * y(2);
		z(2) = x(0) * y(1) - x(1) * y(0);

		return z;
	}
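
	// hat(x) is the skew-symmetric cross-product matrix (hat(x) * y == cross(x, y)); both
	// helpers are only meaningful for dim == 3. They are used below to build ∂J/∂F, whose
	// columns are cross products of the columns of F (the cofactor matrix of F), and the
	// second derivative ∂²J/∂F² of the determinant.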

	template <int n_basis, int dim>
	void NeoHookeanElasticity::compute_energy_aux_gradient_fast(const NonLinearAssemblerData &data, Eigen::Matrix<double, Eigen::Dynamic, 1> &G_flattened) const
	{
		assert(data.x.cols() == 1);

		const int n_pts = data.da.size();

		Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
		local_disp.setZero();
		for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
		{
			const auto &bs = data.vals.basis_values[i];
			for (size_t ii = 0; ii < bs.global.size(); ++ii)
			{
				for (int d = 0; d < dim; ++d)
				{
					local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
				}
			}
		}

		Eigen::VectorXd jacs;
		if (use_robust_jacobian)
			jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

		Eigen::Matrix<double, dim, dim> def_grad(size(), size());

		Eigen::Matrix<double, n_basis, dim> G(data.vals.basis_values.size(), size());
		G.setZero();

		for (long p = 0; p < n_pts; ++p)
		{
			Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				grad.row(i) = data.vals.basis_values[i].grad.row(p);
			}

			Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];
			Eigen::Matrix<double, n_basis, dim> delF_delU = grad * jac_it;

			// Id + grad d
			def_grad = local_disp.transpose() * delF_delU + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

			const double J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
			const double log_det_j = log(J);

			Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
			delJ_delF.setZero();

			if (dim == 2)
			{
				delJ_delF(0, 0) = def_grad(1, 1);
				delJ_delF(0, 1) = -def_grad(1, 0);
				delJ_delF(1, 0) = -def_grad(0, 1);
				delJ_delF(1, 1) = def_grad(0, 0);
			}
			else if (dim == 3)
			{
				Eigen::Matrix<double, dim, 1> u(def_grad.rows());
				Eigen::Matrix<double, dim, 1> v(def_grad.rows());
				Eigen::Matrix<double, dim, 1> w(def_grad.rows());

				u = def_grad.col(0);
				v = def_grad.col(1);
				w = def_grad.col(2);

				delJ_delF.col(0) = cross<dim>(v, w);
				delJ_delF.col(1) = cross<dim>(w, u);
				delJ_delF.col(2) = cross<dim>(u, v);
			}

			double lambda, mu;
			params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

			Eigen::Matrix<double, dim, dim> gradient_temp = mu * def_grad - mu * (1 / J) * delJ_delF + lambda * log_det_j * (1 / J) * delJ_delF;
			Eigen::Matrix<double, n_basis, dim> gradient = delF_delU * gradient_temp.transpose();

			double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

			G.noalias() += gradient * data.da(p);
		}

		Eigen::Matrix<double, dim, n_basis> G_T = G.transpose();

		constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
		Eigen::Matrix<double, N, 1> temp(Eigen::Map<Eigen::Matrix<double, N, 1>>(G_T.data(), G_T.size()));
		G_flattened = temp;
	}
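
	// The integrand above is the chain rule applied to the Neo-Hookean energy density:
	//   ∂Ψ/∂F = μ F + (λ log J - μ) / J * ∂J/∂F,
	// with ∂J/∂F the cofactor matrix of F; it is mapped to the local degrees of freedom
	// through delF_delU = grad * jac_it and accumulated with the quadrature weights data.da.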

	template <int n_basis, int dim>
	void NeoHookeanElasticity::compute_energy_hessian_aux_fast(const NonLinearAssemblerData &data, Eigen::MatrixXd &H) const
	{
		assert(data.x.cols() == 1);

		constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
		const int n_pts = data.da.size();

		Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
		local_disp.setZero();
		for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
		{
			const auto &bs = data.vals.basis_values[i];
			for (size_t ii = 0; ii < bs.global.size(); ++ii)
			{
				local_disp.row(i) += bs.global[ii].val * data.x.block<dim, 1>(bs.global[ii].index * size(), 0).transpose();
			}
		}

		Eigen::VectorXd jacs;
		if (use_robust_jacobian)
			jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

		Eigen::Matrix<double, dim, dim> def_grad(size(), size());

		for (long p = 0; p < n_pts; ++p)
		{
			Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				grad.row(i) = data.vals.basis_values[i].grad.row(p);
			}

			Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];
			const Eigen::Matrix<double, n_basis, dim> delF_delU = grad * jac_it;

			// Id + grad d
			def_grad = local_disp.transpose() * delF_delU + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

			const double J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
			double log_det_j = log(J);

			Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
			delJ_delF.setZero();
			Eigen::Matrix<double, dim * dim, dim * dim> del2J_delF2(size() * size(), size() * size());
			del2J_delF2.setZero();

			if (dim == 2)
			{
				delJ_delF(0, 0) = def_grad(1, 1);
				delJ_delF(0, 1) = -def_grad(1, 0);
				delJ_delF(1, 0) = -def_grad(0, 1);
				delJ_delF(1, 1) = def_grad(0, 0);

				del2J_delF2(0, 3) = 1;
				del2J_delF2(1, 2) = -1;
				del2J_delF2(2, 1) = -1;
				del2J_delF2(3, 0) = 1;
			}
			else if (size() == 3)
			{
				Eigen::Matrix<double, dim, 1> u(def_grad.rows());
				Eigen::Matrix<double, dim, 1> v(def_grad.rows());
				Eigen::Matrix<double, dim, 1> w(def_grad.rows());

				u = def_grad.col(0);
				v = def_grad.col(1);
				w = def_grad.col(2);

				delJ_delF.col(0) = cross<dim>(v, w);
				delJ_delF.col(1) = cross<dim>(w, u);
				delJ_delF.col(2) = cross<dim>(u, v);

				del2J_delF2.template block<dim, dim>(0, 6) = hat<dim>(v);
				del2J_delF2.template block<dim, dim>(6, 0) = -hat<dim>(v);
				del2J_delF2.template block<dim, dim>(0, 3) = -hat<dim>(w);
				del2J_delF2.template block<dim, dim>(3, 0) = hat<dim>(w);
				del2J_delF2.template block<dim, dim>(3, 6) = -hat<dim>(u);
				del2J_delF2.template block<dim, dim>(6, 3) = hat<dim>(u);
			}

			double lambda, mu;
			params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

			Eigen::Matrix<double, dim * dim, dim * dim> id = Eigen::Matrix<double, dim * dim, dim * dim>::Identity(size() * size(), size() * size());

			Eigen::Matrix<double, dim * dim, 1> g_j = Eigen::Map<const Eigen::Matrix<double, dim * dim, 1>>(delJ_delF.data(), delJ_delF.size());

			Eigen::Matrix<double, dim * dim, dim * dim> hessian_temp = (mu * id) + (((mu + lambda * (1 - log_det_j)) / (J * J)) * (g_j * g_j.transpose())) + (((lambda * log_det_j - mu) / (J)) * del2J_delF2);

			Eigen::Matrix<double, dim * dim, N> delF_delU_tensor = Eigen::Matrix<double, dim * dim, N>::Zero(jac_it.size(), grad.size());

			for (size_t i = 0; i < local_disp.rows(); ++i)
			{
				for (size_t j = 0; j < local_disp.cols(); ++j)
				{
					Eigen::Matrix<double, dim, dim> temp(size(), size());
					temp.setZero();
					temp.row(j) = delF_delU.row(i);
					Eigen::Matrix<double, dim * dim, 1> temp_flattened(Eigen::Map<Eigen::Matrix<double, dim * dim, 1>>(temp.data(), temp.size()));
					delF_delU_tensor.col(i * size() + j) = temp_flattened;
				}
			}

			Eigen::Matrix<double, N, N> hessian = delF_delU_tensor.transpose() * hessian_temp * delF_delU_tensor;

			double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

			H += hessian * data.da(p);
		}
	}
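
	// hessian_temp above is the second derivative of the energy density with respect to F,
	// written on the d²-dimensional flattened space:
	//   ∂²Ψ/∂F² = μ I + (μ + λ (1 - log J)) / J² * (gJ gJᵀ) + (λ log J - μ) / J * ∂²J/∂F²,
	// where gJ is the flattened ∂J/∂F; delF_delU_tensor then maps it onto the element DOFs.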

	void NeoHookeanElasticity::compute_stress_grad_multiply_mat(
		const OptAssemblerData &data,
		const Eigen::MatrixXd &mat,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result = mu * mat + FmT * mat.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * mat.array()).sum() * FmT;
	}
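
	// result is the directional derivative of P contracted with mat, i.e. the same
	// elasticity tensor as in compute_stiffness_value applied to M:
	//   (∂P/∂F : M) = μ M + (μ - λ log J) F⁻ᵀ Mᵀ F⁻ᵀ + λ (F⁻ᵀ : M) F⁻ᵀ.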

	void NeoHookeanElasticity::compute_stress_grad_multiply_stress(
		const OptAssemblerData &data,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result = mu * stress + FmT * stress.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * stress.array()).sum() * FmT;
	}
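
	// Same contraction as compute_stress_grad_multiply_mat, specialized to M = stress,
	// i.e. the elasticity tensor applied to the stress itself.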

	void NeoHookeanElasticity::compute_stress_grad_multiply_vect(
		const OptAssemblerData &data,
		const Eigen::MatrixXd &vect,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result.setZero(size() * size(), size());
		if (vect.rows() == 1)
			for (int i = 0; i < size(); ++i)
				for (int j = 0; j < size(); ++j)
					for (int l = 0; l < size(); ++l)
					{
						result(i * size() + j, l) += mu * vect(i) * ((j == l) ? 1 : 0);
						// For some reason, the second gives lower error. From the formula, though, it should be the following.
						// result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(j, l) * (FmT.col(i).array() * vect.transpose().array()).sum();
						result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(i, l) * (FmT.col(j).array() * vect.transpose().array()).sum();
						result(i * size() + j, l) += lambda * FmT(i, j) * (FmT.col(l).array() * vect.transpose().array()).sum();
					}
		else
			for (int i = 0; i < size(); ++i)
				for (int j = 0; j < size(); ++j)
					for (int k = 0; k < size(); ++k)
					{
						result(i * size() + j, k) += mu * vect(j) * ((i == k) ? 1 : 0);
						// For some reason, the second gives lower error. From the formula, though, it should be the following.
						// result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, j) * (FmT.row(i).array() * vect.transpose().array()).sum();
						result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, i) * (FmT.row(j).array() * vect.transpose().array()).sum();
						result(i * size() + j, k) += lambda * FmT(i, j) * (FmT.row(k).array() * vect.transpose().array()).sum();
					}
	}

	void NeoHookeanElasticity::compute_dstress_dmu_dlambda(
		const OptAssemblerData &data,
		Eigen::MatrixXd &dstress_dmu,
		Eigen::MatrixXd &dstress_dlambda) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();
		dstress_dmu = def_grad - FmT;
		dstress_dlambda = std::log(def_grad.determinant()) * FmT;
	}
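
	// Since P = μ (F - F⁻ᵀ) + λ log(J) F⁻ᵀ, the derivatives with respect to the material
	// parameters are simply ∂P/∂μ = F - F⁻ᵀ and ∂P/∂λ = log(J) F⁻ᵀ; these feed the
	// material-parameter sensitivities used with OptAssemblerData.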

	std::map<std::string, Assembler::ParamFunc> NeoHookeanElasticity::parameters() const
	{
		std::map<std::string, ParamFunc> res;
		const auto &params = params_;
		const int size = this->size();

		res["lambda"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);
			return lambda;
		};

		res["mu"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);
			return mu;
		};

		res["E"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;
			params.lambda_mu(uv, p, t, e, lambda, mu);

			if (size == 3)
				return mu * (3.0 * lambda + 2.0 * mu) / (lambda + mu);
			else
				return 2 * mu * (2.0 * lambda + 2.0 * mu) / (lambda + 2.0 * mu);
		};

		res["nu"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);

			if (size == 3)
				return lambda / (2.0 * (lambda + mu));
			else
				return lambda / (lambda + 2.0 * mu);
		};

		return res;
	}
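
	// The lambdas above convert the Lamé parameters to Young's modulus and Poisson's ratio:
	// in 3D, E = μ (3λ + 2μ) / (λ + μ) and ν = λ / (2 (λ + μ)); in 2D (with the convention
	// λ = E ν / (1 - ν²), μ = E / (2 (1 + ν))), E = 4μ (λ + μ) / (λ + 2μ) and ν = λ / (λ + 2μ).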

} // namespace polyfem::assembler