PolyFEM
NeoHookeanElasticity.cpp
#include <type_traits>

namespace polyfem::assembler
{
	namespace
	{
		bool delta(int i, int j)
		{
			return i == j;
		}
	} // namespace

	void NeoHookeanElasticity::add_multimaterial(const int index, const json &params, const Units &units)
	{
		assert(size() == 2 || size() == 3);

		params_.add_multimaterial(index, params, size() == 3, units.stress());
	}

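	// Builds the elasticity right-hand side at an autodiff point: the material
	// parameters are sampled at the point's value and the autogenerated
	// neo_hookean_{2d,3d}_function routines evaluate the closed-form expression.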
	Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1>
	NeoHookeanElasticity::compute_rhs(const AutodiffHessianPt &pt) const
	{
		assert(pt.size() == size());
		Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1> res;

		double lambda, mu;
		// TODO!
		params_.lambda_mu(0, 0, 0, pt(0).getValue(), pt(1).getValue(), size() == 2 ? 0. : pt(2).getValue(), 0, 0, lambda, mu);

		if (size() == 2)
			autogen::neo_hookean_2d_function(pt, lambda, mu, res);
		else if (size() == 3)
			autogen::neo_hookean_3d_function(pt, lambda, mu, res);
		else
			assert(false);

		return res;
	}

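	// Assembles the local energy gradient. The switch instantiates the fast
	// fixed-size path for the common local bases (3/6/10 nodes in 2D,
	// 4/10/20 nodes in 3D) and falls back to Eigen::Dynamic otherwise.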
	Eigen::VectorXd
	NeoHookeanElasticity::assemble_gradient(const NonLinearAssemblerData &data) const
	{
		Eigen::Matrix<double, Eigen::Dynamic, 1> gradient;

		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
			{
				gradient.resize(6);
				compute_energy_aux_gradient_fast<3, 2>(data, gradient);
				break;
			}
			case 6:
			{
				gradient.resize(12);
				compute_energy_aux_gradient_fast<6, 2>(data, gradient);
				break;
			}
			case 10:
			{
				gradient.resize(20);
				compute_energy_aux_gradient_fast<10, 2>(data, gradient);
				break;
			}
			default:
			{
				gradient.resize(data.vals.basis_values.size() * 2);
				compute_energy_aux_gradient_fast<Eigen::Dynamic, 2>(data, gradient);
				break;
			}
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
			{
				gradient.resize(12);
				compute_energy_aux_gradient_fast<4, 3>(data, gradient);
				break;
			}
			case 10:
			{
				gradient.resize(30);
				compute_energy_aux_gradient_fast<10, 3>(data, gradient);
				break;
			}
			case 20:
			{
				gradient.resize(60);
				compute_energy_aux_gradient_fast<20, 3>(data, gradient);
				break;
			}
			default:
			{
				gradient.resize(data.vals.basis_values.size() * 3);
				compute_energy_aux_gradient_fast<Eigen::Dynamic, 3>(data, gradient);
				break;
			}
			}
		}

		return gradient;
	}

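	// Evaluates, at each local point, the flattened fourth-order tensor dP/dF of the
	// first Piola-Kirchhoff stress P = mu (F - F^{-T}) + lambda log(J) F^{-T}:
	//   dP_ij/dF_kl = mu delta_ik delta_jl
	//               + (mu - lambda log(J)) F^{-T}_il F^{-T}_kj
	//               + lambda F^{-T}_ij F^{-T}_kl
	// One row of `tensor` stores the size^4 entries for one quadrature point.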
	void NeoHookeanElasticity::compute_stiffness_value(
		const double t,
		const assembler::ElementAssemblyValues &vals,
		const Eigen::MatrixXd &local_pts,
		const Eigen::MatrixXd &displacement,
		Eigen::MatrixXd &tensor) const
	{
		tensor.resize(local_pts.rows(), size() * size() * size() * size());
		assert(displacement.cols() == 1);

		Eigen::MatrixXd displacement_grad(size(), size());

		for (long p = 0; p < local_pts.rows(); ++p)
		{
			double lambda, mu;
			params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

			compute_diplacement_grad(size(), vals, local_pts, p, displacement, displacement_grad);
			const Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(size(), size()) + displacement_grad;
			const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
			const Eigen::VectorXd FmT_vec = utils::flatten(FmT);
			const double J = def_grad.determinant();
			const double tmp1 = mu - lambda * std::log(J);
			for (int i = 0, idx = 0; i < size(); i++)
				for (int j = 0; j < size(); j++)
					for (int k = 0; k < size(); k++)
						for (int l = 0; l < size(); l++)
						{
							tensor(p, idx) = mu * delta(i, k) * delta(j, l) + tmp1 * FmT(i, l) * FmT(k, j);
							idx++;
						}

			tensor.row(p) += lambda * utils::flatten(FmT_vec * FmT_vec.transpose());

			// {
			// 	Eigen::MatrixXd hess = utils::unflatten(tensor.row(p), size() * size());
			// 	Eigen::MatrixXd fhess;
			// 	Eigen::VectorXd x0 = utils::flatten(def_grad);
			// 	fd::finite_jacobian(
			// 		x0, [this, lambda, mu](const Eigen::VectorXd &x1) -> Eigen::VectorXd {
			// 			Eigen::MatrixXd def_grad = utils::unflatten(x1, this->size());
			// 			const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
			// 			const double J = def_grad.determinant();
			// 			Eigen::MatrixXd stress_tensor = mu * (def_grad - FmT) + lambda * std::log(J) * FmT;
			// 			return utils::flatten(stress_tensor);
			// 		},
			// 		fhess);

			// 	if (!fd::compare_hessian(hess, fhess))
			// 	{
			// 		std::cout << "Hessian: " << hess << std::endl;
			// 		std::cout << "Finite hessian: " << fhess << std::endl;
			// 		log_and_throw_error("Hessian in Neohookean mismatch");
			// 	}
			// }
		}
	}

	Eigen::MatrixXd
	NeoHookeanElasticity::assemble_hessian(const NonLinearAssemblerData &data) const
	{
		Eigen::MatrixXd hessian;

		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
			{
				hessian.resize(6, 6);
				hessian.setZero();
				compute_energy_hessian_aux_fast<3, 2>(data, hessian);
				break;
			}
			case 6:
			{
				hessian.resize(12, 12);
				hessian.setZero();
				compute_energy_hessian_aux_fast<6, 2>(data, hessian);
				break;
			}
			case 10:
			{
				hessian.resize(20, 20);
				hessian.setZero();
				compute_energy_hessian_aux_fast<10, 2>(data, hessian);
				break;
			}
			default:
			{
				hessian.resize(data.vals.basis_values.size() * 2, data.vals.basis_values.size() * 2);
				hessian.setZero();
				compute_energy_hessian_aux_fast<Eigen::Dynamic, 2>(data, hessian);
				break;
			}
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
			{
				hessian.resize(12, 12);
				hessian.setZero();
				compute_energy_hessian_aux_fast<4, 3>(data, hessian);
				break;
			}
			case 10:
			{
				hessian.resize(30, 30);
				hessian.setZero();
				compute_energy_hessian_aux_fast<10, 3>(data, hessian);
				break;
			}
			case 20:
			{
				hessian.resize(60, 60);
				hessian.setZero();
				compute_energy_hessian_aux_fast<20, 3>(data, hessian);
				break;
			}
			default:
			{
				hessian.resize(data.vals.basis_values.size() * 3, data.vals.basis_values.size() * 3);
				hessian.setZero();
				compute_energy_hessian_aux_fast<Eigen::Dynamic, 3>(data, hessian);
				break;
			}
			}
		}

		return hessian;
	}

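	// Evaluates a stress-derived quantity at every local point. The Cauchy stress is
	//   sigma = (mu (b - I) + lambda log(J) I) / J,  with b = F F^T and J = det(F),
	// optionally converted to PK1/PK2 (or the raw deformation gradient F is used),
	// and `fun` maps the resulting matrix to one output row of `all`.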
	void NeoHookeanElasticity::assign_stress_tensor(
		const OutputData &data,
		const int all_size,
		const ElasticityTensorType &type,
		Eigen::MatrixXd &all,
		const std::function<Eigen::MatrixXd(const Eigen::MatrixXd &)> &fun) const
	{
		const auto &displacement = data.fun;
		const auto &local_pts = data.local_pts;
		const auto &bs = data.bs;
		const auto &gbs = data.gbs;
		const auto el_id = data.el_id;
		const auto t = data.t;

		Eigen::MatrixXd displacement_grad(size(), size());

		assert(displacement.cols() == 1);

		all.resize(local_pts.rows(), all_size);

		ElementAssemblyValues vals;
		vals.compute(el_id, size() == 3, local_pts, bs, gbs);
		const auto I = Eigen::MatrixXd::Identity(size(), size());

		for (long p = 0; p < local_pts.rows(); ++p)
		{
			compute_diplacement_grad(size(), bs, vals, local_pts, p, displacement, displacement_grad);

			const Eigen::MatrixXd def_grad = I + displacement_grad;
			if (type == ElasticityTensorType::F)
			{
				all.row(p) = fun(def_grad);
				continue;
			}
			const double J = def_grad.determinant();
			const Eigen::MatrixXd b = def_grad * def_grad.transpose();

			double lambda, mu;
			params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

			Eigen::MatrixXd stress_tensor = (lambda * std::log(J) * I + mu * (b - I)) / J;
			if (type == ElasticityTensorType::PK1)
				stress_tensor = pk1_from_cauchy(stress_tensor, def_grad);
			else if (type == ElasticityTensorType::PK2)
				stress_tensor = pk2_from_cauchy(stress_tensor, def_grad);

			all.row(p) = fun(stress_tensor);
		}
	}

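	// Total elastic energy of one element; dispatches to the fixed-size
	// specializations of compute_energy_aux, mirroring assemble_gradient.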
	double NeoHookeanElasticity::compute_energy(const NonLinearAssemblerData &data) const
	{
		if (size() == 2)
		{
			switch (data.vals.basis_values.size())
			{
			case 3:
				return compute_energy_aux<double, 3, 2>(data);
			case 6:
				return compute_energy_aux<double, 6, 2>(data);
			case 10:
				return compute_energy_aux<double, 10, 2>(data);
			default:
				return compute_energy_aux<double, Eigen::Dynamic, 2>(data);
			}
		}
		else // if (size() == 3)
		{
			assert(size() == 3);
			switch (data.vals.basis_values.size())
			{
			case 4:
				return compute_energy_aux<double, 4, 3>(data);
			case 10:
				return compute_energy_aux<double, 10, 3>(data);
			case 20:
				return compute_energy_aux<double, 20, 3>(data);
			default:
				return compute_energy_aux<double, Eigen::Dynamic, 3>(data);
			}
		}
	}

	// Compute ∫ ½μ (tr(FᵀF) - dim - 2 ln(J)) + ½λ ln²(J) du, with F = I + ∇u and J = det(F)
	template <typename T, int n_basis, int dim>
	T NeoHookeanElasticity::compute_energy_aux(const NonLinearAssemblerData &data) const
	{
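		// The T = double branch evaluates the energy with plain fixed-size matrices;
		// the generic branch keeps every intermediate in the autodiff scalar type T,
		// so derivatives can be propagated through the same expression.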
		if constexpr (std::is_same_v<T, double>)
		{
			Eigen::Matrix<T, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
			local_disp.setZero();
			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				const auto &bs = data.vals.basis_values[i];
				for (size_t ii = 0; ii < bs.global.size(); ++ii)
				{
					for (int d = 0; d < dim; ++d)
					{
						local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
					}
				}
			}

			Eigen::VectorXd jacs;
			if (use_robust_jacobian)
				jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

			Eigen::Matrix<T, dim, dim> def_grad(size(), size());

			T energy = T(0.0);

			const int n_pts = data.da.size();
			for (long p = 0; p < n_pts; ++p)
			{
				Eigen::Matrix<T, n_basis, dim> grad(data.vals.basis_values.size(), size());

				for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
				{
					grad.row(i) = data.vals.basis_values[i].grad.row(p);
				}

				const Eigen::Matrix<T, dim, dim> jac_it = data.vals.jac_it[p];
				// Id + grad d
				def_grad = (local_disp.transpose() * grad) * jac_it + Eigen::Matrix<T, dim, dim>::Identity(size(), size());

				double lambda, mu;
				params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

				const T J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
				const T log_det_j = log(J);
				const T val = mu / 2 * (def_grad.squaredNorm() - size() - 2 * log_det_j) +
							  lambda / 2 * log_det_j * log_det_j;
				energy += val * data.da(p);
			}
			return energy;
		}
		else
		{
			typedef Eigen::Matrix<T, Eigen::Dynamic, 1> AutoDiffVect;
			typedef Eigen::Matrix<T, dim, dim> AutoDiffGradMat;

			AutoDiffVect local_disp;
			get_local_disp(data, size(), local_disp);

			AutoDiffGradMat def_grad(size(), size());

			T energy = T(0.0);

			const int n_pts = data.da.size();
			for (long p = 0; p < n_pts; ++p)
			{
				compute_disp_grad_at_quad(data, local_disp, p, size(), def_grad);

				// Id + grad d
				for (int d = 0; d < dim; ++d)
					def_grad(d, d) += T(1);

				double lambda, mu;
				params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

				const T log_det_j = log(polyfem::utils::determinant(def_grad));
				const T val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

				energy += val * data.da(p);
			}
			return energy;
		}
	}

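	// hat(x): skew-symmetric matrix such that hat(x) * y == cross(x, y); only meaningful for dim == 3.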
	template <int dim>
	Eigen::Matrix<double, dim, dim> hat(const Eigen::Matrix<double, dim, 1> &x)
	{
		Eigen::Matrix<double, dim, dim> prod;
		prod.setZero();

		prod(0, 1) = -x(2);
		prod(0, 2) = x(1);
		prod(1, 0) = x(2);
		prod(1, 2) = -x(0);
		prod(2, 0) = -x(1);
		prod(2, 1) = x(0);

		return prod;
	}

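	// Standard 3D cross product, written out component-wise.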
	template <int dim>
	Eigen::Matrix<double, dim, 1> cross(const Eigen::Matrix<double, dim, 1> &x, const Eigen::Matrix<double, dim, 1> &y)
	{
		Eigen::Matrix<double, dim, 1> z;
		z.setZero();

		z(0) = x(1) * y(2) - x(2) * y(1);
		z(1) = x(2) * y(0) - x(0) * y(2);
		z(2) = x(0) * y(1) - x(1) * y(0);

		return z;
	}

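	// Per-element gradient of the energy with respect to the nodal displacements.
	// Uses dW/dF = mu F + (lambda log(J) - mu) / J * dJ/dF, where dJ/dF is the
	// cofactor matrix of F, and chains it through dF/du = grad(phi) * jac_it
	// for each local basis function.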
	template <int n_basis, int dim>
	void NeoHookeanElasticity::compute_energy_aux_gradient_fast(const NonLinearAssemblerData &data, Eigen::Matrix<double, Eigen::Dynamic, 1> &G_flattened) const
	{
		assert(data.x.cols() == 1);

		const int n_pts = data.da.size();

		Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
		local_disp.setZero();
		for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
		{
			const auto &bs = data.vals.basis_values[i];
			for (size_t ii = 0; ii < bs.global.size(); ++ii)
			{
				for (int d = 0; d < dim; ++d)
				{
					local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
				}
			}
		}

		Eigen::VectorXd jacs;
		if (use_robust_jacobian)
			jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

		Eigen::Matrix<double, dim, dim> def_grad(size(), size());

		Eigen::Matrix<double, n_basis, dim> G(data.vals.basis_values.size(), size());
		G.setZero();

		for (long p = 0; p < n_pts; ++p)
		{
			Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				grad.row(i) = data.vals.basis_values[i].grad.row(p);
			}

			Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];
			Eigen::Matrix<double, n_basis, dim> delF_delU = grad * jac_it;

			// Id + grad d
			def_grad = local_disp.transpose() * delF_delU + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

			const double J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
			const double log_det_j = log(J);

			Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
			delJ_delF.setZero();

			if (dim == 2)
			{
				delJ_delF(0, 0) = def_grad(1, 1);
				delJ_delF(0, 1) = -def_grad(1, 0);
				delJ_delF(1, 0) = -def_grad(0, 1);
				delJ_delF(1, 1) = def_grad(0, 0);
			}
			else if (dim == 3)
			{
				Eigen::Matrix<double, dim, 1> u(def_grad.rows());
				Eigen::Matrix<double, dim, 1> v(def_grad.rows());
				Eigen::Matrix<double, dim, 1> w(def_grad.rows());

				u = def_grad.col(0);
				v = def_grad.col(1);
				w = def_grad.col(2);

				delJ_delF.col(0) = cross<dim>(v, w);
				delJ_delF.col(1) = cross<dim>(w, u);
				delJ_delF.col(2) = cross<dim>(u, v);
			}

			double lambda, mu;
			params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

			Eigen::Matrix<double, dim, dim> gradient_temp = mu * def_grad - mu * (1 / J) * delJ_delF + lambda * log_det_j * (1 / J) * delJ_delF;
			Eigen::Matrix<double, n_basis, dim> gradient = delF_delU * gradient_temp.transpose();

			double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

			G.noalias() += gradient * data.da(p);
		}

		Eigen::Matrix<double, dim, n_basis> G_T = G.transpose();

		constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
		Eigen::Matrix<double, N, 1> temp(Eigen::Map<Eigen::Matrix<double, N, 1>>(G_T.data(), G_T.size()));
		G_flattened = temp;
	}

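	// Per-element Hessian of the energy with respect to the nodal displacements.
	// In flattened form, d2W/dF2 = mu I
	//   + (mu + lambda (1 - log(J))) / J^2 * (g g^T), with g = vec(dJ/dF),
	//   + (lambda log(J) - mu) / J * d2J/dF2,
	// projected onto the basis through the dF/du tensor built below.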
	template <int n_basis, int dim>
	void NeoHookeanElasticity::compute_energy_hessian_aux_fast(const NonLinearAssemblerData &data, Eigen::MatrixXd &H) const
	{
		assert(data.x.cols() == 1);

		constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
		const int n_pts = data.da.size();

		Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
		local_disp.setZero();
		for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
		{
			const auto &bs = data.vals.basis_values[i];
			for (size_t ii = 0; ii < bs.global.size(); ++ii)
			{
				local_disp.row(i) += bs.global[ii].val * data.x.block<dim, 1>(bs.global[ii].index * size(), 0).transpose();
			}
		}

		Eigen::VectorXd jacs;
		if (use_robust_jacobian)
			jacs = data.vals.eval_deformed_jacobian_determinant(data.x);

		Eigen::Matrix<double, dim, dim> def_grad(size(), size());

		for (long p = 0; p < n_pts; ++p)
		{
			Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

			for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
			{
				grad.row(i) = data.vals.basis_values[i].grad.row(p);
			}

			Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];
			const Eigen::Matrix<double, n_basis, dim> delF_delU = grad * jac_it;

			// Id + grad d
			def_grad = local_disp.transpose() * delF_delU + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

			const double J = use_robust_jacobian ? jacs(p) * jac_it.determinant() : def_grad.determinant();
			double log_det_j = log(J);

			Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
			delJ_delF.setZero();
			Eigen::Matrix<double, dim * dim, dim * dim> del2J_delF2(size() * size(), size() * size());
			del2J_delF2.setZero();

			if (dim == 2)
			{
				delJ_delF(0, 0) = def_grad(1, 1);
				delJ_delF(0, 1) = -def_grad(1, 0);
				delJ_delF(1, 0) = -def_grad(0, 1);
				delJ_delF(1, 1) = def_grad(0, 0);

				del2J_delF2(0, 3) = 1;
				del2J_delF2(1, 2) = -1;
				del2J_delF2(2, 1) = -1;
				del2J_delF2(3, 0) = 1;
			}
			else if (size() == 3)
			{
				Eigen::Matrix<double, dim, 1> u(def_grad.rows());
				Eigen::Matrix<double, dim, 1> v(def_grad.rows());
				Eigen::Matrix<double, dim, 1> w(def_grad.rows());

				u = def_grad.col(0);
				v = def_grad.col(1);
				w = def_grad.col(2);

				delJ_delF.col(0) = cross<dim>(v, w);
				delJ_delF.col(1) = cross<dim>(w, u);
				delJ_delF.col(2) = cross<dim>(u, v);

				del2J_delF2.template block<dim, dim>(0, 6) = hat<dim>(v);
				del2J_delF2.template block<dim, dim>(6, 0) = -hat<dim>(v);
				del2J_delF2.template block<dim, dim>(0, 3) = -hat<dim>(w);
				del2J_delF2.template block<dim, dim>(3, 0) = hat<dim>(w);
				del2J_delF2.template block<dim, dim>(3, 6) = -hat<dim>(u);
				del2J_delF2.template block<dim, dim>(6, 3) = hat<dim>(u);
			}

			double lambda, mu;
			params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

			Eigen::Matrix<double, dim * dim, dim * dim> id = Eigen::Matrix<double, dim * dim, dim * dim>::Identity(size() * size(), size() * size());

			Eigen::Matrix<double, dim * dim, 1> g_j = Eigen::Map<const Eigen::Matrix<double, dim * dim, 1>>(delJ_delF.data(), delJ_delF.size());

			Eigen::Matrix<double, dim * dim, dim * dim> hessian_temp = (mu * id) + (((mu + lambda * (1 - log_det_j)) / (J * J)) * (g_j * g_j.transpose())) + (((lambda * log_det_j - mu) / (J)) * del2J_delF2);

			Eigen::Matrix<double, dim * dim, N> delF_delU_tensor = Eigen::Matrix<double, dim * dim, N>::Zero(jac_it.size(), grad.size());

			for (size_t i = 0; i < local_disp.rows(); ++i)
			{
				for (size_t j = 0; j < local_disp.cols(); ++j)
				{
					Eigen::Matrix<double, dim, dim> temp(size(), size());
					temp.setZero();
					temp.row(j) = delF_delU.row(i);
					Eigen::Matrix<double, dim * dim, 1> temp_flattened(Eigen::Map<Eigen::Matrix<double, dim * dim, 1>>(temp.data(), temp.size()));
					delF_delU_tensor.col(i * size() + j) = temp_flattened;
				}
			}

			Eigen::Matrix<double, N, N> hessian = delF_delU_tensor.transpose() * hessian_temp * delF_delU_tensor;

			double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

			H += hessian * data.da(p);
		}
	}

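	// First Piola-Kirchhoff stress P = mu (F - F^{-T}) + lambda log(J) F^{-T} and the
	// directional derivative of P applied to an arbitrary matrix `mat`:
	//   (dP/dF : mat) = mu mat + (mu - lambda log(J)) F^{-T} mat^T F^{-T} + lambda (F^{-T} : mat) F^{-T}.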
	void NeoHookeanElasticity::compute_stress_grad_multiply_mat(
		const OptAssemblerData &data,
		const Eigen::MatrixXd &mat,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result = mu * mat + FmT * mat.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * mat.array()).sum() * FmT;
	}

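	// Same as compute_stress_grad_multiply_mat, with the stress itself as the direction.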
	void NeoHookeanElasticity::compute_stress_grad_multiply_stress(
		const OptAssemblerData &data,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result = mu * stress + FmT * stress.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * stress.array()).sum() * FmT;
	}

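	// Contracts dP/dF with a vector along one index (row or column, depending on the
	// shape of `vect`), producing a (size^2 x size) matrix of directional derivatives.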
	void NeoHookeanElasticity::compute_stress_grad_multiply_vect(
		const OptAssemblerData &data,
		const Eigen::MatrixXd &vect,
		Eigen::MatrixXd &stress,
		Eigen::MatrixXd &result) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		double lambda, mu;
		params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();

		stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
		result.setZero(size() * size(), size());
		if (vect.rows() == 1)
			for (int i = 0; i < size(); ++i)
				for (int j = 0; j < size(); ++j)
					for (int l = 0; l < size(); ++l)
					{
						result(i * size() + j, l) += mu * vect(i) * ((j == l) ? 1 : 0);
						// The formula suggests the commented-out line below, but the uncommented
						// variant empirically gives lower error.
						// result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(j, l) * (FmT.col(i).array() * vect.transpose().array()).sum();
						result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(i, l) * (FmT.col(j).array() * vect.transpose().array()).sum();
						result(i * size() + j, l) += lambda * FmT(i, j) * (FmT.col(l).array() * vect.transpose().array()).sum();
					}
		else
			for (int i = 0; i < size(); ++i)
				for (int j = 0; j < size(); ++j)
					for (int k = 0; k < size(); ++k)
					{
						result(i * size() + j, k) += mu * vect(j) * ((i == k) ? 1 : 0);
						// The formula suggests the commented-out line below, but the uncommented
						// variant empirically gives lower error.
						// result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, j) * (FmT.row(i).array() * vect.transpose().array()).sum();
						result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, i) * (FmT.row(j).array() * vect.transpose().array()).sum();
						result(i * size() + j, k) += lambda * FmT(i, j) * (FmT.row(k).array() * vect.transpose().array()).sum();
					}
	}

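	// Derivatives of the first Piola-Kirchhoff stress with respect to the material
	// parameters: dP/dmu = F - F^{-T} and dP/dlambda = log(J) F^{-T}.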
	void NeoHookeanElasticity::compute_dstress_dmu_dlambda(
		const OptAssemblerData &data,
		Eigen::MatrixXd &dstress_dmu,
		Eigen::MatrixXd &dstress_dlambda) const
	{
		const double t = data.t;
		const int el_id = data.el_id;
		const Eigen::MatrixXd &local_pts = data.local_pts;
		const Eigen::MatrixXd &global_pts = data.global_pts;
		const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

		Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
		Eigen::MatrixXd FmT = def_grad.inverse().transpose();
		dstress_dmu = def_grad - FmT;
		dstress_dlambda = std::log(def_grad.determinant()) * FmT;
	}

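	// Exposes the material fields as evaluable parameters: lambda, mu, and the derived
	// Young's modulus E and Poisson's ratio nu. The 3D branch uses the standard
	// conversions E = mu (3 lambda + 2 mu) / (lambda + mu), nu = lambda / (2 (lambda + mu));
	// the 2D branch inverts the lambda = E nu / (1 - nu^2), mu = E / (2 (1 + nu)) convention.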
	std::map<std::string, Assembler::ParamFunc> NeoHookeanElasticity::parameters() const
	{
		std::map<std::string, ParamFunc> res;
		const auto &params = params_;
		const int size = this->size();

		res["lambda"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);
			return lambda;
		};

		res["mu"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);
			return mu;
		};

		res["E"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;
			params.lambda_mu(uv, p, t, e, lambda, mu);

			if (size == 3)
				return mu * (3.0 * lambda + 2.0 * mu) / (lambda + mu);
			else
				return 2 * mu * (2.0 * lambda + 2.0 * mu) / (lambda + 2.0 * mu);
		};

		res["nu"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
			double lambda, mu;

			params.lambda_mu(uv, p, t, e, lambda, mu);

			if (size == 3)
				return lambda / (2.0 * (lambda + mu));
			else
				return lambda / (lambda + 2.0 * mu);
		};

		return res;
	}
} // namespace polyfem::assembler