NeoHookeanElasticity.cpp
#include "NeoHookeanElasticity.hpp"

#include <polyfem/autogen/auto_elasticity_rhs.hpp>

namespace polyfem::assembler
{
    namespace
    {
        bool delta(int i, int j)
        {
            return i == j;
        }
    } // namespace

    NeoHookeanElasticity::NeoHookeanElasticity()
    {
    }

    void NeoHookeanElasticity::add_multimaterial(const int index, const json &params, const Units &units)
    {
        assert(size() == 2 || size() == 3);

        params_.add_multimaterial(index, params, size() == 3, units.stress());
    }

    Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1>
    NeoHookeanElasticity::compute_rhs(const AutodiffHessianPt &pt) const
    {
        assert(pt.size() == size());
        Eigen::Matrix<double, Eigen::Dynamic, 1, 0, 3, 1> res;

        double lambda, mu;
        // TODO! local point, time, and element id are hard-coded to zero here
        params_.lambda_mu(0, 0, 0, pt(0).getValue(), pt(1).getValue(), size() == 2 ? 0. : pt(2).getValue(), 0, 0, lambda, mu);

        if (size() == 2)
            autogen::neo_hookean_2d_function(pt, lambda, mu, res);
        else if (size() == 3)
            autogen::neo_hookean_3d_function(pt, lambda, mu, res);
        else
            assert(false);

        return res;
    }

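    // NOTE: assemble_gradient below dispatches on the number of local basis functions so
    // that the hot path runs with compile-time matrix sizes: 3/6/10 nodes (likely P1/P2/P3
    // triangles) in 2D and 4/10/20 nodes (likely P1/P2/P3 tets) in 3D, with an
    // Eigen::Dynamic fallback for any other element type. The fixed sizes only affect the
    // Eigen types, not the math.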
    Eigen::VectorXd
    NeoHookeanElasticity::assemble_gradient(const NonLinearAssemblerData &data) const
    {
        Eigen::Matrix<double, Eigen::Dynamic, 1> gradient;

        if (size() == 2)
        {
            switch (data.vals.basis_values.size())
            {
            case 3:
            {
                gradient.resize(6);
                compute_energy_aux_gradient_fast<3, 2>(data, gradient);
                break;
            }
            case 6:
            {
                gradient.resize(12);
                compute_energy_aux_gradient_fast<6, 2>(data, gradient);
                break;
            }
            case 10:
            {
                gradient.resize(20);
                compute_energy_aux_gradient_fast<10, 2>(data, gradient);
                break;
            }
            default:
            {
                gradient.resize(data.vals.basis_values.size() * 2);
                compute_energy_aux_gradient_fast<Eigen::Dynamic, 2>(data, gradient);
                break;
            }
            }
        }
        else // if (size() == 3)
        {
            assert(size() == 3);
            switch (data.vals.basis_values.size())
            {
            case 4:
            {
                gradient.resize(12);
                compute_energy_aux_gradient_fast<4, 3>(data, gradient);
                break;
            }
            case 10:
            {
                gradient.resize(30);
                compute_energy_aux_gradient_fast<10, 3>(data, gradient);
                break;
            }
            case 20:
            {
                gradient.resize(60);
                compute_energy_aux_gradient_fast<20, 3>(data, gradient);
                break;
            }
            default:
            {
                gradient.resize(data.vals.basis_values.size() * 3);
                compute_energy_aux_gradient_fast<Eigen::Dynamic, 3>(data, gradient);
                break;
            }
            }
        }

        return gradient;
    }

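    // NOTE: compute_stiffness_value below fills, for every evaluation point, the
    // fourth-order tensor
    //   dP/dF(i,j,k,l) = mu * delta(i,k) * delta(j,l)
    //                  + (mu - lambda * log(J)) * F^{-T}(i,l) * F^{-T}(k,j)
    //                  + lambda * F^{-T}(i,j) * F^{-T}(k,l),
    // i.e. the derivative of the first Piola-Kirchhoff stress with respect to the
    // deformation gradient; the last (rank-one) term is added through the flattened
    // outer product after the quadruple loop.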
    void NeoHookeanElasticity::compute_stiffness_value(
        const double t,
        const assembler::ElementAssemblyValues &vals,
        const Eigen::MatrixXd &local_pts,
        const Eigen::MatrixXd &displacement,
        Eigen::MatrixXd &tensor) const
    {
        tensor.resize(local_pts.rows(), size() * size() * size() * size());
        assert(displacement.cols() == 1);

        Eigen::MatrixXd displacement_grad(size(), size());

        for (long p = 0; p < local_pts.rows(); ++p)
        {
            double lambda, mu;
            params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

            compute_diplacement_grad(size(), vals, local_pts, p, displacement, displacement_grad);
            const Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(size(), size()) + displacement_grad;
            const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
            const Eigen::VectorXd FmT_vec = utils::flatten(FmT);
            const double J = def_grad.determinant();
            const double tmp1 = mu - lambda * std::log(J);
            for (int i = 0, idx = 0; i < size(); i++)
                for (int j = 0; j < size(); j++)
                    for (int k = 0; k < size(); k++)
                        for (int l = 0; l < size(); l++)
                        {
                            tensor(p, idx) = mu * delta(i, k) * delta(j, l) + tmp1 * FmT(i, l) * FmT(k, j);
                            idx++;
                        }

            tensor.row(p) += lambda * utils::flatten(FmT_vec * FmT_vec.transpose());

            // {
            //     Eigen::MatrixXd hess = utils::unflatten(tensor.row(p), size() * size());
            //     Eigen::MatrixXd fhess;
            //     Eigen::VectorXd x0 = utils::flatten(def_grad);
            //     fd::finite_jacobian(
            //         x0, [this, lambda, mu](const Eigen::VectorXd &x1) -> Eigen::VectorXd {
            //             Eigen::MatrixXd def_grad = utils::unflatten(x1, this->size());
            //             const Eigen::MatrixXd FmT = def_grad.inverse().transpose();
            //             const double J = def_grad.determinant();
            //             Eigen::MatrixXd stress_tensor = mu * (def_grad - FmT) + lambda * std::log(J) * FmT;
            //             return utils::flatten(stress_tensor);
            //         },
            //         fhess);

            //     if (!fd::compare_hessian(hess, fhess))
            //     {
            //         std::cout << "Hessian: " << hess << std::endl;
            //         std::cout << "Finite hessian: " << fhess << std::endl;
            //         log_and_throw_error("Hessian in Neohookean mismatch");
            //     }
            // }
        }
    }

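    // NOTE: assemble_hessian mirrors assemble_gradient above: it dispatches on the local
    // basis count to fixed-size instantiations of compute_energy_hessian_aux_fast, with an
    // Eigen::Dynamic fallback for other element types.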
    Eigen::MatrixXd
    NeoHookeanElasticity::assemble_hessian(const NonLinearAssemblerData &data) const
    {
        Eigen::MatrixXd hessian;

        if (size() == 2)
        {
            switch (data.vals.basis_values.size())
            {
            case 3:
            {
                hessian.resize(6, 6);
                hessian.setZero();
                compute_energy_hessian_aux_fast<3, 2>(data, hessian);
                break;
            }
            case 6:
            {
                hessian.resize(12, 12);
                hessian.setZero();
                compute_energy_hessian_aux_fast<6, 2>(data, hessian);
                break;
            }
            case 10:
            {
                hessian.resize(20, 20);
                hessian.setZero();
                compute_energy_hessian_aux_fast<10, 2>(data, hessian);
                break;
            }
            default:
            {
                hessian.resize(data.vals.basis_values.size() * 2, data.vals.basis_values.size() * 2);
                hessian.setZero();
                compute_energy_hessian_aux_fast<Eigen::Dynamic, 2>(data, hessian);
                break;
            }
            }
        }
        else // if (size() == 3)
        {
            assert(size() == 3);
            switch (data.vals.basis_values.size())
            {
            case 4:
            {
                hessian.resize(12, 12);
                hessian.setZero();
                compute_energy_hessian_aux_fast<4, 3>(data, hessian);
                break;
            }
            case 10:
            {
                hessian.resize(30, 30);
                hessian.setZero();
                compute_energy_hessian_aux_fast<10, 3>(data, hessian);
                break;
            }
            case 20:
            {
                hessian.resize(60, 60);
                hessian.setZero();
                compute_energy_hessian_aux_fast<20, 3>(data, hessian);
                break;
            }
            default:
            {
                hessian.resize(data.vals.basis_values.size() * 3, data.vals.basis_values.size() * 3);
                hessian.setZero();
                compute_energy_hessian_aux_fast<Eigen::Dynamic, 3>(data, hessian);
                break;
            }
            }
        }

        return hessian;
    }

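    // NOTE: assign_stress_tensor evaluates, at each output point, the Cauchy stress of the
    // compressible neo-Hookean model,
    //   sigma = (mu * (F F^T - I) + lambda * log(J) * I) / J,
    // optionally converted to PK1 or PK2 via pk1_from_cauchy / pk2_from_cauchy, or returns
    // the deformation gradient F itself when ElasticityTensorType::F is requested.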
    void NeoHookeanElasticity::assign_stress_tensor(
        const OutputData &data,
        const int all_size,
        const ElasticityTensorType &type,
        Eigen::MatrixXd &all,
        const std::function<Eigen::MatrixXd(const Eigen::MatrixXd &)> &fun) const
    {
        const auto &displacement = data.fun;
        const auto &local_pts = data.local_pts;
        const auto &bs = data.bs;
        const auto &gbs = data.gbs;
        const auto el_id = data.el_id;
        const auto t = data.t;

        Eigen::MatrixXd displacement_grad(size(), size());

        assert(displacement.cols() == 1);

        all.resize(local_pts.rows(), all_size);

        ElementAssemblyValues vals;
        vals.compute(el_id, size() == 3, local_pts, bs, gbs);
        const auto I = Eigen::MatrixXd::Identity(size(), size());

        for (long p = 0; p < local_pts.rows(); ++p)
        {
            compute_diplacement_grad(size(), bs, vals, local_pts, p, displacement, displacement_grad);

            const Eigen::MatrixXd def_grad = I + displacement_grad;
            if (type == ElasticityTensorType::F)
            {
                all.row(p) = fun(def_grad);
                continue;
            }
            const double J = def_grad.determinant();
            const Eigen::MatrixXd b = def_grad * def_grad.transpose();

            double lambda, mu;
            params_.lambda_mu(local_pts.row(p), vals.val.row(p), t, vals.element_id, lambda, mu);

            Eigen::MatrixXd stress_tensor = (lambda * std::log(J) * I + mu * (b - I)) / J;
            if (type == ElasticityTensorType::PK1)
                stress_tensor = pk1_from_cauchy(stress_tensor, def_grad);
            else if (type == ElasticityTensorType::PK2)
                stress_tensor = pk2_from_cauchy(stress_tensor, def_grad);

            all.row(p) = fun(stress_tensor);
        }
    }

    double NeoHookeanElasticity::compute_energy(const NonLinearAssemblerData &data) const
    {
        return compute_energy_aux<double>(data);
    }

    // Compute ∫ ½μ (tr(FᵀF) - dim - 2 ln(J)) + ½λ ln²(J) du
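    // NOTE: the scalar type T below may be plain double (as used by compute_energy) or an
    // autodiff scalar (see the AutoDiffVect / AutoDiffGradMat typedefs); the energy assumes
    // J = det(F) > 0, since log(J) is undefined for inverted elements.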
    template <typename T>
    T NeoHookeanElasticity::compute_energy_aux(const NonLinearAssemblerData &data) const
    {
        typedef Eigen::Matrix<T, Eigen::Dynamic, 1> AutoDiffVect;
        typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> AutoDiffGradMat;

        AutoDiffVect local_disp;
        get_local_disp(data, size(), local_disp);

        AutoDiffGradMat def_grad(size(), size());

        T energy = T(0.0);

        const int n_pts = data.da.size();
        for (long p = 0; p < n_pts; ++p)
        {
            compute_disp_grad_at_quad(data, local_disp, p, size(), def_grad);

            // Id + grad d
            for (int d = 0; d < size(); ++d)
                def_grad(d, d) += T(1);

            double lambda, mu;
            params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

            const T log_det_j = log(polyfem::utils::determinant(def_grad));
            const T val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

            energy += val * data.da(p);
        }
        return energy;
    }

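    // NOTE: hat(x) builds the 3x3 skew-symmetric matrix such that hat(x) * y = x × y, and
    // cross(x, y) is the usual 3D cross product; both are only meaningful for dim == 3 and
    // are used below to assemble the first and second derivatives of det(F).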
    template <int dim>
    Eigen::Matrix<double, dim, dim> hat(const Eigen::Matrix<double, dim, 1> &x)
    {
        Eigen::Matrix<double, dim, dim> prod;
        prod.setZero();

        prod(0, 1) = -x(2);
        prod(0, 2) = x(1);
        prod(1, 0) = x(2);
        prod(1, 2) = -x(0);
        prod(2, 0) = -x(1);
        prod(2, 1) = x(0);

        return prod;
    }

    template <int dim>
    Eigen::Matrix<double, dim, 1> cross(const Eigen::Matrix<double, dim, 1> &x, const Eigen::Matrix<double, dim, 1> &y)
    {
        Eigen::Matrix<double, dim, 1> z;
        z.setZero();

        z(0) = x(1) * y(2) - x(2) * y(1);
        z(1) = x(2) * y(0) - x(0) * y(2);
        z(2) = x(0) * y(1) - x(1) * y(0);

        return z;
    }

    template <int n_basis, int dim>
    void NeoHookeanElasticity::compute_energy_aux_gradient_fast(const NonLinearAssemblerData &data, Eigen::Matrix<double, Eigen::Dynamic, 1> &G_flattened) const
    {
        assert(data.x.cols() == 1);

        const int n_pts = data.da.size();

        Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
        local_disp.setZero();
        for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
        {
            const auto &bs = data.vals.basis_values[i];
            for (size_t ii = 0; ii < bs.global.size(); ++ii)
            {
                for (int d = 0; d < size(); ++d)
                {
                    local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
                }
            }
        }

        Eigen::Matrix<double, dim, dim> def_grad(size(), size());

        Eigen::Matrix<double, n_basis, dim> G(data.vals.basis_values.size(), size());
        G.setZero();

        for (long p = 0; p < n_pts; ++p)
        {
            Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

            for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
            {
                grad.row(i) = data.vals.basis_values[i].grad.row(p);
            }

            Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];

            // Id + grad d
            def_grad = local_disp.transpose() * grad * jac_it + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

            const double J = def_grad.determinant();
            const double log_det_j = log(J);

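            // NOTE: delJ_delF is the cofactor matrix of F, i.e. dJ/dF = J * F^{-T}; in 2D
            // it is written out explicitly, in 3D its columns are cross products of the
            // columns of F.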
            Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
            delJ_delF.setZero();

            if (dim == 2)
            {
                delJ_delF(0, 0) = def_grad(1, 1);
                delJ_delF(0, 1) = -def_grad(1, 0);
                delJ_delF(1, 0) = -def_grad(0, 1);
                delJ_delF(1, 1) = def_grad(0, 0);
            }
            else if (dim == 3)
            {
                Eigen::Matrix<double, dim, 1> u(def_grad.rows());
                Eigen::Matrix<double, dim, 1> v(def_grad.rows());
                Eigen::Matrix<double, dim, 1> w(def_grad.rows());

                u = def_grad.col(0);
                v = def_grad.col(1);
                w = def_grad.col(2);

                delJ_delF.col(0) = cross<dim>(v, w);
                delJ_delF.col(1) = cross<dim>(w, u);
                delJ_delF.col(2) = cross<dim>(u, v);
            }

            double lambda, mu;
            params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

            Eigen::Matrix<double, n_basis, dim> delF_delU = grad * jac_it;

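            // NOTE: gradient_temp is the first Piola-Kirchhoff stress written with
            // (1/J) * delJ_delF = F^{-T}:
            //   P(F) = mu * (F - F^{-T}) + lambda * log(J) * F^{-T},
            // so the per-point contribution is delF_delU * P^T weighted by the quadrature weight.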
            Eigen::Matrix<double, dim, dim> gradient_temp = mu * def_grad - mu * (1 / J) * delJ_delF + lambda * log_det_j * (1 / J) * delJ_delF;
            Eigen::Matrix<double, n_basis, dim> gradient = delF_delU * gradient_temp.transpose();

            // energy density at this quadrature point (not needed for the gradient itself)
            double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

            G.noalias() += gradient * data.da(p);
        }

        Eigen::Matrix<double, dim, n_basis> G_T = G.transpose();

        constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
        Eigen::Matrix<double, N, 1> temp(Eigen::Map<Eigen::Matrix<double, N, 1>>(G_T.data(), G_T.size()));
        G_flattened = temp;
    }

    template <int n_basis, int dim>
    void NeoHookeanElasticity::compute_energy_hessian_aux_fast(const NonLinearAssemblerData &data, Eigen::MatrixXd &H) const
    {
        assert(data.x.cols() == 1);

        constexpr int N = (n_basis == Eigen::Dynamic) ? Eigen::Dynamic : n_basis * dim;
        const int n_pts = data.da.size();

        Eigen::Matrix<double, n_basis, dim> local_disp(data.vals.basis_values.size(), size());
        local_disp.setZero();
        for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
        {
            const auto &bs = data.vals.basis_values[i];
            for (size_t ii = 0; ii < bs.global.size(); ++ii)
            {
                for (int d = 0; d < size(); ++d)
                {
                    local_disp(i, d) += bs.global[ii].val * data.x(bs.global[ii].index * size() + d);
                }
            }
        }

        Eigen::Matrix<double, dim, dim> def_grad(size(), size());

        for (long p = 0; p < n_pts; ++p)
        {
            Eigen::Matrix<double, n_basis, dim> grad(data.vals.basis_values.size(), size());

            for (size_t i = 0; i < data.vals.basis_values.size(); ++i)
            {
                grad.row(i) = data.vals.basis_values[i].grad.row(p);
            }

            Eigen::Matrix<double, dim, dim> jac_it = data.vals.jac_it[p];

            // Id + grad d
            def_grad = local_disp.transpose() * grad * jac_it + Eigen::Matrix<double, dim, dim>::Identity(size(), size());

            const double J = def_grad.determinant();
            double log_det_j = log(J);

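            // NOTE: delJ_delF is dJ/dF (the cofactor matrix of F) and del2J_delF2 is the
            // second derivative of J with respect to F, stored as a (dim*dim) x (dim*dim)
            // matrix acting on flattened deformation gradients.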
            Eigen::Matrix<double, dim, dim> delJ_delF(size(), size());
            delJ_delF.setZero();
            Eigen::Matrix<double, dim * dim, dim * dim> del2J_delF2(size() * size(), size() * size());
            del2J_delF2.setZero();

            if (dim == 2)
            {
                delJ_delF(0, 0) = def_grad(1, 1);
                delJ_delF(0, 1) = -def_grad(1, 0);
                delJ_delF(1, 0) = -def_grad(0, 1);
                delJ_delF(1, 1) = def_grad(0, 0);

                del2J_delF2(0, 3) = 1;
                del2J_delF2(1, 2) = -1;
                del2J_delF2(2, 1) = -1;
                del2J_delF2(3, 0) = 1;
            }
            else if (size() == 3)
            {
                Eigen::Matrix<double, dim, 1> u(def_grad.rows());
                Eigen::Matrix<double, dim, 1> v(def_grad.rows());
                Eigen::Matrix<double, dim, 1> w(def_grad.rows());

                u = def_grad.col(0);
                v = def_grad.col(1);
                w = def_grad.col(2);

                delJ_delF.col(0) = cross<dim>(v, w);
                delJ_delF.col(1) = cross<dim>(w, u);
                delJ_delF.col(2) = cross<dim>(u, v);

                del2J_delF2.template block<dim, dim>(0, 6) = hat<dim>(v);
                del2J_delF2.template block<dim, dim>(6, 0) = -hat<dim>(v);
                del2J_delF2.template block<dim, dim>(0, 3) = -hat<dim>(w);
                del2J_delF2.template block<dim, dim>(3, 0) = hat<dim>(w);
                del2J_delF2.template block<dim, dim>(3, 6) = -hat<dim>(u);
                del2J_delF2.template block<dim, dim>(6, 3) = hat<dim>(u);
            }

            double lambda, mu;
            params_.lambda_mu(data.vals.quadrature.points.row(p), data.vals.val.row(p), data.t, data.vals.element_id, lambda, mu);

            Eigen::Matrix<double, dim * dim, dim * dim> id = Eigen::Matrix<double, dim * dim, dim * dim>::Identity(size() * size(), size() * size());

            Eigen::Matrix<double, dim * dim, 1> g_j = Eigen::Map<const Eigen::Matrix<double, dim * dim, 1>>(delJ_delF.data(), delJ_delF.size());

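            // NOTE: hessian_temp is the second derivative of the energy density with
            // respect to F, acting on flattened matrices:
            //   d2W/dF2 = mu * I
            //           + ((mu + lambda * (1 - log(J))) / J^2) * g_J * g_J^T
            //           + ((lambda * log(J) - mu) / J) * d2J/dF2,
            // where g_J is dJ/dF flattened; delF_delU_tensor maps local DOF variations to
            // flattened variations of F.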
            Eigen::Matrix<double, dim * dim, dim * dim> hessian_temp = (mu * id) + (((mu + lambda * (1 - log_det_j)) / (J * J)) * (g_j * g_j.transpose())) + (((lambda * log_det_j - mu) / (J)) * del2J_delF2);

            Eigen::Matrix<double, dim * dim, N> delF_delU_tensor(jac_it.size(), grad.size());

            for (size_t i = 0; i < local_disp.rows(); ++i)
            {
                for (size_t j = 0; j < local_disp.cols(); ++j)
                {
                    Eigen::Matrix<double, dim, dim> temp(size(), size());
                    temp.setZero();
                    temp.row(j) = grad.row(i);
                    temp = temp * jac_it;
                    Eigen::Matrix<double, dim * dim, 1> temp_flattened(Eigen::Map<Eigen::Matrix<double, dim * dim, 1>>(temp.data(), temp.size()));
                    delF_delU_tensor.col(i * size() + j) = temp_flattened;
                }
            }

            Eigen::Matrix<double, N, N> hessian = delF_delU_tensor.transpose() * hessian_temp * delF_delU_tensor;

            // energy density at this quadrature point (not needed for the Hessian itself)
            double val = mu / 2 * ((def_grad.transpose() * def_grad).trace() - size() - 2 * log_det_j) + lambda / 2 * log_det_j * log_det_j;

            H += hessian * data.da(p);
        }
    }

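    // NOTE: the compute_stress_grad_multiply_{mat,stress,vect} overrides below all start
    // from the first Piola-Kirchhoff stress of the neo-Hookean model,
    //   P(F) = mu * (F - F^{-T}) + lambda * log(det F) * F^{-T},
    // and apply the derivative dP/dF to a given matrix, to the stress itself, or to a
    // vector contraction, respectively.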
    void NeoHookeanElasticity::compute_stress_grad_multiply_mat(
        const OptAssemblerData &data,
        const Eigen::MatrixXd &mat,
        Eigen::MatrixXd &stress,
        Eigen::MatrixXd &result) const
    {
        const double t = data.t;
        const int el_id = data.el_id;
        const Eigen::MatrixXd &local_pts = data.local_pts;
        const Eigen::MatrixXd &global_pts = data.global_pts;
        const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

        double lambda, mu;
        params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

        Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
        Eigen::MatrixXd FmT = def_grad.inverse().transpose();

        stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
        result = mu * mat + FmT * mat.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * mat.array()).sum() * FmT;
    }

    void NeoHookeanElasticity::compute_stress_grad_multiply_stress(
        const OptAssemblerData &data,
        Eigen::MatrixXd &stress,
        Eigen::MatrixXd &result) const
    {
        const double t = data.t;
        const int el_id = data.el_id;
        const Eigen::MatrixXd &local_pts = data.local_pts;
        const Eigen::MatrixXd &global_pts = data.global_pts;
        const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

        double lambda, mu;
        params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

        Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
        Eigen::MatrixXd FmT = def_grad.inverse().transpose();

        stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
        result = mu * stress + FmT * stress.transpose() * FmT * (mu - lambda * std::log(def_grad.determinant())) + lambda * (FmT.array() * stress.array()).sum() * FmT;
    }

    void NeoHookeanElasticity::compute_stress_grad_multiply_vect(
        const OptAssemblerData &data,
        const Eigen::MatrixXd &vect,
        Eigen::MatrixXd &stress,
        Eigen::MatrixXd &result) const
    {
        const double t = data.t;
        const int el_id = data.el_id;
        const Eigen::MatrixXd &local_pts = data.local_pts;
        const Eigen::MatrixXd &global_pts = data.global_pts;
        const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

        double lambda, mu;
        params_.lambda_mu(local_pts, global_pts, t, el_id, lambda, mu);

        Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
        Eigen::MatrixXd FmT = def_grad.inverse().transpose();

        stress = mu * (def_grad - FmT) + lambda * std::log(def_grad.determinant()) * FmT;
        result.setZero(size() * size(), size());
        if (vect.rows() == 1)
            for (int i = 0; i < size(); ++i)
                for (int j = 0; j < size(); ++j)
                    for (int l = 0; l < size(); ++l)
                    {
                        result(i * size() + j, l) += mu * vect(i) * ((j == l) ? 1 : 0);
                        // For some reason, the second gives lower error. From the formula, though, it should be the following.
                        // result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(j, l) * (FmT.col(i).array() * vect.transpose().array()).sum();
                        result(i * size() + j, l) += (mu - lambda * std::log(def_grad.determinant())) * FmT(i, l) * (FmT.col(j).array() * vect.transpose().array()).sum();
                        result(i * size() + j, l) += lambda * FmT(i, j) * (FmT.col(l).array() * vect.transpose().array()).sum();
                    }
        else
            for (int i = 0; i < size(); ++i)
                for (int j = 0; j < size(); ++j)
                    for (int k = 0; k < size(); ++k)
                    {
                        result(i * size() + j, k) += mu * vect(j) * ((i == k) ? 1 : 0);
                        // For some reason, the second gives lower error. From the formula, though, it should be the following.
                        // result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, j) * (FmT.row(i).array() * vect.transpose().array()).sum();
                        result(i * size() + j, k) += (mu - lambda * std::log(def_grad.determinant())) * FmT(k, i) * (FmT.row(j).array() * vect.transpose().array()).sum();
                        result(i * size() + j, k) += lambda * FmT(i, j) * (FmT.row(k).array() * vect.transpose().array()).sum();
                    }
    }

    void NeoHookeanElasticity::compute_dstress_dmu_dlambda(
        const OptAssemblerData &data,
        Eigen::MatrixXd &dstress_dmu,
        Eigen::MatrixXd &dstress_dlambda) const
    {
        const double t = data.t;
        const int el_id = data.el_id;
        const Eigen::MatrixXd &local_pts = data.local_pts;
        const Eigen::MatrixXd &global_pts = data.global_pts;
        const Eigen::MatrixXd &grad_u_i = data.grad_u_i;

        Eigen::MatrixXd def_grad = Eigen::MatrixXd::Identity(grad_u_i.rows(), grad_u_i.cols()) + grad_u_i;
        Eigen::MatrixXd FmT = def_grad.inverse().transpose();
        dstress_dmu = def_grad - FmT;
        dstress_dlambda = std::log(def_grad.determinant()) * FmT;
    }

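    // NOTE: parameters() below exposes lambda, mu, and the derived Young's modulus E and
    // Poisson's ratio nu; the 3D branch uses the standard 3D relations
    //   E = mu * (3*lambda + 2*mu) / (lambda + mu),   nu = lambda / (2 * (lambda + mu)),
    // while the 2D branch uses the corresponding planar (2D) conversions.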
    std::map<std::string, Assembler::ParamFunc> NeoHookeanElasticity::parameters() const
    {
        std::map<std::string, ParamFunc> res;
        const auto &params = params_;
        const int size = this->size();

        res["lambda"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
            double lambda, mu;

            params.lambda_mu(uv, p, t, e, lambda, mu);
            return lambda;
        };

        res["mu"] = [&params](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
            double lambda, mu;

            params.lambda_mu(uv, p, t, e, lambda, mu);
            return mu;
        };

        res["E"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
            double lambda, mu;
            params.lambda_mu(uv, p, t, e, lambda, mu);

            if (size == 3)
                return mu * (3.0 * lambda + 2.0 * mu) / (lambda + mu);
            else
                return 2 * mu * (2.0 * lambda + 2.0 * mu) / (lambda + 2.0 * mu);
        };

        res["nu"] = [&params, size](const RowVectorNd &uv, const RowVectorNd &p, double t, int e) {
            double lambda, mu;

            params.lambda_mu(uv, p, t, e, lambda, mu);

            if (size == 3)
                return lambda / (2.0 * (lambda + mu));
            else
                return lambda / (lambda + 2.0 * mu);
        };

        return res;
    }
} // namespace polyfem::assembler