P2909
Experimental change to corrective-smooth (per loop tangent)
Authored by Campbell Barton (campbellbarton) on Apr 27 2022, 2:44 AM.
diff --git a/source/blender/modifiers/intern/MOD_correctivesmooth.c b/source/blender/modifiers/intern/MOD_correctivesmooth.c
index 52162eaacc5..ac0bfbfae23 100644
--- a/source/blender/modifiers/intern/MOD_correctivesmooth.c
+++ b/source/blender/modifiers/intern/MOD_correctivesmooth.c
@@ -51,8 +51,7 @@
# include "PIL_time_utildefines.h"
#endif
-/* minor optimization, calculate this inline */
-#define USE_TANGENT_CALC_INLINE
+// #define USE_ALIGN_NORMALS
static void initData(ModifierData *md)
{
@@ -400,13 +399,14 @@ static void calc_tangent_ortho(float ts[3][3])
float v_tan_a[3], v_tan_b[3];
float t_vec_a[3], t_vec_b[3];
- normalize_v3(ts[2]);
+ // normalize_v3(ts[2]);
+ BLI_ASSERT_UNIT_V3(ts[2]);
copy_v3_v3(v_tan_a, ts[0]);
copy_v3_v3(v_tan_b, ts[1]);
cross_v3_v3v3(ts[1], ts[2], v_tan_a);
- mul_v3_fl(ts[1], dot_v3v3(ts[1], v_tan_b) < 0.0f ? -1.0f : 1.0f);
+ // mul_v3_fl(ts[1], dot_v3v3(ts[1], v_tan_b) < 0.0f ? -1.0f : 1.0f);
/* Orthogonalize tangent. */
mul_v3_v3fl(t_vec_a, ts[2], dot_v3v3(ts[2], v_tan_a));
@@ -425,11 +425,13 @@ static void calc_tangent_ortho(float ts[3][3])
/**
* accumulate edge-vectors from all polys.
*/
-static void calc_tangent_loop_accum(const float v_dir_prev[3],
- const float v_dir_next[3],
- float r_tspace[3][3])
+static void calc_tangent_loop(const float v_dir_prev[3],
+ const float v_dir_next[3],
+ float r_tspace[3][3])
{
- add_v3_v3v3(r_tspace[1], v_dir_prev, v_dir_next);
+ float tspace[3][3];
+
+ add_v3_v3v3(tspace[1], v_dir_prev, v_dir_next);
if (compare_v3v3(v_dir_prev, v_dir_next, FLT_EPSILON * 10.0f) == false) {
const float weight = fabsf(acosf(dot_v3v3(v_dir_next, v_dir_prev)));
@@ -438,24 +440,40 @@ static void calc_tangent_loop_accum(const float v_dir_prev[3],
cross_v3_v3v3(nor, v_dir_prev, v_dir_next);
normalize_v3(nor);
- cross_v3_v3v3(r_tspace[0], r_tspace[1], nor);
+ cross_v3_v3v3(tspace[0], tspace[1], nor);
mul_v3_fl(nor, weight);
/* accumulate weighted normals */
- add_v3_v3(r_tspace[2], nor);
+ copy_v3_v3(tspace[2], nor);
+ }
+ else {
+ zero_v3(tspace[0]);
+ zero_v3(tspace[2]);
}
+
+ copy_v3_v3(r_tspace[0], tspace[0]);
+ copy_v3_v3(r_tspace[1], tspace[1]);
+ copy_v3_v3(r_tspace[2], tspace[2]);
}
-static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tangent_spaces)[3][3])
+static void calc_tangent_spaces(Mesh *mesh,
+ float (*vertexCos)[3],
+ float (*r_tangent_spaces)[3][3],
+ float *r_loops_of_verts_count)
{
const uint mpoly_num = (uint)mesh->totpoly;
-#ifndef USE_TANGENT_CALC_INLINE
- const uint mvert_num = (uint)dm->getNumVerts(dm);
-#endif
+ const uint mvert_num = (uint)mesh->totvert;
+ const uint mloop_num = (uint)mesh->totloop;
const MPoly *mpoly = mesh->mpoly;
const MLoop *mloop = mesh->mloop;
uint i;
+ copy_vn_fl(r_loops_of_verts_count, (int)mvert_num * 3, 0.0f);
+
+#ifdef USE_ALIGN_NORMALS
+ float(*normals)[3] = MEM_calloc_arrayN(mvert_num, sizeof(float[3]), __func__);
+#endif
+
for (i = 0; i < mpoly_num; i++) {
const MPoly *mp = &mpoly[i];
const MLoop *l_next = &mloop[mp->loopstart];
@@ -471,7 +489,7 @@ static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tan
normalize_v3(v_dir_prev);
for (; l_next != l_term; l_prev = l_curr, l_curr = l_next, l_next++) {
- float(*ts)[3] = r_tangent_spaces[l_curr->v];
+ float(*ts)[3] = r_tangent_spaces[l_curr - mloop];
/* re-use the previous value */
#if 0
@@ -481,16 +499,34 @@ static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tan
sub_v3_v3v3(v_dir_next, vertexCos[l_curr->v], vertexCos[l_next->v]);
normalize_v3(v_dir_next);
- calc_tangent_loop_accum(v_dir_prev, v_dir_next, ts);
+ calc_tangent_loop(v_dir_prev, v_dir_next, ts);
+
+#ifdef USE_ALIGN_NORMALS
+ add_v3_v3(normals[l_curr->v], ts[2]);
+#endif
+ r_loops_of_verts_count[l_curr->v] += 1.0f;
copy_v3_v3(v_dir_prev, v_dir_next);
}
}
- /* do inline */
-#ifndef USE_TANGENT_CALC_INLINE
+#ifdef USE_ALIGN_NORMALS
for (i = 0; i < mvert_num; i++) {
+ normalize_v3(normals[i]);
+ }
+
+ for (i = 0; i < mloop_num; i++) {
+ const MLoop *l_curr = &mesh->mloop[i];
+ const uint v_index = l_curr->v;
float(*ts)[3] = r_tangent_spaces[i];
+ copy_v3_v3(ts[2], normals[v_index]);
+ calc_tangent_ortho(ts);
+ }
+ MEM_freeN(normals);
+#else
+ for (i = 0; i < mloop_num; i++) {
+ float(*ts)[3] = r_tangent_spaces[i];
+ normalize_v3(ts[2]);
calc_tangent_ortho(ts);
}
#endif
@@ -524,11 +560,15 @@ static void calc_deltas(CorrectiveSmoothModifierData *csmd,
const float (*rest_coords)[3],
uint verts_num)
{
+ const uint loops_num = (uint)mesh->totloop;
float(*smooth_vertex_coords)[3] = MEM_dupallocN(rest_coords);
float(*tangent_spaces)[3][3];
- uint i;
+ float *loops_of_verts_count;
+
+ uint l_index;
- tangent_spaces = MEM_calloc_arrayN(verts_num, sizeof(float[3][3]), __func__);
+ tangent_spaces = MEM_malloc_arrayN(loops_num, sizeof(float[3][3]), __func__);
+ loops_of_verts_count = MEM_malloc_arrayN(loops_num, sizeof(float), __func__);
if (csmd->delta_cache.totverts != verts_num) {
MEM_SAFE_FREE(csmd->delta_cache.deltas);
@@ -542,24 +582,28 @@ static void calc_deltas(CorrectiveSmoothModifierData *csmd,
smooth_verts(csmd, mesh, dvert, defgrp_index, smooth_vertex_coords, verts_num);
- calc_tangent_spaces(mesh, smooth_vertex_coords, tangent_spaces);
+ calc_tangent_spaces(mesh, smooth_vertex_coords, tangent_spaces, loops_of_verts_count);
- for (i = 0; i < verts_num; i++) {
- float imat[3][3], delta[3];
+ copy_vn_fl(&csmd->delta_cache.deltas[0][0], (int)verts_num * 3, 0.0f);
-#ifdef USE_TANGENT_CALC_INLINE
- calc_tangent_ortho(tangent_spaces[i]);
-#endif
+ for (l_index = 0; l_index < loops_num; l_index++) {
+ const int v_index = (int)mesh->mloop[l_index].v;
+ float delta[3], co[3];
+ sub_v3_v3v3(delta, rest_coords[v_index], smooth_vertex_coords[v_index]);
- sub_v3_v3v3(delta, rest_coords[i], smooth_vertex_coords[i]);
- if (UNLIKELY(!invert_m3_m3(imat, tangent_spaces[i]))) {
- transpose_m3_m3(imat, tangent_spaces[i]);
+ float imat[3][3];
+ if (UNLIKELY(!invert_m3_m3(imat, tangent_spaces[l_index]))) {
+ transpose_m3_m3(imat, tangent_spaces[l_index]);
}
- mul_v3_m3v3(csmd->delta_cache.deltas[i], imat, delta);
+ mul_v3_m3v3(co, imat, delta);
+
+ /* Accumulate `co` into the delta array. */
+ madd_v3_v3fl(csmd->delta_cache.deltas[v_index], co, 1.0f / loops_of_verts_count[v_index]);
}
MEM_freeN(tangent_spaces);
MEM_freeN(smooth_vertex_coords);
+ MEM_freeN(loops_of_verts_count);
}
static void correctivesmooth_modifier_do(ModifierData *md,
@@ -688,27 +732,29 @@ static void correctivesmooth_modifier_do(ModifierData *md,
smooth_verts(csmd, mesh, dvert, defgrp_index, vertexCos, verts_num);
{
- uint i;
+ const uint loops_num = (uint)mesh->totloop;
+ uint l_index;
float(*tangent_spaces)[3][3];
+ float *loops_of_verts_count;
const float scale = csmd->scale;
- /* calloc, since values are accumulated */
- tangent_spaces = MEM_calloc_arrayN(verts_num, sizeof(float[3][3]), __func__);
- calc_tangent_spaces(mesh, vertexCos, tangent_spaces);
+ tangent_spaces = MEM_malloc_arrayN(loops_num, sizeof(float[3][3]), __func__);
+ loops_of_verts_count = MEM_malloc_arrayN(loops_num, sizeof(float), __func__);
- for (i = 0; i < verts_num; i++) {
- float delta[3];
+ calc_tangent_spaces(mesh, vertexCos, tangent_spaces, loops_of_verts_count);
-#ifdef USE_TANGENT_CALC_INLINE
- calc_tangent_ortho(tangent_spaces[i]);
-#endif
+ for (l_index = 0; l_index < loops_num; l_index++) {
+ const uint v_index = mesh->mloop[l_index].v;
+ float delta[3];
- mul_v3_m3v3(delta, tangent_spaces[i], csmd->delta_cache.deltas[i]);
- madd_v3_v3fl(vertexCos[i], delta, scale);
+ mul_v3_m3v3(delta, tangent_spaces[l_index], csmd->delta_cache.deltas[v_index]);
+ mul_v3_fl(delta, 1.0f / loops_of_verts_count[v_index]);
+ madd_v3_v3fl(vertexCos[v_index], delta, scale);
}
MEM_freeN(tangent_spaces);
+ MEM_freeN(loops_of_verts_count);
}
#ifdef DEBUG_TIME
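
For reference, a minimal standalone sketch (plain C, not Blender's Mesh/MLoop API) of the per-loop averaging idea the patch implements: each face corner (loop) gets its own tangent-space result, the per-loop results are accumulated onto the owning vertex, and the accumulation is divided by that vertex's loop count (the role of `loops_of_verts_count` above). The loop/vertex layout and values below are made up purely for illustration, and a scalar stands in for the per-loop delta vectors.

#include <stdio.h>

#define VERTS_NUM 4
#define LOOPS_NUM 6

/* Each loop (face corner) references one vertex, like MLoop.v. */
static const unsigned int loop_vert[LOOPS_NUM] = {0, 1, 2, 0, 2, 3};

int main(void)
{
  /* Hypothetical per-loop values (in the patch these would be the deltas
   * transformed by each loop's own tangent space). */
  const float per_loop_value[LOOPS_NUM] = {1.0f, 2.0f, 3.0f, 5.0f, 7.0f, 4.0f};

  float loops_of_verts_count[VERTS_NUM] = {0.0f};
  float per_vert_value[VERTS_NUM] = {0.0f};

  /* First pass: count how many loops reference each vertex
   * (the role of `r_loops_of_verts_count` in `calc_tangent_spaces`). */
  for (int l = 0; l < LOOPS_NUM; l++) {
    loops_of_verts_count[loop_vert[l]] += 1.0f;
  }

  /* Second pass: average the per-loop values onto their vertices,
   * mirroring the `madd_v3_v3fl(..., 1.0f / loops_of_verts_count[v_index])`
   * accumulation in `calc_deltas`. */
  for (int l = 0; l < LOOPS_NUM; l++) {
    const unsigned int v = loop_vert[l];
    per_vert_value[v] += per_loop_value[l] / loops_of_verts_count[v];
  }

  for (int v = 0; v < VERTS_NUM; v++) {
    printf("vertex %d: averaged value %.3f\n", v, per_vert_value[v]);
  }
  return 0;
}

Dividing each contribution by the per-vertex loop count turns the sum into an unweighted average of the per-loop results, which is the same normalization the patch applies both when caching the deltas and when applying them back in `correctivesmooth_modifier_do`.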