Changeset View
Changeset View
Standalone View
Standalone View
release/scripts/freestyle/modules/parameter_editor.py
| Context not available. | |||||
| # Purpose : Interactive manipulation of stylization parameters | # Purpose : Interactive manipulation of stylization parameters | ||||
| from freestyle.types import ( | from freestyle.types import ( | ||||
| BinaryPredicate1D, | |||||
| IntegrationType, | |||||
| Interface0DIterator, | |||||
| Nature, | Nature, | ||||
| Noise, | Noise, | ||||
| Operators, | Operators, | ||||
| StrokeAttribute, | IntegrationType, | ||||
| UnaryPredicate0D, | UnaryPredicate0D, | ||||
| UnaryPredicate1D, | UnaryPredicate1D, | ||||
| BinaryPredicate1D, | |||||
| StrokeAttribute, | |||||
| Interface0DIterator, | |||||
| TVertex, | TVertex, | ||||
| ) | ) | ||||
| from freestyle.chainingiterators import ( | from freestyle.chainingiterators import ( | ||||
| Context not available. | |||||
| VertexOrientation2DF0D, | VertexOrientation2DF0D, | ||||
| ) | ) | ||||
| from freestyle.predicates import ( | from freestyle.predicates import ( | ||||
| # logical operators UP1D | |||||
| AndUP1D, | AndUP1D, | ||||
| ContourUP1D, | OrUP1D, | ||||
| ExternalContourUP1D, | NotUP1D, | ||||
| FalseBP1D, | TrueUP1D, | ||||
| FalseUP1D, | FalseUP1D, | ||||
| Length2DBP1D, | # logical operators BP1D | ||||
| TrueBP1D, | |||||
| FalseBP1D, | |||||
| AndBP1D, | |||||
| OrBP1D, | |||||
| NotBP1D, | NotBP1D, | ||||
| NotUP1D, | # various | ||||
| OrUP1D, | ContourUP1D, | ||||
| ExternalContourUP1D, | |||||
| QuantitativeInvisibilityUP1D, | QuantitativeInvisibilityUP1D, | ||||
| TrueBP1D, | |||||
| TrueUP1D, | |||||
| WithinImageBoundaryUP1D, | WithinImageBoundaryUP1D, | ||||
| pyNatureUP1D, | pyNatureUP1D, | ||||
| Length2DBP1D, | |||||
| pyZBP1D, | pyZBP1D, | ||||
| ) | ) | ||||
| from freestyle.shaders import ( | from freestyle.shaders import ( | ||||
| # color | |||||
| ConstantColorShader, | |||||
| # geometry | |||||
| BackboneStretcherShader, | BackboneStretcherShader, | ||||
| BezierCurveShader, | BezierCurveShader, | ||||
| ConstantColorShader, | |||||
| GuidingLinesShader, | GuidingLinesShader, | ||||
| PolygonalizationShader, | PolygonalizationShader, | ||||
| SamplingShader, | SamplingShader, | ||||
| Context not available. | |||||
| pyBluePrintCirclesShader, | pyBluePrintCirclesShader, | ||||
| pyBluePrintEllipsesShader, | pyBluePrintEllipsesShader, | ||||
| pyBluePrintSquaresShader, | pyBluePrintSquaresShader, | ||||
| # various | |||||
| RoundCapShader, | |||||
| SquareCapShader, | |||||
| ) | ) | ||||
| from freestyle.utils import ( | from freestyle.utils import ( | ||||
| ContextFunctions, | ContextFunctions, | ||||
| getCurrentScene, | getCurrentScene, | ||||
| stroke_normal, | iter_t2d_along_stroke, | ||||
| iter_distance_from_camera, | |||||
| iter_distance_from_object, | |||||
| iter_material_color, | |||||
| iter_material_value, | |||||
| iter_distance_along_stroke, | |||||
| iter_distance_to_neighbors, | |||||
| ) | ) | ||||
| from _freestyle import ( | from _freestyle import ( | ||||
| blendRamp, | blendRamp, | ||||
| evaluateColorRamp, | evaluateColorRamp, | ||||
| evaluateCurveMappingF, | evaluateCurveMappingF, | ||||
| ) | ) | ||||
| import math | import math | ||||
| import mathutils | import mathutils | ||||
| import time | import time | ||||
| from mathutils import Vector | |||||
| class ColorRampModifier(StrokeShader): | class ColorRampModifier(StrokeShader): | ||||
| def __init__(self, blend, influence, ramp): | def __init__(self, blend, influence, ramp): | ||||
| Context not available. | |||||
| def evaluate(self, t): | def evaluate(self, t): | ||||
| col = evaluateColorRamp(self.__ramp, t) | col = evaluateColorRamp(self.__ramp, t) | ||||
| col = col.xyz # omit alpha | return col.xyz # omit alpha | ||||
| return col | |||||
| def blend_ramp(self, a, b): | def blend_ramp(self, a, b): | ||||
| return blendRamp(self.__blend, a, self.__influence, b) | return blendRamp(self.__blend, a, self.__influence, b) | ||||
| Context not available. | |||||
| v1 = facm * v1 + fac * abs(v1 - v2) | v1 = facm * v1 + fac * abs(v1 - v2) | ||||
| elif self.__blend == 'MININUM': | elif self.__blend == 'MININUM': | ||||
| tmp = fac * v2 | tmp = fac * v2 | ||||
| if v1 > tmp: | v1 = min(v1, tmp) | ||||
kjym3: How about a further simplification:
v1 = min(v1, fac * v2)
| |||||
| v1 = tmp | |||||
| elif self.__blend == 'MAXIMUM': | elif self.__blend == 'MAXIMUM': | ||||
| tmp = fac * v2 | tmp = fac * v2 | ||||
| if v1 < tmp: | v1 = max(v1, tmp) | ||||
| v1 = tmp | |||||
| else: | else: | ||||
| raise ValueError("unknown curve blend type: " + self.__blend) | raise ValueError("unknown curve blend type: " + self.__blend) | ||||
| return v1 | return v1 | ||||
| Context not available. | |||||
| nature = fe.nature | nature = fe.nature | ||||
| if (nature & Nature.BORDER): | if (nature & Nature.BORDER): | ||||
| if self.__persp_camera: | if self.__persp_camera: | ||||
| point = -sv.point_3d.copy() | point = -sv.point_3d.copy().normalized() | ||||
kjym3Unsubmitted Not Done Inline ActionsThe call of the .copy() method seems redundant: point = -sv.point_3d.normalized() kjym3: The call of the .copy() method seems redundant:
point = -sv.point_3d.normalized() | |||||
| point.normalize() | |||||
| dir = point.dot(fe.normal_left) | dir = point.dot(fe.normal_left) | ||||
| else: | else: | ||||
| dir = fe.normal_left.z | dir = fe.normal_left.z | ||||
| Context not available. | |||||
| self.__outer = thickness * 0.5 | self.__outer = thickness * 0.5 | ||||
| self.__inner = thickness - self.__outer | self.__inner = thickness - self.__outer | ||||
| elif position == 'INSIDE': | elif position == 'INSIDE': | ||||
| self.__outer = 0 | self.__outer = 0.0 | ||||
| self.__inner = thickness | self.__inner = thickness | ||||
| elif position == 'OUTSIDE': | elif position == 'OUTSIDE': | ||||
| self.__outer = thickness | self.__outer = thickness | ||||
| self.__inner = 0 | self.__inner = 0.0 | ||||
| elif position == 'RELATIVE': | elif position == 'RELATIVE': | ||||
| self.__outer = thickness * ratio | self.__outer = thickness * ratio | ||||
| self.__inner = thickness - self.__outer | self.__inner = thickness - self.__outer | ||||
| Context not available. | |||||
| raise ValueError("unknown thickness position: " + self.position) | raise ValueError("unknown thickness position: " + self.position) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | self.set_thickness(svert, self.__outer, self.__inner) | ||||
| sv = it.object | |||||
| self.set_thickness(sv, self.__outer, self.__inner) | |||||
| it.increment() | |||||
| # Along Stroke modifiers | |||||
| def iter_t2d_along_stroke(stroke): | |||||
| total = stroke.length_2d | |||||
| distance = 0.0 | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev = it.object.point | |||||
| while not it.is_end: | |||||
| p = it.object.point | |||||
| distance += (prev - p).length | |||||
| prev = p.copy() # need a copy because the point can be altered | |||||
| t = min(distance / total, 1.0) if total > 0.0 else 0.0 | |||||
| yield it, t | |||||
| it.increment() | |||||
kjym3Unsubmitted Not Done Inline ActionsThe whole updates of for iterations below can be revised after the iterator API revisions in D545, so I leave them for another round of review in the feature. kjym3: The whole updates of `for` iterations below can be revised after the iterator API revisions in… | |||||
| """ Along Stroke modifiers """ | |||||
| class ColorAlongStrokeShader(ColorRampModifier): | class ColorAlongStrokeShader(ColorRampModifier): | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_t2d_along_stroke(stroke): | for svert, t in iter_t2d_along_stroke(stroke): | ||||
| sv = it.object | a = svert.attribute.color | ||||
| a = sv.attribute.color | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.color = self.blend_ramp(a, b) | svert.attribute.color = self.blend_ramp(a, b) | ||||
| class AlphaAlongStrokeShader(CurveMappingModifier): | class AlphaAlongStrokeShader(CurveMappingModifier): | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_t2d_along_stroke(stroke): | for svert, t in iter_t2d_along_stroke(stroke): | ||||
| sv = it.object | a = svert.attribute.alpha | ||||
| a = sv.attribute.alpha | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.alpha = self.blend(a, b) | svert.attribute.alpha = self.blend(a, b) | ||||
| class ThicknessAlongStrokeShader(ThicknessBlenderMixIn, CurveMappingModifier): | class ThicknessAlongStrokeShader(ThicknessBlenderMixIn, CurveMappingModifier): | ||||
| Context not available. | |||||
| self.__value_max = value_max | self.__value_max = value_max | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_t2d_along_stroke(stroke): | for svert, t in iter_t2d_along_stroke(stroke): | ||||
| sv = it.object | a = svert.attribute.thickness | ||||
| a = sv.attribute.thickness | |||||
| b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | ||||
| c = self.blend_thickness(a[0], a[1], b) | c = self.blend_thickness(a[0], a[1], b) | ||||
| self.set_thickness(sv, c[0], c[1]) | self.set_thickness(svert, c[0], c[1]) | ||||
| # Distance from Camera modifiers | |||||
| def iter_distance_from_camera(stroke, range_min, range_max): | |||||
| normfac = range_max - range_min # normalization factor | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| p = it.object.point_3d # in the camera coordinate | |||||
| distance = p.length | |||||
| if distance < range_min: | |||||
| t = 0.0 | |||||
| elif distance > range_max: | |||||
| t = 1.0 | |||||
| else: | |||||
| t = (distance - range_min) / normfac | |||||
| yield it, t | |||||
| it.increment() | |||||
| """ Distance from Camera modifiers """ | |||||
| class ColorDistanceFromCameraShader(ColorRampModifier): | class ColorDistanceFromCameraShader(ColorRampModifier): | ||||
| def __init__(self, blend, influence, ramp, range_min, range_max): | def __init__(self, blend, influence, ramp, range_min, range_max): | ||||
| Context not available. | |||||
| self.__range_max = range_max | self.__range_max = range_max | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.color | ||||
| a = sv.attribute.color | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.color = self.blend_ramp(a, b) | svert.attribute.color = self.blend_ramp(a, b) | ||||
| class AlphaDistanceFromCameraShader(CurveMappingModifier): | class AlphaDistanceFromCameraShader(CurveMappingModifier): | ||||
| Context not available. | |||||
| self.__range_max = range_max | self.__range_max = range_max | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.alpha | ||||
| a = sv.attribute.alpha | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.alpha = self.blend(a, b) | svert.attribute.alpha = self.blend(a, b) | ||||
| class ThicknessDistanceFromCameraShader(ThicknessBlenderMixIn, CurveMappingModifier): | class ThicknessDistanceFromCameraShader(ThicknessBlenderMixIn, CurveMappingModifier): | ||||
| Context not available. | |||||
| self.__value_max = value_max | self.__value_max = value_max | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_camera(stroke, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.thickness | ||||
| a = sv.attribute.thickness | |||||
| b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | ||||
| c = self.blend_thickness(a[0], a[1], b) | c = self.blend_thickness(a[0], a[1], b) | ||||
| self.set_thickness(sv, c[0], c[1]) | self.set_thickness(svert, c[0], c[1]) | ||||
| # Distance from Object modifiers | """ Distance from Object modifiers """ | ||||
| def iter_distance_from_object(stroke, object, range_min, range_max): | |||||
| scene = getCurrentScene() | |||||
| mv = scene.camera.matrix_world.copy() # model-view matrix | |||||
| mv.invert() | |||||
| loc = mv * object.location # loc in the camera coordinate | |||||
| normfac = range_max - range_min # normalization factor | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| p = it.object.point_3d # in the camera coordinate | |||||
| distance = (p - loc).length | |||||
| if distance < range_min: | |||||
| t = 0.0 | |||||
| elif distance > range_max: | |||||
| t = 1.0 | |||||
| else: | |||||
| t = (distance - range_min) / normfac | |||||
| yield it, t | |||||
| it.increment() | |||||
| class ColorDistanceFromObjectShader(ColorRampModifier): | class ColorDistanceFromObjectShader(ColorRampModifier): | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| if self.__target is None: | if self.__target is None: | ||||
| return | return None | ||||
| for it, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.color | ||||
| a = sv.attribute.color | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.color = self.blend_ramp(a, b) | svert.attribute.color = self.blend_ramp(a, b) | ||||
| class AlphaDistanceFromObjectShader(CurveMappingModifier): | class AlphaDistanceFromObjectShader(CurveMappingModifier): | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| if self.__target is None: | if self.__target is None: | ||||
| return | return None | ||||
| for it, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.alpha | ||||
| a = sv.attribute.alpha | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.alpha = self.blend(a, b) | svert.attribute.alpha = self.blend(a, b) | ||||
| class ThicknessDistanceFromObjectShader(ThicknessBlenderMixIn, CurveMappingModifier): | class ThicknessDistanceFromObjectShader(ThicknessBlenderMixIn, CurveMappingModifier): | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| if self.__target is None: | if self.__target is None: | ||||
| return | return None | ||||
| for it, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | for svert, t in iter_distance_from_object(stroke, self.__target, self.__range_min, self.__range_max): | ||||
| sv = it.object | a = svert.attribute.thickness | ||||
| a = sv.attribute.thickness | |||||
| b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | ||||
| c = self.blend_thickness(a[0], a[1], b) | c = self.blend_thickness(a[0], a[1], b) | ||||
| self.set_thickness(sv, c[0], c[1]) | self.set_thickness(svert, c[0], c[1]) | ||||
| # Material modifiers | |||||
| def iter_material_color(stroke, material_attribute): | """ Material modifiers """ | ||||
| func = CurveMaterialF0D() | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| material = func(Interface0DIterator(it)) | |||||
| if material_attribute == 'DIFF': | |||||
| color = material.diffuse[0:3] | |||||
| elif material_attribute == 'SPEC': | |||||
| color = material.specular[0:3] | |||||
| else: | |||||
| raise ValueError("unexpected material attribute: " + material_attribute) | |||||
| yield it, color | |||||
| it.increment() | |||||
| def iter_material_value(stroke, material_attribute): | def iter_material_value(stroke, material_attribute): | ||||
| func = CurveMaterialF0D() | func = CurveMaterialF0D() | ||||
| it = stroke.stroke_vertices_begin() | it = Interface0DIterator(iter(stroke)) | ||||
| while not it.is_end: | for svert, inter in zip(stroke, it): | ||||
| material = func(Interface0DIterator(it)) | material = func(it) | ||||
| if material_attribute == 'DIFF': | if material_attribute == 'DIFF': | ||||
| r, g, b = material.diffuse[0:3] | r, g, b = material.diffuse[0:3] | ||||
| t = 0.35 * r + 0.45 * r + 0.2 * b | t = 0.35 * r + 0.45 * r + 0.2 * b | ||||
| Context not available. | |||||
| t = material.diffuse[3] | t = material.diffuse[3] | ||||
| else: | else: | ||||
| raise ValueError("unexpected material attribute: " + material_attribute) | raise ValueError("unexpected material attribute: " + material_attribute) | ||||
| yield it, t | yield svert, t | ||||
| it.increment() | |||||
| class ColorMaterialShader(ColorRampModifier): | class ColorMaterialShader(ColorRampModifier): | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| if self.__material_attribute in {'DIFF', 'SPEC'} and not self.__use_ramp: | if self.__material_attribute in {'DIFF', 'SPEC'} and not self.__use_ramp: | ||||
| for it, b in iter_material_color(stroke, self.__material_attribute): | for svert, b in iter_material_color(stroke, self.__material_attribute): | ||||
| sv = it.object | a = svert.attribute.color | ||||
| a = sv.attribute.color | svert.attribute.color = self.blend_ramp(a, b) | ||||
| sv.attribute.color = self.blend_ramp(a, b) | |||||
| else: | else: | ||||
| for it, t in iter_material_value(stroke, self.__material_attribute): | for svert, t in iter_material_value(stroke, self.__material_attribute): | ||||
| sv = it.object | a = svert.attribute.color | ||||
| a = sv.attribute.color | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.color = self.blend_ramp(a, b) | svert.attribute.color = self.blend_ramp(a, b) | ||||
| class AlphaMaterialShader(CurveMappingModifier): | class AlphaMaterialShader(CurveMappingModifier): | ||||
| Context not available. | |||||
| self.__material_attribute = material_attribute | self.__material_attribute = material_attribute | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_material_value(stroke, self.__material_attribute): | for svert, t in iter_material_value(stroke, self.__material_attribute): | ||||
| sv = it.object | a = svert.attribute.alpha | ||||
| a = sv.attribute.alpha | |||||
| b = self.evaluate(t) | b = self.evaluate(t) | ||||
| sv.attribute.alpha = self.blend(a, b) | svert.attribute.alpha = self.blend(a, b) | ||||
| class ThicknessMaterialShader(ThicknessBlenderMixIn, CurveMappingModifier): | class ThicknessMaterialShader(ThicknessBlenderMixIn, CurveMappingModifier): | ||||
| Context not available. | |||||
| self.__value_max = value_max | self.__value_max = value_max | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| for it, t in iter_material_value(stroke, self.__material_attribute): | for svert, t in iter_material_value(stroke, self.__material_attribute): | ||||
| sv = it.object | a = svert.attribute.thickness | ||||
| a = sv.attribute.thickness | |||||
| b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | b = self.__value_min + self.evaluate(t) * (self.__value_max - self.__value_min) | ||||
| c = self.blend_thickness(a[0], a[1], b) | c = self.blend_thickness(a[0], a[1], b) | ||||
| self.set_thickness(sv, c[0], c[1]) | self.set_thickness(svert, c[0], c[1]) | ||||
| # Calligraphic thickness modifier | # Calligraphic thickness modifier | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| func = VertexOrientation2DF0D() | func = VertexOrientation2DF0D() | ||||
| it = stroke.stroke_vertices_begin() | it = Interface0DIterator(iter(stroke)) | ||||
| while not it.is_end: | for svert, inter in zip(stroke, it): | ||||
| dir = func(Interface0DIterator(it)) | dir = func(it) | ||||
| orthDir = mathutils.Vector((-dir.y, dir.x)) | orthDir = Vector((-dir.y, dir.x)).normalized() | ||||
| orthDir.normalize() | |||||
| fac = abs(orthDir * self.__orientation) | fac = abs(orthDir * self.__orientation) | ||||
| sv = it.object | a = svert.attribute.thickness | ||||
| a = sv.attribute.thickness | |||||
| b = self.__thickness_min + fac * (self.__thickness_max - self.__thickness_min) | b = self.__thickness_min + fac * (self.__thickness_max - self.__thickness_min) | ||||
| b = max(b, 0.0) | b = max(b, 0.0) | ||||
| c = self.blend_thickness(a[0], a[1], b) | c = self.blend_thickness(a[0], a[1], b) | ||||
| self.set_thickness(sv, c[0], c[1]) | self.set_thickness(svert, c[0], c[1]) | ||||
| it.increment() | |||||
| # Geometry modifiers | # Geometry modifiers | ||||
| def iter_distance_along_stroke(stroke): | |||||
| distance = 0.0 | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev = it.object.point | |||||
| while not it.is_end: | |||||
| p = it.object.point | |||||
| distance += (prev - p).length | |||||
| prev = p.copy() # need a copy because the point can be altered | |||||
| yield it, distance | |||||
| it.increment() | |||||
| class SinusDisplacementShader(StrokeShader): | class SinusDisplacementShader(StrokeShader): | ||||
| def __init__(self, wavelength, amplitude, phase): | def __init__(self, wavelength, amplitude, phase): | ||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self._wavelength = wavelength | self._wavelength = wavelength | ||||
| self._amplitude = amplitude | self._amplitude = amplitude | ||||
| self._phase = phase / wavelength * 2 * math.pi | self._phase = phase / wavelength * 2 * math.pi | ||||
| self._getNormal = Normal2DF0D() | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| # separately iterate over stroke vertices to compute normals | for svert, distance in iter_distance_along_stroke(stroke): | ||||
| buf = [] | n = self._getNormal(Interface0DIterator(it)) | ||||
| for it, distance in iter_distance_along_stroke(stroke): | n = n * self._amplitude * math.cos(distance / self._wavelength * 2 * math.pi + self._phase) | ||||
| buf.append((it.object, distance, stroke_normal(it))) | svert.point += n | ||||
| # iterate over the vertices again to displace them | |||||
| for v, distance, normal in buf: | |||||
| n = normal * self._amplitude * math.cos(distance / self._wavelength * 2 * math.pi + self._phase) | |||||
| v.point = v.point + n | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| length = stroke.length_2d | length = stroke.length_2d | ||||
| it = stroke.stroke_vertices_begin() | for svert in it: | ||||
| while not it.is_end: | |||||
| v = it.object | |||||
| nres = self.__noise.turbulence1(length * v.u, self.__freq, self.__amp, self.__oct) | nres = self.__noise.turbulence1(length * v.u, self.__freq, self.__amp, self.__oct) | ||||
| v.point = v.point + nres * self.__dir | svert.point += + nres * self.__dir | ||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| self.__dir = mathutils.Vector((math.cos(angle), math.sin(angle))) | self.__dir = mathutils.Vector((math.cos(angle), math.sin(angle))) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | |||||
| v = it.object | |||||
| vec = mathutils.Vector((v.projected_x, v.projected_y)) | vec = mathutils.Vector((v.projected_x, v.projected_y)) | ||||
| nres = self.__noise.turbulence2(vec, self.__freq, self.__amp, self.__oct) | nres = self.__noise.turbulence2(vec, self.__freq, self.__amp, self.__oct) | ||||
| v.point = v.point + nres * self.__dir | v.point += nres * self.__dir | ||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| self.__start = start | self.__start = start | ||||
| self.__end = end | self.__end = end | ||||
| self.__xy = mathutils.Vector((x, y)) | self.__xy = mathutils.Vector((x, y)) | ||||
| self.__getNormal = Normal2DF0D() | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| # first iterate over stroke vertices to compute normals | it = Interface0DIterator(iter(stroke)) | ||||
| buf = [] | for svert, inter in zip(stroke, it): | ||||
| it = stroke.stroke_vertices_begin() | a = self.__start + svert.u * (self.__end - self.__start) | ||||
| while not it.is_end: | n = self.__getNormal(it) | ||||
| buf.append((it.object, stroke_normal(it))) | n *= a | ||||
| it.increment() | v.point += n + self.__xy | ||||
| # again iterate over the vertices to add displacement | |||||
| for v, n in buf: | |||||
| a = self.__start + v.u * (self.__end - self.__start) | |||||
| n = n * a | |||||
| v.point = v.point + n + self.__xy | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| pivot = it.object.point | pivot = it.object.point | ||||
| elif self.__pivot == 'PARAM': | elif self.__pivot == 'PARAM': | ||||
| p = None | p = None | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | |||||
| prev = p | prev = p | ||||
| v = it.object | p = svert.point | ||||
| p = v.point | u = svert.u | ||||
| u = v.u | |||||
| if self.__pivot_u < u: | if self.__pivot_u < u: | ||||
| break | break | ||||
| it.increment() | |||||
| if prev is None: | if prev is None: | ||||
| pivot = p | pivot = p | ||||
| else: | else: | ||||
| delta = u - self.__pivot_u | delta = u - self.__pivot_u | ||||
| pivot = p + delta * (prev - p) | pivot = p + delta * (prev - p) | ||||
| elif self.__pivot == 'CENTER': | elif self.__pivot == 'CENTER': | ||||
| pivot = mathutils.Vector((0.0, 0.0)) | pivot = mathutils.Vector((0.0, 0.0)) | ||||
| n = 0 | n = 0 | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | pivot = pivot + svert.point | ||||
| p = it.object.point | |||||
| pivot = pivot + p | |||||
| n += 1 | n += 1 | ||||
| it.increment() | |||||
| pivot.x = pivot.x / n | pivot.x /= n | ||||
| pivot.y = pivot.y / n | pivot.y /= n | ||||
| elif self.__pivot == 'ABSOLUTE': | elif self.__pivot == 'ABSOLUTE': | ||||
| pivot = mathutils.Vector((self.__pivot_x, self.__pivot_y)) | pivot = mathutils.Vector((self.__pivot_x, self.__pivot_y)) | ||||
| # apply scaling and rotation operations | # apply scaling and rotation operations | ||||
| cos_theta = math.cos(self.__angle) | cos_theta = math.cos(self.__angle) | ||||
| sin_theta = math.sin(self.__angle) | sin_theta = math.sin(self.__angle) | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | p = svert.point - pivot | ||||
| v = it.object | p.x = p.x * self.__scale_x * cos_theta - y * sin_theta | ||||
| p = v.point | p.y = p.y * self.__scale_y * sin_theta + y * cos_theta | ||||
| p = p - pivot | |||||
| x = p.x * self.__scale_x | |||||
| y = p.y * self.__scale_y | |||||
| p.x = x * cos_theta - y * sin_theta | |||||
| p.y = x * sin_theta + y * cos_theta | |||||
| v.point = p + pivot | v.point = p + pivot | ||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| return found | return found | ||||
| # Stroke caps | |||||
| def iter_stroke_vertices(stroke): | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev_p = None | |||||
| while not it.is_end: | |||||
| sv = it.object | |||||
| p = sv.point | |||||
| if prev_p is None or (prev_p - p).length > 1e-6: | |||||
| yield sv | |||||
| prev_p = p.copy() | |||||
| it.increment() | |||||
| class RoundCapShader(StrokeShader): | |||||
| def round_cap_thickness(self, x): | |||||
| x = max(0.0, min(x, 1.0)) | |||||
| return math.sqrt(1.0 - (x ** 2.0)) | |||||
| def shade(self, stroke): | |||||
| # save the location and attribute of stroke vertices | |||||
| buffer = [] | |||||
| for sv in iter_stroke_vertices(stroke): | |||||
| buffer.append((mathutils.Vector(sv.point), StrokeAttribute(sv.attribute))) | |||||
| nverts = len(buffer) | |||||
| if nverts < 2: | |||||
| return | |||||
| # calculate the number of additional vertices to form caps | |||||
| R, L = stroke[0].attribute.thickness | |||||
| caplen_beg = (R + L) / 2.0 | |||||
| nverts_beg = max(5, int(R + L)) | |||||
| R, L = stroke[-1].attribute.thickness | |||||
| caplen_end = (R + L) / 2.0 | |||||
| nverts_end = max(5, int(R + L)) | |||||
| # adjust the total number of stroke vertices | |||||
| stroke.resample(nverts + nverts_beg + nverts_end) | |||||
| # restore the location and attribute of the original vertices | |||||
| for i in range(nverts): | |||||
| p, attr = buffer[i] | |||||
| stroke[nverts_beg + i].point = p | |||||
| stroke[nverts_beg + i].attribute = attr | |||||
| # reshape the cap at the beginning of the stroke | |||||
| q, attr = buffer[1] | |||||
| p, attr = buffer[0] | |||||
| d = p - q | |||||
| d = d / d.length * caplen_beg | |||||
| n = 1.0 / nverts_beg | |||||
| R, L = attr.thickness | |||||
| for i in range(nverts_beg): | |||||
| t = (nverts_beg - i) * n | |||||
| stroke[i].point = p + d * t | |||||
| r = self.round_cap_thickness((nverts_beg - i + 1) * n) | |||||
| stroke[i].attribute = attr | |||||
| stroke[i].attribute.thickness = (R * r, L * r) | |||||
| # reshape the cap at the end of the stroke | |||||
| q, attr = buffer[-2] | |||||
| p, attr = buffer[-1] | |||||
| d = p - q | |||||
| d = d / d.length * caplen_end | |||||
| n = 1.0 / nverts_end | |||||
| R, L = attr.thickness | |||||
| for i in range(nverts_end): | |||||
| t = (nverts_end - i) * n | |||||
| stroke[-i - 1].point = p + d * t | |||||
| r = self.round_cap_thickness((nverts_end - i + 1) * n) | |||||
| stroke[-i - 1].attribute = attr | |||||
| stroke[-i - 1].attribute.thickness = (R * r, L * r) | |||||
| # update the curvilinear 2D length of each vertex | |||||
| stroke.update_length() | |||||
class SquareCapShader(StrokeShader):
    """Stroke shader that extends a stroke at both ends with flat, square caps.

    One extra vertex is inserted before the first and after the last original
    vertex; each is pushed outward along the stroke direction by half the mean
    thickness at the corresponding stroke end.
    """
    def shade(self, stroke):
        # save the location and attribute of stroke vertices, since
        # resample() below invalidates the current vertex data
        buffer = []
        for sv in iter_stroke_vertices(stroke):
            buffer.append((mathutils.Vector(sv.point), StrokeAttribute(sv.attribute)))
        nverts = len(buffer)
        if nverts < 2:
            # a cap direction cannot be derived from fewer than two vertices
            return
        # calculate the number of additional vertices to form caps
        R, L = stroke[0].attribute.thickness
        caplen_beg = (R + L) / 2.0
        nverts_beg = 1
        R, L = stroke[-1].attribute.thickness
        caplen_end = (R + L) / 2.0
        nverts_end = 1
        # adjust the total number of stroke vertices
        stroke.resample(nverts + nverts_beg + nverts_end)
        # restore the location and attribute of the original vertices
        for i in range(nverts):
            p, attr = buffer[i]
            stroke[nverts_beg + i].point = p
            stroke[nverts_beg + i].attribute = attr
        # reshape the cap at the beginning of the stroke
        q, attr = buffer[1]
        p, attr = buffer[0]
        d = p - q
        stroke[0].point = p + d / d.length * caplen_beg
        stroke[0].attribute = attr
        # reshape the cap at the end of the stroke
        q, attr = buffer[-2]
        p, attr = buffer[-1]
        d = p - q
        # fix: use caplen_end (was caplen_beg), so the end cap length is
        # derived from the thickness at the stroke's end vertex
        stroke[-1].point = p + d / d.length * caplen_end
        stroke[-1].attribute = attr
        # update the curvilinear 2D length of each vertex
        stroke.update_length()
| # Split by dashed line pattern | # Split by dashed line pattern | ||||
| Context not available. | |||||
| class SplitPatternController: | class SplitPatternController: | ||||
| def __init__(self, pattern, sampling): | def __init__(self, pattern, sampling): | ||||
| self.sampling = float(sampling) | self.sampling = float(sampling) | ||||
| k = len(pattern) // 2 | # round down to even number | ||||
| n = k * 2 | n = (len(pattern) // 2) * 2 | ||||
| self.start_pos = [pattern[i] + pattern[i + 1] for i in range(0, n, 2)] | self.start_pos = [pattern[i] + pattern[i + 1] for i in range(0, n, 2)] | ||||
| self.stop_pos = [pattern[i] for i in range(0, n, 2)] | self.stop_pos = [pattern[i] for i in range(0, n, 2)] | ||||
| self.init() | self.init() | ||||
| Context not available. | |||||
| self._pattern = pattern | self._pattern = pattern | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| index = 0 # pattern index | index = 0 # pattern index | ||||
| start = 0.0 # 2D curvilinear length | start = 0.0 # 2D curvilinear length | ||||
| visible = True | visible = True | ||||
| sampling = 1.0 | sampling = 1.0 | ||||
| l = len(self._pattern) | |||||
| it = stroke.stroke_vertices_begin(sampling) | it = stroke.stroke_vertices_begin(sampling) | ||||
| while not it.is_end: | for svert in it: | ||||
| pos = it.t # curvilinear abscissa | pos = it.t # curvilinear abscissa | ||||
| # The extra 'sampling' term is added below, because the | """ | ||||
| # visibility attribute of the i-th vertex refers to the | The extra 'sampling' term is added below, because the | ||||
| # visibility of the stroke segment between the i-th and | visibility attribute of the i-th vertex refers to the | ||||
| # (i+1)-th vertices. | visibility of the stroke segment between the i-th and | ||||
| (i+1)-th vertices. | |||||
| """ | |||||
| if pos - start + sampling > self._pattern[index]: | if pos - start + sampling > self._pattern[index]: | ||||
| start = pos | start = pos | ||||
| index += 1 | index += 1 | ||||
| if index == len(self._pattern): | if index == l: | ||||
| index = 0 | index = 0 | ||||
| visible = not visible | visible = not visible | ||||
| it.object.attribute.visible = visible | svert.attribute.visible = visible | ||||
| it.increment() | |||||
| # predicates for chaining | # predicates for chaining | ||||
| Context not available. | |||||
| return math.acos(min(max(x, -1.0), 1.0)) > self._angle | return math.acos(min(max(x, -1.0), 1.0)) > self._angle | ||||
class AndBP1D(BinaryPredicate1D):
    """Binary 1D predicate that holds when both wrapped predicates hold."""
    def __init__(self, pred1, pred2):
        BinaryPredicate1D.__init__(self)
        self._predicates = (pred1, pred2)

    def __call__(self, i1, i2):
        # short-circuits exactly like `pred1(...) and pred2(...)`
        return all(pred(i1, i2) for pred in self._predicates)
| # predicates for selection | # predicates for selection | ||||
| class LengthThresholdUP1D(UnaryPredicate1D): | class LengthThresholdUP1D(UnaryPredicate1D): | ||||
| Context not available. | |||||
| self._length_max = length_max | self._length_max = length_max | ||||
| def __call__(self, inter): | def __call__(self, inter): | ||||
| if self._length_min is None or self._length_max is None: | |||||
kjym3Unsubmitted Not Done Inline ActionsThis added condition seems wrong. If self._length_min is set to a number, then the length checking should be performed even if self._length_max is unset (and vice versa). kjym3: This added condition seems wrong. If self._length_min is set to a number, then the length… | |||||
| return False | |||||
| length = inter.length_2d | length = inter.length_2d | ||||
| if self._length_min is not None and length < self._length_min: | return (self._length_min < length < self._length_max) | ||||
| return False | |||||
| if self._length_max is not None and length > self._length_max: | |||||
| return False | |||||
| return True | |||||
| class FaceMarkBothUP1D(UnaryPredicate1D): | class FaceMarkBothUP1D(UnaryPredicate1D): | ||||
| def __call__(self, inter): # ViewEdge | def __call__(self, inter): # ViewEdge | ||||
| Context not available. | |||||
| class Curvature2DAngleThresholdUP0D(UnaryPredicate0D): | class Curvature2DAngleThresholdUP0D(UnaryPredicate0D): | ||||
| """ | |||||
| Returns True when the angle is within the given threshold. | |||||
| """ | |||||
| def __init__(self, angle_min=None, angle_max=None): | def __init__(self, angle_min=None, angle_max=None): | ||||
| UnaryPredicate0D.__init__(self) | UnaryPredicate0D.__init__(self) | ||||
| self._angle_min = angle_min | self._angle_min = angle_min | ||||
| Context not available. | |||||
| self._func = Curvature2DAngleF0D() | self._func = Curvature2DAngleF0D() | ||||
| def __call__(self, inter): | def __call__(self, inter): | ||||
| if self._angle_min is None or self._angle_max is None: | |||||
kjym3Unsubmitted Not Done Inline ActionsSame comment as in line 794. kjym3: Same comment as in line 794. | |||||
| return False | |||||
| angle = math.pi - self._func(inter) | angle = math.pi - self._func(inter) | ||||
| if self._angle_min is not None and angle < self._angle_min: | return (self.angle_min < angle < self._angle_max) | ||||
| return True | |||||
| if self._angle_max is not None and angle > self._angle_max: | |||||
| return True | |||||
| return False | |||||
| class Length2DThresholdUP0D(UnaryPredicate0D): | class Length2DThresholdUP0D(UnaryPredicate0D): | ||||
| def __init__(self, length_limit): | def __init__(self, length_limit): | ||||
| Context not available. | |||||
| seg1 = sv2.point - sv1.point | seg1 = sv2.point - sv1.point | ||||
| seg2 = sv3.point - sv2.point | seg2 = sv3.point - sv2.point | ||||
| seg3 = sv4.point - sv3.point | seg3 = sv4.point - sv3.point | ||||
| # skip to next if there are no tvertices | |||||
| if not ((is_tvertex(sv2.first_svertex) and is_tvertex(sv2.second_svertex)) or | if not ((is_tvertex(sv2.first_svertex) and is_tvertex(sv2.second_svertex)) or | ||||
| (is_tvertex(sv3.first_svertex) and is_tvertex(sv3.second_svertex))): | (is_tvertex(sv3.first_svertex) and is_tvertex(sv3.second_svertex))): | ||||
| continue | continue | ||||
| Context not available. | |||||
| integration_types = { | integration_types = { | ||||
| 'MEAN': IntegrationType.MEAN, | 'MEAN' : IntegrationType.MEAN, | ||||
| 'MIN': IntegrationType.MIN, | 'MIN' : IntegrationType.MIN, | ||||
| 'MAX': IntegrationType.MAX, | 'MAX' : IntegrationType.MAX, | ||||
| 'FIRST': IntegrationType.FIRST, | 'FIRST' : IntegrationType.FIRST, | ||||
| 'LAST': IntegrationType.LAST} | 'LAST' : IntegrationType.LAST} | ||||
| # main function for parameter processing | # main function for parameter processing | ||||
| Context not available. | |||||
| if lineset.select_external_contour: | if lineset.select_external_contour: | ||||
| upred = ExternalContourUP1D() | upred = ExternalContourUP1D() | ||||
| edge_type_criteria.append(NotUP1D(upred) if lineset.exclude_external_contour else upred) | edge_type_criteria.append(NotUP1D(upred) if lineset.exclude_external_contour else upred) | ||||
| if lineset.edge_type_combination == 'OR': | if lineset.edge_type_combination == 'OR': | ||||
| upred = join_unary_predicates(edge_type_criteria, OrUP1D) | upred = OrUP1D(*edge_type_criteria) | ||||
| else: | else: | ||||
| upred = join_unary_predicates(edge_type_criteria, AndUP1D) | upred = AndUP1D(*edge_type_criteria) | ||||
| if upred is not None: | if upred is not None: | ||||
| if lineset.edge_type_negation == 'EXCLUSIVE': | if lineset.edge_type_negation == 'EXCLUSIVE': | ||||
| upred = NotUP1D(upred) | upred = NotUP1D(upred) | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # prepare selection criteria by face marks | # prepare selection criteria by face marks | ||||
| if lineset.select_by_face_marks: | if lineset.select_by_face_marks: | ||||
| if lineset.face_mark_condition == 'BOTH': | if lineset.face_mark_condition == 'BOTH': | ||||
| Context not available. | |||||
| if lineset.face_mark_negation == 'EXCLUSIVE': | if lineset.face_mark_negation == 'EXCLUSIVE': | ||||
| upred = NotUP1D(upred) | upred = NotUP1D(upred) | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # prepare selection criteria by group of objects | # prepare selection criteria by group of objects | ||||
| if lineset.select_by_group: | if lineset.select_by_group: | ||||
| if lineset.group is not None: | if lineset.group is not None: | ||||
| names = dict((ob.name, True) for ob in lineset.group.objects) | names = dict((ob.name, True) for ob in lineset.group.objects) | ||||
| upred = ObjectNamesUP1D(names, lineset.group_negation == 'EXCLUSIVE') | upred = ObjectNamesUP1D(names, lineset.group_negation == 'EXCLUSIVE') | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # prepare selection criteria by image border | # prepare selection criteria by image border | ||||
| if lineset.select_by_image_border: | if lineset.select_by_image_border: | ||||
| xmin, ymin, xmax, ymax = ContextFunctions.get_border() | xmin, ymin, xmax, ymax = ContextFunctions.get_border() | ||||
| upred = WithinImageBoundaryUP1D(xmin, ymin, xmax, ymax) | upred = WithinImageBoundaryUP1D(xmin, ymin, xmax, ymax) | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # select feature edges | # select feature edges | ||||
| upred = join_unary_predicates(selection_criteria, AndUP1D) | upred = AndUP1D(*selection_criteria) | ||||
| if upred is None: | if upred is None: | ||||
| upred = TrueUP1D() | upred = TrueUP1D() | ||||
| Operators.select(upred) | Operators.select(upred) | ||||
| # join feature edges to form chains | # join feature edges to form chains | ||||
| if linestyle.use_chaining: | if linestyle.use_chaining: | ||||
| if linestyle.chaining == 'PLAIN': | if linestyle.chaining == 'PLAIN': | ||||
| Context not available. | |||||
| if linestyle.split_dash3 > 0 and linestyle.split_gap3 > 0: | if linestyle.split_dash3 > 0 and linestyle.split_gap3 > 0: | ||||
| pattern.append(linestyle.split_dash3) | pattern.append(linestyle.split_dash3) | ||||
| pattern.append(linestyle.split_gap3) | pattern.append(linestyle.split_gap3) | ||||
| if len(pattern) > 0: | if len(pattern): | ||||
kjym3Unsubmitted Not Done Inline ActionsI prefer either if len(pattern) > 0 or if pattern. kjym3: I prefer either `if len(pattern) > 0` or `if pattern`. | |||||
| sampling = 1.0 | sampling = 1.0 | ||||
| controller = SplitPatternController(pattern, sampling) | controller = SplitPatternController(pattern, sampling) | ||||
| Operators.sequential_split(SplitPatternStartingUP0D(controller), | Operators.sequential_split(SplitPatternStartingUP0D(controller), | ||||
| Context not available. | |||||
| Operators.select(LengthThresholdUP1D(length_min, length_max)) | Operators.select(LengthThresholdUP1D(length_min, length_max)) | ||||
| # sort selected chains | # sort selected chains | ||||
| if linestyle.use_sorting: | if linestyle.use_sorting: | ||||
| integration = integration_types.get(linestyle.integration_type, IntegrationType.MEAN) | |||||
| if linestyle.sort_key == 'DISTANCE_FROM_CAMERA': | if linestyle.sort_key == 'DISTANCE_FROM_CAMERA': | ||||
| integration = integration_types.get(linestyle.integration_type, IntegrationType.MEAN) | |||||
| bpred = pyZBP1D(integration) | bpred = pyZBP1D(integration) | ||||
| elif linestyle.sort_key == '2D_LENGTH': | elif linestyle.sort_key == '2D_LENGTH': | ||||
| bpred = Length2DBP1D() | bpred = Length2DBP1D() | ||||
| if linestyle.sort_order == 'REVERSE': | if linestyle.sort_order == 'REVERSE': | ||||
| bpred = NotBP1D(bpred) | bpred = NotBP1D(bpred) | ||||
| Operators.sort(bpred) | Operators.sort(bpred) | ||||
| # prepare a list of stroke shaders | # prepare a list of stroke shaders | ||||
| shaders_list = [] | shaders_list = [StrokeCleaner()] | ||||
| ### | |||||
| shaders_list.append(StrokeCleaner()) | |||||
| ### | ### | ||||
| for m in linestyle.geometry_modifiers: | for m in linestyle.geometry_modifiers: | ||||
| if not m.use: | if not m.use: | ||||
| Context not available. | |||||
| shaders_list.append(TipRemoverShader( | shaders_list.append(TipRemoverShader( | ||||
| m.tip_length)) | m.tip_length)) | ||||
| elif m.type == 'POLYGONIZATION': | elif m.type == 'POLYGONIZATION': | ||||
| shaders_list.append(PolygonalizationShader( | shaders_list.append(PolygonalizationShader(m.error)) | ||||
| m.error)) | |||||
| elif m.type == 'GUIDING_LINES': | elif m.type == 'GUIDING_LINES': | ||||
| shaders_list.append(GuidingLinesShader( | shaders_list.append(GuidingLinesShader(m.offset)) | ||||
| m.offset)) | |||||
| elif m.type == 'BLUEPRINT': | elif m.type == 'BLUEPRINT': | ||||
| if m.shape == 'CIRCLES': | if m.shape == 'CIRCLES': | ||||
| shaders_list.append(pyBluePrintCirclesShader( | shaders_list.append(pyBluePrintCirclesShader( | ||||
| Context not available. | |||||
| elif m.type == '2D_TRANSFORM': | elif m.type == '2D_TRANSFORM': | ||||
| shaders_list.append(Transform2DShader( | shaders_list.append(Transform2DShader( | ||||
| m.pivot, m.scale_x, m.scale_y, m.angle, m.pivot_u, m.pivot_x, m.pivot_y)) | m.pivot, m.scale_x, m.scale_y, m.angle, m.pivot_u, m.pivot_x, m.pivot_y)) | ||||
| color = linestyle.color | color = linestyle.color | ||||
| if (not linestyle.use_chaining) or (linestyle.chaining == 'PLAIN' and linestyle.use_same_object): | if (not linestyle.use_chaining) or (linestyle.chaining == 'PLAIN' and linestyle.use_same_object): | ||||
| thickness_position = linestyle.thickness_position | thickness_position = linestyle.thickness_position | ||||
| Context not available. | |||||
How about a further simplification: